% Dataset-extraction residue (not part of the paper source):
% text
% stringlengths
% 56
% 7.94M
\begin{document} \title*{Parabolic Anderson model with voter catalysts: dichotomy in the behavior of Lyapunov exponents} \titlerunning{PAM with voter catalysts: dichotomy in the behavior of Lyapunov exponents} \author{G.\ Maillard, T.\ Mountford and S.\ Sch\"opfer} \institute{G.\ Maillard \at CMI-LATP, Universit\'e de Provence, 39 rue F. Joliot-Curie, F-13453 Marseille Cedex 13, France, \email{[email protected]}, and EURANDOM, P.O.\ Box 513, 5600 MB Eindhoven, The Netherlands \and T.\ Mountford \at Institut de Math\'ematiques, \'Ecole Polytechnique F\'ed\'erale, Station 8, 1015 Lausanne, Switzerland, \email{[email protected]} \and S.\ Sch\"opfer \at Institut de Math\'ematiques, \'Ecole Polytechnique F\'ed\'erale, Station 8, 1015 Lausanne, Switzerland, \email{[email protected]}} \maketitle \abstract*{} \abstract{We consider the parabolic Anderson model $\partial u/\partial t = \kappa\Delta u + \gamma\xi u$ with $u\colon\, \Z^d\times \R^+\to \R^+$, where $\kappa\in\R^+$ is the diffusion constant, $\Delta$ is the discrete Laplacian, $\gamma\in\R^+$ is the coupling constant, and $\xi\colon\,\Z^d\times \R^+\to\{0,1\}$ is the voter model starting from Bernoulli product measure $\nu_{\rho}$ with density $\rho\in (0,1)$. The solution of this equation describes the evolution of a ``reactant'' $u$ under the influence of a ``catalyst'' $\xi$.\newline\indent In G\"artner, den Hollander and Maillard \cite{garholmai10} the behavior of the \emph{annealed} Lyapunov exponents, i.e., the exponential growth rates of the successive moments of $u$ w.r.t.\ $\xi$, was investigated. 
It was shown that these exponents exhibit an interesting dependence on the dimension and on the diffusion constant.\newline\indent In the present paper we address some questions left open in \cite{garholmai10} by considering specifically when the Lyapunov exponents are equal to their a priori maximal value in terms of strong transience of the Markov process underlying the voter model.} \section{Introduction} \label{S1} \subsection{Model} \label{S1.1} The parabolic Anderson model (PAM) is the partial differential equation \begin{equation} \label{pA} \frac{\partial}{\partial t}u(x,t) = \kappa\Delta u(x,t) + \gamma\xi(x,t)u(x,t), \qquad x\in\Z^d,\,t\geq 0, \end{equation} with $u$ a $\R^+$-valued field, $\kappa\in\R^+$ a diffusion constant, $\Delta$ the discrete Laplacian, acting on $u$ as \begin{equation*} \label{} \Delta u(x,t) = \sum_{{y\in\Z^d} \atop {y\sim x}} [u(y,t)-u(x,t)] \end{equation*} ($y\sim x$ meaning that $y$ is a nearest neighbor of $x$), $\gamma\in\R^+$ a coupling constant and \begin{equation*} \label{} \xi = (\xi_t)_{t \geq 0} \quad\text{with}\quad \xi_t = \{\xi_{t}(x):=\xi(x,t) \colon\,x\in\Z^d\} \end{equation*} the \emph{Voter Model} (VM) taking values in $\{0,1\}^{\Z^d\times\R^+}$. As initial condition, we choose \begin{equation} \label{inicond} u(x,0) = 1, \qquad x\in \Z^d. \end{equation} One can interpret (\ref{pA}) in terms of population dynamics. Consider a system of two types of particles, $A$ ``catalyst'' and $B$ ``reactant'', subject to: \begin{itemize} \item $A$-particles evolve autonomously according to the voter dynamics; \item $B$-particles perform independent random walks at rate $2d\kappa$ and split into two at a rate that is equal to $\gamma$ times the number of $A$-particles present at the same location; \item the initial configuration of $B$-particles is one particle everywhere. 
\end{itemize} Then $u(x,t)$ can be interpreted as the average number of $B$-particles at site $x$ at time $t$ conditioned on the evolution of the $A$-particles. \subsection{Voter Model} \label{S1.2} The VM is the Markov process on $\{0, 1\}^{\Z^d}$ with generator $L$ acting on cylindrical functions $f$ as \begin{equation*} L f(\eta) =\sum_{x\in\Z^{d}}\sum_{y\in\Z^d}p(x,y)\Big(f(\eta^{x,y})- f(\eta)\Big), \end{equation*} where $p\colon\Z^d\times\Z^d\to[0,1]$ is the transition kernel of an irreducible random walk and $\eta^{x,y}$ is the configuration \begin{equation*} \begin{cases} \eta^{x,y} (z)= \eta (z)& \forall z\not=x,\\ \eta^{x, y} (x)= \eta(y). \end{cases} \end{equation*} In words, $\xi(x,t)=1$ and $\xi(x,t)=0$ mean the presence and the absence of a particle at site $x$ at time $t$, respectively. Under the VM dynamics, the presence and absence of particles are imposed according to the random walk transition kernel $p(\cdot,\cdot)$. The VM was introduced independently by Clifford and Sudbury \cite{clisud73} and by Holley and Liggett \cite{hollig75}, where the basic results concerning equilibria were shown. Let $(S_{t})_{t\geq 0}$ be the Markov semigroup associated with $L$, $p^{(s)}(x,y) = (1/2)[p(x,y)+p(y,x)]$, $x,y\in\Z^d$, be the \emph{symmetrized} transition kernel associated with $p(\cdot,\cdot)$, and $\mu_\rho$ the \emph{equilibrium measure} with density $\rho\in(0,1)$. When $p^{(s)}(\cdot\,,\cdot)$ is \emph{recurrent} all equilibria are \emph{trivial}, i.e., of the form $\mu_\rho = (1-\rho)\delta_0+\rho\delta_1$, while when $p^{(s)}(\cdot\,,\cdot)$ is \emph{transient} there are also \emph{non-trivial} equilibria, i.e., ergodic measures $\mu_\rho$, different from the previous one, which are the unique shift-invariant and ergodic equilibrium with density $\rho\in (0,1)$. 
For both cases we have \begin{equation} \label{VMerg1} \mu S_{t} \to \mu_\rho \qquad \mbox{ weakly as } t\to\infty \end{equation} for any starting measure $\mu$ that is stationary and ergodic with density $\rho$ (see Liggett \cite{lig85}, Corollary~V.1.13). This is in particular the case for our choice $\mu:=\nu_{\rho}$, the Bernoulli product measure with density $\rho\in(0,1)$. \subsection{Lyapunov exponents} \label{S1.3} Our focus of interest will be on the $p$-th \emph{annealed} Lyapunov exponent, defined by \begin{equation} \label{aLyapdef} \lambda_p = \lim_{t\to\infty} \frac{1}{t} \log \EE_{\,\nu_{\rho}} \big([u(0,t)]^p \big)^{1/p}, \qquad p \in \N, \end{equation} which represents the exponential growth rate of the $p$-th moment of the solution of the PAM (\ref{pA}), where $\EE_{\,\nu_{\rho}}$ denotes the expectation w.r.t.\ the $\xi$-process starting from Bernoulli product measure $\nu_{\rho}$ with density $\rho\in(0,1)$. Note that $\lambda_{p}$ depends on the parameters $\kappa$, $d$, $\gamma$ and $\rho$ with the two latter being fixed from now. If the above limit exists, then, by H\"older's inequality, $\kappa\mapsto\lambda_{p}(\kappa)$ satisfies \begin{equation*} \lambda_{p}(\kappa) \in [\rho\gamma,\gamma] \qquad\forall \kappa\in[0,\infty). 
\end{equation*} The behavior of the annealed Lyapunov exponents with VM catalysts has already been investigated by G\" artner, den Hollander and Maillard \cite{garholmai10}, where it was shown that: \begin{itemize} \item the Lyapunov exponents defined in (\ref{aLyapdef}) exist and do not depend on the choice of the starting measure $\nu_{\rho} S_{T}$, $T\in[0,\infty]$, where $\nu_{\rho}S_{\infty}:=\mu_{\rho}$ denotes the equilibrium measure of density $\rho$ (recall (\ref{VMerg1})); \item the function $\kappa\mapsto \lambda_{p}(\kappa)$ is globally Lipschitz outside any neighborhood of $0$ and satisfies $\lambda_{p}(\kappa)>\rho\gamma$ for all $\kappa\in[0,\infty)$; \item the Lyapunov exponents satisfy the following dichotomy (see Figure \ref{fig-lambda01}): \begin{itemize} \item when $1\leq d\leq 4$, if $p(\cdot,\cdot)$ has zero mean and finite variance, then $\lambda_{p}(\kappa)=\gamma$ for all $\kappa\in[0,\infty)$; \item when $d\geq 5$, \begin{itemize} \item $\lim_{\kappa\to 0}\lambda_{p}(\kappa)=\lambda_{p}(0)$; \item $\lim_{\kappa\to \infty}\lambda_{p}(\kappa)=\rho\gamma$; \item if $p(\cdot,\cdot)$ has zero mean and finite variance, then $p\mapsto \lambda_{p}(\kappa)$ is strictly increasing for $\kappa\ll 1$. \end{itemize} \end{itemize} \end{itemize} The following questions were left open (see \cite{garholmai10}, Section 1.8): \begin{description}[(Q1)] \item[(Q1)] Does $\lambda_{p}<\gamma$ when $d\geq 5$ if $p(\cdot,\cdot)$ has zero mean and finite variance? \item[(Q2)] Is there a full dichotomy in the behavior of the Lyapunov exponents? Namely, $\lambda_{p}<\gamma$ if and only if $p^{(s)}(\cdot,\cdot)$ is strongly transient, i.e., \begin{equation*} \label{strongtransient} \int_{0}^\infty t p_{t}^{(s)}(0,0) \,dt<\infty \, . 
\end{equation*} \end{description} Since any transition kernel $p(\cdot,\cdot)$ in $d\geq 5$ satisfies $\int_{0}^{\infty}t\, p_{t}(0,0)dt<\infty$, a positive answer to (Q2) will also ensure a positive one to (Q1) in the particular case when $p(\cdot,\cdot)$ is symmetric. Theorems \ref{th1}--\ref{th3} in Section \ref{S1.4} give answers to question (Q2), depending on the symmetry of $p(\cdot,\cdot)$. A positive answer to (Q1), given in Theorem \ref{th0}, can also be deduced from our proof of Theorem \ref{th1}. \begin{figure} \caption{\small Dichotomy of the behavior of $\kappa\mapsto\lambda_p(\kappa)$ when $p(\cdot\,,\cdot)$ has zero mean and finite variance.} \label{fig-lambda01} \end{figure} By the Feynman-Kac formula, the solution of (\ref{pA}--\ref{inicond}) reads \begin{equation*} u(x,t) = \ES_{\,x}\left(\exp\left[\gamma\int_0^t \xi\left(X^\kappa(s),t-s\right)\,ds\right]\right), \end{equation*} where $X^\kappa=(X^\kappa(t))_{t \geq 0}$ is a simple random walk on $\Z^d$ with step rate $2d\kappa$ and $\ES_{\,x}$ denotes the expectation with respect to $X^\kappa$ given $X^\kappa(0)=x$. This leads to the following representation of the Lyapunov exponents \begin{equation*} \lambda_{p}=\lim_{t\to \infty}\Lambda_{p}(t) \end{equation*} with \begin{equation*} \label{} \Lambda_p(t) = \frac{1}{pt} \log \big(\EE_{\,\nu_{\rho}}\otimes\ES_{\,0}^{\otimes p}\big) \Bigg(\exp\Bigg[\gamma\int_0^t \sum_{j=1}^{p} \xi\big(X_{j}^{\kappa}(s),t-s\big)\,ds\Bigg]\Bigg), \end{equation*} where $X_{j}^\kappa$, $j=1,\ldots,p$, are $p$ independent copies of $X^\kappa$. In the above expression, the $\xi$ and $X^\kappa$ processes are evolving in time reversed directions. It is nevertheless possible to let them run in the same time evolution by using the following arguments. 
Let $\widetilde\Lambda_p(t)$ denote the $\xi$-\emph{time-reversal} analogue of $\Lambda_p(t)$ defined by \begin{equation*} \label{} \widetilde\Lambda_p(t)= \frac{1}{pt} \log \big(\EE_{\,\nu_{\rho}}\otimes\ES_{\,0}^{\otimes p}\big) \Bigg(\exp\Bigg[\gamma\int_0^t \sum_{j=1}^{p} \xi\big(X_{j}^{\kappa}(s),s\big)\,ds\Bigg]\Bigg) \end{equation*} and denote $\underline\Lambda_p(t) =$ \begin{eqnarray*} &&\frac{1}{pt} \log \max_{x\in\Z^d}\big(\EE_{\,\nu_{\rho}}\otimes\ES_{\,0}^{\otimes p}\big) \Bigg(\exp\Bigg[\gamma\int_0^t \sum_{j=1}^{p} \xi\big(X_{j}^{\kappa}(s),t-s\big)\,ds\Bigg] \prod_{j=1}^{p}\delta_{x}\big(X_{j}^\kappa(t)\big)\Bigg)\nonumber\\ &&=\frac{1}{pt} \log \max_{x\in\Z^d}\big(\EE_{\,\nu_{\rho}}\otimes\ES_{\,0}^{\otimes p}\big) \Bigg(\exp\Bigg[\gamma\int_0^t \sum_{j=1}^{p} \xi\big(X_{j}^{\kappa}(s),s\big)\,ds\Bigg] \prod_{j=1}^{p}\delta_{x}\big(X_{j}^\kappa(t)\big)\Bigg),\\ \end{eqnarray*} where in the last line we reverse the time of the $\xi$-process by using that $\nu_{\rho}$ is shift-invariant and $X_{j}^\kappa$, $j=1,\ldots,p$, are time-reversible. As noted in \cite{garhol06}, Section 2.1, $\lim_{t\to\infty} [\Lambda_{p}(t)-\underline\Lambda_{p}(t)]=0$ and, using the same argument, $\lim_{t\to\infty} [\widetilde\Lambda_{p}(t)-\underline\Lambda_{p}(t)]=0$, after which we can conclude that \begin{equation*} \label{fey-kac1} \lambda_p(\kappa)= \lim_{t\to\infty}\frac{1}{pt} \log \big(\EE_{\,\nu_{\rho}}\otimes\ES_{\,0}^{\otimes p}\big) \Bigg(\exp\Bigg[\gamma\int_0^t \sum_{j=1}^{p} \xi\big(X_{j}^{\kappa}(s),s\big)\,ds\Bigg]\Bigg). \end{equation*} \subsection{Main results} \label{S1.4} In what follows we give answers to questions (Q1) and (Q2) addressed in \cite{garholmai10} concerning when the Lyapunov exponents are trivial, i.e., equal to their a priori maximal value $\gamma$. Our first theorem gives a positive answer to (Q1). It will be proved in Section \ref{S2} as a consequence of the proof of Theorem \ref{th1}. 
\begin{theorem} \label{th0} If $d\geq 5$ and $p(\cdot,\cdot)$ has zero mean and finite variance, then $\lambda_{p}(\kappa)<\gamma$ for all $p\geq 1$ and $\kappa\in[0,\infty)$. \end{theorem} Our two next theorems state that the full dichotomy in (Q2) holds in the case when $p(\cdot,\cdot)$ is symmetric (see Fig.\ \ref{fig-lambda02}). They will be proved in Section \ref{S2} and \ref{S3}, respectively. \begin{theorem} \label{th1} If $p(\cdot,\cdot)$ is symmetric and strongly transient, then $\lambda_{p}(\kappa)<\gamma$ for all $p\geq 1$ and $\kappa\in[0,\infty)$. \end{theorem} \begin{theorem} \label{th2} If $p(\cdot,\cdot)$ is symmetric and not strongly transient, then $\lambda_{p}(\kappa)=\gamma$ for all $p\geq 1$ and $\kappa\in[0,\infty)$. \end{theorem} \begin{figure} \caption{\small Full dichotomy of the behavior of $\kappa\mapsto\lambda_p(\kappa)$ when $p(\cdot,\cdot)$ is symmetric.} \label{fig-lambda02} \end{figure} A similar full dichotomy also holds for the case where $\xi$ is symmetric exclusion process in equilibrium, between recurrent and transient $p(\cdot,\cdot)$ (see \cite{garholmai07}). Our fourth theorem shows that this full dichotomy only holds for symmetric transition kernels $p(\cdot,\cdot)$, ensuring that the assertion in (Q2) is not true in its full generality. \begin{theorem} \label{th3} There exists $p(\cdot,\cdot)$ not symmetric with $p^{(s)}(\cdot,\cdot)$ not strongly transient such that $\lambda_{p}(\kappa)<\gamma$ for all $p\geq 1$ and $\kappa\in[0,\infty)$. \end{theorem} In the strongly transient regime, the following problems remain open: \begin{description}[(a)] \item[(a)] $\lim_{\kappa\to 0} \lambda_{p}(\kappa)=\lambda_{p}(0)$; \item[(b)] $\lim_{\kappa\to\infty}\lambda_{p}(\kappa)=\rho\gamma$; \item[(c)] $p\mapsto\lambda_{p}(\kappa)$ is strictly increasing for $\kappa\ll 1$; \item[(d)] $\kappa\mapsto \lambda_{p}(\kappa)$ is convex on $[0,\infty)$. 
\end{description} In \cite{garholmai10}, (a) and (b) were established when $d\geq 5$, and (c) when $d\geq 5$ and $p(\cdot,\cdot)$ has zero mean and finite variance. Their extension to the case when $p(\cdot,\cdot)$ is strongly transient remains open. In what follows, we use generic notation $\PP$ and $\EE$ for probability and expectation whatever the corresponding process is (even for joint processes) and denote $\xi_{s}(x):=\xi(s,x)$. \section{Proof of Theorems \ref{th0} and \ref{th1}} \label{S2} We first give the proof of Theorem \ref{th1}. Recall that the transition kernel associated to the Voter Model $\xi$ is assumed to be symmetric. At the end of the section we will explain how to derive the proof of Theorem \ref{th0}. We have to show that $\lambda_{p}(\kappa)<\gamma$ for all $\kappa\in[0,\infty)$. In what follows we assume without loss of generality that $p=1$, the extension to arbitrary $p\geq 1$ being straightforward. Our approach is to pick a \emph{bad environment set} $B_{E}$ associated to the $\xi$-process and a \emph{bad random walk set} $B_{W}$ associated to the random walk $X^{\kappa}$ so that, for all $n \in \N$, \begin{eqnarray} \label{th1.1} &&\EE\Big(\exp\Big[\gamma\int_{0}^n \xi_{s}(X^\kappa(s))\, ds\Big]\Big)\\ &&\qquad\leq \Big(\PP(B_{E})+\PS(B_{W})\Big)e^{\gamma n} + \EE \Big(\rlap{\mbox{\small\rm 1}}\kern.15em 1_{B_{E}^\c\cap B_{W}^\c} \exp\Big[\gamma\int_{0}^n \xi_{s}(X^\kappa(s))\, ds\Big]\Big) \nonumber \end{eqnarray} with, for some $0<\delta<1$, \begin{equation} \label{th1.2} \PP(B_{E})\leq e^{-\delta n}, \qquad \PS(B_{W})\leq e^{-\delta n}, \end{equation} and, \begin{equation} \label{th1.3} \int_{0}^{n} \xi_{s}(X^\kappa(s))\,ds \leq n(1-\delta) \qquad\text{on } B_{E}^\c\cap B_{W}^\c. 
\end{equation} Since, combining (\ref{th1.1}--\ref{th1.3}), we obtain \begin{equation*} \label{th1.4} \lim_{n\to\infty}\frac{1}{n}\log \EE\Big(\exp\Big[\gamma\int_{0}^n \xi_{s}(X^\kappa(s))\, ds\Big]\Big) < \gamma \, , \end{equation*} it is enough to prove (\ref{th1.2}) and (\ref{th1.3}). The proof of (\ref{th1.2}) is given in Sections \ref{S2.1}--\ref{S2.3} below, and (\ref{th1.3}) will be obvious from our definitions of $B_E$ and $B_W$. \subsection{Coarse-graining and skeletons} \label{S2.1} Write $\Z_{\rm e}^d=2\Z^d$ and $\Z_{\rm o}^d=2\Z^d+1$, where $1=(1,\ldots,1)\in\Z^d$. We are going to use a \emph{coarse-graining representation} defined by a \emph{space-time block partition} $B_y^j$ and a \emph{random walk skeleton} $(y_i)_{i\geq 0}$. To that aim, for a fixed $M$, consider \begin{equation*} B_y^j = \prod_{k=1}^d \big[(y_{k}-1)M, (y_{k}+1)M\big) \times \big[jM,(j+1)M\big) \subset \Z^d \times \R_+\, , \end{equation*} where $j \in \N_0:=\N \cup \{0\}$ and \begin{equation*} y \in \left\{\begin{array}{ll}\Z_{\rm e}^d & \text{when $j$ is even,} \\ \Z_{\rm o}^d & \text{when $j$ is odd.} \end{array}\right. \end{equation*} Without loss of generality we can consider random walks trajectories on interval $[0,n]$ with $n \in \N$ multiple of $M$. Define the \emph{$M$-skeleton set} set by \begin{equation*} \Xi=\left\{\left(y_0,\ldots,y_{\frac{n}{M}}\right) \in (\Z^d)^{\frac{n}{M}+1} \colon y_{2k} \in \Z_{\rm e}^d,\,\,\, y_{2k+1} \in \Z_{\rm o}^d\,\,\, \forall k \in \N_0\right\} \end{equation*} and the \emph{$M$-skeleton set associated to a random walk} $X$ by \begin{equation*} \Xi(X) = \left\{\left(y_0, \ldots ,y_{\frac{n}{M}}\right) \in \Xi\colon X(kM) \in B_{y_k}^k \, \forall k \in \left\{0, \ldots ,n/M\right\}\right\} \, . \end{equation*} In what follows, we will consider the $M$-skeleton $\Xi(X^\kappa)$, but, as $X^\kappa$ starts from $0 \in \Z^d$, the first point of our $M$-skeleton will always be $y_{0}:=0\in\Z^d$ (see Fig.\ \ref{fig-skeleton}). 
\begin{figure} \caption{\small Illustration of an $M$-skeleton $(y_{0},\ldots,y_{n/M})$.} \label{fig-skeleton} \end{figure} In the next lemma we prove that the number of $M$-skeletons not oscillating too much is at most exponential in $n/M$. For that, define \begin{equation} \label{2.1.01} \Xi_A=\left\{\big(y_0, \ldots ,y_{\frac{n}{M}}\big)\in \Xi \colon \sum_{j=1}^{n/M} (\|y_j-y_{j-1}\|_\infty-1) \leq \frac{n}{Md}\right\} \, , \end{equation} the set of all $M$-skeletons that are \emph{appropriate}, where $\|\cdot\|_\infty$ is the standard $l_\infty$ norm. \begin{lemma} \label{appropriate} There exists some universal constant $K \in (1,\infty)$ such that, for any $n,M \in \N$, \begin{equation*} |\Xi_A| \leq K^{n/M} \, . \end{equation*} \end{lemma} \begin{proof} For any fixed $y_1 \in \Z^d$ and $N\in\N_{0}$, let \begin{equation*} I(N)=\big|\big\{y_2 \in \Z^d \colon \|y_1-y_2\|_\infty -1 = N \big\}\big| \end{equation*} be the number of elements of $\Z^d$ on the boundary of the cube of size $2N+3$ centered at $y_1$. For any $N \in \N$, we have $I(N) = (2N+3)^d-(2N+1)^d$ and $I(0)=3^d-1$, therefore, for any $N \in \N_0$, \begin{equation} \label{IBd} I(N) \leq 3^d(N+1)^d. \end{equation} Define, for any $N,k \in \N$, \begin{equation} \label{2.1.02} I(N,k)=\left|\left\{(y_j)_{0 \leq j \leq k} \in (\Z^d)^{k+1}\colon \sum_{j=1}^k (\|y_j-y_{j-1}\|_\infty -1) = N\right\}\right| \end{equation} the number of sequences in $(\Z^d)^{k+1}$ having size $N$. By (\ref{IBd}) and (\ref{2.1.02}), we have \begin{eqnarray} \label{2.1.03} I(N,k) &=& \sum_{{(N_1, \ldots ,N_k)\colon}\atop{\sum_{i=1}^k N_i = N}} \left( \prod_{i=1}^k I(N_i)\right) \nonumber\\ &\leq& 3^{dk} \sum_{{(N_1, \ldots ,N_k)\colon}\atop{\sum_{i=1}^k N_i = N}} \left( \prod_{i=1}^k (N_i+1)\right)^d\nonumber\\ &\leq& 3^{dk} \binom{k+N+1}{N} 2^{dN} \, , \end{eqnarray} where, in the last line, we used that \begin{equation*} \max_{{(N_1, \ldots ,N_k)\colon}\atop{\sum_{i=1}^k N_i = N}} \prod_{i=1}^k (N_i+1) = 2^N. 
\end{equation*} Using (\ref{2.1.03}) and the fact that $\Xi \subset (\Z^d)^{\frac{n}{M}+1}$, we obtain \begin{eqnarray*} |\Xi_{A}| &\leq& \sum_{N=0}^{\left\lfloor \frac{n}{Md} \right\rfloor} I\left(N,\frac{n}{M}\right) \leq 3^{\frac{n}{M}} \sum_{N=0}^{\left\lfloor \frac{n}{Md} \right\rfloor} \binom{\frac{n}{M}+N+1}{N} 2^{dN}\\ &\leq & 3^{\frac{n}{M}} \sum_{N=0}^{\left\lfloor \frac{n}{Md} \right\rfloor} \binom{\frac{2n}{M}}{N} 2^{dN} \leq 3^{\frac{n}{M}} \sum_{N=0}^{\frac{2n}{M}} \binom{\frac{2n}{M}}{N} 2^{dN}\\ & = & 3^{\frac{n}{M}} (2^d+1)^{\frac{2n}{M}}\, , \end{eqnarray*} which ends the proof of the lemma. \qed \end{proof} \subsection{The bad environment set $B_E$} \label{S2.2} This section is devoted to the proof of the leftmost part of (\ref{th1.2}) for a suitable set $B_E$ defined below. We say that an environment $\xi$ is \emph{good} w.r.t.\ an $M$-skeleton $(y_0, \cdots ,y_{\frac{n}{M}})$ if we have \begin{equation*} \label{2.2.01} \left| \left\{0 \leq j < \frac{n}{M}\colon \exists (x,jM) \in B_{y_j}^j \text{ s.t. } \xi_s(x)=0 \,\,\, \forall s \in [jM,jM+1]\right\}\right| \geq \frac{n}{4M} \, . \end{equation*} Since we want the environment to be good w.r.t.\ all appropriate $M$-skeletons, we define the \emph{bad environment set} as \begin{equation*} B_E = \{\exists \text{ an $M$-skeleton} \in \Xi_A \text{ s.t. } \xi \text{ is not good w.r.t.\ it}\}. \end{equation*} In the next lemma we prove that for any fixed $M$-skeleton, the probability that $\xi$ is not good w.r.t.\ it is at most exponentially small in $n/M$. \begin{lemma} \label{be.lem} Take $(y_i)_{0 \leq i \leq \frac{n}{M}} \in \Xi$ an $M$-skeleton. For $M$ big enough, we have \begin{equation*} \PP\left(\xi \text{ is not good w.r.t. } (y_i)_{0 \leq i \leq \frac{n}{M}}\right) \leq (4K)^{-n/M} \, , \end{equation*} where $K$ is the universal constant defined in Lemma \ref{appropriate}. 
\end{lemma} Therefore, combining Lemmas \ref{appropriate} and \ref{be.lem}, we get \begin{equation*} \PP(B_E) \leq 4^{-n/M} \end{equation*} for $M$ big enough, from which we obtain the leftmost part of (\ref{th1.2}). Before proving Lemma \ref{be.lem}, we first give an auxiliary lemma. In order to study the evolution of the VM, we consider, as usual, the dual process, namely, a coalescing random walk system that evolves backward in time. To that aim, define $(X^{x,t}(s))_{0\leq s\leq t}$ to be the random walk starting from $x$ at time $t$ (i.e., $X^{x,t}(0)=x$). From the graphical representation of the VM, we can write $\xi_{t}(x)=\xi_{0}(X^{x,t}(t))$, $x\in\Z^d$, and therefore the VM process can be expressed in terms of its initial configuration and a system of coalescing random walks. Two random walks $X^{x,s}$ and $X^{x^\prime,s^\prime}$ with $s^\prime<s$ meet if there exists $u\leq s^\prime$ such that $X^{x^\prime,s^\prime}(s^\prime-u)=X^{x,s}(s-u)$. It is therefore the same to say that $X^{x,s}$ and $X^{x^\prime,s^\prime}$ with $s^\prime<s$ meet (in some appropriate time interval) if there exists $t\geq 0$ in this interval (by letting $t= s^\prime-u$) such that \begin{equation*} X^{x^\prime,s^\prime}(t)=X^{x,s}(t+s-s^\prime)\, . \end{equation*} For convenience we will adopt this notation in the rest of the section. \begin{lemma} \label{2.2.lem1} Take two independent random walks $X^{x,s}$ and $X^{x^\prime,s^\prime}$ with $s^\prime<s$. Then the probability they ever meet is bounded above by \begin{equation*} \int_{s-s^\prime}^\infty p_t(0,0)\, dt \, . \end{equation*} \end{lemma} \begin{proof} Consider the random variable \begin{equation*} W=\int_0^\infty 1 \left\{X^{x,s}(t)=X^{x^\prime,s^\prime}(t+(s-s^\prime))\right\} \, dt\, . 
\end{equation*} By symmetry, its expectation satisfies \begin{eqnarray*} \EE(W) &=& \int_0^\infty \PP\left(X^{0,0}(2t+s-s^\prime)=x-x^\prime\right) \, dt\\ &\leq& \int_0^\infty \PP\left(X^{0,0}(2t+s-s^\prime)=0\right) \, dt\\ &=& \frac{1}{2} \int_{s-s^\prime}^\infty p_t(0,0) \, dt \, . \end{eqnarray*} Moreover, we have \begin{equation*} \EE(W \,|\, W>0) = \int_0^\infty p_{2t}(0,0)\, dt \geq \frac{1}{2} \end{equation*} and then, since $\EE(W)=\EE(W \,|\, W>0)\, \PP(W>0)$, it follows that \begin{equation*} \PP(W>0) \leq 2\EE(W)=\int_{s-s^\prime}^\infty p_t(0,0) \, dt \, . \end{equation*} \qed \end{proof} We are now ready to prove the Lemma \ref{be.lem}. \begin{proof} Recall that $\xi_s(x)=\xi_0(X^{x,s}(s))$, where $\xi_0$ is distributed by a product Bernoulli law with density $\rho\in(0,1)$. We first consider any $M$-skeleton $(y_0, \ldots ,y_{n/M}) \in \Xi$ (even not appropriate). For each $0 \leq j < n/M$, we choose $R$ sites $(x_1^j,jM), \ldots ,$ $(x_R^j,jM) \in B_{y_j}^j$ such that \begin{equation} \label{cond2} \PP\left(\exists 0 \leq k,k^\prime \leq R,\,\,\, k\neq k^\prime\colon X^{x_k^j,jM}(s)=X^{x_{k^\prime}^j,jM}(s) \text{ for some } s \in [0,jM]\right) \leq \epsilon \end{equation} for $\epsilon \ll 1$ to be specified later (see Fig.\ \ref{fig-x_k}). Remark that we first fix $\epsilon$ and $R$ and then we choose $M$ large enough so we can find these $R$ sites. As we are in the strongly transient regime, we know that these points exist. If two such random walks hit each other, then we freeze all the random walks issuing from the corresponding block $j$. 
\begin{figure} \caption{\small Illustration of sites $(x_k^j,jM)$, $k\in\{1,\ldots,R\}$.} \label{fig-x_k} \end{figure} For any $1 \leq j < \frac{n}{M}$, $1 \leq k \leq R$ for some $R>0$, we have \begin{eqnarray*} &&\EE\left(\sum_{j^\prime=j+1}^{\frac{n}{M}-1} \,\, \sum_{k^\prime=1}^R 1\left\{X^{x_{k^\prime}^{j^\prime},j^\prime M}(s+(j^\prime-j)M) = X^{x_k^j,jM}(s) \text{ for some } s \in [0,jM]\right\}\right)\\ &&\qquad \leq R \sum_{j^\prime=j+1}^{\frac{n}{M}-1} \int_{(j^\prime-j)M}^\infty p_t(0,0) \, dt\nonumber\\ &&\qquad \leq R \int_M^\infty\frac{t}{M}\,p_t(0,0) \, dt \, , \end{eqnarray*} and therefore, summing over $1\leq k\leq R$, we get \begin{eqnarray} \label{2.2.08} &&\EE\left(\sum_{j^\prime=j+1}^{\frac{n}{M}-1} \,\, \sum_{k,k^\prime=1}^R 1 \left\{X^{x_{k^\prime}^{j^\prime},j^\prime M}(s+(j^\prime-j)M) = X^{x_k^j,jM}(s) \text{ for some } s \in [0,jM]\right\}\right)\nonumber\\ &&\qquad \leq \frac{R^2}{M} \int_M^\infty t \,p_t(0,0) \, dt \leq \epsilon^2, \end{eqnarray} for $M$ sufficiently large. Again, remark that we first fix $\epsilon$ and $R$, then we choose $M$ large enough. For each $j$, we now define the filtration \begin{equation*} \label{2.2.10} \mathcal{F}^j_t = \sigma\left(X^{x_k^j,jM}(s)\colon 0 \leq s \leq t,\,\, 1\leq k \leq R\right) \end{equation*} and the sub-martingale $Z^j(t):=$ \begin{equation*} \EE\left(\sum_{j^\prime=j+1}^{\frac{n}{M}-1} \,\, \sum_{k,k^\prime=1}^R 1\left\{X^{x_{k^\prime}^{j^\prime},j^\prime M}(s+(j^\prime-j)M) = X^{x_k^j,jM}(s) \text{ for some } s\in [0,t] \right\}\,\,\,\bigg|\,\,\, \mathcal{F}_t^j\right) \, , \end{equation*} with the stopping time \begin{equation*} \tau^j=\inf\left\{t \geq 0\colon Z^j(t)>\epsilon \right\} \, . \end{equation*} We freeze every random walk issuing from block $j$ at time $jM \wedge \tau^j$. 
Using (\ref{2.2.08}) and the Doob's inequality, we can see that \begin{equation} \label{2.2.12} \PP(\tau^j \leq jM)\leq \PP\left(\sup_{0 \leq t \leq jM} Z^j_t \geq \epsilon\right) \leq \frac{R^2}{M\epsilon} \, \int_M^\infty t p_t(0,0) \, dt \leq \epsilon \, . \end{equation} Since, $Z^j$ is a continuous sub-martingale except at jump times of one of the random walks $X^{x_k^j,jM}$ and when a jump occurs, the increment is at most \begin{equation*} R \sum_{j^\prime=j+1}^{\frac{n}{M}-1} p_{(j^\prime-j)M}(0,0) \leq \epsilon^2 \end{equation*} if $M$ is big enough. Therefore, for all $0 \leq t \leq \tau^j$, we get \begin{equation} \label{2.2.15} Z^j(t) < \epsilon+\epsilon^2 \leq 2\epsilon \qquad \PP\text{-a.s.}\, . \end{equation} Now we say that $j$ is \emph{good} if \begin{itemize} \item $\tau^j> jM$; \item the $R$ random walks $X^{x_1^j,jM}, \cdots ,X^{x_R^j,jM}$ do not meet; \item the random walks $X^{x_k^j,jM}$ do not hit any point $x_{k^\prime}^{j^\prime}$ during interval $[(j-j^\prime )M-1,(j-j^\prime )M]$ for $j^\prime < j$; \item the random walks $X^{x_k^j,jM}$ do not meet $X^{x_{k^\prime}^{j^\prime},j^\prime M}$ for $j^\prime < j$. \end{itemize} By (\ref{2.2.12}), we know that the probability that the first condition does not occur is smaller than $\epsilon$. By definition of the sites $x_1^j, \ldots,x_R^j$, we know that the probability that random walks issuing from the same block $j$ at sites $x_{k}^j$, $k=1,\ldots,R$, hit each other is smaller than $\epsilon$ (recall (\ref{cond2})). Moreover, the probability that the third condition does not occur is bounded from above by \begin{equation*} R \sum_{j^\prime=1}^{j-1} \, \int_{(j-j^\prime)M-1}^{(j-j^\prime)M} p_t(0,0) \, dt \end{equation*} which is as small as we want for $M$ large because we are in a transient case. We still have to compute the probability that the fourth condition does not occur. 
Furthermore, we can see that two random walks issuing from the same block evolve independently until they meet, provided they do not meet a previous random walk. From the above consideration, we get \begin{eqnarray*} \PP\left(j \text{ is not good}\,\,\big|\,\,\mathcal{G}^{j-1}\right) &\leq& 3\epsilon + \sum_{j^\prime=1}^{j-1} \,\, \sum_{k,k^\prime=1}^R \PP\left(X^{x_k^j,jM} \text{ meets } X^{x_{k^\prime}^{j^\prime},j^\prime M} \,\,\big|\,\, \mathcal{G}^{j^\prime}\right) \, , \end{eqnarray*} where \begin{equation*} \label{2.2.16} \mathcal{G}^j= \sigma\left(X^{x_k^{j^\prime},j^\prime M}(s)\colon 1\leq k \leq R,\, 1\leq j^\prime \leq j,\, 0 \leq s \leq j^\prime M \wedge \tau^{j^\prime}\right)\, . \end{equation*} Here, we recall that $X^{x_k^j,jM}$ meets $X^{x_{k^\prime}^{j^\prime},j^\prime M}$ (with $j^\prime<j$) if we have \begin{equation*} X^{x_{k}^{j},j M}(s+(j-j^\prime)M) = X^{x_k^{j^\prime},j^\prime M}(s) \text{ for some } s\in \big[0,\tau^{j^\prime} \wedge j^\prime M\big] \, . \end{equation*} By (\ref{2.2.15}), we have, for all $j^\prime$ fixed, \begin{equation*} \sum_{j=j^\prime+1}^{\frac{n}{M}-1} \,\, \sum_{k,k^\prime=1}^R \PP\left(X^{x_k^j,jM} \text{ meets } X^{x_{k^\prime}^{j^\prime},j^\prime M}\,\,\,\bigg|\,\,\, \mathcal{G}^{j^\prime}\right) \leq 2\epsilon \, . \end{equation*} Summing over all $1\leq j^\prime\leq n/M-2$, we get \begin{equation*} \sum_{j^\prime=1}^{\frac{n}{M}-2} \,\, \sum_{j=j^\prime+1}^{\frac{n}{M}-1} \,\, \sum_{k,k^\prime=1}^R \PP\left(X^{x_k^j,jM} \text{ meets } X^{x_{k^\prime}^{j^\prime},j^\prime M}\,\,\,\bigg|\,\,\, \mathcal{G}^{j^\prime}\right) \leq \frac{n}{M} \, 2\epsilon \, , \end{equation*} and then, interchanging the sums, we arrive at \begin{equation*} \sum_{j=2}^{\frac{n}{M}-1} \,\, \sum_{j^\prime=1}^{j-1} \,\, \sum_{k,k^\prime=1}^R \PP\left(X^{x_k^j,jM} \text{ meets } X^{x_{k^\prime}^{j^\prime},j^\prime M}\,\,\,\bigg|\,\,\, \mathcal{G}^{j^\prime}\right) \leq \frac{n}{M} \, 2\epsilon \, . 
\end{equation*} Thus, there are at most $\left\lfloor \frac{n}{2M} \right\rfloor$ random positions $j$ with the property \begin{equation*} \sum_{j^\prime=1}^{j-1} \,\, \sum_{k,k^\prime=1}^R \PP\left(X^{x_k^j,jM} \text{ meets } X^{x_{k^\prime}^{j^\prime},j^\prime M}\,\,\,\bigg|\,\,\, \mathcal{G}^{j^\prime}\right) \geq 4\epsilon \, , \end{equation*} and so at least $\left\lceil \frac{n}{2M} \right\rceil-2$ random positions $j$ have the property \begin{equation*} \sum_{j^\prime=1}^{j-1} \,\, \sum_{k,k^\prime=1}^R \PP\left(X^{x_k^j,jM} \text{ meets } X^{x_{k^\prime}^{j^\prime},j^\prime M}\,\,\,\bigg|\,\,\, \mathcal{G}^{j^\prime}\right) < 4\epsilon \, . \end{equation*} For these random positions $j$, we then have \begin{equation*} \PP\big(j \text{ is good } |\,\, \mathcal{G}^{j-1}\big) \geq 1-7\epsilon. \end{equation*} Using an elementary coupling, we have at least $\frac{n}{3M}$ positions that are good with probability bounded by \begin{equation*} \PP\left(Y \geq \frac{n}{3M}\right) \geq 1-e^{-c(\epsilon)n/M} \end{equation*} for \begin{equation*} Y \sim B\left(\left\lceil \frac{n}{2M} \right\rceil-2,1-7\epsilon\right) \end{equation*} and $c(\epsilon) \to \infty$ as $\epsilon \to 0$. Therefore, outside of a small probability $e^{-c(\epsilon)n/M}$, for at least $\frac{n}{3M}$ positions $j$, we have that the random walks $X^{x_k^{j},j M}$ are disjoint and so the values $\xi_{0}(X^{x_k^{j},j M}(s))$ are independent until time $s\leq j M$. 
Then, using the fact that $(\xi_0(x))_{x \in \Z^d}$ are i.i.d.\ Bernoulli product with parameter $\rho$, we have that the number of positions $j$ so that there exists $(x,j M) \in B^{j}_{y_{j}}$ with $\xi_s(x)=0$ for all $s\in[jM, jM+1]$ is at least $\frac{n}{4M}$ outside the probability \begin{equation*} \PP\left(Y^\prime \geq \frac{n}{12M}\right) \leq \left(\frac{1}{4K}\right)^{n/M} \end{equation*} with $Y^\prime \sim B\left(\frac{n}{3M},\left((1-e^{-1}(1-\rho))^R\right)\right)$, where $\epsilon$ and $R$ are chosen small and large enough, respectively. \qed \end{proof} The proof of the leftmost part of (\ref{th1.2}) is now completed. \subsection{The bad random walk set $B_W$} \label{S2.3} This section is devoted to the proof of the rightmost part of (\ref{th1.2}). We are now interested in the random walk $X^\kappa$. We are going to prove that $(X^\kappa(s))_{0\leq s\leq n}$ has an appropriate $M$-skeleton and touches enough zeros outside a probability event exponentially small in $n$ (see Lemmas \ref{bw1} and \ref{bw2} below, respectively). To define the bad random walk set $B_{W}$ announced in (\ref{th1.1}), we are going to define $B_{W}=B_{W_{1}}\cup B_{W_{2}}$, where the bad sets $B_{W_{1}}$ and $B_{W_{2}}$ correspond, respectively, to random walk trajectories $X^\kappa$ which do not have an appropriate $M$-skeleton and do not touch enough sites occupied by a zero configuration of the VM. To be more precise, define \begin{equation*} B_{W_{1}}=\big\{\Xi(X^\kappa) \notin \Xi_A\big\}\, . \end{equation*} In the next lemma, we prove that the probability of $B_{W_{1}}$ is exponentially small in $n$. \begin{lemma} \label{bw1} Take $(X^\kappa(s))_{0 \leq s \leq n}$ and $\Xi(X^\kappa)=(y_0, \cdots ,y_{n/M})$ the associated $M$-skeleton. Then, there exists a constant $K^\prime$ not depending on $n$ such that \begin{equation*} \label{2.3.10} \PP(B_{W_{1}})\leq e^{-K^\prime n} \, . 
\end{equation*} \end{lemma} \begin{proof} In order to have the random walk moving from one block of the skeleton to a nonadjacent one, the random walk has to make at least $M$ steps in the same direction. Keeping that in mind, define \begin{equation*} Y_j(s) = X^\kappa(jM+s)-X^\kappa(jM) \end{equation*} and let \begin{equation*} \tau_1^j = \inf\{s\colon \|Y_j(s)\|_\infty \geq M\}, \qquad \tau_i^j = \inf\big\{s > \tau_{i-1}^j\colon \|Y_j(s)-Y_j(\tau_{i-1}^j)\|_\infty \geq M\big\}. \end{equation*} Next, define \begin{equation*} W_i^j = \boldsymbol{1}_{\{\tau_i^j < M\}} \end{equation*} and use an elementary coupling to have \begin{equation*} \PP(W_1^j=1) \leq e^{-c/M} \quad \text{and} \quad \PP(W_i^j=1|W_{i-1}^j=1) \leq e^{-c(i)/M} \leq e^{-c/M} \end{equation*} for some constants $c$ and $c(i)$ which satisfy $c(i) \geq c$. Therefore, we have that the number of jumps for the $j$th block is bounded above by the number of $W_i^j$ equal to $1$. Using a coupling we can see that this is bounded above by a geometric law with parameter $e^{-c/M}$. Now if we consider all the blocks, by elementary properties of geometric random variables, we have \begin{equation*} \PP(B_{W_1}) \leq \PP\left(Y \geq \frac{n}{Md}\right) \leq e^{-c^\prime n} \end{equation*} for $Y \sim B\left(\frac{n}{M}\left(1+\frac{1}{d}\right),e^{-c/M}\right)$, some constant $c^\prime>0$ and $M$ large, and the proof is done. \qed \end{proof} Lemma \ref{bw1} proves the first part of the rightmost part of (\ref{th1.2}), namely the part concerned with bad set $B_{W_{1}}$. Now we look at the number of times $X^\kappa$ stays on a site where the VM has zero value. For that, define \begin{equation*} \tau_{i+1}=\inf\left\{t>\tau_i+1\colon \exists x \in \Z^d \text{ s.t.
} \|x-X^\kappa(t)\|_\infty \leq 2M,\, \xi_s(x)=0 \,\,\, \forall s \in [t,t+1] \right\} \end{equation*} with $\tau_0=0$ and \begin{equation*} k(M)=e^{-1/2}\inf_{\|x\|_\infty \leq 2M}\PP\big(X^\kappa(1/2)=x\,\,|\,\,X^\kappa(0)=0\big) \end{equation*} (remark that $k(M)$ does not depend on $n$ and is strictly positive). Finally, we define \begin{equation*} \label{3.4.1} B_{W_2}=\left\{(X^\kappa,\xi) \colon \tau_{\left\lfloor\frac{n}{2M}\right\rfloor} \leq n-1 \text{ and } \int_0^n \xi_s(X^\kappa(s))\, ds \geq n \left(1-\frac{k(M)}{8M}\right)\right\} \end{equation*} as being the bad set corresponding to random walk trajectories $X^\kappa$ which do not touch enough sites occupied by a zero configuration of the VM. In the next lemma, we prove that such a set has an exponentially small probability in $n$. \begin{lemma} \label{bw2} There exists a constant $\delta > 0$ not depending on $n$, such that, for $M$ big enough, we have \begin{equation*} \PP(B_{W_2}) \leq e^{-n\delta}\, . \end{equation*} \end{lemma} \begin{proof} Take any realization of $\xi$ and for each time $\tau_i$, define a random variable $Y_i$ which takes value $1$ if $X^\kappa$ reaches a site with value zero at time $\tau_i+\frac{1}{2}$ and stays at that point until time $\tau_i+1$, and takes value $0$ otherwise. Remark that after having fixed $n$, we can choose the state of $\xi_{t}$, $t>n$, as we want, for example, full of zeros. Continue until $\tau_{\left\lfloor n/(2M) \right\rfloor}$, which is finite if $\xi$ is well chosen after time $n$. Using the strong Markov property, for every $k_i \in \{0,1\}$, we see that \begin{equation*} \PP(Y_i=1|Y_j=k_j, j<i) = \PP(Y_i=1|Y_{i-1}=k_{i-1}) \geq k(M) \, . \end{equation*} Then, it follows that $Y:=\sum_{i=1}^{\left\lfloor n/(2M) \right\rfloor} Y_i$ is stochastically greater than $Y^\prime$, the binomial random variable $B\left(\frac{n}{2M},k(M)\right)$.
Moreover, if $\tau_{\left\lfloor n/(2M) \right\rfloor}\leq n-1$, we have \begin{equation*} \int_0^n \xi_s(X^\kappa(s))\,ds \leq n-\frac{1}{2} \, Y \, . \end{equation*} Hence, we get \begin{eqnarray*} \PP(B_{W_2}) &\leq& \PP\left(\tau_{\left\lfloor \frac{n}{2M} \right\rfloor} \leq n-1 \,\,\text{ and }\,\, n-\frac{1}{2}\,Y \geq n-\frac{nk(M)}{8M}\right)\\ &\leq& \PP\left(n-\frac{Y}{2} \geq n -\frac{nk(M)}{8M}\right)\\ &=& \PP\left(Y \leq \frac{nk(M)}{4M}\right)\\ &\leq& e^{-cn} \end{eqnarray*} for $n$ sufficiently large and $c$ a positive constant not depending on $n$. This result being shown for any realization of $\xi$ (up to time $n$), this ends the proof. \qed \end{proof} Lemma \ref{bw2} proves the second part of the rightmost part of (\ref{th1.2}), namely the part concerned with bad set $B_{W_{2}}$. To complete the proof of (\ref{th1.3}), it suffices to use the definition of $B_E$ and $B_W=B_{W_1} \cup B_{W_2}$, and to remark that if $B_E$ and $B_{W_1}$ do not occur, then the first condition of $B_{W_2}$, namely $\tau_{\lfloor n/(2M) \rfloor}\leq n-1$, is satisfied and therefore the second must be violated. \subsection{Proof of Theorem \ref{th0}} \label{S2.4} The proof of Theorem \ref{th0} can be deduced from the proof of Theorem \ref{th1}. Without assuming that $p(\cdot,\cdot)$ is symmetric, it is enough to see that \begin{itemize} \item $\displaystyle\int_{0}^{\infty} t p_{t}(0,0)\, dt<\infty$, by local CLT, and \item there is enough symmetry because there exists some $C>0$ such that $p_{t}(x,0)\leq C p_{t}(0,0)$ for all $x\in\Z^d$ and $t\in[0,\infty)$. Therefore, Lemma \ref{be.lem} can still be applied. \end{itemize} From these two observations, the proof of Theorem \ref{th0} goes along the same lines as that of Theorem \ref{th1}.
\section{Proof of Theorem \ref{th2}} \label{S3} In this section we consider the Lyapunov exponents when the random walk kernel associated to the voter model noise is symmetric and also not strongly transient, that is \begin{equation*} \int_{0}^{\infty} t p_{t} (0,0)\, dt = \infty \, . \end{equation*} We want to show that when $p(\cdot,\cdot)$ is symmetric and not strongly transient, then \begin{equation*} \lambda_{p} (\kappa) \equiv \gamma \qquad \forall \kappa \in (0, \infty),\,\, \forall p \geq 1 \, . \end{equation*} Since the result is easily seen for recurrent random walks, we can and will assume in the following that \begin{equation*} \int_{0}^{\infty} p_{t} (0,0)\, dt < \infty \, . \end{equation*} Given the reasoning of \cite{garholmai07}, Section 3.1 and \cite{garholmai10}, Section 5.1, this result will follow from Proposition \ref{prop2} below. Consider, in the graphical representation associated to the VM $\xi$, \begin{equation*} \chi(t) := \text{ number of distinct coalescing random walks produced on } \{0\} \times [0,t] \end{equation*} (This quantity is discussed in Bramson, Cox and Griffeath \cite{bramcoxgri88}). \begin{proposition} \label{prop2} Assume that $p(\cdot,\cdot)$ is symmetric and not strongly transient. Then, for any $\epsilon > 0$, we have that \begin{equation*} \lim_{t \to \infty} \PP \big(\chi(t) \leq \epsilon t\big) = 1 \, . \end{equation*} \end{proposition} Before proving Proposition \ref{prop2}, we will first give the proof of Theorem \ref{th2}. \begin{proof} From the graphical representation of the VM and Proposition \ref{prop2}, we can see that for all $\delta > 0$ and $M < \infty$, \begin{equation*} \PP\Big(\xi (x, s) = 1 \,\,\, \forall \|x \|_{\infty} \leq M, \,\, \forall s \in [0, t]\Big) \geq e^{-\delta t} \end{equation*} for all $t$ sufficiently large (see \cite{garholmai10}, proof of Lemma 5.1).
Thus, just as in \cite{garholmai10}, Section 5.1, we have for all $p \geq 1$, \begin{eqnarray*} &&\EE ([u(0, t)]^{p})\\ &&\qquad\geq e^{\gamma p t} \PP\Big(\|X^\kappa(s) \|_{\infty} < M \,\,\, \forall s\in [0,t]\Big)^{p}\, \PP\Big(\xi (x, s) = 1 \,\,\, \forall \| x \|_{\infty} < M,\, \forall s\in [0,t]\Big)\\ &&\qquad\geq e^{t (\gamma p - \delta -c(M) p)} \end{eqnarray*} with $c(M)\to 0$ as $M\to \infty$. From this, it is immediate that \begin{equation*} \lim_{t\to\infty}\frac{1}{t} \log \EE([u(0, t)]^{p})^{1/p} = \gamma. \end{equation*} \qed \end{proof} To prove Proposition \ref{prop2}, we consider the following system of coalescing random walks \begin{equation*} I= \{X^{t} \colon t \in \cP \} \end{equation*} for $\cP$ a two-sided, rate-one Poisson process and $X^{t}$ a random walk defined on $s \in [t, \infty)$, starting at $0$ at time $t$ (we could equally well consider a system of random walks indexed by $h \Z$ for some constant $h$). The coalescence is such that for $t < t^\prime \in \cP$, $X^{t}$, $X^{t^\prime}$ evolve independently until $T^{t, t^\prime}= \inf\{s > t^\prime \colon X^{t}(s) = X^{t^\prime}(s)\}$, and then, for $s \geq T^{t, t^\prime}$, $X^{t}(s) = X^{t^\prime}(s)$. We will be interested in the density or number of distinct random walks at certain times. To aid this line of argument, we will adopt a labeling procedure for the random walks, whereby effectively when two random walks meet for the first time, one of them (chosen at random) dies; from this viewpoint the number of distinct random walks will be the number still alive. Our labeling scheme involves defining for each $t\in \cP$ the label process $l_{s}^{t}$ for $s \geq t-$ (it will be helpful to be able to define $l_{t-}^{t} = t$, though since at time $t$ there may well be other random walks present at the origin, it will not necessarily be the case that $ l_{t}^{t} = t$).
These processes will be defined by the following properties: \begin{itemize} \item if for $t \ne t^\prime \in \cP, X^t(s) \ne X^{t^\prime}(s)$, then $l^{t }_s \ne l^{t^\prime}_{s}$; \item if $t_{1}, t_{2} , \ldots,t_{r} $ are elements of $\cP$, then at $s \geq \max \{ t_{1}, \ldots, t_{r} \}$, if $X^{t_{1}}(s) =X^{t_{2}}(s) = \cdots= X^{t_{r}}(s)$, then $l^{t_{1 } }_{s}=l^{t_{2}}_{s} = \cdots =l^{t_{r}}_{s} =u $ for some $ u \in \cP$ with $ X^{t_{1}}(s) =X^{u}(s)$; \item if for $t \ne t^\prime \in \cP$, $X^t $ meets $X^{t^\prime}$ for the first time at $s$, then independently of past and future random walks or labeling decisions $l^{t }_s = l^{t^\prime}_s = l^{t^\prime}_{s-}$ with probability $\frac12 $ and with equal probability $l^{t }_s = l^{t^\prime}_s = l^{t}_{s-}$; \item the process $l^t_{s}$ can only change at moments where $X^t $ meets a distinct random walk for the first time. \end{itemize} For $t \in \cP$, $s > t$, we say that $t$ is \emph{alive at time} $s$, if $l^t_s = t$; it \emph{dies} at time $s$ if $l_{s-}^{t} = t, l_{s}^{t} \ne t$. We say $X^t$, $X^u$ \emph{coalesce} at time $s$ if this is the first time at which the two labels are equal. The following are easily seen: \begin{itemize} \item the events $A_{s}^{t} = \{l_{s}^{t} = t \}$ for $t \in \cP$ are decreasing in $s$; \item $A_{s}^{t}$ depends only on the random motions of the coalescing random walks and on the labeling choices involving $X^{t}$; \item for $s > 0, $ the number of independent random walks $X^{t}(s) $, $t \in \cP \cap [-n, 0]$ is simply equal to the number of distinct labels $l_{s}^{t}$, $t \in \cP \cap [-n, 0]$. \end{itemize} Let \begin{equation} \label{Atslim} c_0 = \lim_{s \to \infty} \PP^{t} (A_{s}^{t}) \in [0,1], \end{equation} according to palm measure, $\PP^{t}$, for $t \in \cP$. 
We obtain easily: \begin{proposition} \label{lem3} \begin{equation*} \lim_{s\to\infty}\frac{1}{s} \big| \big\{ \text{distinct random walks } X^{t}(0) \colon t \in \cP \cap [-s,0) \big\}\big| = c_0\qquad\text{a.s.} \end{equation*} \end{proposition} \begin{proof} Using the definition of $c_0$ in (\ref{Atslim}) and ergodicity of the system, we see that the limit is at least $c_0$. Then, Lemma \ref{prop4} gives the result. \qed \end{proof} \begin{lemma} \label{prop4} For $c_0$ as defined in (\ref{Atslim}), for each $\epsilon > 0$, there exists $R < \infty$ so that if we consider the finite system of coalescing random walks ${(X^{t})}_{t \in (-R, 0] \cap \cP}$, then with probability at least $1-\epsilon$, at time $R$ there are less than $(c_0 + \epsilon)R$ distinct random walk labels. \end{lemma} \begin{proof} By definition of $c_0$, for all $\epsilon>0$ there exists a $T_{0}$ so that \begin{equation*} \PP^{0} \big(\text{ label } 0 \text{ is alive at time } s\big) < c_0 + \frac{\epsilon}{100} \qquad \forall s \geq T_{0}. \end{equation*} Now pick $R_{1}$ so that \begin{equation*} \PP \big( \|X^{0}(s)\|_{\infty} \leq R_{1} \,\,\, \forall s\in(0,T_{0})\big) \geq 1- \frac{\epsilon}{100}. \end{equation*} Therefore, \begin{eqnarray*} &&\PP^{0} \big(\text{ label } 0 \text{ is not alive at time } s\geq T_{0} \text{ and } \|X^{0}(s)\|_{\infty} \leq R_{1} \,\,\, \forall s\in(0,T_{0})\big) \\ &&\qquad\geq 1 - c_{0} - \frac{2\epsilon}{100}\, . \end{eqnarray*} We then pick $T_{1}$ so that \begin{equation*} \PP\big(\exists t \in \cP \cap {[-T_{1}, T_{1}]}^{\c} \colon \big\|X^{t}(s)\big\|_{\infty} \leq R_{1}\,\,\, \text{for some } s \in (0, T_{0})\big) < \frac{\epsilon}{100}. \end{equation*} Thus \begin{equation*} \PP^0\big(\exists t \in \cP \cap [-T_{1}, T_{1}] \setminus \{0\} \colon l_{R_1}^{0}=t\big) \geq 1-c_0- \frac{\epsilon}{30}.
\end{equation*} From the translation invariance of the system and ergodicity, if \begin{equation*} \lambda_{s} := \Big|\Big\{t \in [-s,s] \cap \cP\colon X^{t} \text{ loses its label to a random walk } X^{t^\prime} \text{ with } |t-t^\prime| \leq T_{1}\Big\}\Big|, \end{equation*} then \begin{equation*} \liminf_{s \rightarrow \infty} \frac{\lambda_{s}}{2 s} \geq 1-c_0-\frac{\epsilon}{30}\qquad \text{a.s.} \end{equation*} The result now follows easily. \qed \end{proof} Proposition \ref{prop2} will be proven by showing: \begin{proposition} \label{prop3} If $p(\cdot,\cdot)$ is symmetric and not strongly transient, then $c_0 =0$. \end{proposition} The proof of Proposition \ref{prop3} will work for any Poisson process rate, in particular for $\cP$ having rate $M \gg 1$. The distinct random walks treated in Proposition \ref{prop2} can be divided into those coalesced with a random walk from the system derived from $\cP$ (and so by Proposition \ref{prop3} of small ``density'') and those uncoalesced (also of small ``density'' if $M$ is large). Thus Proposition \ref{prop2} follows almost immediately from Proposition \ref{prop3}. The argument for Proposition \ref{prop3} is low level and intuitive. We argue by contradiction and suppose that $c_0 > 0$. From this we can deduce, loosely speaking, that after a certain time either a random walk has lost its original label, or it will keep it forever. We then introduce a coupling of these random walks so that we may regard these random walks as essentially independent random walks starting at $0$ (at different times). We then introduce convenient comparison systems so that we can analyze subsequent coalescences. We will use automatically, without reference, the following ``obvious'' result: \begin{lemma} Consider two collections of coalescing random walks $\{Y^i\}$ and $\{(Y^\prime)^i\}$ for $i$ in some index set.
If the coalescence rule is weaker for the $\{(Y^\prime)^i\}$ system, in that if two walks $(Y^\prime)^i$ and $(Y^\prime)^j$ are permitted to coalesce at time $t$, then so are $Y^{i}$ and $Y^{j}$, then there is a coupling of the two systems so that the weaker contains the stronger. \end{lemma} We now fix $\epsilon > 0$ so that $\epsilon \ll c_0$ (by hypothesis $c_0 > 0$). We choose $R$ according to Lemma \ref{prop4} and divide up time into intervals $I_{j} = [jR, (j+1)R)$. We first consider the coalescing system where random walks $X^{t}$, $X^{t^\prime}$, $t, t^\prime \in \cP$, can only ``coalesce'' (or destroy a label $t$ or $t^\prime$) if $t, t^\prime$ are in the same $I_{j}$ interval. Thus we have a system of random walks that is invariant under time shifts by integer multiples of $R$. We now introduce a system of random walks $Y^{t}$, $t \in V := \cup_j \{ [jR, (j+1)R) \cap \big(jR+\frac{1}{c_0} \Z\big) \}$. The random walks $Y^{t}$, $t \in \big[jR,(j+1)R\big)$ are not permitted to coalesce up until time $(j+1)R$ (at least) and will evolve independently of the system ${(X^{t})}_{t \in \cP}$ until time $(j+1)R$. We will match up the points in $V \cap I_j$ with those in $\cP \cap I_{j}\cap K$ in a maximal measurable way, for $K = \{t \in \cP \colon \text{label } t \text{ survives to time } (j+1)R\}$. \begin{lemma} \label{lem8} Unmatched points in $\cup_{j} \cP \cap I_{j} \cap K$ and in $V$ have density less than $2 \epsilon$ for $R$ fixed sufficiently large. \end{lemma} Remark: the system is not translation invariant with respect to all shifts but it possesses enough invariance for us to speak of densities. We similarly have \begin{lemma} Unmatched $Y$ particles have density less than $2\epsilon$ for $R$ fixed sufficiently large. \end{lemma} It is elementary that two random walks $X$, $Z$ can be coupled so that for $t$ sufficiently large $X(t) = Z(t)$.
For given $\epsilon > 0$ we choose $M_{0}$ and then $M_{1}$ so that \begin{equation} \label{M0def} \PP\Big(\sup_{t \leq R} \|X(t)\|_{\infty} \geq M_{0} \Big) < \frac{\epsilon}{10} \end{equation} and \begin{equation} \label{M1def} \sup_{|z|\leq 2 M_{0}} \PP \Big(X_0, Z_{z} \text{ not coupled by time } M_{1}\Big) < \frac{\epsilon}{10}, \end{equation} where $X_0$ and $Z_{z}$ denote the random walks $X$ and $Z$ started at $0$ and at $z$, respectively. We then (on interval $I_{j}$) couple systems $Y$ and $X$ by letting married pairs $Y^{t}$, $X^{t^\prime}$, $t \in V$, $t^\prime \in \cP \cap K$, evolve independently of other $Y$, $X$ random walks so that they couple by time $M_{1} + (j+1) R$ with probability at least $1- \frac{3\epsilon}{10}$. Thus we have two types of random walk labels, $l^t$, for the $X$ system which are equal to $t$ at time $t+R+M_{1}$: those for which the associated random walk was paired with a $Y$ random walk and such that the random walks have coupled by time $t+R+M_{1}$, said to be \emph{coupled}, and the others, said to be \emph{uncoupled}. Similarly for the points in $V$ associated to $Y$ random walks. We note that the foregoing implies that the density of uncoupled labels is bounded by $2\epsilon+\frac{3\epsilon}{10} \leq 3 \epsilon$. The point is that modulo this small density, we have an identification of the coalescing $X$ random walks and the $Y$ random walks. We now try to show that enough $Y$ particles will coalesce in a subsequent time interval to imply that there will be a significant decrease in surviving labels for the $X$ system. To do this, we must bear in mind that, essentially, it will be sufficient to show a decrease in the density of $Y$ random walk labels definitely greater than $\epsilon$.
Secondly, as already noted, we will adopt a coalescence scheme that is a little complicated namely $Y^{i}, Y^{i^\prime}$ in $V$ can only ``coalesce'' at time $t \geq \max\{i, i^\prime\}$ if \begin{itemize} \item $(t-i^\prime, t-i)$ are in some time set to be specified; \item for $i<i^\prime$, $\frac{t-i^\prime}{i^\prime-i} \in \left(\frac{9}{10}, \frac{11}{10} \right)$. \end{itemize} We now begin to specify our coalescence rules for the random walk system $\{Y^{i}\}_{i \in V}$. The objective here will be to facilitate the necessary calculations. A first objective is to have coalescence of $Y^{i}, Y^{i^\prime}$ at times $t > \max\{i, i^\prime\}$ so that $p_{ t}(0,0)$ is well behaved around $t-i$, $t-i^\prime$. It follows from symmetry of the random walk that $t \mapsto p_{t}(0,0)$ is decreasing. The problem we address is that it is not immediate how to achieve bounds in the opposite direction. This is the purpose of the next result. \begin{lemma} \label{lem5} Consider positive $\{a_{n}\}_{n\geq 0}$ so that $\sum_{n=0}^{\infty} a_{n} = \infty$. For all $r \in \Z_{+}$, there exists a subsequence $\{a_{n_{i}}\}_{i\geq 0}$ so that\\ (i) $\sum a_{n_{i}} = \infty$;\\ (ii) $a_{n_{i}} > \frac{1}{2} a_{n_{i}-r}$, $\forall i\geq 0$. \end{lemma} \begin{proof} If $r > 1$ we may consider the $r$ subsequences ${\{a_{ri +j}\}}_{i\geq 0}$ for $j \in \{0, 1, \cdots, r-1\}$. At least one of these must satisfy $\sum {a_{ri+j}} = \infty$ so, without loss of generality, we take $r=1$. Now we classify $i$ as good or bad according to whether $a_{i} > a_{i-1}/2$ or not. This decomposes $\Z$ into intervals of bad sites, alternating with intervals of good sites. By geometric bounds, the sum of bad sites is bounded by the sum of the good $a_{i}$ for which $i$ is the right end point of a good interval. Thus we have \begin{equation*} \sum_{i \textrm{ good}} a_{i}= \infty, \end{equation*} from which the result is immediate. 
\qed \end{proof} \begin{corollary} \label{lem5.1} For our symmetric kernel $p_{t}(0,0)$ we can find $n_{i} \uparrow \infty$ so that\\ (i) $\displaystyle \sum_{i} \int_{2^{n_{i}}}^{2^{n_{i}+1}} t p_{t}(0,0) dt = \infty$;\\ (ii) $\displaystyle p_{2^{n_{i}-1}}(0,0) \leq 2^{12}p_{2 ^{n_{i} +3}} (0,0)$. \end{corollary} \begin{proof} In Lemma \ref{lem5} take \begin{equation*} a_{n}= \int_{2^{n+3}}^{2^{n+4}} t p_{t} (0,0) dt \end{equation*} and take $r=5$. Then by the monotonicity of $t \rightarrow p_t(0,0)$, we have \begin{equation*} 2^{2n_{i}+7}p_{2^{n_{i}+3}} (0,0) \geq a_{n_{i}} \geq \frac{1}{2} \, a_{n_{i}-5} \geq \frac{1}{2} \, 2^{2n_{i}-4}p_{2^{n_{i}-1}} (0,0). \end{equation*} \qed \end{proof} We fix such a sequence $\{n_{j}\}_{j\geq 1}$ once and for all. We assume, as we may, that $n_{j} < n_{j+1} -4$ for all $j\geq 1$ and also assume again, as we may, that \begin{equation*} \int_{2^{n_{j}}}^{2^{n_{j} +1}} t p_{t} (0,0) dt < \frac{\epsilon}{100} \end{equation*} for all $j\geq 1$. We are now ready to consider our coalescence rules. We choose $\epsilon \ll \alpha \ll 1$ (we will fully specify $\alpha$ later on but we feel it more natural to defer the technical relations). We then choose $k_{0}$ so that $2^{n_{k_{0}}} > R+M_{1}$ with $R$ as in Lemma $\ref{prop4}$ and Lemma $\ref{lem8}$ and $M_{1}$ as in (\ref{M1def}), and \begin{equation} \label{k1def} k_{1} := \inf \bigg\{k > k_{0} \colon \sum_{j=k_{0}}^{k} \int_{2^{n_{j}}}^{2^{n_{j}+1}} t p_{t} (0,0)\, dt > \alpha\bigg\}. \end{equation} We have coalescence between $Y^{i}$ and $Y^{i^\prime}$, for $i < i^\prime$ only at $t \in [i^\prime+2^{n_{j}},i^\prime+ 2^{n_{j}+1} ]$, $j \in [k_{0}, k_{1}]$ if \begin{description}[(a)] \item[(a)] $\frac{t-i^\prime}{i^\prime-i} \in (9/10, 11/10)$; \item[(b)] the interval of $t \in [i^\prime+2^{n_{j}}, i^\prime+2^{n_{j}+1} ]$ satisfying (a) is of length at least $2$. 
\end{description} We say $(i,i^\prime)$ and $(i^\prime,i)$ are in $j$ and write $(i,i^\prime) \in j$ if the above relations hold. To show that sufficient coalescence occurs, we essentially use Bonferroni inequalities (see, e.g., \cite{durrett05}, p.\ 21). To aid our argument we introduce a family of independent (non coalescing) random walks $\{(Z^i(s))_{s \geq i}\}_{i \in V}$ such that for each $i \in V$, $Y^i(s) = Z^i(s)$ for $s \geq i$ such that $l_s^i = i$. In the following we will deal with random walks $Y^0, Z^0$, but lack of total translation invariance notwithstanding, it will be easy to see that all bounds obtained for these random walks remain valid for more general random walks $Y^i, Z^i$. For a given random walk $Y^{0} $, say, the probability that $Y^{0}$ is killed by $Y^{i}$ (with $i$ possible in the sense of the above rules) is in principle a complicated event given the whole system of coalescing random walks. Certainly the event \begin{equation*} \big\{Z^{0} \text{ meets }Z^{i} \text{ in appropriate time interval after first having met }Z^{k}\big\} \end{equation*} is easier to deal with than the corresponding $Y$ event. From this point on we will shorten our phraseology by taking ``$Z^i $ hits $Z^k$" to mean that $Z^i $ meets $Z^k$ at a time $t$ satisfying the conditions (a) and (b) above with respect to $i$, $k$. For ${(Z^{0}(s))}_{s \geq 0}$ and ${(Z^{i}(s))}_{s \geq i}$ independent random walks each beginning at 0, we first estimate \begin{equation*} \sum_{i} \PP\big(Z^{0} \text{ hits } Z^{i} \big). \end{equation*} This of course decomposes as \begin{equation*} \sum_{j} \sum_{(0, i) \in j} \PP \big( Z^{0} \text{ hits } Z^{i} \big). \end{equation*} We fix $j$ and consider $i>0$ so that $(0, i) \in j$ (the case $i < 0$ is similar). 
That is, the interval of times $s$ with \begin{equation*} (s-i)/i \in (9/10, 11/10), \,\,\, s \in [i+2^{n_{j}} , i+2^{n_{j}+1}] \end{equation*} is at least $2$ in length: we note that for each $i \in \left( \frac{5}{4} 2^{n_j}, \frac{7}{4} 2^{n_j} \right )$ the relevant interval, $\frac{19}{10} i \leq s \leq \frac{21}{10} i$, is an interval of length greater than $\frac{5}{4} 2^{n_{j}} \frac{1}{5} = \frac{2^{n_{j}}}{4}$. \begin{lemma} \label{lem11} There exists $c_{2} \in (0, \infty)$ so that for any interval $I$ of length at least $1$ contained in $(1, \infty)$, \begin{equation*} \frac{1}{c_{2}} \int_{I} p_t(0,0) \,dt \leq \PP\big(X^0(t) =0 \text{ for some } t \in I\big) \leq c_{2} \int_{I} p_t(0,0) \,dt \, . \end{equation*} \end{lemma} \begin{proof} Consider the random variable $W = \int_{a}^{b+1} 1_{\{X^0(s) =0\}}\, ds$ for $I= [a,b]$. Then \begin{equation*} \EE(W) = \int_{a}^{b+1} \PP(X^0(s) =0)\, ds = \int_{a}^{b+1} p_{s}(0,0)\, ds \leq 2 \int_{I} p_{s}(0,0) \,ds \, , \end{equation*} by monotonicity of $p_{s}(0,0)$ and the fact that $b-a \geq 1$. But for $\tau := \inf \{s \in I\colon X^0(s) =0 \}$ we have $\EE(W \vert \mathcal{F}_{\tau}) \geq e^{-1}$ on $\{\tau < \infty\}$ so \begin{equation*} \PP(\tau < \infty) = \PP\big(X^0(t) = 0 \text{ for some } t \in I\big) \leq \EE (W) e \leq 2 e \int_{I} p_{s}(0,0) ds \, . \end{equation*} Equally, for $W^\prime = \int_{a}^{b} 1_{\{X^0(s) = 0\}} \, ds$ and $G := \int_{0}^{\infty} p_{s} (0,0) \, ds$ (we write $G$ rather than $\gamma$, the latter being already used for the coupling constant), we have \begin{equation*} \EE(W^\prime \,|\, \mathcal{F}_{\tau}) \leq G \quad \text{on } \{\tau < \infty\} \end{equation*} and so \begin{equation*} \PP(\tau < \infty) \geq \frac{\EE(W^\prime)}{G} = \frac{1}{G} \int_{a}^{b} p_{s}(0,0) \, ds \, . \end{equation*} \qed \end{proof} \begin{proposition} \label{} For some universal $c_{3} \in (0, \infty)$, \begin{equation*} c_{3}^{-1} 2^{2n_{j}} p_{2^{n_{j}}}(0,0) \leq \sum_{(0, i) \in j} \PP\big(Z^{0}\text{ hits }Z^{i}\big) \leq c_{3} 2^{2n_{j}} p_{2^{n_{j}}} (0,0)\, .
\end{equation*} \end{proposition} \begin{proof} We consider first the upper bound. There are less than $2^{n_{j}}$ relevant $i$. For such an $i$, \begin{equation*} \PP\big(Z^{0}\text{ hits }Z^{i}\big) \leq \PP \big(X^0(t) \text{ hits } 0 \text{ for some } t\in \big[a+2^{n_{j}}, a+3 \cdot 2^{n_{j}}\big]\big) \end{equation*} for some $a\geq 0$. By monotonicity of $t \to p_t(0,0)$ and using Lemma \ref{lem11}, this is bounded by \begin{equation*} c_{2} \int_{2^{n_{j}}}^{3 \cdot 2^{n_{j}}} p_{s}(0,0) \, ds \leq c_{3} 2^{n_{j}}p_{2^{n_{j}}}(0,0) \end{equation*} for some $c_3>0$. On the other hand, the number of $i \in \left( \frac{5}{4} 2^{n_j} , \frac{7}{4} 2^{n_j} \right)$ is greater than $c_{1} \frac{1}{3} 2^{n_j}$ if $R$ was fixed sufficiently large, and for each such $i$, $\big(\frac{9}{10}i, \frac{11}{10} i\big) \subset [2^{n_{j}}, 2^{n_{j}+1}]$. Moreover, we have \begin{eqnarray*} \PP\big(Z^{0}\text{ hits }Z^{i}\big) &\geq& \frac{1}{c_2} \, \int_{\frac{28}{10}i}^{\frac{32}{10} i} p_{s}(0,0) \, ds \geq \frac{1}{c_2} \, \frac{4}{10}\,i p_{2^{n_j+3}}(0,0) \geq \frac{1}{c_2} \, 2^{n_{j}-1} p_{2^{n_j+3}} (0,0)\\ &\geq& \frac{2^{-13}}{c_2} \, 2^{n_{j}}p_{2^{n_{j}-1}}(0,0) \geq c_3^{-1} \, 2^{n_{j}}p_{2^{n_{j}}}(0,0), \end{eqnarray*} because of Lemma \ref{lem11}, Corollary \ref{lem5.1} (by our choice of $j$), monotonicity of $t \to p_t(0,0)$ and possibly after increasing $c_3$. \qed \end{proof} Thus, using that $j\in[k_{0},k_{1}]$ (recall (\ref{k1def})), we have a universal $c_{4}$ such that \begin{equation*} c_{4} \alpha \geq \sum_{j} \sum_{(0,i)\in j} \PP\big(Z^{0}\text{ hits }Z^{i} \big) \geq \frac{\alpha}{c_{4}}. \end{equation*} There are two issues to address: \begin{description}[(a)] \item[(a)] to show that \begin{equation*} \PP\big(\exists j, \exists i \text{ so that } (0,i) \in j, \,\,\, Z^{0}\text{ hits }Z^{i}\big) \end{equation*} is of the order $\alpha$; \item[(b)] to show that (a) holds with $Z^{0}$, $Z^{i}$ replaced by our coalescing random walks $Y^{0}$, $Y^{i}$.
\end{description} In fact, both parts are resolved by the same calculation. We consider the probability that random walk $Z^{0}$ is involved in a ``3-way'' collision with $Z^{i}$ and $Z^{i^\prime}$ either due to $Z^{0}$ hitting $Z^{i}$ in the appropriate time interval and then hitting $Z^{i^\prime}$, or $Z^{0}$ hitting $Z^{i}$ and, subsequently, $Z^{i}$ hitting $Z^{i^\prime}$. The first case is important to bound so that one can use simple Bonferroni bounds to get a lower bound on $\PP( \exists i \text{ so that } Z^{i} \text{ hits }Z^{0} )$. The second is to take account of the fact that we are interested in the future coalescence of a given random walk $Y^{0}$. As already noted, we can couple the systems in the usual way so that for all $t$, $\cup_{i} \{Y_{t}^{i} \} \subseteq \cup_{i} \{Z_{t}^{i}\}$. The problem is that if for some $i$, $Z^{i}$ hits $Z^{0}$ due to coalescence, this need not imply that $Y^{i}$ hits $Y^{0}$: this fails if the $Y^{i}$ particle has coalesced with a $Y^{i^\prime}$ before $Z^{i}$ hits $Z^{0}$. Fortunately, this event is contained in the union of events above over $i$, $i^\prime$. \begin{proposition} \label{bte} There exists a universal constant $K$ so that for all $i$ and $i^\prime$ with $(0,i^\prime) \in j^\prime$, \begin{equation*} \PP\big(Z^{0} \text{ hits }Z^{i} \text{ and then }Z^{i^\prime}\big) \leq K 2^{n_{j^\prime}}p_{2^{n_{j^\prime}}}(0,0) \PP\big(Z^{0} \text{ hits }Z^{i}\big)\,. \end{equation*} \end{proposition} \begin{proof} There are several cases to consider: $i<0<i^\prime$, $i<i^\prime<0$, $i^\prime<i<0$, $i^\prime<0<i$, $0<i<i^\prime$ and $0<i^\prime<i$. All are essentially the same, so we consider explicitly $0<i<i^\prime$. We leave the reader to verify that the other cases are analogous. We choose $j$, $j^\prime$ so that $(0, i) \in j$ and $(0, i^\prime) \in j^\prime$ (so necessarily $j^\prime \geq j$).
We condition on $T_{j}$ and on $Z^{i}(T_{j})\,(= Z^{0}(T_{j}))$, on the event that \begin{equation*} T_{j} := \inf \bigg\{s \in \left(\frac{19i}{10}, \frac{21i}{10} \right) \cap \Big[i+2^{n_{j}}, i+2^{n_{j}+1}\Big] \colon Z^{i}(s) = Z^{0}(s)\bigg\} < \infty. \end{equation*} With $x=Z^0(T_{j})$ we have \begin{eqnarray*} &&\PP\left( \exists s^\prime \geq T_j,\, s^\prime \in \left(\frac{19i^\prime}{10}, \frac{21i^\prime}{10} \right) \cap \Big[i^\prime+2^{n_{j^\prime}}, i^\prime+2^{n_{j^\prime}+1}\Big] \colon Z^{i^\prime} (s^\prime)= Z^{0}(s^\prime) \,\,\,\Big\vert\,\,\, G^{0,i}\right)\\ &&\qquad= \PP \big(Z^0(t) = x \mbox{ for some } t \in I_j\big), \end{eqnarray*} where $I_j$ is the image of the interval \begin{equation*} \Big[(i^\prime+2^{n_{j^\prime}}) \vee T_{j} \vee \frac{19i^\prime}{10},\, \big(i^\prime+2^{n_{j^\prime}+1}\big) \wedge \frac{21i^\prime}{10}\Big], \end{equation*} under the function $t \mapsto 2t-T_j-i^\prime$, for $G^{0, i} = \sigma (Z^{0}(s), Z^{i}(s) \colon s \leq T_{j})$. By elementary algebra, this is less than \begin{equation*} \PP \left( Z^0(t) = x \text{ for }t \in \left(\frac{9i^\prime}{10}, \frac{16i^\prime}{5} \right)\right), \end{equation*} but by arguing as in Lemma \ref{lem11}, this is bounded by \begin{eqnarray*} c_2 \int^{\frac{16i^\prime}{5}}_{\frac{9i^\prime}{10}}p_{s}(0,x) \,ds &\leq& c_2\int^{\frac{16}{5}i^\prime}_{\frac{9}{10}i^\prime} p_{s}(0,0) \,ds \leq c_2 \frac{23}{10}\,i^\prime p_{2^{n_{j^\prime}-1}}(0,0) \\ &\leq& c_2 \frac{23}{9} 2^{13}2^{n_{j^\prime}}p_{2^{n_{j^\prime}+3}}(0,0) \leq c^{\prime} 2^{n_{j^\prime}}p_{2^{n_{j^\prime}}}(0,0) \end{eqnarray*} for some universal constant $c^\prime$, where we use symmetry and monotonicity of $p_{s}(\cdot,\cdot)$ and Corollary \ref{lem5.1}, by the choice of our $n_{j^\prime}$. So given that \begin{equation*} \PP(T_{j} < \infty) = \PP\big(Z^{0} \text{ hits }Z^{i} \big), \end{equation*} the desired bound is achieved.
\qed \end{proof} \begin{corollary} \label{cor12} For $\alpha$ sufficiently small, \begin{equation*} \PP\big( \exists i \colon Z^{0}\text{ hits }Z^{i}\big) \geq \frac{\alpha}{2}\, . \end{equation*} \end{corollary} \begin{proof} By Bonferroni, the desired probability is at least \begin{equation*} \sum_{i} \PP\big(Z^{0}\text{ hits }Z^{i}\big) - \sum_{i, i^\prime} \PP\big(Z^{0}\text{ hits }Z^{i}\text{ and then }Z^{i^\prime}\big) \geq \alpha - Kc_{3}^2\alpha^{2} \geq \frac{\alpha}{2} \end{equation*} if $\alpha \leq 1/(2Kc_3^2)$. \qed \end{proof} We similarly show \begin{proposition} \label{prop7} There exists a universal constant $K$ so that for all $i^\prime$ and $i$ with $(0,i) \in j$, \begin{equation*} \PP\big(Z^{0}\text{ hits }Z^{i}\text{ after }Z^{i}\text{ hits }Z^{i^\prime}\big) \leq K 2^{n_{j}}p_{2^{n_{j}}}(0,0) \PP\big(Z^{i}\text{ hits }Z^{i^\prime}\big)\, . \end{equation*} \end{proposition} This gives as a corollary: \begin{corollary} \label{cor13} For the coalescing system $\{Y^{i}\}_{i\in V}$, provided $\alpha$ is sufficiently small, \begin{equation*} \PP\big(Y^{i}\text{ dies after time }R\big) \geq \frac{\alpha}{5}\, . \end{equation*} \end{corollary} \begin{proof} From the labeling scheme, we of course have \begin{eqnarray*} &&\PP\big(Y^{i} \text{ dies after time }R\big)\\ &&\qquad\geq \frac{1}{2} \PP\big(Y^{i} \text{ hits }Y^{i^\prime}\text{ in appropriate time interval for some }i^\prime\big)\\ &&\qquad\geq \frac{1}{2} \PP\big(Z^{i}\text{ hits }Z^{i^\prime}\text{ in appropriate time interval for some }i^\prime\big) \\ &&\qquad- \frac{1}{2} \PP\big(Z^{i}\text{ hits }Z^{i^\prime}\text{ in appropriate time interval for some }i^\prime \text{ so that }\\ &&\qquad\qquad\qquad Z^{i^\prime}\text{ hits some }Z^{i^{\prime\prime}} \text{ previously}\big)\\ &&\qquad\geq \frac{\alpha}{4} - Kc_3^2 \alpha^{2} \geq \frac{\alpha}{5} \end{eqnarray*} for $\alpha \leq 1/(20Kc_3^2)$.
\qed \end{proof} We can now complete the proof of Proposition \ref{prop3} and hence that of Proposition \ref{prop2}. If we have $c_0 > 0$, then we can find $0<\epsilon< \alpha / 200$ and $\alpha$ so small that the relevant results above hold, in particular Corollary \ref{cor13}. Thus the density of Y's is reduced by at least $\alpha / 5$. But by our choice of $\epsilon$ and Lemma \ref{lem8}, the density of $X$'s is reduced by at least $\alpha/5 -6 \epsilon \geq \alpha / 6 \geq 3 \epsilon $ which is a contradiction with Proposition \ref{lem3}, because it would entail the density falling strictly below $c_{0}$. \section{Proof of Theorem \ref{th3}} \label{S4} In what follows we assume, as in Section \ref{S2}, that $p=1$, the extension to arbitrary $p\geq 1$ being straightforward. We begin by specifying the random walk $(X(t))_{t\geq 0}$ on $\Z^4$ defined by \begin{equation*} \label{rw1.4} X(t) = S(t)+ e_{1}N(t) \end{equation*} with $(S(t))_{t\geq 0}$ denoting a simple random walk on $\Z^4$, $(N(t))_{t\geq 0}$ a rate $1$ Poisson process and $e_{1}=(1,0,0,0)$ the first unit vector in $\Z^4$. Thus our random walk $(X(t))_{t\geq 0}$ is highly transient but its symmetrization is a mean zero random walk and by the local central limit theorem, we have \begin{equation*} \label{} \int_{0}^{\infty} t p_{t}^{(s)}(0,0) \,dt = \infty, \end{equation*} where $p_{t}(\cdot,\cdot)$ is the semigroup associated to $(X(t))_{t\geq 0}$. It remains to show that $\lambda_{p}(\kappa)<\gamma$ for all $\kappa\in[0,\infty)$. Our approach is modeled on the proof of the first part of Theorem \ref{th1}. 
We wish again to pick \emph{bad environment set} $B_{E}$ associated to the $\xi$-process and \emph{bad random walk set} $B_{W}$ associated to the random walk $X^{\kappa}$ so that \begin{eqnarray} \label{lambdaupbd3} &&\EE \Big(\exp\Big[\gamma\int_{0}^n\xi_{s}(X^\kappa(s))\, ds\Big]\Big)\\ &&\qquad\leq \Big(\PP(B_{E})+\PS(B_{W})\Big)e^{\gamma n} + \EE\Big(\rlap{\mbox{\small\rm 1}}\kern.15em 1_{B_{E}^\c\cap B_{W}^\c} \exp\Big[\gamma\int_{0}^n\xi_{s}(X^\kappa(s))\, ds\Big]\Big) \nonumber\\ \end{eqnarray} with, for some $0<\delta<1$, \begin{equation} \label{badbound} \PP(B_{E})\leq e^{-\delta n}, \qquad \PS(B_{W})\leq e^{-\delta n}, \end{equation} and, automatically from the definition of $B_{E}$ and $B_{W}$, \begin{equation} \label{goodbound} \int_{0}^{n} \xi_{s}(X^\kappa(s))\,ds \leq n(1-\delta) \qquad\text{on } B_{E}^\c\cap B_{W}^\c \end{equation} (as in the proof of Theorem \ref{th1}). Since, combining (\ref{lambdaupbd3}--\ref{goodbound}), we obtain \begin{equation*} \label{} \lim_{n\to\infty}\frac{1}{n}\log \EE\Big(\exp\Big[\gamma\int_{0}^n\xi_{s}(X^\kappa(s))\, ds\Big]\Big) < \gamma\, , \end{equation*} it is enough to prove (\ref{badbound}). All of this has been done in the proof of Theorem \ref{th1} in a different situation. The major difference is that we need to modify the collection of skeletons used. \begin{lemma} \label{lem1-countex} Let $X^{\kappa} (\cdot)$ be a speed $\kappa$ simple random walk in four dimensions. Fix $M\in\N\setminus \{1\}$. There exists $c>0$ so that for $M$ large and all $n$, outside of an $e^{-cn}$ probability event, there exists $0\leq i_{1}<i_{2}<\cdots < i_{n/2M}\leq n$ so that \begin{equation} \label{driftSkl} X^{\kappa}_{(1)}(i_{j}M+kM) - X^{\kappa}_{(1)}(i_{j}M) > - \frac{kM}{2} , \qquad j\in\{1,\cdots,n/(2M)\},\, k\geq 0\, , \end{equation} where $(X^{\kappa}_{(1)}(t))_{t\geq 0}$ denotes the first coordinate of $(X^{\kappa}(t))_{t\geq 0}$. 
\end{lemma} \begin{proof} Define \begin{equation*} \sigma_{1} = \inf \big\{kM >0\colon X^{\kappa}_{(1)}(kM) \leq -kM/2 \big\} \end{equation*} and recursively \begin{equation*} \sigma_{i+1} = \inf \big\{kM >\sigma_{i}\colon X^{\kappa}_{(1)}(kM)-X_{(1)}^\kappa(\sigma_{i}) \leq -(kM-\sigma_{i})/2 \big\} \, . \end{equation*} Since the event \begin{equation*} \{rM \leq \sigma_{1} < \infty \} \subset \ \cup_{k=r}^{\infty }\big\{X^{\kappa}_{(1)}(kM) \leq -kM/2 \big\} \, , \end{equation*} we have easily that, for all $r$, \begin{equation*} \PP (rM \leq \sigma_{1} < \infty) \ \leq \ e^{-rMc} \end{equation*} for $c>0$ not depending on $n$ or $M$. If we now define \begin{equation*} \tau_{1} = \inf \big\{kM >0\colon X_{(1)}^\kappa(jM)-X^{\kappa}_{(1)}(kM) > -(j-k)M/2 \,\,\,\forall j>k\big\} \end{equation*} and recursively \begin{equation*} \tau_{i+1} = \inf \big\{kM >\tau_{i}\colon X^{\kappa}_{(1)}(jM)-X^{\kappa}_{(1)}(kM) > -(j-k)M/2 \,\,\,\forall j>k \big\}\, , \end{equation*} it is easily seen that \begin{eqnarray*} \PP(\tau_{1} \geq rM) &\leq& \PP(\exists 1\leq k \leq r\colon rM \leq \sigma_{k} < \infty)\\ &\leq& \sum_{k=1}^{r-1}\,\,\, \sum_{0<x_{1 }< \cdots < x_{k} < r} \PP\big(\sigma_{i} =x_{i}M \,\,\,\forall i \leq k, \, rM \leq \sigma_{k+1} < \infty \big)\\ &\leq& \sum_{k=1}^{r-1}\,\,\, \sum_{0<x_{1 }< \cdots < x_{k} <r} e^{-rMc} \leq \ e^{-rMc}2^r \end{eqnarray*} which is less than $e^{-rMc/2}$ if $M$ is fixed sufficiently large. We have \begin{itemize} \item $(\tau_{i+1} - \tau_{i})_{i \geq 1}$ are i.i.d. (this follows from Kuczek's argument (see \cite{kuc89})). \item Provided $M$ has been fixed sufficiently large for each integer $r \geq 1$, $\PP(\tau_{i+1} - \tau_{i} \geq rM) \leq e^{-rMc/4}$. This follows from the fact that random variable $\tau_{i+1} - \tau_{i}$ is simply the random variable $\tau_{1}$ conditioned on an event of probability at least $1/2$ (provided $M$ was fixed large). 
\end{itemize} Thus by elementary properties of geometric random variables we have \begin{equation*} \PP(\tau_{n/2M} >n) \leq \PP\left(Y \geq \frac{n}{2M}\right) \leq e^{-cn} \end{equation*} for $Y \sim B\left(\frac{n}{M},e^{-cM/4}\right)$, $c>0$ and $M$ large. This completes the proof of the lemma. \qed \end{proof} Given that the path of the random walk satisfies the condition of this lemma, we call the (not uniquely defined) points $i_{1}, i_{2}, \cdots $ \emph{regular} points. Given this result, we consider the $M$-skeleton induced by the values $X^\kappa (jM)$, $0\leq j \leq n/M$, discretized via spatial cubes of length $M/8$ (rather than $2M$ as in the proof of Theorem \ref{th1}). It is to be noted that if $(X^\kappa (t))_{0\leq t\leq n}$ satisfies the claim for Lemma \ref{lem1-countex} and $y_{0}:=0, y_{1}, y_{2}, \cdots, y_{n/M}$ with $y_{k}\in \Z^4$, $0\leq k\leq n/M$, is its $M$-skeleton, namely, \begin{equation} \label{Xkappaskl*} X^\kappa(kM)\in C_{y_{k}} :=\prod_{j=1}^{4}\bigg[y_{k}^{(j)} \frac{M}{8}, (y_{k}^{(j)}+1)\frac{M}{8}\bigg), \qquad 0\leq k\leq n/M, \end{equation} where $y_{k}^{(j)}$ denotes the $j$-th coordinate of $y_{k}$ (we suppose without loss of generality that $M$ is a multiple of $8$). Then, by (\ref{driftSkl}) and (\ref{Xkappaskl*}), we must have \begin{equation*} y_{i_{j}}^{(1)}-4k\leq y_{i_{j}+k}^{(1)} +1. \end{equation*} In particular, we must have \begin{equation} \label{sklcond} y^{(1)}_{i_{j^\prime}} -4 (i_{j}-i_{j^\prime}) \leq y^{(1)}_{i_{j}} +1 \qquad \forall i_{j^\prime}<i_{j}. \end{equation} In the following we modify the definition of appropriate skeletons by adding in the requirement that the skeleton must possess at least $n/2M$ indices $i_{1}, i_{2}, \ldots, i_{n/2M}$ with the corresponding $y_{i_{j}}$ satisfying (\ref{sklcond}). We note that the resizing of the cubes makes the notion of acceptability a little more stringent but does not change the essentials.
Remark first that Lemma \ref{bw2} is still valid in our new setting. Lemma \ref{lem1-countex} immediately gives that with this new definition, Lemma \ref{bw1} remains true. Of course since this definition is more restrictive we have \begin{equation*} \label{} |\Xi_{A}|\leq K^{n/M} \end{equation*} for $K$ as in Lemma \ref{appropriate}. In fact in our program all that remains to do, that is in any substantive way different from the proof of Theorem \ref{th1}, is to give a bound on the probability of $B_{E}$ for appropriate $B_{E}$. This is the content of the lemma below (analogous to Lemma \ref{be.lem}). Given this lemma, we can then proceed exactly as with the proof of Theorem \ref{th1}. \begin{lemma} For any skeleton $(y_{k})_{0\leq k\leq n/M}$ in $\Xi_{A}$, the probability that $\xi $ is not good for $(y_{k})_{0\leq k\leq n/M}$, i.e., \begin{equation*} \not\exists \frac{n}{4M} \text{ indices } 1\leq j\leq \frac{n}{M} \colon \xi_{s}(z)=0\,\,\, \forall s\in[jM,jM+1] \text{ for some } z\in C_{y_{j}}\, , \end{equation*} is less than $(4K)^{-n/M}$. \end{lemma} \begin{proof} We note that, in proving the analogous result for Theorem \ref{th1}, we did not need our skeleton to be in $\Xi_{A}$; the proof worked over any skeleton. For us however it is vital that our skeleton satisfies (\ref{sklcond}). We consider a skeleton in $\Xi_{A}$. Let the first $n/2M$ regular points of our skeleton be $ i_{1}, i_{2}, \cdots i_{n/2M}$. For each $1\leq i_{j}\leq n/2M$, we choose $R$ points \begin{equation*} x_{1}^{i_{j}},\ldots, x_{R}^{i_{j}} \in C_{ y_{i_{j}}} \end{equation*} so spread out that for random walks $(X(t))_{t\geq 0}$ as in (\ref{rw1.4}) beginning at the points $x_{k}^{i_{j}}$, $k=1,\cdots,R$, the chance that two of them meet is less than $\epsilon$, where $0<\epsilon\ll 1$. Now, consider $i_{j^\prime}<i_{j}$. The probability that a random walk starting at $(x_{k}^{i_{j}}, i_{j} M)$ meets a random walk starting at $(x_{k^\prime}^{i_{j^\prime}}, i_{j^\prime} M)$ satisfies the following lemma.
\begin{lemma} For $i_{j^\prime}<i_{j}$, there exists $K>0$ such that \begin{equation*} \PP\bigg(X^{x_{k}^{i_{j}},i_{j} M} \,\,\text{ meets }\,\, X^{x_{k^\prime}^{i_{j^\prime}},i_{j^\prime} M}\bigg)\leq \frac{K}{M^2(i_{j}-i_{j^\prime})^2} \, . \end{equation*} \end{lemma} \begin{proof} The important point is that since our skeleton is in $\Xi_{A}$, \begin{equation} \label{imp.lem4.3} \Big(x_{k^\prime}^{i_{j^\prime}}\Big)^{(1)} \leq \Big(x_{k}^{i_{j}}\Big)^{(1)} + (i_{j}-i_{j^\prime})\frac{M}{2}+ \frac{M}{4}\, , \end{equation} and so we have \begin{eqnarray*} &&\PP\bigg(X^{x_{k}^{i_{j}},i_{j} M} \,\,\text{ meets }\,\, X^{x_{k^\prime}^{i_{j^\prime}},i_{j^\prime} M}\bigg) \\ &&\qquad\leq \PP\bigg(\Big(X^{x_{k}^{i_{j}},i_{j} M}\Big)^{(1)} \left((i_{j}-i_{j^\prime})M\right) \geq \big(x_{k}^{i_{j}}\big)^{(1)} + (i_{j}-i_{j^\prime})\frac{3M}{4}\, ,\\ && \qquad\qquad\quad X^{x_{k}^{i_{j}},i_{j} M} \,\,\text{ meets }\,\, X^{x_{k^\prime}^{i_{j^\prime}},i_{j^\prime} M}\bigg) \\ &&\qquad + \PP\bigg(\Big(X^{x_{k}^{i_{j}},i_{j} M}\Big)^{(1)} \left((i_{j}-i_{j^\prime})M\right) \leq \big(x_{k}^{i_{j}}\big)^{(1)} + (i_{j}-i_{j^\prime})\frac{3M}{4}\bigg). \end{eqnarray*} By standard large deviations bounds, \begin{equation*} \PP\bigg(\Big(X^{x_{k}^{i_{j}},i_{j} M}\Big)^{(1)} \left((i_{j}-i_{j^\prime})M\right) \leq \big(x_{k}^{i_{j}}\big)^{(1)} + (i_{j}-i_{j^\prime})\frac{3M}{4}\bigg) \leq e^{-CM(i_{j}-i_{j^\prime})}, \end{equation*} for some universal $C\in (0,\infty)$. For the other term, we have the following lemma. \begin{lemma} For two independent processes $X=(X(t))_{t\geq 0}$ and $Y=(Y(t))_{t\geq 0}$ with $X(0)=x\in\Z^4$ and $Y(0)=y\in\Z^4$, the probability that $X$ ever meets $Y$ is bounded by $K/\|x-y\|_{\infty}^2$. \end{lemma} \begin{proof} $X-Y$ is not exactly a simple random walk, but it is a symmetric random walk and so local CLT gives appropriate random walks bounds (see, e.g., \cite{law10}).
\qed \end{proof} From this and inequality (\ref{imp.lem4.3}), we have \begin{eqnarray*} &&\PP\bigg(X^{x_{k}^{i_{j}},i_{j} M} \,\text{meets } X^{x_{k^\prime}^{i_{j^\prime}},i_{j^\prime} M} \Big\vert \Big(X^{x_{k}^{i_{j}},i_{j} M}\Big)^{(1)} \left((i_{j}-i_{j^\prime})M\right) \geq \big(x_{k}^{i_{j}}\big)^{(1)} + (i_{j}-i_{j^\prime})\frac{3M}{4}\bigg)\\ &&\qquad\leq \frac{K}{M^2 (i_{j}-i_{j^\prime})^2}\, . \end{eqnarray*} \qed \end{proof} Thus, for any $R$ large but fixed, we can choose $M$ so that for all skeletons in $\Xi_{A}$ and each $i_{j^\prime}$, we have \begin{eqnarray*} \sum_{i_{j}<i_{j^\prime}}\sum_{k,k^\prime} \PP\bigg(X^{x_{k}^{i_{j}},i_{j} M} \,\,\text{ meets }\,\, X^{x_{k^\prime}^{i_{j^\prime}},i_{j^\prime} M}\bigg) &\leq& \frac{R^2 K}{M^2}\sum_{r=1}^{+\infty}\frac{1}{r^2}\\ &\leq& \frac{R^2 K^\prime}{M^2} < \epsilon^2 \end{eqnarray*} with $M$ chosen sufficiently large, which is analogous to (\ref{2.2.08}). From this point on, the rest follows as for the proof of Lemma \ref{be.lem}. \qed \end{proof} \end{document}
\begin{document} \title{Webs and projective structures on a plane} \begin{abstract} We prove that there is a correspondence between projective structures defined by torsion-free connections with skew-symmetric Ricci tensor and Veronese webs on a plane. The correspondence is used to characterise the projective structures in terms of second order ODEs. \end{abstract} \section{Introduction} A web is a family of foliations on a manifold. In the present paper we concentrate on the simplest example which is a 3-web on a plane, i.e. a triple of one-dimensional foliations in the general position on $\mathbb{R}^2$ (see \cite{AG,N}). We show that a 3-web defines a projective structure on a plane. The projective structures obtained in this way are very special. Namely, they are defined by linear connections with skew-symmetric Ricci tensor. Additionally, the associated twistor space fibers over the projective space $\mathbb{R} P^1$. The existence of the fibration in the twistor picture suggests that the projective structures defined by 3-webs are two-dimensional counterparts of so-called hyper-CR Einstein-Weyl structures on $\mathbb{R}^3$ \cite{Dun}. In \cite{DK} we showed that the hyper-CR Einstein-Weyl structures are in a one-to-one correspondence with Veronese webs, i.e. special 1-parameter families of foliations introduced by Gelfand and Zakharevich \cite{GZ} in connection to bi-Hamiltonian systems. A similar phenomenon takes place on the plane. Indeed, one can easily extend a 3-web to a Veronese web on a plane and it is an intermediate step in the construction of a projective structure out of a 3-web. The approach gives a new and simple proof of Wong's theorem \cite{W} in the stronger version of Derdzinski \cite{Der}. We also provide local forms of connections with constant skew-symmetric Ricci tensor. The projective structures defined by connections with skew-symmetric Ricci tensors were investigated recently in \cite{Der,DW,R}.
In particular \cite{R} provides a characterisation of this class of projective structures in terms of the associated second order differential equation. We describe an alternative approach in terms of the dual equation at the end of the paper. The result is based on our earlier characterisation of Veronese webs (and more generally Kronecker webs) in terms of ODEs \cite{K2} and involves so-called time-preserving contact transformations \cite{JK}.
In general, a 1-parameter family of corank-one foliations on a manifold $M$ of dimension $n+1$ is a Veronese web if any $x\in M$ has a neighbourhood $U$ such that there exist point-wise independent one-forms $\omega_0,\ldots,\omega_n$ on $U$ such that $$ T\mathcal{F}_{(s:t)}|_U=\ker(s^n\omega_0+s^{n-1}t\omega_1+\cdots+t^n\omega_n). $$ At any point $x\in M$ the mapping $$ (s:t)\mapsto\mathbb{R}(s^n\omega_0(x)+s^{n-1}t\omega_1(x)+\cdots+t^n\omega_n(x))\in P(T^*_xM) $$ is a Veronese embedding and it justifies the terminology. Specifying to $n=2$ we get the following correspondence. \begin{proposition}\label{prop1} Let $(s_0:t_0)\in \mathbb{R} P^1$ be fixed. Any 3-web on a plane extends uniquely to a Veronese web on $\mathbb{R}^2$ such that \eqref{eq1} is satisfied. Conversely, for a Veronese web $\{\mathcal{F}_{(s:t)}\}$, the triple $\{\mathcal{F}_{(1:0)},\mathcal{F}_{(s_0:t_0)},\mathcal{F}_{(0:1)}\}$ is a 3-web. \end{proposition} The uniqueness above follows from the fact that a Veronese curve in $\mathbb{R} P^1$ is determined by values at three distinct points. In higher dimensions there is no such simple correspondence between finite families of foliations and Veronese webs since one has to impose additional integrability conditions on function $w$. In particular in dimension 3 one gets the Hirota equation \cite{DK,Z}. In what follows, for the sake of convenience, we will use the affine parameter $t=(1:t)\in\mathbb{R} P^1$ rather than the projective one $(s:t)$. The foliation corresponding to $(0:1)$ will be denoted $\mathcal{F}_\infty$. Formula \eqref{eq2} can be equivalently written as \begin{equation}\label{eq2b} \omega_t=t_0w_xdx+tw_ydy \end{equation} We have investigated the geometry of Veronese webs in \cite{JK,K2}. In particular we have introduced a linear connection associated to a web. In the present paper we will denote it $\nabla^\mathcal{F}$.
In dimension 2 it can be written explicitly in the following form \begin{eqnarray} \nabla^\mathcal{F}_{\partial_x}\partial_x=\frac{w_yw_{xx}-w_xw_{xy}}{w_xw_y}\partial_x, &\quad& \nabla^\mathcal{F}_{\partial_y}\partial_y=\frac{w_xw_{yy}-w_yw_{xy}}{w_xw_y}\partial_y,\label{eqCon}\\ \nabla^\mathcal{F}_{\partial_x}\partial_y=0, &\quad&\nabla^\mathcal{F}_{\partial_y}\partial_x=0.\nonumber \end{eqnarray} On the other hand, for a 3-web there is a notion of the Chern connection (see \cite{N}). If a Veronese web is defined by a 3-web then $\nabla^\mathcal{F}$ coincides with the Chern connection of the 3-web. Indeed, it can be verified by a direct inspection that the formulae for $\nabla^\mathcal{F}$ can be computed as in \cite[Theorem 1.6]{N}. \begin{proposition}\label{prop2} Let $\{\mathcal{F}_t\}$ be a Veronese web on $\mathbb{R}^2$. The associated connection $\nabla^\mathcal{F}$ has the following properties: \begin{enumerate} \item[(a)] All leaves of $\mathcal{F}_t$ are geodesics of $\nabla^\mathcal{F}$ for any $t\in\mathbb{R}$. \item[(b)] The torsion of $\nabla^\mathcal{F}$ vanishes. \item[(c)] The Ricci curvature tensor of $\nabla^\mathcal{F}$ is skew-symmetric. \end{enumerate} \end{proposition} \begin{proof} We assume that a Veronese web is defined by \eqref{eq2b}. Then the leaves of $\mathcal{F}_t$ are integral curves of the vector field $$ w_y\partial_x-tw_x\partial_y. $$ We directly compute $$ \nabla^\mathcal{F}_{w_y\partial_x-tw_x\partial_y}(w_y\partial_x-tw_x\partial_y)= \left(\frac{w_y^2w_{xx}-tw_x^2w_{yy}}{w_xw_y}\right)(w_y\partial_x-tw_x\partial_y) $$ and it proves Statement (a). Statement (b) immediately follows from the definition of $\nabla^\mathcal{F}$. To prove Statement~(c) we compute non-trivial components of the curvature (3,1)-tensor $R(\nabla^\mathcal{F})$.
We get $$ R(\nabla^\mathcal{F})(\partial_x,\partial_y)\partial_x=\rho\partial_x,\qquad R(\nabla^\mathcal{F})(\partial_x,\partial_y)\partial_y=\rho\partial_y, $$ where \begin{equation}\label{eqR} \rho=\frac{w_{xx}w_{xy}}{w_x^2}-\frac{w_{yy}w_{xy}}{w_y^2}-\frac{w_{xxy}}{w_x}+\frac{w_{xyy}}{w_y}. \end{equation} It follows that the Ricci tensor of $\nabla^\mathcal{F}$ is represented by the matrix $$ Ric(\nabla^\mathcal{F})=\left(\begin{array}{cc} 0 & \rho \\ -\rho & 0 \end{array} \right). $$ \end{proof} It can be deduced from \cite[Corollary 7.5]{K2} that any torsion-free connection with skew-symmetric Ricci tensor can be obtained as a connection $\nabla^\mathcal{F}$ for a web. Indeed, in the proof of \cite[Corollary 7.5]{K2} with $m=1$ it is shown how to construct a Veronese web on a plane with a curvature being an arbitrary function. The result was obtained in terms of the canonical frames. Here we will show a different reasoning. Let $\nabla$ be a torsion-free connection on a plane. If $Ric(\nabla)$ is skew-symmetric then it follows from linear algebra that there exists a function $\rho$ such that $R(\nabla)(\partial_x,\partial_y)V=\rho V$ for any vector field $V$. Let us fix a point $x\in\mathbb{R}^2$ and choose a frame $X(x),Y(x)\in T_x\mathbb{R}^2$. Moreover, for any other $y\in\mathbb{R}^2$ let us choose a smooth curve $\gamma_y$ joining $x$ and $y$ and define $X(y),Y(y)\in T_y\mathbb{R}^2$ by the parallel transport of $X(x)$ and $Y(x)$ along $\gamma_y$. In this way we construct two vector fields $X$ and $Y$. It follows from the property $R(\nabla)(\partial_x,\partial_y)V=\rho V$ that the frame bundle of $T\mathbb{R}^2$ reduces to a $GL(1,\mathbb{R})$-bundle and consequently the choice of different curves $\gamma_y$ leads to vector fields $\tilde X$ and $\tilde Y$ which are proportional to $X$ and $Y$ in the same way, i.e. $\tilde X=fX$ and $\tilde Y=fY$ for some function $f\colon\mathbb{R}^2\to\mathbb{R}$.
We define a Veronese web $\{\mathcal{F}_t\}$ imposing that the set of leaves of $\mathcal{F}_t$ is the set of integral curves of the vector field $$ X+tY. $$ Note that $\tilde X+t\tilde Y$ has the same integral curves as $X+tY$ and hence the web is well defined. The choice of a different frame at the initial point $x$ leads to a M\"obius transformation of the projective parameter $(s:t)$ which parametrises the foliations. In this way we proved \begin{theorem}\label{thm1} There is a one-to-one correspondence between Veronese webs (given up to a M\"obius transformation of the projective parameter $(s:t)$) on a plane and torsion-free connections on $\mathbb{R}^2$ with skew-symmetric Ricci tensor. \end{theorem} We provide the following examples as applications of Theorem \ref{thm1}: \vskip 1ex 1. {\bf Flat case.} It is clear that the flat connection corresponds to the linear function $w(x,y)=x+y$ and the associated web is defined by the one-form $$ \omega_t=dx+tdy. $$ \vskip 2ex 2. {\bf Constant curvature.} In order to find a torsion-free connection with constant skew-symmetric Ricci tensor one has to solve the equation $$ \rho=C, $$ where $\rho$ is given by \eqref{eqR} and $C\in\mathbb{R}$ is constant. The formula for $\rho$ can be written in more compact way $$ \rho=\left(\frac{w_{xy}}{w_y}\right)_y -\left(\frac{w_{xy}}{w_x}\right)_x= \ln(w_y)_{xy}-\ln(w_x)_{xy}= \ln\left(\frac{w_y}{w_x}\right)_{xy}. $$ Thus, the equation $\rho=C$ gives $$ \frac{w_y}{w_x}=e^{Cxy} $$ or $w_y=e^{Cxy}w_x$. This equation can be solved using the method of characteristics. However the knowledge of an exact solution is not necessary because we can always multiply the one-form $\omega_t$ from formula \eqref{eq2b} by a function and the resulting one-form defines the same Veronese web. Thus, multiplying $\omega_t$ by $w_x^{-1}$, we get that the web corresponding to a connection with $\rho=C$ is defined by the one-form $$ dx+te^{Cxy}dy. 
$$ The connection is given by $$ \nabla^\mathcal{F}_{\partial_x}\partial_x=-Cy\partial_x,\quad\nabla^\mathcal{F}_{\partial_y}\partial_y=Cx\partial_y, \quad \nabla^\mathcal{F}_{\partial_x}\partial_y=0,\quad \nabla^\mathcal{F}_{\partial_y}\partial_x=0. $$ These formulae can be derived directly from equation \eqref{eqCon} because $$ \frac{w_yw_{xx}-w_xw_{xy}}{w_xw_y}=-\frac{w_x}{w_y}\partial_x\left(\frac{w_y}{w_x}\right), \qquad \frac{w_xw_{yy}-w_yw_{xy}}{w_xw_y}=\frac{w_x}{w_y}\partial_y\left(\frac{w_y}{w_x}\right). $$ \vskip 2ex 3. {\bf Wong's theorem.} Derdzinski \cite[Theorem 6.1]{Der} proved that for a torsion-free connection $\nabla$ with skew-symmetric Ricci tensor one can always choose local coordinates $(x_1,x_2)$ such that the Christoffel symbols have the form $\Gamma^1_{11}=-\partial_{x_1}f$, $\Gamma^2_{22}=\partial_{x_2}f$ for some function $f$ and $\Gamma^i_{jk}=0$ unless $i=j=k$. In view of formula~\eqref{eqCon} and our Theorem \ref{thm1} this is evident as we can write $\nabla^\mathcal{F}_{\partial_x}\partial_x=-\partial_x\ln\left(\frac{w_y}{w_x}\right)\partial_x$ and $\nabla^\mathcal{F}_{\partial_y}\partial_y=\partial_y\ln\left(\frac{w_y}{w_x}\right)\partial_y$ if $\frac{w_y}{w_x}>0$ or $\nabla^\mathcal{F}_{\partial_x}\partial_x=-\partial_x\ln\left(-\frac{w_y}{w_x}\right)\partial_x$ and $\nabla^\mathcal{F}_{\partial_y}\partial_y=\partial_y\ln\left(-\frac{w_y}{w_x}\right)\partial_y$ if $\frac{w_y}{w_x}<0$. Note that the sign of $\frac{w_y}{w_x}$ is always fixed because both $w_y$ and $w_x$ never vanish since all foliations intersect transversely. \section{Projective structures} Two connections on a manifold $M$ are projectively equivalent if their sets of unparametrised geodesics coincide. A projective structure is a set of unparametrised geodesics of a connection, or, equivalently, it is a class of projectively equivalent connections. Any projective structure on a plane can be locally described in terms of a second order ODE. 
Namely, fixing local coordinates $(x,y)$ one can look for an equation in the form \begin{equation}\label{eq3} y''=\Phi(x,y,y') \end{equation} such that the solutions $(x,y(x))$ are geodesics for the projective structure. It can be shown that the equation satisfies \begin{equation}\label{eqcond} \partial_{y'}^4\Phi=0 \end{equation} and conversely any equation satisfying this condition defines a projective structure. The condition is point invariant and in fact any projective structure corresponds to a class of point equivalent equations. We will show now that we can construct a projective structure out of a Veronese web. Indeed we have the following \begin{proposition}\label{prop3} If $\{\mathcal{F}_t\}$ is a Veronese web on $\mathbb{R}^2$ then the union of all leaves of all foliations $\mathcal{F}_t$ is a projective structure defined by the associated connection $\nabla^\mathcal{F}$. If $\{\mathcal{F}_t\}$ is given by the one-form \eqref{eq2b} then the corresponding second order ODE is of the form \begin{equation}\label{eq3b} y''=\frac{1}{w_xw_y}\left((w_yw_{xx}-w_xw_{xy})y'+(w_yw_{xy}-w_xw_{yy})(y')^2\right). \end{equation} \end{proposition} \begin{proof} The first part follows directly from Statement (a) of Proposition \ref{prop2}. To get a description in terms of an ODE we recall that the leaves of $\mathcal{F}_t$ are integral curves of the vector field $w_y\partial_x-tw_x\partial_y$. It follows that for a fixed $t$ they are solutions to the following first order equation $$ y'=-t\frac{w_x}{w_y}. $$ Differentiating this equation with respect to $x$ and eliminating parameter $t$ by substitution $t=-\frac{w_y}{w_x}y'$ we get \eqref{eq3b}. \end{proof} Moreover we have \begin{lemma}\label{lemma1} Equation \eqref{eq3b} is point equivalent to the derivative of a first order ODE. \end{lemma} \begin{proof} Let $\phi\colon \mathbb{R}^2\to\mathbb{R}$ be a solution to $$ \partial_y\phi=\frac{w_y}{w_x}. 
$$ We define the following point transformation $$ \tilde x=x,\qquad \tilde y=\phi(x,y) $$ and verify that in the coordinates $(\tilde x, \tilde y)$ equation \eqref{eq3b} takes the form \begin{equation}\label{eq3c} \tilde y''=\phi_x(\tilde x,\phi^{-1}(\tilde x, \tilde y))_{\tilde x}+\phi_x(\tilde x,\phi^{-1}(\tilde x, \tilde y))_{\tilde y}\tilde y'. \end{equation} In above $\phi_x$ is the derivative of the mapping $(x,y)\mapsto \phi(x,y)$ with respect to the first coordinate, whereas $\phi_x(\tilde x,\phi^{-1}(\tilde x, \tilde y))_{\tilde x}$ and $\phi_x(\tilde x,\phi^{-1}(\tilde x, \tilde y))_{\tilde y}$ are derivatives of $(\tilde x,\tilde y)\mapsto \phi_x(\tilde x,\phi^{-1}(\tilde x, \tilde y))$ with respect to $\tilde x$ and $\tilde y$, respectively. The inverse $\phi^{-1}$ is taken with respect to the second coordinate function. Equation \eqref{eq3c} is the derivative of $$ \tilde y'=\phi_x(\tilde x,\phi^{-1}(\tilde x,\tilde y)). $$ \end{proof} As a corollary we get the following characterisation of linear connections projectively equivalent to a connection with skew-symmetric Ricci tensor. \begin{theorem}\label{thm2} Let $\nabla$ be a linear connection on $\mathbb{R}^2$. The following conditions are equivalent \begin{enumerate} \item[(a)] $\nabla$ is projectively equivalent to a connection with skew-symmetric Ricci curvature tensor. \item[(b)] $\nabla$ is projectively equivalent to the Chern connection of a 3-web (or the connection $\nabla^\mathcal{F}$ associated to a Veronese web $\{\mathcal{F}_t\}$). \item[(c)] Unparametrised geodesics of $\nabla$ are described by solutions to a second order ODE which is the derivative of a first order ODE. \end{enumerate} \end{theorem} \begin{proof} The equivalence (a)$\iff$(b) follows from Theorem \ref{thm1} and the implication (b)$\implies$(c) follows from Lemma \ref{lemma1}. 
Therefore it is sufficient to prove that a second order ODE which is a derivative of a first order ODE gives a projective structure defined by a connection with skew-symmetric Ricci tensor. This fact was proved in \cite{DW} (see Theorem \ref{thmDW} below). Note that the condition \eqref{eqcond} is always satisfied for the derivatives of first order ODEs. \end{proof} \section{Twistor space and dual ODE} The twistor space of a projective structure is the set of unparametrised geodesics. In the case of the projective structure on a plane the twistor space is a manifold of dimension two. Theorem \ref{thm2} should be compared to the following result of Dunajski and West. \begin{theorem}{\cite[Section 6.4, Proposition 3]{DW}}\label{thmDW} There is a one-to-one correspondence between projective structures on a plane for which the twistor space fibers over $\mathbb{R} P^1$ and point equivalent classes of second order ODEs which are derivatives of first order ODEs. \end{theorem} The fibration over $\mathbb{R} P^1$ can be easily seen from the point of view of Veronese webs. Indeed, to a geodesic which is a leaf of $\mathcal{F}_{(s:t)}$ one assigns the point $(s:t)\in\mathbb{R} P^1$. In \cite{K2} we have characterised Veronese webs in terms of ODEs in the following way (the analogous results are also proved for higher order ODEs and systems of ODEs). \begin{theorem}{\cite[Theorem 1.1]{K2}}\label{thmK} There is a one-to-one correspondence between Veronese webs on $\mathbb{R}^2$ and time-preserving contact equivalent classes of second order ODEs given in the form \begin{equation}\label{eq4} z''=F(t,z,z') \end{equation} for which the invariant $$ K_0=-\partial_zF+\frac{1}{2}X_F(\partial_{z'}F)-\frac{1}{4}(\partial_{z'}F)^2, $$ vanishes, where $X_F=\partial_t+z'\partial_z+F\partial_{z'}$ is the total derivative. \end{theorem} The invariant $K_0$ is sometimes called the Jacobi endomorphism \cite{CMS} and it also appears in \cite{G} where it is denoted $T$.
It should be stressed that it is not a point invariant of the equation. It is invariant with respect to contact transformations which preserve the independent variable $t$ (see \cite{JK} for the general theory of such transformations). The class of transformations is strictly more rigid than the class of point transformations. Actually, one can also allow the M\"obius transformations of $t$ (i.e. transformations of the form $t\mapsto \frac{at+b}{ct+d}$ where $a,b,c,d\in\mathbb{R}$ are constant and satisfy $ad-bc\neq 0$) and $K_0$ remains invariant. The M\"obius transformations of the independent variable correspond to the transformations of the projective parameter which parametrises the corresponding Veronese web. Summarising, Theorem \ref{thmK} together with Theorem \ref{thm1} give the following characterisation of torsion-free connections with skew-symmetric Ricci tensor in terms of invariants of ODEs. \begin{corollary} There is a one-to-one correspondence between torsion-free connections with skew-symmetric Ricci tensor and second order ODEs satisfying $K_0=0$ and given modulo time-preserving contact transformations and M\"obius transformations of the independent variable. \end{corollary} Equation \eqref{eq4} is dual to equation \eqref{eq3} in the sense of the Cartan duality for second order ODEs. To be more precise, the class of point equivalent equations defined by \eqref{eq4} is dual to the class of point equivalent equations defined by \eqref{eq3}. Equation \eqref{eq4} is an equation on the twistor space, i.e. both $t$ and $z$ can be considered as coordinates on the twistor space. Additionally $t$ is exactly the parameter which defines the fibration over $\mathbb{R} P^1$. The Veronese web for equation \eqref{eq4} is defined on the space of its solutions which is the $(x,y)$-space for equation \eqref{eq3}. Conversely, the twistor space is the solutions space for equation \eqref{eq3}. 
Theorem \ref{thmK} together with Theorem \ref{thm2} give the following result, which, in a sense, is dual to the result of \cite{R}. \begin{corollary} A second order ODE is point equivalent to the derivative of a first order ODE if and only if its dual equation is point equivalent to an equation for which $K_0=0$. \end{corollary} \vskip 2ex {\bf Acknowledgements.} I wish to thank Maciej Dunajski for useful discussions. The work has been partially supported by the Polish National Science Centre grant ST1/03902. \end{document}
\begin{document} \title{Using dark states to charge and stabilize open quantum batteries} \author{James Q. Quach} \email{[email protected]} \affiliation{Institute for Photonics and Advanced Sensing and School of Chemistry and Physics, The University of Adelaide, South Australia 5005, Australia} \author{William J. Munro} \affiliation{NTT Basic Research Laboratories \& NTT Research Center for Theoretical Quantum Physics, NTT Corporation, 3-1 Morinosato-Wakamiya, Atsugi-shi, Kanagawa 243-0198, Japan} \affiliation{National Institute of Informatics, 2-1-2 Hitotsubashi, Chiyoda-ku, Tokyo 101-8430, Japan} \begin{abstract} We introduce an open quantum battery protocol using dark states to achieve both superextensive capacity and power density, with non-interacting spins coupled to a reservoir. Further, our power density actually scales with the number of spins $N$ in the battery. We show that the enhanced capacity and power is correlated with entanglement. Whilst connected to the charger, the charged state of the battery is a steady state, stabilized through quantum interference in the open system. \end{abstract} \maketitle \section{Introduction} \label{sec:Intro} The recent interest in quantum technologies is driven by the potential power of quantum mechanics~\cite{nielsen02,dowling03,spiller05}, and the push towards technological miniaturization. Harnessing the unique properties of quantum mechanics, such as entanglement and superposition, promises to open new vistas in computing, sensing, cryptography, and other quantum technologies~\cite{feynman82,caves82,bennett84,deutsch85,lloyd96,bennet96,vanenk98,shor99,gisin02,giovannetti04,gisin07,ladd10}. The increasing rate of technology miniaturization, in particular electronics, has meant that we need to account for quantum effects. 
This has driven the relatively new field of quantum thermodynamics, which tries to understand thermodynamic concepts such as work, heat, and entropy in a quantum context~\cite{kosloff13,pekola15,vinjanampathy16,goold16,anders17,lostaglio19,binder19,mitchison19}. Quantum batteries (QBs) aim to harness the unique properties of quantum thermodynamics to build batteries that are fundamentally different from conventional batteries~\cite{alicki13,hovhannisyan13,skrzypczyk14,binder15}. Typically, QBs were modeled as a collection of $N$ identical quantum subsystems to which an external field, which acted as the energy source, was applied~\cite{alicki13}. Alicki and Fannes~\cite{alicki13} sought to understand whether entanglement could enhance the amount of extractable work in this model. Under closed unitary evolution, they showed that one can extract more work with entanglement than without. Further work revealed that it may be possible to reduce the amount of entanglement without detrimentally affecting the maximal work extraction, with the caveat that with reduced entanglement one requires more operations~\cite{hovhannisyan13}. This then led to the notion that entanglement boosted the charging rate of QBs, as it reduced the number of traversed states in the Hilbert space between the initial and final separable states~\cite{hovhannisyan13}. This conjecture was supported by Binder \textit{et al.}~\cite{binder15}, who showed that entangled spins can superextensively charge $N$ times faster than non-interacting spins, where $N$ is the number of spins. The main finding was that using global entangling operators, where all spins can interact with each other, can result in a speed-up of the charging power as compared to charging them individually. 
Further work argued that \textit{N} power scaling is the theoretical upper bound of the quantum advantage, constrained by quantum speed limits~\cite{campaioli17}. All these studies assumed global operators, which in practice are difficult to implement. Ferraro \textit{et al.}~\cite{ferraro18} overcame this problem by showing that, by locally coupling all of the spins coherently to the same quantum energy source in a photonic cavity, one can realize effective long-range interactions amongst all the spins. Known as the Dicke QB, after the Hamiltonian that describes it, they showed that the time taken to reach the maximum stored energy in the spin ensemble reduced as the ensemble got larger, such that the charging power scaled with $\sqrt{N}$ for large $N$. This increased the potential for QBs to be physically realized. However, recent work has shown that entanglement does not underlie the charging speedup in the Dicke QB, instead it is the result of an enhanced effective cavity coupling strength, which arises out of coherent cooperative interactions~\cite{zhang18}. Recently, QBs have been considered in an open system context~\cite{farina19,liu19}. This is important as a QB must interact with its environment for the device to ever be practical. In particular, protocols are needed to stabilize the charged state of the QB in an open system. A recent attempt proposed the continual measurement of the system for stabilization~\cite{gherardini19}. However, this protocol requires continuous access to the battery, and the measurement process itself is costly, consuming energy. Here we use dark states to achieve both superextensive capacity and power, that scales with $N$, with only local interactions, in an open system. We will show that the superextensive behavior of the system is correlated with entanglement. Furthermore, the stored energy of the battery is stable without the need to continually access the battery. 
\section{Model} \label{sec:Model} In general, the QB charging protocol consists of a battery and an energy source or charger. Switching on (off) the coupling between the battery and charger initiates the charging (discharging) process. We consider a QB in an open system, modeled as an ensemble of $N_B$ $\frac12$-spins with transition energy $\hbar\omega$, in a thermal reservoir (Fig.~\ref{fig:1}). Initially, the QB is in thermal equilibrium with the reservoir. The charger is another ensemble of $N_C$ $\frac12$-spins, but in the excited (up) state. We will assume $N_C\ge N_B$. The charging process is initiated by bringing the charger into the reservoir. \begin{figure} \caption{\textit{Model.}} \label{fig:1} \end{figure} The Hamiltonian of our model is \begin{equation} \begin{split} H &= \omega(J_B^z+J_C^z)+\int d^dkE_\mathbf{k}r_\mathbf{k}^\dagger r_\mathbf{k}\\ &+\frac{g}{2}[(J_B^++J_C^+)R+(J_B^-+J_C^-)R^\dagger] \end{split} \label{eq:H} \end{equation} where $J^{x,y,z}_i$ are the usual collective spin operators on ensemble $i$, with the collective raising and lowering operators defined as $J^\pm_i=J^x_i\pm iJ^y_i$. The first term of the Hamiltonian represents the battery and charger. The second term represents the reservoir with $d$ spatial dimensions and wave vectors $\mathbf{k}=(k_1,\cdots,k_d)$. $E_\mathbf{k}$ is the linear dispersion relation with $r_\mathbf{k}(r_\mathbf{k}^\dagger)$ the annihilation (creation) operator satisfying the commutation relation $[r_\mathbf{k},r_{\mathbf{k}'}^\dagger]=\delta(\mathbf{k}-\mathbf{k}')$. The third term is the interaction between this reservoir and the spins with coupling strength $g$, where $R=\int d^dk\kappa_\mathbf{k}r_\mathbf{k}$ with $\kappa_\mathbf{k}$ being a continuous function of $\mathbf{k}$ whose exact form depends on the system under consideration. 
Under the assumption that the spins and reservoir were initially uncorrelated, one can characterize the reservoir with the density matrix $\rho_R=\exp(-H_R/k_BT)/\text{Tr}_R[\exp(-H_R/k_BT)]$ where $H_R=\int d^dkE_\mathbf{k}r_\mathbf{k}^\dagger r_\mathbf{k}$. With the Born-Markov approximation, the Lindblad master equation during charging is~\cite{hama18}, \begin{equation} \begin{split} \dot{\rho}(t)&=-i\frac{\omega}{\hbar}[J_C^z+J_B^z,\rho(t)]\\ &\quad+\frac{\gamma}{\hbar^2}\Big[(\bar{n}+1)\mathcal{L}(J_C^-+J_B^-)+\bar{n}\mathcal{L}(J_C^++J_B^+)\Big]~, \end{split} \label{eq:me} \end{equation} where $\mathcal{L}(O)\equiv2O\rho O^\dagger-O^\dagger O\rho-\rho O^\dagger O$ is the Lindblad superoperator. The damping rate $\gamma$ is a function of $g$ and $|\kappa_\mathbf{k}|^2$, and $\bar{n}=1/(e^{\hbar\omega/k_BT}-1)$ is the mean thermal population. Importantly, even though we have a non-interacting spin model, the Lindblad operator gives rise to terms that act as a global spin entangling operator. \section{Energy transfer and stabilization mechanism} \label{sec:Energy transfer and stabilisation mechanism} Naively, one may expect all energy to be lost to the reservoir at zero temperature. However, quantum interference can lead to steady states that are not the ground state. Consider the two spin case at $T=0$, which at initial time is $\ket{\psi_0}=\ket{\frac12}_C\ket{{-\frac12}}_B$. This can be expressed as \begin{equation} \rho_0 =\frac12(\ket{\psi_+}\bra{\psi_+}+\ket{\psi_+}\bra{\psi_-}+\ket{\psi_-}\bra{\psi_+}+\ket{\psi_-}\bra{\psi_-})~, \end{equation} where $\ket{\psi_\pm}\equiv(\ket{\frac12}_C\ket{{-\frac12}}_B\pm\ket{{-\frac12}}_C\ket{\frac12}_B)/\sqrt{2}$~. The anti-symmetric component does not couple to the reservoir, since $\mathcal{L}(J_C^-+J_B^-)=0$ for $\ket{\psi_-}\bra{\psi_-}$, and therefore does not decay. Such states are known as dark or subradiant states~\cite{freedhoff67,stroud72}. 
The other components decay to the ground state leading to a steady state of the form \begin{equation} \rho^\text{ss}=\frac{1}{2}\ket{\psi_\downarrow}\bra{\psi_\downarrow}+\frac{1}{2}\ket{\psi_-}\bra{\psi_-}~, \label{eq:rho_ss} \end{equation} where $\ket{\psi_\downarrow}\equiv\ket{{-\frac12}}_C\ket{{-\frac12}}_B$ (see Appendix for a formal derivation). In this steady state the spin angular momenta of the charger and battery are \begin{equation} \Exp{J_C^z}=-\frac{\hbar}{4}~,\quad\Exp{J_B^z}=-\frac{\hbar}{4}~, \end{equation} where $\Exp{J_i^z}=\mathrm{Tr}(\rho_iJ_i^z)$. We immediately observe that $\hbar\omega/4$ units of energy have been transferred from the charger to the battery, since initially $\Exp{J_B^z}=-\hbar/2$. One notes that this transfer of energy cannot be viewed (semi-) classically as a transfer of energy due to the emission of a photon by the charger followed by the absorption of that photon by the battery. Instead, this is a purely quantum mechanical effect which arises out of the collective behavior of the battery, charger, and reservoir. As the steady state is decoupled from the environment, the stored energy of the battery is stable whilst the charger is present, even in the open system. This is the basis of how energy is transferred and stably stored in our open system protocol. In general, for this effect to take place the initial combined battery and charger states should overlap with a dark state(s). This condition is trivially satisfied when the charger state is initially excited and the battery is in its ground state. One notes that dark states have been proposed to stabilize energy storage in a single three-level system~\cite{santos19}; what we are proposing here is very different, involving the collective effect of multiple two-level systems. 
\begin{figure*} \caption{\textit{Superextensive capacity and power density.}} \label{fig:2} \end{figure*} \section{Superextensive capacity and charging} \label{sec:Superextensive capacity} \textit{Superextensive capacity.} The energy densities of the charger and battery are \begin{equation} \mathcal{E}_i(t)=\omega\Big(\frac{\Exp{J^z_i(t)}}{N_i}+\frac12\Big)~, \end{equation} with $i=B,C$. The capacity of the battery is defined as the energy in the steady state, \begin{equation} E_{R,N_B} \equiv N_B\mathcal{E}_B^\mathrm{ss}~, \end{equation} where $\mathcal{E}^\text{ss}_B$ is the steady state energy density with $R\equiv N_C/N_B$ being the ratio of the number of spins in the charger to the battery. We have shown for the case where $N_C=N_B=1$, that the steady state energy of the battery is $\hbar\omega/4$. If we had $M$ of these systems isolated from each other, the energy density would not change, so that the total capacity would be $E_{1,M}=M\mathcal{E}_B^{ss}=M\hbar\omega/4$. However, we can improve on this by charging the batteries collectively. As an example, let us consider the case with $R=5$ during charging. Solving the master equation [Eq.~(\ref{eq:me})] at zero temperature, we plot $\mathcal{E}_i(t)$ for $N_B=1,2,3$ in Fig.~\ref{fig:2}(a). Firstly, the plots show that $\mathcal{E}_C(t)$ monotonically decreases as $\mathcal{E}_{B}(t)$ correspondingly increases, indicating a transfer of energy from charger to the battery. Secondly, $\mathcal{E}^\text{ss}_B$ increases with $N_B$. This is shown in Fig.~\ref{fig:2}(b) where we plot $\mathcal{E}^\text{ss}_B(N_B)$ for $R=2,5,10$. As $\mathcal{E}^\text{ss}_B(N_B)$ increases monotonically, the capacity of the battery scales superextensively. With increasing $R$, the scaling of $\mathcal{E}^\text{ss}_B$ with $N_B$ decreases, \textit{i.e.} the plot tends to flatten out, even for small $N_B$. This indicates a decrease in the superextensive capacity of the battery with $R$. 
Fig.~\ref{fig:2}(c) plots $\mathcal{E}^\text{ss}_B(R)$ for $N_B=1,2,3$. In the thermodynamic limits, $\Lim{R \rightarrow \infty} \mathcal{E}^\text{ss} = \Lim{N_B \rightarrow \infty} \mathcal{E}^\text{ss} = \hbar\omega/2~,\forall R>1$~. The superextensive scaling of $\mathcal{E}^\mathrm{ss}$ means that the capacity of one battery with $M$ spins is greater than $M$ batteries with one spin, \textit{i.e.} $E_{R,M}>ME_{R,1}~,~\forall M>1$. This improves upon the Dicke QB, where the capacity does not in general superextensively scale with the number of spins~\cite{ferraro18}. \textit{Ergotropy.} One notes that not all stored energy may be extractable as work. In an open system, the thermal state energy ($\mathcal{E}_B^\text{th}$) represents a natural limit on extractable work as \begin{equation} \mathcal{W}^\text{open}=\mathcal{E}_B-\mathcal{E}_B^\text{th}~. \end{equation} For zero temperature $\mathcal{E}_B^\text{th}=0$, and so $\mathcal{W}^\text{open}=\mathcal{E}_B$. Another class of extractable work occurs under unitary evolution of the battery, and is known as \textit{ergotropy}. The ergotropy of a system is the maximal amount of work that can be extracted acting cyclically under thermal isolation. This is an important measure, as not all the energy stored in a system can be unitarily extracted as work. The ergotropy density is given by~\cite{allahverdyan04} \begin{equation} \mathcal{W}^\text{closed}=\mathcal{E}_B-\min_{U_B}\mathcal{E}_B~, \end{equation} where the second term is the minimum battery energy under all possible unitary evolution of the battery $U_B$. The $\min_{U_B}\mathcal{E}_B =\omega\min_{U_B} \text{Tr}(J^z_BU_B\rho_BU^\dagger_B)/N_B$ term can be found by ordering the eigenvalues of $J_B^z/N_B$ in increasing order ($\epsilon_1<\epsilon_2<\cdots<\epsilon_n$), and the eigenvalues of $\rho_B$ in decreasing order ($r_1>r_2>\cdots>r_n$). From this we get that~\cite{allahverdyan04} \begin{equation} \min_{U_B}\mathcal{E}_B =\omega\sum_ir_i\epsilon_i~. 
\end{equation} It is conjectured that $\mathcal{W}^\text{closed}\rightarrow\mathcal{E}_B$ in the large $N_B$ limit~\cite{andolina19}. This is a particularly useful conjecture as this would mean that in principle nearly all the stored energy could be extracted as work, in most practical applications. Our system is indeed consistent with this conjecture. In addition, we find that $\mathcal{W}^\text{closed}\rightarrow\mathcal{E}_B$ in the large $R$ limit also. We plot in Fig.~\ref{fig:A1}(a) the ergotropy for $R=5$ for various $N_B$. For $N_B=1$ the ergotropy is zero until the stored energy $\mathcal{E}_B>\frac12$ (or $\Exp{J_B^z}>0$). As $N_B$ increases, the ergotropy approaches the stored energy. Fig.~\ref{fig:A1}(b) plots the ergotropy for $N_B=1$ and various $R$. As $R$ increases, the ergotropy approaches the stored energy. The figures show that work can only be extracted in a cyclic manner when there is a net positive spin angular momentum, $\Exp{J_B^z}>0$. \begin{figure} \caption{Ergotropy (solid line) and stored energy (dotted line) for (a) various $N_B$ and $R=5$, (b) $R$ and $N_B=1$. For $N_B=1$ the ergotropy is zero until $\mathcal{E}_B>\frac12$.} \label{fig:A1} \end{figure} \label{sec:Superextensive charging} \textit{Superextensive charging.} The power density of the battery is given by \begin{equation} \mathcal{P}_B(t) = \frac{d\mathcal{E}_B(t)}{dt}~, \end{equation} which we plot in Fig.~\ref{fig:2}(d). The plot shows that maximum power density $\mathcal{P}_B^\text{max}$ increases with $N_B$. This is clearly shown in Fig.~\ref{fig:2}(e) where we observe that $\mathcal{P}_B^\text{max}(N_B)\propto N_B$. Up until now, charging protocols have required global interactions to achieve $N$ scaling~\cite{hovhannisyan13,binder15,le18}. Protocols with local interactions have not exceeded $\sqrt{N}$ scaling~\cite{ferraro18,le18,zhang18}. Here we have shown that one can achieve $N$ power scaling with non-interacting spins coupled to a reservoir. 
Fig.~\ref{fig:2}(f) shows that $\mathcal{P}_B^\text{max}(R)$ also scales with $R$. As the battery superextensively charges, if one were to simply disconnect the charger, it would superextensively discharge as well. The reason for this is that the coherent spins would superradiantly decay~\cite{dicke54}. However, if a slow discharge is desired, we propose an intermediate process of dephasing to destroy spin coherence, before disconnecting the charger. This could be achieved with a dephasing pulse, for example. With no coherence, the battery would discharge at the single-spin relaxation rate. \section{Entanglement} \label{sec:Entanglement} The role of entanglement has been studied in closed unitary QB systems~\cite{alicki13,hovhannisyan13,binder15,le18,zhang18}. Here we systematically investigate the role of entanglement in our open QB protocol. For mixed systems, the logarithmic negativity~\cite{vidal02,plenio05} provides a convenient measure of entanglement. It is defined using the trace norm as \begin{equation} S_B(t) = \log_2\|\rho^{\Gamma_B}(t)\|~, \end{equation} where $\Gamma_B$ denotes the partial transpose with respect to subsystem $B$. We plot $S_B(t)$ in Fig.~\ref{fig:3}(a), with the same parameters as Fig.~\ref{fig:2}(a). A comparison of these two plots shows higher entanglement to correspond to higher energy, supporting the idea that entanglement drives the superextensive capacity of the battery. Their relationship is shown in Fig.~\ref{fig:3}(b), where we plot $\mathcal{E}_B(t)$ and $\mathcal{S}_B(t)$ parameterized over $t$. In Fig.~\ref{fig:3}(c), we plot $\mathcal{S}_B^\mathrm{ss}(N_B)$, showing that steady state entanglement scales positively with $N_B$. In Fig.~\ref{fig:3}(d) we plot $\mathcal{E}_B^\mathrm{ss}(N_B)$ and $\mathcal{S}_B^\mathrm{ss}(N_B)$ parameterized over $N_B$, showing the positive correlation between the battery capacity and entanglement. 
Revealingly, Fig.~\ref{fig:3}(d) shows that entanglement decreases with increasing $R$ (for a given $\mathcal{E}^\text{ss}_B$), inline with the decreased superextensive scaling of $\mathcal{S}_B^\mathrm{ss}$ in Fig.~\ref{fig:2}(b). In other words, as $R$ increases we have less entanglement to drive the system, and hence the ability of the battery capacity to superextensively increase, diminishes. \begin{figure} \caption{\textit{Entanglement and capacity.} \label{fig:3} \end{figure} If energy correlates with entanglement, then it follows that power should correlate with entanglement rate. In Fig.~\ref{fig:4}(a) and (b) we plot $\mathcal{P}_B(t)$ and $\dot{S}_B(t)$ for $N_B=1,2,3$ at $R=50$. Periods of non-zero $\mathcal{P}_B(t)$ corresponds to periods of non-zero $\dot{S}_B(t)$. In Fig.~\ref{fig:4}(c) we plot the local maximum entanglement rate $\dot{S}_B^\mathrm{max}$, for various $R$ (when there are more than one local maxima, such is the case for $N_B=1$, we choose the largest value). The plot shows that $\dot{S}_B^\mathrm{max}$ linearly scales with $N_B$. As $\mathcal{P}^\mathrm{max}$ also linearly scales with $N_B$, $\dot{S}_B^\mathrm{max}$ and $\mathcal{P}^\mathrm{max}$ are positively correlated. Interestingly, $\mathcal{P}^\mathrm{max}$ and $\dot{S}_B^\mathrm{max}$ do not occur at the same time: $\dot{S}_B^\mathrm{max}$ lags $\mathcal{P}^\mathrm{max}$ by $\gamma\Delta t$. Fig.~\ref{fig:4}(d) plots this lag time; it shows that the lag time decreases with increasing $N_B$ or $R$. In the large $N_B$ or $R$ limit, the lag time vanishes. \begin{figure} \caption{\textit{Entanglement rate and power density.} \label{fig:4} \end{figure} Another important feature revealed by the plots is that $\dot{S}_B^\mathrm{max}$ increases with $R$, whilst $S^\mathrm{ss}_B$ decreases. 
This correlates with the observation that $\mathcal{P}^\mathrm{max}_B$ superextensively increases with $R$ [Fig.~\ref{fig:2}(d)], whilst the superextensivity of $\mathcal{E}^\mathrm{ss}_B$ diminishes with $R$ [Fig.~\ref{fig:2}(b)]. These correlations provide further evidence that entanglement underpins the superextensive properties of the battery. In unitary systems with global interaction, it has been shown that entangled states reduce the number of operations required to reach a passive state, thereby increasing power~\cite{hovhannisyan13,binder15}; the rate at which entangled states are generated does not seem to play a part. Here we show something different. In our non-unitary system with local interactions, we show that for a given $R$, energy is correlated with the level of entanglement, and power is related to the rate at which this entanglement is generated. This suggests a different mechanism for driving superextensive behavior with entanglement in our protocol. \section{Temperature} \label{sec:Temperature} The effects of thermal fluctuations on the battery provide a rich area of investigation; here we show some interesting properties. Let us begin by considering two spins at non-zero temperature. From Eq.~(\ref{eq:rho_ss_T}) we can determine the spin expectation values of the charger and battery for non-zero temperature, \begin{equation} \Exp{J_C^z}=\Exp{J_B^z}=-\frac{2\bar{n}+1}{12\bar{n}(\bar{n}+1)+4}\hbar~. \label{eq:energyTemp} \end{equation} At high temperature $\Lim{T \rightarrow \infty} \Exp{J_i^z} = 0~$, meaning thermal fluctuations dominate so that spins are equally likely to be found in the spin-up as spin-down state. At low temperature the battery obtains its energy primarily from the charger, but as the temperature increases the energy source shifts from the charger to the reservoir. This behavior is generalized to various $R$ as shown in Fig.~\ref{fig:5}, where we plot $\mathcal{E}_i(t)$ for increasing $T$. 
Fig.~\ref{fig:5}(a) shows that as the temperature increases, less energy is transferred from the charger to the battery. In Fig.~\ref{fig:5}(b) we plot $\mathcal{E}^\mathrm{SS}_B(T)$. It shows that all states converge to $\Lim{T \rightarrow \infty} \mathcal{E}_B^\mathrm{SS} = \frac12~$, as the system thermalizes. For states where $\mathcal{E}^\mathrm{SS}_B<\frac12$ at $T=0$, thermal fluctuations increases the battery capacity. Conversely for states where $\mathcal{E}^\mathrm{SS}_B>\frac12$ at $T=0$, thermal fluctuations decreases battery capacity. However there is a trade-off between the infusion of energy from the reservoir, and the destruction of dark states caused by thermal fluctuations. As shown in Fig.~\ref{fig:A1}, for $R=1$, $\mathcal{E}_B^\mathrm{SS}$ increases with temperature, as the infusion of energy from the reservoir more than compensates for the loss of energy from the destruction of dark states. Conversely, for $R>3$, $\mathcal{E}_B^\mathrm{SS}$ decreases with temperature, with the greatest decline occurring at low temperature, as the infusion of energy from the reservoir cannot compensate for the destruction of dark states. $R=2$ is an interesting intermediary case, as $\mathcal{E}_B^\mathrm{SS}$ can both increase or decrease, depending on the temperature. As previously mentioned, a non-zero temperature lowers the upper bound on extractable work. This is reflected in Fig.~\ref{fig:5}(d) which shows $\mathcal{W}_B^\text{SS}\rightarrow0$ as $\mathcal{E}_B^\text{SS}\rightarrow\frac12$, since one would not expect there to be any extractable work under unitary transformations as the system thermalizes. 
\begin{figure} \caption{\textit{Charger and battery performance at non-zero temperature.} \label{fig:5} \end{figure} \section{Implementation and applications} \label{sec:Implementation} Our protocol can be implemented with atomic or artificial two-level systems, including superconducting qubits, semiconductor quantum dots, ultracold atoms, trapped ions, and nitrogen-vacancy (NV) centers. We propose that experimental verification should be conducted in two regimes. Our protocol should be investigated deep in the quantum regime with few spins and at low temperature, but with a high level of control and measurement. As such, superconducting qubits coupled to a broad band resonator, which acts as the reservoir, would be suitable~\cite{gu17}. However, this platform typically is limited to few qubits. Although QB capacity on small energy scales may find application in quantum technologies, verifying the ability to scale up capacity is important for wider adoption. Therefore, we propose that the protocol should also be investigated in the semi-classical regime with many spins and high temperature. NV centers coupled to a broad band resonator, would be a suitable platform to achieve this. Large coherent ensembles of NV-center spins ($>10^{16}$) coupled to superconducting circuits have been used to demonstrate the collective behavior of superradiance~\cite{angerer18}, and the coherent coupling between two macroscopically separated spin ensembles has also been realized~\cite{astner17}. Because QBs utilize quantum properties, they should find applications in other quantum technologies, such as quantum computing, communication, sensing. As these technologies are underpinned by the quantum storage and transfer of energy, the applications of QB devices or principles to these technologies has the potential to improve their functionality, possibly opening new fields of investigation. 
For example, superextensive charging may increase quantum computation power, enhance quantum capacitor capabilities, and QB principles could advance quantum sensing devices. Whether QBs can replace conventional batteries is ultimately a question of scalability. Nevertheless, QB devices and principles will need to find novel ways to interface with conventional technologies. An example of how quantum technology can find novel application in classical devices is provided by the quantum dot solar cell. Here the tunable band gap of quantum dots replaces the fixed band gap of conventional bulk materials such as silicon, copper indium gallium selenide (CIGS) or cadmium telluride (CdTe). QB principles have significant potential to find applications in solar cells, as their superextensive charging property may be utilized to superabsorb light. \section{Conclusion} \label{sec:Conclusion} Our protocol is a major step towards the experimental realization of a QB that achieves superextensive capacity and charging: it uses only local interactions, and is intrinsically stable in an open system---two critical features for practical applications. This rich protocol opens the way for further theoretical investigation, including a deeper understanding of the correlation between entanglement rate and power. \section*{Acknowledgments} \label{sec:Acknowledgements} We thank Jared Cole, Sergi Julia-Farre, Andreas Angerer, Johannes Majer and Kae Nemoto for valuable discussions. JQQ acknowledges the Ramsay fellowship and the Centre for Nanoscale BioPhotonics Family Friendly Fund, for financial support of this work. WJM acknowledges partial support for this work from a Japanese MEXT Grant-in-Aid for Scientific Research (A) KAKENHI Grant No. 19H00662 and the MEXT Quantum Leap Flagship Program (MEXT Q-LEAP) Grant No. JP-MXS0118069605. 
\section*{Appendix: Derivation of the steady state of two spins in a thermal reservoir} \label{sec:Derivation of the steady state of two spins in a thermal reservoir} Here we derive the steady state of two spins in a thermal reservoir, which gives Eq.~(3) in the main text. We begin by defining the following spin basis: \begin{align} \ket{\psi_\uparrow}&\equiv \ket{\frac12}_C\ket{\frac12}_B \equiv \ket{1}\\ \ket{\psi_+}&\equiv\frac{\ket{\frac12}_C\ket{{-\frac12}}_B+\ket{{-\frac12}}_C\ket{\frac12}_B}{\sqrt{2}}\equiv \ket{2}\\ \ket{\psi_-}&\equiv\frac{\ket{\frac12}_C\ket{{-\frac12}}_B-\ket{{-\frac12}}_C\ket{\frac12}_B}{\sqrt{2}}\equiv \ket{3}\\ \ket{\psi_\downarrow}&\equiv \ket{{-\frac12}}_C\ket{{-\frac12}}_B\equiv \ket{4} \end{align} From the Lindblad master equation, we write down the equations of motion for the elements of the Hermitian density matrix in the spin basis defined above [$\rho_{ij}\equiv\Exp{i|\rho|j}$]: \begin{align} \dot{\rho}_{11}&=-2\gamma(\bar{n}+1)\rho_{11}+2\gamma \bar{n}\rho_{22}\\ \dot{\rho}_{22}&=2\gamma(\bar{n}+1)\rho_{11}-2\gamma (2\bar{n}+1)\rho_{22}+2\gamma \bar{n}\rho_{44}\\ \dot{\rho}_{33}&=0\\ \dot{\rho}_{44}&=2\gamma(\bar{n}+1)\rho_{22}-2\gamma \bar{n}\rho_{44}\\ \dot{\rho}_{12}&=-[\gamma(3\bar{n}+2)-i\omega]\rho_{12}\\ \dot{\rho}_{13}&=-(\gamma \bar{n}-i\omega)\rho_{13}\\ \dot{\rho}_{14}&=-[\gamma(2\bar{n}+1)-i2\omega]\rho_{14}\\ \dot{\rho}_{23}&=-\gamma(2\bar{n}+1)\rho_{23}\\ \dot{\rho}_{24}&=-[\gamma(3\bar{n}+1)-i\omega]\rho_{24}\\ \dot{\rho}_{34}&=-(\gamma \bar{n}-i\omega)\rho_{34} \end{align} Solving these equations one finds that in the steady state, the off-diagonal terms vanish, leaving only the diagonal terms given by: \begin{align} \rho_{11}^\mathrm{ss}&=\frac{\bar{n}^2[1-\rho_{33}(0)]}{1+3\bar{n}(\bar{n}+1)}\label{eq:rho11}\\ \rho_{22}^\mathrm{ss}&=\frac{\bar{n}(\bar{n}+1)[1-\rho_{33}(0)]}{1+3\bar{n}(\bar{n}+1)}\label{eq:rho22}\\ \rho_{33}^\mathrm{ss}&=\rho_{33}(0)\label{eq:rho33}\\ 
\rho_{44}^\mathrm{ss}&=\frac{(\bar{n}+1)^2[1-\rho_{33}(0)]}{1+3\bar{n}(\bar{n}+1)}\label{eq:rho44} \end{align} The initial state in the spin basis has non-zero elements: $\rho_{22}(0)=\rho_{23}(0)=\rho_{32}(0)=\rho_{33}(0)=1/2$~. It is then straightforward to show the steady state density matrix has the form \begin{equation} \begin{split} \rho^\text{ss}&=\frac12[\bar{n}^2\ket{\psi_\uparrow}\bra{\psi_\uparrow}+\bar{n}(\bar{n}+1)\ket{\psi_+}\bra{\psi_+}\\ &\quad+\ket{\psi_-}\bra{\psi_-}+(\bar{n}+1)^2\ket{\psi_\downarrow}\bra{\psi_\downarrow}]\\ &\quad/[1+3\bar{n}(\bar{n}+1)]~. \end{split} \label{eq:rho_ss_T} \end{equation} Eq.~(3) of the main text is obtained by setting $\bar{n}=0$. From the density matrix one can get the spin expectation values through $\Exp{J_i^z}=\mathrm{Tr}(\rho_iJ_i^z)$. \end{document}
\begin{document} \title{The Lindel{\"o}f Hypothesis for almost all Hurwitz's Zeta-Functions holds true } \author{Masumi Nakajima \ \\ \it Department of Economics \ \\ \it International University of Kagoshima \ \\ \it Kagoshima 891-0191, JAPAN \\ e-mail: [email protected] } \maketitle \begin{abstract} By probability theory we prove here that the Lindel{\"o}f hypothesis holds for almost all Hurwitz's zeta-functions, i.e. \\ $ \qquad \zeta({1\over2} + it,\omega)={\rm o}_{\omega,\epsilon}\{(\log t)^{{3\over2} + \epsilon}\} $ \\ for almost everywhere $ 0< \omega <1,$ and for any small $ \epsilon >0,$ where $ {\rm o}_{\omega,\epsilon} $ denotes the Landau small o-symbol which depends on $\omega$ and $\epsilon$ and $\zeta(s,\omega)$ denotes the Hurwitz zeta-function. The details will be given elsewhere.\\ Key words ; The Riemann zeta function, the Hurwitz zeta function, the Lindel{\"o}f hypothesis, law of large numbers, law of the iterated logarithm. Mathematics Subject Classification ; \\ 11M06, 11M26, 11M35, 60F15. \end{abstract} Let $ \zeta(s,\omega)$ be the Hurwitz zeta function which is meromorphically extended to the whole complex plane from the Dirichlet series \[ \sum_{n=0}^{\infty} (n+{\omega})^{-s} \quad (s=\sigma + it,\sigma=\Re s >1, \ 0<\omega \leq 1). \] We should note that \[ \zeta(s,1)=\zeta(s),\] \[ \zeta(s,{1\over2} )=({2^s}-1)\zeta(s), \] where $\zeta(s)$ denotes the Riemann zeta function.\\ \quad In analytic number theory, there are three famous conjectures which are related to each other as follows.\\ {\bf The Riemann Hypothesis }(1859, by B.Riemann):\\ $ \rho \notin {\bf R}, \ \zeta(\rho)=0 \Rightarrow \Re \rho ={1\over2} $ \\ {\bf The Lindel{\"o}f Hypothesis }(1908, by E.Lindel{\"o}f):\\ $ \zeta({1\over2}+ it)={\rm O}_{\epsilon}(t^{\epsilon}) \ for \ any \ small \ {\epsilon}>0,$ \\ where ${\rm O}_{\epsilon}$ denotes the Bachmann-Landau large O-symbol which depends on $\epsilon$. 
\\ {\bf The Density Hypothesis }: \\ \[ N(\sigma,T)={\rm O}_{\epsilon}(T^{2-2\sigma + \epsilon}) \] \[ for \ any \ small \ \epsilon>0 \ and \ {1\over2}\leq \sigma \leq 1,\] \\ where $ N(\sigma,T) $ denotes the number of zeros of $\zeta(s)$ in the rectangle whose four vertices are $ \sigma ,1,1+iT $ and $ \sigma +iT $. \\ It is well known that \\ $the \ Riemann \ Hypothesis \Rightarrow the \ Lindel\ddot{o}f \ Hypothesis $ \\ $ \Rightarrow the \ Density \ Hypothesis.$ \\ (It is not known whether the Lindel{\"o}f Hypothesis implies the Riemann Hypothesis or not.)\\ And also, as is well known, the Riemann Hypothesis is the most important and the strongest conjecture that has serious influences on many branches of mathematics including number theory. But it is less known that in fact the Lindel{\"o}f Hypothesis has almost the same effects on number theory as the Riemann Hypothesis ~\cite{A1976}~\cite{I1985}~\cite{T1951}. About the Lindel{\"o}f Hypothesis there are many studies which improve the power $L$ of $t$ in $\zeta({1\over2}+ it)={\rm O}(t^L)$. These studies in this direction have their long history and story. The recent results in this direction are due to G.Kolesnik, E.Bombieri, H.Iwaniec, M.N.Huxley, N.Watt and others, for example, $\zeta({1\over2}+ it)={\rm O}(t^{9/56})$ due to Bombieri and Iwaniec in 1986, and the best up to the present time is $={\rm O}(t^{32/205})$ due to Huxley in 2005. \\ \quad In 1952, Koksma and Lekkerkerker~\cite{K-L1952} proved that \\ \[ \int_0^1 |\zeta_{1}({1\over2}+it,\omega)|^{2}d\omega={\rm O}(\log t) \] where $\zeta_{1}(s,\omega):=\zeta(s,\omega)-\omega^{-s}$, whose term $-\omega^{-s}$ keeps out the singularity at $ \omega=0$. 
\\ \quad From this mean value results, by using {\v C}eby{\v s}ev's inequality in probability theory, we easily have \[ \mu \{ 0<\omega \leq 1 ; |\zeta({1\over2}+it,\omega)| \geq C \sqrt{\log t} \} \leq {{{\rm O}(1)}\over{C^2}} \] \[ for \ any \ t>1 \ and \ any \ large \ C>0, \] where $\mu \{ B \} $ denotes the Lebesgue measure of measurable set $ B $, which shows that the Lindel{\"o}f Hypothesis holds in the sence of weak law in probability theory. \\ \quad In this short note we give the following strong law version of the Lindel{\"o}f Hypothesis , that is, \\ \newtheorem{theo}{Theorem} \begin{theo} \begin{eqnarray*} \zeta({1\over2}+it,\omega)={\rm o}_{\omega,\epsilon}\{(\log t)^{{3\over2} + \epsilon}\} \end{eqnarray*} for almost everywhere $ \omega \in \Omega:=(0,1) $ and for any small $ \epsilon >0 $. \end{theo} \quad In order to prove this theorem, we need some definitions and some results in probability theory. \\ \quad Let $ (\Omega, { F},{\rm P}) $ be some probability space, $X,Y,Z,\cdots $ be complex valued random variables on this space, $ {\rm E}[X] $ be the expectation value of the random variable $ X $ and $ {\rm V}[X]={\rm E}[|X-{\rm E}[X]|^2] $ be the variance of $ X $. \newtheorem{lem}{Lemma} \begin{lem} Let $ Z $ be a complex valued random variable. If $ {\rm E}[|Z|^2]| < +\infty $, then we have \[ |Z|<+\infty \ almost \ surely \ (abbreivated \ by \ a.s. ),\] \[i.e. \ {\rm P}\{ |Z|<+\infty \}=1. \] \end{lem} {\bf proof.}\ From $|Z| \geq 0$, we have \[ 0 \leq |Z|=|Z(\omega)| < +\infty \ {\rm or} \ |Z|=|Z(\omega)| = +\infty. \] We define the set $A \subset \Omega$ by $A:=\{ \omega ; |Z(\omega)|=+\infty \}$ and the indicator function of the set $A$; \begin{eqnarray*} 1_{A}:=1_{A}(\omega):=\left\{ \begin{array}{ll} 1 & (\omega \in A) \\ 0 & (\omega \notin A). \end{array} \right. 
\end{eqnarray*} If we assumed that ${\rm P}\{ \omega \in \Omega ; |Z(\omega)| < +\infty \} < 1 $, we would have ${\rm P}\{ A \}>0 $ and \[ {\rm E}[|Z|^2] \geq {\rm E}[|Z|^{2} 1_{A} ]=(+\infty){\rm P}\{ A \}=+\infty, \] which is the contradiction to the assumption $ {\rm E}[|Z|^2]| < +\infty. $ So we have the lemma. \begin{lem} Let $ Z_n $ be a complex valued random variables \ $(n=1,2,3.\cdots)$. If $ \sum_{n=1}^{\infty}{\rm E}[|Z_n|^2] < +\infty $, then we have \\ \[ Z_n \rightarrow 0 \ a.s.\ (as \ n \rightarrow +\infty ).\] \end{lem} {\bf proof.}\ By Lemma 1, we have \[ {\rm P}\{ \sum_{n=1}^{\infty}|Z_n|^2 < \infty \}=1, \] which shows that $|Z_n|^2 \rightarrow 0 \ a.s.\ (as \ n \rightarrow +\infty )$, that is, $Z_n \rightarrow 0 \ a.s.\ (as \ n \rightarrow +\infty )$. \begin{lem} {\rm ({\rm Rademacher-Menchoff's lemma}~\cite{D1953} ~\cite{K1999}) } \\ Let $ a(p) \ (p=1,2,\cdots,2^{n+1}-1) $ be complex numbers and $ a(0):=0 $, then we have \begin{eqnarray*} \lefteqn{ \max_{ 1\leq p <2^{n+1} } |a(p)|^2 } \\ & \leq & (n+1) \sum_{k=0}^{n} \sum_{j=0}^{2^{n-k}-1}|a(2^{k}+j2^{k+1}) - a(j2^{k+1})|^2. \end{eqnarray*} \end{lem} {\bf proof.}\ For the natural number $p$ which satisfy $1 \leq p < 2^{n+1}$, we have its binomial expansion; \[ p=\sum_{j=0}^{n}\epsilon_{j}2^{j} \ (\epsilon_{j}=0\ {\rm or} \ 1). \] With respect to the above $p$, we define $p_{k+1},\ p_{n+1},\ p_{0}$ respectively by \begin{eqnarray*} p_{k+1}:=\sum_{j=k+1}^{n}\epsilon_{j}2^{j} \ (k=0,1,2,\cdots,n-1),\\ p_{n+1}:=0,\ p_0:=p. \end{eqnarray*} From these definitions we have \begin{eqnarray*} p_0=p \geq p_1 \geq p_2 \geq \cdots \geq p_n \geq p_{n+1}=0, \ \ \ \ \ (1) \\ p_k - p_{k+1}=\epsilon_{k}2^k, \quad \quad \quad \\ p_{k+1}=\sum_{j=k+1}^{n}\epsilon_{j}2^{j} =\sum_{j=0}^{n-k-1}\epsilon_{k+1+j}2^{j+k+1} \\ =\sum_{j=0}^{n-k-1}(\epsilon_{k+1+j}2^{j})2^{k+1}=: \delta_{k+1}2^{k+1},\ \ \ (2) \\ 0 \leq \delta_{k+1} \leq \sum_{i=0}^{n-k-1} 2^i = 2^{n-k}-1. 
\ \ \ (3) \end{eqnarray*} From \[ a(p)=a(p_0)=a(p_0)-a(p_{n+1})=\sum_{k=0}^{n}(a(p_{k})-a(p_{k+1})), \] we have \begin{eqnarray*} |a(p)|^2=|\sum_{k=0}^n 1 \cdot (a(p_k)-a(p_{k+1}))|^2 \\ \leq \sum_{k=0}^n 1 \sum_{k=0}^n |a(p_k)-a(p_{k+1})|^2 \\ =(n+1)\sum_{k=0}^n |a(p_k)-a(p_{k+1})|^2 \\ =(n+1)\sum_{k=0}^n |a(\epsilon_k 2^k + p_{k+1})-a(p_{k+1})|^2 \ \\ \ ({\rm by \ (1)}) \\ =(n+1)\sum_{k=0}^n |a(\epsilon_k 2^k + \delta_{k+1} 2^{k+1})-a(\delta_{k+1} 2^{k+1})|^2 \ \\ \ ({\rm by \ (2)}) \\ \leq (n+1)\sum_{k=0}^n \sum_{j=0}^{2^{n-k}-1} |a(2^k + j 2^{k+1})-a(j 2^{k+1})|^2, \ (4) \\ \end{eqnarray*} because we take the summation with respect to $k$ into account only when $\epsilon_k =1$, and we sum up $j$ in place of $\delta_k$ by (3). By the fact that the right hand side of (4) is independent of $p$, we have the lemma. \newtheorem{de}{Definition} \begin{de} {\rm Let $ X,Y $ be complex valued random variables which satisfy \ $ {\rm E}[|X|^2],\ {\rm E}[|Y|^2]< \infty. $ \\ If $ {\rm E}[\bar{X}Y]={\rm E}[\bar{X}]{\rm E}[Y], $ \ we call $ X,Y $ (pairwise) uncorrelated. } \end{de} \begin{de} {\rm Let $ X,Y $ be complex valued random variables which satisfy \ $ {\rm E}[|X|^2],\ {\rm E}[|Y|^2]< \infty. $ \\ If $ {\rm E}[\bar{X}Y]=0, $ \ we call $ X,Y $ {\rm(}pairwise{\rm)} orthogonal. } \end{de} \begin{lem} {\rm ({\rm Rademacher-Menchoff}~\cite{D1953} ~\cite{K1999}) } \\ Let $ X_1,X_2,\cdots $ be pairwise uncorrelated complex valued random variables which satisfy \[ {\rm E}[X_i]=0, \ \sigma_i^2:={\rm V}[X_i] \ (i=1,2,\cdots), \ \sigma_i \geq 0 \] and let $ S_n:=S_n(\omega):=X_1+X_2+\cdots+X_n. $ \\ Then we have \begin{eqnarray*} \lefteqn{ {\rm E}[\max_{ 2^m < k \leq 2^{m+1} } |S_k-S_{2^m}|^2 ] } \\ & \leq & (m^2 + 1) \sum_{i=1}^{2^m} \sigma_{2^m + i}^2 \ for \ m=0,1,2,\cdots . \end{eqnarray*} \end{lem} {\bf proof.}\ In Lemma 3, we put \[ n=m-1, \ a(p)=X_{2^m+1}+X_{2^m+2}+\cdots+X_{2^m+p}. 
\] From this, we have \begin{eqnarray*} {\rm E}[\max_{ 2^m < k \leq 2^{m+1} } |S_k-S_{2^m}|^2 ] \\ \leq {\rm E}[\max_{ 1 \leq k <2^{m} } |S_{2^m+k}-S_{2^m}|^2 ] + {\rm E}[|S_{2^{m+1}}-S_{2^m}|^2 ] \\ ={\rm E}[\max_{ 1 \leq p <2^{m} } |X_{2^m+1}+X_{2^m+2}+\cdots+X_{2^m+p}|^2 ] \\ + {\rm E}[|X_{2^m+1}+X_{2^m+2}+\cdots+X_{2^{m+1}}|^2 ] \\ \leq {\rm E}[m\sum_{k=0}^{m-1}\sum_{j=0}^{2^{m-1-k}-1} |(X_{2^m+1}+ X_{2^m+2}+\cdots+X_{2^m+2^k+j2^{k+1}}) \\ -(X_{2^m+1}+X_{2^m+2}+\cdots+X_{2^m+j2^{k+1}})|^2 ] \\ + {\rm E}[|X_{2^m+1}+X_{2^m+2}+\cdots+X_{2^{m+1}}|^2 ] \\ ({\rm by \ Lemma \ 3})\\ =m \sum_{k=0}^{m-1}\sum_{j=0}^{2^{m-1-k}-1} {\rm E}[|X_{2^m+j2^{k+1}+1}+X_{2^m+j2^{k+1}+2}+ \\ \cdots +X_{2^m+j2^{k+1}+2^k}|^2 ] \\ + {\rm E}[|X_{2^m+1}+X_{2^m+2}+\cdots+X_{2^{m+1}}|^2 ] \\ =m \sum_{k=0}^{m-1}\sum_{j=0}^{2^{m-1-k}-1} {\rm V}[X_{2^m+j2^{k+1}+1}+X_{2^m+j2^{k+1}+2}+ \\ \cdots +X_{2^m+j2^{k+1}+2^k} ] \\ + {\rm V}[X_{2^m+1}+X_{2^m+2}+\cdots+X_{2^{m+1}} ] \\ =m \sum_{k=0}^{m-1}\sum_{j=0}^{2^{m-1-k}-1} \sum_{i=1}^{2^k}\sigma_{2^m+j2^{k+1}+i}^2 +\sum_{i=1}^{2^m}\sigma_{2^m+i}^2 \\ \leq m \sum_{k=0}^{m-1} \sum_{i=1}^{2^m}\sigma_{2^m+i}^2 +\sum_{i=1}^{2^m}\sigma_{2^m+i}^2 \\ \leq m \cdot m \sum_{i=1}^{2^m}\sigma_{2^m+i}^2 +\sum_{i=1}^{2^m}\sigma_{2^m+i}^2 \\ =(m^2+1)\sum_{i=1}^{2^m} \sigma_{2^m+i}^2, \end{eqnarray*} which completes the proof of the lemma.\\ \quad By using these lemmas, we have \begin{theo}{\rm~\cite{N2004.1}} \\ Let $ X_1^{(n)},X_2^{(n)},\cdots , X_k^{(n)},\cdots $ be pairwise uncorrelated complex valued random variables which may depend on $ n $ and satisfy \[ {\rm E}[X_k^{(n)}]=0, \ \sigma_k^2:={\rm V}[X_k^{(n)}]= {\rm O}(k^{-2\alpha}),\ |X_k^{(n)}|<+\infty \] \[ (k,n=1,2,\cdots,\ \ \alpha \in {\bf R}, \forall \omega \in \Omega) \] , where $ \sigma_k \geq 0 $ do not depend on $ n $ . 
Also let \[ S_n^{(l)}:=S_n^{(l)}(\omega):= X_1^{(l)}+X_2^{(l)}+\cdots+X_n^{(l)}, \] and \[ \varphi(n):=n^{\beta}(\log n)^{{3\over2}+\epsilon} \ with \ any \ small \ \epsilon >0 \] \begin{eqnarray*} \beta :=\left\{ \begin{array}{ll} 0 & (\alpha \geq {1\over2}) \\ {1\over2}- \alpha & (\alpha < {1\over2}). \end{array} \right. \end{eqnarray*} Then we have \[ S_n^{(n)}=S_n^{(n)}(\omega)= {\rm o}_{\omega,\epsilon}(\varphi(n)) \ a.s.\ \omega \in \Omega. \] \end{theo} {\bf proof.}\ We choose any natural number sequence $\{n_k\}_{k=1}^\infty$ with $2^k<n_k\leq 2^{k+1}$ and $X_{1}^{(l)},X_{2}^{(l)},\cdots,X_{2^{m+1}}^{(l)},\cdots\ (l,m \in {\bf N})$ are pairwise uncorrelated complex valued random variables for any $l \in {\bf N}$. We have \begin{eqnarray*} {\rm E}[\sum_{k=1}^m |{{S_{2^k}^{(n_k)}}\over{\varphi(2^k)}}|^2 +\sum_{k=m+1}^\infty |{{S_{2^k}^{(2^{k+1})}}\over{\varphi(2^k)}}|^2] =\sum_{k=1}^\infty 2^{-2k\beta}{(\log 2^k)}^{-3-2\epsilon} (\sigma_1^2 + \sigma_2^2 + \cdots + \sigma_{2^k}^2). \ \ (5)\\ \end{eqnarray*} In case of $\alpha > {1\over2}$, then $\beta=0$ and we have \begin{eqnarray*} (5)={\rm O}(\sum_{k=1}^\infty {(\log 2^k)}^{-3-2\epsilon} \sum_{l=1}^{2^k} l^{-2\alpha}) \\ ={\rm O}(\sum_{k=1}^\infty k^{-3-2\epsilon})<+\infty. \end{eqnarray*} In case of $\alpha = {1\over2}$, then $\beta=0$ and we have \begin{eqnarray*} (5)={\rm O}(\sum_{k=1}^\infty {(\log 2^k)}^{-3-2\epsilon} \sum_{l=1}^{2^k} l^{-1}) \\ ={\rm O}(\sum_{k=1}^\infty k^{-3-2\epsilon}\cdot \log 2^k) \\ ={\rm O}(\sum_{k=1}^\infty k^{-2-2\epsilon})<+\infty. \end{eqnarray*} In case of $\alpha < {1\over2}$, then $\beta={1\over2}-\alpha$ and we have \begin{eqnarray*} (5)={\rm O}(\sum_{k=1}^\infty 2^{-k(1-2\alpha)} {(\log 2^k)}^{-3-2\epsilon} \sum_{l=1}^{2^k} l^{-2\alpha}) \\ ={\rm O}(\sum_{k=1}^\infty 2^{-k(1-2\alpha)} k^{-3-2\epsilon} \cdot 2^{k(1-2\alpha)}) \\ ={\rm O}(\sum_{k=1}^\infty k^{-3-2\epsilon})<+\infty. 
\end{eqnarray*} Then in any case, we have \[ {\rm E}[\sum_{k=1}^m |{{S_{2^k}^{(n_k)}}\over{\varphi(2^k)}}|^2 +\sum_{k=m+1}^\infty |{{S_{2^k}^{(2^{k+1})}}\over{\varphi(2^k)}}|^2] <+\infty , \] which means, by Lemma 2, with some $A((n_1,\cdots,n_m))\subset\Omega$, \[ \sum_{k=1}^m |{{S_{2^k}^{(n_k)}(\omega)}\over{\varphi(2^k)}}|^2 +\sum_{k=m+1}^\infty |{{S_{2^k}^{(2^{k+1})}(\omega)}\over{\varphi(2^k)}}|^2 <+\infty \] for $\forall \omega \in A((n_1,\cdots,n_m))$ with ${\rm P}\{A((n_1,\cdots,n_m))\}=1$. We put \[ A(m):=\bigcap_{(n_1,\cdots,n_m)}A((n_1,\cdots,n_m)) \] where $(n_1,\cdots,n_m)$ under $\cap$ runs through all $(n_1,\cdots,n_m)\in {\bf N}^{m}$ with $2^k<n_k\leq 2^{k+1} (k=1,2,\cdots,m).$ \\ Since \[\bigcap_{(n_1,\cdots,n_m)}\] is finitely many intersections of the sets, we have \[{\rm P}\{A(m)\}=1.\] Therefore we have \[ \sum_{k=1}^m |{{S_{2^k}^{(n_k)}(\omega)}\over{\varphi(2^k)}}|^2 +\sum_{k=m+1}^\infty |{{S_{2^k}^{(2^{k+1})}(\omega)}\over{\varphi(2^k)}}|^2 <+\infty \] for $\forall\omega \in A(m)$ with \ ${\rm P}\{A(m)\}=1$.\\ We show that \[A(m)=A(m+1) \ (m=1,2,\cdots).\] In fact, if $\omega \in A(m)$ which means \[ \sum_{k=1}^m |{{S_{2^k}^{(n_k)}(\omega)}\over{\varphi(2^k)}}|^2 +\sum_{k=m+1}^\infty |{{S_{2^k}^{(2^{k+1})}(\omega)}\over{\varphi(2^k)}}|^2 <+\infty \], then we immediately have \[ \sum_{k=1}^m |{{S_{2^k}^{(n_k)}(\omega)}\over{\varphi(2^k)}}|^2 +|{{S_{2^{m+1}}^{(n_{m+1})}(\omega)}\over{\varphi(2^{m+1})}}|^2 +\sum_{k=m+2}^\infty |{{S_{2^k}^{(2^{k+1})}(\omega)}\over{\varphi(2^k)}}|^2 <+\infty \] for $2^{m+1}<\forall n_{m+1}\leq 2^{m+2}$, because $|X_k^{(l)}(\omega)|<+\infty$ for $\forall \omega \in \Omega.$ This means $\omega \in A(m+1)$. 
Inversely $\omega \in A(m+1)$ implies $\omega \in A(m)$ by the same argument.\\ So, There exists \[\lim_{m \to +\infty}A(m)=:A=A(1) \ \ {\rm and} \ \ {\rm P}\{A\}=1.\] This means \[ \sum_{k=1}^\infty |{{S_{2^k}^{(n_k)}(\omega)}\over{\varphi(2^k)}}|^2 <+\infty \ for \ \forall\{n_k\}_{k=1}^\infty,\ \forall \omega \in A \ with \ {\rm P}\{A\}=1 \] and \[ \lim_{k \to +\infty}{{S_{2^k}^{(n_k)}(\omega)}\over{\varphi(2^k)}} =0 \ a.s. \ \omega \ for \ \forall\{n_k\}_{k=1}^\infty \quad \quad \quad {\rm (6)} \] Next we put \begin{eqnarray*} Y_k^{(n_k)}:=\max_{1\leq l \leq 2^k} |X_{2^k+1}^{(n_k)}+X_{2^k+2}^{(n_k)}+\cdots+X_{2^k+l}^{(n_k)}| \\ . \end{eqnarray*} By Lemma 4, we have for any $l \in {\bf N}$ \begin{eqnarray*} {\rm E}[|Y_k^{(l)}|^2]\leq (k^2+1)\sum_{i=1}^{2^k}\sigma_{2^k+i}^2 \\ =\left\{ \begin{array}{ll} {\rm O}(k^2+1) & (\alpha \geq {1\over2}) \\ {\rm O}(2^{(k+1)(1-2\alpha)}(k^2+1)) & (\alpha < {1\over2}). \end{array} \right. \end{eqnarray*} and \[ {\rm E}[\sum_{k=1}^m |{{Y_{k}^{(n_k)}}\over{\varphi(2^k)}}|^2 +\sum_{k=m+1}^\infty |{{Y_{k}^{(2^{k+1})}}\over{\varphi(2^k)}}|^2] ={\rm O}(\sum_{k=1}^\infty 2^{-2k\beta} k^{-3-2\epsilon} {\rm E}[ |Y_{k}^{(l)}|^2])\ \ with \ \forall l\in {\bf N}. \ \ (7) \] In case of $\alpha \geq {1\over2}$, then $\beta=0$ and we have \begin{eqnarray*} (7)={\rm O}(\sum_{k=1}^\infty k^{-3-2\epsilon}(k^2+1) ) ={\rm O}(\sum_{k=1}^\infty k^{-1-2\epsilon})<+\infty. \end{eqnarray*} In case of $\alpha < {1\over2}$, then $\beta={1\over2}-\alpha$ and we have \begin{eqnarray*} (7)={\rm O}(\sum_{k=1}^\infty 2^{-k(1-2\alpha)} k^{-3-2\epsilon} \cdot (k^2+1) 2^{(k+1)(1-2\alpha)}) \\ ={\rm O}(2^{-2\alpha}\sum_{k=1}^\infty k^{-1-2\epsilon})<+\infty. \end{eqnarray*} In any case, we have \[ {\rm E}[\sum_{k=1}^m |{{Y_{k}^{(n_k)}}\over{\varphi(2^k)}}|^2 +\sum_{k=m+1}^\infty |{{Y_{k}^{(2^{k+1})}}\over{\varphi(2^k)}}|^2] <+\infty . 
\] The same argument as that of ${{S_{2^k}^{(n_k)}}\over{\varphi(2^k)}}$ leads \[ \lim_{k\to\infty}{{Y_{k}^{(n_k)}(\omega)}\over{\varphi(2^k)}} =0 \ {\rm a.s.} \ \ {\rm for}\ \forall\{n_k\}_{k=1}^\infty . \ \ \ (8) \] Then, for any $n$ with $2^m<n\leq 2^{m+1}$, we have, by (6) and (8), \begin{eqnarray*} {{|S_{n}^{(n)}|}\over{\varphi(n)}} \leq {{|S_{n}^{(n)}|}\over{\varphi(2^m)}} \leq { { |S_{2^m}^{(n)}|+Y_{m}^{(n)} }\over{\varphi(2^m)} }\\ ={ { |S_{2^m}^{(n)}| }\over{\varphi(2^m)} }+ { { Y_{m}^{(n)} }\over{\varphi(2^m)} } \to 0 \ \ {\rm a.s.}\ \ ({\rm as}\ n\to\infty), \end{eqnarray*} which means \[ S_n^{(n)}=S_n^{(n)}(\omega)= {\rm o}_{\omega}(\varphi(n)) \ a.s.\ \omega \in \Omega, \ \ {\rm with \ any \ small \ \epsilon>0.} \] This completes the proof. \newtheorem{rem}{Remark} \begin{rem} {\rm This theorem is a generalization of the strong limit theorem the position of which may be placed between laws of large numbers and laws of the iterated logarithm in probability theory. (\ Therefore we would like to call these types of theorems quasi laws of the iterated logarithm.) } \end{rem} \begin{rem} {\rm This is also a new proof of the strong law of large numbers without using the Borel-Cantelli theorem. We can prove other limit theorems in probability theory by this method. } \end{rem} \ We yet need some lemmas for proving Theorem 1. 
\begin{lem} {\rm ( {\rm Functional equation for the Hurwitz zeta function} ~\cite{A1976}~\cite{I1985}~\cite{T1951} ) } \\ \[ \zeta(s,\omega)={{\Gamma(s)}\over{(2\pi)^s}} \{ e^{-{\pi\over2}is}{\rm F}(\omega,s) + e^{+{\pi\over2}is}{\rm F}(-\omega,s) \} \] for \ $ 0<\omega<1,\sigma>0 $ or \ $ 0<\omega \leq 1,\sigma>1 $,\\ where $\Gamma(s)$ is the gamma function of Euler and \\ ${\rm F}(\omega,s):=\sum_{k=1}^{\infty}k^{-s}e^{2\pi ik \omega}.$ \end{lem} \begin{lem} {\rm ~\cite{I1985}~\cite{M1970}~\cite{T1980}~\cite{T1951}} \[ |\Gamma(s)|=\sqrt{2\pi}|t|^{\sigma -{1\over2}}e^{-{\pi\over2}|t|} \{ 1 + {\rm O}_{\sigma_1,\sigma_2,\delta}({1\over{|t|}}) \} \] for \ $ \sigma_1 \leq \sigma \leq \sigma_2,\ |t| \geq \delta >0. $ \end{lem} \begin{lem} \[ {\rm F}(\omega,s)=\sum_{k \leq t^2}k^{-s}e^{2\pi ik \omega} + {\rm O}({1\over{|1-e^{2\pi i \omega}|}}t^{1-2\sigma}) \] for \ $ \sigma \geq {1\over2} . $ \end{lem} {\bf proof.}\ \[ {\rm F}(\omega,s)=\sum_{k \leq t^2}k^{-s}e^{2\pi ik \omega} + \sum_{k > t^2}k^{-s}e^{2\pi ik \omega}. \] By applying the partial summation to the second term of the above ${\rm F}(\omega,s)$, \begin{eqnarray*} \sum_{k > t^2}k^{-s}e^{2\pi ik \omega}\\ =-A(t^2){(t^2)}^{-s}+s\int_{t^2}^\infty A(u)u^{-s-1}du \\ ={\rm O}({1\over{|1-e^{2\pi i \omega}|}}t^{-2\sigma})+ {\rm O}(t{1\over{|1-e^{2\pi i \omega}|}}t^{-2\sigma}) \\ ={\rm O}({{t^{1-2\sigma}}\over{|1-e^{2\pi i \omega}|}}), \end{eqnarray*} where $A(t^2):=\sum_{k < t^2}e^{2\pi ik \omega}$, which completes the proof of the lemma. From Lemma 5,6 we easily have \begin{lem} \[ |\zeta({1\over2} + it,\omega)|={\rm O}({\rm F} (\omega,{1\over2} + it)) \ \ for \ 0<\omega<1. \] \end{lem} \noindent {\bf Proof of Theorem 1.} From Lemma 7, we have \[ {\rm F}(\omega,{1\over2}+it)={\rm O}_{\delta} (\sum_{k \leq t^2}k^{-{1\over2}-it}e^{2\pi ik \omega}) \] \[ for \ 0<\delta<\omega<1-\delta \ with \ any \ small \ \delta>0. 
\] In Theorem 2, put $ \Omega=(0,1),\ {\rm P}=\mu $ (Lebesgue \ measure), $ n=[t^2] $ \ ($ [x] $ denotes the integral part of real number $ x.$) and \[ X_k^{(t)}=k^{-{1\over2}-it}e^{2\pi ik \omega} \ \ (k=1,2,\cdots), \] which satisfy all the conditions in Theorem 2. Then we have \[ \sum_{k \leq t^2}k^{-{1\over2}-it}e^{2\pi ik \omega} ={\rm o}_{\omega,\epsilon}(\varphi([t^2])) ={\rm o}_{\omega,\epsilon}((\log t)^{{3\over2}+\epsilon}). \] With Lemmas 7 and 8, this completes the proof of the theorem. \begin{rem} {\rm The exact expression of the Lindel{\"o}f Hypothesis is \begin{eqnarray*} \mu_{\omega}(\sigma)=\left\{ \begin{array}{ll} 0 & (\sigma \geq {1\over2}) \\ {1\over2}- \sigma & (\sigma < {1\over2}). \end{array} \right. \end{eqnarray*} \begin{eqnarray*} where \ \ \mu_{\omega}(\sigma):= \limsup_{t \to +\infty} {{\log |\zeta(\sigma + it,\omega)|}\over{\log t}}, \end{eqnarray*} which is the same form as \begin{eqnarray*} \beta =\left\{ \begin{array}{ll} 0 & (\alpha \geq {1\over2}) \\ {1\over2}- \alpha & (\alpha < {1\over2}), \end{array} \right. \end{eqnarray*} in Theorem 2. \\ } \end{rem} \begin{rem} {\rm In 1936, Davenport and Heilbronn~\cite{D-H1936} had already proved that the Riemann Hypothesis fails for $\zeta(s,\omega)$ with transcendental number $\omega$ and rational number $\omega \neq {1\over2},1$ in contrast with our Theorem 1, which shows that the Lindel{\"o}f Hypothesis by itself, for example, without the Euler product, does not imply the Riemann Hypothesis. 
} \end{rem} \begin{rem} {\rm It seems that the behaviour of $\zeta(s,\omega)$ as $\omega$ varies in the interval $(0,1)$ is very complicated because of the following facts;\\ (1)Barasubramanian-Ramachandra~\cite{B-R1977}(\ the case $ \omega=1 $ ) and Ramachandra-Sankaranarayanan~\cite{R-S1989} proved the following $\Omega$-theorem; \begin{eqnarray*} \zeta({1\over2} + it,\omega)=\Omega(\exp (C_{\omega}\sqrt{{\log t} \over{\log\log t}})) \\ with \ some \ C_{\omega}>0 \ and \ \omega \in {\bf Q}, \end{eqnarray*} which shows \begin{eqnarray*} \{0<\omega<1;{\rm Theorem \ 1. \ holds } \} \cap {\bf Q}=\emptyset. \end{eqnarray*} \noindent (2)It is well known that divisor problems and circle problems are closely related each other and so are shifted divisor problems and shifted circle problems. The Hurwitz zeta function naturally appears in shifted divisor problems~\cite{N1993}. And Bleher-Cheng-Dyson-Lebowitz ~\cite{B-C-D-L1993} pointed out that the value distributions of the error terms of the number of lattice points inside shifted circles behave very differently when the shift varies by their numerical studies. Therefore it seems that the behaviour of $\zeta(s,\omega)$ including its value distribution is very complicated as $\omega$ varies. (For the value distribution of $\zeta(s,\omega)$ with transcendental number $\omega$, see ~\cite{N1997}. ) \\ \noindent (3)Our numerical studies by "Mathematica" show also the complexity of the behaviour of $\zeta(s,\omega)$ as follows, for example,} \end{rem} \noindent The graph of $\zeta(s,x)$ which plots the points $(x,y) \in {\bf R^2}$ such that \[ y={{|\zeta({1\over2}+it,x)-x^{-({1\over2}+it)}|}\over{(\log t)^2}} \ (0 \leq x \leq 1,\ t=10^8.). \] seems to be a kind of white noise.\\ \noindent {\bf Acknowledgment} \ \ The author thanks to \\ Prof. Jyoichi Kaneko of the University of the Ryukyus for his careful reading of the previous manuscript.\\ \end{document}
\begin{document} \title{Advances on the Conjecture of Erd\H{o}s} \begin{abstract} Results: \begin{itemize} \item A hamiltonian graph $G$ verifying $e(G)>n(k-1)/2$ contains any $k$-spider. \item If $G$ is a graph with average degree $\bar{d} > k-1$, then every spider of size $k$ is contained in $G$ for $k\le 10$. \item A $2$-connected graph with average degree $\bar{d} > \ell_2+\ell_3+\ell_4$ contains every spider of $4$ legs $S_{1,\ell_2,\ell_3,\ell_4}$. We claim also that the condition of $2$-connection is not needed, but the proof is very long and it is not included in this document. \end{itemize} \end{abstract} \section{Introduction} The Erd\H{o}s-S\'os conjecture \cite{E65} says that a graph $G$ on $n$ vertices and number of edges $e(G)>n(k-1)/2$ contains all trees of size $k$. By $g(n,k)$ Erd\H{o}s and Gallai \cite{EG59} denoted the maximum number of edges of a graph $G$ on $n$ vertices containing no cycles with more than $k$ edges. Moreover, these authors proved that $g(n,k) \le \frac{1}{2}(n-1) k , \mbox{ for } 2\le k \le n.$ (\emph{Theorem 2.7}) Thus if $e(G)> (n-1) k /2$ then $G$ contains a cycle with at least $k+1$ edges. Fan and Sun \cite{FL07} used Theorem 2.7 to note that every graph $G$ with $e(G)>n(k-1)/2$ has a circumference of length at least $k$. This is clear because $e(G)>n(k-1)/2> (n-1)(k-1)/2$. Then they used this observation to prove that every graph with $e(G)>n(k-1)/2$ contains any $k$-spider of three legs. We will prove that a hamiltonian graph $G$ with $e(G)>n(k-1)/2$ contains any $k$-spider. We will prove also that a connected graph with $e(G)>n(k-1)/2$ contains all spiders of four legs, with one leg of unity length. \section{Results} We need to introduce the following notation. Let $S_{\ell_1,\ldots,\ell_f}$ be a $k$-spider of $f$ legs of lengths $\ell_1,\dots,\ell_f$, i.e., $\ell_1+\dots+\ell_f=k$. Let $P_1, \dots, P_f$ be the $f$ legs of the $k$-spider such that $e(P_i)=\ell_i$, $i=1,2,\dots, f$. 
We may assume that $\ell_1\le \ell_2\le \cdots \le \ell_f$ and if the spider has $4$ legs ($f=4$) and $\ell_1=1$ then $\ell_2\ge 2$ because if $\ell_2=1$, then $S_{1,1,\ell_3,\ell_4}$ is a caterpillar, which is included in any graph $G$ with $e(G)>|V(G)|(k-1)/2$ where $k=\ell_3+\ell_4+2$ \cite{MP93}. Note that $\ell_1\le k/f$ and $\ell_2\le (k-\ell_1)/(f-1)$. (If $f=4$, $\ell_1\le k/4$ and $\ell_2\le (k-\ell_1)/3$). In general, $\ell_i\le \bigl(k-\sum\limits_{j=1}^{i-1}\ell_j\bigr)/(f-i+1)=\bigl(\sum\limits_{j=i}^{f}\ell_j\bigr)/(f-i+1)$. Let $G$ be a graph with $e(G)>|V(G)|(k-1)/2$. Let $H$ be a minimal induced subgraph of $G$ such that $e(H)>|V(H)|(k-1)/2$. By the minimality, $H$ is connected and $deg_{H}(v)\ge k/2$ for every $v\in V(H)$. If $H$ has a copy of $S_{\ell_1,\ell_2,\dots,\ell_f}$, so does $G$. \begin{theorem}\label{hamilgen} Let $G$ be a graph and $H$ a hamiltonian subgraph of $G$. Suppose that there exists a vertex $x_0\in V(H)$ such that $deg_H(x_0)\ge k$. Then $H$ contains (and so $G$) any $k$-spider. \end{theorem} \begin{proof} Let $m=|V(H)|$ and $x_0 x_1 \cdots x_{m-1} x_0$ a hamiltonian cycle of $H$. We will prove the theorem by induction on $k$. For $k=2$, it is easy to check that the theorem holds, because the only $2$-spiders are $S_{1,1}$ and $S_2$ and they are isomorphic to the path of length $2$, which is contained in the hamiltonian cycle. Moreover $S_{1,1}$ or $S_2$ can be taken with root $x_0$. For $k=3$, the only $3$-spiders are $S_{1,1,1}$ that is contained in $H$ with root $x_0$ because $deg_H(x_0)\ge k=3$, and the isomorphic spiders $S_{1,2}$ and $S_3$ which clearly are contained in $H$ with $x_0$ as root. Suppose that the theorem is true for every $k'$ with $3< k'<k$, and let us show that the theorem is also valid for $k$. Let $S_{\ell_1,\ell_2,\dots,\ell_f}$ be a spider of $f$ legs and size $k$, i.e., $\ell_1+\ell_2+\dots+\ell_f=k$. 
Let $\alpha$ be the smallest index such that $x_{\alpha}\in N(x_0)$ with $\alpha \ge \ell_1+1$, which exists because $deg_H(x_0)\ge k>\ell_1$. Let $H'\subset H$ be the subgraph induced by $V(H)\setminus \{x_1, \dots, x_{\alpha-1}\}$. Clearly $H'$ is hamiltonian because $C=x_0x_{\alpha}x_{\alpha+1}\cdots x_{m-1}x_0$ is a hamiltonian cycle of $H'$. Moreover $deg_{H'}(x_0)\ge k-\ell_1$. By the inductive hypothesis on $k$, $H'$ contains all the spiders with root in $x_0$ and size $k-\ell_1$. Particularly, $H'$ contains the spider $S_{\ell_2,\dots,\ell_f}$ whose legs are denoted by $P_2, \dots, P_f$. Then, the spider with root $x_0$ and legs $P_1=x_0,x_1,\dots, x_{\ell_1}$, $P_2,\dots, P_f$ is contained in $H$. Thus $S_{\ell_1,\ell_2,\dots,\ell_f}$ is contained in $G$, finishing the proof. \end{proof} Observe that $e(G)>|V(G)|(k-1)/2$ is equivalent to the requirement that the average degree $\overline{d}>k-1$ and so the maximum degree of $G$, $\Delta(G)\ge \overline{d}>k-1$. \begin{corollary}\label{hamilton} Let $G$ be a hamiltonian graph with average degree $\bar{d} > k-1$. Then every spider of size $k$ is contained in $G$. \end{corollary} \begin{lemma}\label{ciclovalmax} Let $G$ be a 2-connected graph with average degree $\bar{d} > k-1$. Then every vertex of degree at least $k$ lies on a cycle $C_s$ of length $s\ge k$. \end{lemma} \begin{proof} Since $\bar{d}> k-1$ then $\Delta(G) \geq k$. The result follows directly from Theorem 1.16 in \cite{EG59}. \end{proof} \begin{corollary}\label{kmenorque11} Let $G$ be a graph with average degree $\bar{d} > k-1$. Then every spider of size $k$ is contained in $G$ for $k\le 9$. \end{corollary} \begin{proof} Every spider with legs of length at most 4 are contained in $G$ by Theorem 4.1 of \cite{FL07}. Moreover, every spider with three legs are contained in $G$ by Theorem 3.1 of \cite{FL07}. 
Therefore the remaining spiders are the comet $S_{1,1,1,1,5}$ and the caterpillar $S_{1,1,2,5}$ and therefore the result is valid by~\cite{MP93}. \end{proof} \begin{theorem}\label{mainbiconex} Let $G$ be a 2-connected graph with average degree $\bar{d} > k-1$. Then $G$ contains every $k$-spider $S_{1,\ell_2,\ell_3,\ell_4}\ (k=1+\ell_2+\ell_3+\ell_4)$. \end{theorem} \begin{proof} We will suppose that $2\le \ell_2\le\ell_3\le\ell_4$ (otherwise $S_{1,1,\ell_3,\ell_4}$ is a caterpillar and is contained in $G$). Let $x_0 \in V(G)$ be a vertex with $deg_G(x_0)=\Delta(G)\ge k$. By Lemma~\ref{ciclovalmax}, we can take a cycle $C_s$ of maximum length $|C_s|=s\ge k$ such that $x_0\in V(C_s)$. Let $C_s=x_0 x_1 \cdots x_{s-1}x_0$. If $N(x_0)\subset V(C_s)$, the subgraph $H$ of $G$ induced by the vertices of $C_s$ is clearly hamiltonian and has a vertex $x_0$ of degree at least $k$. Therefore by Theorem~\ref{hamilgen}, $H$ (and so $G$) contains all spiders of size $k$ and particularly $S_{1,\ell_2,\ell_3,\ell_4}$. Hence assume that $N(x_0)\not\subset V(C_s)$. Therefore we can consider a path $P= x_0 u_1\cdots u_\ell$ starting in $x_0$ of maximum length such that $V(C_s)\cap V(P)=\{x_0\}$. Two cases need to be distinguished according to $\ell\ge \ell_2$ or $\ell< \ell_2$. \emph{Case 1: $\ell\ge \ell_2$.} If there exists $ y\notin V(C_s)\cup V(P)$ such that $y\in N(x_0)$, then $S_{1,\ell_2,\ell_3,\ell_4}$ is contained in $G$ and its legs are $P_1=x_0y$, $P_2=x_0 u_1\cdots u_{\ell_2}$, $P_3=x_0 x_1\cdots x_{\ell_3}$ and $P_4=x_0 x_{s-1}\cdots x_{s-\ell_4}$ (see Figure~\ref{fig:erdos-sos1}). Therefore, assume $N(x_0)\subset V(C_s)\cup V(P)$ and let us study the following subcases. 
\begin{figure} \caption{$\ell\ge \ell_2$ and $N(x_0)\not\subset V(C)\cup V(P)$.} \label{fig:erdos-sos1} \caption{$\ell\ge \ell_2$, $u_m\in N(x_0)\cap V(P)$, $2\leq m\leq \ell-\ell_2+1$ or $\ell_2+1\le m$.} \label{fig:erdos-sos2} \caption{$\ell\ge \ell_2$, $N(x_0)\cap V(P)\subset \{u_{\ell-\ell_2+2},\dots,u_{\ell_2}\}$.} \label{fig:erdos-sos3} \end{figure} \begin{enumerate} \item[(a)] If $u_m\in N(x_0)$ with $2\leq m\leq \ell-\ell_2+1$ , then $G$ contains the spider $S_{1,\ell_2,\ell_3,\ell_4}$, of legs $P_1=x_0 u_1$, $P_2=x_0 u_m u_{m+1}\cdots u_{m+\ell_2-1}$, $P_3=x_0 x_1\cdots x_{\ell_3}$ and $P_4=x_0 x_{s-1}\cdots x_{s-\ell_4}$ (see Figure~\ref{fig:erdos-sos2}). \item[(b)] If $u_m\in N(x_0)$ with $\ell_2+1\le m$, then $G$ contains the spider $S_{1,\ell_2,\ell_3,\ell_4}$ of legs $P_1=x_0 u_1$, $P_2=x_0 u_m u_{m-1}\cdots u_{m-\ell_2+1}$, $P_3=x_0 x_1\cdots x_{\ell_3}$ and $P_4=x_0 x_{s-1}\cdots x_{s-\ell_4}$ (see Figure~\ref{fig:erdos-sos2}). \item[(c)] Otherwise we must distinguish between two different situations. If $\ell-\ell_2+2\leq \ell_2$ then $(N(x_0)-u_1)\cap V(P) \subseteq \{u_{\ell-\ell_2+2},\dots,u_{\ell_2}\}$, and $|N(x_0)\cap V(P)|\le \ell_2-(\ell-\ell_2+2)+1=2\ell_2-\ell-1\le 2\ell_2-\ell_2-1= \ell_2-1$. On the contrary, if $\ell-\ell_2+2> \ell_2$ then $N(x_0)\cap V(P)=\{ u_1\}$ and $|N(x_0)\cap V(P)|=1\le \ell_2-1$. Therefore, in both cases, $|N(x_0)\cap V(C_s)|\geq k-1- \ell_2+1=k- \ell_2=1+\ell_3+\ell_4$. Thus there must exist an edge $x_0x_h$ with $\ell_3<h<s-\ell_4$ so that $G$ contains the spider $S_{1,\ell_2,\ell_3,\ell_4}$ of legs $P_1=x_0x_h$, $P_2=x_0 u_1\cdots u_{\ell_2}$, $P_3=x_0 x_1\cdots x_{\ell_3}$ and $P_4=x_0 x_{s-1}\cdots x_{s-\ell_4}$ (see Figure~\ref{fig:erdos-sos3}). \end{enumerate} \emph{Case 2: $\ell< \ell_2$.} Note that $N(u_\ell)\subset V(C_s)\cup V(P)$ as $P$ has maximum length. Since $|N(u_\ell)\cap V(P)|\le \ell\le \ell_2-1$, $N(u_\ell)\not\subset V(P)$ because $ \ell_2\le(k-1)/3<k/2 \le deg(u_\ell)$. 
Note also that since $C_s$ has maximum length, $ N(u_{\ell})\cap \{x_1,\ldots, x_\ell\}=\emptyset$, otherwise the cycle $x_0u_1\cdots u_{\ell}x_ix_{i+1}\cdots x_{s-1}x_0$ would have a length greater than $s$ which is a contradiction. Similarly, $ N(u_{\ell}) \cap \{x_{s-\ell}, \ldots, x_{s-1}\}$ $=\emptyset$, otherwise the cycle $x_0u_1\cdots u_{\ell}x_i x_{i-1}\cdots x_{1}x_0$ would have a length greater than $s$ again a contradiction (see Figure~\ref{fig:erdos-sos4}). We may notice that if $x_j, x_{j+1}\in V(C_s)$ and $x_j\in N(u_\ell)$, then $ x_{j+1}\not\in N(u_\ell)$ because $C_s$ has maximum length. Since $|\{x_{\ell+1},\dots,x_{\ell_2}\}|=|\{x_{s-\ell_2},\dots,x_{s-\ell-1}\} |=\ell_2-\ell $ then $|N(u_\ell)\cap( \{x_{\ell+1},\dots,x_{\ell_2}\}\cup\{x_{s-\ell_2},\dots,x_{s-\ell-1}\})|\le 2\left\lceil(\ell_2-\ell)/2\right\rceil$. Since $|N(u_\ell)\cap V(P)|\le \ell$, it follows that $|N(u_\ell)\cap \{x_{\ell_2+1},\dots,x_{s-\ell_2-1}\}|\ge k/2-2\left\lceil(\ell_2-\ell)/2\right\rceil-\ell\ge k/2-\ell_2-1>0$ (because $\ell\le \ell_2-1$). Let $\alpha$ be the smallest index such that $x_\alpha\in N(u_\ell)\cap \{x_{\ell_2+1},\dots,x_{s-\ell_2-1}\}$. Since $k/2-\ell_2-1\le \left|N(u_\ell)\cap \{x_{\ell_2+1},\dots,x_{s-\ell_2-1}\}\right|=\left|N(u_\ell)\cap \{x_{\alpha},\dots,x_{s-\ell_2-1}\}\right|\le \left\lceil(s-\ell_2-\alpha)/2\right\rceil\le (s-\ell_2-\alpha+1)/2 $ then $\alpha \le s-k+\ell_2+3 $. But the inequality does not hold because it would mean that $2\left\lceil(\ell_2-\ell)/2\right\rceil=\ell_2-\ell+1$, so $u_{\ell}$ would be adjacent to $x_{s-\ell_2}$, and $\left\lceil(s-\ell_2-\alpha)/2\right\rceil=(s-\ell_2-\alpha+1)/2$ and $u_{\ell}$ would be adjacent to $x_{s-\ell_2-1}$ too, and that it is not possible because of the maximality of the cycle. So $\alpha < s-k+\ell_2+3 $. 
\begin{figure} \caption{$\ell<\ell_2$.} \label{fig:erdos-sos4} \caption{$\ell<\ell_2$, $N(x_0)\not\subset V(C)\cup V(P)$.} \label{fig:erdos-sos5} \caption{$\ell<\ell_2$, $N(x_0)\subset V(C)\cup V(P)$.} \label{fig:erdos-sos6} \end{figure} If there exists $ y\notin V(C_s)\cup V(P)$ such that $y\in N(x_0)$, then the spider $S_{1,\ell_2,\ell_3,\ell_4}$ with legs $P_1=x_0y$, $P_2=x_0 x_1\cdots x_{\ell_2}$, $P_3=x_0 u_1\cdots u_{\ell}x_\alpha\cdots x_{\alpha+\ell_3-\ell-1}$ and $P_4=x_0 x_{s-1}\cdots x_{s-\ell_4}$ is contained in $G$ because $\alpha+\ell_3-\ell-1 <s-k+\ell_2+3+\ell_3-\ell-1=s-\ell_4+1-\ell\le s-\ell_4$ (see Figure \ref{fig:erdos-sos5}). If $N(x_0)\subset V(C_s)\cup V(P)$, since $|N(x_0)\cap V(P)|\le \ell$, it follows that $|N(x_0)\cap V(C_s)|\ge k-\ell$. As the index set $I=\{1,\ldots,\ell_2,\alpha,\ldots, {\alpha+\ell_3-\ell-1},{s-\ell_4},\ldots,{s-1}\}$ has cardinality $k-\ell-1$, there must exist $x_q\in N(x_0)$ ($q\not\in I$). As $\alpha+\ell_3-\ell-1<s-\ell_4$, the spider of legs $P_1=x_0x_q$, $P_2=x_0 x_1\cdots x_{\ell_2}$, $P_3=x_0 u_1\cdots u_{\ell}x_\alpha\cdots x_{\alpha+\ell_3-\ell-1}$ and $P_4=x_0 x_{s-1}\cdots x_{s-\ell_4}$ is contained in $G$ (see Figure \ref{fig:erdos-sos6}). \end{proof} \begin{theorem}\label{main} If $G$ is a connected graph with average degree $\bar{d} > k-1$, then $G$ contains every $k$-spider $S_{1,\ell_2,\ell_3,\ell_4}\ (k=1+\ell_2+\ell_3+\ell_4)$. \end{theorem} The proof is similar to that of Theorem~\ref{mainbiconex}, but it is very long and is not included here. \begin{corollary}\label{k10} Let $G$ be a graph with average degree $\bar{d} > k-1$. Then every spider of size $k$ is contained in $G$ for $k\le 10$. \end{corollary} \begin{proof} Every spider with legs of length at most 4 is contained in $G$ by Theorem 4.1 of \cite{FL07}. Moreover, every spider with three legs is contained in $G$ by Theorem 3.1 of \cite{FL07}. 
For the caterpillars $S_{1,1,1,1,1,5}$, $S_{1,1,1,1,6}$, $S_{1,1,1,2,5}$, $S_{1,1,1,7}$, $S_{1,1,2,6}$ or $S_{1,1,3,5}$ the result is valid by~\cite{MP93}. The only remaining spider is $S_{1,2,2,5}$, to which Theorem~\ref{main} can be applied. \end{proof} \end{document}
\begin{document} \title{Quantifying Spatial Correlations of General Quantum Dynamics} \author{\'Angel Rivas and Markus M\"uller} \affiliation{Departamento de F\'{\i}sica Te\'orica I, Universidad Complutense, 28040 Madrid, Spain} \begin{abstract} Understanding the role of correlations in quantum systems is both a fundamental challenge as well as of high practical relevance for the control of multi-particle quantum systems. Whereas a lot of research has been devoted to study the various types of correlations that can be present in the states of quantum systems, in this work we introduce a general and rigorous method to quantify the amount of correlations in the dynamics of quantum systems. Using a resource-theoretical approach, we introduce a suitable quantifier and characterize the properties of correlated dynamics. Furthermore, we benchmark our method by applying it to the paradigmatic case of two atoms weakly coupled to the electromagnetic radiation field, and illustrate its potential use to detect and assess spatial noise correlations in quantum computing architectures. \end{abstract} \pacs{03.65.Yz, 42.50.Lc, 03.67.Mn} \maketitle \section{Introduction} Quantum systems can display a wide variety of dynamical behaviors, in particular depending on how the system is affected by its coupling to the surrounding environment. One interesting feature which has attracted much attention is the presence of memory effects (non-Markovianity) in the time evolution. These typically arise for strong enough coupling between the system and its environment, or when the environment is structured, such that the assumptions of the well-known weak-coupling limit \cite{BrPe02,GardinerZoller04,Libro} are no longer valid. Whereas memory effects (or time correlations) can be present in any quantum system exposed to noise, another extremely relevant feature, which we will focus on in this work, are correlations in the dynamics of different parts of multi-partite quantum systems. 
Since different parties of a partition are commonly, though not always, identified with different places in space, without loss of generality we will in the following refer to these correlations between different subsystems of a larger system as spatial correlations. Spatial correlations in the dynamics give rise to a wide plethora of interesting phenomena ranging from super-radiance \cite{Dicke} and super-decoherence \cite{14-Qubit} to sub-radiance \cite{Pillet} and decoherence-free subspaces \cite{Zanardi,Lidar1,Lidar2,Wineland,Haeffner}. Moreover, clarifying the role of spatial correlations in the performance of a large variety of quantum processes, such as e.g. quantum error correction \cite{Clemmens,Klesse,Kitaev,Preskill2013,Novais,Shabani}, photosynthesis and excitation transfer \cite{Caruso,Aspuru,Nazir,Olaya,Nalbach,Silbey,Olbrich,Sarovar,Schulten,Mukamel,Jeske3}, dissipative phase transitions \cite{Diehl,Verstraete,Igor,Lee,Schindler} and quantum metrology \cite{Jeske2} has been and still is an active area of research. Along the last few years, numerous works have aimed at quantifying up to which extent quantum dynamics deviates from the Markovian behavior, see e.g. \cite{Wolf,BrLaPi,RHP,Sun,Mutual,Mauro,MichaelHall,Bylicka,Review}. However, much less attention has been paid to develop quantifiers of spatial correlations in the dynamics, although some works e.g. \cite{Jeske1,Joe} have addressed this issue for some specific models. This may be partially due to the well-known fact that under many, though not all practical circumstances, dynamical correlations can be detected by studying the time evolution of correlation functions of properly chosen observables $\mathcal{O}_A$ and $\mathcal{O}_B$, acting respectively on the two parties A and B of interest. For instance, in the context of quantum computing, sophisticated methods to witness the correlated character of quantum dynamics, have been developed and implemented in the laboratory \cite{Joe}. 
Indeed, any correlation $C(\mathcal{O}_A, \mathcal{O}_B)=\langle \mathcal{O}_A \otimes \mathcal{O}_B \rangle-\langle \mathcal{O}_A \rangle \langle \mathcal{O}_B \rangle$ detected during the time evolution of an initial product state, $\rho = \rho_A \otimes \rho_B$, witnesses the correlated character of the dynamics. However, note that there exist highly correlated dynamics, which cannot be realized by a combination of local processes, which do not generate any such correlation, e.g.~the swap process between two parties. Such dynamics can either act on internal degrees of freedom, induced e.g.~by the action of a swap gate acting on two qubits \cite{NC00}, or can correspond to (unwanted) external dynamics, caused e.g.~by correlated hopping of atoms in an optical lattice \cite{Folling, Lewenstein-book} or crystal melting and subsequent recooling dynamics in trapped-ion architectures \cite{Naegerl}. Thus, it is of eminent importance to develop methods which allow us to detect the presence or absence of spatial correlations in the dynamics, without a priori knowledge of the underlying microscopic dynamics, and do not require us to resort to adequately chosen ``test'' observables and initial ``test'' quantum states. Such methods should furthermore provide a rigorous ground to quantitatively compare the amount of spatial correlations in different dynamical processes. These characteristics are essential for a ``good'' correlation quantifier that can be used to study spatial correlations in quantum dynamics from a fundamental point of view \cite{Kraus, Nielsen-Dawson, Linden}, to clarify their role in physical processes \cite{Clemmens,Klesse,Kitaev,Preskill2013,Novais,Shabani,Caruso,Aspuru,Nazir,Olaya,Nalbach,Silbey,Olbrich,Sarovar,Schulten,Mukamel,Jeske3,Diehl,Verstraete,Igor,Lee,Schindler,Jeske2}, as well as to measure and quantify spatial correlations in the dynamics of experimental quantum systems. 
It is the aim of this work to introduce a method to quantify the degree of correlation in general quantum dynamics from a fundamental view point. Specifically, \noindent i) we propose a theoretical framework and formulate a general measure to assess the amount of spatial correlations of quantum dynamics without resorting to any specific physical model. To this end, we adopt a resource theory approach, and formulate a fundamental law that any faithful measure must satisfy. \noindent ii) Within this framework, we study the properties that a dynamics has to fulfill to be considered as maximally correlated. \noindent iii) We apply our measure to the paradigmatic quantum-optical model of two two-level atoms radiating into the electromagnetic vacuum. This case exemplifies the working principle of our measure and quantitatively confirms the expectation that spatial dynamical correlations decay with increasing interatomic distance and for long times. \noindent iv) Finally, we illustrate this formalism with a second example in the context of quantum computing, where quantum error correction protocols rely on certain assumptions on (typically sufficiently small) noise strengths and noise correlations. Specifically, we consider two qubits subject to local thermal baths that suffer some residual interaction which induces a correlated noisy dynamics. Our method reveals the remarkable fact that, under keeping the overall error probability for the two qubits constant, the degree of spatial correlations decays very rapidly as the bath temperature increases. This suggests that, in some situations, noise addition as e.g.~by a moderate increase of the environmental temperature, can be beneficial to tailor specific desired noise characteristics. 
\section{Measure of Correlations for Dynamics} \subsection{Uncorrelated Dynamics} Let us consider a bipartite quantum system $\mathrm{S}=\mathrm{A}\mathrm{B}$ undergoing some dynamics given by a completely positive and trace preserving (CPT) map $\mathcal{E}_\mathrm{S}$ [without loss of generality we shall assume $\dim(\mathcal{H}_\mathrm{A})=\dim(\mathcal{H}_\mathrm{B})=d$ and so $d_{\mathrm{S}}:=\dim(\mathcal{H}_\mathrm{S})=d^2$]. This dynamics is said to be uncorrelated with respect to the subsystems $\mathrm{A}$ and $\mathrm{B}$ if it can be decomposed as $\mathcal{E}_\mathrm{S}=\mathcal{E}_\mathrm{A}\otimes\mathcal{E}_\mathrm{B}$, with CPT maps $\mathcal{E}_\mathrm{A}$ and $\mathcal{E}_\mathrm{B}$ acting on $\mathrm{A}$ and $\mathrm{B}$, respectively. Otherwise it is said to be correlated. The central tool of our construction is the Choi-Jamio{\l}kowski isomorphism \cite{Choi,Jamiolkowski}, which provides a one-to-one map of a given quantum dynamics to an equivalent representation in the form of a quantum state in an enlarged Hilbert space. This mapping allows us to use tools developed for the quantification of correlations in quantum states for our purpose of quantifying correlations in quantum dynamics. Thus, consider a second $d^2-$dimensional bipartite system $\mathrm{S}'=\mathrm{A}'\mathrm{B}'$, and let $\ket{\Phi_{\mathrm{S}\mathrm{S}'}}$ be the maximally entangled state between $\mathrm{S}$ and $\mathrm{S}'$, \begin{equation}\label{MaxEnta} \ket{\Phi_{\mathrm{S}\mathrm{S}'}}:=\frac{1}{d}\sum_{j=1}^{d^2}\ket{jj}_{\mathrm{S}\mathrm{S}'}=\frac{1}{d}\sum_{k,\ell=1}^d\ket{ k \ell}_{\mathrm{A}\mathrm{B}}\otimes \ket{k\ell}_{\mathrm{A}'\mathrm{B}'}. \end{equation} Here, $\ket{j}$ denotes the state vector with 1 at the $j$-th position and zero elsewhere (canonical basis). 
The Choi-Jamio{\l}kowki representation of some CPT map $\mathcal{E}_\mathrm{S}$ on $\mathrm{S}$ is given by the $d^4-$dimensional state \begin{equation}\label{CJstate} \rho^{\rm CJ}_\mathrm{S}:=\mathcal{E}_\mathrm{S}\otimes\mathds{1}_{\mathrm{S}'}(\ket{\Phi_{\mathrm{S}\mathrm{S}'}}\bra{\Phi_{\mathrm{S}\mathrm{S}'}}), \end{equation} where $\mathds{1}_{\mathrm{S}'}$ denotes the identity map acting on $\mathrm{S}'$. The entire information about the dynamical process $\mathcal{E}_\mathrm{S}$ is contained in this unique state. \begin{figure} \caption{Schematics of the method. Left: the system S is prepared in a maximally entangled state $\ket{\Phi_{\mathrm{S} \end{figure} \subsection{Construction of the correlation measure} In order to formulate a faithful measure of spatial correlations for dynamics, we adopt a resource theory approach \cite{MartinShash,Brandao1,Gour1,Brandao2,Emerson,Gour2,Tilmann,deVicente}. This is, we may consider correlated dynamics as a resource to perform whatever task that cannot be implemented solely by (composing) uncorrelated evolutions $\mathcal{E}_\mathrm{A}\otimes\mathcal{E}_\mathrm{B}$. Then, suppose that the system S undergoes some dynamics given by the map $\mathcal{E}_\mathrm{S}$, and consider the (left and right) composition of $\mathcal{E}_\mathrm{S}$ with some uncorrelated maps $\mathcal{L}_\mathrm{A}\otimes\mathcal{L}_\mathrm{B}$ and $\mathcal{R}_\mathrm{A}\otimes\mathcal{R}_\mathrm{B}$, so that the total dynamics is given by $\mathcal{E}'_\mathrm{S}=(\mathcal{L}_\mathrm{A}\otimes\mathcal{L}_\mathrm{B})\mathcal{E}_\mathrm{S}(\mathcal{R}_\mathrm{A}\otimes\mathcal{R}_\mathrm{B})$. It is clear that any task that we can do with $\mathcal{E}'_\mathrm{S}$ by composition with uncorrelated maps can also be achieved with $\mathcal{E}_\mathrm{S}$ by composition with uncorrelated maps. Hence, we assert that the amount of correlation in $\mathcal{E}_{\rm S}$ is at least as large as in $\mathcal{E}'_\mathrm{S}$. 
In other words, the amount of correlations of some dynamics does not increase under composition with uncorrelated dynamics. This is the fundamental law of this resource theory, and any faithful measure of correlations should satisfy it. For the sake of comparison, in the resource theory of entanglement, entanglement is the resource, and the fundamental law is that entanglement cannot increase under application of local operations and classical communication (LOCC) \cite{MartinShash}. In this spirit, we introduce a measure of correlations for dynamics via the (normalized) quantum mutual information of the Choi-Jamio{\l}kowski state $\rho^{\rm CJ}_\mathrm{S}$, Eq. \eqref{CJstate}, \begin{align}\label{Ibar} \bar{I}(\mathcal{E}_\mathrm{S})&:=\frac{I(\rho^{\rm CJ}_\mathrm{S})}{4 \log d}\\ &:=\frac{1}{4 \log d}\left[S\left(\rho^{\rm CJ}_\mathrm{S}|_{\mathrm{A}\mathrm{A}'}\right)+S\left(\rho^{\rm CJ}_\mathrm{S}|_{\mathrm{B}\mathrm{B}'}\right)-S\left(\rho^{\rm CJ}_\mathrm{S}\right)\right],\nonumber \end{align} with $S(\cdot):=- {\rm{Tr }}[(\cdot) \log(\cdot)]$ the von Neumann entropy evaluated for the reduced density operators $\rho^{\rm CJ}_\mathrm{S}|_{\mathrm{A}\mathrm{A}'}:= {\rm{Tr }}_{\mathrm{B}\mathrm{B}'}(\rho^{\rm CJ}_\mathrm{S})$ and $\rho^{\rm CJ}_\mathrm{S}|_{\mathrm{B}\mathrm{B}'}:= {\rm{Tr }}_{\mathrm{A}\mathrm{A}'}(\rho^{\rm CJ}_\mathrm{S})$, and $\rho^{\rm CJ}_\mathrm{S}$; see Fig. 1. The quantity $\bar{I}(\mathcal{E}_\mathrm{S})$ is a faithful measure of how correlated the dynamics given by $\mathcal{E}_\mathrm{S}$ is, as it satisfies the following properties: \begin{enumerate} \item[i)] $\bar{I}(\mathcal{E}_\mathrm{S})=0$ if and only if $\mathcal{E}_\mathrm{S}$ is uncorrelated, $\mathcal{E}_\mathrm{S}=\mathcal{E}_\mathrm{A}\otimes\mathcal{E}_\mathrm{B}$. 
This follows from the fact that the Choi-Jamio{\l}kowski state of an uncorrelated map is a product state with respect to the bipartition $\mathrm{A}\mathrm{A}'|\mathrm{B}\mathrm{B}'$, see Appendix \ref{Ap:1}. \item[ii)] $\bar{I}(\mathcal{E}_\mathrm{S})\in[0,1]$. It is clear that $\bar{I}(\mathcal{E}_\mathrm{S})\geq0$, moreover it reaches its maximum value when $S(\rho^{\rm CJ}_\mathrm{S})$ is minimal and $S\left(\rho^{\rm CJ}_\mathrm{S}|_{\mathrm{A}\mathrm{A}'}\right)+S\left(\rho^{\rm CJ}_\mathrm{S}|_{\mathrm{B}\mathrm{B}'}\right)$ is maximal. Both conditions meet when $\rho^{\rm CJ}_\mathrm{S}$ is a maximally entangled state with respect to the bipartition $\mathrm{A}\mathrm{A}'|\mathrm{B}\mathrm{B}'$, leading to $I(\rho^{\rm CJ}_\mathrm{S})=2\log d^2$. \item[iii)] The fundamental law is satisfied, \begin{equation}\label{fundamentallaw} \bar{I}(\mathcal{E}_\mathrm{S})\geq\bar{I}[(\mathcal{L}_\mathrm{A}\otimes\mathcal{L}_\mathrm{B}) \mathcal{E}_\mathrm{S} (\mathcal{R}_\mathrm{A}\otimes\mathcal{R}_\mathrm{B})], \end{equation} where the equality is reached for uncorrelated unitaries $\mathcal{L}_{\mathrm{A}}(\cdot)=U_{\mathrm{A}}(\cdot)U^\dagger_{\mathrm{A}}$, $\mathcal{L}_{\mathrm{B}}(\cdot)=U_{\mathrm{B}}(\cdot)U^\dagger_{\mathrm{B}}$, $\mathcal{R}_{\mathrm{A}}(\cdot)=V_{\mathrm{A}}(\cdot)V^\dagger_{\mathrm{A}}$, and $\mathcal{R}_{\mathrm{B}}(\cdot)=V_{\mathrm{B}}(\cdot)V^\dagger_{\mathrm{B}}$. This result follows from the monotonicity of the quantum mutual information under local CPT maps (which in turn follows from the monotonicity of quantum relative entropy \cite{Vedral}) and the fact that for any matrix $A$, $A\otimes\mathbb{1}_{\mathrm{S}'}\ket{\Phi_{\mathrm{S}\mathrm{S}'}}=\mathbb{1}_{\mathrm{S}}\otimes A^{\rm t}\ket{\Phi_{\mathrm{S}\mathrm{S}'}}$ where the superscript ``t'' denotes the transposition in the Schmidt basis of the maximally entangled state $\ket{\Phi_{\mathrm{S}\mathrm{S}'}}$. 
\end{enumerate} \subsection{Maximally correlated dynamics}\label{sec:MCD} Before computing $\bar{I}$ for some cases it is worth studying which dynamics achieve the maximum value $\bar{I}_{\rm max}=1$. From the resource theory point of view, these dynamics can be considered as maximally correlated since they cannot be constructed from other maps by composition with uncorrelated maps [because of Eq. \eqref{fundamentallaw}]. We have the following results: \noindent \textit{Theorem 1}. If for a map $\mathcal{E}_\mathrm{S}$ the property $\bar{I}(\mathcal{E}_\mathrm{S})=1$ holds, such a map must be unitary $\mathcal{E}_\mathrm{S}(\cdot)=U_\mathrm{S}(\cdot)U_\mathrm{S}^\dagger$, $U_\mathrm{S} U_\mathrm{S}^\dagger=\mathbb{1}$. \noindent \textit{Proof}. As aforementioned, the maximum value, $\bar{I}(\mathcal{E}_\mathrm{S})=1$, is reached if and only if $\rho^{\rm CJ}_{\mathrm{S}}$ is a maximally entangled state with respect to the bipartition $\mathrm{A}\mathrm{A}'|\mathrm{B}\mathrm{B}'$, $|\Psi_{(\mathrm{A}\mathrm{A}')|(\mathrm{B}\mathrm{B}')}\rangle$. Then \begin{equation} \mathcal{E}_\mathrm{S}\otimes\mathds{1}_{\mathrm{S}'}(\ket{\Phi_{\mathrm{S}\mathrm{S}'}}\bra{\Phi_{\mathrm{S}\mathrm{S}'}})=|\Psi_{(\mathrm{A}\mathrm{A}')|(\mathrm{B}\mathrm{B}')}\rangle\langle\Psi_{(\mathrm{A}\mathrm{A}')|(\mathrm{B}\mathrm{B}')}| \end{equation} is a pure state. Therefore $\mathcal{E}_\mathrm{S}$ must be unitary as the Choi-Jamio{\l}kowski state is pure if and only if it represents a unitary map. \qed Despite the connection with maximally entangled states, the set of maximally correlated operations $\mathfrak{C}:=\{U_\mathrm{S}; \bar{I}(U_\mathrm{S})=1\}$ cannot be characterized as straightforwardly as it may seem. Note that not all maximally entangled states $|\Psi_{(\mathrm{A}\mathrm{A}')|(\mathrm{B}\mathrm{B}')}\rangle$ are valid Choi-Jamio{\l}kowski states. In Appendix \ref{Ap:2} we provide a detailed proof of the next theorem. \noindent \textit{Theorem 2}. 
A unitary map $U_\mathrm{S}\in\mathfrak{C}$ if and only if it fulfills the equation \begin{equation}\label{Umax3} \sum_{i,j} \langle k i |U_{\mathrm{S}}| m j \rangle\langle n j |U_{\mathrm{S}}^\dagger| \ell i \rangle=\delta_{k\ell}\delta_{mn}. \end{equation} Examples of maximally correlated dynamics are the swap operation exchanging the states of the two parties A and B, $U_{\mathrm{S}}=U_{\mathrm{A}\leftrightarrow\mathrm{B}}$, and thus also any unitary of the form of $(U_{\mathrm{A}}\otimes U_{\mathrm{B}}) U_{\mathrm{A}\leftrightarrow\mathrm{B}}(V_{\mathrm{A}}\otimes V_{\mathrm{B}})$. However, not every $U_\mathrm{S}\in\mathfrak{C}$ falls into this class. For example, the unitary operation of two qubits $U'_{\mathrm{S}}=|21\rangle\langle12|+{\rm i}(|11\rangle\langle21|+|12\rangle\langle11|+|22\rangle\langle22|)$ belongs to $\mathfrak{C}$ and it cannot be written as $(U_{\mathrm{A}}\otimes U_{\mathrm{B}}) U_{\mathrm{A}\leftrightarrow\mathrm{B}}(V_{\mathrm{A}}\otimes V_{\mathrm{B}})$, since that would imply vanishing $\bar{I}(U'_{\mathrm{S}}U_{\mathrm{A}\leftrightarrow\mathrm{B}})$ whereas $\bar{I}(U'_{\mathrm{S}}U_{\mathrm{A}\leftrightarrow\mathrm{B}})=1/2\neq0$. Interestingly, operations able to create highly correlated states such as the two-qubit controlled-NOT gate \cite{NC00} as well as the two-qubit dynamical maps describing the dissipative generation of Bell states \cite{Barreiro, Mueller} achieve a correlation value of 1/2 and thus do not correspond to maximally correlated dynamics. Note that whereas a controlled-NOT gate creates for appropriately chosen two-qubit initial states maximally entangled states, there are other states which are left completely uncorrelated under its action. The measure $\bar{I}$ captures - completely independently of initial states and of whether possibly created correlations are quantum or classical - the fact that correlated dynamics cannot be realized by purely local dynamics. 
Let us point out that in some resource theories, such as bi-partite entanglement, the maximal element can generate any other element by applying the operations which fulfill its fundamental law, e.g. LOCC. This is not the case here, i.e. maximally correlated evolutions cannot generate any arbitrary dynamics by composition with uncorrelated operations. Indeed, if $\mathcal{E}_\mathrm{S}^{\rm max}$ were able to generate any other dynamics, in particular it would be able to generate any unitary evolution $U_\mathrm{S}$, $(\mathcal{L}_\mathrm{A}\otimes\mathcal{L}_\mathrm{B})\mathcal{E}_\mathrm{S}^{\rm max}(\mathcal{R}_\mathrm{A}\otimes\mathcal{R}_\mathrm{B})(\cdot)=U_\mathrm{S}(\cdot)U_\mathrm{S}^\dagger$. However, this would imply that $\mathcal{L}_\mathrm{A}\otimes\mathcal{L}_\mathrm{B}$, $\mathcal{E}_\mathrm{S}^{\rm max}$ and $(\mathcal{R}_\mathrm{A}\otimes\mathcal{R}_\mathrm{B})$ are unitary evolutions as well, so that $(U_\mathrm{A}\otimes U_\mathrm{B} ) U^{\rm max}_\mathrm{S} (V_\mathrm{A}\otimes V_\mathrm{B} )=U_\mathrm{S}$, with $\mathcal{E}_\mathrm{S}^{\rm max}(\cdot)=U^{\rm max}_\mathrm{S}(\cdot)U_\mathrm{S}^{\rm max\dagger}$. Since $\bar{I}(\mathcal{E}_\mathrm{S})$ is invariant under the composition of uncorrelated unitaries, this result would imply that for any correlated unitary $U_\mathrm{S}$, $\bar{I}(U_\mathrm{S})$ would take the same value $[\bar{I}(U^{\rm max}_\mathrm{S})]$, and this is not true as can be easily checked. \begin{figure} \caption{Maximum value of $\bar{I} \label{fig2} \end{figure} \section{Applications} \subsection{Two-level atoms in the electromagnetic vacuum} To illustrate the behavior of $\bar{I}(\mathcal{E}_\mathrm{S})$, consider the paradigmatic example of two identical two-level atoms with transition frequency $\omega$ interacting with the vacuum of the electromagnetic radiation field (see Appendix \ref{Ap:3}). 
Under a series of standard approximations, the dynamics of the reduced density matrix of the atoms $\rho_\mathrm{S}$ is described by the master equation \begin{align} \frac{d\rho_\mathrm{S}}{dt}=\mathcal{L}(\rho_\mathrm{S})=&-{\rm i}\tfrac{\omega}{2}[\sigma_1^z+\sigma_2^z,\rho_\mathrm{S}]\\ &+\sum_{j,k=1,2}a_{jk}\Big(\sigma_k^-\rho_{\mathrm{S}}\sigma_j^+-\tfrac{1}{2}\{\sigma_j^+\sigma_k^-,\rho_\mathrm{S}\}\Big),\nonumber \end{align} where $\sigma^z_j$ is the Pauli $z$-matrix for the $j$-th atom, and $\sigma_j^+=(\sigma_j^-)^\dagger=|e\rangle\mbox{}_j\langle g|$ the electronic raising and lowering operators, describing transitions between the excited $\ket{e}_j$ and ground $\ket{g}_j$ states. The coefficients $a_{jk}$ depend on the spatial separation $r$ between the atoms. In the limit of $r\gg 1/\omega$ they reduce to $a_{jk}\simeq \gamma_0\delta_{jk}$, whereas for $r\ll 1/\omega$ they take the form $a_{jk}\simeq\gamma_0$. Here $\gamma_0$ is the decay rate of the individual transition between $\ket{e}$ and $\ket{g}$. In the first regime the two-level atoms interact effectively with independent environments, while in the second, the transitions are collective and lead to the Dicke model of super-radiance \cite{Dicke}. To quantitatively assess this behavior of uncorrelated/correlated dynamics as a function of $r$, we compute the measure of correlations $\bar{I}$, Eq. \eqref{Ibar} (see Appendix \ref{Ap:3} for details). The results are shown in Fig. 2. Despite the fact that the value of $\bar{I}$ depends on time (the dynamical map is $\mathcal{E}_{\rm S}={\rm e}^{t\mathcal{L}}$), $\bar{I}$ decreases as $r$ increases, as expected. 
Furthermore, the value of $\bar{I}$ approaches zero for $t$ large enough (see inset plot), except in the limiting case $r=0$, because for $r\neq0$ the dynamics becomes uncorrelated in the asymptotic limit, $\lim_{t\rightarrow\infty}{\rm e}^{t\mathcal{L}}=\mathcal{E}\otimes\mathcal{E}$, where $\mathcal{E}(\cdot)=K_1(\cdot)K_1^\dagger+K_2(\cdot)K_2^\dagger$ with Kraus operators $K_1=\bigl( \begin{smallmatrix} 0 & 0\\ 1 & 0 \end{smallmatrix} \bigr)$ and $K_2=\bigl( \begin{smallmatrix} 0 & 0\\ 0 & 1 \end{smallmatrix} \bigr)$; however for $r=0$, $\lim_{t\rightarrow\infty}{\rm e}^{t\mathcal{L}}$ is a correlated map. Thus, we obtain perfect agreement between the rigorous measure of correlations $\bar{I}$ and the physically expected behavior of two distant atoms undergoing independent noise. \subsection{Spatial noise correlations in quantum computing} Fault-tolerant quantum computing is predicted to be achievable provided that detrimental noise is sufficiently weak \textit{and} not too strongly correlated \cite{Preskill}. However, even if noise correlations decay sufficiently fast in space, associated (provable) bounds for the accuracy threshold values can decrease by several orders of magnitude as compared to uncorrelated noise \cite{Kitaev}. Thus, it is of both fundamental and practical importance \cite{Joe} to be able to detect, quantify and possibly reduce without a priori knowledge of the underlying microscopic dynamics the amount of correlated noise. Here, we exemplify how the proposed measure can be employed in this context by applying it to a simple, though paradigmatic model system of two representative qubits from a larger qubit register. 
We assume that the qubits are exposed to local thermal (bosonic) baths, such as realized e.g.~by coupling distant atomic qubits to the surrounding electromagnetic radiation field, and that they interact via a weak ZZ-coupling, which could be caused, e.g., by undesired residual dipolar or van-der-Waals type interactions between the atoms. The ``error'' dynamics of this system is described by the master equation \begin{align}\label{masterZZ} \frac{d\rho_\mathrm{S}}{dt}=\mathcal{L}(\rho_\mathrm{S})=&-{\rm i}[\tfrac{\omega}{2}(\sigma_1^z+\sigma_2^z)+J\sigma^z_1\sigma^z_2,\rho_\mathrm{S}]\\ +&\sum_{j=1,2}\gamma_0(\bar{n}+1)\Big(\sigma_j^-\rho_{\mathrm{S}}\sigma_j^+-\tfrac{1}{2}\{\sigma_j^+\sigma_j^-,\rho_\mathrm{S}\}\Big)\nonumber\\ +&\sum_{j=1,2}\gamma_0\bar{n}\Big(\sigma_j^+\rho_{\mathrm{S}}\sigma_j^--\tfrac{1}{2}\{\sigma_j^-\sigma_j^+,\rho_\mathrm{S}\}\Big),\nonumber \end{align} where $\omega$ is the energy difference between the qubit states, $J$ the strength of the residual Hamiltonian coupling, $\gamma_0$ is again the decay rate between upper and lower energy level of each individual qubit and $\bar{n}=[\exp(\omega/T)-1]^{-1}$ is the mean number of bosons with frequency $\omega$ in the two local baths of temperature $T$ (assumed to be equal). We assume $J$ and $\gamma_0$ to be out of our control and aim at studying the spatial correlations of the errors induced by the interplay of the residual ZZ-coupling and the baths as a function of the bath temperature $T$ and elapsed time $t$, which in the present context might be interpreted as the time for executing one round of quantum error correction \cite{Preskill,Dennis}. Since the overall probability that some error occurs on the two qubits will increase under increasing $t$ and $T$, we need to fix it for a fair assessment of the correlation of the dynamics. 
A natural way to do this is by defining the error probability in terms of how close the dynamical map induced by Eq.~\eqref{masterZZ} [excluding the term $\tfrac{\omega}{2}(\sigma_1^z+\sigma_2^z)$, as this is not considered a source of error] is to the identity map (the case of no errors). Particularly, we can use the fidelity between both Choi-Jamio{\l}kowski states, $\rho^{\rm CJ}_{\rm S}$ for the ``error'' map and $|\Phi_{\mathrm{S}\mathrm{S}'}\rangle$ for the identity map, $P_{\rm error}=1-\sqrt{\langle\Phi_{\mathrm{S}\mathrm{S}'}|\rho^{\rm CJ}_{\rm S}|\Phi_{\mathrm{S}\mathrm{S}'}\rangle}$. Figure 3 shows the amount of dynamical correlations as measured by $\bar{I}$ along a $t$-$T$ line on which the error probability is constant ($P_{\rm error}=0.1$, green line in the inset plot). The numerical data shows, despite this fixing of the overall error rate, that as the temperature increases the correlatedness of errors decreases very rapidly. This remarkable result suggests that by increasing the effective, surrounding temperature one can strongly decrease the non-local character of the noise at the expense of a slightly higher error rate per fixed time $t$, or constant error rates if the time $t$ for an error correction round can be reduced. Thus, the proposed quantifier might prove useful to meet and certify in a given physical architecture the noise levels and noise correlation characteristics which are required to reach the regime where fault-tolerant scalable quantum computing becomes feasible in practice. \begin{figure} \caption{Amount of spatial correlations $\bar{I}$.} \end{figure} \section{Conclusion} In this work, we have formulated a general measure for the spatial correlations of quantum dynamics without restriction to any specific model. To that aim we have adopted a resource theory approach and obtained a fundamental law that any faithful quantifier of spatial correlation must satisfy. 
We have characterized the maximally correlated dynamics, and applied our measure to the paradigmatic example of two atoms radiating in the electromagnetic field, where spatial correlations are naturally related to the separation between atoms. Furthermore, we have illustrated the applicability of the measure in the context of quantum computing, where it can be employed to quantify and potentially control spatial noise correlations without a priori knowledge of the underlying dynamics. Beyond the scope of this work it will be interesting from a fundamental point of view to study how many independent (up to local unitaries) maximally correlated dynamics there are, and how to deal with the case of multi-partite or infinite dimensional systems. From a practical point of view, it is also interesting to develop efficient methods to estimate the proposed measure, in particular in high-dimensional quantum systems, e.g.~by the construction of witnesses or bounds, in analogy to entanglement estimators \cite{GuhneToth} that have been developed based on the resource theory of entanglement. In this regard, it is our hope that the present results provide a useful tool to study rigorously the role of spatial correlations in a variety of physical processes, including noise assisted transport, quantum computing and dissipative phase transitions. \section*{Acknowledgments} We acknowledge interesting discussions with T. Monz and D. Nigg, as well as financial support by the Spanish MINECO grant FIS2012-33152, the CAM research consortium QUITEMAD grant S2009-ESP-1594, the European Commission PICC: FP7 2007-2013, Grant No.~249958, the UCM-BS grant GICC-910758 and the U.S. Army Research Office through grant W911NF-14-1-0103. 
\appendix \section{Choi-Jamio{\l}kowski state of uncorrelated maps} \label{Ap:1} First of all, let $U_{\mathrm{B}\leftrightarrow\mathrm{A}'}$ be the commutation matrix (or unitary swap operation) \cite{vec1,vec2} between Hilbert subspaces $\mathcal{H}_\mathrm{B}$ and $\mathcal{H}_{\mathrm{A}'}$ of the total Hilbert space $\mathcal{H}_\mathrm{A}\otimes \mathcal{H}_\mathrm{B}\otimes \mathcal{H}_{\mathrm{A}'}\otimes \mathcal{H}_{\mathrm{B}'}$: \begin{eqnarray} U_{\mathrm{B}\leftrightarrow\mathrm{A}'} \left(M_1\otimes M_2\otimes M_{3}\otimes M_{4}\right) U_{\mathrm{B}\leftrightarrow\mathrm{A}'}^\dagger\\ = M_1\otimes M_{3} \otimes M_2\otimes M_{4}, \end{eqnarray} where $M_1$, $M_2$, $M_3$ and $M_4$ are operators acting on the respective Hilbert subspaces in the decomposition $\mathcal{H}_\mathrm{A}\otimes \mathcal{H}_\mathrm{B}\otimes \mathcal{H}_{\mathrm{A}'}\otimes \mathcal{H}_{\mathrm{B}'}$. That is, $M_1$ acts on $\mathcal{H}_\mathrm{A}$, $M_4$ on $\mathcal{H}_{\mathrm{B}'}$, and $M_2$ and $M_{3}$ act on $\mathcal{H}_\mathrm{B}$ and $\mathcal{H}_{\mathrm{A}'}$ on the left hand side and on $\mathcal{H}_{\mathrm{A}'}$ and $\mathcal{H}_\mathrm{B}$ on the right hand side of the equality respectively. Note that $U_{\mathrm{B}\leftrightarrow\mathrm{A}'}U_{\mathrm{B}\leftrightarrow\mathrm{A}'}=\mathbb{1}$ and then $U_{\mathrm{B}\leftrightarrow\mathrm{A}'}=U_{\mathrm{B}\leftrightarrow\mathrm{A}'}^\dagger$. 
Now, it turns out that the evolution given by some dynamical map $\mathcal{E}_\mathrm{S}$ is uncorrelated with respect to the subsystems $\mathrm{A}$ and $\mathrm{B}$, $\mathcal{E}_\mathrm{S}=\mathcal{E}_\mathrm{A}\otimes \mathcal{E}_\mathrm{B}$, if and only if its Choi-Jamio{\l}kowski state $\rho^{\rm CJ}_\mathrm{S}:=\mathcal{E}_\mathrm{S}\otimes\mathds{1}_{\mathrm{S}'}(\ket{\Phi_{\mathrm{S}\mathrm{S}'}}\bra{\Phi_{\mathrm{S}\mathrm{S}'}})$ is \begin{equation}\label{CJstatetilde} \rho^{\rm CJ}_\mathrm{S}=U_{\mathrm{B}\leftrightarrow\mathrm{A}'}\left(\rho^{\rm CJ}_\mathrm{A}\otimes \rho^{\rm CJ}_\mathrm{B}\right) U_{\mathrm{B}\leftrightarrow\mathrm{A}'}, \end{equation} where $\rho^{\rm CJ}_\mathrm{A}$ and $\rho^{\rm CJ}_\mathrm{B}$ are the Choi-Jamio{\l}kowski states of the maps $\mathcal{E}_\mathrm{A}$ and $\mathcal{E}_\mathrm{B}$, respectively. Indeed, if $\mathcal{E}_\mathrm{S}=\mathcal{E}_\mathrm{A}\otimes \mathcal{E}_\mathrm{B}$, we have (omitting for the sake of clarity the subindexes in the basis expansion of $\ket{\Phi_{\mathrm{S}\mathrm{S}'}}$): \begin{widetext} \begin{align} \rho^{\rm CJ}_\mathrm{S}=\mathcal{E}_\mathrm{S}\otimes\mathds{1}_{\mathrm{S}'}(\ket{\Phi_{\mathrm{S}\mathrm{S}'}}\bra{\Phi_{\mathrm{S}\mathrm{S}'}})&=\frac{1}{d^2}\sum_{k,\ell,m,n=1}^d\mathcal{E}_\mathrm{S}\left(|k\ell\rangle\langle mn|\right)\otimes|k\ell\rangle\langle mn| \nonumber\\ &=\frac{1}{d^2}\sum_{k,\ell,m,n=1}^d\mathcal{E}_\mathrm{A}\left(|k\rangle\langle m|\right)\otimes\mathcal{E}_\mathrm{B}\left(|\ell\rangle\langle n|\right)\otimes|k\ell\rangle\langle mn|, \end{align} then \begin{align} U_{\mathrm{B}\leftrightarrow\mathrm{A}'}\rho^{\rm CJ}_\mathrm{S} U_{\mathrm{B}\leftrightarrow\mathrm{A}'}&=\frac{1}{d^2}\sum_{k,\ell,m,n=1}^d\mathcal{E}_\mathrm{A}(|k\rangle\langle m|)\otimes|k\rangle\langle m|\otimes\mathcal{E}_\mathrm{B}(|\ell\rangle\langle n|)\otimes |\ell\rangle\langle n|\nonumber\\ &=\frac{1}{d}\sum_{k,m=1}^d\mathcal{E}_\mathrm{A}\otimes\mathds{1}(|kk\rangle\langle 
mm|)\otimes\frac{1}{d}\sum_{\ell,n=1}^d\mathcal{E}_\mathrm{B}\otimes\mathds{1}(|\ell \ell\rangle\langle nn|)\nonumber\\ &=\rho^{\rm CJ}_\mathrm{A}\otimes \rho^{\rm CJ}_\mathrm{B}. \end{align} \end{widetext} Conversely, if Eq. \eqref{CJstatetilde} holds, then the dynamics has to be uncorrelated because the correspondence between Choi-Jamio{\l}kowski states and dynamical maps is one-to-one. From Eq. \eqref{CJstatetilde} it is straightforward to conclude that $\bar{I}(\mathcal{E}_{\mathrm{S}})=0$ if and only if $\mathcal{E}_{\mathrm{S}}$ is uncorrelated, because the von Neumann entropy of the Choi-Jamio{\l}kowski state factorizes $S(\rho^{\rm CJ}_\mathrm{S})=S\left[U_{\mathrm{B}\leftrightarrow\mathrm{A}'}\left(\rho^{\rm CJ}_\mathrm{A}\otimes \rho^{\rm CJ}_\mathrm{B}\right)U_{\mathrm{B}\leftrightarrow\mathrm{A}'}\right]=S\left(\rho^{\rm CJ}_\mathrm{A}\otimes \rho^{\rm CJ}_\mathrm{B}\right)=S\left(\rho^{\rm CJ}_\mathrm{A}\right)+S\left(\rho^{\rm CJ}_\mathrm{B}\right)$ if and only if $\mathcal{E}_{\mathrm{S}}$ is uncorrelated. \section{Proof of Theorem 2} \label{Ap:2} As commented in Section~\ref{sec:MCD}, $U_\mathrm{S}\in\mathfrak{C}$ if \begin{equation}\label{proof1} \ket{\Psi_{(\mathrm{A}\mathrm{A}')|(\mathrm{B}\mathrm{B}')}}=U_{\mathrm{S}}\otimes\mathbb{1}|\Phi_{\mathrm{S}\mathrm{S}'}\rangle, \end{equation} where $\ket{\Psi_{(\mathrm{A}\mathrm{A}')|(\mathrm{B}\mathrm{B}')}}$ is a maximally entangled state with respect to the bipartition $\mathrm{A}\mathrm{A}'|\mathrm{B}\mathrm{B}'$. Note that if $\ket{\Psi_{(\mathrm{A}\mathrm{A}')|(\mathrm{B}\mathrm{B}')}}$ is a maximally entangled state with respect to the bipartition $\mathrm{A}\mathrm{A}'|\mathrm{B}\mathrm{B}'$, $U_{\mathrm{B}\leftrightarrow\mathrm{A}'}\ket{\Psi_{(\mathrm{A}\mathrm{A}')|(\mathrm{B}\mathrm{B}')}}$ will be a maximally entangled state with respect to the bipartition $\mathrm{A}\mathrm{B}|\mathrm{A}'\mathrm{B}'=\mathrm{S}|\mathrm{S}'$. 
Since any maximally entangled state with respect to the bipartition $\mathrm{S}|\mathrm{S}'$ can be written as $\tilde{U}_{\mathrm{S}}\otimes\tilde{U}_{\mathrm{S}'}|\Phi_{\mathrm{S}\mathrm{S}'}\rangle$ for some local unitaries $\tilde{U}_{\mathrm{S}}$ and $\tilde{U}_{\mathrm{S}'}$, we can write \begin{equation}\label{proof2} U_{\mathrm{B}\leftrightarrow\mathrm{A}'}\ket{\Psi_{(\mathrm{A}\mathrm{A}')|(\mathrm{B}\mathrm{B}')}}=\tilde{U}_{\mathrm{S}}\otimes\tilde{U}_{\mathrm{S}'}|\Phi_{\mathrm{S}\mathrm{S}'}\rangle. \end{equation} Because of Eqs. \eqref{proof1} and \eqref{proof2} we conclude that $U_{\mathrm{S}}\in\mathfrak{C}$ if and only if there exist unitaries $\tilde{U}_{\mathrm{S}}$ and $\tilde{U}_{\mathrm{S}'}$ such that \begin{equation}\label{Umax1} U_{\mathrm{S}}\otimes\mathbb{1}_{\mathrm{S}'}|\Phi_{\mathrm{S}\mathrm{S}'}\rangle=U_{\mathrm{B}\leftrightarrow\mathrm{A}'} ( \tilde{U}_{\mathrm{S}}\otimes\tilde{U}_{\mathrm{S}'} ) |\Phi_{\mathrm{S}\mathrm{S}'}\rangle. \end{equation} Next, we prove the following \noindent \textit{Lemma}. A unitary map $U_\mathrm{S}\in\mathfrak{C}$ if and only if there exists some other unitary $V$ such that the matrix elements of $U_S$ can be written as \begin{equation}\label{Umax2} \langle k\ell|U_{\mathrm{S}}|m n\rangle=\langle k m |V|\ell n \rangle. \end{equation} \noindent \textit{Proof}. If $U_\mathrm{S}\in\mathfrak{C}$, then by taking inner product with respect to the basis element $\ket{ k \ell m n}$ in Eq. \eqref{Umax1} we obtain: \begin{align}\label{Vintermedio} \langle k\ell|U_{\mathrm{S}}|m n\rangle&=d\langle k m \ell n |\tilde{U}_{\mathrm{S}}\otimes \tilde{U}_{\mathrm{S}'}|\Phi_{\mathrm{S}\mathrm{S}'}\rangle \nonumber\\ &=\langle k m |\tilde{U}_{\mathrm{S}} \tilde{U}^{\rm t}_{\mathrm{S}'}|\ell n \rangle=\langle k m |V|\ell n\rangle, \end{align} for $V=\tilde{U}_{\mathrm{S}} \tilde{U}^{\rm t}_{\mathrm{S}'}$. 
Here we have used that $A\otimes\mathbb{1}_{\mathrm{S}'}\ket{\Phi_{\mathrm{S}\mathrm{S}'}}=\mathbb{1}_{\mathrm{S}}\otimes A^{\rm t}\ket{\Phi_{\mathrm{S}\mathrm{S}'}}$ where the superscript ``t'' denotes the transposition in the Schmidt basis of the maximally entangled state $\ket{\Phi_{\mathrm{S}\mathrm{S}'}}$, which has been taken to be the canonical basis here. \noindent Conversely, assume that there exists a unitary $V$ satisfying \eqref{Umax2}. As $V$ can always be decomposed as the product of two unitaries, $V=V_1V_2$, by setting $\tilde{U}_{\mathrm{S}}=V_1$ and $\tilde{U}_{\mathrm{S}'}^{\rm t}=V_2$, the same algebra as in Eq. \eqref{Vintermedio} leads us to rewrite Eq. \eqref{Umax2} as \begin{equation} \langle k \ell m n|U_{\mathrm{S}}\otimes \mathbb{1}_{\mathrm{S}'}|\Phi_{\mathrm{S}\mathrm{S}'}\rangle=\langle k \ell m n|U_{\mathrm{B}\leftrightarrow\mathrm{A}'}\tilde{U}_{\mathrm{S}}\otimes \tilde{U}_{\mathrm{S}'}|\Phi_{\mathrm{S}\mathrm{S}'}\rangle. \end{equation} Since $\ket{ k \ell m n}$ are elements of a basis we conclude that Eq. \eqref{Umax1} holds. \qed \noindent With these results, Theorem 2 is easy to prove. \noindent \textit{Proof of Theorem 2}. Note that for any unitary $U_\mathrm{S}$, Eq. \eqref{Umax2} is satisfied for some matrix $V$. Thus, what we have to prove is that such a matrix $V$ is unitary if and only if $U_\mathrm{S}$ fulfills the equation \begin{equation} \sum_{i,j} \langle k i |U_{\mathrm{S}}| m j \rangle\langle n j |U_{\mathrm{S}}^\dagger| \ell i \rangle=\delta_{k\ell}\delta_{mn}, \end{equation} and this follows after a straightforward algebraic computation. \qed \section{Two two-level atoms coupled to the radiation field} \label{Ap:3} The free Hamiltonian of the atoms is \begin{equation} H_\mathrm{S}=\frac{\omega}{2}(\sigma^z_1+\sigma^z_2), \end{equation} where $\sigma^z_j$ is the Pauli $z$-matrix for the $j$-th atom. 
In addition, the environmental free Hamiltonian is given by \begin{equation} H_{\rm E}=\sum_{\bm{k}} \sum_{\lambda =1,2} \omega_{\bm{k}} a_\lambda^\dagger(\bm{k})a_\lambda(\bm{k}), \end{equation} where $\bm{k}$ and $\lambda$ stand for the wave vector and the two polarization degrees of freedom, respectively. We have taken natural units $\hbar=c=1$. The dispersion relation in free space is $\omega_{\bm{k}}=|\bm{k}|$, and the field operators $a_\lambda^\dagger(\bm{k})$ and $a_\lambda(\bm{k})$ describe the creation and annihilation of photons with wave vector $\bm{k}$ and polarization vector $\bm{e}_\lambda$. These fulfill $\bm{k}\cdot\bm{e}_\lambda=0$ and $\bm{e}_\lambda\cdot\bm{e}_{\lambda'}=\delta_{\lambda,\lambda'}$. The atom-field interaction is described in the dipole approximation by the Hamiltonian \begin{equation} H_{\rm SE}=-\sum_{j=1,2}\left[\sigma_j^-\bm{d}\cdot\bm{E}(\bm{r}_j)+\sigma_j^+\bm{d}^\ast\cdot\bm{E}(\bm{r}_j)\right]. \end{equation} Here, $\bm{d}$ is the dipole matrix element of the atomic transition, $\bm{r}_j$ denotes the position of the $j$-th atom, and $\sigma_j^+=(\sigma_j^-)^\dagger=|e\rangle\mbox{}_j\langle g|$ for its excited $\ket{e}_j$ and ground $\ket{g}_j$ states. Furthermore, the electric field operator is given by (Gaussian units) \begin{equation} \bm{E}(\bm{r})={\rm i}\sum_{\bm{k},\lambda}\sqrt{\frac{2\pi\omega_{\bm{k}}}{\mathcal{V}}}\bm{e}_\lambda(\bm{k})\left(a_\lambda(\bm{k}){\rm e}^{{\rm i} \bm{k}\cdot\bm{r}}-a^\dagger_\lambda(\bm{k}){\rm e}^{-{\rm i} \bm{k}\cdot\bm{r}}\right), \end{equation} where $\mathcal{V}$ denotes the quantization volume. 
In the Markovian weak coupling limit \cite{BrPe02} the master equation for the atoms takes the form: \begin{align}\label{MasterApp} \frac{d\rho_\mathrm{S}}{dt}=\mathcal{L}(\rho_\mathrm{S})=&-{\rm i}\tfrac{\omega}{2}[\sigma_1^z+\sigma_2^z,\rho_\mathrm{S}]\\ &+\sum_{j,k=1,2}a_{jk}\Big(\sigma_k^-\rho_{\mathrm{S}}\sigma_j^+-\tfrac{1}{2}\{\sigma_j^+\sigma_k^-,\rho_\mathrm{S}\}\Big),\nonumber \end{align} where, after taking the continuum limit ($\tfrac{1}{\mathcal{V}}\sum_{\bm{k}}\rightarrow \tfrac{1}{(2\pi)^3}\int d^3\bm{k}$) and performing the integrals, the coefficients $a_{jk}$ are given by (sec. 3.7.5 of \cite{BrPe02}) \begin{equation} a_{jk}=\gamma_0[j_0(x_{jk})+P_2(\cos\theta_{jk})j_2(x_{jk})], \end{equation} where $\gamma_0=\frac{4}{3}\omega^3|\bm{d}|^2$, and $j_0(x)$ and $j_2(x)$ are spherical Bessel functions \cite{AS}, \begin{equation} j_0(x)=\frac{\sin x}{x}, \quad j_2(x)=\left(\frac{3}{x^3}-\frac{1}{x}\right)\sin x-\frac{3}{x^2}\cos x, \end{equation} and \begin{equation} P_2(\cos\theta)=\frac{1}{2}(3\cos^2\theta- 1) \end{equation} is a Legendre polynomial, with \begin{equation} x_{jk}=\omega|\bm{r}_j-\bm{r}_k|, \quad \text{and } \cos^2(\theta_{jk})=\frac{|\bm{d}\cdot(\bm{r}_j-\bm{r}_k)|^2}{|\bm{d}|^2|\bm{r}_j-\bm{r}_k|^2}. \end{equation} Notice that if the distance between atoms, $r=|\bm{r}_1-\bm{r}_2|$, is much larger than the wavelength associated with the atomic transition $r\gg 1/\omega$, we have $a_{jk}\simeq \gamma_0\delta_{jk}$ and only the diagonal terms $\gamma_0=\frac{4}{3}\omega^3|\bm{d}|^2$ are relevant. Then, the master equation describes two-level atoms interacting with independent environments, and there are no correlations in the emission of photons by the first and the second atom. 
In the opposite case, when $r\ll 1/\omega$, every matrix element approaches the same value $a_{jk}\simeq\gamma_0$; in the master equation the atomic transitions can then be approximately described by the collective jump operators $J_{\pm}=\sigma_1^\pm+\sigma_2^\pm$, and the pair of atoms becomes equivalent to a four-level system with Hamiltonian $\omega J_z=\frac{\omega}{2} (\sigma_1^z+\sigma_2^z)$ at the mean position $(\bm{r}_1+\bm{r}_2)/2$ interacting with the electromagnetic vacuum. This emission of photons in a collective way, known as super-radiance, is effectively described in terms of collective angular momentum operators in the Dicke model \cite{Dicke}. \textbf{Evaluation of the correlation measure.} In order to numerically compute $\bar{I}$ for this dynamics, we consider a maximally entangled state $|\Phi_{\mathrm{S}\mathrm{S}'}\rangle$ between two sets $\mathrm{S}$ and $\mathrm{S}'$ of two qubits. Namely, $\mathrm{S}$ is the set of the two physical qubits, i.e. the two two-level atoms 1 and 2, and $\mathrm{S}'$ is made up of two auxiliary qubits $1'$ and $2'$ as sketched in Fig. 1. Next, the part $\mathrm{S}$ of the maximally entangled state $|\Phi_{\mathrm{S}\mathrm{S}'}\rangle\langle\Phi_{\rm \mathrm{S}\mathrm{S}'}|$ is evolved according to the master equation \eqref{MasterApp} while keeping the part ${\mathrm{S}'}$ constant, to obtain $\rho_{\mathrm{S}}^{\rm CJ}(t)$. This can be done, for instance, by numerically integrating the master equation $\frac{d \rho_{\mathrm{S}}^{\rm CJ}(t)}{dt}=\mathcal{L}\otimes\mathds{1} [\rho_{\rm S}^{\rm CJ}(t)]$, with the initial condition $\rho_{\rm S}^{\rm CJ}(0)=|\Phi_{\mathrm{S}\mathrm{S}'}\rangle\langle\Phi_{\mathrm{S}\mathrm{S}'}|$, where $\mathcal{L}$ is for the present example specified in Eq. \eqref{MasterApp}. Tracing out the qubits $2$ and $2'$ of $\rho_{\mathrm{S}}^{\rm CJ}(t)$ yields $\rho^{\rm CJ}_\mathrm{S}(t)|_{11'}$, and similarly tracing out qubits $1$ and $1'$ yields $\rho^{\rm CJ}_\mathrm{S}(t)|_{22'}$. 
Finally, this allows one to compute the von Neumann entropies of $\rho^{\rm CJ}_\mathrm{S}(t)|_{11'}$, $\rho^{\rm CJ}_\mathrm{S}(t)|_{22'}$ and $\rho_{\mathrm{S}}^{\rm CJ}(t)$ to calculate $\bar{I}(t)$ according to Eq. \eqref{Ibar}. \end{document}
\begin{document} \title[Hyperbolic series]{Functional relations for hyperbolic cosecant series} \author{M. Buzzegoli} \address{Universit\`a di Firenze and INFN Sezione di Firenze, Florence, Italy} \email{[email protected]} \thanks{Very useful discussions with F. Becattini and A. Palermo and helpful comments on the manuscript of C. Dappiaggi are gratefully acknowledged.} \subjclass[2010]{Primary 11L03; Secondary 11B68} \keywords{Lambert series, Bernoulli, Norlund, Ramanujan, polynomials, series, q-series.} \begin{abstract} We study the function series $\sum_{n=1}^\infty \phi^{2m+2} \text{csch}^{2m+2}(n\phi/2)$, and similar series, for integers $m$ and complex $\phi$. This hyperbolic series is linearly related to the Lambert series. The Lambert series is known to satisfy a functional equation which defines the Ramanujan polynomials. By using the residue theorem (summation theorem) we find the functional equation satisfied by this hyperbolic series. The functional equation identifies a class of polynomials which can be seen as a generalization of the Ramanujan polynomials. These polynomials coincide with the asymptotic expansion of the hyperbolic series at the origin and they all vanish for $\phi=\pm 2\pi i$. We furthermore derive several identities between Harmonic numbers and ordinary and generalized Bernoulli polynomials. \end{abstract} \maketitle \section{Introduction and main results} \label{sec:intro} Given an integer number $m\geq 0$ and a complex number $\phi$, we study the function series \begin{equation*} S_{2m+2}(\phi)=\sum_{n=1}^\infty \frac{\phi^{2m+2}}{\sinh^{2m+2}(n\phi/2)}, \end{equation*} henceforth simply denoted as hyperbolic series. The hyperbolic series defines a function in $\phi$ that is analytic in any domain not intersecting the imaginary axis, where the series diverges everywhere except at the origin. By using the summation theorem to write the sum of the series as a complex integral, we find a functional relation satisfied by this series. 
The functional relation separates the function into a polynomial part and into a part that is not analytical in $\Re(\phi)=0$. The polynomial identified in this way coincides with the asymptotic expansion of the hyperbolic series in $\phi=0$. The functional relations are easily understood once we establish the connections between the hyperbolic series and the Lambert series. \begin{definition}[Lambert series]\label{def:Lambertserie} We denote with $\mathcal{L}_q(s)$ the Lambert series of the type \begin{equation}\label{eq:Lambertserie} \mathcal{L}_q(s)=\sum_{k=1}^\infty \frac{k^s q^k}{1-q^k}\qquad s\in\mathbb{R},\,q\in\mathbb{C} \end{equation} with $|q|<1$. \end{definition} \begin{proposition}\label{prop:LambertRel_Intro} For $m\geq 0$ integer and $\Re(\phi)\neq 0$, the hyperbolic series is the linear combination of the Lambert series $\mathcal{L}_{q}(s)$, \begin{equation} \label{eq:LambertRel_0} S_{2m+2}(\phi)=\frac{(2\phi)^{2m+2}}{(2m+1)!}\sum_{i=0}^{m} c_{2i+1}^{(m)}\,\mathcal{L}_{e^{-\sigma \phi}}(2i+1), \end{equation} where $\sigma=$sgn$(\Re(\phi))$ and $c_i^{(m)}$ are real numbers given by: \begin{equation*} c_{2i+1}^{(m)}=\frac{(2m+1)!}{(2i+1)!}\frac{B_{2m-2i}^{(2m+2)}(m+1)}{(2m-2i)!} \end{equation*} with $B_n^{(m)}(t)$ the generalized Bernoulli polynomial. \end{proposition} \begin{definition}[Generalized Bernoulli polynomials]\label{def:bernpoly} For integers $n$ and $m$ and real $t$, we denote with $B_n$ the Bernoulli number, with $B_n(t)$ the Bernoulli polynomial and with $B_n^{(m)}(t)$ the generalized Bernoulli polynomial \cite[p. 145]{Norlund1924} (see also \cite{LukeVol1}), defined respectively by the following exponential generating functions: \begin{align*}\label{eq:bernpoly} \frac{x}{e^x-1}&=\sum_{n=0}^\infty \frac{B_n}{n!} \,x^n\quad |x|<2\pi\, ;\qquad \frac{x\,e^{t\, x}}{e^x-1}=\sum_{n=0}^{\infty } \frac{B_n(t) x^n}{n!}\quad |x|<2\pi\, ;\\ \frac{x^m}{(e^x-1)^m} e^{t\, x} &=\sum_{n=0}^{\infty } \frac{B_n^{(m)}(t)\, x^n}{n!}\quad |x|<2\pi\, . 
\end{align*} \end{definition} Some functional relations for the Lambert series were given by Ramanujan~\cite{RamNotebook} and later proved by several authors, see~\cite{BerndtVol2} and references therein. When $s$ is a negative odd integer $s=-2m-1$ with $m=0,1,2,\dots$ and for any $\Re(\phi)>0$, the Lambert series satisfies the functional relation~\cite[Entry 21(i) Chapter 14]{BerndtVol2} \begin{equation} \label{eq:LambertRelNegOdd} \begin{split} \mathcal{L}_{e^{-\phi}}(-2m-1)=&(-1)^m\left(\frac{\phi}{2\pi}\right)^{2m}\mathcal{L}_{e^{-4\pi^2/\phi}}(-2m-1)+\\ &-\frac{1}{2}\left[1-(-1)^m\frac{\phi^{2m}}{(2\pi)^{2m}}\right]\zeta(2m+1)+\frac{1}{2^{2m+2}\phi}\mathcal{R}_{2m+2}(\phi), \end{split} \end{equation} where $\zeta$ is the Riemann zeta function and $\mathcal{R}$ is the Ramanujan polynomial~\cite{RamPoly} defined as \begin{equation} \label{eq:RamPol} \mathcal{R}_{2m+2}(\phi)=-2^{2m+1}\sum_{k=0}^{m+1} (2\pi i)^{2k} \frac{B_{2k}}{(2k)!} \frac{B_{2m+2-2k}}{(2m+2-2k)!}\phi^{2m+2-2k}, \end{equation} with $B_j$ the $j$-th Bernoulli number. Taking advantage of the identity \begin{equation*} \coth x=1+ \frac{2}{e^{2x}-1} \end{equation*} we can also write the Ramanujan identity~(\ref{eq:LambertRelNegOdd}) as an identity for hyperbolic cotangent series \begin{equation*} \begin{split} \sum_{n=1}^\infty \frac{\coth(n\phi/2)}{n^{2m+1}}=&(-1)^m\left(\frac{\phi}{2\pi}\right)^{2m} \sum_{n=1}^\infty \frac{\coth(4\pi^2 n/2\phi)}{n^{2m+1}}+\\ &+\frac{1}{2^{2m+1}\phi}\mathcal{R}_{2m+2}(\phi). 
\end{split} \end{equation*} In contrast to this series, the hyperbolic series $S_{2m+2}$ is related to the Lambert series with positive argument and, for any integer $m\geq 0$, the functional relation for the Lambert series with $s=2m+1$ becomes~\cite[Entry 13 Chapter 14]{BerndtVol2} \begin{equation} \label{eq:LambertRelPosOdd} \begin{split} \mathcal{L}_{e^{-\phi}}(2m+1)=&-(-1)^m\left(\frac{2\pi}{\phi}\right)^{2m+2}\mathcal{L}_{e^{-4\pi^2/\phi}}(2m+1) -\frac{\delta_{m,0}}{2\phi}+\\ &+\frac{1}{2}\frac{B_{2m+2}}{2m+2}\left[1+\frac{(-1)^m(2\pi)^{2m+2}}{\phi^{2m+2}}\right]. \end{split} \end{equation} We can then plug relations~(\ref{eq:LambertRelPosOdd}) into~(\ref{eq:LambertRel_0}) to find functional equations relating the hyperbolic series. \begin{theorem}\label{thm:funcrelS} The series $S_{2m+2}(\phi)$ for any integer $m\geq 0$ and any complex $\phi$ such that $\Re(\phi)\neq 0$ satisfies the functional relation \begin{equation} \label{eq:funcrelS} S_{2m+2}(\phi)-\sum_{i=0}^m\mathcal{S}_i^{(m)}(\phi)S_{2i+2}\left(\frac{4\pi^2}{\phi}\right)= \mathcal{B}_{2m+2}(\phi)+(-1)^{m+1}\frac{4\sigma}{m+2}\phi^{2m+1}, \end{equation} where $\sigma=$sgn$(\Re(\phi))$, $\mathcal{S}_i^{(m)}$ are polynomials in $\phi$ of degree $2m+2$ and $\mathcal{B}_{2m+2}(\phi)$ is the polynomial \begin{equation} \label{eq:PolynB} \mathcal{B}_{2m+2}(\phi)= -2^{2m+1}\sum_{k=0}^{m+1}(2\pi i)^{2k}\frac{B_{2k}}{(2k)!} \frac{B_{2m+2-2k}^{(2m+2)}(m+1)}{(2m+2-2k)!} \, \phi^{2m+2-2k}. \end{equation} \end{theorem} We do not have a general expression for the polynomials $\mathcal{S}_i^{(m)}$ but we give the procedure to find them. The main difference with the Lambert functional equations is that the hyperbolic function $S_{2m+2}(\phi)$ is related not just to $S_{2m+2}(4\pi^2/\phi)$ but to all functions $S_{2i+2}(4\pi^2/\phi)$ with $i=0,1,\dots,m$. 
Similarly to functional equation~(\ref{eq:LambertRelNegOdd}), Eq.~(\ref{eq:funcrelS}) defines a class of polynomials which can be seen as a generalization of Ramanujan polynomials. Indeed, the form of the polynomials~(\ref{eq:PolynB}) suggests defining the generalization of the Ramanujan polynomials as \begin{equation*} \mathcal{R}^{(s,r)}_{2m+2}(\phi)= -2^{2m+1}\sum_{k=0}^{m+1}(2\pi i)^{2k}\frac{B_{2k}}{(2k)!} \frac{B_{2m+2-2k}^{(2s+r)}(s)}{(2m+2-2k)!} \, \phi^{2m+2-2k}. \end{equation*} It follows immediately from the generalized Bernoulli properties that the generalized Ramanujan polynomials reduce to ordinary Ramanujan polynomials and to $\mathcal{B}$ polynomials for the following choice of parameters: \begin{equation*} \mathcal{R}^{(0,1)}_{2m+2}(\phi)=\mathcal{R}_{2m+2}(\phi),\quad \mathcal{R}^{(m+1,0)}_{2m+2}(\phi)=\mathcal{B}_{2m+2}(\phi). \end{equation*} Notice that each term of the series $S_{2i+2}(4\pi^2/\phi)$ and the last term in~(\ref{eq:funcrelS}) are non-analytic at $\phi=0$. Then, the functional relation~(\ref{eq:funcrelS}) identifies the polynomial $\mathcal{B}_{2m+2}$ as the only analytical part of the hyperbolic series and it can be used to assign a value to the series for imaginary $\phi$. The study of this series and its analytical part is also motivated by a recent application in Physics. Exact solutions for thermal states of a quantum relativistic fluid in the presence of both acceleration and rotation are found by extracting the analytic part of a function series that does not converge in the whole complex plane~\cite{Becattini:2020qol}. In particular, it was shown in ref.~\cite{Becattini:2020qol} that every thermal distribution quantity of an accelerating fluid composed of non-interacting massless scalar particles can be given as a linear combination of the polynomials~(\ref{eq:PolynB}) derived from the hyperbolic series discussed here. The paper is organized as follows. 
In Section~\ref{sec:basic} we consider three types of hyperbolic series, we study their uniform convergence, and we show that they are not analytic in the imaginary axis. In Proposition~\ref{prop:LambertRel} we establish a linear relation between the hyperbolic series and the Lambert series. In Sec.~\ref{sec:gammarep} we show that the linearity coefficients of proposition~\ref{prop:LambertRel} are given by special values of the generalized Bernoulli polynomials. This is done by relating the ratio of two gamma functions to the generalized Bernoulli polynomials (Theorem~\ref{thm:gammaratiorepgen}). In Sec.~\ref{sec:IdenBer} we take advantage of Theorem~\ref{thm:gammaratiorepgen} to derive several identities involving ordinary and generalized Bernoulli polynomials as well as Bell polynomials and Harmonic numbers. In Sec.~\ref{sec:FuncRel} we first prove the functional relation satisfied by Lambert series and then we prove Theorem~\ref{thm:funcrelS}. We also show that the polynomial $\mathcal{B}$ corresponds to the asymptotic expansion of $S_{2m+2}$ at the origin and we analyze the zeros of the polynomials. \section{Function series with hyperbolic cosecant} \label{sec:basic} The results quoted in Section \ref{sec:intro} can be extended to the following class of hyperbolic series. 
\begin{definition}[Hyperbolic series]\label{def:baseserie} Given $m$ and $\gamma$ two integers such that $m\geq 0$ and $\gamma<m+1$ and given $\phi$ a complex number such that $\Re(\phi)\neq 0$, we refer to hyperbolic series as the following function series involving the hyperbolic cosecant: \begin{align}\label{eq:baseserie_cosh} S^{(\gamma)}_{2m+2}(\phi)&=\sum_{n=1}^\infty \frac{\phi^{2m+2}\cosh^\gamma(n\,\phi)}{\sinh^{2m+2}(n\phi/2)},\\ \label{eq:baseserie_sinh} S^{(\sinh,\gamma)}_{2m+2}(\phi)&=\sum_{n=1}^\infty \frac{\phi^{2m+2}\sinh^\gamma(n\,\phi)}{\sinh^{2m+2}(n\phi/2)},\\ \label{eq:baseserie} S_{2m+2}(\phi)&=\sum_{n=1}^\infty \frac{\phi^{2m+2}}{\sinh^{2m+2}(n\phi/2)}, \end{align} where $S_{2m+2}(\phi)$ is the series discussed in Section \ref{sec:intro} and it corresponds to $S^{(\gamma)}_{2m+2}(\phi)$ in Eq. (\ref{eq:baseserie_cosh}) with $\gamma=0$. \end{definition} In this section we show that these series are analytic functions in $\phi$ in the whole complex plane except the imaginary axis, that they are continuous functions on the real axis but that they are not real analytic in any open set containing zero. These properties are inherited directly from Lambert series thanks to linearity relation~(\ref{eq:LambertRel_0}). Furthermore, we do not need to study all three types of hyperbolic series defined above. Indeed, the series (\ref{eq:baseserie_cosh}) and (\ref{eq:baseserie_sinh}) are linear combinations of $S_{2m+2}(\phi)$ and $S^{(\sinh,1)}_{2m+2}(\phi)$. 
Set $\gamma=2p$ if $\gamma$ is even, otherwise $\gamma=2p+1$, then \begin{align*} S^{(\gamma)}_{2m+2}(\phi)&=\sum_{l=0}^{\gamma} \genfrac(){0pt}{0}{\gamma}{l} 2^l\, S_{2(m-l)+2}(\phi);\\ S^{(\sinh,2p)}_{2m+2}(\phi)&=4^p\sum_{l=0}^{p} \genfrac(){0pt}{0}{p}{l} \, S_{2(m+l-2p)+2}(\phi);\\ S^{(\sinh,2p+1)}_{2m+2}(\phi)&= 4^p\sum_{l=0}^{p} \genfrac(){0pt}{0}{p}{l} \, S^{(\sinh,1)}_{2(m+l-2p)+2}(\phi). \end{align*} \end{proposition} \begin{proof} This is easily proved by taking advantage of hyperbolic function relations. Indeed, using the relation $\cosh(n\phi)=2\sinh^2(n\phi/2) +1$ and the binomial expansion we obtain \begin{equation*} \begin{split} S^{(\gamma)}_{2m+2}(\phi)&=\sum_{n=1}^\infty \frac{\phi^{2m+2}\left( 2\sinh^2(n\phi/2)+1\right)^\gamma}{\sinh^{2m+2}(n\phi/2)} =\sum_{n=1}^\infty\sum_{l=0}^{\gamma} \genfrac(){0pt}{0}{\gamma}{l}\frac{\phi^{2m+2}2^l\sinh^{2l}(n\phi/2)}{\sinh^{2m+2}(n\phi/2)}\\ &= \sum_{l=0}^{\gamma} \genfrac(){0pt}{0}{\gamma}{l} 2^l\, S_{2(m-l)+2}(\phi). \end{split} \end{equation*} In the same way, taking advantage of \begin{equation*} \sinh^2(n\phi)=\cosh^2(n\phi)-1=4\sinh^2(n\phi/2)+4\sinh^4(n\phi/2), \end{equation*} the series $S^{(\sinh,\gamma)}_{2m+2}(\phi)$ for an even $\gamma=2p$ can be written as \begin{equation*} \begin{split} S^{(\sinh,2p)}_{2m+2}(\phi)&= \sum_{n=1}^\infty \frac{\phi^{2m+2}4^p\left(\sinh^2(n\phi/2)+\sinh^4(n\phi/2)\right)^p}{\sinh^{2m+2}(n\phi/2)}\\ &=\sum_{n=1}^\infty\sum_{l=0}^{p} \genfrac(){0pt}{0}{p}{l}\frac{\phi^{2m+2}4^p\sinh^{4p-2l}(n\phi/2)}{\sinh^{2m+2}(n\phi/2)}\\ &= 4^p\sum_{l=0}^{p} \genfrac(){0pt}{0}{p}{l} \, S_{2(m+l-2p)+2}(\phi). \end{split} \end{equation*} The case with odd $\gamma=2p+1$ is the same as the previous equation multiplied by $\sinh(n\phi)$, thus leading to the result written above. \end{proof} Hence, from now on we only consider $S_{2m+2}(\phi)$ and $S^{(\sinh,1)}_{2m+2}(\phi)$. All their properties are transported to the others hyperbolic series thanks to these linear relations. 
Moving on now to find the relation with Lambert series, we have first to write the hyperbolic series just in terms of exponential functions. \begin{lemma}\label{lem:expseries} For any integer $m\geq 0$ and complex number $\phi$ such that $\Re(\phi)\neq 0$, the hyperbolic series are represented by \begin{align} \label{eq:expseries_1} S_{2m+2}(\phi)&=(2\phi)^{2m+2}\sum_{k=0}^\infty \genfrac(){0pt}{0}{k+m}{k-m-1} \frac{e^{-\sigma k\phi}}{1-e^{-\sigma k\phi}},\\ \label{eq:expseries_2} S^{(\sinh,1)}_{2m+2}(\phi)&= (2\sigma\phi)^{2m+1} \phi \sum_{k=0}^\infty \left[ \genfrac(){0pt}{0}{k+m-1}{k-m-1}+\genfrac(){0pt}{0}{k+m}{k-m}\right] \frac{e^{-\sigma k\phi}}{1-e^{-\sigma k\phi}}; \end{align} where $\sigma$ is the sign of the real part of $\phi$. \end{lemma} \begin{proof} We start by expressing a generic term of $S_{2m+2}(\phi)$ with exponential functions: \begin{equation*} \frac{\phi^{2m+2}}{\sinh^{2m+2}(n\phi/2)}=\frac{2^{2m+2}\phi^{2m+2}}{\left(e^{n\phi/2}-e^{-n\phi/2}\right)^{2m+2}} =\frac{2^{2m+2}\phi^{2m+2}e^{-n\phi(m+1)}}{\left(1-e^{-n\phi}\right)^{2m+2}}. \end{equation*} When $\Re(\phi)> 0$, we can use the well-known binomial property \begin{equation*} \frac{1}{(1-z)^{\beta+1}} = \sum_{k=0}^{\infty}\genfrac(){0pt}{0}{k+\beta}{k}z^k\qquad |z|<1 \end{equation*} and the generic term of the hyperbolic series is given by the series \begin{equation*} \frac{\phi^{2m+2}}{\sinh^{2m+2}(n\phi/2)} =2^{2m+2}\phi^{2m+2} \sum_{k=0}^\infty\genfrac(){0pt}{0}{k+2m+1}{k}e^{-n(k+m+1)\phi}. \end{equation*} We plug this inside the hyperbolic series and we invert the order of summation, then the last sum is the geometrical series which is converging for $\Re(\phi)> 0$. We find: \begin{equation*} \begin{split} S_{2m+2}(\phi)&=2^{2m+2}\phi^{2m+2} \sum_{k=0}^\infty\sum_{n=1}^\infty\genfrac(){0pt}{0}{k+2m+1}{k}e^{-n(k+m+1)\phi}\\ &=\sum_{k=0}^\infty \genfrac(){0pt}{0}{k+2m+1}{k} \frac{2^{2m+2}\phi^{2m+2}}{ e^{(k+m+1)\phi}-1}. 
\end{split} \end{equation*} Then, we change the summation index name from $k$ to $k'=k+m+1$, so that \begin{equation*} \begin{split} S_{2m+2}(\phi)&=\!\!\sum_{k=m+1}^\infty\!\! \genfrac(){0pt}{0}{k+m}{k-m-1} \frac{2^{2m+2}\phi^{2m+2}}{ e^{k\phi}-1} =\!\!\sum_{k=m+1}^\infty \!\!\genfrac(){0pt}{0}{k+m}{k-m-1}\frac{(2\phi)^{2m+2}e^{-k\phi}}{1-e^{-k\phi}}. \end{split} \end{equation*} Finally, noticing that the binomial coefficient vanishes for $k=0,1,\dots,m$ we recover Eq. (\ref{eq:expseries_1}). The same procedure for $S^{(\sinh,1)}_{2m+2}(\phi)$ leads to \begin{equation*} S^{(\sinh,1)}_{2m+2}(\phi)=\frac{(2\phi)^{2m+2}}{2}\sum_{k=0}^\infty \left[ \genfrac(){0pt}{0}{k+m+1}{k-m}-\genfrac(){0pt}{0}{k+m-1}{k-m-2}\right]\frac{e^{-k\phi}}{1-e^{-k\phi}}. \end{equation*} Replacing the first binomial factor with \begin{equation*} \genfrac(){0pt}{1}{k+m+1}{k-m}=\genfrac(){0pt}{1}{k+m}{k-m-1}+\genfrac(){0pt}{1}{k+m}{k-m} =\genfrac(){0pt}{1}{k+m-1}{k-m-1}+\genfrac(){0pt}{1}{k+m-1}{k-m-2}+\genfrac(){0pt}{1}{k+m}{k-m} \end{equation*} reproduces Eq. (\ref{eq:expseries_2}) for $\Re(\phi)>0$. Noticing that $S_{2m+2}(\phi)$ is an even function in the exchange $\phi\to-\phi$, while $S^{(\sinh,1)}_{2m+2}(\phi)$ is odd, we obtain Eq. (\ref{eq:expseries_1}) and Eq. (\ref{eq:expseries_2}) for $\Re(\phi)<0$. \end{proof} \begin{lemma}\label{lem:oddbinomial} The binomial $\genfrac(){0pt}{1}{k+m}{k-m-1}$ for integer $m\geq 0$ is an odd polynomial in $k$ of degree $2m+1$. Moreover, the binomial combination $\genfrac(){0pt}{1}{k+m-1}{k-m-1}+\genfrac(){0pt}{1}{k+m}{k-m}$ for integer $m\geq 0$ is an even polynomial in $k$ of degree $2m$. 
\end{lemma} \begin{proof} The fact that they are polynomials of finite degree follows from the definition of the binomial coefficient; consider for instance the first binomial coefficient: \begin{equation*} \begin{split} \genfrac(){0pt}{0}{k+m}{k-m-1}&=\frac{(k+m)!}{(k-m-1)!(2m+1)!}\\ &=\frac{1}{(2m+1)!} \underbrace{(k+m)(k+m-1)\cdots(k+1)k(k-1)\cdots(k-m)}_{2m+1\text{ factors}}. \end{split} \end{equation*} Now, to show that it is odd, we first send $k$ to $-k$ in the previous equation and we obtain \begin{equation*} \genfrac(){0pt}{0}{m-k}{-k-m-1}=\frac{1}{(2m+1)!}(m-k)(m-1-k)\cdots(1-k)(-k)(-k-1)\cdots(-k-m); \end{equation*} then, we gather a $-1$ sign in every round bracket, obtaining an overall $(-1)^{2m+1}=-1$ sign: \begin{equation*} \genfrac(){0pt}{0}{m-k}{-k-m-1}=\frac{-1}{(2m+1)!}(k-m)(k-m+1)\cdots(k-1)k(k+1)\cdots(k+m-1)(k+m); \end{equation*} therefore, by comparison we see that \begin{equation*} \genfrac(){0pt}{0}{k+m}{k-m-1}=-\genfrac(){0pt}{0}{m-k}{-k-m-1} \end{equation*} meaning that $\genfrac(){0pt}{1}{k+m}{k-m-1}$ is odd with respect to $k$. The other binomial combination reads \begin{equation*} \begin{split} \genfrac(){0pt}{0}{k+m-1}{k-m-1}+\genfrac(){0pt}{0}{k+m}{k-m}&=\frac{2k}{(2m)!} \Big[\underbrace{(k+m-1)\cdots(k-m+1)}_{2m-1\text{ factors}}\Big]. \end{split} \end{equation*} Using the same method as before we see that the polynomial inside the square brackets is odd, therefore the combination $\genfrac(){0pt}{1}{k+m-1}{k-m-1}+\genfrac(){0pt}{1}{k+m}{k-m}$ is an even polynomial. \end{proof} Thanks to Lemma \ref{lem:oddbinomial}, we can define the following numbers and reveal the relation between hyperbolic and Lambert series.
\begin{definition}\label{def:coeffbinc} The binomial coefficient $\genfrac(){0pt}{1}{k+m}{k-m-1}$ expressed as a polynomial in $k$ is given by \begin{equation} \label{eq:Coeffbinc} \genfrac(){0pt}{0}{k+m}{k-m-1}=\frac{1}{(2m+1)!}\sum_{i=0}^{m} c_{2i+1}^{(m)} k^{2i+1}; \end{equation} instead, the binomial combination $\genfrac(){0pt}{1}{k+m-1}{k-m-1}+\genfrac(){0pt}{1}{k+m}{k-m}$ is \begin{equation} \label{eq:Coeffbind} \genfrac(){0pt}{0}{k+m-1}{k-m-1}+\genfrac(){0pt}{0}{k+m}{k-m}=\frac{1}{(2m)!}\sum_{i=0}^{m} d_{2i}^{(m)} k^{2i}. \end{equation} \end{definition} \begin{proposition}\label{prop:LambertRel} For $m\geq 0$ integer and $\Re(\phi)\neq 0$, the hyperbolic series (\ref{eq:baseserie_cosh}), (\ref{eq:baseserie_sinh}) and (\ref{eq:baseserie}) are linear combination of the Lambert series $\mathcal{L}_{q}(s)$ in Def.~\ref{def:Lambertserie}, \begin{align} \label{eq:LambertRel_1} S_{2m+2}(\phi)&=\frac{(2\phi)^{2m+2}}{(2m+1)!}\sum_{i=0}^{m} c_{2i+1}^{(m)}\,\mathcal{L}_{e^{-\sigma \phi}}(2i+1),\\ \label{eq:LambertRel_2} S^{(\sinh,1)}_{2m+2}(\phi)&=\frac{(2\sigma \phi)^{2m+1}\phi}{(2m)!}\sum_{i=0}^{m} d_{2i}^{(m)} \mathcal{L}_{e^{-\sigma\phi}}(2i), \end{align} where $\sigma=$sgn$(\Re(\phi))$ and the generating function for $c_i^{(m)}$ is given by (\ref{eq:Coeffbinc}) and for $d_i^{(m)}$ by (\ref{eq:Coeffbind}). \end{proposition} \begin{proof} By Lemma \ref{lem:expseries}, for $\Re(\phi)>0$ the series $S_{2m+2}(\phi)$ is given by (\ref{eq:expseries_1}). Plugging the definition (\ref{eq:Coeffbinc}) in (\ref{eq:expseries_1}) and comparing with Eq. (\ref{eq:Lambertserie}) we readily obtain (\ref{eq:LambertRel_1}). The same argument can be used to obtain (\ref{eq:LambertRel_1}) for $\Re(\phi)<0$ and similarly to derive Eq. (\ref{eq:LambertRel_2}). \end{proof} In Sec.~\ref{sec:gammarep} we find that the values of the coefficients $c_{2i+1}^{(m)}$ and $d_{2i}^{(m)}$ are given in terms of the generalized Bernoulli polynomials, see Eq.~(\ref{eq:relBernGen}). 
For the topics of this section, it suffices to say that $c_{2i+1}^{(m)}$ and $d_{2i}^{(m)}$ are real coefficients. \begin{proposition}\label{prop:analLamb} Let $s$ be any complex number. The Lambert series $\mathcal{L}_q(s)$, as a function of $q$, is an analytic function for any $|q|<1$. \end{proposition} \begin{proof} First, we prove that the function series $\mathcal{L}_q(s)$ is a convergent series. The ratio test on the Lambert series \begin{equation*} \lim_{k\to\infty}\left|\frac{(k+1)^s q^{k+1}}{1-q^{k+1}}\frac{1-q^k}{k^s q^k}\right|=|q| \end{equation*} shows that $\mathcal{L}_q(s)$ is pointwise convergent for $|q|<1$ and $\forall\,s\in\mathbb{C}$. But the Lambert series can also be written as a power series \cite[\S 24.3.3]{abramowitz+stegun}: \begin{equation*} \mathcal{L}_q(s)=\sum_{k=1}^\infty \frac{k^s q^k}{1-q^k}=\sum_{n=1}^\infty \sigma_s(n) q^n\qquad |q|<1. \end{equation*} Therefore, the Lambert series is a convergent power series in $|q|<1$, which proves that it is analytic. \end{proof} \begin{proposition} Choose $q=e^{-i\theta}$ with $\theta$ a positive real number; then the Lambert series of Definition~\ref{def:Lambertserie} is a divergent series. \end{proposition} \begin{proof} In this case the series becomes \begin{equation*} \sum_{k=1}^\infty \frac{k^s}{e^{i k\theta}-1}. \end{equation*} The root test for convergence gives \begin{equation*} \alpha=\limsup_{k\to \infty} \left(\frac{|k^s|}{|e^{i k\theta}-1|}\right)^{1/k} =\limsup_{k\to \infty} \frac{k^{s/k}}{\left[2-2\cos(k\theta)\right]^{1/2k}}. \end{equation*} Since for any $\bar{k}>0$ and small $\epsilon>0$ there exists an integer $n>\bar{k}$ such that $\cos(n\theta)>1-\epsilon$, the sup diverges; that is \begin{equation*} \alpha=\lim_{k\to \infty} \lim_{\epsilon\to 0}\frac{k^{s/k}}{\epsilon^{1/2k}}=\infty. \end{equation*} Therefore the series diverges. \end{proof} As a consequence, the hyperbolic series are also divergent series for $\phi$ on the imaginary axis (except at $\phi=0$).
This also means that for any $\phi$ not on the imaginary axis, the radius of convergence of the hyperbolic series cannot be greater than the distance of $\phi$ from the imaginary axis. This fact prevents the hyperbolic series from being real analytic in an open set containing $\phi=0$. \begin{proposition}\label{prop:analseries} The hyperbolic series (\ref{eq:baseserie_cosh}), (\ref{eq:baseserie_sinh}) and (\ref{eq:baseserie}) are analytic functions of $\phi$ in the complex plane except on the imaginary axis. The hyperbolic series are continuous functions on the real axis. \end{proposition} \begin{proof} Since the hyperbolic series for $\Re(\phi)>0$ ($\Re(\phi)<0$) are linear combinations of Lambert series, which are analytic functions in that domain, they are analytic functions in that region too. To prove that they are continuous functions on the real axis, we show that they converge uniformly in an open set containing the origin. When we set $\phi$ to zero we obtain the well-known series related to the Riemann zeta function $\zeta$: \begin{equation*} \begin{split} S^{(\gamma)}_{2m+2}(0)&=S_{2m+2}(0)=\sum_{n=1}^\infty \frac{2^{2m+2}}{n^{2m+2}}=2^{2m+2}\zeta\left(2m+2\right)\\ &=(-1)^m\frac{2^{2m+1}(2\pi)^{2m+2}B_{2m+2}}{(2m+2)!};\\ S^{(\sinh,\gamma)}_{2m+2}(0)&=0; \end{split} \end{equation*} where $B_n$ are the Bernoulli numbers, Definition \ref{def:bernpoly}. The series are clearly pointwise convergent at $\phi=0$.
When $\phi\neq 0$, to prove absolute convergence it suffices to use the ratio test; for $S_{2m+2}(\phi)$ we have \begin{equation*} \lim_{n\to\infty} \left|\frac{\left(e^{n\phi/2}-e^{-n\phi/2}\right)^{2m+2}}{\left(e^{(n+1)\phi/2}-e^{-(n+1)\phi/2}\right)^{2m+2}}\right| =e^{-|\phi|(m+1)}<1\quad \text{for }m>-1; \end{equation*} instead, for $S^{(\sinh,1)}_{2m+2}(\phi)$ we have \begin{equation*} \lim_{n\to\infty} \left|\frac{\left(e^{n\phi/2}-e^{-n\phi/2}\right)^{2m+2}}{\left(e^{(n+1)\phi/2}-e^{-(n+1)\phi/2}\right)^{2m+2}} \frac{e^{(n+1)\phi}-e^{-(n+1)\phi}}{e^{n\phi}-e^{-n\phi}}\right| =e^{-|\phi|m} \end{equation*} that is smaller than 1, hence absolutely convergent, for $m>1$. For uniform convergence of $S^{(\sinh,1)}_{2m+2}(\phi)$, we notice that for all $n$ and $\phi\in\mathbb{R}$ the functions in the series form a monotonically increasing sequence. Therefore, by Dini's lemma, see \cite[Theorem 7.13 on p. 150]{book:Rudin} and \cite[Theorem 12.1 on p. 157]{book:Jurgen}, the hyperbolic function $S^{(\sinh,1)}_{2m+2}(\phi)$ is uniformly convergent in every compact set contained in $\mathbb{R}$. In particular, we showed that it is uniformly convergent in a set not containing the origin $\phi=0$. Consider now a closed interval $I_0=[-a,a]$ with $a>0$ a real number; we use the Weierstrass M-test to prove uniform convergence of $S_{2m+2}(\phi)$ on $I_0$. Let $f_n$ be the functions in the series $S_{2m+2}(\phi)$: \begin{equation*} f_n(\phi)=\frac{\phi^{2m+2}}{\sinh^{2m+2}(n\phi/2)}. \end{equation*} Notice that $f_n$ has a maximum at $\phi=0$. Then we can build the sequence $M_n$ by \begin{equation*} |f_n(\phi)|\leq |f_n(0)|=\frac{2^{2m+2}}{n^{2m+2}}\equiv M_n,\quad \forall\phi\in I_0. \end{equation*} Since the series $\sum_n M_n$ converges, by the Weierstrass M-test the series converges absolutely and uniformly on $I_0$.
Because the hyperbolic functions are defined by a uniformly convergent series and every function in the sequence is $C^\infty(\mathbb{R})$, the hyperbolic functions are continuous in $I_0$; and because we know that they are analytic in every other part of the real axis, they are continuous on all of $\mathbb{R}$. \end{proof} \section{Bernoulli polynomials representation for a ratio of Gamma functions} \label{sec:gammarep} In this section we give the values of the coefficients $c_{2i+1}^{(m)}$ and $d_{2i}^{(m)}$ of Def.~\ref{def:coeffbinc}, which are needed to take advantage of the linearity relations between the hyperbolic series and the Lambert series. By using a power series representation of the ratio of two gamma functions, we find that these coefficients can be given in terms of the generalized Bernoulli polynomials. First of all, we can invert the definition of the coefficients and compute them through the following derivatives: \begin{equation} \label{eq:CoeffCDDervGamma} \begin{split} c_i^{(m)}&=\frac{(2m+1)!}{i!}\frac{\partial^i}{\partial k^i}\genfrac(){0pt}{0}{k+m}{k-m-1}\Big|_{k=0},\\ d_i^{(m)}&=\frac{(2m)!}{i!}\frac{\partial^i}{\partial k^i}\left[\genfrac(){0pt}{0}{k+m-1}{k-m-1} +\genfrac(){0pt}{0}{k+m}{k-m}\right]\Big|_{k=0}. \end{split} \end{equation} The binomial coefficient can also be written as a ratio of Euler gamma functions: \begin{equation*} \begin{split} c_i^{(m)}&=\frac{1}{i!}\frac{\partial^i}{\partial k^i}\frac{\Gamma(k+m+1)}{\Gamma(k-m)}\Big|_{k=0},\\ d_i^{(m)}&=\frac{1}{i!}\frac{\partial^i}{\partial k^i}\left[\frac{\Gamma(k+m)}{\Gamma(k-m)} +\frac{\Gamma(k+m+1)}{\Gamma(k-m+1)}\right]\Big|_{k=0}. \end{split} \end{equation*} Therefore, if we express the ratio of gamma functions in the previous equation as a power series of $k$, we will readily obtain the coefficients.
Tricomi and Erdélyi~\cite{tricomi1951} gave the asymptotic expansion for the ratio of two gamma functions \begin{equation*} \frac{\Gamma(z+\alpha)}{\Gamma(z+\beta)}\sim\sum_{n=0}^\infty \frac{\Gamma(1+\alpha-\beta)}{\Gamma(\alpha-\beta-n+1)} \frac{B_n^{(\alpha-\beta+1)}(\alpha)}{n!}z^{\alpha-\beta-n} \quad \text{as }z\to\infty, \end{equation*} valid for $\alpha,\beta$ such that $\alpha-\beta\neq -1,-2,\dots$. The asymptotic expansion becomes an exact relation when $\alpha-\beta\geq 0$ is an integer~\cite[Sec 2.11, Eq. (12)]{LukeVol1}. That is the case we are interested in. We can then reformulate the representation of the ratio of two gamma functions of~\cite{tricomi1951} in a form suitable for our scope. \begin{theorem}[Luke 1969 \cite{LukeVol1}]\label{thm:gammaratiorepgen} Given $z\in\mathbb{C}$ and two integer $\alpha,\beta\geq 0$, then \begin{equation} \label{eq:gammaratiorepgen} \frac{\Gamma(z+\alpha)}{\Gamma(z-\beta)}=\sum_{i=0}^{\alpha+\beta} \frac{(\alpha+\beta)!}{i!} \frac{B_{\alpha+\beta-i}^{(1+\alpha+\beta)}(\alpha)}{(\alpha+\beta-i)!} z^{i} \end{equation} where $\Gamma$ is the Euler Gamma. \end{theorem} \begin{proof} We follow the proof of~\cite{tricomi1951} which was based on Watson's lemma. In our case $\beta$ is negative and we are not interested in the value for $z\to\infty$ and $\alpha$ and $\beta$ are integers. Furthermore, our case is much more simple because we know that the ratio $\Gamma(z+\alpha)/\Gamma(z-\beta)$ is analytic and can be written as a power series: \begin{equation} \label{eq:coeffgamma} \frac{\Gamma(z+\alpha)}{\Gamma(z-\beta)}=\sum_{i=0}^{\infty} \gamma_{i} z^{i}. 
\end{equation} Following~\cite{tricomi1951}, for every $\alpha$ and $\beta$ such that $\alpha+\beta\neq-1,-2,\dots$ and for every $z$ in the complex plane which does not lie on the real half-line from $-\alpha$ to $-\alpha-\infty$, the ratio between gamma functions has the integral representation \begin{equation*} \begin{split} \frac{\Gamma(z+\alpha)}{\Gamma(z-\beta)}=&\frac{\Gamma(1+\alpha+\beta)}{2\pi\,i} \int_{-\infty\cdot e^{i\delta}}^{(0^+)}\frac{e^{z\, t}\,e^{\alpha\, t}}{(e^t-1)^{1+\alpha+\beta}}{\rm d} t\\ =&\frac{\Gamma(1+\alpha+\beta)}{2\pi\,i}\int_{-\infty\cdot e^{i\delta}}^{(0^+)} \frac{e^{z\, t}}{t^{1+\alpha+\beta}}\left(\frac{t}{e^t-1}\right)^{1+\alpha+\beta}e^{\alpha\, t}\,{\rm d} t, \end{split} \end{equation*} where $-\pi/2<\delta<\pi/2$. The coefficient $\gamma_i$ of Eq.~(\ref{eq:coeffgamma}) can be found through \begin{equation*} \gamma_{i}=\lim_{z\to 0} \frac{\gamma_{i}(z)}{i!} \end{equation*} where $\gamma_{i}(z)$ is the derivative \begin{equation*} \begin{split} \gamma_{i}(z)=&\frac{{\rm d}^{i}}{{\rm d} z^{i}}\frac{\Gamma(z+\alpha)}{\Gamma(z-\beta)}\\ =&\frac{\Gamma(1+\alpha+\beta)}{2\pi\,i}\int_{-\infty\cdot e^{i\delta}}^{(0^+)} \frac{e^{z\, t}}{t^{1+\alpha+\beta-i}}\left(\frac{t}{e^t-1}\right)^{1+\alpha+\beta}e^{\alpha\, t}\,{\rm d} t. \end{split} \end{equation*} In the previous equation we recognize the generating function of the generalized Bernoulli polynomials (Def.~\ref{def:bernpoly}), from which the derivative becomes \begin{equation*} \gamma_{i}(z)=\sum_{n=0}^\infty \frac{\Gamma(1+\alpha+\beta)}{n!}B_{n}^{(1+\alpha+\beta)}(\alpha)\frac{1}{2\pi\,i} \int_{-\infty\cdot e^{i\delta}}^{(0^+)}\frac{e^{z\, t}}{t^{1+\alpha+\beta-i-n}} {\rm d} t.
\end{equation*} Then, using the integral~\cite{tricomi1951} \begin{equation*} \frac{1}{2\pi\,i}\int_{-\infty\cdot e^{i\delta}}^{(0^+)}\frac{e^{z\, t}}{t^{a+1}} {\rm d} t= \frac{z^a}{\Gamma(a+1)} \end{equation*} we obtain \begin{equation*} \gamma_{i}(z)=\sum_{n=0}^{\infty} \frac{\Gamma(1+\alpha+\beta)}{\Gamma(1+\alpha+\beta-i-n)}\frac{B_{n}^{(1+\alpha+\beta)}(\alpha)}{n!} z^{\alpha+\beta-i-n}. \end{equation*} In the limit $z\to 0$ we are just left with the term in $n=\alpha+\beta-i$, therefore the coefficient of the series (\ref{eq:coeffgamma}) is \begin{equation*} \gamma_{i}=\frac{(\alpha+\beta)!}{i!} \frac{B_{\alpha+\beta-i}^{(1+\alpha+\beta)}(\alpha)}{\Gamma(1+\alpha+\beta-i)}. \end{equation*} At last, noticing that \begin{equation*} 1/\Gamma(1+\alpha+\beta-i)=0\quad \text{for }i>\alpha+\beta, \end{equation*} we have that the power series stops at $i=\alpha+\beta$. \end{proof} In the same way we can prove the following proposition. \begin{proposition}\label{prop:gammaratiorep} Given $z\in\mathbb{C}$ and an integer $m$, then \begin{equation} \label{eq:gammaratiorepEven} \frac{\Gamma(z+m+1)}{\Gamma(z-m)}=\sum_{i=0}^{m} \frac{(2m+1)!}{(2i+1)!} \frac{B_{2m-2i}^{(2m+2)}(m+1)}{(2m-2i)!} z^{2i+1} \end{equation} and \begin{equation} \label{eq:gammaratiorepOdd} \begin{split} \frac{\Gamma(z+m)}{\Gamma(z-m)}+\frac{\Gamma(z+m+1)}{\Gamma(z-m+1)}= &\sum_{i=0}^{m} \frac{2(2m)!}{(2i)!(2m-2i)!} B_{2m-2i}^{(2m+1)}(m) z^{2i}. \end{split} \end{equation} \end{proposition} \begin{proof} These are just special cases of Theorem \ref{thm:gammaratiorepgen}. We already know from Lemma~\ref{lem:oddbinomial} that the ratio $\Gamma(z+m+1)/\Gamma(z-m)$ is an odd polynomial of degree $2m+1$. Choosing $\alpha=m+1$ and $\beta=m$ in Theorem \ref{thm:gammaratiorepgen} we find (\ref{eq:gammaratiorepEven}). It also follows from Lemma~\ref{lem:oddbinomial} that (\ref{eq:gammaratiorepOdd}) is an even polynomial of degree $2m$.
Following the proof of Theorem \ref{thm:gammaratiorepgen} we end up with \begin{equation*} \begin{split} \frac{\Gamma(z+m)}{\Gamma(z-m)}+\frac{\Gamma(z+m+1)}{\Gamma(z-m+1)}=&\sum_{i=0}^{m} \frac{(2m)!}{(2i)!(2m-2i)!} \left(B_{2m-2i}^{(2m+1)}(m)\right.\\ &\left.+ B_{2m-2i}^{(2m+1)}(m+1) \right) z^{2i}. \end{split} \end{equation*} Then from the definition of the generalized Bernoulli polynomials in Def.~\ref{def:bernpoly} it clearly follows that~\cite{LukeVol1} \begin{equation*} B^{(m)}_n(t)=(-1)^n B_n^{(m)}(m-t), \end{equation*} and in particular that $B_{2m-2i}^{(2m+1)}(m)=B_{2m-2i}^{(2m+1)}(m+1)$. \end{proof} The coefficients $c_{2k+1}^{(m)}$ and $d_{2k}^{(m)}$ immediately follow from Proposition~\ref{prop:gammaratiorep}. \begin{proposition}\label{prop:relCBernGen} The coefficients of linearity between the hyperbolic series and the Lambert series of Def.~\ref{def:coeffbinc} are \begin{equation} \label{eq:relBernGen} \begin{split} c_{2k+1}^{(m)}&=\frac{(2m+1)!}{(2k+1)!}\frac{B_{2m-2k}^{(2m+2)}(m+1)}{(2m-2k)!}\quad k\geq 0,\\ d_{2k}^{(m)}&=\frac{2(2m)!}{(2k)!(2m-2k)!}B_{2m-2k}^{(2m+1)}(m)\quad k\geq 0. \end{split} \end{equation} \end{proposition} From the representation of the ratio of two gamma functions in Theorem \ref{thm:gammaratiorepgen} we can also find some special zeros of the generalized Bernoulli polynomials. \begin{lemma}\label{lemma:ZerosGenBer} For any integer $m\geq 1$ the following are zeros of the generalized Bernoulli polynomials \begin{equation} \label{eq:ZerosBern} B^{(2m+1)}_{2m}\left(m\right)=0. \end{equation} \end{lemma} \begin{proof} We recall that from Proposition \ref{prop:gammaratiorep}, given an integer $m$, we have \begin{equation*} \begin{split} \frac{\Gamma(z+m)}{\Gamma(z-m)}+\frac{\Gamma(z+m+1)}{\Gamma(z-m+1)}= &\sum_{i=0}^{m} \frac{2(2m)!}{(2i)!(2m-2i)!} B_{2m-2i}^{(2m+1)}(m) z^{2i}.
\end{split} \end{equation*} If we evaluate the previous expression at $z=0$ for integer $m\geq 1$, the two ratios of gamma functions are vanishing \begin{equation*} \frac{\Gamma(m)}{\Gamma(-m)}+\frac{\Gamma(m+1)}{\Gamma(-m+1)}= 2\Gamma(m)\Gamma(m+1)\frac{\sin(m\pi)}{\pi}=0 \end{equation*} and the sum contains only one term: \begin{equation*} 0=\frac{2(2m)!}{(2m)!}B^{(2m+1)}_{2m}\left(m\right), \end{equation*} which gives the zeros in (\ref{eq:ZerosBern}). \end{proof} To find some special zeros of the polynomial $\mathcal{B}_{2m+2}(\phi)$, coming from the functional relation (\ref{eq:funcrelS}), we need to prove the following identity, which uses the previous lemma. \begin{lemma} For every integer $m$ such that $m\geq 0$, the following identity holds true \begin{equation} \label{eq:IdentZeros} \sum_{k=0}^{m+1}\frac{B_{2k}\,B_{2m+2-2k}^{(2m+2)}(m+1)}{(2k)!(2m+2-2k)!}=0. \end{equation} \end{lemma} \begin{proof} Consider the function \begin{equation} \label{auxf} f(x)=\left(\frac{x}{e^x-1}+\frac{x}{2}\right)\left(\frac{x\,e^{x/2}}{e^x-1}\right)^{2m+2}. \end{equation} The function $f$ admits an expansion in power series of $x$ \begin{equation} \label{svilf} f(x)=\sum_{k=0}^\infty d_k\, x^k \end{equation} which is converging for $|x|<2\pi$. Let us focus just on the even powers of $x$, i.e. on $d_{2k}$. By expanding the product in (\ref{auxf}) as \begin{equation*} f(x)=\frac{x}{e^x-1}\left(\frac{x\,e^{x/2}}{e^x-1}\right)^{2m+2} +\frac{x}{2}\left(\frac{x\,e^{x/2}}{e^x-1}\right)^{2m+2}, \end{equation*} we see that the second term is odd, so it does not contribute to the even coefficients $d_{2k}$.
The first term instead can be expanded with the generalized Bernoulli polynomials \begin{equation*} \frac{x}{e^x-1}\left(\frac{x\,e^{x/2}}{e^x-1}\right)^{2m+2}= \left(\frac{x}{e^x-1}\right)^{2m+3} e^{(m+1)x}= \sum_{k=0}^\infty \frac{1}{k!}B^{(2m+3)}_{k}(m+1) x^k \end{equation*} from which, thanks to lemma~\ref{lemma:ZerosGenBer}, follows that for every integer $m\geq 0$ \begin{equation*} d_{2m+2}=\frac{1}{(2m+2)!}B^{(2m+3)}_{2m+2}(m+1)=0. \end{equation*} The $f$ function has been built such that the first factor in brackets of (\ref{auxf}) is the generating function of Bernoulli numbers of which we have subtracted the only odd term in $x$: \begin{equation*} \frac{x}{e^x-1}+\frac{x}{2}=\sum_{i=0}^\infty \frac{B_{2i}}{(2i)!} x^{2i}; \end{equation*} instead, the second brackets is the generating function of Bernoulli polynomials involved in the identity we want to prove: \begin{equation*} \left(\frac{x\,e^{x/2}}{e^x-1}\right)^{2m+2}=\sum_{j=0}^\infty \frac{B^{(2m+2)}_{2j}(m+1)}{(2j)!} x^{2j}. \end{equation*} Using the Cauchy product on (\ref{auxf}) we therefore obtain the expansion \begin{equation*} f(x)=\sum_{k=0}^\infty \sum_{l=0}^k \frac{B_{2l}}{(2l)!} \frac{B^{(2m+2)}_{2k-2l}(m+1)}{(2k-2l)!} x^{2k}. \end{equation*} Equating this expansion with the one in (\ref{svilf}), we obtain an expression for the vanishing coefficient $d_{2m+2}$ \begin{equation*} \sum_{l=0}^{m+1} \frac{B_{2l}}{(2l)!} \frac{B^{(2m+2)}_{2m+2-2l}(m+1)}{(2m+2-2l)!}=d_{2m+2}=0 \end{equation*} which is the identity we wanted. \end{proof} That is all we need to know to study the functional relations of the hyperbolic series. However, the theorem \ref{thm:gammaratiorepgen} and alternative way of finding the coefficients (\ref{eq:relBernGen}) make it possible to establish several identities between the generalized Bernoulli polynomials and other special functions. 
Those relations are established in the next section, while the functional relations of the hyperbolic series are derived in Sec.~\ref{sec:FuncRel}. \section{Bernoulli polynomial identities} \label{sec:IdenBer} The coefficients in Proposition \ref{prop:LambertRel} are given by (\ref{eq:relBernGen}) in terms of the generalized Bernoulli polynomials of the type \begin{equation*} B_n^{(2m+2)}(m+1),\qquad B_n^{(2m+1)}(m). \end{equation*} These define a new class of polynomials in $m$ called the reduced Bernoulli polynomials~\cite{2016Elezovic}. The representation of the ratio of two gamma functions (\ref{eq:gammaratiorepgen}) and different methods to derive the coefficients $c_i^{(m)}$ and $d_i^{(m)}$ of Proposition \ref{prop:LambertRel} allow us to establish identities for ordinary Bernoulli polynomials and reduced Bernoulli polynomials. For instance, the coefficients can also be derived either by using the Polygamma function \begin{equation*} \psi^{(s-1)}(z)=\frac{\partial^s}{\partial z^s}\log\left(\Gamma(z)\right) \end{equation*} or by the Pochhammer symbol $(x)_n$, which is related to the generalized Bernoulli polynomials by~\cite{LukeVol1} \begin{equation*} (x)_n=\frac{\Gamma(x+n)}{\Gamma(x)}=(-1)^{n-1}B_{n-1}^{(n)}(x).
\end{equation*} \begin{lemma}\label{lemma:relCBernGen} The coefficients of linearity between hyperbolic series and Lambert series of Def.~\ref{def:coeffbinc} are \begin{equation} \label{eq:CoeffCBell} \begin{split} c_{i}^{(m)}=&\frac{\Gamma(m+1)}{\Gamma(-m)(i!)}\sum_{l=0}^i \sum_{s=1}^{i-l} \sum_{j=1}^{l}(-1)^j \genfrac(){0pt}{0}{i}{l} Y_{i-l,s}\left(\psi^{(0)}(m+1),\dots,\psi^{(i-l-s)}(m+1)\right)\\ &\times Y_{l,j}\left(\psi^{(0)}(-m),\dots,\psi^{(l-j)}(-m)\right),\\ d_{i}^{(m)}=&\sum_{l=0}^i \sum_{s=1}^{i-l} \sum_{j=1}^{l}\frac{(-1)^j}{i!} \genfrac(){0pt}{0}{i}{l} \left\{\frac{\Gamma(m)}{\Gamma(-m)} Y_{i-l,s}\left(\psi^{(0)}(m),\dots,\psi^{(i-l-s)}(m)\right)\right.\\ &\times Y_{l,j}\left(\psi^{(0)}(-m),\dots,\psi^{(l-j)}(-m)\right)\\ &+\frac{\Gamma(m+1)}{\Gamma(1-m)} Y_{i-l,s}\left(\psi^{(0)}(m+1),\dots,\psi^{(i-l-s)}(m+1)\right)\\ &\left.\times Y_{l,j}\left(\psi^{(0)}(1-m),\dots,\psi^{(l-j)}(1-m)\right)\right\}. \end{split} \end{equation} where $Y_{n,k}$ are the incomplete Bell polynomials \begin{equation*} \begin{split} Y_{n,k}(x_{1},x_{2},\dots ,x_{n-k+1})=&\sum \frac{n!}{j_{1}!j_{2}!\cdots j_{n-k+1}!}\times\\ &\times\left(\frac{x_1}{1!}\right)^{j_{1}}\left(\frac{x_2}{2!}\right)^{j_{2}}\cdots \left(\frac{x_{n-k+1}}{(n-k+1)!}\right)^{j_{n-k+1}}. \end{split} \end{equation*} Instead, in terms of the ordinary Bernoulli polynomials and the Stirling numbers of the first kind $s(n,k)=\genfrac[]{0pt}{1}{n}{k}$, the coefficients are given by \begin{equation} \label{eq:CoeffCBern} \begin{split} c_i^{(m)}=&\frac{1}{i!}\sum_{j=i-1}^{2m}\frac{2m+1}{j+1}\genfrac[]{0pt}{0}{2m}{j}\,(j+2-i)_i \, B_{j+1-i}(m),\\ d_i^{(m)}=&\frac{1}{i!}\sum_{j=i-1}^{2m-1}\frac{4m}{j+1}\genfrac[]{0pt}{0}{2m-1}{j}\,(j+2-i)_i \, B_{j+1-i}(m). \end{split} \end{equation} \end{lemma} \begin{proof} Equations (\ref{eq:CoeffCBell}) are simply obtained by evaluating the derivatives in (\ref{eq:CoeffCDDervGamma}) using the Faà di Bruno formula.
Using the general Leibniz rule inside (\ref{eq:CoeffCDDervGamma}), the coefficient is written as \begin{equation} \label{eq:FaaCoeff} \begin{split} c_i^{(m)}=&\frac{1}{i!}\frac{\partial^i}{\partial k^i}\frac{\Gamma(k+m+1)}{\Gamma(k-m)}\Big|_{k=0}\\ =&\frac{1}{i!}\sum_{l=0}^i \genfrac(){0pt}{0}{i}{l}\left[\left(\frac{{\rm d}^{i-l} }{{\rm d} k^{i-l}} \Gamma(k+m+1)\right) \left(\frac{{\rm d}^{l} }{{\rm d} k^{l}}\frac{1}{\Gamma(k-m)}\right)\right]_{k=0}. \end{split} \end{equation} Consider now the derivative of the Gamma function. In terms of the Polygamma function, if we set \begin{equation*} q(y)=e^y,\qquad g(z)=\log \Gamma(z) \end{equation*} then the Faà di Bruno formula for the derivative of the Gamma function gives \begin{equation*} \begin{split} \frac{\partial^n}{\partial z^n}\Gamma(z)=&\frac{\partial^n}{\partial z^n}e^{\log \Gamma(z)}=\frac{{\rm d}^n}{{\rm d} z^n}q(g(z))\\ =&\sum_{s=1}^n q^{(s)}(g(z)) Y_{n,s}\left(g^{(1)}(z),g^{(2)}(z),\dots,g^{(n-s+1)}(z)\right), \end{split} \end{equation*} where we defined \begin{equation*} \begin{split} q^{(s)}(y)=\frac{\partial^s}{\partial y^s }q(y)=e^y,\quad q^{(s)}(g(z))=\Gamma(z),\quad g^{(s)}(z)=\psi^{(s-1)}(z). \end{split} \end{equation*} Therefore, we have \begin{equation} \label{eq:FaaGamma} \frac{\partial^n}{\partial z^n}\Gamma(z)=\Gamma(z)\sum_{s=1}^n Y_{n,s}\left(\psi^{(0)}(z),\psi^{(1)}(z),\dots,\psi^{(n-s)}(z)\right). \end{equation} Similarly, choosing $q(y)=e^{-y}$, for the derivative of the inverse Gamma function we find \begin{equation} \label{eq:FaaInvGamma} \frac{\partial^n}{\partial z^n}\frac{1}{\Gamma(z)}=\frac{1}{\Gamma(z)}\sum_{s=1}^n (-1)^s Y_{n,s}\left(\psi^{(0)}(z),\psi^{(1)}(z),\dots,\psi^{(n-s)}(z)\right). \end{equation} Plugging (\ref{eq:FaaGamma}) and (\ref{eq:FaaInvGamma}) in (\ref{eq:FaaCoeff}) proves the lemma for $c_i^{(m)}$. The same procedure gives $d_i^{(m)}$.
The Eq.s (\ref{eq:CoeffCBern}) are obtained taking advantage of the relation between the Pochhammer symbol and the Bernoulli polynomials: \begin{equation*} \frac{\Gamma(x+1)}{\Gamma(x-n+1)}=(x)_n=\sum_{j=0}^{n-1}\frac{n}{j+1}\genfrac[]{0pt}{0}{n-1}{j} \left( B_{j+1}(x)-B_{j+1} \right). \end{equation*} The coefficient (\ref{eq:CoeffCDDervGamma}) can then be written as \begin{equation*} \begin{split} c_i^{(m)}=&\frac{1}{i!}\frac{\partial^i}{\partial k^i}\frac{\Gamma(k+m+1)}{\Gamma(k-m)}\Big|_{k=0} =\frac{1}{i!}\left(\frac{\partial^i}{\partial k^i} (k+m)_{2m+1}\right)\Big|_{k=0}\\ =&\frac{1}{i!}\sum_{j=0}^{2m}\frac{2m+1}{j+1}\genfrac[]{0pt}{0}{2m}{j} \left( \frac{\partial^i}{\partial k^i} B_{j+1}(k+m) \right)\Big|_{k=0}. \end{split} \end{equation*} Therefore, making use of \begin{equation*} \frac{{\rm d}}{{\rm d} x}B_n(x)=\begin{cases} n B_{n-1}(x) & n\geq 1\\ 0 & n=0 \end{cases} \end{equation*} we obtain \begin{equation*} \frac{{\rm d}^s}{{\rm d} x^s}B_n(x)=\begin{cases} (n+1-s)_s \, B_{n-s}(x) & n\geq s\\ 0 & n<s \end{cases} \end{equation*} and finally the (\ref{eq:CoeffCBern}). \end{proof} Since the coefficient $c_i^{(m)}$ ($d_i^{(m)}$) is vanishing for any even (odd) $i$, we can immediately have the result for those cases. \begin{corollary} \label{cor:VanishingBernSum} For all integers $m\geq 0$ and $i\leq m$, the following sums are vanishing \begin{equation} \label{eq:VanishingBernSum} \begin{split} \sum_{j=2i-1}^{2m}\frac{1}{j+1}\genfrac[]{0pt}{0}{2m}{j}\,(j+2-2i)_{2i} \, B_{j+1-2i}(m)=&0,\\ \sum_{j=2i}^{2m-1}\frac{1}{j+1}\genfrac[]{0pt}{0}{2m-1}{j}\,(j+1-2i)_{2i+1} \, \left(B_{j-2i}(m)+B_{j-2i}(m-1)\right)=&0. 
\end{split} \end{equation} \end{corollary} Furthermore equating the different expression for the coefficients (\ref{eq:relBernGen}) (\ref{eq:CoeffCBell}) and (\ref{eq:CoeffCBern}), we can establish several identities relating the reduced Bernoulli polynomials to the ordinary Bernoulli polynomials and we can give the form of reduced Bernoulli polynomials in terms of Harmonic numbers. Equating (\ref{eq:relBernGen}) with (\ref{eq:CoeffCBern}) we readily obtain the following identities. \begin{proposition} \label{Prop:GenOrdBernIdent} For all integers $m\geq 0$ and $n\leq m$, \begin{equation} \label{eq:GenOrdBernIdent} \begin{split} B_{2n}^{(2m+2)}(m+1)=&\frac{(2n)!}{(2m)!}\sum_{j=2m-2n}^{2m}\frac{1}{j+1} \genfrac[]{0pt}{0}{2m}{j}\\ &\times(j+2n-2m+1)_{2m-2n+1} B_{j+2n-2m}(m),\\ B_{2n}^{(2m+1)}(m)=&\frac{(2n)!}{(2m-1)!}\sum_{j=2m-1-2n}^{2m-1}\frac{1}{j+1} \genfrac[]{0pt}{0}{2m-1}{j}\\ &\times(j+2n-2m+2)_{2m-2n} B_{j+2n-2m+1}(m). \end{split} \end{equation} \end{proposition} \begin{proposition} \label{Prop:BernHarmonicIdent} Given an integer $m\geq 0$, the generalized Bernoulli polynomials of even order have the following special values: \begin{equation} \label{eq:BernEvenHarmonic} \begin{split} B^{(2m+2)}_{2m}(m+1)&=\frac{(-1)^m (m!)^2}{2m+1}, \\ B^{(2m+2)}_{2m-2}(m+1)&=\frac{(-1)^{m+1}(2m-2)!}{(2m+1)!} (m!)^2 6 H_{m}^{(2)},\\ B^{(2m+2)}_{2m-4}(m+1)&=\frac{(-1)^{m}5!(2m-4)!}{2(2m+1)!} (m!)^2 \left({H_m^{(2)}}^2-H_m^{(4)}\right),\\ B^{(2m+2)}_{2m-6}(m+1)&=\frac{(-1)^{m+1}7!(2m-6)!}{6(2m+1)!} (m!)^2 \left({H_m^{(2)}}^3 -3H_m^{(2)} H_m^{(4)}+2H_m^{(6)}\right);\\ \vdots \end{split} \end{equation} and the special values for Bernoulli polynomials of even order are \begin{equation} \label{eq:BernOddHarmonic} \begin{split} B_0^{(1)}(0)=&1,\quad B^{(2m+1)}_{2m}(m)=0\quad \text{for }m\geq 1, \\ B^{(2m+1)}_{2m-2}(m)&=\frac{(-1)^{m+1}2(2m-2)!}{(2m)!} ((m-1)!)^2,\\ B^{(2m+1)}_{2m-4}(m)&=\frac{(-1)^{m}4!(2m-4)!}{(2m)!} ((m-1)!)^2 {H_{m-1}^{(2)}},\\ 
B^{(2m+1)}_{2m-6}(m)&=\frac{(-1)^{m+1}6!(2m-6)!}{2(2m)!} ((m-1)!)^2 \left({H_m^{(2)}}^2-H_m^{(4)}\right).\\ \vdots \end{split} \end{equation} \end{proposition} \begin{proof} It is possible to compute the coefficient $c_{2i+1}^{(m)}$ for a generic integer $m$ and for a specific value of $i$ from its representation in (\ref{eq:CoeffCBell}). The Gamma function in the denominator of (\ref{eq:CoeffCBell}) can be moved into the numerator using the reflection formula \begin{equation*} \Gamma (1-z)\Gamma (z)=\frac{\pi}{\sin(\pi z)},\qquad z\not \in \mathbb {Z}. \end{equation*} Differentiating the previous formula, we obtain the reflection formula for the Polygamma function \begin{equation*} \psi(1-z)-\psi(z)=\pi\cot(\pi z) \end{equation*} and differentiating again \begin{equation*} (-1)^n\psi^{(n)}(1-z)-\psi^{(n)}(z)=\frac{{\rm d}^n}{{\rm d} z^n}\pi\cot(\pi z). \end{equation*} Using the reflection formula for the Polygamma functions we can cancel the apparent divergences coming from the $\psi^{(n)}(-m)$ inside the Bell polynomials. Then, the non-diverging Polygamma functions can be written in terms of the zeta function and of $H_{z}^{(r)}$, the harmonic numbers of order $r$: \begin{equation*} \psi^{(n)}(z)=(-1)^{n+1}n!\left(\zeta(n+1)-H_{z-1}^{(n+1)}\right). \end{equation*} We evaluated the coefficient $c_{2i+1}^{(m)}$ for particular values of $i$ following these steps and we found: \begin{equation} \label{coeffcH} \begin{split} c_1^{(m)}&=\cos(\pi\,m)\Gamma^2 (m+1)=(-1)^m (m!)^2, \\ c_3^{(m)}&=(-1)^{m+1} (m!)^2 H_m^{(2)},\quad m\geq 1,\\ c_5^{(m)}&=\frac{(-1)^{m}}{2} (m!)^2 \left({H_m^{(2)}}^2-H_m^{(4)}\right),\quad m\geq 2,\\ c_7^{(m)}&=\frac{(-1)^{m+1}}{6} (m!)^2 \left({H_m^{(2)}}^3 -3H_m^{(2)} H_m^{(4)}+2H_m^{(6)}\right),\quad m\geq 3,\\ \vdots\,\,. \end{split} \end{equation} Equating the previous equations with the ones in (\ref{eq:relBernGen}) we obtain (\ref{eq:BernEvenHarmonic}).
Instead for the coefficient $d_i^{(m)}$ we find \begin{equation*} \begin{split} d_0^{(0)}&=2,\quad d_0^{(m)}=0\quad m\geq 1, \\ d_2^{(m)}&=(-1)^{m+1}2((m-1)!)^2,\quad m\geq 1,\\ d_4^{(m)}&=(-1)^{m}2((m-1)!)^2 H_{m-1}^{(2)},\quad m\geq 2,\\ d_6^{(m)}&=(-1)^{m+1}2((m-1)!)^2 \left({H_m^{(2)}}^2-H_m^{(4)}\right),\quad m\geq 3,\\ \vdots\,\,. \end{split} \end{equation*} In this way we could obtain all the values $B^{(2m+1)}_{2m-2k}(m)$ for a chosen $k$ as in (\ref{eq:BernOddHarmonic}). \end{proof} Before moving to the functional relations of the hyperbolic series we anticipate that the polynomial $\mathcal{B}$ in (\ref{eq:PolynB}) is derived evaluating the residue of a function in a specific point. The details of the calculation of the residue are reported in the Appendix~\ref{sec:residue}. There, it is evaluated in two different methods. When equating the two results one found a relation between the reduced Bernoulli polynomials and the ordinary Bernoulli polynomial. \begin{proposition}\label{prop:RelOrdGenBer} Given an integer $n$, the reduced Bernoulli polynomial is \begin{equation} \label{eq:RelOrdGenBer} B_n^{(2m+2)}(m+1)=\sum_{s=1}^n \frac{(2m+2)!}{(2m+2-s)!} Y_{n,s}\left(B_1(1/2),B_2(1/2),\dots,B_{n-s+1}(1/2)\right). \end{equation} \end{proposition} The previous identity generates the reduced Bernoulli polynomials similarly to the formula in~\cite[Eq. (23)]{2016Elezovic} \begin{equation*} B_{2n}^{(2m+2)}(m+1)=-\frac{m+1}{n}\sum_{k=0}^{n-1}\genfrac(){0pt}{0}{2n}{2k} B_{2n-2k} B_{2k}^{(2m+2)}(m+1). 
\end{equation*} \begin{corollary}\label{cor:SpecialValuesGenBer} From (\ref{eq:RelOrdGenBer}) we obtain \begin{align*} B_0^{(2m+2)}(m+1) = & 1,\quad B_{2n+1}^{(2m+2)}(m+1)=0,\\ B_2^{(2m+2)}(m+1) = & -\frac{m+1}{6},\\ B_4^{(2m+2)}(m+1) = & \frac{5 m^2+11 m+6}{60},\\ B_6^{(2m+2)}(m+1) = & \frac{-35 m^3-126 m^2-151 m-60}{504},\\ B_8^{(2m+2)}(m+1) = & \frac{175 m^4+910 m^3+1781 m^2+1550 m+504}{2160},\\ B_{10}^{(2m+2)}(m+1) = & \frac{-385 m^5-2695 m^4-7601 m^3-10769 m^2-7638 m-2160}{3168}.\\ \vdots \end{align*} \end{corollary} \section{Functional relations for hyperbolic series} \label{sec:FuncRel} We give two different proofs of the functional relations for the hyperbolic series. In the first one, the functional relation is derived by using the summation theorem applied to the sum that defines the hyperbolic series. In the second one, the functional relation is inherited from the functional relation for the Lambert series. Before doing that we prove the functional relation of the Lambert series using the summation theorem. \begin{theorem}[see \cite{BerndtVol2} Entry 13 Chapter 14]\label{thm:funcrelLambert} Let $\phi$ be a complex number such that $\Re(\phi)>0$ and let $m\geq 0$ be an integer, then the Lambert series satisfies the functional equation \begin{equation} \label{eq:funcrelL} \begin{split} \mathcal{L}_{e^{-\phi}}(2m+1)=&-(-1)^m\left(\frac{2\pi}{\phi}\right)^{2m+2}\mathcal{L}_{e^{-4\pi^2/\phi}}(2m+1) -\frac{\delta_{m,0}}{2\phi}+\\ &+\frac{1}{2}\frac{B_{2m+2}}{2m+2}\left[1+\frac{(-1)^m(2\pi)^{2m+2}}{\phi^{2m+2}}\right]. \end{split} \end{equation} \end{theorem} \begin{proof} From the definition of Lambert series we define a function $f(\phi,k)$ as \begin{equation*} \mathcal{L}_{e^{-\phi}}(2m+1)=\sum_{k=1}^\infty \frac{k^{2m+1}e^{-\phi k}}{1-e^{-\phi k}} =\sum_{k=1}^\infty f(\phi,k). \end{equation*} Notice that if we exchange $k$ with $-k$ in $f$ we obtain the identity \begin{equation} \label{eq:ReflectionfLam} f(\phi,-k)=f(\phi,k)+k^{2m+1}.
\end{equation} Then, from (\ref{eq:ReflectionfLam}) if we sum on all positive and negative $k$ we obtain \begin{equation*} \sum_{k=-\infty,k\neq 0}^\infty \frac{k^{2m+1}e^{-\phi k}}{1-e^{-\phi k}}=2\mathcal{L}_{e^{-\phi}}(2m+1) +\sum_{k=1}^\infty k^{2m+1}. \end{equation*} The last term is divergent and does not depend on $\phi$, we denote it with \begin{equation*} D_1(2m+1)=\sum_{k=1}^\infty k^{2m+1}. \end{equation*} The Lambert series is nevertheless finite and can be obtained as \begin{equation*} \mathcal{L}_{e^{-\phi}}(2m+1)=\frac{1}{2}\sum_{k=-\infty,k\neq 0}^\infty f(\phi,k) - \frac{1}{2}D_1(2m+1). \end{equation*} To adopt the summation theorem we promote $k$ to a complex variable $z$. The function $f(\phi,z)$ has poles at $z=2\pii n/\phi$ for $n=1,2,3,\dots$; in particular it does not have poles on the real axis. Consider now the meromorphic function $h$ \begin{equation*} h(z)=\frac{2\pi\,i}{e^{2\pi\,i\, z}-1}; \end{equation*} whose only poles are single poles at the integers (including the zero) where the residues are all 1. Let $C_N$ be a rectangular closed curve enclosing $-N,-N+1,\dots,0,1,$ $\dots,N$ and cutting the imaginary axis in $\pm i\epsilon$ with $\epsilon<\Im(2\pii/\phi)$ (see Figure~\ref{fig:PathCN}), then the residue theorem gives: \begin{equation*} \sum_{k=-N,k\neq 0}^N f(\phi,k)=\oint_{C_N}f(\phi,z) h(z)\frac{{\rm d} z}{2\pii} -\text{Res}\left[h(z)f(\phi,z),z=0\right]. \end{equation*} \begin{figure} \caption{The $C_N$ path. If you choose $\epsilon<\Im(2\pii/\phi)$ none of the poles of $f$ is enclosed by $C_N$. } \label{fig:PathCN} \end{figure} The summation theorem~\cite{book:145191} is obtained by performing the limit for $N\to\infty$.
In this case, as seen earlier, the infinite sum $\sum_{k\neq 0} f(\phi, k)$ reproduces the Lambert series: \begin{equation*} \mathcal{L}_{e^{-\phi}}(2m+1)= \frac{1}{2}\oint_C f(\phi,z) h(z)\frac{{\rm d} z}{2\pii} -\frac{1}{2}\text{Res}\left[h(z)f(\phi,z),z=0\right]-\frac{1}{2}D_1(2m+1) \end{equation*} where we denote with Res$\left[f(z),z=z_0\right]$ the residue of $f$ at $z_0$ and $C$ is the limit of the closed path $C_N$ for $N\to\infty$ and $\epsilon$ approaching $0^+$. It is easy to verify that \begin{equation*} -\frac{1}{2}\text{Res}\left[h(z)f(\phi,z),z=0\right]=-\frac{\delta_{m,0}}{2\phi}. \end{equation*} The integral on the complex path can be decomposed into the four segments of the rectangle: \begin{equation*} \begin{split} \oint_C f(\phi,z) h(z)\frac{{\rm d} z}{2\pii}=& \int_{-\infty-i 0^+}^{+\infty-i 0^+} f(\phi,z) h(z)\frac{{\rm d} z}{2\pii} +\int_{+\infty+i 0^+}^{-\infty+i 0^+} f(\phi,z) h(z)\frac{{\rm d} z}{2\pii}\\ &+\int_{V^+} f(\phi,z) h(z)\frac{{\rm d} z}{2\pii} +\int_{V^-} f(\phi,z) h(z)\frac{{\rm d} z}{2\pii}, \end{split} \end{equation*} where $V^+$ is the vertical line in the area where $\Re(z)>0$, while $V^-$ is the vertical line for $\Re(z)<0$. Noticing that \begin{equation*} \lim_{\Re(z)\to\infty} f(\phi,z)h(z)=0 \end{equation*} we conclude that the integral in $V^+$ is vanishing in the limit. Instead the integral in $V^-$ is diverging, indeed we have \begin{equation*} \lim_{\Re(z)\to-\infty} f(\phi,z)h(z)=\infty. \end{equation*} Since the Lambert series is converging and finite, we expect that at the end all the divergent parts cancel. The integral over $V^-$ can be re-written performing the change of variable from $z$ to $-z$ and taking advantage of (\ref{eq:ReflectionfLam}) and of the relation \begin{equation*} h(-z)=-h(z)-2\pii.
\end{equation*} We find \begin{equation*} \begin{split} \int_{V^-} f(\phi,z) h(z)\frac{{\rm d} z}{2\pii}=& \lim_{\substack{\epsilon\to 0 \\ R\to \infty}}\int_{-R+i\epsilon}^{-R-i\epsilon} f(\phi,z) h(z)\frac{{\rm d} z}{2\pii}\\ =&\int_{V^+} f(\phi,z) h(z)\frac{{\rm d} z}{2\pii}+\lim_{\substack{\epsilon\to 0 \\ R\to \infty}} \int_{-R+i\epsilon}^{-R-i\epsilon} z^{2m+1}( h(z)+2\pii)\frac{{\rm d} z}{2\pii}. \end{split} \end{equation*} Since the integral in $V^+$ is vanishing only the second term contributes. Then, we see that the integral in $V^-$ is diverging but does not depend on $\phi$, we indicate it as \begin{equation*} \int_{V^-} f(\phi,z) h(z)\frac{{\rm d} z}{2\pii}=D_2(2m+1). \end{equation*} The integrals on the horizontal paths are computed again with the residue theorem closing them in the appropriate way. Since for $\Im(z)<0$ we have \begin{equation*} \lim_{|z|\to\infty} |f(\phi,z)h(z)|=0, \end{equation*} the lower horizontal line can be closed in the lower-half plane. The upper horizontal line can be moved to the lower one performing again the change of variable from $z$ to $-z$. This time we find \begin{equation*} \begin{split} \int_{+\infty+i 0^+}^{-\infty+i 0^+} f(\phi,z) h(z)\frac{{\rm d} z}{2\pii}=& \int_{-\infty}^\infty {\rm d} z\left[f(\phi,z)+z^{2m+1} \right]+ \int_{-\infty-i 0^+}^{+\infty-i 0^+} f(\phi,z) h(z)\frac{{\rm d} z}{2\pii}\\ &+\int_{-\infty-i 0^+}^{\infty-i 0^+}\frac{{\rm d} z}{2\pi i}z^{2m+1} h(z). \end{split} \end{equation*} The second term is identical to the integration on the lower horizontal line. The last term is vanishing because we can evaluate it with the residue theorem by closing the path in the lower half-plane but the integrand does not have poles.
Instead the first term can be decomposed in two parts and transformed again as follows \begin{equation*} \begin{split} \int_{-\infty}^\infty {\rm d} z &\left[f(\phi,z)+z^{2m+1} \right]= \int_{-\infty}^0 {\rm d} z\left[f(\phi,z)+z^{2m+1} \right]+\int_{0}^\infty {\rm d} z\left[f(\phi,z)+z^{2m+1} \right]\\ =&\int_{0}^\infty{\rm d} z\left[f(\phi,z)+z^{2m+1}-z^{2m+1}\right] +\int_{0}^\infty{\rm d} z f(\phi,z)+\int_{0}^\infty{\rm d} z\, z^{2m+1}\\ =&2\int_{0}^{\infty}{\rm d} z f(\phi,z) + D_3(2m+1). \end{split} \end{equation*} Again we have decomposed the integral into a diverging part that does not depend on $\phi$ and into a finite part which gives: \begin{equation*} \int_{0}^{\infty}{\rm d} z f(\phi,z)=\frac{(-1)^m}{2}\frac{B_{2m+2}}{2m+2}\left(\frac{2\pi}{\phi}\right)^{2m+2}. \end{equation*} The last piece we need to evaluate is the integral over the lower horizontal line. As said, we close the path with a semicircle in the lower half-plane and we compute it with the residue theorem. Inside that path the function $h$ does not have any pole but $f$ has infinite many poles located in $z=-2\pii n/\phi$ counted by integer $n$ starting from 1. As a consequence, the result is \begin{equation*} \begin{split} \int_{-\infty-i 0^+}^{+\infty-i 0^+} f(\phi,z) h(z)\frac{{\rm d} z}{2\pii}=& \sum_{n=1}^\infty \text{Res}\left[h(z)f(\phi,z),z=-\frac{2\pii n}{\phi}\right]\\ =&-(-1)^m \left(\frac{2\pi}{\phi}\right)^{2m+2} \sum_{n=1}^\infty \frac{n^{2m+1} e^{-4\pi^2 n/\phi}}{1-e^{-4\pi^2 n/\phi}}\\ =&-(-1)^m \left(\frac{2\pi}{\phi}\right)^{2m+2} \mathcal{L}_{e^{-4\pi^2/\phi}}(2m+1), \end{split} \end{equation*} where in the last step we recognized the Lambert series. 
If we put all the pieces together we obtain \begin{equation*} \begin{split} \mathcal{L}_{e^{-\phi}}(2m+1)=&-(-1)^m\left(\frac{2\pi}{\phi}\right)^{2m+2}\mathcal{L}_{e^{-4\pi^2/\phi}}(2m+1) -\frac{\delta_{m,0}}{2\phi}+\\ &+\frac{(-1)^m}{2}\frac{B_{2m+2}}{2m+2}\left(\frac{2\pi}{\phi}\right)^{2m+2}+D(2m+1), \end{split} \end{equation*} where $D$ is the sum of all the diverging terms. Since for $\Re(\phi)>0$ all the pieces of the previous equation are finite, it follows that $D(2m+1)$ must also be finite. Furthermore, since $D$ does not depend on $\phi$ it can be fixed choosing a value for $\phi$ and enforcing the relation. In particular we choose to evaluate the relation as an asymptotic expansion around $\phi=0$. In this case we have that the contribution from the Lambert series on the r.h.s of the functional relation is vanishing \begin{equation*} \phi^{-2m-2}\mathcal{L}_{e^{-4\pi^2/\phi}}(2m+1)\to 0, \end{equation*} while the asymptotic expansion for Lambert series in $\phi=0$ is~\cite{2016Banerjee} \begin{equation*} \mathcal{L}_{e^{-\phi}}(2m+1)\sim \frac{1}{2}\frac{B_{2m+2}}{2m+2}\left[1+ (-1)^m \left(\frac{2\pi}{\phi}\right)^{2m+2}\right]-\frac{\delta_{m,0}}{2\phi} \end{equation*} then we conclude that it must be \begin{equation*} D(2m+1)=\frac{1}{2}\frac{B_{2m+2}}{2m+2} \end{equation*} proving the functional relation. \end{proof} Functional relations similar to (\ref{eq:funcrelL}) for the Lambert series with odd negative argument involve the Riemann zeta function and were used in~\cite{2018Banerjee} to give rapidly converging formulas for $\zeta(4k\pm 1)$. The method used in the proof above cannot be applied to derive the functional relation for the Lambert series with even positive argument. Consequently, the functional relations for the hyperbolic series $S^{(\sinh,1)}_{2m+2}(\phi)$ cannot be inferred from those of the Lambert series, nor can they be derived from the summation theorem.
Indeed, for even positive argument the Lambert series is \begin{equation*} \mathcal{L}_{e^{-\phi}}(2m)=\sum_{k=1}^\infty \frac{k^{2m}e^{-\phi k}}{1-e^{-\phi k}} =\sum_{k=1}^\infty f(\phi,k), \end{equation*} but the reflection property of $f$ \begin{equation*} f(\phi,-k)=-f(\phi,k)-k^{2m} \end{equation*} does not allow the use of the summation theorem. The same problem arises for the hyperbolic series $S^{(\sinh,1)}_{2m+2}(\phi)$. Nevertheless, we can still take advantage of the linearity between the hyperbolic series and the Lambert series (see Proposition~\ref{prop:LambertRel}) to derive its asymptotic expansion in $\phi=0$. Recently, the authors of~\cite{Dorigoni:2020oon} derived the functional relations for Lambert series using resurgent expansion and extended their validity to complex arguments and to positive even integers. They found that for any complex $s$ and $\phi>0$, the Lambert series satisfies \begin{equation} \label{eq:LambertResurgent} \begin{split} \mathcal{L}_{e^{-\phi}}(s)=&\frac{\zeta(1+s)\Gamma(1+s)}{\phi^{1+s}} +\sum_{k=0}^{\Re(s)+1}\frac{(-\phi)^{k-1}}{\Gamma(k)}\zeta(1-k)\zeta(1-s-k)\\ &+\mathcal{S}_{\pm}(s,\phi)+\left(\mp i\frac{\phi}{2\pi}\right)^{-1-s}\mathcal{L}_{e^{-4\pi^2/\phi}}(s), \end{split} \end{equation} where $\mathcal{S}_{\pm}(s,\phi)$ is the resurgent completion of the Lambert series obtained starting from its asymptotic expansion. In the second line of (\ref{eq:LambertResurgent}), $\mathcal{S}_{\pm}(s,\phi)$ together with the Lambert series evaluated in $q=e^{-4\pi^2/\phi}$ captures the non-analytic terms at $\Re(\phi)=0$ of the Lambert series. For an odd positive integer $s$, the completion $\mathcal{S}_{\pm}(s,\phi)$ is vanishing and the previous equation reduces to (\ref{eq:funcrelL}).
\begin{proof}[Proof of theorem \ref{thm:funcrelS} with the summation theorem] Here to prove the functional relations for the hyperbolic series (\ref{eq:baseserie}) \begin{equation*} S_{2m+2}(\phi)=\sum_{n=1}^\infty \frac{\phi^{2m+2}}{\sinh^{2m+2}\left(n\frac{\phi}{2}\right)} \end{equation*} we follow the demonstration of theorem~\ref{thm:funcrelLambert}. In this case we do not find any apparently divergent term and therefore we do not have to use an asymptotic expansion to fix the missing constant. Since the hyperbolic series $S_{2m+2}(\phi)$ is an even function on $\phi$ we can write it as \begin{equation*} \begin{split} S_{2m+2}(\phi)&=\frac{1}{2}\sum_{n=1}^\infty \frac{\phi^{2m+2}}{\sinh^{2m+2}\left(n\frac{\phi}{2}\right)} +\frac{1}{2}\sum_{n=-\infty}^{-1} \frac{\phi^{2m+2}}{\sinh^{2m+2}\left(n\frac{\phi}{2}\right)}\\ &=\frac{1}{2}\sum_{n\neq 0} \frac{\phi^{2m+2}}{\sinh^{2m+2}\left(n\frac{\phi}{2}\right)} \equiv \sum_{n\neq 0} f(\phi,n), \end{split} \end{equation*} where we denoted \begin{equation*} f(\phi,z)=\frac{\phi^{2m+2}}{2\sinh^{2m+2}\left(z\frac{\phi}{2}\right)}. \end{equation*} Notice that the function $f$ given above is a meromorphic function on $z$ with poles of order $2m+2$ located in \begin{equation} \label{eq:poli} z=\frac{2\pi\,i\, m}{\phi},\quad m\in\mathbb{Z} \end{equation} and with $z=0$ the only pole that lies on the real axis. As done in the proof of theorem~\ref{thm:funcrelLambert}, we can evaluate the sums using the summation theorem. With the same notation, it yields \begin{equation*} S_{2m+2}(\phi)=\oint_C f(\phi,z) h(z)\frac{{\rm d} z}{2\pii}-\text{Res}\left[h(z)f(\phi,z),z=0\right]. \end{equation*} The residue is computed in Appendix \ref{sec:residue} and gives the polynomial in Eq. 
(\ref{eq:PolynB}): \begin{equation} \label{eq:ResidueDef} \begin{split} \mathcal{B}_{2m+2}(\phi)&\equiv-\text{Res}\left[h(z)f(\phi,z),z=0\right]\\ &=-2^{2m+1}\sum_{k=0}^{m+1}(2\pii)^{2k}\frac{B_{2k}}{(2k)!}\frac{B_{2m+2-2k}^{(2m+2)}(m+1)}{(2m+2-2k)!}\, \phi^{2m+2-2k}. \end{split} \end{equation} Now, we consider the integral \begin{equation*} \oint_C f(\phi,z) h(z)\frac{{\rm d} z}{2\pii}. \end{equation*} First, we notice that when the real part of $z$ is very large the integrand goes to zero \begin{equation*} \lim_{\Re(z)\to\pm\infty} f(\phi,z) h(z)=0, \end{equation*} therefore the integrals on the vertical line of $C$ are vanishing and we are left with \begin{equation*} \oint_C f(\phi,z)h(z)\frac{{\rm d} z}{2\pi\,i}=\int_{-\infty-i 0^+}^{\infty-i 0^+} f(\phi,z)h(z) \frac{{\rm d} z}{2\pi\,i} +\int_{+\infty+i 0^+}^{-\infty+i 0^+} f(\phi,z)h(z) \frac{{\rm d} z}{2\pi\,i}. \end{equation*} In the last integral we change variable $z\to -z$ and taking advantage of the reflection identities \begin{equation*} f(\phi,-z)=f(\phi,z),\qquad h(-z)=-2\pii-h(z) \end{equation*} we obtain \begin{equation*} \oint_C f(\phi,z)h(z)\frac{{\rm d} z}{2\pi\,i}=\int_{-\infty-i 0^+}^{\infty-i 0^+} f(\phi,z)\,{\rm d} z +\int_{-\infty-i 0^+}^{\infty-i 0^+}2f(\phi,z)h(z)\frac{{\rm d} z}{2\pi\,i}\,. 
\end{equation*} Consider the integral in the first term: \begin{equation*} \begin{split} I_\phi=&\int_{-\infty-i 0^+}^{\infty-i 0^+} f(\phi,z)\,{\rm d} z = \int_{-\infty-i 0^+}^{\infty-i 0^+}\frac{\phi^{2m+2}}{2\sinh^{2m+2}\left(z\frac{\phi}{2}\right)}\,{\rm d} z\\ &=\frac{\phi^{2m+1}}{2}|\phi|e^{i\arg\phi}\int_{-\infty-i 0^+}^{\infty-i 0^+} \frac{{\rm d} z}{\sinh^{2m+2}\left(z\frac{|\phi|}{2}e^{i\arg\phi}\right)}, \end{split} \end{equation*} if we change variable to $\tilde{z}=z\exp(i\arg\phi)$ first and then to $y=\tilde{z}|\phi|/2$ we obtain \begin{equation} \label{eq:ArgIntDiag} \begin{split} I_\phi=&\frac{\phi^{2m+1}}{2}|\phi|\int_{(-\infty-i 0^+)e^{i\arg\phi}}^{(\infty-i 0^+)e^{i\arg\phi}} \frac{{\rm d} \tilde{z}}{\sinh^{2m+2}\left(\tilde{z}\frac{|\phi|}{2}\right)}\\ =&\phi^{2m+1}\int_{(-\infty-i 0^+)e^{i\arg\phi}}^{(\infty-i 0^+)e^{i\arg\phi}} \frac{{\rm d} y}{\sinh^{2m+2}(y)}. \end{split} \end{equation} \begin{figure} \caption{The integration path of the integral~(\ref{eq:ArgIntDiag}).} \label{fig:ArgPath} \end{figure} Since the integrand function has poles only on the imaginary axis of $y$, we can use the closed path in Fig.~\ref{fig:ArgPath} to transform the integral path into the horizontal one. Indeed, the integrand does not contain poles inside the path and is vanishing in the arcs. Noticing that the direction of the path changes according to the sign $\sigma=\mathrm{sgn}(\Re(\phi))$, we can write \begin{equation} \label{eq:ArgIntHor} \begin{split} I_\phi =&\phi^{2m+1}\sigma \int_{-\infty-i 0^+}^{\infty-i 0^+}\frac{{\rm d} y}{\sinh^{2m+2}(y)}. \end{split} \end{equation} To obtain the result we iteratively use the formula \cite[Formula 1 \S 1.4.5, p. 146]{book:PrudVol1} \begin{equation*} \int \frac{{\rm d} x}{\sinh^p x}=-\frac{1}{p-1}\frac{\cosh x}{\sinh^{p-1}x}+\frac{2-p}{p-1}\int \frac{{\rm d} x}{\sinh^{p-2} x} \end{equation*} and we finally get \begin{equation*} I_\phi =\int_{-\infty-i 0^+}^{\infty-i 0^+} f(\phi,z)\,{\rm d} z=(-1)^{m+1}\frac{4\sigma}{m+2}\phi^{2m+1}.
\end{equation*} At last we are left with the complex integral: \begin{equation*} I_{2m+2}(\phi)=\int_{-\infty-i 0^+}^{\infty-i 0^+}2f(\phi,z)h(z)\frac{{\rm d} z}{2\pi\,i} =\int_{-\infty-i 0^+}^{\infty-i 0^+} \frac{\phi^{2m+2}}{\sinh^{2m+2}\left(z\frac{\phi}{2}\right)}\frac{1}{e^{2\pi\,i\, z}-1}{\rm d} z. \end{equation*} Again, we can compute the integral with the residue theorem by closing the path with a semicircle in the lower half-plane which has a vanishing contribution. In the lower-half complex plane the function $f$ has infinitely many poles and the residue theorem gives \begin{equation*} I_{2m+2}(\phi)= \sum_{n=1}^\infty \text{Res}\left[2h(z)f(\phi,z),z=-\frac{2\pii n}{\phi}\right]. \end{equation*} Evaluating the residue at fixed $n$ we realize that the sum over all $n$ reproduces a linear combination of the hyperbolic series with argument $4\pi^2/\phi$ and of order from $2m+2$ to $2$. More precisely, the residue is the sum of these hyperbolic series, each one weighted with an even polynomial $\mathcal{S}_i^{(m)}(\phi)$ of degree $2i+2$: \begin{equation*} I_{2m+2}(\phi)=\sum_{i=0}^m\mathcal{S}_i^{(m)}(\phi)S_{2i+2}\left(\frac{4\pi^2}{\phi}\right). \end{equation*} For instance, for $m=0$ we obtain \begin{equation*} I_{2}(\phi)=\sum_{n=1}^\infty \frac{2\pi^2}{\sinh^2(2\pi^2 n/\phi)} =\frac{\phi^2}{4 \pi^2}S_2\left(\frac{4\pi^2}{\phi}\right), \end{equation*} hence the hyperbolic series $S_2$ satisfies the functional equation \begin{equation*} S_2(\phi)+\frac{\phi^2}{4 \pi^2}S_2\left(\frac{4\pi^2}{\phi}\right) =\frac{\phi^2}{6}+\frac{2 \pi^2}{3}-2\phi.
\end{equation*} For $m=1$ we have \begin{equation*} \begin{split} I_{4}(\phi)=&-\sum_{n=1}^\infty\left[ \frac{16\pi^4}{\sinh^4(2\pi^2 n/\phi)} +\left(\frac{8\pi^2\phi^2}{3}+\frac{32\pi^4}{3} \right) \frac{1}{\sinh^2(2\pi^2 n/\phi)}\right]\\ =&-\frac{\phi^4}{16 \pi^4}S_4\left(\frac{4\pi^2}{\phi}\right) -\left(\frac{\phi^4}{6 \pi^2}+\frac{2\phi^2}{3}\right)S_2\left(\frac{4\pi^2}{\phi}\right), \end{split} \end{equation*} which brings to the functional equation \begin{equation*} S_4(\phi)-\frac{\phi^4}{16 \pi^4}S_4\left(\frac{4\pi^2}{\phi}\right) -\left(\frac{\phi^4}{6 \pi^2}+\frac{2\phi^2}{3}\right)S_2\left(\frac{4\pi^2}{\phi}\right) =-\frac{11 \phi^4}{90}-\frac{4 \pi^2 \phi^2}{9}+\frac{8 \pi^4}{45}+\frac{4\phi^3}{3}. \end{equation*} \end{proof} The previous proof of the functional relation (\ref{eq:funcrelS}) does not provide a good method to derive the polynomials $\mathcal{S}_i^{(m)}$. The linearity between Lambert series and the hyperbolic series provides a better method to derive those polynomials. To establish the connection between the two methods, we first need to show that the polynomial derived with the residue (\ref{eq:PolynB}) coincides with the polynomial obtained with the asymptotic expansion of the hyperbolic series. 
\begin{proposition}\label{prop:AsymS} The asymptotic expansions in $\phi=0$ of the hyperbolic series are $S_{2m+2}(\phi)\sim A_{2m+2}(\phi)$ and $S^{(\sinh,1)}_{2m+2}(\phi)\sim A^{(\sinh,1)}_{2m+2}(\phi)$ where $A_{2m+2}(\phi)$ and $A^{(\sinh,1)}_{2m+2}(\phi)$ are the polynomials \begin{equation} \label{eq:AsymS} \begin{split} A_{2m+2}(\phi)=&\sum_{i=0}^m \frac{2^{2m+1}B_{2i+2}}{(2i+2)!} \frac{B_{2m-2i}^{(2m+2)}(m+1)}{(2m-2i)!}\phi^{2(m-i)}\left(\phi^{2i+2}-(2\pii)^{2i+2}\right) \end{split} \end{equation} and \begin{equation} \label{eq:AsymSSinh} \begin{split} A^{(\sinh,1)}_{2m+2}(\phi)=& -\frac{2^{2m+1}}{(2m)!}\sum_{i=0}^{m} d_{2i}^{(m)} \psi^{(2i)}(1)\phi^{2m+1-2i}\\ &-\frac{2^{2m+1}}{(2m)!}\sum_{i=0}^{m}\sum_{k=0}^{\infty} d_{2i}^{(m)} \frac{B_{2i+2k}B_{2k}}{(2i+2k)(2k)!}\phi^{2m+2k+1}. \end{split} \end{equation} \end{proposition} \begin{proof} We again use the linearity relation (\ref{eq:LambertRel_1}): \begin{equation*} S_{2m+2}(\phi)=\frac{(2\phi)^{2m+2}}{(2m+1)!}\sum_{i=0}^{m} c_{2i+1}^{(m)}\,\mathcal{L}_{e^{-\phi}}(2i+1). \end{equation*} We could just replace the asymptotic expansion of the Lambert series~\cite{2016Banerjee} in the previous equation but it is more convenient to use the asymptotic expansion of the q-polygamma instead. From the relation of Lambert series and q-polygamma function \begin{equation*} \mathcal{L}_q(s)=\frac{1}{(\log q)^{s+1}} \psi^{(s)}_q(1) \end{equation*} the hyperbolic series is \begin{equation} \label{eq:SqPolyg} S_{2m+2}(\phi)=\frac{2^{2m+2}}{(2m+1)!}\sum_{i=0}^{m} c_{2i+1}^{(m)}\, \psi^{(2i+1)}_{e^{-\phi}}(1) \phi^{2m-2i}. \end{equation} The asymptotic expansion of q-polygamma at $\phi=0$ for $s\geq 1$ is~\cite{2016Banerjee} \begin{equation} \label{exppolygammaesp} \psi^{(s)}_{e^{-\phi}}(1)\sim\psi^{(s)}(1)-\sum_{k=0}^\infty (-1)^{k+1}\frac{B_{s+k}B_k}{(s+k)k!}\phi^{s+k}. 
\end{equation} If we choose $s$ as an odd integer $s=2i+1$, since the only non vanishing odd Bernoulli number is $B_1$, we simply have: \begin{equation*} \psi^{(2i+1)}_{e^{-\phi}}(1)\sim\psi^{(2i+1)}(1)+\frac{B_{2i+2}}{2(2i+2)}\phi^{2i+2}\quad i\geq 0. \end{equation*} The polygamma of argument $1$ is \begin{equation*} \begin{split} \psi^{(2i+1)}(1)=&\int_0^1 \frac{(\log t)^{2i+1}}{t-1}{\rm d} t =(-1)^{2i+2}\Gamma(2i+2)\zeta(2i+2)\\ =&(-1)^{i+2}\frac{B_{2i+2}(2\pi)^{2i+2}}{2(2i+2)}; \end{split} \end{equation*} then the asymptotic expansion of a q-polygamma with odd integer argument is \begin{equation} \label{qpolygammadisp} \psi^{(2i+1)}_{e^{-\phi}}(1)\sim\frac{B_{2i+2}}{2(2i+2)}\left(\phi^{2i+2}-(2\pii)^{2i+2}\right). \end{equation} Plugging the (\ref{qpolygammadisp}) in the (\ref{eq:SqPolyg}) and using (\ref{eq:relBernGen}) we obtain the polynomial (\ref{eq:AsymS}). The other hyperbolic series is related to q-polygamma functions of positive even integer whose asymptotic expansion does not stop at a finite power of $\phi$. From (\ref{eq:LambertRel_2}) the hyperbolic series is given by \begin{equation*} S^{(\sinh,1)}_{2m+2}(\phi)=-\frac{2^{2m+1}}{(2m)!}\sum_{i=0}^{m} d_{2i}^{(m)} \psi^{(2i)}_{e^{-\phi}}(1)\phi^{2m+1-2i}. \end{equation*} Then using (\ref{exppolygammaesp}) we obtain (\ref{eq:AsymSSinh}). \end{proof} \begin{proposition}\label{prop:AltPolB} The polynomials $A_{2m+2}$ in (\ref{eq:AsymS}) are exactly the same polynomials $\mathcal{B}_{2m+2}$ in (\ref{eq:PolynB}). \end{proposition} \begin{proof} Using the identity (\ref{eq:IdentZeros}), the generalized Bernoulli polynomial appearing in the $k=0$ term of (\ref{eq:PolynB}) can be written as \begin{equation*} \frac{B_{2m+2}^{(2m+2)}(m+1)}{(2m+2)!}=-\sum_{k=1}^{m+1}\frac{B_{2k}\,B_{2m+2-2k}^{(2m+2)}(m+1)}{(2k)!(2m+2-2k)!} =-\sum_{i=0}^{m}\frac{B_{2i+2}\,B_{2m-2i}^{(2m+2)}(m+1)}{(2i+2)!(2m-2i)!}. 
\end{equation*} If we replace it in (\ref{eq:PolynB}) we obtain \begin{equation*} \begin{split} \mathcal{B}_{2m+2}(\phi)=&\sum_{i=0}^m \frac{2^{2m+1}B_{2i+2}}{(2i+2)!} \frac{B_{2m-2i}^{(2m+2)}(m+1)}{(2m-2i)!}\phi^{2(m-i)}\left(\phi^{2i+2}-(2\pii)^{2i+2}\right) \end{split} \end{equation*} which is $A_{2m+2}(\phi)$ in (\ref{eq:AsymS}). \end{proof} \begin{corollary} For every integer $m\geq 0$, the polynomials $\mathcal{B}_{2m+2}(\phi)$ have two zeros in $\phi=\pm 2\pii$. \end{corollary} \begin{figure} \caption{The zeros of the polynomials $\mathcal{B}_{2m+2}$.} \label{fig:Zeros} \end{figure} Contrary to the Ramanujan polynomials, the $\mathcal{B}$ polynomials are not reciprocal and, because of that, we expect the study of their zeros to be more difficult than that of the Ramanujan polynomials. Furthermore, we can verify that, contrary to Ramanujan's polynomials~\cite{ZerosRam}, the zeros of $\mathcal{B}$ do not lie on the unit circle and they do not seem to have other imaginary zeros except for $\phi=\pm 2\pii$, see Fig.~\ref{fig:Zeros}. In the physics of accelerating fluids, the zeros in $\phi=\pm 2\pii$ are a direct consequence of the Unruh effect~\cite{Crispino:2007eb}. Indeed, the thermal functions of an accelerating fluid composed of free Klein-Gordon massless spin $0$ particles are given by the $\mathcal{B}$ polynomials and are obtained by an analytic continuation of the hyperbolic series to the imaginary axis~\cite{Becattini:2020qol}. The zeros in $\phi=\pm 2\pii$ correspond to the vanishing of the thermal functions at the Unruh Temperature $T_U=a/2\pi$~\cite{Becattini:2020qol}. We can now give the proof of theorem \ref{thm:funcrelS} that provides an easier method to derive the polynomial $\mathcal{S}$. \begin{proof}[Proof of theorem \ref{thm:funcrelS} from Lambert series] To prove the functional relation (\ref{eq:funcrelS}) we take advantage of the linearity relation between $S_{2m+2}$ and the Lambert series and the Lambert functional equation (\ref{eq:funcrelL}).
For the sake of clarity suppose $\Re(\phi)>0$, then from proposition~\ref{prop:LambertRel} we have \begin{equation} \label{eq:proofSwithL} S_{2m+2}(\phi)=\frac{(2\phi)^{2m+2}}{(2m+1)!}\sum_{i=0}^{m} c_{2i+1}^{(m)}\,\mathcal{L}_{e^{-\phi}}(2i+1), \end{equation} where from proposition~\ref{prop:relCBernGen} the coefficient $c_{2i+1}$ is given by \begin{equation*} c_{2i+1}^{(m)}=\frac{(2m+1)!}{(2i+1)!}\frac{B_{2m-2i}^{(2m+2)}(m+1)}{(2m-2i)!}. \end{equation*} If we define \begin{equation*} L(2i+2)=\frac{1}{2}\frac{B_{2i+2}}{2i+2}\left[1+\frac{(-1)^i(2\pi)^{2i+2}}{\phi^{2i+2}}\right], \end{equation*} then the Lambert functional relation is written as \begin{equation} \label{eq:proofFunRelL} \mathcal{L}_{e^{-\phi}}(2i+1)=L(2i+2)-\frac{\delta_{i,0}}{2\phi} -(-1)^i\left(\frac{2\pi}{\phi}\right)^{2i+2}\mathcal{L}_{e^{-4\pi^2/\phi}}(2i+1). \end{equation} Then plugging (\ref{eq:proofFunRelL}) in (\ref{eq:proofSwithL}) we obtain \begin{equation} \label{eq:almostRel} \begin{split} S_{2m+2}(\phi)=&A_{2m+2}(\phi)+(-1)^{m+1}\frac{4}{m+2}\phi^{2m+1}\\ &-\frac{(2\phi)^{2m+2}}{(2m+1)!} \sum_{i=0}^m c_{2i+1}^{(m)}\frac{(-1)^i (2\pi)^{2i+2}}{\phi^{2i+2}}\mathcal{L}_{e^{-4\pi^2/\phi}}(2i+1). \end{split} \end{equation} Since the asymptotic expansion in $\phi=0$ of the Lambert series with positive odd argument coincides with $L(2i+2)$~\cite{2016Banerjee}, the polynomial $A_{2m+2}$ is exactly the one we calculated in Proposition~\ref{prop:AsymS}. We already showed in Proposition~\ref{prop:AltPolB} that this polynomial coincides with the one found with the residue: $A_{2m+2}(\phi)=\mathcal{B}_{2m+2}(\phi)$. To write (\ref{eq:almostRel}) only in terms of the hyperbolic series we invert the equation (\ref{eq:proofSwithL}) to find the Lambert series appearing in (\ref{eq:almostRel}).
The result is \begin{equation} \label{eq:InvertionL} \mathcal{L}_{e^{-4\pi^2/\phi}}(2i+1)=\sum_{k=0}^i \mathcal{B}_k^{(i)}\phi^{2k+2} S_{2k+2}\left(\frac{4\pi^2}{\phi}\right), \end{equation} where $\mathcal{B}_k^{(i)}$ are real numbers. Inserting the relation (\ref{eq:InvertionL}) in (\ref{eq:almostRel}) we obtain \begin{equation*} S_{2m+2}(\phi)-\sum_{i=0}^m\mathcal{S}_i^{(m)}(\phi)S_{2i+2}\left(\frac{4\pi^2}{\phi}\right)= \mathcal{B}_{2m+2}(\phi)+(-1)^{m+1}\frac{4\sigma}{m+2}\phi^{2m+1}, \end{equation*} where the polynomials $\mathcal{S}$ are inferred by the equality \begin{equation} \label{eq:PolSDef} \begin{split} \sum_{i=0}^m\mathcal{S}_i^{(m)}(\phi)S_{2i+2}\left(\frac{4\pi^2}{\phi}\right)=&-\frac{(2\phi)^{2m+2}}{(2m+1)!} \sum_{i=0}^m \left[c_{2i+1}^{(m)}\frac{(-1)^i (2\pi)^{2i+2}}{\phi^{2i+2}}\right.\\ &\left.\times\sum_{k=0}^i \mathcal{B}_k^{(i)}\phi^{2k+2} S_{2k+2}\left(\frac{4\pi^2}{\phi}\right)\right]. \end{split} \end{equation} \end{proof} With this proof we have a simple procedure to derive the exact form of functional relation of hyperbolic series (\ref{eq:funcrelS}) for any chosen $m$. The numbers $\mathcal{B}^{(i)}_k$ in Eq.~(\ref{eq:InvertionL}) are derived inverting Eq.~(\ref{eq:proofSwithL}): \begin{align*} \mathcal{B}^{(0)}_0=&\frac{1}{64\pi^4},\\ \mathcal{B}^{(1)}_0=&\frac{1}{64 \pi^4},\,\mathcal{B}^{(1)}_1=\frac{3}{2048 \pi^8};\\ \mathcal{B}^{(2)}_0=&\frac{1}{64 \pi^4},\,\mathcal{B}^{(2)}_1=\frac{15}{2048 \pi^8},\, \mathcal{B}^{(2)}_2=\frac{15}{32768 \pi^{12}};\\ \mathcal{B}^{(3)}_0=&\frac{1}{64 \pi^4},\,\mathcal{B}^{(3)}_1=\frac{63}{2048 \pi^8},\, \mathcal{B}^{(3)}_2=\frac{105}{16384 \pi^{12}},\,\mathcal{B}^{(3)}_3=\frac{315}{1048576 \pi^{16}}. 
\end{align*} Once we have the numbers $\mathcal{B}^{(i)}_k$, all the element of the r.h.s of Eq.~(\ref{eq:PolSDef}) are known and we can make the sums and consequently find the $\mathcal{S}$ polynomials: \begin{align*} \mathcal{S}^{(0)}_0(\phi)=&-\frac{\phi^2}{4 \pi^2},\\ \mathcal{S}^{(1)}_0(\phi)=&\frac{\phi^4}{6 \pi^2}+\frac{2\phi^2}{3},\, \mathcal{S}^{(1)}_1(\phi)=\frac{\phi^4}{16 \pi^4};\\ \mathcal{S}^{(2)}_0(\phi)=&-\frac{2\phi^6}{15\pi^6}-\frac{2\phi^4}{3}-\frac{8\pi^2\phi^2}{15},\, \mathcal{S}^{(2)}_1(\phi)=-\frac{\phi^6}{16\pi^4}-\frac{\phi^4}{4\pi^2},\, \mathcal{S}^{(2)}_2(\phi)=-\frac{\phi^6}{64 \pi^6};\\ \mathcal{S}^{(3)}_0(\phi)=&\frac{4\phi^8}{35\pi^2}+\frac{28\phi^6}{45} +\frac{32\pi^2\phi^4}{45}+\frac{64\pi^4\phi^2}{315},\, \mathcal{S}^{(3)}_1(\phi)=\frac{7 \phi^8}{120 \pi^4}+\frac{\phi^6}{3 \pi^2}+\frac{2\phi^4}{5},\\ &\mathcal{S}^{(3)}_2(\phi)=\frac{\phi^8}{48 \pi^6}+\frac{\phi^6}{12 \pi^4},\, \mathcal{S}^{(3)}_3(\phi)=\frac{\phi^8}{256 \pi^8}. \end{align*} We also report the $\mathcal{B}_{2m+2}$ polynomials for $m=0,1,2,3$ \begin{align*} \mathcal{B}_2(\phi)=&\frac{\phi^2}{6}+\frac{2 \pi^2}{3};\\ \mathcal{B}_4(\phi)=&-\frac{11 \phi^4}{90}-\frac{4 \pi^2 \phi^2}{9}+\frac{8 \pi^4}{45};\\ \mathcal{B}_6(\phi)=&\frac{191 \phi^6}{1890}+\frac{16 \pi^2 \phi^4}{45} -\frac{8 \pi^4 \phi^2}{45}+\frac{64 \pi^6}{945};\\ \mathcal{B}_8(\phi)=&-\frac{2497 \phi^8}{28350}-\frac{32 \pi^2 \phi^6}{105} +\frac{112 \pi^4 \phi^4}{675}-\frac{256 \pi^6 \phi^2}{2835}+\frac{128 \pi^8}{4725}. \end{align*} We can then explicitly write the functional equation (\ref{eq:funcrelS}) for a given $m$. 
For instance, for $m=0,1$ we have: \begin{equation*} S_2(\phi)+\frac{\phi^2}{4 \pi^2}S_2\left(\frac{4\pi^2}{\phi}\right) =\frac{\phi^2}{6}+\frac{2 \pi^2}{3}-2\phi; \end{equation*} \begin{equation*} S_4(\phi)-\frac{\phi^4}{16 \pi^4}S_4\left(\frac{4\pi^2}{\phi}\right) -\left(\frac{\phi^4}{6 \pi^2}+\frac{2\phi^2}{3}\right)S_2\left(\frac{4\pi^2}{\phi}\right) =-\frac{11 \phi^4}{90}-\frac{4 \pi^2 \phi^2}{9}+\frac{8 \pi^4}{45}+\frac{4\phi^3}{3}. \end{equation*} \appendix \section{The residue \texorpdfstring{$\mathcal{B}_{2m+2}(\phi)$}{B 2m+2}} \label{sec:residue} In this appendix we evaluate the residue in Eq. (\ref{eq:ResidueDef}), which gives the polynomial $\mathcal{B}_{2m+2}(\phi)$. From the definition, since the residue is evaluated at a pole of order $2m+2$, we have: \begin{equation*} \begin{split} \mathcal{B}_{2m+2}(\phi)&=-\text{Res}\left[f(\phi,z)h(z)\right]_{z=0}\\ &=-\frac{1}{2(2m+2)!}\lim_{z\to0}\frac{{\rm d}^{2m+2}}{{\rm d} z^{2m+2}} \left[ \frac{\phi^{2m+2}z^{2m+3}}{\sinh^{2m+2}\left(z\frac{\phi}{2}\right)}\frac{2\pii}{e^{2\pi\,i\, z}-1}\right]. \end{split} \end{equation*} We define the following functions \begin{equation*} F(\phi,z)=\frac{\phi^{2m+2}z^{2m+2}}{\sinh^{2m+2}\left(z\frac{\phi}{2}\right)};\quad H(z)=\frac{2\pii\, z}{e^{2\pi\,i\, z}-1}; \end{equation*} in this way the limit $z\to 0$ is well behaved separately in each function and their derivatives. Next, to perform the derivative in the residue we use the generalized Leibniz rule: \begin{equation} \label{derFH} \frac{{\rm d}^{2m+2}}{{\rm d} z^{2m+2}}\left[F(\phi,z) H(z)\right]= \sum_{k=0}^{2m+2}\genfrac(){0pt}{0}{2m+2}{k}F^{(2m+2-k)}(\phi,z)\, H^{(k)}(z).
\end{equation} To derive $H(z)$ we recognize the generating functions of Bernoulli numbers in $H$, see Definition \ref{def:bernpoly}: \begin{equation*} H(z)=\frac{2\pi i\, z}{e^{2\pi\,i\, z}-1}=\sum _{n=0}^{\infty } \frac{(2\pi i)^n\, B_n }{n!} z^n \end{equation*} from which its derivatives immediately follow \begin{equation*} H^{(k)}(z)=\sum _{n=k}^{\infty } \frac{B_n(2\pi i)^n }{(n-k)!} z^{n-k} \end{equation*} and, particularly, in the limit $z\to 0$ we obtain: \begin{equation} \label{derH} H^{(k)}(0)=B_k(2\pi i)^k. \end{equation} The previous expression holds even for $k=0$ because $H(0)=1$. Now we evaluate the $s$-th derivative of $F(\phi,z)$. This time we recognize the generating functions of the generalized Bernoulli polynomials, see Definition \ref{def:bernpoly}: \begin{equation*} \begin{split} F(\phi,z)&=\frac{\phi^{2m+2}z^{2m+2}}{\sinh^{2m+2}\left(z\frac{\phi}{2}\right)} =\left(\frac{\phi z}{\sinh \left(z\frac{\phi}{2}\right)}\right)^{2m+2} =\left(\frac{2z\,\phi}{e^{z\phi}-1}\right)^{2m+2}e^{z(m+1)\phi}\\ &=2^{2m+2}\sum _{n=0}^{\infty } \frac{B_n^{(2m+2)}(m+1) \phi^n}{n!} z^n. \end{split} \end{equation*} Therefore, the $s$-th derivative of $F$ is \begin{equation*} F^{(s)}(\phi,z)= 2^{2m+2}\sum_{n=s}^{\infty } \frac{B_n^{(2m+2)}(m+1) \phi^n}{(n-s)!} z^{n-s} \end{equation*} and when $z\to 0$ only the term $n=s$ remains \begin{equation} \label{derF} F^{(s)}(\phi,0)=2^{2m+2} B_s^{(2m+2)}(m+1) \phi^s. \end{equation} Finally, we plug Eq. (\ref{derH}) and Eq. (\ref{derF}) in Eq. (\ref{derFH}) and we obtain the following polynomial: \begin{equation*} \begin{split} \mathcal{B}_{2m+2}(\phi)&=-\frac{1}{2(2m+2)!}\lim_{z\to0}\frac{{\rm d}^{2m+2}}{{\rm d} z^{2m+2}} \left[ \frac{\phi^{2m+2}z^{2m+3}}{\sinh^{2m+2}\left(z\frac{\phi}{2}\right)}\frac{2\pi i}{e^{2\pi\,i\, z}-1}\right]\\ &=-\frac{2^{2m+1}}{(2m+2)!} \sum_{k=0}^{2m+2}\genfrac(){0pt}{0}{2m+2}{k} (2\pi i)^k \,B_k\,B_{2m+2-k}^{(2m+2)}(m+1)\, \phi^{2m+2-k}.
\end{split} \end{equation*} But $B_{2m+2-k}^{(2m+2)}(m+1)$ is the coefficient of a series of an even function, therefore it vanishes for odd $k$ and we can sum only over even $k$. Furthermore, we write the binomial as \begin{equation*} \genfrac(){0pt}{0}{2m+2}{2k}=\frac{(2m+2)!}{(2k)!(2m+2-2k)!} \end{equation*} and we obtain \begin{equation*} \mathcal{B}_{2m+2}(\phi)=-2^{2m+1}\sum_{k=0}^{m+1}(2\pi i)^{2k}\frac{B_{2k}}{(2k)!}\frac{B_{2m+2-2k}^{(2m+2)}(m+1)}{(2m+2-2k)!} \, \phi^{2m+2-2k}, \end{equation*} that is Eq. (\ref{eq:PolynB}). \subsection{Another form of the residue} Going back to Eq.~(\ref{derFH}), we can obtain the derivatives of $F(z)$ with an alternative method. We can view the function $F$ as the composition of two functions $F(z)=q(g(z))$, where \begin{equation*} q(y)=y^{2m+2};\quad g(z)=\frac{\phi z }{\sinh \left(z\frac{\phi}{2}\right)}. \end{equation*} From this observation, we can use the Faà di Bruno formula for the derivatives of $F$: \begin{equation*} F^{(n)}(z)=\frac{{\rm d}^n}{{\rm d} z^n}q(g(z))=\sum_{s=1}^n q^{(s)}(g(z)) Y_{n,s}\left(g^{(1)}(z),g^{(2)}(z),\dots,g^{(n-s+1)}(z)\right), \end{equation*} where $Y_{n,s}$ is the incomplete Bell polynomial. The derivative of $q$ is simply the derivative of a power \begin{equation*} q^{(s)}(y)=\frac{{\rm d}^s}{{\rm d} y^s}y^{2m+2}=\frac{(2m+2)!}{(2m+2-s)!}y^{2m+2-s} \end{equation*} and hence we find \begin{equation*} q^{(s)}(g(z))=\frac{(2m+2)!}{(2m+2-s)!}\frac{\phi^{2m+2-s} z^{2m+2-s} }{\sinh^{2m+2-s} \left(z\frac{\phi}{2}\right)}. \end{equation*} In the limit $z\to 0$ this yields (since $g(0)=2$) \begin{equation} \label{derq} q^{(s)}(g(0))=\frac{2^{2m+2-s}(2m+2)!}{(2m+2-s)!}.
\end{equation} The derivatives $g^{(s)}(z)$ of $g$ are found with the Bernoulli polynomials \begin{equation*} \frac{x\,e^{t\, x}}{e^x-1}=\sum _{n=0}^{\infty } \frac{B_n(t) x^n}{n!} \quad |x|<2\pi \end{equation*} which yields \begin{equation*} g(z)=\frac{z\,\phi}{\sinh \left(z\frac{\phi}{2}\right)}=\frac{2z\,\phi}{e^{z\phi/2}-e^{-z\phi/2}} =\frac{2z\,\phi\,e^{z\phi/2}}{e^{z\phi}-1} =\sum_{n=0}^\infty \frac{2B_n(1/2)\phi^n}{n!}z^n. \end{equation*} The derivatives immediately follow \begin{equation*} g^{(s)}(z)=\sum_{n=s}^\infty \frac{2B_n(1/2)\phi^n}{(n-s)!}z^{n-s}. \end{equation*} In the limit $z\to 0$, the only non-vanishing term is the one with $n=s$ and we have \begin{equation*} g^{(s)}(0)=2 B_s\left(1/2\right)\phi^s. \end{equation*} Plugging the derivatives of $q$ and $g$ in the Faà di Bruno formula, we obtain the derivative of $F$ \begin{equation} \label{derFver2} \begin{split} F^{(n)}(0)&=\sum_{s=1}^n \frac{2^{2m+2-s}(2m+2)!}{(2m+2-s)!} Y_{n,s}\left(g^{(1)}(0),g^{(2)}(0),\dots,g^{(n-s+1)}(0)\right)\\ &=\phi^n\sum_{s=1}^n \frac{2^{2m+2}(2m+2)!}{(2m+2-s)!} Y_{n,s}\left(B_1\left(1/2\right), \dots,B_{n-s+1}\left(1/2\right)\right), \end{split} \end{equation} where we took advantage of the properties of the Bell polynomials. Equating Eq.~(\ref{derFver2}) with Eq.~(\ref{derF}), we establish a relation between generalized Bernoulli polynomials and ordinary Bernoulli polynomials: \begin{equation} \label{eq:AppGenBer} B_n^{(2m+2)}(m+1)=\sum_{s=1}^n \frac{(2m+2)!}{(2m+2-s)!} Y_{n,s}\left(B_1(1/2),B_2(1/2),\dots,B_{n-s+1}(1/2)\right). \end{equation} We can also use Eq.
(\ref{derFH}) to find an alternative expression of the polynomial $\mathcal{B}$ \begin{equation} \label{resFaaBruno} \begin{split} \mathcal{B}_{2m+2}(\phi)=&-\frac{1}{2}\text{Res}\left[f(\phi,z)h(z)\right]_{z=0}\\ =&-\frac{1}{2(2m+2)!}\lim_{z\to0}\frac{{\rm d}^{2m+2}}{{\rm d} z^{2m+2}} \left[ \frac{\phi^{2m+2}z^{2m+3}}{\sinh^{2m+2}\left(z\frac{\phi}{2}\right)}\frac{2\pi i}{e^{2\pi\,i\, z}-1}\right]\\ =&- \sum_{k=0}^{2m+2}\sum_{s=1}^{2m+2-k}\genfrac(){0pt}{0}{2m+2}{k} \frac{2^{2m+1}(2\pi i)^k B_k}{(2m+2-s)!}\phi^{2m+2-k}\times\\ &\times Y_{2m+2-k,s}\left(B_1\left(1/2\right), \dots,B_{2m+2-k-s+1}\left(1/2\right)\right). \end{split} \end{equation} This is used to prove Proposition~\ref{prop:RelOrdGenBer}. \providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR } \providecommand{\MRhref}[2]{ \href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2} } \providecommand{\href}[2]{#2} \end{document}
\begin{document} \title{A fast spectral method for the Boltzmann collision operator with general collision kernels \footnote{Los Alamos Report LA-UR-16-26555.Funded by the Department of Energy at Los Alamos National Laboratory under contract DE-AC52-06NA25396. } \footnote{This manuscript has been authored, in part, by UT-Battelle, LLC, under Contract No. DE-AC0500OR22725 with the U.S. Department of Energy. The United States Government retains and the publisher, by accepting the article for publication, acknowledges that the United States Government retains a non-exclusive, paid-up, irrevocable, world-wide license to publish or reproduce the published form of this manuscript, or allow others to do so, for the United States Government purposes. The Department of Energy will provide public access to these results of federally sponsored research in accordance with the DOE Public Access Plan (\texttt{http://energy.gov/downloads/doe-public-access-plan}).} } \author{Irene M. Gamba\footnote{Department of Mathematics and Institute for Computational Engineering and Sciences, The University of Texas at Austin, Austin, TX 78712, USA ([email protected]). I. Gamba's research was partially supported by NSF grant DMS-1413064 and NSF RNMS (KI-Net) grant DMS-1107465.}, Jeffrey R. Haack\footnote{Computational Physics and Methods Group, Los Alamos National Laboratory, Los Alamos, NM 87545, USA ([email protected]). J. Haack's research was partially supported by NSF grant DMS-1109625 and NSF RNMS (KI-Net) grant DMS-1107465.}, Cory D. Hauck\footnote{Computational and Applied Mathematics Group, Oak Ridge National Laboratory, Oak Ridge, TN 37831, USA ([email protected]). C. Hauck's research was supported by the U.S. Department of Energy, Office of Science, Office of Advanced Scientific Research.}, and Jingwei Hu\footnote{Department of Mathematics, Purdue University, West Lafayette, IN 47907, USA ([email protected]). J. 
Hu's research was partially supported by NSF grant DMS-1620250 and a startup grant from Purdue University. } } \maketitle \begin{abstract} We propose a simple fast spectral method for the Boltzmann collision operator with general collision kernels. In contrast to the direct spectral method \cite{PR00, GT09} which requires $O(N^6)$ memory to store precomputed weights and has $O(N^6)$ numerical complexity, the new method has complexity $O(MN^4\log N)$, where $N$ is the number of discretization points in each of the three velocity dimensions and $M$ is the total number of discretization points on the sphere and $M\ll N^2$. Furthermore, it requires no precomputation for the variable hard sphere (VHS) model and only $O(MN^4)$ memory to store precomputed functions for more general collision kernels. Although a faster spectral method is available \cite{MP06} (with complexity $O(MN^3\log N)$), it works only for hard sphere molecules, thus limiting its use for practical problems. Our new method, on the other hand, can apply to arbitrary collision kernels. A series of numerical tests is performed to illustrate the efficiency and accuracy of the proposed method. \end{abstract} {\small {\bf Key words.} Boltzmann collision integral, spectral method, convolution, fast Fourier transform, Lebedev quadrature. {\bf AMS subject classifications.} 35Q20, 65M70. } \section{Introduction} Kinetic theory describes the non-equilibrium dynamics of a gas or any system comprised of a large number of particles. When well-known fluid mechanical laws of Navier-Stokes and Fourier become inadequate, kinetic equations provide rich information at the mesoscopic level and have found applications in various fields such as rarefied gas dynamics \cite{Cercignani00}, radiative transfer \cite{Chandrasekhar}, semiconductor modeling \cite{MRS}, and biological and social sciences \cite{NPT}. 
Our main focus in this paper is the Boltzmann equation which constitutes the central model in kinetic theory and takes the form \cite{Cercignani, CC, Villani02}: \begin{equation} \label{CBE} \frac{\partial f}{\partial t}+v\cdot \nabla_x f=\mathcal{Q}(f), \quad t>0, \ x\in \Omega\subset\mathbb{R}^3, \ v\in \mathbb{R}^3. \end{equation} Here $f = f(t,x,v)$ is the phase space distribution function, which depends on time $t$, position $x$, and particle velocity $v$; and $\mathcal{Q}$ is the Boltzmann collision operator, which models binary interactions between particles: \footnote{The variables $t$ and $x$ are suppressed because $\mathcal{Q}$ acts on $f$ only through the velocity.} \begin{equation} \label{CO} \mathcal{Q}(f)(v)=\int_{\mathbb{R}^3}\int_{S^2}\mathcal{B}(v-v_*,\omega)\left[f(v')f(v_*')-f(v)f(v_*)\right]\,\mathrm{d}{\omega}\,\mathrm{d}{v_*}. \end{equation} In this formula, $(v',v_*')$ and $(v,v_*)$ represent the velocity pairs before and after a collision. The requirement that momentum and energy are conserved during such a collision means that $(v',v_*')$ can be expressed in terms of $(v,v_*)$: \begin{equation} v'=\frac{v+v_*}{2}+\frac{|v-v_*|}{2}\omega, \quad v_*'=\frac{v+v_*}{2}-\frac{|v-v_*|}{2}\omega, \end{equation} where the parameter $\omega$ varies over the unit sphere $S^2$. The collision kernel $\mathcal{B}$ is a non-negative function that depends on its arguments only through $|v-v_*|$ and cosine of the deviation angle $\theta$ (the angle between $v-v_*$ and $v'-v_*'$). Thus $\mathcal{B}$ is often written as \begin{equation} \label{CK} \mathcal{B}(v-v_*, \omega)=B(|v-v_*|,\cos \theta), \quad \cos\theta=\frac{\omega\cdot (v-v_*)}{|v-v_*|}. \end{equation} The specific form of $B$ can be determined from the intermolecular potential using scattering theory \cite{Cercignani}. 
For numerical purposes, a commonly used collision kernel is the variable hard sphere (VHS) model proposed by Bird \cite{Bird}: \begin{equation} \label{VHS} B=b_{\gamma}|v-v_*|^{\gamma}, \qquad b_{\gamma} >0, \quad 0\leq\gamma\leq 1, \end{equation} where $\gamma$ and $b_{\gamma}$ are constants. In particular, $\gamma=1$ corresponds to hard sphere molecules and $\gamma=0$ to Maxwell molecules. The collision operator $\mathcal{Q}$ has collision invariants $1$, $v$, and $|v|^2$; that is, \begin{equation} \label{consv} \int_{\mathbb{R}^3}\mathcal{Q}(f)\,\mathrm{d}{v}= \int_{\mathbb{R}^3}\mathcal{Q}(f) v\,\mathrm{d}{v}=\int_{\mathbb{R}^3}\mathcal{Q}(f)|v|^2\,\mathrm{d}{v}=0. \end{equation} In addition, $\mathcal{Q}$ satisfies Boltzmann's $H$-theorem; that is, \begin{equation} \int_{\mathbb{R}^3}\mathcal{Q}(f)\ln f\,\mathrm{d}{v}\leq 0, \end{equation} with equality if and only if $f$ takes on the form of a Maxwellian: \begin{equation} \label{Max} M(v)=\frac{\rho}{(2\pi T)^{\frac{3}{2}}}e^{-\frac{|v-u|^2}{2T}}, \end{equation} where the density $\rho$, bulk velocity $u$, and temperature $T$ are given by \begin{align} \rho=\int_{\mathbb{R}^3}f\,\mathrm{d}{v}, \quad u=\frac{1}{\rho}\int_{\mathbb{R}^3}fv\,\mathrm{d}{v}, \quad T=\frac{1}{3\rho}\int_{\mathbb{R}^3}f|v-u|^2\,\mathrm{d}{v}. \end{align} This implies that in the homogeneous case, the entropy $\mathcal{S}(f) = -\int_{\mathbb{R}^3} f \ln f\,\mathrm{d}{v}$ is always non-decreasing and reaches its maximum at the equilibrium defined by the Maxwellian in \eqref{Max}. Proposed by Ludwig Boltzmann in 1872, the Boltzmann equation (\ref{CBE}) is one of the fundamental equations of kinetic theory. Yet its numerical approximation still presents a huge computational challenge, even on today's supercomputers. This is mainly due to the high-dimensional, nonlinear, nonlocal structure of the collision integral in (\ref{CO}). 
Two approaches have been primarily employed for solving the Boltzmann equation numerically: one stochastic and one deterministic. Direct simulation Monte Carlo (DSMC) methods \cite{Bird, Nanbu80, Caflisch98} have been historically popular because they avoid the curse of dimensionality for this problem; however, they can suffer from slow convergence for certain types of problems such as transient and low-speed flows and give noisy results due to their stochastic nature. The other approach is to use deterministic solvers, which have undergone considerable development over the past twenty years. These methods include discrete velocity models (DVM) \cite{RS94, BPS95, Buet96, MPR13} and Fourier spectral methods \cite{PP96, BR99, PR00, FR03, GT09, GT10}. DVMs are quadrature-based methods with grid points that are carefully chosen in order to preserve the conserved quantities of the collision operator. Spectral methods, on the other hand, compute the collision operator by exploiting its structure in Fourier space. Compared with DVM, they can provide significantly more accurate results with less numerical complexity; the conservation properties are not strictly maintained but are preserved up to spectral accuracy. Compared with DSMC, they produce smooth, noise free solutions and can simulate regimes that particle methods find difficult. Despite the aforementioned advantages, spectral methods are invariably hindered in most real-world applications since they require $O(N^6)$ operations per evaluation of the collision operator, with $N$ being the number of discretization points in each velocity dimension, as well as $O(N^6)$ bytes of memory to store precomputed weight functions. With this type of scaling, the evaluation of the collision operator quickly becomes the bottleneck when solving large-scale problems \cite{PR00, GT09}. Fast spectral methods, based on the Carleman representation of the collision integral, have been proposed in \cite{BR99, MP06}.
These methods reduce the complexity of evaluating the collision operator to $O(MN^3\log N)$, where $M$ is the total number of discretization points on a sphere and $M\ll N^2$. However, a decoupling assumption for the collision kernel is needed that restricts application of the method to the hard sphere case, i.e., $\gamma=1$ in (\ref{VHS}). In practice, however, $\gamma$ may take on any value in $[0,1]$; in addition, the collision kernel (\ref{CK}) may also have angular dependence. Therefore, the goal of this paper is to introduce a fast spectral method for the Boltzmann collision operator that can handle general collision kernels as well as mitigate the memory requirement in the direct spectral method. Specifically, the numerical complexity of our new method is $O(MN^4\log N)$; no precomputation is required for the VHS model, and only $O(MN^4)$ memory is needed to store precomputed functions for more general collision kernels. The proposed method can serve as a ``black-box'' solver in the velocity domain to be used in conjunction with existing time and spatial discretization methods to treat more practical problems with complex geometries, multiple temporal/spatial scales, etc. Since our goal here is to present a simple strategy to accelerate the direct spectral method without sacrificing spectral accuracy, we will mainly focus on the approximation of the collision operator in the numerical examples and consider only the spatially homogeneous version of the Boltzmann equation \eqref{CBE}. The rest of this paper is organized as follows. In the next section, we review the basic formulation of the direct spectral method and discuss its numerical challenges. The fast method is then described in Section 3. Numerical examples are presented in Section 4 to demonstrate the efficiency and accuracy of the proposed method. The paper is concluded in Section 5.
\section{The direct spectral method} While multiple spectral formulations exist, we have elected in this paper to adopt the Fourier-Galerkin approach \cite{PR00} to illustrate the idea. The strategy introduced below can be easily applied to other spectral formulations such as the one based on the Fourier transform \cite{GT09}. The starting point for the spectral method is a change in the integration variable $v_*$ in (\ref{CO}) to $g=v-v_*$. It is then assumed that $f$ has a compact support in $v$: $\text{Supp}_v(f)\approx \mathcal{B}_S$, where $\mathcal{B}_S$ is a ball centered at the origin with radius $S$. Of course, many distribution functions, including the Maxwellian, will not have compact support. Thus in practice, the support is chosen as some multiple (typically 6 to 8) of the thermal speed $v_{\rm{th}} = \sqrt{T}$. It then suffices to truncate the infinite integral in $g$ to a larger ball $\mathcal{B}_R$ with radius $R=2S$: \begin{equation} \label{CO1} \mathcal{Q}(f)(v)\approx \mathcal{Q}_R(f)(v) = \int_{\mathcal{B}_R}\int_{S^2}B\left(r,\omega\cdot \hat{g}\right) \left[f(v')f(v_*')-f(v)f(v-g)\right]\,\mathrm{d}{\omega}\,\mathrm{d}{g}, \end{equation} where \begin{equation} v'=v-\frac{g}{2}+\frac{r}{2}\omega \quad \text{and} \quad v_*'=v-\frac{g}{2}-\frac{r}{2}\omega, \end{equation} with $r =|g|$ and $\hat{g} = g/|g|$ being the magnitude and direction of $g$, respectively. Next, one restricts $f$ to the computational domain $\mathcal{D}_L=[-L,L]^3$ and extends it periodically to the whole space. For anti-aliasing purposes, we let $L\geq (3+\sqrt{2})S/2$. 
\footnote{See \cite{PR00} for more details justifying this choice of $L$.} Then $f$ is approximated by a truncated Fourier series: \footnote{Note that $k=(k_1,k_2,k_3)\in \mathbb{Z}^3$ is a multi-dimensional index so that, for example, the summation in \eqref{Fseries} is understood to be over the lattice $\{k\in \mathbb{Z}^3: -\frac{N}{2} \leq k_1,k_2,k_3 \leq \frac{N}{2}-1 \}$.} \begin{equation} \label{Fseries} f(v)\approx f_N(v)= \sum_{k={-\frac{N}{2}}}^{\frac{N}{2}-1}\hat{f}_k e^{i \frac{\pi}{L}k\cdot v}, \qquad \hat{f}_k=\frac{1}{(2L)^3}\int_{\mathcal{D}_L}f(v)e^{-i \frac{\pi}{L}k\cdot v}\,\mathrm{d}{v}. \end{equation} By substituting (\ref{Fseries}) into (\ref{CO1}) and performing a Galerkin projection, one can express the $k$-th mode of the Fourier expansion of the collision operator as a (discrete) weighted convolution: \begin{equation} \label{WC} \hat{\mathcal{Q}}_k:=\frac{1}{(2L)^3} \int_{\mathcal{D}_L}\mathcal{Q}_R(f)(v) e^{-i \frac{\pi}{L}k\cdot v}\,\mathrm{d}{v} =\sum_{\substack{l,m=-\frac{N}{2}\\l+m=k}}^{\frac{N}{2}-1}\mathcal{G}(l,m)\hat{f}_l\hat{f}_m, \quad k=-\frac{N}{2},\dots,\frac{N}{2}-1, \end{equation} where $\mathcal{G}(l,m)=G(l,m)-G(m,m)$ and \begin{equation} \label{weight1} G(l,m)=\int_{\mathcal{B}_R}\int_{S^2}B\left(r,\omega\cdot \hat{g}\right)e^{-i\frac{\pi}{L}\frac{(l+m)}{2}\cdot g+i\frac{\pi}{L}r\frac{(l-m)}{2}\cdot \omega}\,\mathrm{d}{\omega}\,\mathrm{d}{g}. \end{equation} For the VHS model (\ref{VHS}), the formula for $G$ reduces to \begin{align} G(l,m)= 16\pi^2b_{\gamma}\int_0^Rr^{\gamma+2}\,\text{Sinc}\left(\frac{\pi}{L}r\frac{|l+m|}{2}\right)\,\text{Sinc}\left(\frac{\pi}{L}r\frac{|l-m|}{2}\right)\,\mathrm{d}{r}, \end{align} where $\text{Sinc}(x)=\sin x/x$. To summarize, a single evaluation of the collision operator $\mathcal{Q}$ in the direct spectral method consists of the following steps: \begin{enumerate} \item[0.] precompute the weight $\mathcal{G}(l,m)$ --- storage requirement $O(N^6)$; \item[1.] 
compute $\hat{f}_k$ using the fast Fourier transform (FFT) --- cost $O(N^3\log N)$; \item[2.] compute the weighted convolution (\ref{WC}) --- cost $O(N^6)$; \item[3.] take the inverse Fourier transform of $\hat{\mathcal{Q}}_k$ to obtain $\mathcal{Q}$ --- cost $O(N^3\log N)$. \end{enumerate} Step 2 is by far the most expensive step. Indeed, due to the presence of the weights $\mathcal{G}({l,m})$ in the convolution, typical fast methods for convolutions do not apply. Thus the constrained double summation in (\ref{WC}) has to be evaluated directly for every index $k$, resulting in $O(N^6)$ complexity. Step 0 can be completed in advance, but it requires a huge amount of memory to store the precomputed weights. This can be a challenge for large-scale problems, even on the largest supercomputers. For example, when $N=40$, it takes just over 32 gigabytes of data to store the weights---an amount that exceeds the memory capacity on a typical compute node of Oak Ridge National Laboratory's Titan supercomputer. \section{The new fast spectral method} In the new fast spectral method, we accelerate the summation in (\ref{WC}). Let $\hat{\mathcal{Q}}_k=\hat{\mathcal{Q}}^+_k-\hat{\mathcal{Q}}^-_k$, where \begin{align}\label{Q+} \hat{\mathcal{Q}}^+_k=\sum_{\substack{l,m=-\frac{N}{2}\\l+m=k}}^{\frac{N}{2}-1}G(l,m)\hat{f}_l\hat{f}_m \quad \text{and} \quad \hat{\mathcal{Q}}^-_k=\sum_{\substack{l,m=-\frac{N}{2}\\l+m=k}}^{\frac{N}{2}-1}G(m,m)\hat{f}_l\hat{f}_m. \end{align} Because $G(m,m)$ depends only on $m$, the loss term $\hat{\mathcal{Q}}^-_k$ is actually a convolution of the functions $G(m,m)\hat{f}_m$ and $\hat{f}_l$. It can therefore be computed efficiently by FFT in $O(N^3\log N)$ operations, since convolution in the Fourier domain becomes multiplication in the original domain. What makes the total cost $O(N^6)$ is the gain term $\hat{\mathcal{Q}}^+_k$. Our goal is to find an approximation for $\hat{\mathcal{Q}}^+_k$ that can be expressed as a convolution. 
To this end, we seek an approximation of $G(l,m)$ in the following decoupled form: \begin{equation} \label{lowrank} G(l,m)\approx\sum_{p=1}^{N_p} \alpha_p(l+m)\beta_p(l)\gamma_p(m), \end{equation} where $\alpha_p$, $\beta_p$, and $\gamma_p$ are functions of $(l+m)$, $l$, and $m$ respectively, and $N_p \ll N^3$. Substitution of (\ref{lowrank}) into (\ref{Q+}) gives \begin{equation} \label{sum} \hat{\mathcal{Q}}^+_k\approx\sum_{\substack{l,m=-\frac{N}{2}\\l+m=k}}^{\frac{N}{2}-1}\sum_{p=1}^{N_p} \alpha_p(l+m)\beta_p(l)\gamma_p(m)\hat{f}_l\hat{f}_m=\sum_{p=1}^{N_p}\alpha_p(k)\sum_{\substack{l,m=-\frac{N}{2}\\l+m=k}}^{\frac{N}{2}-1}\left(\beta_p(l)\hat{f}_l\right)\left(\gamma_p(m)\hat{f}_m\right). \end{equation} The inner summation in (\ref{sum}) is a pure convolution of the two functions $\beta_p(l)\hat{f}_l$ and $\gamma_p(m)\hat{f}_m$ that can be computed in $O(N^3\log N)$ via FFT. This, together with the outer summation, results in a total cost of $O(N_pN^3\log N)$ for a single evaluation of $\mathcal{Q}^+$. To generate a suitable low-rank approximation of the form (\ref{lowrank}), we propose a simple solution in which, instead of precomputing all the weights $G(l,m)$ in (\ref{weight1}), we compute them partially ``on the fly" using a quadrature rule. Specifically, we rewrite $G(l,m)$ as \begin{equation} \label{weight2} G(l,m)=\int_0^R\int_{S^2}F(l+m,r,\omega)\, e^{i\frac{\pi}{L}r\frac{l}{2}\cdot \omega}\, e^{-i\frac{\pi}{L}r\frac{m}{2}\cdot \omega}\, \mathrm{d}{\omega}\,\mathrm{d}{r}, \end{equation} where \begin{equation} F(l+m,r,\omega)=r^2\int_{S^2}B\left(r,\omega\cdot \hat{g}\right)e^{-i\frac{\pi}{L}r\frac{(l+m)}{2}\cdot \hat{g}}\,\mathrm{d}{\hat{g}}. \end{equation} For each fixed $r$ and $\omega$, the integrand in (\ref{weight2}) is a product of three functions: one that depends on $(l+m)$, one that depends on $l$, and one that depends on $m$. This is exactly the desired form of (\ref{lowrank}). 
In order to maintain this structure, we carry out the integration in $r$ and $\omega$ using a fixed numerical quadrature. This yields \begin{equation} \label{form} G(l,m)\approx \sum_{r, \phi_1, \phi_2}w_{r}w_{\phi_1}w_{\phi_2} \sin\phi_2 \,F(l+m,r,\omega)e^{i\frac{\pi}{L}r\frac{l}{2}\cdot \omega}e^{-i\frac{\pi}{L}r\frac{m}{2}\cdot \omega}, \end{equation} where $\phi_1$ is the azimuthal angle, $\phi_2$ is the polar angle, and $w_{r}$, $w_{\phi_1}$ and $w_{\phi_2}$ represent the corresponding quadrature weights. Since the radial direction oscillates on the scale of $\frac{l-m}{2}$, the number of quadrature points in $r$ must be at least $O(N)$ in order to resolve this dimension. For the integration on the sphere, we anticipate that the total number $M$ of quadrature points needed is much less than $N^2$; this is confirmed in our numerical results. Thus we are able to obtain an admissible decomposition (\ref{lowrank}) of $G(l,m)$ with $N_p = O(MN) \ll N^3$. Substituting (\ref{form}) into (\ref{Q+}), we have \begin{align} \label{form1} \hat{\mathcal{Q}}^+_k&\approx\sum_{r, \phi_1, \phi_2}w_{r}w_{\phi_1}w_{\phi_2} \sin\phi_2 \, F(k,r,\omega)\sum_{\substack{l,m=-\frac{N}{2}\\l+m=k}}^{\frac{N}{2}-1}\left[ e^{i\frac{\pi}{L} r\frac{l}{2}\cdot \omega}\hat{f}_l \right] \left[e^{-i\frac{\pi}{L}r\frac{m}{2}\cdot \omega}\hat{f}_m\right]. \end{align} In summary, the proposed fast algorithm for a single evaluation of $\mathcal{Q}$ consists of the following steps: \begin{enumerate} \item[0.] precompute the weights $G(m,m)$ and $F(k,r,\omega)$ --- storage requirement $O(MN^4)$; \item[1.] compute $\hat{f}_k$ using FFT --- cost $O(N^3\log N)$; \item[2.] compute the loss term $\mathcal{Q}^-$ using FFT --- cost $O(N^3\log N)$; \item[3.] compute the gain term $\mathcal{Q}^+$ based on (\ref{form1}) using FFT --- cost $O(MN^4\log N)$; \item[4.] compute $\mathcal{Q}=\mathcal{Q}^+-\mathcal{Q}^-$ --- cost $O(N^3)$. 
\end{enumerate} Compared with the direct spectral method in the previous section, the new method saves both in memory storage (step 0) and computational time (step 3). For the $N=40$ case mentioned in Section 2, if we take $M=14$, the precomputed weights only require roughly $247$ megabytes. This is less than one percent of the memory required for the direct method. For the VHS model, the function $F$ does not depend on $\omega$ and has an analytical form \begin{equation} F(k,r)=4\pi \,b_{\gamma}\,r^{\gamma+2}\, \text{Sinc}\left(\frac{\pi}{L}r\frac{|k|}{2}\right). \end{equation} Thus no precomputation is needed in this case. In our numerical implementation, we use the Gauss-Legendre quadrature in the radial direction $r$, while for the integration in $\omega$ we propose to use the Lebedev quadrature \cite{LL99} which is the near-optimal quadrature on the sphere and requires fewer quadrature points than tensor product based Gauss quadratures for a large class of functions \cite{Beentjes15}. The Lebedev quadrature is designed to enforce the exact integration of spherical harmonics up to a given order and only a certain number of quadrature points are available. To gain a concrete idea of how many quadrature points $M$ are needed for our problem, note that in a typical numerical example where $N=32$, we only need $M=14$ to reach a relative $10^{-6}$ accuracy; a larger $M$ (say, $M=74$) may be needed when considering anisotropic distribution functions. Nevertheless, it is generally expected that $M\ll N^2$. A similar observation has been made for the fast spectral method in \cite{MP06, FMP06, Filbet12}. Although this method is based on a different representation of the collision integral (and restricted only to the hard sphere case), it also requires numerical discretization on a sphere.
\begin{remark} The method proposed above can be followed by a post-processing subroutine after each evaluation of the collision operator to strictly enforce the collision invariants in (\ref{consv}) \cite{GT09} for either scalar or system Boltzmann models, where it is shown that the solution of the scalar problem converges to the equilibrium Maxwellian state (\ref{Max}) \cite{AGT16}, or alternatively, adapted easily to preserve exactly the equilibrium Maxwellian state as proposed in \cite{FPR15}. Since the goal in this paper is to present a simple strategy to accelerate the computation of the weighted convolution (\ref{WC}) in the direct spectral method without sacrificing spectral accuracy, we will mainly focus on the proposed method itself in the following numerical examples and leave the investigation of aforementioned extensions to future work. \end{remark} \section{Numerical examples} In this section, we perform a series of numerical tests to validate the accuracy and efficiency of the proposed method. In the first test, we compare results of the new method to the Bobylev-Krook-Wu (BKW) solution \cite{Bobylev75_1, KW77}, which is constructed for Maxwell-type interactions (i.e., $\gamma=0$ in (\ref{VHS})) and is one of the few analytical solutions available for the Boltzmann equation. In the second test, we again consider Maxwell molecules, but assume an initial condition that is anisotropic in $v$. In this case, there is no analytical solution for the full distribution function, but exact formulas for higher order moments such as the momentum flow tensor \begin{equation} \label{P} P_{ij}=\int_{\mathbb{R}^3} f v_i v_j\,\mathrm{d}{v}, \quad i,j=1,2,3, \end{equation} and the energy flow vector \begin{equation} \label{q} q_{i}=\frac{1}{2}\int_{\mathbb{R}^3} f v_i v^2\,\mathrm{d}{v}, \quad i=1,2,3 \end{equation} can be derived. We will also test our method for the hard sphere case by comparing it with the direct spectral method. 
Finally, we illustrate the generality of our method by considering a more realistic, angularly dependent collision kernel. In the following, ``direct spectral" refers to the direct spectral method, and ``fast spectral" refers to the new method proposed in this paper. The implementation is in MATLAB and all numerical results are obtained on a laptop computer (MacBook Pro, 3.0GHz Dual-core Intel Core i7 with 8GB memory). Further acceleration can be achieved by careful implementation in C or Fortran. \subsection{Maxwell molecules -- BKW solution} When the collision kernel $B=1/(4\pi)$ is a constant, one can construct an exact solution to the spatially homogeneous Boltzmann equation \begin{equation} \label{HCBE} \frac{\partial f}{\partial t}=\mathcal{Q}(f), \quad t>0, \ v\in \mathbb{R}^3. \end{equation} This solution takes the form \begin{equation} \label{ext} f(t,v)=\frac{1}{2(2\pi K(t))^{3/2}} \exp\left(-\frac{v^2}{2K(t)}\right)\left(\frac{5K(t)-3}{K(t)}+\frac{1-K(t)}{K^2(t)}v^2\right), \end{equation} where $K(t)=1-\exp(-t/6)$. The initial time $t_0$ has to be greater than $6\ln (5/2) \approx 5.498$ for $f$ to be positive. We take $t_0=5.5$. Since $f$ given in (\ref{ext}) satisfies (\ref{HCBE}) exactly, the time derivative of $f$ gives \begin{multline} \label{extQ} \mathcal{Q}(f) \equiv \frac{\partial f}{\partial t} = \left\{\left( -\frac{3}{2K} +\frac{v^2}{2K^2} \right) f + \frac{1}{2(2\pi K)^{3/2}}\exp\left(-\frac{v^2}{2K}\right) \left( \frac{3}{K^2}+\frac{K-2}{K^3}v^2\right)\right\}K', \end{multline} where $K'(t)=\exp(-t/6)/6$. Using (\ref{extQ}), we can verify the accuracy of the proposed method without introducing additional time discretization error. We pick an arbitrary time $t=6.5$ and compare the numerical error and evaluation time of the direct and fast spectral methods. 
The results are shown in Tables \ref{table3} and \ref{table4}, from which we see that only 14 points are needed on the sphere for the fast method to obtain comparable accuracy to the direct method. Meanwhile, the speedup is about a factor of 300, even for the moderate value $N=32$. When $N=64$, the direct method requires too much storage for precomputed weights to fit within the available memory; it is therefore omitted. This restriction highlights the advantage of the proposed method in terms of memory. \begin{table}[h!] \centering \begin{tabular}{ c | c | c } \hline $N$ & direct spectral & fast spectral $M=14$ \\ \hline $8$ & 6.91e-04 &7.33e-04 \\ $16$&7.83e-05 &7.63e-05 \\ $32$& 3.90e-08 &3.90e-08 \\ $64$ & --- & 3.81e-08 \\ \hline \end{tabular} \caption{$\|\mathcal{Q}^{\text{num}}(f)-\mathcal{Q}^{\text{ext}}(f)\|_{L^{\infty}}$ evaluated at $t=6.5$. $N$ is the number of discretization points in each velocity dimension. In the fast spectral method, $N$-point Gauss quadrature is used in the radial direction and $M=14$ Lebedev rule is used for the sphere integration. We set $R=6$ (integration range), and $L=(3+\sqrt{2})R/4\approx6.62$ (computational domain).} \label{table3} \end{table} \begin{table}[h!] \centering \begin{tabular}{ c | c | c } \hline $N$ & direct spectral & fast spectral $M=14$ \\ \hline $8$ & 0.09s & 0.14s \\ $16$& 6.31s &0.26s \\ $32$& 542.34s & 1.78s \\ $64$ & --- &33.15s \\ \hline \end{tabular} \caption{Average running time for one time evaluation of the collision operator. Same parameters as in Table \ref{table3}.} \label{table4} \end{table} We next couple the fast collision solver with time discretization to numerically solve the Boltzmann equation (\ref{HCBE}). A $4$th-order Runge-Kutta method is used to ensure that the temporal error does not pollute the spectral accuracy in velocity. The results are shown in Figure \ref{figure2}. The fast method basically produces very similar results to the direct method. 
Other norms behave similarly, but are omitted for brevity. \begin{figure} \caption{Time evolution of $\|f^{\text{num} \label{figure2} \end{figure} \subsection{Maxwell molecules -- moments} Consider again the constant collision kernel $B=1/(4\pi)$. For the initial condition \begin{equation} f(0,v)=\frac{1}{2(2\pi)^{3/2}}\left\{ \exp\left(-\frac{(v-u_1)^2}{2}\right) + \exp\left(-\frac{(v-u_2)^2}{2}\right) \right\}, \end{equation} with $u_1=(-2,2,0)$ and $u_2=(2,0,0)$, the exact formulas for the non-zero components of $P$ and $q$ (cf. \eqref{P} and \eqref{q}) are given by \begin{align} &P_{11}=\frac{7}{3}\exp\left(-\frac{t}{2} \right)+\frac{8}{3}, \quad P_{22}=-\frac{2}{3}\exp\left(-\frac{t}{2} \right)+\frac{11}{3}, \nonumber\\ &P_{33}=-\frac{5}{3}\exp\left(-\frac{t}{2} \right)+\frac{8}{3}, \quad P_{12}=-2\exp\left(-\frac{t}{2} \right), \end{align} and \begin{equation} q_1=-2\exp\left(-\frac{t}{2} \right), \quad q_2=-\frac{2}{3}\exp\left(-\frac{t}{2} \right)+\frac{43}{6}. \end{equation} In Figure \ref{figure3}, we compare the results of the fast method with the formulas above. Because the solution is anisotropic in $v$, we need a larger value of $M$ than before to obtain reasonable accuracy. We find that $M=74$, which is still much less than $N^2=1024$, is sufficient to obtain roughly three digits of accuracy for moments. In Figure \ref{figure3_1}, we plot differences in the solution of the distribution function computed with the fast method and the direct method for different values of $M$. (Since the exact distribution function is not known, we use the solution of the direct method as a reference.) We observe that the error decreases quickly as $M$ increases. \begin{figure} \caption{Maxwell molecules. Time evolution for higher order moments. In each figure, the left scale shows the result by the fast spectral method, and the right scale shows its difference from the exact solution. RK4 with $\Delta t=0.3$ for time discretization. $N=32$ in each velocity dimension. 
In the fast method, $N=32$ in radial direction and $M=74$ for sphere integration. $R=10$, $L=(3+\sqrt{2} \label{figure3} \end{figure} \begin{figure} \caption{$f^{\text{fast} \label{figure3_1} \end{figure} \subsection{Hard sphere molecules -- moments} We next consider the same example as in the previous subsection, but for hard sphere molecules. That is, the collision kernel (\ref{VHS}) is assumed to be \begin{equation} B=\frac{1}{4\pi}|v-v_*|. \end{equation} In this case, there is no exact formula for either the distribution function or its higher order moments. Therefore, we use the direct spectral method as a reference solution and compare it with the new fast method. The results are plotted in Figure \ref{figure4}, from which we again observe roughly three digits of accuracy for moments. \begin{figure} \caption{Hard sphere molecules. Time evolution for higher order moments. In each figure, the left scale shows the result by the fast spectral method, and the right scale shows the difference between the fast and the direct method. RK4 with $\Delta t=0.3$ for time discretization. $N=32$ in each velocity dimension. In the fast method, $N=32$ in radial direction and $M=74$ for sphere integration. $R=10$, $L=(3+\sqrt{2} \label{figure4} \end{figure} \subsection{Angularly dependent collision kernel} Our final numerical test involves the variable soft sphere (VSS) model [18], which is widely used in DSMC calculations. The model has a collision kernel with both velocity and angular dependence: \begin{equation} B=b_{\gamma,\eta}|v-v_*|^{\gamma}(1+\cos \theta)^{\eta}, \end{equation} where $b_{\gamma,\eta}$ is a positive constant and $\cos \theta$ is given in \eqref{CK}. Setting $\gamma=0.38$, $\eta=0.4$, and $b_{\gamma,\eta} = 1/(4\pi)$, we perform the same test as in Section 4.2 using the same set of discretization parameters. 
\footnote{The choice of $\gamma$ and $\eta$ corresponds to argon gas \cite{Bird} while the choice of $b_{\gamma,\eta}$, which has no effect on the efficiency of the algorithm, is simply a matter of convenience. In general, these values are composite parameters \cite{WA15} that are used to tune the kernel in order to reproduce experimentally measured values for viscosity and diffusion. A careful comparison of our method with DSMC for various benchmark examples will be the subject of future work (for which all physical parameters and spatial discretization will need to be included).} In Figure \ref{figure5}, we plot the results for the fast method. Similar results for the direct method are omitted because the time it takes to precompute the weights $G(l,m)$ for this model is prohibitive. For the fast method, this step takes only a few hours. \begin{figure} \caption{Argon molecules. Time evolution for higher order moments computed by the fast spectral method. RK4 with $\Delta t=0.3$ for time discretization. $N=32$ in each velocity dimension, $N=32$ in radial direction and $M=74$ for sphere integration. $R=10$, $L=(3+\sqrt{2} \label{figure5} \end{figure} \section{Conclusion} A simple, fast spectral method for the Boltzmann collision operator has been proposed in this paper. The method is designed to accelerate the direct method and to relieve the memory bottleneck in its precomputation stage. Through a series of examples, we have demonstrated that the proposed method can be orders of magnitude faster than the direct method while maintaining a comparable level of accuracy. Furthermore, unlike existing fast spectral methods that can treat only hard sphere molecules, the proposed method is applicable to general collision kernels with both velocity and angular dependence. Ongoing work includes a more careful analysis of spherical quadratures errors and the development of adaptive quadratures to further improve the method. \end{document}
\begin{document} \title[The Binary $\Theta_3$-closed Matroids]{A binary-matroid analogue of a graph connectivity theorem of Jamison and Mulder} \author{Cameron Crenshaw} \address{Mathematics Department\\ Louisiana State University\\ Baton Rouge, Louisiana} \email{[email protected]} \author{James Oxley} \address{Mathematics Department\\ Louisiana State University\\ Baton Rouge, Louisiana} \email{[email protected]} \begin{abstract} Jamison and Mulder characterized the set of graphs that can be built from cycles and complete graphs via 1-sums and parallel connections as those graphs $G$ such that, whenever two vertices $x$ and $y$ of $G$ are joined by three internally disjoint paths, $x$ and $y$ are adjacent. This paper proves an analogous result for the set of binary matroids constructible from direct sums and parallel connections of circuits, complete graphs, and projective geometries. \end{abstract} \begin{abstract} Let $G$ be a graph such that, whenever two vertices $x$ and $y$ of $G$ are joined by three internally disjoint paths, $x$ and $y$ are adjacent. Jamison and Mulder determined that the set of such graphs coincides with the set of graphs that can be built from cycles and complete graphs via 1-sums and parallel connections. This paper proves an analogous result for binary matroids. \end{abstract} \maketitle \section{Introduction} \label{introduction} Jamison and Mulder~\cite{JM} defined a graph $G$ to be \emph{$\Theta_3$-closed} if, whenever distinct vertices $x$ and $y$ of $G$ are joined by three internally disjoint paths, $x$ and $y$ are adjacent. For disjoint graphs $G_1$ and $G_2$, a 1-\emph{sum} of $G_1$ and $G_2$ is a graph that is obtained by identifying a vertex of $G_1$ with a vertex of $G_2$. Following Jamison and Mulder, we define a \emph{$2$-sum} of $G_1$ and $G_2$ to be a graph that is obtained by identifying an edge of $G_1$ with an edge of $G_2$.
Note that, in contrast to some other definitions of this operation, we retain the identified edge as an edge of the resulting graph. The main result of Jamison and Mulder's paper is the following. \begin{theorem} \label{JMtheorem} A connected graph $G$ is $\Theta_3$-closed if and only if $G$ can be built via $1$-sums and $2$-sums from cycles and complete graphs. \end{theorem} This paper generalizes Theorem~\ref{JMtheorem} to binary matroids; all matroids considered here are binary unless stated otherwise. The terminology and notation follow~\cite{oxley} with the following additions. We will use $P_r$ to denote the rank-$r$ binary projective geometry, $PG(r-1,2)$. A \emph{theta-graph} is a graph that consists of two distinct vertices and three internally disjoint paths between them. A theta-graph in a matroid $M$ is a restriction of $M$ that is isomorphic to the cycle matroid of a theta-graph. Equivalently, it is a restriction of $M$ that is isomorphic to a matroid that is obtained from $U_{1,3}$ by a sequence of series extensions. The series classes of a theta-graph are its \emph{arcs}. Let $T$ be a theta-graph of $M$ with arcs $A_1$, $A_2$, and $A_3$. If $M$ has an element $e$ such that, for every $i$, either $A_i\cup e$ is a circuit of $M$, or $A_i=\{e\}$, then $e$ \emph{completes} $T$ in $M$, and $T$ is said to be \emph{complete}. A matroid $M$ is \emph{matroid $\Theta_3$-closed} if every theta-graph of $M$ is complete. The next theorem is the main result of this paper. \begin{theorem} \label{mainresult} A matroid $M$ is matroid $\Theta_3$-closed if and only if $M$ can be built via direct sums and parallel connections from circuits, cycle matroids of complete graphs, and projective geometries. \end{theorem} Suppose $M$ is isomorphic to the cycle matroid of a graph $G$. Two vertices in $G$ that are joined by three internally disjoint paths are adjacent via an edge $e$ exactly when the corresponding theta-graph of $M$ is completed by $e$. 
In other words, $G$ is $\Theta_3$-closed if and only if $M$ is matroid $\Theta_3$-closed. This allows us to refer to $M$ as \emph{$\Theta_3$-closed} without ambiguity. We will denote the class of $\Theta_3$-closed matroids by $\Theta_3$. Section~\ref{prelims} introduces supporting results. The 3-connected matroids that are $\Theta_3$-closed are characterized in Section~\ref{3conn}, and the proof of Theorem~\ref{mainresult} appears in Section~\ref{main}. \section{Preliminaries} \label{prelims} Our first proposition collects some essential properties of $\Theta_3$-closed matroids. These properties will be used frequently and often implicitly. \begin{proposition} \label{basics} If $M\in \Theta_3$, then \begin{itemize} \item[(i)]{$\si(M)\in\Theta_3$;} \item[(ii)]{$M\vert F\in\Theta_3$ for every flat $F$ of $M$; and} \item[(iii)]{$M/e\in \Theta_3$ for every $e\in E(M)$.} \end{itemize} \end{proposition} \begin{proof} Parts (i) and (ii) are straightforward. For part (iii), let $T$ be a theta-graph of $M/e$. Then $[(M/e)\vert T]^\ast$ is obtained from $U_{2,3}$ by adding elements in parallel to the existing elements. Since $M$ is binary, it follows that $M^\ast/(E(M)-(T\cup e))$ is obtained from $[(M/e)\vert T]^\ast$, that is, from $M^\ast/(E(M)-(T\cup e))\backslash e$, by adding $e$ as a coloop or by adding $e$ in parallel to one of the existing elements. Thus, $e$ is either a loop in $M\vert (T\cup e)$, or is in series with another element. Hence, since $T$ is complete in $M$, it is complete in $M/e$. \end{proof} Evidently, a matroid is in $\Theta_3$ if and only if its connected components are in $\Theta_3$. This will also be used implicitly throughout the paper. The following is an immediate consequence of Proposition~\ref{basics}. \begin{corollary} \label{parallelminorclosed} If $M\in\Theta_3$ and $N$ is a parallel minor of $M$, then $N\in\Theta_3$.
\end{corollary} From, for example,~\cite[Exercise 8.3.3]{oxley}, if $M=M_1\oplus_2 M_2$, then $M_1$ and $M_2$ are parallel minors of $M$. The next result now follows from Corollary~\ref{parallelminorclosed}. \begin{corollary} \label{2summands} If $M\oplus_2 N$ is in $\Theta_3$, then $M$ and $N$ are in $\Theta_3$. \end{corollary} To see that the converse of the last corollary fails, observe that $M(K_{2,4})$ is not in $\Theta_3$ although it is the 2-sum of two copies of a matroid in $\Theta_3$. We conclude this section with a result about constructing larger matroids in $\Theta_3$ from smaller ones. Recall that, for sets $X$ and $Y$ in a matroid $M$, the \emph{local connectivity} between $X$ and $Y$, denoted $\sqcap(X,Y)$, is defined by $\sqcap(X,Y)=r(X)+r(Y)-r(X\cup Y)$. We will use the following result about local connectivity from, for example,~\cite[Lemma 8.2.3]{oxley}. \begin{lemma} \label{staplelemma} Let $X_1$, $X_2$, $Y_1$, and $Y_2$ be subsets of the ground set of a matroid $M$. If $X_1\supseteq Y_1$ and $X_2\supseteq Y_2$, then $\sqcap(X_1,X_2)\geq\sqcap(Y_1,Y_2)$. \end{lemma} \begin{proposition} \label{parconnprop} For matroids $M$ and $N$, the parallel connection $P(M,N)$ is in $\Theta_3$ if and only if $M\in \Theta_3$ and $N\in \Theta_3$. \end{proposition} \begin{proof} Let $p$ be the basepoint of the parallel connection. When $p$ is a loop or a coloop of $M$, the matroid $P(M,N)$ is $M\oplus (N/p)$ or $(M/ p)\oplus N$, respectively. In these cases, it follows using Proposition~\ref{basics} that the result holds. Thus we may assume that $p$ is neither a loop nor a coloop of $M$ or $N$. Suppose $P(M,N)\in\Theta_3$. Let $B_M$ be a basis for $M$ containing $p$. Extend $B_M$ to a basis $B$ for $P(M,N)$. After contracting both the elements of $B-B_M$ in $P(M,N)$ as well as all of the resulting loops, the remaining elements of $E(N)-p$ are parallel to $p$.
We deduce that $M$, and similarly $N$, is a parallel minor of $P(M,N)$. Hence, by Corollary~\ref{parallelminorclosed}, $M$ and $N$ are in $\Theta_3$. Conversely, suppose that $M, N\in \Theta_3$ and let $T$ be a theta-graph of $P(M,N)$ with arcs $A_1$, $A_2$, and $A_3$. Then we may assume that $\vert A_i\vert\geq 2$ for each $i$; otherwise $T$ is complete. Suppose $p\in A_1$. Then $A_1\cup A_2$ is a circuit containing $p$, so it is contained in $E(M)$ or $E(N)$ depending on which of these sets contains $A_1-p$. It follows that the same set contains $A_2$ and, likewise, $A_3$, so $T$ is complete. Hence we may assume that $p\notin T$. Suppose that each of the arcs of $T$ meets both $E(M\backslash p)$ and $E(N\backslash p)$. Let $T_M=E(T)\cap E(M)$, and similarly for $T_N$. Note that $T_M$ and $T_N$ are independent, and $T_M\cup T_N=E(T)$, so \begin{align*} \sqcap(T_M,T_N)&=r(T_M)+r(T_N)-r(T_M\cup T_N)\\ &=\vert T_M\vert + \vert T_N\vert - (\vert T_M\vert + \vert T_N\vert - 2)\\ &=2. \end{align*} However, $\sqcap(E(M),E(N))=1$, contradicting Lemma~\ref{staplelemma}. Next, suppose that each of $A_1$ and $A_2$ meets both $E(M\backslash p)$ and $E(N\backslash p)$. Then, from above, we may assume that $A_3\subseteq E(M\backslash p)$. The circuits $A_1\cup A_3$ and $A_2\cup A_3$ have the form $(C_1-p)\cup(D_1-p)$ and $(C_2-p)\cup(D_2-p)$, respectively, for circuits $C_1$ and $C_2$ of $M$ containing $p$, and circuits $D_1$ and $D_2$ of $N$ containing $p$. Because $A_3\subseteq E(M)$ and $A_1\cap A_2=\emptyset$, it follows that $D_1-p$ and $D_2-p$ are disjoint. However, since $M$ is binary, $D_1\triangle D_2$ contains a circuit of $P(M,N)$ that is properly contained in the circuit $A_1\cup A_2$, a contradiction. Now, suppose that $A_1$ meets both $E(M\backslash p)$ and $E(N\backslash p)$. Then, from above, each of the remaining arcs of $T$ lies in $E(M\backslash p)$ or $E(N\backslash p)$.
We may assume that $A_2\subseteq E(M\backslash p)$. Suppose $A_3\subseteq E(N\backslash p)$. Then the circuits $A_1\cup A_2$ and $A_3\cup A_2$ have the form $(C_1-p)\cup (D_1-p)$ and $(C_3-p)\cup(D_3-p)$, respectively, for circuits $C_1$ and $C_3$ of $M$ containing $p$, and circuits $D_1$ and $D_3$ of $N$ containing $p$. Now, since $A_2\subseteq E(M\backslash p)$ and $A_1$ meets $E(M\backslash p)$, the set $C_1-p$ properly contains $A_2$. Further, as $A_3$ does not meet $E(M\backslash p)$, we have that $A_2=C_3-p$. This means $A_2\cup p$ is the circuit $C_3$, but $A_2\cup p$ is properly contained in $C_1$, a contradiction. We conclude that $A_3\subseteq E(M\backslash p)$. Form $T'$ from $T$ by replacing the portion of $A_1$ in $E(N\backslash p)$ by $p$. Observe that $T'$ is isomorphic to a series minor of $T$, so $T'$ is a theta-graph. Moreover, $T'$ is a theta-graph of $M$, so it is completed in $M$ by an element $f$. Now, since $T$ and $T'$ share an arc, $f$ also completes $T$ in $P(M,N)$. We are left to consider the case when each arc of $T$ is contained in either $E(M)$ or $E(N)$. If all three arcs belong to $E(M)$, say, then $T$ is complete in $M$, and so is complete in $P(M,N)$. Otherwise, $p$ completes $T$. \end{proof} \section{The $3$-Connected $\Theta_3$-closed Matroids} \label{3conn} The proof of Theorem~\ref{mainresult} will use the canonical tree decomposition of Cunningham and Edmonds~\cite{cunned} and, in support of that approach, this section proves the following 3-connected form of Theorem~\ref{mainresult}. \begin{theorem} \label{mainres3conn} Let $M$ be a simple $3$-connected $\Theta_3$-closed matroid. Then $M$ is a projective geometry or the cycle matroid of a complete graph. \end{theorem} The proof of this theorem relies on the next two propositions.
\begin{proposition} \label{mkorpgprop} If $M$ is a simple matroid in $\Theta_3$ and $M$ has a spanning $M(K_{r+1})$-restriction, then $M\cong M(K_{r+1})$ or $M\cong P_r$. \end{proposition} \begin{proof} Take a standard binary representation for $P_r$, and view $M$ as the restriction of $P_r$ to the set $X$ of vectors. Recall that the number of nonzero entries of a vector is its \emph{weight}, and that the \emph{distance} between two vectors is the number of coordinates upon which they disagree. Because $M$ has an $M(K_{r+1})$-restriction, we may assume that $X$ contains the set $Z$ of vectors of weight one or two. We may assume that $Z\neq X$. Then $M$ has an element $e$ of weight at least three. We shall establish that $M\cong P_r$ by proving the following three assertions. \begin{itemize} \item[(i)]{$M$ has an element of weight three;} \item[(ii)]{if the matroid $M$ has every element of weight $k-1$ and an element of weight $k$, for some $k$ exceeding two, then $M$ has every element of weight $k$; and} \item[(iii)]{if $M$ has every element of weight $k$, where $3\leq k< r$, then $M$ has an element of weight $k+1$.} \end{itemize} Let $e_i$ denote the weight-1 element whose nonzero entry is in the $i$th position. To show (i), we may assume $e$ has weight $k\geq 4$. Say $e=e_1+e_2+\cdots+e_k$. Let $Y=\{e,e_1,e_2,e_4,e_5,\dots,e_k,e_1+e_3,e_2+e_3\}$. Then $M\vert Y$ is a theta-graph having arcs $\{e,e_4,e_5,\dots,e_k\}$, $\{e_1,e_2+e_3\}$, and $\{e_2,e_1+e_3\}$. This theta-graph forces $e_1+e_2+e_3$ to be an element of $M$, so (i) holds. To prove (ii), we may assume $k<r$. Suppose $g$ is an element of weight $k$ not in $M$, and let $f$ be an element of weight $k$ in $M$ with minimum distance from $g$. Let $s$ label a row where $f$ is 1 and $g$ is 0, and let $t$ label a row where $g$ is 1 and $f$ is 0. Next, as $k\geq 3$, there are two additional rows, $u$ and $v$, distinct from $s$ where $f$ is 1. 
Now, the set $\{f,e_u,e_v,e_s+e_t\}$ is independent, so the arcs $\{f, e_s+e_t\}$, $\{e_u, f+e_u+e_s+e_t\}$, and $\{e_v,f+e_v+e_s+e_t\}$ form a theta-graph in $M$. This theta-graph implies that $f+e_s+e_t$ belongs to $M$. However, $f+e_s+e_t$ has weight $k$ and is a smaller distance from $g$ than $f$, a contradiction. Thus (ii) holds. Finally, let $f$ be an element of weight $k+1$ for some $k$ with $3\leq k<r$. By symmetry, we may assume that the set of rows in which $f$ is nonzero contains $\{1,2,3\}$. Then $\{f,e_1,e_2,e_3\}$ is independent in $M$, and the sets $\{e_1,f+e_1\}$, $\{e_2,f+e_2\}$, and $\{e_3,f+e_3\}$ are the arcs of a theta-graph in $M$. This theta-graph shows that $f$ belongs to $M$. Thus (iii) holds. Hence the proposition holds as well. \end{proof} The second proposition that we use to prove Theorem~\ref{mainres3conn} will follow from the following three results. \begin{lemma} \label{pglift} Let $M$ be a simple rank-$r$ matroid in $\Theta_3$. Suppose that \begin{itemize} \item[(i)]$r\geq 4$; \item[(ii)] $E(M)$ has a subset $P$ such that $M\vert P\cong P_{r-1}$; and \item[(iii)]$E(M)-P$ contains at least three elements. \end{itemize} Then $M\cong P_r$. \end{lemma} \begin{proof} View $M$ as a restriction of $P_r$, and let $\{e,f,g\}$ be a subset of $E(M)-P$. Let $p$ be a point in $E(P_r)- P$ that is not in $\{e,f,g\}$. Observe that, for each $x$ in $\{e,f,g\}$, the third point on the line in $P_r$ containing $\{x,p\}$ is in $P$. Thus there are three lines of $M$ that meet at $p$. Provided $p$ is not coplanar with $\{e,f,g\}$, these lines define a theta-graph in $M$ that is completed by $p$, so $p$ is in $M$. It remains to show that the point $q$ of $E(P_r)- (P\cup \{e,f,g\})$ that is coplanar with $\{e,f,g\}$ belongs to $M$. But one easily checks that $P_r\backslash q$ is not in $\Theta_3$ when $r\geq 4$. Thus $M\cong P_r$. \end{proof} \begin{corollary} \label{prfromf7s} Let $M$ be a simple rank-$r$ matroid in $\Theta_3$ with $r\geq 3$. 
If $M$ has a basis $B$ and an element $b$ in $B$ so that, for each $\{x,y\}\subseteq B-b$, the set $\{b,x,y\}$ spans an $F_7$-restriction of $M$, then $M\cong P_r$. \end{corollary} \begin{proof} Let $B=\{b_1,b_2,\dots,b_r\}$ with $b=b_1$. If $r=3$, then the result is immediate, so suppose $r\geq 4$. By induction, $M\vert\cl(B-b_r)$ is isomorphic to $P_{r-1}$. Since $M\vert\cl(\{b_1,b_2,b_r\})\cong F_7$, we see that this restriction contains an independent set of three elements that avoids $\cl(B-b_r)$. Lemma~\ref{pglift} now implies that $M\cong P_r$. \end{proof} The next result was proved by McNulty and Wu~\cite[Lemma 2.10]{mcnultywu}. \begin{lemma} \label{connhyp} Let $M$ be a $3$-connected binary matroid with at least four elements. Then, for any two distinct elements $e$ and $f$ of $M$, there is a connected hyperplane containing $e$ and avoiding $f$. \end{lemma} For a simple binary matroid $M$, we now define the smallest $\Theta_3$-closed matroid whose ground set contains $E(M)$. Let $M_0=M$ and $r(M)=r$. Suppose $M_0, M_1, \dots, M_k$ have been defined. The simple binary matroid $M_{k+1}$ is obtained from $M_k$ by ensuring that, whenever $T$ is an incomplete theta-graph of $M_k$, the element $x$ that completes $T$ is in $E(M_{k+1})$. Since each $M_i$ is a restriction of $P_r$, there is a $j$ for which $M_{j+1}=M_j$. When this first occurs, we call $M_j$ the \emph{$\Theta_3$-closure} of $M$. Evidently this is well defined. By associating $M$ with its ground set, the $\Theta_3$-closure is a closure operator (but not necessarily a matroid closure operator) on the set of subsets of the ground set of any projective geometry containing $M$. \begin{proposition} \label{pginflater} Let $M$ be a simple $3$-connected matroid in $\Theta_3$, and let $k$ be an integer exceeding two. If $M$ has a simple minor $N$ whose $\Theta_3$-closure is $P_k$, then $M$ is a projective geometry.
\end{proposition} \begin{proof} Take subsets $X$ and $Y$ of $E(M)$ such that $M/X\backslash Y=N$ with $X$ independent and $Y$ coindependent in $M$. The matroid $M/X$ is in $\Theta_3$ and has $N$ as a spanning restriction. Therefore $M/X$ has $P_k$ as a restriction, so $P_k$ is a minor of $M$. From here, the proof is by induction on the rank, $r$, of $M$. If $r=k$, the result is immediate, so assume $r>k$. By Seymour's Splitter Theorem, $P_k$ can be obtained from $M$ by a sequence of single-element contractions and deletions, all while staying 3-connected. Let $e$ be the first element that is contracted in this sequence. Note that $\si(M/e)$ is a 3-connected member of $\Theta_3$ that has $P_k$ as a minor. By induction, $\si(M/e)\cong P_{r-1}$. Fix an embedding of $M$ in $P_{r}$. We may assume that $M\not\cong P_r$. Then some line $\ell$ of $P_{r}$ through $e$ is not contained in $E(M)$. For each subset $Z$ of $E(P_r)$, let $\cl_P(Z)$ be its closure in $P_r$. Since $\si(M/e)\cong P_{r-1}$, there is an element $s$ of $E(M)$ that is in $\ell-\{e\}$. Let $t$ be the point of $P_{r}$ in $\ell-\{e\}$ that is not in $E(M)$. \begin{sublemma} \label{ellpyramids} Let $F$ be a rank-$4$ flat of $M$ containing $\ell-t$. Then $M\vert F$ is isomorphic to one of $P(F_7,U_{2,3})$ or $F_7\oplus U_{1,1}$ where the $F_7$-restriction of $M\vert F$ contains $s$ but avoids $e$. \end{sublemma} \begin{figure} \caption{In the proof of~\ref{ellpyramids}.} \label{ellplanes} \end{figure} To see this, first note that, by Proposition~\ref{basics}(ii), $M\vert F$ is in $\Theta_3$. Recall that each line of $\cl_P(F)$ through $e$ contains another point of $F$. Then there are three planes, $\pi_1$, $\pi_2$, and $\pi_3$, of $M\vert F$ containing $\ell-t$. Let $\mathcal{P}$ be this set of planes. Therefore, each plane in $\mathcal{P}$ has at least one pair of points that are not on $\ell$ such that these two points are collinear with either $s$ or $t$.
Call such a pair of points a \emph{target pair}. The rest of the proof frequently relies on finding theta-graphs, particularly in rank 4. It may be helpful to note that such a theta-graph will be isomorphic to $M(K_{2,3})$. Geometrically, this is three non-coplanar lines, each full except for a shared point. This common point completes the theta-graph. Suppose $\pi_1$ has a target pair collinear with $t$. Note that if two distinct planes in $\mathcal{P}$ each have a target pair collinear with $t$, then these pairs, along with $\{e,s\}$, are the arcs of an incomplete theta-graph in $M\vert F$, an impossibility. Consequently, neither $\pi_2$ nor $\pi_3$ has a target pair collinear with $t$, so both have the form in Figure~\ref{ellplanes}. The restriction of $M$ to $\pi_2\cup \pi_3$ is given in Figure~\ref{pi2pi3}. The plane $\pi_1$ adds a pair of points collinear with $t$ to $M\vert (\pi_2\cup\pi_3)$, and one readily checks that the addition of this pair gives a restriction of $M\vert F$ that is isomorphic to $F_7\oplus_2 U_{2,3}$. A theta-graph of $M\vert F$ now gives that $M\vert F$ has a restriction isomorphic to $P(F_7,U_{2,3})$. It follows that $M\vert F\cong P(F_7, U_{2,3})$; otherwise, by Lemma~\ref{pglift}, $M\vert F\cong P_4$ and we obtain the contradiction that $t\in F$. We may now suppose that no $\pi_i$ has a target pair collinear with $t$. It follows that each target pair in each $\pi_i$ is collinear with $s$, and that $e$ is the only element of $F$ outside of the target pairs. Observe that the target pairs must be coplanar as, otherwise, we can find an incomplete theta-graph in $M\vert F$. Thus $M\vert F\cong F_7\oplus U_{1,1}$. Noting that $e$ is not in the $F_7$-restriction of $M\vert F$, we conclude that~\ref{ellpyramids} holds. \begin{figure} \caption{The matroid $M\vert(\pi_2\cup\pi_3)$ in the proof of~\ref{ellpyramids}.} \label{pi2pi3} \end{figure} Since $M$ is 3-connected, it follows from~\ref{ellpyramids} that $r\geq 5$.
By Lemma~\ref{connhyp}, there is a connected hyperplane $H$ of $M$ containing $s$ and avoiding $e$. Let $s=b_1$ and let $\{b_1,b_2,\dots,b_{r-1}\}$ be a basis $B$ of $M\vert H$. For distinct elements $i$ and $j$ of $\{2,3,\dots,r-1\}$, let $F_{i,j}$ denote the rank-4 flat $M\vert\cl(\{e,s,b_i,b_j\})$ of $M$. By~\ref{ellpyramids}, $M\vert F_{i,j}$ is isomorphic to $F_7\oplus U_{1,1}$ or $P(F_7,U_{2,3})$. Let $X$ be the subset of $F_{i,j}$ such that $M\vert X\cong F_7$, and recall that $e\not\in X$ and $s\in X$. The hyperplane $H$ either contains $X$ or meets $X$ along one of the lines $\cl(\{s,b_i\})$ or $\cl(\{s,b_j\})$. We deduce the following. \begin{sublemma} \label{sswitcher} For each pair $\{i,j\} \subseteq \{2,3,\dots,r-1\}$, at least one of $s+b_i$ or $s+b_j$ is in $E(M)$. \end{sublemma} Suppose $s+b_2$ is not in $E(M)$. By~\ref{sswitcher}, the element $s+b_i$ belongs to $E(M)$ for every $i$ in $\{3,4,\dots,r-1\}$. Consequently, for each pair $\{i,j\}$ in $\{3,4,\dots,r-1\}$, the hyperplane $H$ contains the copy of $F_7$ in $M\vert F_{i,j}$, and this $F_7$ is spanned by $\{s,b_i, b_j\}$. Corollary~\ref{prfromf7s} now implies that $M\vert \cl(B-b_2)\cong P_{r-2}$. Now, since $M\vert H$ is connected, $H$ contains an element $f$ that is not in $\cl(B-b_2)\cup b_2$. The line $\cl(\{b_2,f\})$ meets the projective geometry $\cl(B-b_2)$, so $b_2+f$ is also in $M$. Now consider $Y=\cl(\{e,s,b_2,b_2+f\})$. The intersection $H\cap Y$ contains the line $\{b_2,f,b_2+f\}$ and also the element $s$. By applying~\ref{ellpyramids} to $M\vert Y$, we see that $H\cap Y$ is an $F_7$-restriction containing $s$ and $b_2$. Since $s+b_2$ is not in $E(M)$, we have a contradiction. We conclude that $s+b_i$ is in $E(M)$ for every $i$ in $\{2,3,\dots,r-1\}$. The flat $M\vert F_{i,j}$ now meets $H$ in an $F_7$-restriction for every pair $\{i,j\}\subseteq \{2,3,\dots,r-1\}$, so, by Corollary~\ref{prfromf7s}, $M\vert H$ is a projective geometry.
Finally, as $M$ is 3-connected, there is an independent set of three elements in $E(M)$ avoiding $H$. Hence, by Lemma~\ref{pglift}, $M\cong P_r$. \end{proof} \iffalse The next lemma is a consequence of the Splitter Theorem~\cite{splitter}. \begin{lemma} \label{f7makesf7*} Let $M$ be a $3$-connected binary matroid with rank at least 4. If $M$ has an $F_7$-minor, then $M$ has an $F_7^\ast$-minor. \end{lemma} \fi We are now ready to prove the main result of this section. \begin{proof}[Proof of Theorem~\ref{mainres3conn}] Let $r$ be the rank of $M$. If $M$ is graphic, then Theorem~\ref{JMtheorem} gives that $M\cong M(K_{r+1})$, so we may assume that $M$ is not graphic. Thus $M$ has a minor $N$ isomorphic to $F_7$, $F_7^\ast$, $M^\ast(K_{3,3})$, or $M^\ast(K_5)$. By Proposition~\ref{pginflater}, it now suffices to show that the $\Theta_3$-closure, $\Theta(N)$, of $N$ is a projective geometry. This is immediate when $N\cong F_7$, so suppose $N$ is isomorphic to $F_7^\ast$, labelled as in Figure~\ref{f7star}. The theta-graphs of $N$ imply that, in $\Theta(N)$, the plane containing $\{1,2,5,6\}$ is isomorphic to $F_7$. Proposition~\ref{pginflater} now implies that $M$ is isomorphic to $P_r$. \begin{figure} \caption{The matroid $F_7^\ast$.} \label{f7star} \end{figure} Next, suppose $N\cong M^\ast(K_{3,3})$. The complement of $N$ in $P_4$ is $U_{2,3}\oplus U_{2,3}$; let $x$ be an element of this complement. The elementary quotient of $N$ obtained by extending $N$ by $x$ and then contracting $x$ is shown in Figure~\ref{mk33quo}. The three pairwise-skew 2-element circuits of this quotient correspond to three lines in the extension of $N$ by $x$ where the union of these lines has rank four. Thus $N$ contains a theta-graph that is completed by $x$. It follows that $x$ and, symmetrically, every point in the complement of $N$ in $P_4$, belongs to $\Theta(N)$. Lemma~\ref{pglift} now implies that $\Theta(N)\cong P_4$. 
\begin{figure} \caption{A quotient of $M^\ast(K_{3,3})$.} \label{mk33quo} \end{figure} \begin{figure} \caption{The graph $K_5$.} \label{k5} \end{figure} \begin{figure} \caption{A binary representation of $M^\ast(K_5)$.} \label{m*k5} \end{figure} Now suppose $N\cong M^\ast(K_5)$, where $K_5$ is labelled as in Figure~\ref{k5}. Figure~\ref{m*k5} gives a corresponding binary representation of $N$. The dual of a theta-graph with arcs of size at least two is a triangle with no trivial parallel classes. Therefore, we can detect theta-graph restrictions of $N$ by contracting elements of $N^\ast$ to produce such a triangle. For example, $N^\ast/5,8$ is the dual of a theta-graph in $N$ with arcs $\{1,2\}$, $\{3,7\}$, and $\{0,4,6,9\}$. This theta-graph is completed by the element $[1\ 1\ 0\ 0\ 0\ 0]^T$, so this element belongs to $\Theta(N)$. The following is a list of duals of theta-graphs of $N$ and the corresponding elements of $\Theta(N)$ that they produce using this reasoning. \begin{itemize} \item $N^\ast/5,8$ gives $[1\ 1\ 0\ 0\ 0\ 0]^T\in \Theta(N)$. \item $N^\ast/4,9$ gives $[1\ 0\ 1\ 0\ 0\ 0]^T\in \Theta(N)$. \item $N^\ast/3,9$ gives $[1\ 0\ 0\ 1\ 0\ 0]^T\in \Theta(N)$. \item $N^\ast/2,8$ gives $[1\ 0\ 0\ 0\ 1\ 0]^T\in \Theta(N)$. \item $N^\ast/0,6$ gives $[0\ 1\ 1\ 0\ 0\ 0]^T\in \Theta(N)$. \item $N^\ast/1,8$ gives $[0\ 1\ 0\ 0\ 1\ 0]^T\in \Theta(N)$. \item $N^\ast/0,3$ gives $[0\ 1\ 0\ 0\ 0\ 1]^T\in \Theta(N)$. \item $N^\ast/1,9$ gives $[0\ 0\ 1\ 1\ 0\ 0]^T\in \Theta(N)$. \item $N^\ast/0,2$ gives $[0\ 0\ 1\ 0\ 0\ 1]^T\in \Theta(N)$. \item $N^\ast/6,7$ gives $[0\ 0\ 0\ 1\ 1\ 0]^T\in \Theta(N)$. \item $N^\ast/5,7$ gives $[0\ 0\ 0\ 1\ 0\ 1]^T\in \Theta(N)$. \item $N^\ast/4,7$ gives $[0\ 0\ 0\ 0\ 1\ 1]^T\in \Theta(N)$. \end{itemize} It is now straightforward to find theta-graphs in $\Theta(N)$ that are completed by the elements $[1\ 0\ 0\ 0\ 0\ 1]^T$, $[0\ 1\ 0\ 1\ 0\ 0]^T$, and $[0\ 0\ 1\ 0\ 1\ 0]^T$, so $\Theta(N)$ contains every vector of weight 1 or 2.
Thus $\Theta(N)$ properly contains $M(K_7)$, so, by Proposition~\ref{mkorpgprop}, $\Theta(N)\cong P_6$. \end{proof} \section{The Main Result} \label{main} After a review of canonical tree decompositions, this section proves Theorem~\ref{mainresult}. For a set $\{M_1,M_2,\dots,M_n\}$ of matroids, a \emph{matroid-labelled tree} with vertex set $\{M_1,M_2,\dots,M_n\}$ is a tree $T$ such that \begin{enumerate} \item[(i)]{if $e$ is an edge of $T$ with endpoints $M_i$ and $M_j$, then $E(M_i)\cap E(M_j)=\{e\}$, and $\{e\}$ is not a separator of $M_i$ or $M_j$; and} \item[(ii)]{$E(M_i)\cap E(M_j)$ is empty if $M_i$ and $M_j$ are non-adjacent.} \end{enumerate} The matroids $M_1, M_2,\dots,M_n$ are called the \emph{vertex labels} of $T$. Now suppose $e$ is an edge of $T$ with endpoints $M_i$ and $M_j$. We obtain a new matroid-labelled tree $T/e$ by contracting $e$ and relabelling the resulting vertex with $M_i\oplus_2 M_j$. As the matroid operation of 2-sum is associative, $T/X$ is well defined for all subsets $X$ of $E(T)$. Let $T$ be a matroid-labelled tree for which $V(T)=\{M_1,M_2,\dots,M_n\}$ and\\ $E(T)=\{e_1,e_2,\dots,e_{n-1}\}$. Then $T$ is a \emph{tree decomposition} of a connected matroid $M$ if \begin{enumerate} \item[(i)]{$E(M)=(E(M_1)\cup E(M_2)\cup\cdots\cup E(M_n))-\{e_1,e_2,\dots,e_{n-1}\}$;} \item[(ii)]{$\vert E(M_i)\vert\geq 3$ for all $i$ unless $\vert E(M)\vert<3$, in which case $n=1$ and $M=M_1$; and} \item[(iii)]{$M$ labels the single vertex of $T/E(T)$.} \end{enumerate} In this case, the elements $\{e_1,e_2,\dots,e_{n-1}\}$ are the \emph{edge labels} of $T$. Cunningham and Edmonds~\cite{cunned} (see also~\cite[Theorem 8.3.10]{oxley}) proved the next theorem that says that $M$ has a \emph{canonical tree decomposition}, unique to within relabelling of the edges. \begin{theorem} \label{treedecomp} Let $M$ be a $2$-connected matroid.
Then $M$ has a tree decomposition $T$ in which every vertex label is $3$-connected, a circuit, or a cocircuit, and there are no two adjacent vertices that are both labelled by circuits or are both labelled by cocircuits. Moreover, $T$ is unique to within relabelling of its edges. \end{theorem} We now complete the proof of our main result. \begin{proof}[Proof of Theorem~\ref{mainresult}] Since circuits, cycle matroids of complete graphs, and projective geometries are in $\Theta_3$, by Proposition~\ref{parconnprop}, every matroid that can be built from such matroids by a sequence of parallel connections is in $\Theta_3$. To prove the converse, we begin by noting that loops can be added via direct sums and that parallel elements can be added via parallel connections of circuits, so we may assume $M$ is simple. Let $T$ be the canonical tree decomposition of $M$. The proof is by induction on $\vert V(T)\vert$. If $\vert V(T)\vert = 1$, then $M$ is 3-connected and the result holds by Theorem~\ref{mainres3conn}. Now assume $T$ has at least two vertices, and let $N$ be a matroid labelling a leaf of $T$. Since $M$ is simple, $N$ is not a cocircuit. We may now write $M=N\oplus_2 M_1$, where, by Corollary~\ref{2summands}, $N$ and $M_1$ are in $\Theta_3$. Thus, by Theorem~\ref{mainres3conn}, $N$ is a circuit, the cycle matroid of a complete graph of rank at least three, or a projective geometry of rank at least three. Moreover, by induction, $M_1$ is a parallel connection of circuits, cycle matroids of complete graphs, and projective geometries. Let $N_1$ be the label of the neighbor of $N$ in $T$, and suppose $N_1$ is not a cocircuit. In this case, each of $N$ and $N_1$ is a circuit, the cycle matroid of a complete graph of rank at least three, or a projective geometry of rank at least three, and they are not both circuits. 
Therefore, if $p$ is the basepoint of the 2-sum $N\oplus_2 N_1$, there are circuits in $N$ and $N_1$ that form a theta-graph that is completed by $p$, a contradiction. Thus $N_1$ is a cocircuit. Now let $k$ be the degree of $N_1$ in $T$. Evidently $N_1$ has at least $k$ elements, but, since $M$ is simple, $N_1$ has at most $k+1$ elements. If $k=2$, then $N_1$ has three elements as $N_1$ labels a vertex of $T$. Otherwise $k\geq 3$, so there are circuits in $M$ that form a theta-graph that is completed by an element of $N_1$. Hence $N_1$ has $k+1$ elements, and therefore corresponds to a parallel connection of its neighbors. It now follows that $M$ is the parallel connection of circuits, cycle matroids of complete graphs, and projective geometries. \end{proof} \section*{Acknowledgements} The authors thank the referee for suggesting a number of improvements to the paper. \end{document}
\begin{document} \title{Secretary Ranking with Minimal Inversions\footnote{Work done in part while the first two authors were summer interns at Google Research, New York.}} \thispagestyle{empty} \begin{abstract} We study a twist on the classic secretary problem, which we term the \emph{secretary ranking} problem: elements from an ordered set arrive in random order and instead of picking the maximum element, the algorithm is asked to assign a rank, or position, to each of the elements. The rank assigned is irrevocable and is given knowing only the pairwise comparisons with elements previously arrived. The goal is to minimize the distance of the rank produced to the true rank of the elements measured by the Kendall-Tau distance, which corresponds to the number of pairs that are inverted with respect to the true order. Our main result is a matching upper and lower bound for the secretary ranking problem. We present an algorithm that ranks $n$ elements with only $O(n^{3/2})$ inversions in expectation, and show that any algorithm necessarily suffers $\Omega(n^{3/2})$ inversions when there are $n$ available positions. In terms of techniques, the analysis of our algorithm draws connections to linear probing in the hashing literature, while our lower bound result relies on a general anti-concentration bound for a generic balls and bins sampling process. We also consider the case where the number of positions $m$ can be larger than the number of secretaries $n$ and provide an improved bound by showing a connection of this problem with random binary trees. \end{abstract} \setcounter{page}{0} \newcommand{\ensuremath{\mathbb{I}}}{\ensuremath{\mathbb{I}}} \section{Introduction} \label{sec:intro} The secretary problem is one of the first problems studied in online algorithms---in fact, it was extensively studied much before the field of online algorithms even existed.
It first appeared in print in 1960 as a recreational problem in Martin Gardner's Mathematical Games column in Scientific American. In the subsequent decade it caught the attention of many of the eminent probabilist researchers like Lindley~\cite{lindley1961dynamic}, Dynkin~\cite{dynkin1963optimum}, Chow et al.\xspace~\cite{chow1964optimal} and Gilbert and Mosteller \cite{gilbert2006recognizing} among others. In a very entertaining historical survey, Ferguson \cite{ferguson1989solved} traces the origin of the secretary problem to much earlier: Cayley in 1875 and Kepler in 1613 pose questions in the same spirit as the secretary problem. The secretary problem has been extended in numerous directions, see for example the surveys by Sakaguchi \cite{sakaguchi1995optimal} and Freeman \cite{freeman1983secretary}. The problem has had an enormous influence in computer science and has provided some of the basic techniques in the field of online and approximation algorithms. Babaioff et al.\xspace extended this problem to matroid set systems \cite{babaioff2007matroids} and Knapsack \cite{babaioff2007knapsack} and perhaps more importantly, show that the secretary problem is a natural tool for designing online auctions. In the last decade, the secretary problem has also been extended to posets \cite{kumar2011hiring}, submodular systems \cite{bateni2010submodular}, general set systems \cite{rubinstein2016beyond}, stable matchings \cite{babichenko2017stable}, non-uniform arrivals \cite{kesselheim2015secretary} and applied to optimal data sampling \cite{girdhar2009optimal}, design of prophet inequalities \cite{azar2014prophet,esfandiari2017prophet}, crowdsourcing systems \cite{singer2013pricing}, pricing in online settings \cite{cohen2014pricing}, online linear programming \cite{agrawal2014dynamic} and online ad allocation \cite{feldman2010online}.
The (admittedly incomplete) list of extensions and applications in the last paragraph serves to showcase that the secretary problem has traditionally been a vehicle for deriving connections between different subfields of computer science and a testbed of new techniques. \paragraph{Ranking Secretaries.} Here we consider a natural variant of the secretary problem, which we name the \emph{secretary ranking} problem, where instead of selecting the maximum element we are asked to \emph{rank} each arriving element. In the process of deriving the optimal algorithm for this problem, we show connections to the technique of linear probing, which is one of the earliest techniques in the hashing literature studied by Knuth~\cite{knuth1963notes} and also to the expected height of random binary trees. In the traditional secretary problem a decision maker is trying to hire a secretary. There is a total order over $n$ secretaries and the goal of the algorithm is to hire the best secretary. The secretaries are assumed to arrive in a random order and the algorithm can only observe the relative rank of each secretary with respect to the previously interviewed ones. Once a secretary is interviewed, the algorithm needs to decide whether to hire the current one or to irrevocably abandon the current candidate and continue interviewing. In our setting, there are $m$ job positions and $n$ secretaries with $m \geq n$. There is a known total order on positions. Secretaries arrive in random order and, as before, we can only compare a secretary with previously interviewed ones. In our version, all secretaries will be hired and the decision of the algorithm is in which position to hire each secretary. Each position can be occupied by at most one secretary and hiring decisions are irrevocable. Ideally, the algorithm will hire the best secretary in the best position, the second best secretary in the second best position and so on.
The loss incurred by the algorithm corresponds to the pairs that are incorrectly ordered, i.e., pairs where a better secretary is hired in a worse position. \subsection{Our Results and Techniques} \paragraph{Dense case ($m=n$).} We start by studying the perhaps most natural case of the secretary ranking problem when $m=n$, which we call the dense case. The trivial algorithm that assigns a random empty position for each arriving secretary incurs $\Theta(n^2)$ cost, since each pair of elements has probability $1/2$ of being an inversion. On the other hand, $\Omega(n)$ is a trivial lower bound on the cost of any algorithm because nothing is known when the first element arrives. As such, there is a linear gap between the costs of the trivial upper and lower bounds for this secretary ranking problem. Our first main result is an asymptotically tight upper and lower bound on the loss incurred by the algorithms for the secretary ranking problem. \begin{theorem*}\label{res:main} There is an algorithm for the secretary ranking problem with $m=n$ that computes a ranking with $\mathcal{O}(n^{3/2})$ inversions in expectation. Moreover, any algorithm for this problem makes $\Omega(n^{3/2})$ inversions in expectation. \end{theorem*} There are two main challenges in designing an algorithm for secretary ranking. In earlier time steps, there are only a small number of comparisons observed and these do not contain sufficient information to estimate the true rank of the arriving elements. In later time steps, we observe a large number of comparisons and using the randomness of elements arrival, the true rank of the elements can be estimated well. However, we face another difficulty for these time steps: many of the positions have already been assigned to some element arrived earlier and are hence not available. The first information-theoretic challenge exacerbates this second issue.
Previous bad placements might imply that all the desired positions are unavailable for the current element, causing a large cost even for an element whose true rank we can estimate accurately. The algorithm needs to handle these two opposing challenges simultaneously. The main idea behind our algorithm is to estimate the rank of the current element using the observed comparisons and then add some noise to these estimations to obtain additional randomness in the positions and avoid positively correlated mistakes. We then assign the current element to the closest empty position to this noisy estimated rank. The main technical interest is in the analysis of this algorithm. We draw a connection to the analysis of linear probing in the hashing literature~\cite{knuth1963notes} to argue that under this extra noise, there exists typically an empty position that is close to the estimated rank. For the lower bound, we analyze the number of random pairwise comparisons needed to estimate the rank of an element accurately. Such results are typically proven in the literature by using {anti-concentration} inequalities. A main technical difficulty is that most of the existing anti-concentration inequalities are for {independent} random variables while there is a {correlation} between the variables we are considering. We prove, to the best of our knowledge, a new anti-concentration inequality for a generic balls in bins problem that involves correlated sampling. \paragraph{Sparse Case ($m \gg n$).} Next we consider the case where there are considerably more positions than secretaries and compute how large the number $m$ of positions needs to be such that we incur no inversions. Clearly for $m = 2^{n+1}-1$ it is possible to obtain zero inversions with probability $1$ and for any number less than that it is also clear that any algorithm needs to cause inversions with non-zero probability.
If we only want to achieve zero inversions with high probability, how large does $m$ need to be? By showing a connection between the secretary problem and random binary trees, we show that for $m \geq n^\alpha$ for $\alpha \approx 2.998$ it is possible to design an algorithm that achieves zero inversions with probability $1 - o(1)$. The constant $\alpha$ here is obtained using the high probability bound on the height of a random binary tree of $n$ elements. \paragraph{General Case.} Finally, we combine the algorithms for the dense and sparse cases to obtain a general algorithm with a bound on the expected number of inversions which smoothly interpolates between the bounds obtained for the dense and sparse cases. This algorithm starts by running the algorithm for the sparse case and when two elements are placed very close to each other by the sparse algorithm, we switch to use the algorithm for the dense case to assign a position to remaining elements with rank between these two close elements. \subsection{Related Work} Our work is inserted in the vast line of literature on the secretary problem, which we briefly discussed earlier. There has been a considerable amount of work on multiple-choice secretary problems where, instead of the single best element, multiple elements can be chosen as they arrive online \cite{kleinberg2005multiple,babaioff2007knapsack,babaioff2007matroids,bateni2010submodular,rubinstein2016beyond,KorulaP09}. We note that in multiple-choice secretary problems, the decision at arrival of an element is still binary, whereas in secretary ranking one of $n$ positions must be chosen. More closely related to our work is a paper of Babichenko et al.\xspace~\cite{babichenko2017stable} where elements that arrive must also be assigned to a position. However, the objective is different and the goal, which uses a game-theoretic notion of stable matching, is to maximize the number of elements that are not in a blocking pair.
Gobel~et al.\xspace~\cite{GobelKT15} also studied an online appointment scheduling problem in which the goal is to assign starting dates to a set of jobs arriving online. The objective here is again different from the secretary ranking problem and is to minimize the total weight time of the jobs. Another related line of work in machine learning is the well-known problem of learning to rank that has been extensively studied in recent years (e.g. \cite{burges2005learning,cao2007learning,burges2007learning,xia2008listwise}). Two important applications of this problem are search engines for document retrieval \cite{liu2009learning,radlinski2005query,liu2007letor,cao2006adapting,xu2007adarank} and collaborative filtering approaches to recommender systems \cite{shi2010list,shi2012climf,liu2008eigenrank,wang2008probabilistic}. There has been significant interest recently in ranking from pairwise comparisons \cite{feige1994computing,busa2013top,chen2015spectral,shah2015simple,jang2016top, heckel2016active,davidson2014top,braverman2016parallel,agarwal2017learning}. To the best of our knowledge, there has not been previous work on ranking from pairwise comparisons in the online setting. Finally, we also briefly discuss hashing, since our main technique is related to linear probing. Linear probing is a classic implementation of hash tables and was first analyzed theoretically by Knuth in 1963~\cite{knuth1963notes}, in a report which is now regarded as the birth of algorithm analysis. Since then, different variants of this problem mainly for hash functions with limited independence have been considered in the literature~\cite{schmidt1990analysis,pagh2007linear,patracscu2010k}. Reviewing the vast literature on this subject is beyond the scope of our paper and we refer the interested reader to these papers for more details. \noindent \textbf{Organization.} The rest of the paper is organized as follows. 
In Section~\ref{sec:setup} we formalize the problem and present the notation used in the paper. In Section~\ref{sec:alg}, we present and analyze our algorithm for $m=n$ case. Section~\ref{sec:lower-bound} is devoted to showing the lower bound also for $m=n$ case. Our results for the general case when $m$ can be different from $n$ appear in Section~\ref{sec:sparse}. Missing proofs and standard concentration bounds are postponed to the appendix. \section{Problem Setup}\label{sec:setup} In the secretary ranking problem, there are $n$ elements $a_1, \hdots, a_n$ that arrive one at a time in an online manner and in a uniformly random order. There is a total ordering among the elements, but the algorithm has only access to pairwise comparisons among the elements that have already arrived. In other words, at time $t$, the algorithm only observes whether $a_i < a_j$ for all $i,j \leq t$. We define the rank function $\textsf{rk} : \{a_1, \hdots, a_n\} \rightarrow [n]$ as the true rank of the elements in the total order, i.e., $a_i < a_j$ iff $\rank{a_i} < \rank{a_j}$. Since the elements arrive uniformly at random, $\rank{\cdot}$ is a random permutation. Upon arrival of an element $a_t$ at time step $t$, the algorithm must, irrevocably, place $a_t$ in a position $\pi(a_t) \in [m]$ that is not yet occupied, in the sense that for $a_t \neq a_s$ we must have $\pi(a_s) \neq \pi(a_t)$. Since the main goal of the algorithm is to place the elements as to reflect the true rank as close as possible\footnote{In other words, hire the better secretaries in better positions}, we refer to $\pi(a_t)$ as the \emph{learned rank} of $a_t$. The goal is to minimize the number of pairwise mistakes induced by the learned ranking compared to the true ranking. A pairwise mistake, or an inversion, is defined as a pair of elements $a_i, a_j$ such that $\rank{a_i} < \rank{a_j}$ according to the true underlying ranking but $\pi(a_i) > \pi(a_j)$ according to the learned ranking. 
The secretary ranking problem generalizes the secretary problem in the following sense: in the secretary problem, we are only interested in finding the element with the highest rank. However, in the secretary ranking problem, the goal is to assign a rank to every arrived element and construct a complete ranking of all elements. Similar to the secretary problem, we make the enabling assumption that the order of elements arrival is uniformly random.\footnote{It is straightforward to verify that when the ordering is adversarial, any algorithm incurs the trivial cost of $\Omega(n^2)$ for $m=n$. For completeness, a proof is provided in Appendix~\ref{sec:appadversarial}.} We measure the cost of the algorithm in expectation over the randomness of both the arrival order of elements and the algorithm. \paragraph{Measures of sortedness.} We point out that the primary goal in the secretary ranking problem is to learn an ordering $\pi$ of the input elements which is as close as possible to their sorted order. As such, the \emph{cost} suffered by an algorithm is given by a \emph{measure of sortedness} of $\pi$ compared to the true ranking. There are various measures of sortedness studied in the literature depending on the application. Our choice of using the number of inversions, also known as \emph{Kendall's tau} measure, as the cost of algorithm is motivated by the importance of this measure and its close connection to other measures such as \emph{Spearman's footrule} (see, e.g., Chapter 6B in~\cite{Diaconis88}).
For a mapping $\pi:[n] \rightarrow [m]$, Kendall's tau $K(\pi)$ measures the number of inversions in $\pi$, i.e.: $$K(\pi): = \vert\{(i,j); (\pi(a_i) - \pi(a_j))(\rank{a_i} - \rank{a_j}) < 0 \}\vert.$$ When $n=m$, another important measure of sortedness is Spearman's footrule $F(\pi)$ given by: $ F(\pi) := \sum_{i=1}^{n} \card{\rank{a_i}- \pi(a_i)},$ which corresponds to the summation of distances between the true rank of each element and its current position. A celebrated result of Diaconis and Graham~\cite{DiaconisG77} shows that these two measures are within a factor of two of each other, namely, $K(\pi) \leq F(\pi) \leq 2 \cdot K(\pi)$. We refer to this inequality as the DG inequality throughout the paper. Thus, up to a factor of two, the goals of minimizing the Kendall tau or Spearman's footrule distances are equivalent and, while the Kendall tau distance is used in the formulation of the problem, we also use the Spearman's footrule distance in the analysis. \section{Preliminaries} \label{sec:prelim} \newcommand{\set}[1]{\ensuremath{\{#1\}}} \section{Dense Secretary Ranking}\label{sec:alg} We start by analyzing the dense case, where $m = n$ and both the true rank $\rank{\cdot}$ and the learned rank $\pi(\cdot)$ are permutations over $n$ elements. Our main algorithmic result is the following theorem. \begin{theorem}\label{thm:dense} There exists an algorithm for the secretary ranking problem with $m=n$ that incurs a cost of $O(n\sqrt{n})$ in expectation. \end{theorem} \subsection{Description of the Algorithm} The general approach behind the algorithm in Theorem~\ref{thm:dense} is as follows. \begin{mdframed}[hidealllines=false,backgroundcolor=gray!10,innertopmargin=10pt] Upon the arrival of element $a_{t}$ at time step $t$: \begin{enumerate} \item \textbf{Estimation step:} Estimate the true rank of the arrived element $a_{t}$ using the \emph{partial} comparisons seen so far. 
\item \textbf{Assignment step:} Find the nearest currently unassigned rank to this estimate and let $\pi(a_{t})$ be this position. \end{enumerate} \end{mdframed} We now describe the algorithm in more detail. A natural way to estimate the rank of the $t$-th element in the estimation step is to compute the rank of this element with respect to the previous $t-1$ elements seen so far and then scale this number to obtain an estimate of the rank of this element between $1$ and $n$. However, for our analysis of the assignment step, we need to tweak this approach slightly: instead of simply rescaling and rounding, we add perturbation to the estimated rank and then round its value. This gives a nice distribution of estimated ranks which is crucial for the analysis of the assignment step. The assignment step then simply assigns a learned rank to the element as close as possible to its estimated rank. We formalize the algorithm in Algorithm~\ref{alg:dense}. \begin{algorithm2e}[H] \caption{Dense Ranking}\label{alg:dense} { \textbf{Input:} a set $R$ of $n$ available positions, initially $R = [n]$, and at most $n$ online arrivals.} \For{\textnormal{any time step $t \in [n]$ and element $a_{t}$}}{ Define $r_t := \card{\set{a_{t'} \mid a_{t'} < a_{t} \textnormal{~and~} t' < t}}$. Sample $x_t$ uniformly in the real interval $[r_t \cdot \frac{n}{t}, (r_t+1) \cdot \frac{n}{t}]$ and choose $\erank{a_{t}} = \lceil x_t \rceil$. Set the learned rank of $a_{t}$ as $\pi(a_{t}) = \arg\min_{i \in R} \card{i - \erank{a_{t}}}$ and remove $\pi(a_{t})$ from $R$. } \end{algorithm2e} We briefly comment on the runtime of the algorithm. By using any self-balancing binary search tree---such as a red-black tree or an AVL tree---to store the ranking of the arrived elements as well as the set $R$ of available ranks separately, Algorithm~\ref{alg:dense} is implementable in $O(\log n)$ time for each step, so total $O(n\log{n})$ worst-case time. We also note some similarity between this algorithm and linear probing in hashing.
Linear probing is an approach to resolving collisions in hashing where, when a key is hashed to a non-empty cell, the closest neighboring cells are visited until an empty location is found for the key. The similarity is apparent to our assignment step which finds the nearest currently unassigned rank to the estimated rank of an element. The analysis of the assignment step follows similar ideas as the analysis for the linear probing hashing scheme. \subsection{The Analysis}\label{sec:analysis} For $m=n$, the total number of inversions can be approximated within a factor of $2$ by the Spearman's footrule. Therefore, we can write the cost of Algorithm~\ref{alg:dense} (up to a factor $2$) as follows: \begin{equation} \sum_{t=1}^{n} \card{\rank{a_{t}} - \pi(a_{t})} \leq \sum_{t=1}^{n} \card{\rank{a_{t}} - \erank{a_{t}}} + \sum_{t=1}^{n} \card{\erank{a_{t}} - \pi(a_{t})}. \label{eq:cost} \end{equation} This basically breaks the cost of the algorithm in two parts: one is the cost incurred by the estimation step and the other one is the cost of the assignment step. Our analysis then consists of two main parts where each part bounds one of the terms in the RHS above. In particular, we first prove that given the partial comparisons seen so far, we can obtain a relatively good estimation to the rank of the arrived element, and then in the second part, we show that we can typically find an unassigned position in the close proximity of this estimated rank to assign to it. The following two lemmas capture each part separately.
{In both lemmas, the randomness in the expectation is taken over the random arrivals and the internal randomness of the algorithm:} \begin{lemma}[Estimation Cost]\label{lem:estimation} In Algorithm~\ref{alg:dense}, $ \Exp\bracket{\sum_{t=1}^{n}\card{\rank{a_{t}} - \erank{a_{t}}}} = O(n\sqrt{n}).$ \end{lemma} \begin{lemma}[Assignment Cost]\label{lem:assignment} In Algorithm~\ref{alg:dense}, $\Exp\bracket{\sum_{t=1}^{n}\card{\erank{a_{t}} - \pi(a_{t})}} = O(n\sqrt{n}).$ \end{lemma} Theorem~\ref{thm:dense} then follows immediately from these two lemmas and Eq~(\ref{eq:cost}). The following two sections are dedicated to the proof of each of these two lemmas. We emphasize that the main part of the argument is the analysis of the assignment cost, i.e., Lemma~\ref{lem:assignment}, and in particular its connection to linear probing. The analysis for estimation cost, i.e., Lemma~\ref{lem:estimation}, follows from standard Chernoff bound arguments. \subsubsection{Estimation Cost: Proof of Lemma~\ref{lem:estimation}} \label{sec:estimation} The correctness of the estimation step in our algorithm relies on the following proposition that bounds the probability of the deviation between the estimated rank and the true rank of each element (depending on the time step it arrives). The proof uses the Chernoff bound for sampling without replacement. \begin{restatable}{rPro}{lemquantileestimation} \label{lem:quantile-estimation} For any $t > 1$ and any $\alpha \geq 0$, $\Prob\Paren{\card{\rank{a_{t}} - \erank{a_{t}}} \geq 1 + {\frac{n}{t}} + \alpha \cdot \frac{n-1}{\sqrt{t-1}}} \leq e^{-\Omega(\alpha^2)}.$ \end{restatable} \begin{proof} Fix any $t \in [n]$ and element $a_{t}$ and recall that $\rank{a_{t}}$ denotes the true rank of $a_t$.
Conditioned on a fixed value for the rank of $a_{t}$, the distribution of the number of elements $r_t$ that arrived before $a_t$ and have a smaller rank is equivalent to a sampling without replacement process of $t-1$ balls where the urn has $\rank{a_{t}} - 1$ red balls and $n - \rank{a_{t}}$ blue balls (and the goal is to count the number of red balls). As such $\Exp[r_t] = (t-1)\cdot\frac{\rank{a_{t}} - 1}{n-1}$ and by the Chernoff bound for sampling without replacement (Proposition \ref{prop:chernoff_negative} with $a = n$ and $b=t-1$), we have: \begin{align*} \Prob\Paren{\card{r_t - \Exp\bracket{r_t}} \geq \alpha \sqrt{t-1}} \leq 2\cdot \exp \Paren{-\frac{2(\alpha \sqrt{t-1})^2}{t-1} } = e^{-\Omega(\alpha^2)}. \end{align*} We now argue that \begin{align*} \Prob\Paren{\card{\rank{a_{t}} - \erank{a_{t}}} \geq 1 + {\frac{n}{t}} + \alpha \cdot \frac{n-1}{\sqrt{t-1}}} \leq \Prob\Paren{\card{r_t - \Exp\bracket{r_t}} \geq \alpha \sqrt{t-1}}. \end{align*} which finalizes the proof by the bound in above equation. To see this, note that, \begin{align*} \alpha \frac{n-1}{\sqrt{t-1}} \geq \card{ \frac{n-1}{t-1} r_t - \rank{a_{t}} } \geq \card{ x_t - \rank{a_{t}} } - \frac{n}{t} \geq \card{ \erank{a_{t}} - \rank{a_{t}} } - 1 - \frac{n}{t} \end{align*} The first inequality follows from substituting the expectation in $\card{r_t - \Exp\bracket{r_t}} \geq \alpha \sqrt{t-1}$ and multiplying the whole expression by $(n-1)/(t-1)$. The second inequality just follows from the fact that both the variable $x_t$ (defined in step $4$ of Algorithm~\ref{alg:dense}) and $\frac{n-1}{t-1} r_t $ are in the interval $[\frac{n}{t} r_t, \frac{n}{t} (r_t+1)]$. The fact that $x_t$ is in this interval comes directly from its definition in the algorithm and the fact that $\frac{n-1}{t-1} r_t $ is in the interval is by a simple calculation (see Proposition~\ref{prop:ratios} in Appendix~\ref{sec:appalg}). The last inequality follows from the fact that $\erank{a_{t}} = \ceil{x_t}$.
\end{proof} We are now ready to prove Lemma~\ref{lem:estimation}. \begin{proof}[Proof of Lemma~\ref{lem:estimation}] Fix any $t > 1$; we have, \begin{align*} \Exp\bracket{\card{\rank{a_{t}} - \erank{a_{t}}}{-1- \frac{n}{t}} } &\leq \int_{\alpha=0}^{\infty} \Prob\Paren{\card{\rank{a_{t}} - \erank{a_{t}}} {-1 - \frac{n}{t}} \geq \alpha \cdot \frac{n-1}{\sqrt{t-1}}} \cdot \frac{n-1}{\sqrt{t-1}} \cdot d\alpha \\ & \leq \frac{n-1}{\sqrt{t-1}} \cdot \int_{\alpha=0}^{\infty} e^{-\Omega(\alpha^2)} \cdot d\alpha = O\Paren{\frac{n}{\sqrt{t}}}. \tag{by Proposition~\ref{lem:quantile-estimation}} \end{align*} Hence, using the trivial bound for $t=1$ and the bound above for $t>1$ we conclude that: \begin{align*} \Exp\bracket{\sum_{t=1}^{n}\card{\rank{a_{t}} - \erank{a_{t}}}} = \sum_{t=1}^{n}\Exp\bracket{\card{\rank{a_{t}} - \erank{a_{t}}}} = \sum_{t=1}^{n} O\left(\frac{n}{t} + \frac{n}{\sqrt{t}}\right) = O(n\sqrt{n}). \qedhere \end{align*} \end{proof} \subsubsection{Assignment Cost: Proof of Lemma~\ref{lem:assignment}}\label{sec:assignment} For the second part of the analysis, it is useful to think of sampling a random permutation in the following recursive way: given a random permutation over $t-1$ elements, it is possible to obtain a random permutation over $t$ elements by inserting the $t$-th element in a uniformly random position between these $t-1$ elements. Formally, given $\sigma:[t-1] \rightarrow [t-1]$, if we sample a position $i$ uniformly from $[t]$ and generate permutation $\sigma':[t] \rightarrow [t]$ such that: $$\sigma'(t') = \left\{ \begin{aligned} & i & & \text{~if~} t' = t \\ & \sigma(t') & & \text{~if~} t' < t \text{~and~} \sigma'(t') < i \\ & \sigma(t') + 1 & & \text{~if~} t' < t \text{~and~} \sigma'(t') > i \\ \end{aligned}\right.$$ then $\sigma'$ will be a random permutation over $t$ elements. It is simple to see that just by fixing any permutation and computing the probability of it being generated by this process.
Thinking about sampling the permutation in this way is very convenient for this analysis since at the $t$-th step of the process, the relative order of the first $t$ elements is fixed (even though the true ranks can only be determined in the end). In that spirit, let us also define for a permutation $\sigma:[t] \rightarrow [t]$ the event $\mathcal{O}_\sigma$ that $\sigma$ is the relative ordering of the first $t$ elements: $$\mathcal{O}_\sigma = \{ a_{\sigma(1)} < a_{\sigma(2)} < \hdots < a_{\sigma(t)} \}.$$ The following proposition asserts that the randomness of the arrival and the inner randomness of the algorithm ensure that the estimated ranks at each time step are chosen \emph{uniformly at random} from all possible ranks in $[n]$. \begin{proposition}\label{prop:random-assignment} The values of $\erank{a_{1}}, \hdots, \erank{a_{n}}$ are i.i.d.\ and uniformly chosen from $[n]$. \end{proposition} \begin{proof} First let us show that for any fixed permutation $\sigma$ over $t-1$ elements, the relative rank $r_t$ defined in the algorithm is uniformly distributed in $\{0,\hdots, t-1\}$. In other words: $$\Prob[r_t = i \mid \mathcal{O}_\sigma] = \frac{1}{t}, \qquad \forall i \in \{0, \hdots, t-1\}.$$ Simply observe that there are exactly $t$ permutations over $t$ elements such that the permutation induced in the first $t-1$ elements is $\sigma$. Since we are sampling a random permutation in this process, each of these permutations is equally likely to occur. Moreover, since each permutation corresponds to inserting the $t$-th element in one of the $t$ positions, we obtain the claim. Furthermore, since the probability of each value of $r_t$ does not depend on the induced permutation $\sigma$ over the first $t-1$ elements, $r_t$ is independent of $\sigma$. Since all the previous values $r_{t'}$ are completely determined by $\sigma$, $r_t$ is independent of all previous $r_{t'}$ for $t' < t$.
Finally observe that if $r_t$ is random from $\{0, ..., t-1\}$, then $x_t$ is sampled at random from $[0,n]$, so $\erank{a_{t}}$ is sampled at random from $[n]$. Since for different values of $t \in [n]$, all $r_t$ are independent, all the values of $\erank{a_{t}}$ are also independent. \end{proof} Now that we established that $\erank{a_{t}}$ are independent and uniform, our next task is to bound how far from the estimated rank we have to go in the assignment step, before we are able to assign a learned rank to this element. This part of our analysis will be similar to the analysis of the linear probing hashing scheme. If we are forced to let the learned rank of $a_{t}$ be far away from $\erank{a_{t}}$, say $\card{ \erank{a_{t}} - \pi(a_{t})} > k$, then this necessarily means that all positions in the integer interval $[\erank{a_{t}} - k:\erank{a_{t}} + k]$ must have already been assigned as a learned rank of some element. In the following, we bound the probability of such event happening for large values of $k$ compared to the current time step $t$. We say that the integer interval $I = [i : i+s-1]$ of size $s$ is \emph{popular} at time $t$, iff at least $s$ elements $a_{t'}$ among the $t-1$ elements that appear before the $t$-th element have estimated rank $\erank{a_{t'}} \in I$. Since by Proposition~\ref{prop:random-assignment} every element has probability $s/n$ of having estimated rank in $I$ and the estimated ranks are independent, we can bound the probability that $I$ is popular using a standard application of Chernoff bound. \begin{restatable}{rClaim}{clmlinearprobing} \label{clm:linear-probing} Let $\alpha \geq 1$, an interval of size $s \geq 2\alpha \max\Paren{1, \Paren{\frac{t}{n-t}}^2}$ is popular at time $t$ w.p. $e^{-O(\alpha)}$. \end{restatable} \begin{proof} The proof follows directly from the Chernoff bound in Proposition~\ref{prop:chernoff-multi}. 
For $t' \in [t]$, let $X_{t'}$ be the event that $\erank{a_{t'}} \in I$ and $X = \sum_{t'=1}^{t} X_{t'}$, then setting $\ensuremath{\varepsilon} = \min(1, \frac{n-t}{t})$ we have that: \begin{align*} \Prob\paren{I~\text{is popular}} &= \Prob\paren{X \geq s} \leq \Prob\paren{X > (1+\ensuremath{\varepsilon}) \cdot \Exp[X]} \\ &\leq \exp \Paren{-\frac{\ensuremath{\varepsilon}^2 \cdot \Exp[X]}{3}} = e^{-O(\alpha)} \end{align*} as $\Exp[X] =s \cdot t/n$. \end{proof} We now use the above claim to bound the deviation between $\erank{a_t}$ and $\pi(a_t)$. The following lemma, although simple, is the key part of the argument. \begin{lemma}\label{lem:cost} For any $t \leq n$, we have $\Exp \card{ \erank{a_{t}} - \pi(a_{t})} = O(\max\Paren{1, \Paren{\frac{t}{n-t}}^2})$. \end{lemma} \begin{proof} Fix any $\alpha \geq 1$. We claim that, if the learned rank of $a_{t}$ is a position which has distance at least $k_{\alpha} = 4\alpha \cdot \max\Paren{1,\Paren{\frac{t}{n-t}}^2}$ from its estimated rank, then necessarily there exists an interval $I$ of length at least $2k_{\alpha}$ which contains $\erank{a_t}$ and is popular. Let us prove the above claim then. Let $I$ be the shortest integer interval $[a:b]$ which contains $\erank{a_t}$ and moreover both positions $a$ and $b$ are not assigned to a learned rank by time $t$ (by this definition, $\pi(a_t)$ would be either $a$ or $b$). For $\card{\erank{a_t} - \pi(a_t)}$ to be at least $k_{\alpha}$, the length of interval $I$ needs to be at least $2k_{\alpha}$. But for $I$ to have length at least $2k_{\alpha}$, we should have at least $2k_{\alpha}$ elements from $a_1,\ldots,a_{t-1}$ to have an estimated rank in $I$: this is simply because $a$ and $b$ are not yet assigned a rank by time $t$ and hence any element $a_{t'}$ which has estimated rank outside the interval $I$ is never assigned a learned rank inside $I$ (otherwise the assignment step should pick $a$ or $b$, a contradiction).
We are now ready to finalize the proof. It is straightforward that in the above argument, it suffices to only consider the integer intervals $[\erank{a_t} - k_{\alpha} : \erank{a_t}+k_{\alpha}]$ parametrized by the choice of $\alpha \geq 1$. By the above argument and Claim~\ref{clm:linear-probing}, for any $\alpha \geq 1$, we have, \begin{align*} \Exp\bracket{\card{\erank{a_{t}} - \pi(a_t)}} &\leq \int_{\alpha=0}^{\infty} \Prob\Paren{\card{ \erank{a_{t}} - \pi(a_t)} > k_{\alpha}} \cdot k_{\alpha} \cdot d\alpha \\ &\leq \int_{\alpha=0}^{\infty} \Prob\Paren{\textnormal{Integer interval $[\erank{a_t} - k_{\alpha} : \erank{a_t}+k_{\alpha}]$ is popular}} \cdot k_{\alpha} \cdot d\alpha\\ &\!\!\!\!\Leq{Claim~\ref{clm:linear-probing}} O(\max\Paren{1,\Paren{\frac{t}{n-t}}^2}) \cdot \int_{\alpha=0}^{\infty} e^{-O(\alpha)} \cdot \alpha \cdot d\alpha \\ &= O(\max\Paren{1,\Paren{\frac{t}{n-t}}^2}) . \qedhere \end{align*} \end{proof} We are now ready to finalize the proof of Lemma~\ref{lem:assignment}. \begin{proof}[Proof of Lemma~\ref{lem:assignment}] We have, $\Exp\bracket{\sum_{t=1}^{n}\card{\erank{a_{t}} - \pi(a_{t})}} = \sum_{t=1}^{n} \Exp\bracket{\card{\erank{a_{t}} - \pi(a_{t})}}$ by linearity of expectation. For any $t < n/2$, the maximum term in RHS of Lemma~\ref{lem:cost} is $1$ and hence in this case, we have $\Exp\bracket{\card{\erank{a_{t}} - \pi(a_{t})}} = O(1)$. Thus, the contribution of the first $n/2-1$ terms to the above summation is only $O(n)$. Also, when $t > n-\sqrt{n}$, we can simply write $\Exp\bracket{\card{\erank{a_{t}} - \pi(a_{t})}} \leq n$ which is trivially true and hence the total contribution of these $\sqrt{n}$ summands is also $O(n\sqrt{n})$. It remains to bound the total contribution of $t \in [n/2,n-\sqrt{n}]$. 
By Lemma~\ref{lem:cost}, $$ \sum_{t=n/2}^{n-\sqrt{n}} \Exp\bracket{\card{\erank{a_{t}} - \pi(a_{t})}} \leq O(1) \cdot \sum_{t=n/2}^{n-\sqrt{n}} \paren{\frac{t}{n-t}}^2 = O(n\sqrt{n}), $$ where the equality is by a simple calculation (see Proposition~\ref{prop:sum} in Appendix~\ref{sec:appalg}). \end{proof} \newcommand{\Alg}{\ensuremath{\mathcal{A}}} \newcommand{\Algstar}{\ensuremath{\mathcal{A}^*}} \newcommand{\Ranking}{\ensuremath{\textnormal{\textsc{Ranking}}}\xspace} \newcommand{\CongestedRanking}{\ensuremath{\textnormal{\textsc{Congested-Ranking}}}\xspace} \newcommand{\cost}[1]{\ensuremath{\textnormal{\textsf{cost}}_{\ensuremath{\mathcal{A}}}(#1)}} \newcommand{\cU}{\ensuremath{\mathcal{U}}} \newcommand{\cE}{\ensuremath{\mathcal{E}}} \subsection{A Tight Lower Bound} \label{sec:lower-bound} We complement our algorithmic result in Theorem~\ref{thm:dense} by showing that the cost incurred by our algorithm is asymptotically optimal. \begin{theorem}\label{thm:lower-bound} Any algorithm for the secretary ranking problem incurs $\Omega(n\sqrt{n})$ cost in expectation. \end{theorem} To prove Theorem~\ref{thm:lower-bound}, we first show that every deterministic algorithm incurs $\Omega(n \sqrt{n})$ inversions in expectation and then use Yao's minimax principle to extend the lower bound to randomized algorithms (by simply fixing the randomness of the algorithm to obtain a deterministic one with the same performance over the particular distribution of the input). The main ingredient of our proof of Theorem~\ref{thm:lower-bound} is an anti-concentration bound for sampling without replacement which we cast as a balls in bins problem. We start by describing this balls in bins problem and prove the anti-concentration bound in Lemma~\ref{lem:ballsandbin}.
Lemma~\ref{lem:anti-concentration} then connects the problem of online ranking to the balls in bins problem. We conclude with the proof of Theorem~\ref{thm:lower-bound}. To continue, we introduce some asymptotic notation that is helpful for readability. We write $v = \Theta_1(n)$ if variable $v$ is linear in $n$, but also smaller and bounded away from $n$, i.e., $v = cn$ for some constant $c$ such that $0 < c < 1$. \begin{lemma} \label{lem:ballsandbin} Assume there are $n$ balls in a bin, $r$ of which are red and the remaining $n - r$ are blue. Suppose $t < \min(r, n-r)$ balls are drawn from the bin uniformly at random without replacement, and let $\ensuremath{\mathcal{E}}_{k,t,r,n}$ be the event that $k$ out of those $t$ balls are red. Then, if $r = \Theta_1(n)$ and $t = \Theta_1(n)$, for every $k \in \{0, \hdots, t\}$: $\Prob\left(\ensuremath{\mathcal{E}}_{k,t,r,n}\right) = O\left(1/\sqrt{n}\right).$ \end{lemma} \noindent Our high level approach toward proving Lemma~\ref{lem:ballsandbin} is as follows: \begin{enumerate} \item We first use a counting argument to show that $\Prob\left(\ensuremath{\mathcal{E}}_{k,t,r,n}\right) = {{r}\choose{k}} {{n-r}\choose{t-k}}/{{n}\choose{t}}.$ \item We then use Stirling's approximation to show ${{r}\choose{k}} {{n-r}\choose{t-k}}/{{n}\choose{t}} =O(n^{-1/2})$ for $k = \lfloor \frac{tr}{n} \rfloor$. \item Finally, with a max. likelihood argument, we show that $\argmax_{k \in [n]} {{r}\choose{k}} {{n-r}\choose{t-k}}/{{n}\choose{t}} \approx \frac{tr}{n}$. \end{enumerate} \noindent By combining these, we have, $\Prob\left(\ensuremath{\mathcal{E}}_{k,t,r,n}\right) \leq \max_{k \in [n]} {{r}\choose{k}} {{n-r}\choose{t-k}}/{{n}\choose{t}} \leq {{r}\choose{k^*}} {{n-r}\choose{t-k^*}}/{{n}\choose{t}}$ for $k^* \approx \frac{tr}{n}$ (by the third step), which we bounded by $O(n^{-1/2})$ (in the second step). The actual proof is however rather technical and is postponed to Appendix~\ref{sec:applower-bound}. 
The next lemma shows that upon arrival of $a_{t}$, any position has probability at most $O\paren{1 /\sqrt{n}}$ of being the correct rank for $a_{t}$, under some mild conditions. The proof of this lemma uses the previous anti-concentration bound for sampling without replacement by considering the elements smaller than $a_{t}$ to be the red balls and the elements larger than $a_{t}$ to be the blue balls. For $a_{t}$ to have rank $r$ and be the $k$th element in the ranking so far, the first $t - 1$ elements previously observed must contain $k-1$ red balls out of the $r-1$ red balls and $t - k$ blue balls out of the $n - r $ blue balls. \begin{lemma}\label{lem:anti-concentration} Fix any permutation $\sigma$ of $[t]$ and let $\mathcal{O}_\sigma$ denote the event that $a_{\sigma(1)} < a_{\sigma(2)} < \hdots < a_{\sigma(t)}$. If $\sigma(k) = t$, $k = \Theta_1(t)$ and $t = \Theta_1(n)$ then for any $r$: $ \Prob\paren{\rank{a_{t}} = r \mid \mathcal{O}_{\sigma}} = {O}\paren{1/\sqrt{n}}. $\end{lemma} \begin{proof} Define $\ensuremath{\mathcal{E}}_k$ as the event that ``$a_{t}$ is the $k$-th smallest element in $a_{1},\ldots,a_{t}$''. We first have, $ \Prob\paren{\rank{a_{t}} = r \mid \mathcal{O}_{\sigma}} = \Prob\paren{\rank{a_{t}} = r \mid \ensuremath{\mathcal{E}}_k}. $ This is simply because $\rank{a_{t}}$ is only a function of the pairwise comparisons of $a_{t}$ with other elements and does not depend on the ordering of the remaining elements between themselves. Moreover, \begin{align*} \Prob\paren{\rank{a_{t}} = r \mid \ensuremath{\mathcal{E}}_k} &= \Prob\paren{\ensuremath{\mathcal{E}}_k \mid \rank{a_{t}} = r} \cdot \frac{\Prob\paren{\rank{a_{t}} = r}}{\Prob\paren{\ensuremath{\mathcal{E}}_k}} = \Prob\paren{\ensuremath{\mathcal{E}}_k \mid \rank{a_{t}} = r} \cdot \frac{t}{n} \end{align*} since, under the uniformly random arrival order, $\Prob\paren{\rank{a_{t}} = r} = \frac{1}{n}$ and $\Prob\paren{\ensuremath{\mathcal{E}}_k} = \frac{1}{t}$.
Notice now that conditioned on $\rank{a_{t}} = r $, the event $\ensuremath{\mathcal{E}}_k$ is exactly the event $\ensuremath{\mathcal{E}}_{k-1,t-1,r-1,n-1}$ in the sampling without replacement process defined in Lemma \ref{lem:ballsandbin}. The $n-1$ balls are all the elements but $a_t$, the $r-1$ red balls correspond to elements smaller than $a_{t}$, the $n-r$ blue balls to elements larger than $a_{t}$, and $t-1$ balls drawn are the elements arrived before $a_t$. Finally, observe that $\Prob\paren{r < k \vert \ensuremath{\mathcal{E}}_k} = 0$, so for $r < k$, the bound holds trivially. In the remaining cases, $r = \Theta_1(n)$ and we use the bound in Lemma \ref{lem:ballsandbin} with $t/n = \Theta(1)$ to get the statement. \end{proof} Using the previous lemma, we can lower bound the cost due to the $t$-th element. Fix any deterministic algorithm $\ensuremath{\mathcal{A}}$ for the online ranking problem. Recall that $\pi(a_{t})$ denotes the learned rank of the item $a_{t}$ arriving in the $t$-th time step. For any time step $t \in [n]$, we use $\cost{t}$ to denote the cost incurred by the algorithm $\ensuremath{\mathcal{A}}$ in positioning the item $a_{t}$. More formally, if $\rank{a_{t}} = i$, we have $\cost{t} := \card{i - \pi(a_{t})}$. The following lemma is the key part of the proof. \begin{lemma}\label{lem:lower-cost-t} Fix any deterministic algorithm $\ensuremath{\mathcal{A}}$. For any $t =\Theta_1(n)$, $\Exp\bracket{\cost{t}} = \Omega\paren{\sqrt{n}}$. \end{lemma} \begin{proof} Let $\sigma$ be a permutation of $[t]$ and $\mathcal{O}_\sigma$ the event that $a_{\sigma(1)} < a_{\sigma(2)} < \hdots < a_{\sigma(t)}$. For any deterministic algorithm $\mathcal{A}$, the choice of the position $\pi(a_t)$ where to place the $t$-th element depends only on $\sigma$. Let $k = \sigma^{-1}(t)$ be the relative rank of the $t$-th element.
Since the distribution of $k$ is uniform in $[t]$ (see the proof of Proposition \ref{prop:random-assignment}), we have that: $$\Prob\left[\frac{t}{4} \leq k \leq \frac{3t}{4}\right] \geq \frac{1}{2}.$$ Conditioned on that event, $k = \Theta_1(t)$, so we are in the conditions of Lemma \ref{lem:anti-concentration}. Therefore, the probability of each rank given the observations is at most $O(1/\sqrt{n})$. Therefore, there is a constant $c$ such that: $$\Prob\left[ \card{\rank{a_{t}} - \pi(a_{t}) } < c \sqrt{n} \text{ } \middle\vert \text{ } \frac{t}{4} \leq k \leq \frac{3t}{4} \right] \leq \frac{1}{2}.$$ Finally, we observe that: \begin{align*} \Exp\bracket{\cost{t}} & \geq \frac{1}{2}\cdot \Exp\left[\card{\rank{a_{t}} - \pi(a_{t})} \text{ } \middle\vert \text{ } \frac{t}{4} \leq k \leq \frac{3t}{4} \right] \\ & \geq \frac{1}{2} \cdot c \sqrt{n} \cdot \Prob\left[ \card{\rank{a_{t}} - \pi(a_{t})} \geq c \sqrt{n} \text{ } \middle\vert \text{ } \frac{t}{4} \leq k \leq \frac{3t}{4} \right] \geq \frac{c \sqrt{n}}{4}. \end{align*} \end{proof} We are now ready to prove Theorem~\ref{thm:lower-bound}. \begin{proof}[Proof of Theorem~\ref{thm:lower-bound}] For any deterministic algorithm, sum the bound in Lemma \ref{lem:lower-cost-t} for $\Theta(n)$ time steps. For randomized algorithms, the same bound extends via Yao's minimax principle. The reason is that a randomized algorithm can be seen as a distribution on deterministic algorithms parametrized by the random bits it uses. If a randomized algorithm obtains $o(n \sqrt{n})$ inversions in expectation, then it should be possible to fix the random bits and obtain a deterministic algorithm with the same performance. \end{proof} \section{Sparse Secretary Ranking} \label{sec:sparse} Now we consider the special case where the number of positions is very large, which we call sparse secretary ranking.
In the extreme when $m \geq 2^{n+1}-1$ it is possible to assign a position to each secretary without ever incurring a mistake. To do that, build a complete binary tree of height $n$ and associate each position in $[m]$ with a node (both internal and leaf) of the binary tree such that the order of the positions corresponds to the pre-order induced by the binary tree (see Figure~\ref{fig:binary_tree}). Once the elements arrive in online fashion, insert them in the binary tree and allocate them in the corresponding position. \begin{figure} \caption{Illustration of the binary tree algorithm for $m = 7$ and order $a_2 < a_3 < a_1$.} \label{fig:binary_tree} \end{figure} We note that the above algorithm works for any order of arrival. If the elements arrive in random order, it is possible to obtain zero inversions with high probability for an exponentially smaller value of $m$. The idea is very similar to the one outlined above. Let $H_n$ be a random variable corresponding to the height of a binary tree built from $n$ elements in random order. Reed \cite{reed2003height} shows that $\Exp[H_n] = (1+o(1))\, \alpha \ln(n)$, $\text{Var}[H_n] = O(1)$ where $\alpha$ is the solution of the equation $\alpha \ln(2e / \alpha) = 1$ which is $\alpha \approx 4.31107$. Since the arrival order of secretaries is uniformly random, the binary tree algorithm will not touch any node with height more than $\bar h = \lceil (\alpha + O(\ensuremath{\varepsilon})) \ln(n) \rceil$ with probability $1-o(1)$. This observation allows us to define an algorithm that obtains zero inversions with probability $1-o(1)$. If $m \geq 2^{\bar h + 1}-1 = \Omega(n^{2.988 + \ensuremath{\varepsilon}})$, we can build a binary tree with height $\bar h$ and associate each node of the tree to a position. Once the elements arrive, allocate the item in the corresponding position. If an item is added to the tree with height larger than $\bar h$, start allocating the items arbitrarily.
\begin{theorem}\label{thm:sparse} If $m \geq n^{2.988 + \ensuremath{\varepsilon}}$ then the algorithm that allocates according to a binary tree incurs zero inversions with probability $1-o(1)$. \end{theorem} Devroye \cite{devroye1986note} bounds the tail of the distribution of $H_n$ as follows: $$\Prob[H_n \geq k \cdot \ln n] \leq \frac{1}{n} \cdot \left( \frac{2e}{k} \right)^{k\cdot \ln n}$$ for $k > 2$. In particular: $\Prob[H_n \geq 6.3619 \cdot \ln n] \leq 1/n^2$. Adapting the analysis above, we can show that for $m \geq n^{4.41}$ (where $4.41 \approx 6.3619 \cdot \ln(2)$) the algorithm incurs less than one inversion in expectation. \begin{corollary} If $m = \Omega(n^{4.41})$ then the algorithm that allocates according to a binary tree incurs $O(1)$ inversions in expectation. \end{corollary} \section{General Secretary Ranking} In the general case, we combine the ideas for the sparse and dense case to obtain an algorithm interpolating both cases. As described in Algorithm~\ref{alg:main}, we construct a complete binary search tree of height $h$ and associate one position for each internal node, but for the leaves we associate a block of $w = m/2^h - 1$ positions (see Figure~\ref{fig:binary_tree_2}). If we insert an element in a leaf, we allocate according to an instance of the dense ranking algorithm. By that we mean that the algorithm pretends that the elements allocated to that leaf are an isolated instance of dense ranking with $w$ elements and $w$ positions. We will set $h$ such that, with high probability, each leaf receives at most $w$ elements. If at some point more than $w$ elements are placed in any given leaf, the algorithm starts allocating arbitrarily.
\begin{figure} \caption{Illustration of the general algorithm (Algorithm~\ref{alg:main}).} \label{fig:binary_tree_2} \end{figure} \begin{algorithm2e}[H] \caption{General secretary ranking}\label{alg:main} { \textbf{Input:} a set of $m$ positions, at most $n$ online arrivals and a height $h$.} Construct a complete binary search tree $T$ of height $h$ and associate one position for each internal node, and a block of $w = m/2^h - 1$ positions for each leaf such that the order of the positions corresponds to the pre-order induced by the binary tree \For{\textnormal{any time step $t \in [n]$ and element $a_{t}$}}{ Insert $a_t$ in the tree $T$ \If{\textnormal{$a_t$ reaches an empty internal node}}{ Place $a_t$ in the position corresponding to this internal node } \Else{ Place $a_t$ according to an instance of the dense ranking algorithm (Algorithm~\ref{alg:dense}) over the block of positions corresponding to the leaf reached by $a_t$. If there are no positions available in that block, place $a_t$ arbitrarily } } \end{algorithm2e} For stating our main theorem and its proof, it is convenient to define the functions: $$f(\alpha) = \frac{\alpha \ln(2) - 1}{1 - 2 \alpha \ln(2e / \alpha)} \qquad g(\alpha) = \frac{1}{1 - 2 \alpha \ln(2e / \alpha)}$$ defined in the interval $(\alpha_0, \infty)$ where $\alpha_0 \approx 4.910$ is the solution to the equation $1- 2 \alpha_0 \ln(2e / \alpha_0) = 0$. Both functions are monotone decreasing from $+\infty$ (when $\alpha = \alpha_0$) to zero (when $\alpha \rightarrow \infty$). We are now ready to state our main theorem: \begin{theorem} Assume $m \geq 10 n \log n$ and let $\alpha \in (\alpha_0, \infty)$ be the solution to $\frac{m}{9 n \log n} = n^{f(\alpha)}$, then the expected number of inversions of the general secretary ranking algorithm with $h = \alpha \ln(n^{g(\alpha)})$ is $\tilde{O}(n^{1.5 - 0.5 g(\alpha)})$. \end{theorem} We note that the algorithm smoothly interpolates between the two cases previously analyzed.
When $m = n \log(n)$ then $\alpha \rightarrow \infty$, so $g(\alpha) \rightarrow 0$ and the bound on the theorem becomes $\tilde{O}(n^{1.5})$. In the other extreme, when $m \rightarrow \infty$, then $\alpha \rightarrow \alpha_0$ and therefore $g(\alpha) \rightarrow \infty$, so the bound on the number of inversions becomes $O(1)$. \begin{proof} Let $H_t$ be the height of the binary tree formed by the first $t$ elements. By Devroye's bound \cite{devroye1986note}, the probability that a random binary tree formed by the first $t := n^{g(\alpha)}$ elements has height more than $h = \alpha \ln(t)$ is $$\Prob[H_t \geq h] \leq \frac{1}{t} (2e/\alpha)^{\alpha \ln t} = t^{\alpha \ln(2e/\alpha)-1}.$$ In case this event happens, we will use the trivial bound of $O(n^2)$ on the number of inversions, which will contribute $$n^2 t^{\alpha \ln(2e/\alpha)-1} = n^{1.5 - 0.5/(1 - 2 \alpha \ln(2e / \alpha))}= n^{1.5-0.5 g(\alpha)}$$ to the expectation. From this point on, we consider the remaining event that $H_t < h$. Next, we condition on the first $t$ elements that we denote $b_1, \ldots, b_t$ such that $b_1 < \cdots < b_t$. We note that for each remaining element $a_i$, $i > t$, we have $b_j < a_i < b_{j+1}$ with probability $1/(t+1)$ for all $j \in [t]$. Since $b_1, \ldots, b_t$ are all placed in positions corresponding to internal nodes, each element has at most probability $1/t$ of hitting any of the dense-ranking instances. Thus, each dense ranking instance receives at most $n/t$ elements in expectation, and by a standard application of the Chernoff bound, the probability that a dense ranking instance sees more than $ 9 (n/t) \log n$ elements is $ n^{-3}$. If this is the case for some dense ranking instance, we again use the $n^2$ trivial bound, which contributes at most $1$ to the expected number of inversions. For the remainder of the proof, we assume that each dense ranking instance gets at most $ 9(n/t)\log n$ elements. 
Next, note that the size of each block is $$w = \frac{m}{2^{h}} -1 = \frac{m}{t^{\alpha \ln(2)}} -1 \geq 9 (n/t) \log n$$ where the last inequality holds by the definition of $t$. Thus, no more than $w$ elements are inserted in any leaf. Let $v_i$ be the number of elements in the $i$-th dense ranking instance. We note that within the elements in each dense ranking block the arrival order is random, so we can apply the bound from Section~\ref{sec:alg} and obtain by Theorem~\ref{thm:dense} that the total expected cost from the inversions caused by dense rank is at most $$\sum_i O\left(v_i^{1.5}\right) \leq \tilde{O}(t \cdot (n/t)^{1.5}) = \tilde{O}(n^2 t^{\alpha \ln(2e/\alpha)-1}) = \tilde{O}(n^{1.5-0.5 g(\alpha)}) $$ since $\sum_i v_i = n$ and $v_i \leq 9 (n/t) \log(n)$. By the construction there are no inversions between elements inserted in different leaves and between an element inserted in an internal node and any other element. Summing the expected number of mistakes from the events $H_t \geq h$ and $H_t < h$, we get the bound in the statement of the theorem. \end{proof} \appendix \section{Useful Concentration of Measure Inequalities} \label{sec:appconcentration} We use the following two standard versions of the Chernoff bound (see, e.g.,~\cite{ConcentrationBook}) throughout. \begin{proposition}[Multiplicative Chernoff bound] \label{prop:chernoff-multi} Let $X_1,\ldots,X_n$ be $n$ independent random variables taking values in $[0,1]$ and let $X:= \sum_{i=1}^{n} X_i$. Then, for any $\ensuremath{\varepsilon} \in (0,1]$, \begin{align*} \Prob\paren{X \geq (1+\ensuremath{\varepsilon}) \cdot \Exp\bracket{X}} \leq \exp\paren{-\frac{\ensuremath{\varepsilon}^2 \cdot \Exp\bracket{X}}{3}}. \end{align*} \end{proposition} \begin{proposition}[Additive Chernoff bound] \label{prop:chernoff} Let $X_1,\ldots,X_n$ be $n$ independent random variables taking values in $[0,1]$ and let $X:= \sum_{i=1}^{n} X_i$.
Then, \begin{align*} \Prob\paren{\card{X - \Exp\bracket{X}} > t} \leq 2 \cdot \exp\paren{-\frac{2t^2}{n}}. \end{align*} Moreover, if $X_1,\ldots,X_n$ are \emph{negatively correlated} (i.e. $\Prob[X_i = 1, \forall i \in S] \leq \prod_{i \in S} \Prob[X_i = 1]$ for all $S \subseteq [n]$), then the upper tail holds: $\Prob\paren{X - \Exp\bracket{X} > t} \leq \exp\paren{-\frac{2t^2}{n}}$. \end{proposition} Moreover, in the above setting, if $X$ comes from a sampling \emph{with} replacement process, then the inequality holds for both upper and lower tails. For sampling without replacement, we refer to Serfling \cite{serfling1974probability} for a complete discussion and for Chernoff bounds for negatively correlated random variables see \cite{panconesi1997randomized}. \begin{proposition}[Chernoff bound for sampling without replacement]\label{prop:chernoff_negative} Consider an urn with $a$ balls, each colored either red or blue. Draw $b \leq a$ balls uniformly from the urn without replacement and let $X$ be the number of red balls drawn, then the two-sided bound holds: $\Prob\paren{\card{X - \Exp\bracket{X}} > t} \leq 2 \cdot \exp\paren{-\frac{2t^2}{b}}$. \end{proposition} \begin{proof} If $X_i$ is the event that the $i$-th ball is red, then since $X_i$ are negatively correlated, the upper tail Chernoff bound of $X = \sum_i X_i$ holds. Now, let $Y_i = 1 - X_i$ be the indicator that the $i$-th ball is blue and $Y = \sum_i Y_i$. The upper tail for $Y$ corresponds to the lower tail for $X$, i.e.: $\Prob\paren{X - \Exp\bracket{X} < -t} = \Prob\paren{Y - \Exp\bracket{Y} > t} \leq \exp\paren{-\frac{2t^2}{b}}$. \end{proof} \section{Missing Details from Section~\ref{sec:alg}} \label{sec:appalg} The following two straightforward propositions are used in the proof of Proposition~\ref{lem:quantile-estimation} and Lemma~\ref{lem:assignment}, respectively. For completeness, we provide their proofs here.
\begin{restatable}{rPro}{propratios} \label{prop:ratios} If $1 < t \leq n$ and $0 \leq r \leq t-1$, then $r \cdot \paren{\frac{n}{t}} \leq r \cdot \paren{\frac{n-1}{t-1}} \leq (r+1) \cdot \paren{\frac{n}{t}}$. \end{restatable} \begin{proof} $0 \leq r \left( \frac{n-1}{t-1} - \frac{n}{t} \right) = r \frac{n-t}{t(t-1)} \leq (t-1) \frac{n-t}{t(t-1)} \leq \frac{n}{t}. $ \end{proof} \begin{restatable}{rPro}{propsum} For any integer $n > 0$, \label{prop:sum} $\sum_{t=1}^{n-\sqrt{n}} \paren{\frac{t}{n-t}}^2 = O(n\sqrt{n})$. \end{restatable} \begin{proof} By defining $k = n-t$, we have, \begin{align*} \sum_{t=1}^{n-\sqrt{n}} \paren{\frac{t}{n-t}}^2 = \sum_{k=\sqrt{n}}^{n-1} \paren{\frac{n-k}{k}}^2 \leq \sum_{k=\sqrt{n}}^{n-1} \paren{\frac{n}{k}}^2 \end{align*} For $i \in [\sqrt{n}]$, define $K_i := \set{k \mid i \cdot \sqrt{n} \leq k < (i+1) \cdot \sqrt{n}}$. For any $k \in K_i$, we have, $\frac{n}{k} \leq \frac{\sqrt{n}}{i}$. As such, we can write, \begin{align*} \sum_{k=\sqrt{n}}^{n-1} \paren{\frac{n}{k}}^2 &= \sum_{i=1}^{\sqrt{n}} \sum_{k \in K_i} \paren{\frac{n}{k}}^2 \leq \sum_{i=1}^{\sqrt{n}} \sum_{k \in K_i} \paren{\frac{\sqrt{n}}{i}}^2 \\ &\leq \sum_{i=1}^{\sqrt{n}} n \cdot \card{K_i} \cdot \frac{1}{i^2} \leq n\sqrt{n} \cdot \sum_{i=1}^{\sqrt{n}} \frac{1}{i^2} = O(n\sqrt{n}) \end{align*} as the series $\sum_{i} \frac{1}{i^2}$ is a converging series. \end{proof} \section{Anti-Concentration for Sampling Without Replacement} \label{sec:applower-bound} We prove Lemma~\ref{lem:ballsandbin} restated here for convenience. \begin{lemma*} [Restatement of Lemma~\ref{lem:ballsandbin}] Assume there are $n$ balls in a bin, $r$ of which are red and the remaining $n - r$ are blue. Suppose $t < \min(r, n-r)$ balls are drawn from the bin uniformly at random without replacement, and let $\ensuremath{\mathcal{E}}_{k,t,r,n}$ be the event that $k$ out of those $t$ balls are red. 
Then, if $r = \Theta_1(n)$ and $t = \Theta_1(n)$, for every $k \in \{0, \hdots, t\}$: $\Prob\left(\ensuremath{\mathcal{E}}_{k,t,r,n}\right) = O\left(1/\sqrt{n}\right).$ \end{lemma*} To prove Lemma~\ref{lem:ballsandbin}, we will describe the sampling without replacement process explicitly and bound the relevant probabilities. \begin{proposition} \label{prop:stirling} Let $0 < c < 1$ be a constant. Then: $$\binom{n}{cn} = \Theta( n^{-1/2} c^{- (cn + 1/2)} (1-c)^{- ((1-c)n +1/2)})$$ \end{proposition} The notation $y = \Theta(x)$ in the proposition statement means that there are universal constants $0 < \underline \alpha < \overline \alpha$ independent of $c$ and $n$ such that $\underline \alpha \cdot x \leq y \leq \overline \alpha \cdot x$. The proof is based on the following version of Stirling's approximation: $ {\sqrt {2\pi }}\ n^{n+{\frac {1}{2}}}e^{-n}\leq n!\leq e\ n^{n+{\frac {1}{2}}}e^{-n},$ which can be written in our notation as: $n! = \Theta( n^{n+{\frac {1}{2}}}e^{-n})$. The proof of the proposition follows from just expanding the factorials in the definition of the binomial coefficient: \begin{proof} Observe that \begin{align*} \binom{n}{cn} & = \frac{n!}{(cn)! ((1-c)n)!} = \Theta \left( \frac{n^{n+{\frac {1}{2}}}e^{-n}}{(cn)^{cn+{\frac {1}{2}}}e^{-cn}((1-c)n)^{(1-c)n+{\frac {1}{2}}}e^{-((1-c)n)}} \right) \end{align*} The statement follows from simplifying the right hand side. \end{proof} \begin{lemma} \label{lem:ktrn} Assume that $r = \Theta_1(n)$ and $t = \Theta_1(n)$ and $t \leq \min(r, n-r)$. Then, for $k = \lfloor rt / n \rfloor$, we have $$\frac{{{r}\choose{k}} \cdot {{n-r}\choose{t-k}}}{{{n}\choose{t}}} = \mathcal{O}\left(1/ \sqrt{n}\right).$$ \end{lemma} \begin{proof} Start by writing $r = c_r \cdot n$ and $t = c_t \cdot n$ for $0 < c_r, c_t < 1$. It will be convenient to assume that $k = rt / n$ is an integer (if not, and we need to apply floors, the exact same proof works by keeping track of the errors introduced by the floors).
First, note that $$ \frac{{{r}\choose{k}} \cdot {{n-r}\choose{t-k}}}{{{n}\choose{t}}} = \frac{{{c_r n}\choose{c_t c_r n}} \cdot {{(1-c_r)n}\choose{(1-c_r)c_t n}}}{{{n}\choose{c_t n}}}$$ We can now apply the approximation in Proposition \ref{prop:stirling}, obtaining: $$\Theta \left( \frac{n^{1/2}c_t^{c_t n+{\frac {1}{2}}}(1-c_t)^{(1-c_t)n+{\frac {1}{2}}}}{(c_r n)^{1/2}c_t^{c_t(c_r n)+{\frac {1}{2}}}(1-c_t)^{(1-c_t)(c_r n)+{\frac {1}{2}}}((1-c_r)n)^{1/2}c_t^{c_t ((1-c_r)n)+{\frac {1}{2}}}(1-c_t)^{(1-c_t)((1-c_r)n)+{\frac {1}{2}}}} \right)$$ Simplifying this expression, we get: $\Theta\left( \left(n c_t (1-c_t) c_r (1-c_r) \right)^{ -1/2} \right) = \Theta_1\left(1 / \sqrt{n} \right)$. \end{proof} \begin{lemma} \label{lem:max} Fix any $r,t,n$ such that $r, t \leq n$. Then, $$\argmax_{k \in [n]} \frac{{{r}\choose{k}} \cdot {{n-r}\choose{t-k}}}{{{n}\choose{t}}} = \left\lfloor t \cdot \frac{r}{n} \right\rfloor \text{~or~} \left\lceil t \cdot \frac{r}{n} \right\rceil.$$ \end{lemma} \begin{proof} The proof is again simpler if we assume $k = tr/n$ is an integer. If not, the same argument works by controlling the errors. In that case, let $k_1 = t r /n + i$ and $k_2 = t r /n + i + 1$ and as before, let $r = c_r n$ and $t =c_t n$. Note that \begin{align*} \frac{\frac{{{r}\choose{k_1}} \cdot {{n-r}\choose{t-k_1}}}{{{n}\choose{t}}} }{\frac{{{r}\choose{k_2}} \cdot {{n-r}\choose{t-k_2}}}{{{n}\choose{t}}} } & = \frac{{{r}\choose{k_1}} \cdot {{n-r}\choose{t-k_1}}}{{{r}\choose{k_2}} \cdot {{n-r}\choose{t-k_2}}} = \frac{{{c_r n}\choose{c_tc_rn+i}} \cdot {{(1-c_r)n}\choose{(1-c_r)c_tn-i}}}{{{c_r n}\choose{c_tc_rn+i+1}} \cdot {{(1-c_r)n}\choose{(1-c_r)c_tn-i - 1}}} = \frac{(c_tc_rn + i + 1)\cdot((1-c_t)(1-c_r)n +i + 1)}{((1-c_t)c_rn - i)\cdot ((1-c_r)c_tn-i)} \end{align*} If $i \geq 0$, then the last term is at least $ \frac{(c_tc_rn + 1)\cdot((1-c_t)(1-c_r)n + 1)}{((1-c_t)c_rn)\cdot ((1-c_r)c_tn)}$ which is greater than one.
If $i \leq - 1$, then the last term is at most $\frac{(c_tc_rn)\cdot((1-c_t)(1-c_r)n)}{((1-c_t)c_rn + 1)\cdot ((1-c_r)c_tn + 1)}$, which is smaller than one. Thus, $\frac{{{r}\choose{k}} \cdot {{n-r}\choose{t-k}}}{{{n}\choose{t}}}$ is increasing as $k$ increases up to $tr / n$ and then decreases. Thus, the maximum is reached at $tr / n$. \end{proof} \begin{proof}[Proof of Lemma \ref{lem:ballsandbin}] We first use a simple counting argument to obtain an expression for $\Prob\left(\ensuremath{\mathcal{E}}_{k,t,r,n}\right)$ as a ratio of binomial coefficients. We note that there are ${{r}\choose{k}}$ collections of $k$ red balls, ${{n-r}\choose{t-k}}$ collections of $t - k$ blue balls, and that the total number of collections of $t$ balls is ${{n}\choose{t}}$. Since the $t$ balls are drawn uniformly at random without replacement, we get $$ \Prob\paren{\ensuremath{\mathcal{E}}_{k,t,r,n} } = \frac{{{r}\choose{k}} \cdot {{n-r}\choose{t-k}}}{{{n}\choose{t}}}.$$ The $O(1 / \sqrt{n})$ bound now follows directly from Lemma \ref{lem:ktrn} and Lemma \ref{lem:max}. \end{proof} \section{Hardness of Online Ranking with Adversarial Ordering} \label{sec:appadversarial} \begin{proposition} \label{prop:adversarial} If the ordering $\sigma$ of the arrival of elements is adversarial, then any algorithm has cost $\Omega(n^2)$ in expectation. \end{proposition} \begin{proof} At a high level, we construct an ordering such that at each iteration, the arrived element is either the largest or smallest element not yet observed with probability $1/2$ each. Since the algorithm cannot distinguish between the two cases, it suffers a linear cost in expectation at each arrival. Formally, we define $\sigma$ inductively. At round $t$, let $i_{t,-}$ and $i_{t,+}$ be the minimum and maximum indices of the elements arrived previously. We define $\sigma(t)$ such that $\sigma(t) = a_{i_{t,-} + 1}$ with probability $1/2$ and $\sigma(t) = a_{i_{t,+} - 1}$ with probability $1/2$.
Thus, the $t$-th element to arrive is either the smallest or the largest element that has not yet arrived. The main observation is that the pairwise comparisons at time $t$ are identical whether $a_{(t)} = a_{i_{t,-} + 1}$ or $a_{(t)} = a_{i_{t,+} - 1}$. This is because all the elements that arrived previously are either maximal or minimal, and there are no elements between $a_{i_{t,-} + 1}$ and $a_{i_{t,+} - 1}$ that have previously arrived. Thus, the decision of the algorithm is \emph{independent} of the randomization of the adversary for the $t$-th element. Hence, for any learned rank at time $t$, in expectation over the randomization of the adversary for the element arrived at time $t$, the learned rank is at expected distance at least $n/4$ from the true rank for $t \leq n/2$. Thus, the total cost is $\Omega(n^2)$ in expectation. \end{proof} \end{document}
\begin{document} \title[Strichartz estimates on irrational tori] {Strichartz estimates for Schr\"odinger equations on irrational tori} \author[Z.~Guo, T.~Oh, and Y.~Wang]{Zihua Guo, Tadahiro Oh, and Yuzhao Wang} \address{ Zihua Guo\\ School of Mathematical Sciences\\ Peking University\\ Beijing 100871, China} \email{[email protected]} \address{ Tadahiro Oh\\ School of Mathematics\\ The University of Edinburgh\\ and The Maxwell Institute for the Mathematical Sciences\\ James Clerk Maxwell Building\\ The King's Buildings\\ Mayfield Road\\ Edinburgh\\ EH9 3JZ, Scotland\\ and Department of Mathematics\\ Princeton University\\ Fine Hall\\ Washington Rd.\\ Princeton, NJ 08544-1000, USA} \email{[email protected]} \address{ Yuzhao Wang\\ Department of Mathematics and Physics\\ North China Electric Power University\\ Beijing 102206, China} \curraddr{Department of Mathematics and Statistics\\ Memorial University\\ St. John's, NL A1C 5S7, Canada} \email{[email protected]} \subjclass[2010]{35Q55, 42B37} \keywords{nonlinear Schr\"odinger equation; irrational torus; Strichartz estimate; well-posedness} \dedicatory{Dedicated to the memory of Professor Harold N. Shapiro (1922--2013)} \begin{abstract} In this paper, we prove new Strichartz estimates for linear Schr\"odinger equations posed on $d$-dimensional irrational tori. Then, we use these estimates to prove subcritical and critical local well-posedness results for nonlinear Schr\"odinger equations (NLS) on irrational tori.
\end{abstract} \maketitle \baselineskip = 15pt \section{Introduction}\label{SEC:1} \subsection{Background} The Cauchy problem of the nonlinear Schr\"odinger equation (NLS): \begin{equation} \begin{cases}\label{NLS0} i \partial_t u - \Delta u = \pm |u|^{p-1}u \\ u\big|_{t = 0} = u_0 \in H^s(M), \end{cases} \qquad ({\bf x}, t) \in M \times \mathbb{R} \end{equation} \noindent has been studied extensively in different settings (for example, $M = \mathbb{R}^d$, $\mathbb{T}^d$, and certain classes of manifolds) over recent years \cite{GV2, Tsutsumi, Kato, CazW2, Bo2, Bo1,BGT1, BGT2, KochT, HTT11, HTT2, Herr, W}. See also the following monographs \cite{SULEM, Caz, TAO} for references therein. In the study of NLS \eqref{NLS0}, Strichartz estimates of the following type have played a fundamental role \begin{equation}\label{IntroStr} \| e^{- it\Delta} f\|_{L^q_t L^r_{\bf x} (\mathbb{R}\times M)} \lesssim \|f\|_{H^s_{\bf x}(M)}, \end{equation} \noindent where $\|f\|_{L_t^qL_{\bf x}^r}=\big\|\|f({\bf x}, t)\|_{L^r_{\bf x}}\big\|_{L_t^q}$.\footnote{ We use $A\lesssim B$ to denote an estimate of the form $A\le CB$ for some $C>0$. Similarly, we use $A\sim B$ to denote $A\lesssim B$ and $B\lesssim A$.} In particular, when $M = \mathbb{R}^d$, \eqref{IntroStr} is known to hold with $s = 0$, namely \begin{equation}\label{IntroStr2} \| e^{- it\Delta} f\|_{L^q_t L^r_{\bf x} (\mathbb{R}\times \mathbb{R}^d)} \lesssim \|f\|_{L^2_{\bf x}(\mathbb{R}^d)}, \end{equation} \noindent if and only if $(q, r)$ satisfies $\frac{2}{q} + \frac{d}{r} = \frac{d}{2}$ with $2\leq q, r \leq \infty$ and $(q, r, d) \ne (2, \infty, 2)$. See \cite{Strichartz, Yajima, GV, KeelTao}. This was first obtained for the case $q=r$ by Strichartz \cite{Strichartz} via the Fourier restriction method.
It was then generalized by a combination of the duality argument and the following dispersive estimate: \begin{equation}\label{eq:dis} \| e^{- it\Delta} f\|_{L^\infty_{\bf x} (\mathbb{R}^d)} \lesssim |t|^{-\frac d2}\|f\|_{L^1_{\bf x}(\mathbb{R}^d)}. \end{equation} \noindent The endpoint case $(q,r)=(2,\frac{2d}{d-2})$, $d \ne 2$, was then proven in \cite{KeelTao}. Now, consider $f\in L^2(\mathbb{R}^d)$ with $\supp \widehat f \subset [-N, N]^d$. Then, as an immediate corollary to \eqref{IntroStr2}, we have the following Strichartz estimate on $\mathbb{R}^d$: \begin{align} \| e^{-it \Delta} f\|_{L^p_{t,{\bf x}}(\mathbb{R} \times \mathbb{R}^d)} \lesssim N^{\frac{d}{2} - \frac{d+2}p}\|f\|_{L^2(\mathbb{R}^d)}, \label{IntroStr3} \end{align} \noindent for $\frac{2(d+2)}{d} \leq p \leq \infty$ on $\mathbb{R}^d$. Indeed, on the one hand, the Strichartz estimate \eqref{IntroStr2} with $q = r = \frac{2(d+2)}{d}$ gives \begin{equation}\label{P3} \|e^{-it \Delta}f\|_{L^\frac{2(d+2)}{d}_{t, {\bf x}}(\mathbb{R}\times \mathbb{R}^d)} \lesssim \|f\|_{L^2(\mathbb{R}^d)}. \end{equation} \noindent On the other hand, by Bernstein's inequality \cite[Chapter 5]{Wo}, we have \begin{equation}\label{P4} \|e^{-it \Delta}f\|_{L^\infty_{t, {\bf x}}(\mathbb{R}\times \mathbb{R}^d)} \lesssim N^\frac{d}{2} \|f\|_{L^2(\mathbb{R}^d)}, \end{equation} \noindent for all $f\in L^2(\mathbb{R}^d)$ with $\supp \widehat f \subset [-N, N]^d$. By interpolating \eqref{P3} and \eqref{P4}, we see that the estimate \eqref{IntroStr3} holds for $\frac{2(d+2)}{d} \leq p \leq \infty$ and is sharp in view of sharpness of \eqref{P3} and \eqref{P4}. Note that the estimate \eqref{IntroStr3} is {\it scaling-invariant} in the following sense. Consider the linear Schr\"odinger equation: \begin{equation} \begin{cases} i \partial_t u - \Delta u = 0\\ u\big|_{t = 0} = f.
\end{cases} \label{linSchro} \end{equation} \noindent The solution $u$ to \eqref{linSchro} is given by $u ({\bf x}, t) := e^{-it\Delta} f({\bf x})$. Then, the rescaled function $u^\lambda({\bf x}, t) : = u(\lambda {\bf x}, \lambda^2 t)$, $\lambda > 0$, is also a solution to \eqref{linSchro} but with the rescaled initial condition $f^\lambda({\bf x}): = f(\lambda {\bf x})$. Noting that $\supp \widehat{f^\lambda} = \lambda \cdot \supp \widehat f$, it is easy to see that the power of $N$ in \eqref{IntroStr3} is the only power that is consistent with this scaling. We point out that the inequalities \eqref{IntroStr2}, \eqref{P3}, and \eqref{P4} are also scaling-invariant with respect to this scaling associated to the linear Schr\"odinger equation and that the scaling-invariance shows sharpness of these estimates. When $M$ is a compact manifold, the Strichartz estimate \eqref{IntroStr} becomes much more difficult and much less is known. This is partially due to the fact that we do not have the dispersive estimate \eqref{eq:dis} on a compact manifold. Moreover, \eqref{IntroStr} requires deep understanding of the eigenvalues and the eigenfunctions of the Laplacian. In the following, we focus on the case when $M$ is a standard flat torus $\mathbb{T}^d = (\mathbb{R}/ \mathbb{Z})^d$, corresponding to the usual periodic boundary condition. Moreover, we restrict our attention to the diagonal case, i.e.~$q = r$. Then, one would like to establish the following scaling-invariant\footnote{Obviously, the scaling associated to the linear Schr\"odinger equation discussed above for $\mathbb{R}^d$ does not quite make sense on $\mathbb{T}^d$.
We nonetheless call the estimate \eqref{eq:stritori} scaling-invariant.} Strichartz estimate: \begin{align}\label{eq:stritori} \| e^{-it \Delta} f\|_{L^p_{t,{\bf x}}(I \times \mathbb{T}^d)} \lesssim N^{\frac{d}{2} - \frac{d+2}p}\|f\|_{L^2(\mathbb{T}^d)}, \end{align} for all $f \in L^2(\mathbb{T}^d)$ with $\supp \widehat f \subset [-N, N]^d$, where $I$ is a compact interval. Note that, in the compact setting, an estimate of the form \eqref{eq:stritori} does not hold with $I = \mathbb{R}$, unless $p = \infty$. By drawing an analogy to the Euclidean case $M = \mathbb{R}^d$, one may hope to have \eqref{eq:stritori} for $p \geq \frac{2(d+2)}{d} $. By combining the tools from number theory, such as a divisor counting argument and the Hardy-Littlewood circle method, and the Tomas-Stein restriction method from harmonic analysis, Bourgain \cite{Bo2} proved \eqref{eq:stritori} for certain ranges of $p$: (i) $ p > \frac{2(d+2)}{d}$ when $ d = 1, 2$, (ii) $p > 4$ when $ d= 3$, and (iii) $p > \frac{2(d+4)}d$ for higher dimensions $d \geq 4$. It is worthwhile to note that, when $d = 1, 2$, \eqref{eq:stritori} is known to fail at the endpoint $p = \frac{2(d+2)}{d}$. See \cite{Bo2, TT}. Namely, the situation on $\mathbb{T}^d$ is strictly worse than the Euclidean setting. Indeed, Bourgain \cite{Bo2,Bo4} conjectured that \begin{align}\label{P01} \| e^{-it \Delta} f\|_{L^p_{t,{\bf x}}(I \times \mathbb{T}^d )} \le K_{p, N}\|f\|_{L^2(\mathbb{T}^d)}, \end{align} \noindent for all $f \in L^2(\mathbb{T}^d)$ with $\supp \widehat f \subset [-N, N]^d$, where $K_{p,N}$ satisfies \begin{equation} \begin{cases} K_{p,N} < c_p, & \text{if } p < \frac{2(d+2)}{d}, \\ K_{p,N} \ll N^\varepsilon, &\text{if } p=\frac{2(d+2)}{d}, \\ K_{p,N} < c_p N^{\frac{d}2 - \frac{d+2}p}, \quad & \text{if } p>\frac{2(d+2)}{d}, \end{cases}\label{conj} \end{equation} \noindent for any small $\varepsilon > 0$.
Here, $A(N) \ll B(N)$ means that $ \lim_{N\to \infty} \frac{A(N)}{B(N)} = 0$. More recently, using multilinear restriction theory after \cite{BCT, BG}, Bourgain \cite{Bo5} improved the result (iii) for $d \geq 4$ and showed that \eqref{eq:stritori} holds for $p > \frac{2(d+3)}d$. The general conjecture \eqref{conj}, however, remains open up to date. In \cite{Bo2, Bo1,HTT11, W}, these Strichartz estimates were then applied to prove well-posedness results of NLS \eqref{NLS0} on $\mathbb{T}^d$. See Subsection \ref{SUBSEC:1.3} for more on the well-posedness issue of \eqref{NLS0}. Let us conclude this subsection by stating the result by Herr \cite{Herr}. He considered the quintic NLS on a three-dimensional Zoll manifold $M$, i.e.~a compact Riemannian manifold such that all geodesics are simple and closed with a {\it common minimal period}. The simplest example is the three dimensional sphere $\mathbb{S}^3$. By establishing the Strichartz estimate \eqref{eq:stritori} on $M$ (instead of $\mathbb{T}^d$) with $p >4$, he proved local well-posedness of the quintic NLS on a three-dimensional Zoll manifold $M$ in the energy space $H^1(M)$. As mentioned above, all geodesics on a Zoll manifold have a common minimal period. Hence, it is natural to ask if a Strichartz estimate of the form \eqref{eq:stritori} holds on a manifold, where there is no common minimal period for geodesics. This leads us to the study of Strichartz estimates on an {\it irrational torus} $\mathbb{T}^d_{\pmb{\alpha}}$, since it is one of the simplest examples of manifolds with no common minimal period for geodesics. \subsection{Strichartz estimate on irrational tori} In the remaining part of this paper, we focus on the case when $M$ is an irrational torus $\mathbb{T}^d_{\pmb{\alpha}}$: \begin{equation} M = \mathbb{T}^d_{\pmb{\alpha}}:= \prod_{j = 1}^d \mathbb{R}/ ( \alpha_j \mathbb{Z}), \quad \alpha_j > 0,\ j = 1, \dots, d.
\label{Torus} \end{equation} \noindent As the name suggests, we are mainly interested in the case when at least one $\alpha_j$ is irrational. More generally, we are interested in the case when at least one $\alpha_{j}$ is ``rationally independent'' of the remaining ones, i.e.~there exists $\alpha_{j}$ that cannot be written as a linear combination of the other $\alpha_k$'s with rational coefficients. First consider the case when all $\alpha_j$'s are rational. Namely, $M = \mathbb{T}^d_{\pmb{\alpha}}$ is a ``rational'' torus. In this case, the problem can be reduced to that on the standard torus $\mathbb{T}^d$ by a simple geometric consideration. By writing $\alpha_j = \frac{k_j}{m_j}$ for some $k_j, m_j \in \mathbb{N}$, let $k$ be the least common multiple of $k_j$'s. The basic idea is to view the scaled standard torus $\widetilde M: = k \mathbb{T}^d = \big( \mathbb{R}/(k \mathbb{Z}) \big)^d$ as a disjoint union of parallel translates of the original rational torus $M = \mathbb{T}^d_{\pmb{\alpha}}$ with $\alpha_j^{-1} k$ copies in the $x_j$-direction. Now, consider the linear Schr\"odinger equation \eqref{linSchro} on $M = \mathbb{T}^d_{\pmb{\alpha}}$. By periodic extension, we can view this problem on the scaled standard torus $\widetilde M = k \mathbb{T}^d $. Given an initial condition $f$ and the solution $u(t) = e^{-it \Delta} f$ on $M$, let $\widetilde f$ and $\widetilde u$ denote their periodic extensions on $\widetilde M$, respectively. By uniqueness of solutions to the linear Schr\"odinger equation, we see that $\widetilde u(t) = e^{-it \Delta} \widetilde f$ on $\widetilde M$. Clearly, the Strichartz estimates on the standard torus $\mathbb{T}^d$ also hold on the scaled standard torus $\widetilde M = k \mathbb{T}^d $, where the implicit constants further depend on $k$.
With $\pmb{\alpha} = (\alpha_1, \dots, \alpha_d)$, we have \[\|\widetilde f \|_{L^2_{\bf x}(\widetilde M)} = C(\pmb{\alpha}) \|f \|_{L^2_{\bf x}(M)} \quad \text{and}\quad \|\widetilde u(t) \|_{L^p_{\bf x}(\widetilde M)} = C(\pmb{\alpha}, p) \|u(t) \|_{L^p_{\bf x}(M)}. \] \noindent Moreover, letting $\widehat f$ and $\widetilde{\mathcal{F}} [\widetilde f]$ denote the Fourier coefficients of $f$ on $\mathbb{T}^d_{\pmb{\alpha}}$ and $\widetilde f$ on $k \mathbb{T}^d$, respectively, we have \begin{align*} \widehat f ({\bf n} ) & = \frac{1}{|\mathbb{T}^d_{\pmb{\alpha}}|} \int_{\mathbb{T}^d_{\pmb{\alpha}}} f({\bf x}) e^{-2\pi i \sum_{j = 1}^d n_j \frac{ x_j}{\alpha_j}} d{\bf x}\\ & = \frac{1}{|k \mathbb{T}^d|} \int_{k \mathbb{T}^d} \widetilde f({\bf x}) e^{-2\pi i \sum_{j = 1}^d \frac{k n_j}{\alpha_j} \frac{x_j}{k}} d{\bf x} = \widetilde{\mathcal{F}}[\widetilde f] \big(\tfrac{k}{\alpha_1}n_1, \dots, \tfrac{k}{\alpha_d}n_d\big), \end{align*} \noindent where ${\bf n} = (n_1, \dots, n_d) \in \mathbb{Z}^d$ and $|\,\cdot\,|$ denotes the Lebesgue measure of a set. Namely, we have \[ \supp \widetilde{\mathcal{F}}[\widetilde f] = \frac{k}{\pmb{\alpha}}\cdot \supp \widehat f := \Big\{\big(\tfrac{k}{\alpha_1}n_1, \dots, \tfrac{k}{\alpha_d}n_d\big) \in \mathbb{Z}^d:\, {\bf n} \in \supp \widehat f \, \Big\}. \] \noindent Therefore, we see that the Strichartz estimates of the form \eqref{P01} on the standard torus $\mathbb{T}^d$ also hold on our rational torus $M = \mathbb{T}^d_{\pmb{\alpha}}$, where the implicit constants further depend on $\pmb{\alpha} = (\alpha_1, \dots, \alpha_d)$. When there is no $\alpha_{j}$ that is rationally independent of the remaining ones, we can use spatial and temporal dilations to reduce the situation to the case of a rational torus above. Therefore, in the following, we assume that at least one $\alpha_{j}$ is rationally independent of the remaining $\alpha_k$'s.
Before proceeding further, let us change the spatial domain $M = \mathbb{T}^d_{\pmb{\alpha}}$ to the standard torus $\mathbb{T}^d$ at the expense of modifying the Laplacian. By a change of spatial variables ($x_j \mapsto \alpha_j x_j$), we see that \eqref{NLS0} is equivalent to the following NLS on the usual torus $\mathbb{T}^d = (\mathbb{R} / \mathbb{Z})^d$: \begin{equation} \begin{cases}\label{eq:nls} i \partial_t u - \Delta u = \pm |u|^{p-1}u\\ u\big|_{t = 0} = u_0\in H^s(\mathbb{T}^d), \end{cases} \quad ({\bf x}, t) \in \mathbb{T}^d\times \mathbb{R}, \end{equation} \noindent where the Laplace operator $\Delta$ is now defined by \begin{equation}\label{eq:Q0} \widehat{\Delta f}({\bf n}) = - 4 \pi^2 Q({\bf n})\widehat f({\bf n}), \end{equation} \noindent with ${\bf n} = (n_1,\dots, n_d) \in \mathbb{Z}^d$ and \begin{align}\label{eq:Q} Q({\bf n}) = \theta_1 n_1^2 + \cdots + \theta_d n_d^2, \quad \tfrac{1}{C}\leq\theta_j: = \tfrac{1}{\alpha_j^2}\leq C, \ j=1,\cdots,d. \end{align} \noindent We point out that some estimates in the following depend on $C$ in \eqref{eq:Q} but not on the specific arithmetic nature of $\theta_j$'s. Our main interest is to discuss well-posedness of the Cauchy problem \eqref{eq:nls} by first studying relevant Strichartz estimates in this setting. As compared to the problem on the standard torus $\mathbb{T}^d$, i.e.~with $Q({\bf n}) = |{\bf n}|^2 = \sum_{j = 1}^d n_j^2$, it is a lot harder to study Strichartz estimates on irrational tori. The main reason for this difficulty is that the number theoretic tools such as a divisor counting argument and the Hardy-Littlewood circle method do not work well in this setting. Previously, Bourgain \cite{Bo4} and Catoire-Wang \cite{CW} studied the Cauchy problem \eqref{eq:nls} on irrational tori and proved some local well-posedness results in subcritical Sobolev spaces. See Theorem \ref{THM:3} below.
In the following, we investigate new Strichartz estimates on irrational tori and use them to prove well-posedness results of the Cauchy problem \eqref{eq:nls} in both subcritical and critical Sobolev spaces. In the rest of the paper, we assume that the Laplacian $\Delta$ is defined by \eqref{eq:Q0}, unless stated otherwise, and define the linear Schr\"odinger evolution by\footnote{Strictly speaking, there is an extra factor of $2\pi$ in front of $Q({\bf n})$ in \eqref{eq:Q}. However, such a factor can be eliminated by time dilation and thus, for simplicity of notations, we drop it in the following.}\begin{align}\label{lin-evo} e^{-it\Delta} f ({\bf x}) = \sum_{{\bf n}\in \mathbb{Z}^d} \widehat f({\bf n}) e^{2\pi i ({\bf n} \cdot {\bf x} + Q({\bf n}) t)}, \end{align} \noindent where $Q({\bf n})$ is as in \eqref{eq:Q}. We first summarize the known Strichartz estimates. In the following, $I$ denotes a compact interval in $\mathbb{R}$. \begin{theorem}\label{THM:1} The Strichartz estimate on an irrational torus is known to hold \begin{align} \| e^{-it \Delta} f\|_{L^p_{t,{\bf x}}(I \times \mathbb{T}^d )} \lesssim K_{p, N}\|f\|_{L^2(\mathbb{T}^d)}, \label{P0} \end{align} \noindent for all $f \in L^2(\mathbb{T}^d)$ with $\supp \widehat f \subset [-N, N]^d$ in the following cases: \begin{itemize} \item[(i)] $d= 2$ \cite{CW}\footnote{ After the completion of this manuscript, we learned that this result in \cite{CW} was recently improved to $K_{4, N} = N^{\frac{131}{832}+}$ by Demirbas \cite{Demirbas}. While the proof in \cite{CW} is based on Jarn\'ik's argument \cite{Jarnik}, the proof in \cite{Demirbas} is based on Huxley's counting estimate \cite{Huxley}. More recently, this result in \cite{Demirbas} was improved to $K_{4, N} = N^{\frac{1}{8}+}$ by Demeter \cite{Demeter}.
See the footnote in Theorem \ref{THM:2} (ii).} \textup{:} $K_{4, N} = N^\frac{1}{6}$, \item[(ii)] $d=3$ \cite{Bo4}\textup{:} $K_{4, N} = N^{\frac{1}{3}+\varepsilon}$, \item[(iii)] $d\ge3$ \cite{CW}\textup{:} $K_{4, N} = N^{\frac{d}{4} - \frac d{2(d+1)} +\varepsilon}$ when $d$ is odd, and $K_{4, N} = N^{\frac{d}{4} - \frac 1{2} +\varepsilon}$ when $d$ is even, \item[(iv)] $d \geq 2$ \cite{Bo5}\textup{:} $K_{p, N} = N^\varepsilon$ for $p = \frac{2(d+1)}{d}$, \end{itemize} \noindent for any small $\varepsilon >0$. \end{theorem} \noindent Note that the implicit constants in \eqref{P0} depend on $C$ in \eqref{eq:Q} and the length of the local-in-time interval $I$. The same comment applies to all the estimates in the remainder of the paper and we do not mention this dependence explicitly in the following. In \cite{Bo4}, Bourgain also proved \begin{equation} \| e^{-it \Delta} f\|_{L^p_{t}L^4_{\bf x}(I\times \mathbb{T}^3)} \lesssim N^{\frac{3}{4} - \frac{2}p}\|f\|_{L^2(\mathbb{T}^3)} \label{P0a} \end{equation} for $p>\frac {16}3$. In this paper, we partially improve the known results in Theorem \ref{THM:1}, and obtain some critical Strichartz estimates when $p$ is large. We state our main result on the Strichartz estimates on irrational tori. \begin{theorem}\label{THM:2} \textup{(i)} The following scaling-invariant Strichartz estimate holds on an irrational torus: \begin{align} \| e^{-it \Delta} f\|_{L^p_{t,{\bf x}}(I \times \mathbb{T}^d)} \lesssim N^{\frac{d}{2} - \frac{d+2}p}\|f\|_{L^2(\mathbb{T}^d)}, \label{P1} \end{align} \noindent for all $f \in L^2(\mathbb{T}^d)$ with $ \supp \widehat f \subset [-N,N]^d$, provided that $d$ and $p$ are in the following ranges: \begin{itemize} \item[(i.a)] $d=2$\textup{:} $p>\frac{20}{3}$, \item[(i.b)]$d=3$ \textup{:} $p>\frac {16}3$, \item[(i.c)] $d=4$\textup{:} $p>4$, \item[(i.d)] $d\ge 5$\textup{:} $p\ge 4$.
\end{itemize} \noindent \textup{(ii)} Let $\varepsilon > 0$. Then, the Strichartz estimate with an $\varepsilon$-loss of regularity holds on an irrational torus:\footnote{In a very recent preprint, Demeter \cite{Demeter} proved the Strichartz estimates \eqref{P2} with an $\varepsilon$-loss on the standard torus $\mathbb{T}^d$ for $p \geq \frac{2(d+3)}{d}$. His argument is based on incidence geometry, without any number theory. As a result, the same result holds for irrational tori and hence improves our result in Theorem \ref{THM:2} (ii) in a significant manner. This also improves the values of $s_0$ in some {\it subcritical} local well-posedness results below (Theorem \ref{THM:3} (i.a), (i.b), (ii.a), and (ii.b)). Note that the result in \cite{Demeter} comes with an $\varepsilon$-loss and thus it does not improve the scaling-invariant Strichartz estimate \eqref{P1} in Theorem \ref{THM:2} (i) and {\it critical} local well-posedness results in Theorems \ref{THM:4} and \ref{THM:5}.} \begin{align} \| e^{-it \Delta} f\|_{L^p_{t,{\bf x}}(I \times \mathbb{T}^d )} \lesssim N^{\frac{d}{2} - \frac{d+2}p+\varepsilon}\|f\|_{L^2(\mathbb{T}^d)}, \label{P2} \end{align} \noindent for all $f \in L^2(\mathbb{T}^d)$ with $ \supp \widehat f \subset [-N,N]^d$, provided that $d$ and $p$ are in the following ranges: \begin{itemize} \item[(ii.a)] $d=2$\textup{:} $p\geq \frac{20}{3}$, \item[(ii.b)]$d=3$\textup{:} $p= \frac {16}3$, \item[(ii.c)] $d=4$\textup{:} $p= 4$. \end{itemize} \end{theorem} \noindent When $d\geq 3$, we follow a relatively simple argument after Bourgain \cite{Bo4} and prove Theorem \ref{THM:2} in Subsection \ref{SUBSEC:d3}. When $d = 2$, this argument proves \eqref{P2} only for $p \geq 8$. In Subsection \ref{SUBSEC:d2}, we present a duality argument to prove \eqref{P1} (when $d = 2$) for $p > 12$.
In Section \ref{SEC:level}, we establish certain level set estimates and provide a full proof of Theorem \ref{THM:2} when $d = 2$. By interpolating with Theorems \ref{THM:1} and \ref{THM:2}, we can obtain Strichartz estimates for other values of $p$. The following corollary shows a summary of known Strichartz estimates on irrational tori at this point. \begin{corollary}\label{COR:1} Let $\varepsilon > 0$. Then, the following Strichartz estimates hold for all $f \in L^2(\mathbb{T}^d)$ with $\supp \widehat f \subset [-N,N]^d$: \noindent \textup{(i)} $d = 2$\textup{:} \begin{align}\label{co-d2} \| e^{-it \Delta} f\|_{L^p_{t,{\bf x}}(I\times\mathbb{T}^2)} \lesssim \begin{cases} \vphantom{\Big|} N^{\varepsilon}\|f\|_{L^2(\mathbb{T}^2)}, & \text{for } 2 < p \leq 3, \\ \vphantom{\Big|} N^{\frac 23 - \frac 2p+\varepsilon}\|f\|_{L^2(\mathbb{T}^2)}, & \text{for } 3 < p < 4, \\ \vphantom{\Big|} N^{\frac16}\|f\|_{L^2(\mathbb{T}^2)}, & \text{for } p = 4, \\ \vphantom{\Big|}N^{\frac34 - \frac 7{3p}+\varepsilon}\|f\|_{L^2(\mathbb{T}^2)},& \text{for } 4<p\leq\frac{20}{3}, \\ \vphantom{\Big|} N^{1 - \frac{4}p }\|f\|_{L^2(\mathbb{T}^2)},& \text{for } p> \frac{20}{3}. \end{cases} \end{align} \noindent \textup{(ii)} $d = 3$\textup{:} \begin{align}\label{co-d3} \| e^{-it \Delta} f\|_{L^p_{t,{\bf x}}(I\times\mathbb{T}^3)} \lesssim \begin{cases} \vphantom{\Big|} N^{\varepsilon}\|f\|_{L^2(\mathbb{T}^3)}, & \text{for } 2 < p \leq \frac 83, \\ \vphantom{\Big|} N^{1 - \frac{8}{3p}+\varepsilon}\|f\|_{L^2(\mathbb{T}^3)}, & \text{for } \frac 83 < p \leq 4, \\ \vphantom{\Big|} N^{\frac54 - \frac{11}{3p}+\varepsilon}\|f\|_{L^2(\mathbb{T}^3)},& \text{for } 4< p\leq \frac{16}{3}, \\ \vphantom{\Big|} N^{\frac 32 - \frac{5}p }\|f\|_{L^2(\mathbb{T}^3)},& \text{for } p> \frac{16}{3}.
\end{cases} \end{align} \noindent \textup{(iii)} $d = 4$\textup{:} \begin{align}\label{co-d4} \| e^{-it \Delta} f\|_{L^p_{t,{\bf x}}(I\times\mathbb{T}^4)} \lesssim \begin{cases} \vphantom{\Big|} N^{\varepsilon}\|f\|_{L^2(\mathbb{T}^4)}, & \text{for } 2 < p \leq \frac 52, \\ \vphantom{\Big|} N^{\frac 43 - \frac{10}{3p}+\varepsilon}\|f\|_{L^2(\mathbb{T}^4)}, & \text{for } \frac 52 < p \leq 4, \\ \vphantom{\Big|} N^{ 2 - \frac{6}p }\|f\|_{L^2(\mathbb{T}^4)},& \text{for } p> 4. \end{cases} \end{align} \noindent \textup{(iv)} $d \geq 5$\textup{:} \begin{align}\label{co-d5} \| e^{-it \Delta} f\|_{L^p_{t,{\bf x}}(I\times\mathbb{T}^d)} \lesssim \begin{cases} \vphantom{\Big|} N^{\varepsilon}\|f\|_{L^2(\mathbb{T}^d)}, & \text{for } 2 < p \leq \frac{2(d+1)}{d} , \\ \vphantom{\Big|} N^{(\frac d4 - \frac 12) (\frac{2d}{d-1} - \frac{4(d+1)}{p(d-1)})+\varepsilon}\|f\|_{L^2(\mathbb{T}^d)}, & \text{for } \frac{2(d+1)}{d} < p < 4, \\ \vphantom{\Big|} N^{ \frac{d}{2} - \frac{d+2}p }\|f\|_{L^2(\mathbb{T}^d)},& \text{for } p\geq 4. \end{cases} \end{align} \end{corollary} \subsection{Local well-posedness results of NLS on irrational tori} \label{SUBSEC:1.3} In the following, we apply these Strichartz estimates in Corollary \ref{COR:1} to the Cauchy problem of NLS on an irrational torus: \begin{equation} \begin{cases}\label{eq:nls1} i \partial_t u - \Delta u =\pm |u|^{2k}u \\ u\big|_{t = 0} = u_0 \in H^s(\mathbb{T}^d), \end{cases} \quad ({\bf x}, t) \in \mathbb{T}^d\times \mathbb{R}, \end{equation} where $k\in \mathbb{N}$ is a positive integer and the Laplacian $\Delta$ is defined by \eqref{eq:Q0}. First, recall the following notion. When $M = \mathbb{R}^d$, the Cauchy problem \eqref{NLS0} enjoys the dilation symmetry.
Namely, if $u$ is a solution to \eqref{NLS0} with respect to an initial condition $u_0$, then the rescaled function $u_\ellambda({\betaf x}, t) := \ellambda^{\frac{2}{p-1}} u(\ellambda {\betaf x}, \ellambda^2 t)$ is also a solution to \eqref{NLS0} with the rescaled initial condition $u_{0, \ellambda}({\betaf x}) := \ellambda^{\frac{2}{p-1}} u_0 (\ellambda {\betaf x})$. We say that the Sobolev index $s_c$ is critical if the homogeneous Sobolev norm $\|\cdot\|_{\dot H^{s_c}(\mathbb{R}^d)}$ is invariant under this dilation symmetry. In particular, the critical Sobolev index is given by $s_c = \frac{d}{2} - \frac{2}{p-1}$. When $M\ne \mathbb{R}^d$, we may not have this natural dilation symmetry. Nonetheless, the notion of the critical Sobolev index provides us important heuristics. In terms of the Cauchy problem \eqref{eq:nls1}, the critical Sobolev index $s_c$ is given by \betaetagin{equation} \ellabel{Introsc} s_c = \frac d2 - \frac 1k. \end{equation} First, we state local well-posedness in subcritical Sobolev spaces $H^s(\mathbb{T}^d)$ with $s > s_c$. \betaetagin{theorem}[Local well-posedness in subcritical spaces]\ellabel{THM:3} Let $d \gammaeq 2$ and $k \in \mathbb{N}$. Then, there exists $s_0 = s_0(k,d)$ such that the Cauchy problem \eqref{eq:nls1} on a $d$-dimensional irrational torus $\mathbb{T}^d$ is locally well-posed in $H^{s}(\mathbb{T}^d)$ for $s > s_0$ in the following cases: \sigmamallskip \noindent \textup{(i)} $d=2$\textup{:} \betaetagin{itemize} \item[(i.a)] $k = 1$, $s_0= \frac{1}{3}$ \cite{CW}, \item[(i.b)] $k = 2, 3, 4, 5$, $s_0= \frac{7k - 3 }{7k+5}$, \item[(i.c)] $k \gammaeq 5$, $s_0= s_c = 1- \frac 1k$, \end{itemize} \noindent Note that the values of $s_0$ in \textup{(i.b)} and \textup{(i.c)} coincide when $k = 5$. 
\sigmamallskip \noindent \textup{(ii)} $d=3$\textup{:} \betaetagin{itemize} \item[(ii.a)] $k = 1$, $s_0= \frac{2}{3}$ \cite{Bo4}, \item[(ii.b)] $k = 2$, $s_0= \frac{53}{52}$, \item[(ii.c)] $k \gammaeq 3$, $s_0=s_c = \frac 32 - \frac 1k$, \end{itemize} \noindent \textup{(iii)} $d\gammaeq 4$\textup{:} $k\gammae1$, $s_0=s_c = \frac d2 - \frac 1k$. \end{theorem} \noindent After Bourgain's seminal paper \cite{Bo2}, the Fourier restriction norm method, involving the $X^{s, b}$-space, has been applied to study well-posedness of a wide class of equations. In our proof, we also employ the $X^{s, b}$-spaces and by the standard argument, the proof is reduced to establishing certain multilinear Strichartz estimates. Furthermore, by applying the well-posedness theory involving the $U^p$- and $V^p$-spaces developed by Tataru, Koch, and their collaborators \cite{KochT, HHK, HTT11, HTT2}, we prove some critical local well-posedness.\footnote{In a very recent paper, Strunk \cite{Strunk} extended Theorem \ref{THM:4} to (i) $k \gammaeq 3$ when $d = 2$ and (ii) $k = 2$ when $d = 3$. The main idea in \cite{Strunk} is based on considering Strichartz estimates in mixed Lebesgue spaces $L^q_tL^r_{\betaf x}$ to improve the {\it multilinear} Strichartz estimate (Proposition \ref{PROP:LWP2} below). This clever argument avoids the need of improving the scaling-invariant Strichartz estimate \eqref{P1}.} \betaetagin{theorem}[Local well-posedness in critical spaces]\ellabel{THM:4} Given $d \gammaeq 2$ and $k \in \mathbb{N}$, let $s_c$ be the critical Sobolev index given by \eqref{Introsc}. Then, the Cauchy problem \eqref{eq:nls1} on a $d$-dimensional irrational torus $\mathbb{T}^d$ is locally well-posed in the critical Sobolev space $H^{s_c}(\mathbb{T}^d)$ in the following cases: \betaetagin{itemize} \item[(i)] $d=2$\textup{:} $k\gammae 6$, \item[(ii)] $d=3$\textup{:} $k\gammae 3$, \item[(iii)] $d\gammae4$\textup{:} $k\gammae 2$. 
\end{itemize} \end{theorem} \noindent Once again, the proof is reduced to establishing certain multilinear Strichartz estimates. See Propositions \ref{PROP:XLWP} and \ref{PROP:LWP2}. Lastly, we briefly discuss the case of a {\it partially} irrational torus. Namely, we consider Strichartz estimates on an irrational torus $\mathbb{T}^d_{\pmb{\alpha}}$, when some of $\alpha_j$'s in \eqref{Torus} are rationally dependent. In this case, we may obtain improvements over Theorem \ref{THM:2}, yielding better local well-posedness results than those presented in Theorems \ref{THM:3} and \ref{THM:4}. For simplicity of presentation, we only consider an example of the three-dimensional torus of the form $\mathbb{T}^2 \times \mathbb{T}_{\alpha_3}$, where two periods are the same. By a change of spatial variables as before, we consider the Cauchy problem \eqref{eq:nls1}, where the multiplier $Q({\betaf n})$ in \eqref{eq:Q} is given by \betaetagin{align}\ellabel{eq:Q1} Q({\betaf n}) = n_1^2 + n_2^2 + \theta_3 n_3^2, \quad \theta_3 >0, \end{align} \noindent i.e.~we set $\theta_1 = \theta_2 =1$. Then, we have the following local well-posedness result for the energy-critical quintic NLS on a three-dimensional partially irrational torus. \betaetagin{theorem}\ellabel{THM:5} Suppose that $Q({\betaf n})$ is given by \eqref{eq:Q1}. Then, the energy-critical quintic NLS, \eqref{eq:nls1} with $k = 2$, on $\mathbb{T}^3$ is locally well-posed in the critical Sobolev space $H^1(\mathbb{T}^3)$. \end{theorem} \noindent Previously, Herr-Tataru-Tzvetkov \cite{HTT11} proved local well-posedness in the energy space $H^1(\mathbb{T}^3)$ of the energy-critical quintic NLS on the three-dimensional standard torus $\mathbb{T}^3$. By combining the results in \cite{Bo5} and \cite{HTT2}, we also see that the energy-critical cubic NLS on the four-dimensional standard torus $\mathbb{T}^4$ is locally well-posed in the energy space $H^1(\mathbb{T}^4)$.
See also the work by the third author \cite{W} for some other critical local well-posedness results. The result in \cite{W}, however, does not cover an energy-critical setting. As mentioned earlier, Herr \cite{Herr} proved local well-posedness in the energy space of the energy-critical quintic NLS on three-dimensional Zoll manifolds. We point out that Theorem \ref{THM:5} seems to be the first local well-posedness result of the energy-critical NLS in its energy space $H^1(\mathbb{T}^3)$, where there is no common minimal period for geodesics. We present a sketch of the proof in Appendix \ref{SEC:B}. More precisely, we revisit the argument in Section \ref{SEC:level} and prove the sharp Strichartz estimate \eqref{P1} on $\mathbb{T}^3$ for $p > \frac{14}{3}$ {\it under the assumption \eqref{eq:Q1}.} The rest follows from a slight modification of the argument in Section \ref{SEC:critical}. Lastly, note that Theorem \ref{THM:5} combined with the conservation of mass and Hamiltonian yields small data global well-posedness of the quintic NLS, \eqref{eq:nls1} with $k = 2$, in $H^1(\mathbb{T}^3)$, just as in \cite{HTT11, Herr}. Recently, global well-posedness (for large data) of the energy-critical quintic NLS on the three-dimensional standard torus $\mathbb{T}^3$ and on the three-dimensional sphere $\mathbb{S}^3$ was obtained by Ionescu-Pausader \cite{IP} and Pausader-Tzvetkov-Wang \cite{PTW}, respectively. It would be of interest to investigate if global well-posedness of the energy-critical quintic NLS holds in the setting of Theorem \ref{THM:5}.\footnote{After Strunk's result \cite{Strunk}, it is now of interest to study global well-posedness of the energy-critical quintic NLS on a {\it general} three-dimensional irrational torus in its energy space $H^1(\mathbb{T}^3)$.} This paper is organized as follows. In Section \ref{SEC:2}, we prove Theorem \ref{THM:2} when $ d\gammaeq 3$ and partially when $ d = 2$, via multilinear estimates and a duality argument. 
In Section \ref{SEC:level}, we establish certain level set estimates and prove Theorem \ref{THM:2} when $ d = 2$. In Section \ref{SEC:4}, we prove local well-posedness results in subcritical Sobolev spaces (Theorem \ref{THM:3}). In Section \ref{SEC:critical}, we prove local well-posedness results in critical Sobolev spaces (Theorem \ref{THM:4}). In Appendix \ref{SEC:A}, we present a proof of \eqref{A-1} below, using the Hardy-Littlewood circle method. In Appendix \ref{SEC:B}, we sketch a proof of Theorem \ref{THM:5}. \noindent {\betaf Acknowledgments:} Z.~Guo is supported in part by NNSF of China (No.11371037, No.11271023) and Beijing Higher Education Young Elite Teacher Project. Y.~Wang is supported by NNSF of China (No.11126247, No.11201143) and AARMS Postdoctoral Fellowship. The authors would like to thank the anonymous referee for thoughtful comments that have significantly improved the introduction of this paper. T.~Oh would like to express his sincere gratitude to Professor~Harold N.~Shapiro for his support and teaching in mathematics, including the Hardy-Littlewood circle method used in this paper, as well as in life. \sigmaection{Strichartz estimates: Part 1} \ellabel{SEC:2} In this section, we prove our main result (Theorem \ref{THM:2}) for $ d\gammaeq3$ and present a partial proof for $ d = 2$. In \cite{Bo4}, Bourgain treated the three-dimensional case. His argument is based on the following estimate: \betaetagin{equation} \int_{\mathbb{T}} \Big| \sigmaum_{0\elle n\elle N} e^{2\pi i n^2 t}\Big|^{r}\,dt \sigmaim N^{r-2}, \ellabel{A-1} \end{equation} \noindent for $r > 4$. We first apply this argument and generalize the result in \cite{Bo4} to a general dimension $d \gammaeq 3$. When $r = 4$, \eqref{A-1} holds with a logarithmic loss (Hua's inequality). This yields the endpoint case for $ d= 3, 4$. When $d = 2$, this also proves Theorem \ref{THM:2} (ii) but only for $p \gammaeq 8$. 
In Subsection \ref{SUBSEC:d2}, we present a simple duality argument when $d = 2$. This proves Theorem \ref{THM:2} (i) for $p > 12$. The full proof of Theorem \ref{THM:2} for $ d = 2$, i.e.~\eqref{P1} for $ p > \frac{20}{3}$ and \eqref{P2} for $ p \gammaeq \frac{20}{3}$, is presented in Section \ref{SEC:level}. \sigmaubsection{Higher dimensional case: $d\gammae 3$} \ellabel{SUBSEC:d3} In this subsection, we prove Theorem \ref{THM:2} when $d \gammaeq 3$. First, we prove the following lemma, which can be viewed as a version of Hausdorff-Young's inequality. \betaetagin{lemma}[Hausdorff-Young's inequality] \ellabel{LEM:HY} Let $d \gammaeq 2$ and ${\betaf a} \in \mathbb{Z}^d$. Given a sequence $\{c_{\betaf n}\}_{{\betaf n}\in \mathbb{Z}^d}$, define $F_{\betaf a} (t)$ by \betaetagin{equation} F_{\betaf a}(t) = \sigmaum_{{\betaf n}\in \mathbb{Z}^d} c_{\betaf n} c_{{\betaf a}-{\betaf n}} e^{2 \pi i[Q({\betaf n}) + Q({\betaf a}-{\betaf n})]t}, \ellabel{A0} \end{equation} \noindent where $Q({\betaf n})$ is as in \eqref{eq:Q}. Then, for $p \gammaeq 2$, we have \betaetagin{equation} \|F_{\betaf a}(t)\|_{L^p_t([-1,1])} \ellesssim \Bigg[\sigmaum_{k\in \mathbb{Z}} \betaigg(\sigmaum_{|Q({\betaf n}) + Q({\betaf a}-{\betaf n})-k|\elle \frac12} |c_{\betaf n} c_{{\betaf a}-{\betaf n}}| \betaigg)^{p'}\Bigg]^{\frac1{p'}}, \ellabel{A1} \end{equation} where $\frac 1p+\frac{1}{p'} =1$. \end{lemma} \noindent Lemma \ref{LEM:HY} was used in \cite{Bo4} for the three-dimensional case. See also Lemma 2 in \cite{CW}. A proof for general dimensions in \cite{CW} relies on Schur's lemma. In the following, we give a direct proof for reader's convenience. \betaetagin{proof} When $p = \infty$, \eqref{A1} follows immediately. Hence, by interpolation, it suffices to prove \eqref{A1} for $p=2$. Let $\eta(t)$ be a cutoff function supported on $[-2,2]$ such that $\eta \equiv 1$ on $[-1,1]$. 
By Plancherel identity, we have \betaetagin{align*} \|F_{\betaf a}(t)\|_{L^2_t([-1,1])} & \elle\|F_{\betaf a}(t) \eta(t)\|_{L^2_t(\mathbb{R})} \\ & = \betaigg\|\sigmaum_{\betaf n} c_{\betaf n} c_{{\betaf a}-{\betaf n}} \widehat\eta\betaig(\tau-[Q({\betaf n}) + Q({\betaf a}-{\betaf n})]\betaig)\betaigg\|_{L^2_\tau}\\ & = \betaigg\|\sigmaum_{k\in \mathbb{Z}} \sigmaum_{n\in I_{k,{\betaf a}}} c_{\betaf n} c_{{\betaf a}-{\betaf n}} \widehat\eta\betaig(\tau-[Q({\betaf n}) + Q({\betaf a}-{\betaf n})]\betaig)\betaigg\|_{L^2_\tau}\\ & = \betaigg\|\sigmaum_{k\in \mathbb{Z}} B_k(\tau)\betaigg\|_{L^2_\tau}, \end{align*} \noindent where $I_{k,{\betaf a}} = \betaig\{{\betaf n}\in \mathbb{Z}^d: \, Q({\betaf n}) + Q({\betaf a}-{\betaf n})-k \in(-\frac 12,\frac 12]\betaig\}$ and \[ B_k(\tau) = \sigmaum_{{\betaf n}\in I_{k,{\betaf a}}} c_{\betaf n} c_{{\betaf a}-{\betaf n}} \widehat\eta\betaig(\tau-[Q({\betaf n}) + Q({\betaf a}-{\betaf n})]\betaig).\] \noindent Noting that $|B_k(\tau)| \ellesssim \sigmaum_{n\in I_{k,{\betaf a}}} |c_{\betaf n} c_{{\betaf a}-{\betaf n}}| \jb{\tau-k}^{-2}$, we have \betaetagin{align*} \Big\|\sigmaum_{k\in \mathbb{Z}} & B_k(\tau) \Big\|^2_{L^2_\tau} = \Big\|\Big(\sigmaum_{k\in \mathbb{Z}} B_k(\tau)\Big)^2 \Big\|_{L^1_\tau} \ellesssim \sigmaum_{k, k'} \|B_{k}(\tau)B_{k'}(\tau)\|_{L^1_\tau}\\ & \ellesssim \sigmaum_{k, k'} \sigmaum_{{\betaf n}\in I_{k,{\betaf a}}} \sigmaum_{{\betaf n}'\in I_{k',{\betaf a}}} |c_{\betaf n} c_{{\betaf a}-{\betaf n}}| |c_{{\betaf n}'} c_{{\betaf a}-{\betaf n}'}| \int_{\mathbb{R}}\jb{\tau-k}^{-2} \jb{\tau-k'}^{-2} d\tau \\ & \ellesssim \sigmaum_{k, k'} \frac 1{\jb{k-k'}^2} \sigmaum_{{\betaf n}\in I_{k,{\betaf a}}} \sigmaum_{{\betaf n}'\in I_{k',{\betaf a}}} |c_{\betaf n} c_{{\betaf a}-{\betaf n}}| |c_{{\betaf n}'} c_{{\betaf a}-{\betaf n}'}| \intertext{By Cauchy-Schwarz inequality (in $k$) followed by Young inequality, we have} & \elleq \betaigg[\sigmaum_k \Big(\sigmaum_{{\betaf n}\in I_{k,{\betaf
a}-{\betaf n}}|\Big)^2\betaigg]^{\frac12} \betaigg[\sigmaum_k \Big( \sigmaum_{k'}\sigmaum_{{\betaf n}'\in I_{k',{\betaf a}}} \frac{|c_{{\betaf n}'} c_{{\betaf a}-{\betaf n}'}|}{\jb{k-k'}^2}\Big)^2\betaigg]^{\frac12}\\ & \ellesssim \sigmaum_k \betaigg(\sigmaum_{{\betaf n}\in I_{k,{\betaf a}}} |c_{\betaf n} c_{{\betaf a}-{\betaf n}}|\betaigg)^2. \end{align*} \noindent This completes the proof of Lemma \ref{LEM:HY}. \end{proof} Next, we state the main proposition. Theorem \ref{THM:2} then follows this proposition and Bernstein's inequality when $d \gammaeq 3$. \betaetagin{proposition}\ellabel{PROP:Str d3} Let $f$ be a function on $\mathbb{T}^d$ with $\sigmaupp \widehat f \sigmaubset [-N,N]^d$. \noindent \textup{(i)} Let $d\gammae 3$. Then, for $p\gammae \max\betaig(\frac {16}d+, 4\betaig)$, we have \betaetagin{align}\ellabel{Str d3} \|e^{-it\Delta}f\|_{L^p_{t, \textup{loc}}L^4_{{\betaf x}}} \ellesssim N^{\frac d4 -\frac 2p} \|f\|_{L^2}, \end{align} \sigmamallskip \noindent \textup{(ii)} Suppose that $d$ and $p$ satisfy \textup{(ii.a)} $d = 2$, $p \gammaeq 8$, \textup{(ii.b)} $(d, p) = (3, \frac{16}{3})$, or \textup{(ii.c)} $(d, p) = (4, 4)$. Then, we have \betaetagin{align}\ellabel{Str d3a} \|e^{-it\Delta}f\|_{L^p_{t, \textup{loc}}L^4_{{\betaf x}}} \ellesssim N^{\frac d4 -\frac 2p} (\ellog N)^\frac{2}{q} \|f\|_{L^2}, \end{align} \noindent where $q = p$ when $d = 3, 4$ and $q = 8$ when $d = 2$. \end{proposition} \noindent \noindent Bourgain proved \eqref{Str d3} for $ d= 3$. See Proposition 1.1 in \cite{Bo4}. Our proof follows the ideas developed in \cite{Bo4}. By setting $p = 4$ when $ d \gammaeq 5$ and $p = \frac{16}{d}+$ when $ d= 3, 4$, Proposition \ref{PROP:Str d3} yields the $L^4$-Strichartz estimate, which improves the result in \cite{CW} for $ d\gammaeq 3$. Note that the Strichartz estimate \eqref{Str d3} is essentially sharp in higher dimensions ($d \gammaeq 4$). 
Indeed, on $\mathbb{R}^d$ with $ d \gammaeq 2$, by Sobolev inequality and interpolation of \eqref{P3} and \eqref{P4} \betaetagin{align*} \|e^{-it\Delta}f\|_{L^p_{t}L^4_{x} (\mathbb{R}\times \mathbb{R}^d)} \ellesssim N^{2(\frac 14 -\frac 1p)}\|e^{-it\Delta}f\|_{L^4_{t, x} (\mathbb{R}\times \mathbb{R}^d)} \ellesssim N^{\frac d4 -\frac 2p} \|f\|_{L^2(\mathbb{R}^d)}, \end{align*} \noindent for $p \gammaeq 4$. We first use Proposition \ref{PROP:Str d3} to prove Theorem \ref{THM:2} when $d \gammaeq 3$. \betaetagin{proof}[Proof of Theorem \ref{THM:2} for $d \gammaeq 3$] Suppose that $p \gammaeq \max(\frac{16}{d}+, 4)$, satisfying the hypothesis of Theorem \ref{THM:2} (i). By Bernstein's inequality and Proposition \ref{PROP:Str d3} (i), we have \[ \|e^{-it\Delta} f\|_{L^p_{t,{\betaf x}}} \ellesssim N^{\frac d4- \frac dp} \|e^{-it\Delta} f\|_{L^p_tL^4_{{\betaf x}}} \ellesssim N^{\frac d2-\frac {d+2}p} \|f\|_{L^2}. \] \noindent By repeating the same argument with Proposition \ref{PROP:Str d3} (ii), we obtain Theorem \ref{THM:2} (ii) when $d = 3, 4$. When $d = 2$, this yields Theorem \ref{THM:2} (ii) only for $p \gammaeq 8$. \end{proof} We now present the proof of Proposition \ref{PROP:Str d3}. \betaetagin{proof}[Proof of Proposition \ref{PROP:Str d3}] (i) Let $Q({\betaf n})$ be as in \eqref{eq:Q}. Then, we have \betaetagin{equation} \ellabel{A1a} (e^{-it\Delta} f )({\betaf x}) = \sigmaum_{{\betaf n}\in \mathbb{Z}^d} \widehat f ({\betaf n}) e^{2\pi i ({\betaf n}\cdot {\betaf x} + Q({\betaf n}) t)}. \end{equation} \noindent With $c_{\betaf n} = \widehat f ({\betaf n})$, let $F_{\betaf a}(t)$ be as in \eqref{A0}. 
Then, by Minkowski's integral inequality with $p \gammaeq 4$, we have \betaetagin{align} \ellabel{A2} \|e^{-it\Delta}f\|^2_{L^p_t L^4_{{\betaf x}}} & = \|(e^{-it\Delta} f)^2\|_{L^{\frac p2 }_t L^2_{{\betaf x}}} = \betaigg\|\Big(\sigmaum_{{\betaf a}\in \mathbb{Z}^d} |F_{\betaf a}(t)|^2\Big)^\frac{1}{2} \betaigg\|_{L^{\frac p 2}_t} \nonumber \\ & \elle \betaigg(\sigmaum_{{\betaf a}\in \mathbb{Z}^d} \|F_{\betaf a}(t)\|_{L^\frac{p}{2}_t}^2 \betaigg)^{1/2}. \end{align} For $\ell \in \mathbb{Z}$, let $A_\ell = \{{\betaf n}\in \mathbb{Z}^d :\, |Q({\betaf n})-\ell| \elle 1\} \cap [-N, N]^d$. Noting that $Q({\betaf n}) + Q({\betaf a}- {\betaf n}) = \frac 12 \betaig(Q(2{\betaf n}-{\betaf a}) + Q({\betaf a})\betaig)$, the condition $|Q({\betaf n}) + Q({\betaf a}-{\betaf n}) - k|\elle \frac 12$ is equivalent to $2{\betaf n}\in {\betaf a} + A_\ell$ with $\ell = 2k- Q({\betaf a})$. Note that, $\betaig|\{ \ell \in \mathbb{Z}:\, 2{\betaf n} \in {\betaf a} +A_\ell\}\betaig| \ellesssim 1$ for all ${\betaf n} \in \mathbb{Z}^d$. 
Then, by Lemma \ref{LEM:HY} and Cauchy-Schwarz and H\"older's inequalities, we have \betaetagin{align}\ellabel{A3} \|F_{\betaf a}\|_{L^{\frac p2}_t} & \elle \Bigg[\sigmaum_{\ell \in \mathbb{Z}} \betaigg(\sigmaum_{2{\betaf n}\in {\betaf a} + A_\ell} |c_{\betaf n} c_{{\betaf a}- {\betaf n}}| \betaigg)^{\frac p{p-2}} \Bigg]^{\frac{p-2}p} \notag \\ &\ellesssim \Bigg[\sigmaum_\ell |A_\ell|^{\frac p{2(p-2)}} \betaigg(\sigmaum_{2{\betaf n}\in {\betaf a} + A_\ell} |c_{\betaf n} c_{{\betaf a}-{\betaf n}}|^2 \betaigg)^{\frac p{2(p-2)}} \Bigg]^{{\frac{p-2}p}} \nonumber\\ & \elleq \betaigg(\sigmaum_\ell |A_\ell|^{\frac p{p-4}}\betaigg)^{\frac{p-4}{2p}} \betaigg( \sigmaum_{\ell } \sigmaum_{2{\betaf n}\in {\betaf a} + A_\ell} |c_{\betaf n} c_{{\betaf a}-{\betaf n}}|^2 \betaigg)^{\frac 12} \notag \\ & \sigmaim \betaigg(\sigmaum_\ell |A_\ell|^{\frac p{p-4}}\betaigg)^{\frac{p-4}{2p}} \betaigg( \sigmaum_{{\betaf n} \in \mathbb{Z}^d } |c_{\betaf n} c_{{\betaf a}-{\betaf n}}|^2 \betaigg)^{\frac 12}. \end{align} \noindent From \eqref{A2} and \eqref{A3}, we have \betaetagin{equation} \ellabel{A4} \|e^{-it\Delta} f\|_{L^p_tL^4_{{\betaf x}}} \elle C \betaigg(\sigmaum_\ell |A_\ell|^{\frac p{p-4}}\betaigg)^{\frac{p-4}{4p}} \|f\|_{L^2}. \end{equation} Now, let $\eta(t)$ be a smooth function with a compact support $I \sigmaubset \mathbb{R}$ such that $\widehat \eta \gammaeq 0$ and $\widehat \eta \gammaeq 1$ on $[-1, 1]$. Now we estimate $\Big(\sigmaum_\ell |A_\ell|^{\frac p{p-4}}\Big)^{\frac{p-4}{4p}}$, using \[ |A_\ell|\elle \int \Big[\sigmaum_{\sigmaubstack{{\betaf n}\in\mathbb{Z}^d\\|n_j|\elle N}} e^{2\pi iQ({\betaf n})t}\Big]\eta(t)e^{-2 \pi i\ell t} dt. \] If $p\elle 8$, then we have $\frac p{p-4}\gammae 2$. 
Then, by Hausdorff-Young's inequality, we have \betaetagin{align}\ellabel{A5} \Big(\sigmaum_\ell |A_\ell & |^{\frac p{p-4}}\Big)^{\frac{p-4}{4p}} \ellesssim \betaigg[\int_{I}\prod_{j=1}^d \Big| \sigmaum_{ |n_j|\elle N} e^{ 2\pi i\theta_j n_j^2 t}\Big|^{\frac p4}\,dt\betaigg]^{\frac1p} \nonumber\\ & \ellesssim \prod_{j=1}^d \betaigg[\int_{I} \Big| \sigmaum_{ |n_j|\elle N} e^{2\pi i\theta_j n_j^2 t}\Big|^{\frac {dp}4}\,dt\betaigg]^{\frac1{dp}} \ellesssim \betaigg[\int_{I} \Big| \sigmaum_{ 0 \elleq n\elle N} e^{2\pi i n^2 t}\Big|^{\frac {dp}4}\,dt\betaigg]^{\frac1p}. \end{align} \noindent Note that $r= \frac {dp}4 >4$, since $p> \frac{16}d$. Then, by an application of the Hardy-Littlewood circle method (see Appendix \ref{SEC:A}), we have \betaetagin{equation} \int_{I} \Big| \sigmaum_{0\elle n\elle N} e^{2\pi i n^2 t}\Big|^{r}\,dt \sigmaim N^{r-2}, \ellabel{A6} \end{equation} \noindent yielding $\eqref{A5}\ellesssim N^{\frac d4-\frac 2p}$. Hence, \eqref{Str d3} follows from \eqref{A4} in this case. If $p>8$, then by Bernstein's inequality (in $t$), we have \betaetagin{equation}\ellabel{A7} \|e^{-it\Delta} f\|_{L^p_tL^4_{{\betaf x}}} \elle C N^{\frac 14-\frac 2p} \|e^{-it\Delta} f\|_{L^8_tL^4_{{\betaf x}}}. \end{equation} \noindent Then, \eqref{Str d3} follows from \eqref{A7} and \eqref{Str d3} for $p = 8$. \sigmamallskip \noindent (ii) When $(d, p) = (2, 8),$ $(3, \frac{16}{3})$, or $(4, 4)$, we have $r = \frac{dp}{4} = 4$. In this case, \eqref{A6} does not hold. Nonetheless, by Hua's inequality \cite{V}, we have \betaetagin{equation} \int_{I} \Big| \sigmaum_{0\elle n\elle N} e^{2\pi i n^2 t}\Big|^{4}\,dt \ellesssim N^{2} (\ellog N)^2. \ellabel{A8} \end{equation} \noindent See also \cite[(8.13)]{IK}. Then, \eqref{Str d3a} follows from \eqref{A8} and repeating the computation in (i). This completes the proof of Proposition \ref{PROP:Str d3}. 
\end{proof} \sigmaubsection{Two dimensional case}\ellabel{SUBSEC:d2} For $d=2$, the sharp estimate \eqref{P1} is not covered by Proposition \ref{PROP:Str d3}. In the following, we use a simple duality argument and prove the sharp Strichartz estimate \eqref{P1} for $p>12$. Without loss of generality, we assume that \betaetagin{align}\ellabel{B1} Q(\mathbf{n})= n_1^2 +\theta n_2^2, \quad \tfrac{1}{C} \elleq \theta \elleq C. \end{align} \noindent Then, the local-in-time Strichartz estimate can be expressed as \betaetagin{equation}\ellabel{B2} \betaigg\|\sigmaum_{\mathbf{n}\in S_{N}}a_\mathbf{n} e^{2\pi i(\mathbf{n}\cdot\mathbf{x}+Q(\mathbf{n})t)}\betaigg\|_{L^p_{t, {\betaf x}}(I\times \mathbb{T}^2)}\elleq K_{p,N}\betaigg(\sigmaum_{{\betaf n}\in S_N}|a_\mathbf{n}|^2\betaigg)^{1/2}, \end{equation} \noindent where $S_{N}$ denotes the following set: \betaetagin{equation}\ellabel{B2a} S_N: = \betaig\{(n_1,n_2)\in \mathbb{Z}^2: |n_j|\elleq N,\, j=1,2 \betaig\}. \end{equation} \noindent Our task is to seek for an optimal constant $K_{p, N}$. By duality, \eqref{B2} is equivalent to \betaetagin{equation}\ellabel{B3} \betaigg(\sigmaum_{{\betaf n}\in S_N}\betaig|\widehat{f}(\mathbf{n}, Q(\mathbf{n}))\betaig|^2\betaigg)^\frac{1}{2} \elleq K_{p, N} \|f\|_{L^{p'}_{t, {\betaf x}}(I\times \mathbb{T}^2)}, \end{equation} \noindent for any $f \in L^{p'}(I\times \mathbb{T}^2)$, where $\frac 1 p+ \frac 1{p'} = 1$. Here, the Fourier transform $\widehat f$ is defined by \[ \widehat f({\betaf n},\tau) = \int_\mathbb{R} \int_{\mathbb{T}^2} e^{-2\pi i {\betaf n}\cdot{\betaf x}} e^{-2\pi i \tau t } \mathbf 1_{I}(t) f({\betaf x},t)\,d{\betaf x}\,dt. \] \noindent Then, \eqref{P1} for $p > 12$ follows once we prove the next proposition. \betaetagin{proposition}\ellabel{PROP:Str d2} For $p> 12$, we have $K_{p, N} \ellesssim N^{1-\frac 4p}$. 
Namely, we have \betaetagin{equation} \betaigg(\sigmaum_{{\betaf n} \in S_N } \betaig|\widehat{f}({\betaf n}, Q({\betaf n}))\betaig|^2 \betaigg)^\frac{1}{2} \ellesssim N^{1-\frac{4}{p}} \|f\|_{L^{p'}_{t, {\betaf x}}(I\times \mathbb{T}^2)}. \ellabel{B4} \end{equation} \end{proposition} \betaetagin{remark}\rm Recall that, on the standard torus $\mathbb{T}^2$, i.e.~with $Q({\betaf n}) = n_1^2 + n_2^2$, Bourgain \cite{Bo2} proved $K_{p, N} \ellesssim N^{1-\frac 4p}$ for $ p > 4$. Hence, Proposition \ref{PROP:Str d2} states that, on an irrational torus, the same estimate for $K_{p, N}$ holds, but only for $p > 12$. \end{remark} \betaetagin{proof} Without loss of generality, assume that $I$ is centered at 0. Let $\mathbf{R}$ be a kernel defined by \betaetagin{equation}\ellabel{B4a} \mathbf{R}({\betaf x}, t) = \sigmaum_{{\betaf n} \in S_N} e^{2\pi i ({\betaf n} \cdot {\betaf x} + Q(\mathbf{n}) t)}. \end{equation} \noindent Then, defining $R_\theta$ by \betaetagin{equation} R_\theta(x, t) = \sigmaum_{|n| \elleq N} e^{2\pi i (nx + \theta n^2 t)}, \ellabel{B4b} \end{equation} \noindent we have $\mathbf{R}({\betaf x}, t) = R_1(x_1, t)R_\theta(x_2, t).$ From Proposition 3.114 in \cite{Bo2}, we have \betaetagin{align} \|R_1(x, t)\|_{L^p_{t, x}(I\times \mathbb{T})}\elleq C_{p, I}N^{1- \frac 3p}, \ellabel{B5} \end{align} \noindent for $p > 6$. Bourgain's argument is based on an application of the Hardy-Littlewood circle method. See also Lemma 2.4 in \cite{Hu-Li} for a simpler proof based on the Poisson summation formula. Note that \eqref{B5} does not hold for $p = 6$. See Rogovskaya \cite{R} and \cite{Bo2}.
By H\"older's inequality, \eqref{B5}, and Sobolev inequality, we have \betaetagin{align} \|\mathbf{R}\|_{L^p_{t, {\betaf x}}(I\times \mathbb{T}^2)} & = \betaigg(\int_I \|R_1(x_1, t)\|^p_{L^p_{x_1}} \| R_\theta (x_2,t)\|^p_{L^p_{x_2}}\,dt\betaigg)^\frac{1}{p} \notag \\ & \elle \|R_1(x_1,t)\|_{L^p_{t,x_1}} \|R_\theta(x_2,t)\|_{L^\infty_t(I; L^p_{x_2})} \notag \\ & \ellesssim N^{1 - \frac 3p} \|R_\theta(x_2,t)\|_{L^\infty_t(I; H^{\frac{1}{2}-\frac 1p}_{x_2})} \ellesssim N^{2 - \frac 4 p}. \ellabel{B6} \end{align} \noindent By \eqref{B4a}, Young's inequality, and \eqref{B6}, we have \betaetagin{align*} \sigmaum_{{\betaf n}\in S_N } \betaig| \widehat {f}({\betaf n}\,, Q({\betaf n}))\betaig|^2 & = \jb{ \mathbf{R} * \mathbf 1_I f, \mathbf 1_I f } \elleq \|\mathbf{R}\|_{L^\frac{p}{2}_{t, {\betaf x}}(2I \times \mathbb{T}^2)}\|f\|_{L^{p'}_{t, {\betaf x}}(I\times \mathbb{T}^2)}^2\\ & \ellesssim N^{2-\frac{8}{p}}\|f\|_{L^{p'}_{t, {\betaf x}}(I\times \mathbb{T}^2)}^2 \end{align*} \noindent as long as $p > 12$. \end{proof} \sigmaection{Strichartz estimates: Part 2} \ellabel{SEC:level} \sigmaubsection{Level set estimates} In this section, we prove Theorem \ref{THM:2} when $d = 2$. The main ingredient is the level set estimates on irrational tori in Proposition \ref{PROP:level} below. For level sets estimates on the usual torus $\mathbb{T}^d$, see \cite{Bo2, Hu-Li}. It turns out that these level set estimates are useful only when $d = 2, 3$ (see Remark \ref{REM:level}), but we state and prove the results for a general dimension. In the following, we assume that $\theta_1 = 1$ in \eqref{eq:Q} for simplicity. Namely, we consider \betaetagin{equation} Q({\betaf n}) = n_1^2 + \theta_2 n_2^2 + \cdots + \theta_d n_d^2. \ellabel{CQ} \end{equation} \noindent Also, let $S_N = \{ {\betaf n} \in \mathbb{Z}^d: \, |n_j| \elleq N, \, j = 1, \dots, d \}$. \betaetagin{proposition}\ellabel{PROP:level} Let $I$ be a compact interval in $\mathbb{R}$. 
Given \betaetagin{equation} f ({\betaf x}) = \sigmaum_{{\betaf n} \in S_N} c_{\betaf n} e^{2\pi i {\betaf n} \cdot {\betaf x}} \ellabel{C0} \end{equation} \noindent such that $\|c_{\betaf n}\|_{\ell^2_{\betaf n}} = 1$, define the distribution function $A_\ellambda$ by \betaetagin{equation} A_\ellambda = \betaig\{ ({\betaf x}, t) \in \mathbb{T}^d \times I: \, \betaig|\betaig(e^{-it \Delta} f\betaig)({\betaf x}, t)\betaig| > \ellambda\betaig\}. \ellabel{C00a} \end{equation} \noindent \textup{(i)} For any $\varepsilon > 0$, we have \betaetagin{equation} |A_\ellambda | \ellesssim N^{2(d-1)\frac{1}{1+6\varepsilon} } \ellambda^{-6 + \frac{24}{1+6\varepsilon}\varepsilon} \ellabel{C1} \end{equation} \noindent for $\ellambda \gammaes N^{\frac{d}{2} - \frac{1}{4}+\varepsilon}$. \noindent \textup{(ii)} Let $ q > 6$. Then, there exists small $\varepsilon >0$ such that \betaetagin{equation} |A_\ellambda |\ellesssim N^{\frac{d}{2} q - (d+2)} \ellambda^{-q} \ellabel{E0} \end{equation} \noindent for $\ellambda \gammaes N^{\frac{d}{2}-\varepsilon}$. In \eqref{C1} and \eqref{E0}, the implicit constants depend on $\varepsilon > 0$, $q > 6$, and $|I|$, but are independent of $f$. \end{proposition} We present the proof of Proposition \ref{PROP:level} in Subsections \ref{SUBSEC:PROP1} and \ref{SUBSEC:PROP2}. In the following, we use Proposition \ref{PROP:level} to prove Theorem \ref{THM:2} when $ d = 2$. First, we present the proof of Theorem \ref{THM:2} (ii.a), i.e.~we prove \eqref{P2} for $p \gammaeq \frac{20}{3}$ when $ d= 2$. Recall that Catoire-Wang \cite{CW} proved \betaetagin{equation} \|e^{-it \Delta} f\|_{L^4_{t, {\betaf x}}(I \times \mathbb{T}^2)} \ellesssim N^\frac{1}{6} \|f\|_{L^2(\mathbb{T}^2)} \ellabel{C0a} \end{equation} \noindent for $f \in L^2(\mathbb{T}^2)$ with $\sigmaupp \widehat{f} \sigmaubset [-N, N]^2$. Given $f$ as in \eqref{C0}, let $F({\betaf x}, t) = e^{-it \Delta} f({\betaf x}, t)$.
By Cauchy-Schwarz inequality, we have $\|F\|_{L^\infty_{t, {\betaf x}}} \ellesssim N. $ Then, with Proposition \ref{PROP:level} (i) and \eqref{C0a}, we have \betaetagin{align*} \int_{I \times \mathbb{T}^2 } |F({\betaf x}, t)|^p d{\betaf x} dt & \elleq \int_{ N^{\frac34+\varepsilon}\ellesssim |F| \ellesssim N}|F({\betaf x}, t)|^p d{\betaf x} dt + N^{(\frac{3}{4}+\varepsilon)(p - 4)}\int |F({\betaf x}, t)|^4 d{\betaf x} dt\\ & \ellesssim N^{2-\frac{12}{1+6\varepsilon}\varepsilon} \int_{N^{\frac34+}}^N\ellambda^{p - 7+ \frac{24}{1+6\varepsilon}\varepsilon} d\ellambda + N^{(\frac{3}{4}+\varepsilon)(p - 4)+ \frac 23}\\ & \ellesssim N^{p - 4+}, \end{align*} \noindent where the last inequality holds as long as $p \gammaeq \frac{20}{3}$. This proves Theorem \ref{THM:2} (ii.a). By Proposition \ref{PROP:level} (i) and (ii), Theorem \ref{THM:2} (i.a) follows in a similar manner. We omit details. \betaetagin{remark} \rm When $d = 2$, Proposition \ref{PROP:level} (i) and (ii) basically says that the level set estimates \eqref{C1} and \eqref{E0} are sufficient in proving the Strichartz estimates \eqref{P1} and \eqref{P2} for $p > 6$ as long as $\ellambda$ is {\it large}: $\ellambda \gammaeq N^{\frac{1}{4}+}$. Hence, an improvement on Theorem \ref{THM:2} when $ d= 2$ may be obtained if we can improve the lower bound on $\ellambda$ in Proposition \ref{PROP:level} (i) or the $L^4$-Strichartz estimate \eqref{C0a}. \end{remark} \betaetagin{remark}\ellabel{REM:level} \rm In \cite{Bo5}, Bourgain proved \betaetagin{equation} \|e^{-it \Delta} f\|_{L^p_{t, {\betaf x}}(I \times \mathbb{T}^d)} \ellesssim N^\varepsilon \|f\|_{L^2(\mathbb{T}^d)} \ellabel{C0b} \end{equation} \noindent for $ p = \frac{2(d+1)}{d}$. See Proposition 8 and the comment afterward in \cite{Bo5}. Combining Proposition \ref{PROP:level} (i) and \eqref{C0b}, we obtain \eqref{P2} only for $ p \gammaeq \frac{2(3d+1)}{d}$. 
When $d = 2$, the combination of Proposition \ref{PROP:level} (i) and \eqref{C0a} yields a better result. When $d \gammaeq 3$, Proposition \ref{PROP:Str d3} yields better results. We point out that, when $d = 3$, combining Proposition \ref{PROP:level} (i) with Theorem \ref{THM:1} (ii) yields another proof of Theorem \ref{THM:2} (ii.b). \end{remark} \sigmaubsection{Proof of Proposition \ref{PROP:level} (i)} \ellabel{SUBSEC:PROP1} Let $\eta$ be a smooth cutoff function supported on $[\frac{1}{200}, \frac{1}{100}]$. Given $q \in \mathbb{N}$, define $J_q$ by \betaetagin{equation} J_q = \{ a \in \mathbb{N}:\, 1\elleq a \elleq q, \, (a, q) = 1\}. \ellabel{C1a} \end{equation} \noindent Then, for given $M \in \mathbb{N}$ with $M \gammaeq N$, we define \[ \Phi(t) = \sigmaum_{M\elleq q < 2M}\sigmaum_{a \in J_q} \eta\Big( q^2 \betaig\| t - \tfrac{a}{q}\betaig\|\Big),\] \noindent where $\|x\| = \min_{n\in \mathbb{Z}}|x - n|$ denotes the distance of $x$ to the closest integer. Note that $\Phi$ is periodic with period 1. By taking a Fourier transform, we have \betaetagin{equation} \widehat \Phi(k) = \sigmaum_{M\elleq q < 2M}\frac{1}{q^2} c_q(k) \, \widehat \eta(q^{-2} k), \ellabel{C2} \end{equation} \noindent where $c_q(k)$ denotes Ramanujan's sum: $c_q(k) : =\sigmaum_{a \in J_q} e^{-2\pi i \frac a qk}$. Let $\phi(q)$ be the Euler's totient function defined by $\phi(q) = \sigmaum_{a \in J_q} 1$. Then, by Theorem 330 in \cite{HW}, we have \betaetagin{equation*} \widehat \Phi(0) \sigmaim \frac{1}{M^2} \sigmaum_{M\elleq q < 2M} \phi(q) \sigmaim 1. \end{equation*} \noindent Namely, $\widehat\Phi(0)$ is independent of $M$. Without loss of generality, assume that $I $ is centered at $0$. With $Q({\betaf n})$ in \eqref{CQ}, define $\mathbf{R}$ as in \eqref{B4a}, where $S_N = \{ {\betaf n} \in \mathbb{Z}^d: \, |n_j| \elleq N, \, j = 1, \dots, d \}$. Then, we have $\mathbf{R}({\betaf x}, t) = R_1(x_1, t)\prod_{j = 2}^d R_{\theta_j}(x_j, t),$ where $R_\theta$ is defined in \eqref{B4b}. 
Now, letting $\chi$ be a smooth cutoff function supported on $3I$ such that $\chi(t) \equiv 1$ on $2I$, define $\mathbf{R}_1$ and $\mathbf{R}_2$ by
\begin{equation}
\mathbf{R}_1(\mathbf{x}, t) = \frac{\Phi(t)}{\widehat \Phi(0)} \mathbf{R}(\mathbf{x}, t) \chi(t) \quad \text{and} \quad \mathbf{R}_2 (\mathbf{x}, t) = \mathbf{R} (\mathbf{x}, t)\chi(t) - \mathbf{R}_1 (\mathbf{x}, t) .
\label{C2a}
\end{equation}

\noindent
Noting that the intervals $ I_{\ell, q, a} := \big[ \ell + \frac aq + \frac{1}{200 q^2}, \ell+ \frac aq + \frac{1}{100 q^2}\big] $ are disjoint for distinct values of $\ell, a$, and $q \sim M \gg 1$, it follows from Weyl's inequality \cite[Theorem 1 on p.~41]{M} that
\begin{equation}
|R_1(x_1, t) | \lesssim \frac{N}{q^\frac{1}{2}} + N^\frac{1}{2}(\log q)^\frac{1}{2} + q^\frac{1}{2}(\log q)^\frac{1}{2} \lesssim M^\frac{1}{2} (\log M)^\frac{1}{2}
\label{C2b}
\end{equation}

\noindent
for $t \in I_{\ell, q, a}$ since $q\sim M \geq N$. Then, along with the trivial bound $ |R_{\theta_j}(x_j, t) | \lesssim N$, we obtain
\begin{equation}
\|\mathbf{R}_1\|_{L^\infty_{t, \mathbf{x}}} \lesssim \min\big( N^{d-1} M^\frac{1}{2} (\log M)^\frac{1}{2}, N^d\big).
\label{C3}
\end{equation}

Next, we consider $\mathbf{R}_2$. By expanding $\Phi(t)$ in the Fourier series, we have
\begin{align}
\mathbf{R}_2(\mathbf{x}, t) = - \frac{1}{\widehat \Phi(0)} \mathbf{R}(\mathbf{x}, t) \chi(t) \sum_{k \ne 0} \widehat\Phi(k) e^{2\pi i k t}.
\label{C3a}
\end{align}

\noindent
First, recall the following lemma (Lemma 3.33 in \cite{Bo2}). Given $M \in \mathbb{N}$ and $k \in \mathbb{Z}$, we have
\begin{equation}
\sum_{M\leq q < 2M} |c_q(k)| \lesssim d(k, M)M^{1+},
\label{C4}
\end{equation}

\noindent
where $d(k, M)$ denotes the number of divisors of $k$ less than $M$.
Then, by taking a Fourier transform of \eqref{C3a} with \eqref{C2}, \eqref{C4}, and $d(k, M) \lesssim k^{0+}$, we have
\begin{align}
|\widehat{\mathbf{R}}_2(\mathbf{n}, \tau) |
& = \bigg| \mathbf 1_{S_N}(\mathbf{n})\sum_{k\ne0} \frac{\widehat \Phi(k)}{\widehat \Phi(0)} \, \widehat \chi (\tau - Q(\mathbf{n}) - k)\bigg|\notag \\
& \lesssim \frac{1}{M^2} \sum_{k\ne0} \sum_{M\leq q < 2M}|c_q(k)| \big|\widehat \eta(q^{-2} k) \widehat \chi (\tau - Q(\mathbf{n}) - k)\big|\notag \\
& \lesssim \frac{1}{M^2} \sum_{k\ne0} k^{0+} M^{1+} \Big(\frac{M^2}{k}\Big)^{0+} \frac{1}{\jb{\tau - Q(\mathbf{n}) - k}^{10}}\notag \\
& \lesssim M^{-1 + }.
\label{C5}
\end{align}

Define $\Theta_\lambda(\mathbf{x}, t)$ by
\begin{equation}
\Theta_\lambda(\mathbf{x}, t) = \exp\big(i \arg(e^{-it \Delta}f(\mathbf{x}))\big) \cdot \mathbf 1_{A_\lambda}(\mathbf{x}, t).
\label{C5a}
\end{equation}

\noindent
Note that $\supp \Theta_\lambda(\mathbf{x}, \cdot) \subset I$ for each $\mathbf{x} \in \mathbb{T}^d$.
Then, by the Cauchy-Schwarz inequality with \eqref{C0}, we have
\begin{align}
\lambda^2 |A_\lambda|^2
& \leq \bigg(\int_{I\times \mathbb{T}^2} \big(e^{-it \Delta} f\big)(\mathbf{x}, t) \overline{\Theta_\lambda(\mathbf{x}, t)} d\mathbf{x} dt\bigg)^2 = \bigg(\sum_{\mathbf{n} \in S_N} c_{\mathbf{n}} \overline{\widehat \Theta_\lambda (\mathbf{n}, Q(\mathbf{n}))}\bigg)^2 \notag \\
& \leq \sum_{\mathbf{n} \in S_N} \big| \widehat \Theta_\lambda (\mathbf{n}, Q(\mathbf{n}))\big|^2 = \big\langle \mathbf{R}* \Theta_\lambda, \Theta_\lambda\big\rangle = \big\langle (\mathbf{R}\chi)* \Theta_\lambda, \Theta_\lambda\big\rangle \label{C5b}
\intertext{By \eqref{C2a}, \eqref{C3}, and \eqref{C5}, we have}
& = \big\langle \mathbf{R}_1* \Theta_\lambda, \Theta_\lambda\big\rangle + \big\langle \mathbf{R}_2* \Theta_\lambda, \Theta_\lambda\big\rangle \notag \\
& \leq \|\mathbf{R}_1\|_{L^\infty_{t, \mathbf{x}}} \|\Theta_\lambda\|_{L^1_{t, \mathbf{x}}}^2 + \|\widehat{\mathbf{R}}_2\|_{L^\infty_\tau \ell^\infty_{\mathbf{n}} } \|\Theta_\lambda\|_{L^2_{t, \mathbf{x}}}^2 \notag \\
& \leq C_1 N^{d-1} M^{\frac{1}{2}+\varepsilon_1} |A_\lambda|^2 + M^{-1+\varepsilon_2} |A_\lambda|
\label{C6}
\end{align}

\noindent
for small $\varepsilon_1, \varepsilon_2 > 0$. Now, choose $M \geq N$ such that
\begin{equation}
N^{d-1} M^{\frac{1}{2}+\varepsilon_1} \sim \lambda^2.
\label{C7}
\end{equation}

\noindent
The condition \eqref{C7} with $M \geq N$ implies that $\lambda \gtrsim N^{\frac{d}{2} - \frac{1}{4} + \frac{\varepsilon_1}{2}}$.
Then, \eqref{C6} yields
\begin{align*}
|A_\lambda| \lesssim \Big(\frac{N^{2(d-1)}}{\lambda^4}\Big)^\frac{1 - \varepsilon_2}{1+2\varepsilon_1} \lambda^{-2} \lesssim N^{2(d-1)\frac{1}{1+3\varepsilon_1} } \lambda^{-6+\frac{12}{1+3\varepsilon_1}\varepsilon_1}
\end{align*}

\noindent
by setting $\varepsilon_2 = \varepsilon_2(\varepsilon_1)$ such that $\frac{1 - \varepsilon_2}{1+2\varepsilon_1} = \frac{1}{1+3\varepsilon_1}$. This proves \eqref{C1} with $\varepsilon = \frac{\varepsilon_1}{2}$.

\subsection{Proof of Proposition \ref{PROP:level} (ii)}
\label{SUBSEC:PROP2}

In this subsection, we prove the level set estimate \eqref{E0}, which is sharp for $ q > 6$. The following argument is inspired by Bourgain's paper \cite{Bo2}. We first go over some basic setup, restricting our attention to $t \in \mathbb{T}$. Let $\{\sigma_n\}_{n\in \mathbb{Z}}$ be the multiplier defined by $\sigma_n = 1$ on $[-N, N]$, $\sigma_n = \frac{N-j}N$ for $n = N+j$ and $n = -N -j$, $j = 1, \dots, N$, and $\sigma_n = 0$ for $|n| \geq 2N$. Consider
\begin{equation}
K(x, t) := \sum_{n\in\mathbb{Z}} \sigma_n e^{2\pi i (nx + n^2t)}.
\label{D0a}
\end{equation}

\noindent
Then, we have the following lemma. Here, $\|x\| = \min_{n\in \mathbb{Z}}|x - n|$ denotes the distance of $x$ to the closest integer as before.

\begin{lemma}[Lemma 3.18 in \cite{Bo2}] \label{LEM:Weyl}
Let $1 \leq a \leq q \leq N$ and $(a, q) = 1$ such that
\begin{equation}
\bigg\| t - \frac aq\bigg\| \leq \frac{1}{qN}.
\label{D0}
\end{equation}

\noindent
Then, we have
\begin{equation}
| K(x, t)| \lesssim \frac{N}{q^\frac{1}{2}\Big(1 + N\big\|t - \frac aq\big\|^\frac{1}{2}\Big)}.
\label{D1}
\end{equation}
\end{lemma}

\noindent
Note that the multiplier $\sigma_n$ in \eqref{D0a} avoids the logarithmic loss (when $q \sim N$) in Weyl's inequality \eqref{C2b} on the Weyl sum $W_N(x, t) = \sum_{|n|\leq N} e^{2\pi i (nx + n^2t)}$.
Indeed, by writing $K = \frac{1}{N} \sum_{k = N}^{2N-1} W_k$, we see that this regularizing effect in \eqref{D1} is analogous to that of the Fej\'er kernel over the Dirichlet kernel.

In the following, we fix $N\gg1$, dyadic. For dyadic $M \leq N$, define $\mathcal{R}_M$ by
\[ \mathcal{R}_M = \Big\{ \frac aq: \, a\in J_q, \, M\leq q < 2M\Big\},\]

\noindent
where $J_q$ is as in \eqref{C1a}. Let $\psi(t)$ be a smooth cutoff function supported on $\frac{1}{2}+\frac{1}{10} \leq |t| \leq 2 - \frac{1}{10}$ such that $\sum_{j \in \mathbb{Z}} \psi(2^{-j} t) = 1$ for $t \ne 0$. For $s \in \mathbb{N}$ with $ M \leq 2^s < N$, let $\omega_{N, 2^s} (t)= \psi(2^sN t)$ and define $\omega_{N, N}$ by
\[ \omega_{N, N}(t) =
\begin{cases}
\sum_{j \geq \log_2 N} \psi(2^jNt), & t\ne 0,\\
1, & t = 0.
\end{cases} \]

\noindent
Note that we have $\supp(\omega_{N, 2^s}) \subset \big\{|t| \lesssim \frac{1}{2^sN}\big\}$ and
\begin{equation}
|\widehat \omega_{N, 2^s}(k) | \lesssim \frac{1}{2^s N} \Big\langle\frac{ k}{2^sN}\Big\rangle^{-100}.
\label{D2a}
\end{equation}

\noindent
Now, let
\begin{equation}
\Omega_{M, N} = \sum_{M \leq 2^s \leq N} \omega_{N, 2^s}.
\label{D2}
\end{equation}

\noindent
Then, it follows that $\Omega_{M, N} \equiv 1$ on $[ -\frac{1}{MN}, \frac{1}{MN}]$ and $\supp\Omega_{M, N} \subset [ -\frac{2}{MN}, \frac{2}{MN}]$. Let $N_1 = \frac{1}{100}N$. Note that, for $M_1 < M_2 \leq N_1$, we have
\[\bigg(\mathcal{R}_{M_1} + \Big[ -\frac{2}{M_1N}, \frac{2}{M_1N}\Big]\bigg) \cap \bigg(\mathcal{R}_{M_2} + \Big[ -\frac{2}{M_2N}, \frac{2}{M_2N}\Big] \bigg)= \emptyset.\]

\noindent
Recall that, by Dirichlet's theorem \cite[Lemma 2.1]{V}, \eqref{D0} is satisfied for all $t \in \mathbb{T} = [0, 1]$.
Then, by letting $\delta_T$ denote the Dirac delta measure at $T$, we have
\begin{equation}\label{D3}
1 = \sum_{\substack{M \leq N_1\\ M, \text{ dyadic}}} \sum_{T \in \mathcal{R}_M}\delta_T* \Omega_{M, N} + \rho,
\end{equation}

\noindent
such that $\rho(t) \ne 0$ for some $t \in \mathbb{T}$ implies that $t$ satisfies \eqref{D0} for some $q > N_1$. In particular, by Lemma \ref{LEM:Weyl}, we have
\begin{equation}
| \rho(t) K(x, t) | \lesssim N^\frac{1}{2}.
\label{D4}
\end{equation}

\noindent
From \eqref{D3} with \eqref{C4}, \eqref{D2a}, and \eqref{D2}, we have
\begin{align}
|\widehat \rho(k)|
& \leq \bigg| \sum_{\substack{M \leq N_1\\ M, \text{ dyadic}}} \sum_{M \leq 2^s \leq N} \mathcal{F} \bigg[\sum_{T \in \mathcal{R}_M}\delta_T\bigg] (k)\cdot \widehat \omega_{N, 2^s}(k) \bigg| \notag \\
& \lesssim \sum_{\substack{M \leq N_1\\ M, \text{ dyadic}}} \sum_{M \leq 2^s \leq N} \frac{d(k, M)M^{1+}}{2^s N} \Big\langle\frac{ k}{2^sN}\Big\rangle^{-100} \lesssim N^{-1+}
\label{D4b}
\end{align}

\noindent
for $k \ne 0$. Here, we used the fact that $d(k, M) \lesssim k^{0+}$ and $M \leq 2^s \leq N$. Now, for each $M$ and $s$, we choose a coefficient $\alpha_{M, s}$ such that
\begin{equation}
\mathcal{F}\bigg[\sum_{T \in \mathcal{R}_M} \delta_T * \omega_{N, 2^s}\bigg](0) = \alpha_{M, s} \widehat \rho(0).
\label{D4a}
\end{equation}

\noindent
Then, from \cite[(3.56)]{Bo2}, we have
\begin{equation}
|\alpha_{M, s}| \lesssim \frac{M^2}{2^sN}.
\label{D5}
\end{equation}

Now, we focus on our problem. Namely, we no longer assume $t \in \mathbb{T}$. Given an interval $I \subset \mathbb{R}$, assume that $I$ is centered at $0$ and let $\chi$ be a smooth cutoff function supported on $3I$ such that $\chi(t) \equiv 1$ on $2I$ as before.
We define
\begin{align}
\mathbf{K}(\mathbf{x}, t)
& = \chi(t) \sum_{\mathbf{n} \in \widetilde S_N} \sigma_{n_1} e^{2\pi i (\mathbf{n}\cdot \mathbf{x} + Q(\mathbf{n}) t)} \notag \\
& = \chi(t) K(x_1, t) \prod_{j = 2}^d \sum_{|n_j|\leq N} e^{2\pi i (n_j x_j + \theta_j n_j^2 t)} ,
\label{D6}
\end{align}

\noindent
where $\widetilde S_N = \{ \mathbf{n} \in \mathbb{Z}^d: \, |n_j| \leq N, j = 2, \dots, d\}$ and $K(x_1, t)$ is as in \eqref{D0a}. Define $\Lambda_{M, s}$ by
\begin{equation}
\Lambda_{M, s}(\mathbf{x}, t) = \mathbf{K}(\mathbf{x}, t) \bigg[\sum_{T \in \mathcal{R}_M} \delta_T * \omega_{N, 2^s} (t) - \alpha_{M, s} \rho(t)\bigg] .
\label{D7}
\end{equation}

\noindent
Then, from Lemma \ref{LEM:Weyl}, \eqref{D4}, and \eqref{D5} with $M \leq 2^s \leq N$, we have
\begin{align}
|\Lambda_{M, s}(\mathbf{x}, t)|\lesssim N^{d-1} \frac{N}{M^\frac{1}{2} \big( 1 + (2^{-s} N)^\frac{1}{2}\big)} + \frac{M^2}{2^sN} N^{d-\frac{1}{2}} \lesssim N^{d-1} \Big(\frac{2^s N }{M}\Big)^\frac{1}{2}.
\label{D8}
\end{align}

\noindent
Hence, from \eqref{D8}, we have
\begin{equation}
\| f * \Lambda_{M, s} \|_{L^\infty(I \times \mathbb{T}^d)} \lesssim N^{d-1} \Big(\frac{2^s N }{M}\Big)^\frac{1}{2}\|f\|_{L^1(I \times \mathbb{T}^d)}.
\label{D9}
\end{equation}

Next, we estimate $|\widehat{\Lambda}_{M, s}|$. Denote the second factor in \eqref{D7} by $\Phi_{M, s}$, i.e.~let
\begin{equation}
\Phi_{M, s}(t) = \sum_{T \in \mathcal{R}_M} \delta_T * \omega_{N, 2^s} (t) - \alpha_{M, s} \rho(t).
\label{D9a}
\end{equation}

\noindent
Note that $\Phi_{M, s}$ is periodic. Moreover, by \eqref{D4a}, we have $\widehat \Phi_{M, s}(0) = 0$. Hence, we have
\begin{equation}
\widehat{\Lambda}_{M, s}(\mathbf{n}, \tau) = \sigma_{n_1} \bigg(\prod_{j = 2}^d \mathbf 1_{|n_j| \leq N}\bigg) \sum_{k \ne 0}\widehat \Phi_{M, s}(k) \widehat \chi(\tau - Q(\mathbf{n}) - k).
\label{D10}
\end{equation}

\noindent
By \eqref{C4}, \eqref{D2a}, \eqref{D4b}, and \eqref{D5} with $d(k, M) \lesssim k^{0+}$ and $M \leq 2^s \leq N$, we have
\begin{align}
|\widehat \Phi_{M, s}(k)| \lesssim \frac{d(k, M) M^{1+}}{2^s N} \Big\langle\frac{ k}{2^sN}\Big\rangle^{-100} + \frac{M^2}{2^s N^{2-}} \lesssim \frac{M}{2^s N^{1-}}
\label{D11}
\end{align}

\noindent
for $k \ne 0$. By summing $|\widehat \chi(\tau - Q(\mathbf{n}) - k)| \lesssim \jb{\tau - Q(\mathbf{n}) - k}^{-100}$ over $k \ne 0$, it follows from \eqref{D10} and \eqref{D11} that
\begin{align}
|\widehat{\Lambda}_{M, s}(\mathbf{n}, \tau)| \lesssim \frac{M}{2^s N^{1-}}.
\label{D12}
\end{align}

\noindent
Hence, from \eqref{D12}, we have
\begin{equation}
\| f * \Lambda_{M, s} \|_{L^2(I \times \mathbb{T}^d)} \lesssim \frac{M}{2^s N}N^{0+} \|f\|_{L^2(I \times \mathbb{T}^d)}.
\label{D13}
\end{equation}

\noindent
Also, with the trivial bound $d(k, M) \leq M$ in \eqref{D11}, we have
\begin{equation}
\| f * \Lambda_{M, s} \|_{L^2(I \times \mathbb{T}^d)} \lesssim \frac{M^{2+}}{2^s N} \|f\|_{L^2(I \times \mathbb{T}^d)}.
\label{D13a}
\end{equation}

\noindent
The second estimate \eqref{D13a} is useful when $M \ll N^\varepsilon$. In the following, we establish another estimate on $\| f * \Lambda_{M, s} \|_{L^2(I \times \mathbb{T}^d)}$, using the following lemma from Bourgain \cite{Bo2}.

\begin{lemma}[Lemma 3.47 in \cite{Bo2}] \label{LEM:divisor}
Let $d(k, M)$ denote the number of divisors of $k$ less than $M$. Then, for any $\beta, B, D > 0$, we have
\begin{equation}\label{D14}
\# \{ 0 \leq k \leq N: \, d(k, M) > D\} < c_{\beta, B} (D^{-B} M^\beta N + M^B).
\end{equation}

\noindent
Note that the constant in \eqref{D14} is independent of $D>0$, $M, N \in \mathbb{N}$.
\end{lemma}

\noindent
From \eqref{D10} and \eqref{D11}, we have
\begin{align}
\| f * \Lambda_{M, s} \|_{L^2(I \times \mathbb{T}^d)}
& \lesssim \frac{ M^{1+}}{2^s N} \Bigg(\int \sum_{\widetilde S_N} \sigma_{n_1}^2 |\widehat f(\mathbf{n} , \tau)|^2 \bigg[\sum_{k \ne 0}\frac{d(k, M)}{\jb{\frac{ k}{2^sN}}^{100}\jb{\tau - Q(\mathbf{n}) - k}^{100}} \bigg]^2 d\tau \Bigg)^\frac{1}{2}\notag \\
& \hphantom{XXX} + \frac{M^2}{2^s N^{2-}} \|f\|_{L^2(I\times \mathbb{T}^d)},
\label{D15}
\end{align}

\noindent
where $\widetilde S_N$ is as in \eqref{D6}. Given $D > 0$ (to be chosen later), separate the first term, depending on whether $d(k, M) \leq D$ or $> D$. The contribution from $d(k, M) \leq D$ can be estimated by
\begin{equation}
\lesssim \frac{ D M^{1+}}{2^s N} \|f\|_{L^2(I\times \mathbb{T}^d)}.
\label{D16}
\end{equation}

\noindent
Next, we estimate the contribution from $d(k, M) > D$. By the Cauchy-Schwarz inequality, we have
\begin{align}
\bigg[\sum_{k \ne 0} & \frac{d(k, M)}{\jb{\frac{ k}{2^sN}}^{100}\jb{\tau - Q(\mathbf{n}) - k}^{100}} \bigg]^2 \notag \\
& \leq \bigg(\sum_{k \ne 0}\frac{d(k, M)^2}{\jb{\frac{ k}{2^sN}}^{200}\jb{\tau - Q(\mathbf{n}) - k}^{100}} \bigg) \bigg(\sum_{\widetilde k \ne 0}\frac{1}{\jb{\tau - Q(\mathbf{n}) - \widetilde k}^{100}} \bigg) \notag \\
& \lesssim \bigg(\sum_{k \ne 0}\frac{d(k, M)^2}{\jb{\frac{ k}{2^sN}}^{200}\jb{\tau - Q(\mathbf{n}) - k}^{100}} \bigg).
\label{D15a}
\end{align}

\noindent
Now, we estimate the first term on the right-hand side of \eqref{D15} after applying \eqref{D15a}.
By first integrating in $\tau$, then summing over $|n_j| \lesssim N$ for $j = 1, \dots, d$, applying Lemma \ref{LEM:divisor} (with $2B$ and $2\beta$ instead of $B$ and $\beta$), the trivial bound $d(k, M) \leq M$, and Hausdorff-Young's inequality, we have
\begin{align}
& \lesssim \frac{ M^{1+}N^{\frac{d}{2}}}{2^s N} \bigg( \sum_{|k| \lesssim 2^s N} d(k, M)^2 + \sum_{j = 1}^\infty \sum_{|k| \sim 2^{s+j} N}d(k, M)^2\Big\langle\frac{ k}{2^sN}\Big\rangle^{-200}\bigg)^\frac{1}{2} \|\widehat f\|_{L^\infty_\tau \ell^\infty_{\mathbf{n}}} \notag \\
& \lesssim \frac{ M^{2+}N^{\frac{d}{2}}}{2^s N} \bigg( \sum_{j = 0}^\infty 2^{-200j} (D^{-2B} M^{2\beta} 2^{s+j} N + M^{2B}) \bigg)^\frac{1}{2} \|f\|_{L^1(I\times \mathbb{T}^d)} \notag \\
& \lesssim N^\frac{d}{2}\bigg( \frac{D^{-B} M^{2+\beta +} }{2^{\frac{s}{2}}N^\frac{1}{2}} + \frac{ M^{2+B+}}{2^sN } \bigg) \|f\|_{L^1(I\times \mathbb{T}^d)}.
\label{D17}
\end{align}

\noindent
Hence, from \eqref{D15}, \eqref{D16}, and \eqref{D17} with $M \leq N$, we have
\begin{align}
\| f * \Lambda_{M, s} \|_{L^2(I \times \mathbb{T}^d)}
& \lesssim \frac{ D M^{1+}}{2^s N} \|f\|_{L^2(I\times \mathbb{T}^d)} \notag \\
& \hphantom{XXX} + N^\frac{d}{2} \bigg( \frac{D^{-B} M^{2+\beta +} }{2^{\frac{s}{2}}N^\frac{1}{2}} + \frac{ M^{2+B+}}{2^sN } \bigg) \|f\|_{L^1(I\times \mathbb{T}^d)}.
\label{D18}
\end{align}

Define $\Lambda$ by
\begin{equation}
\Lambda (\mathbf{x}, t) = \sum_{\substack{M \leq N_1\\ M, \text{ dyadic}}} \sum_{M \leq 2^s \leq N} \Lambda_{M, s}(\mathbf{x}, t),
\label{E1}
\end{equation}

\noindent
where $\Lambda_{M, s}$ is as in \eqref{D7}.
By \eqref{D3}, we have
\begin{equation}
\big(\mathbf{K} - \Lambda \big)(\mathbf{x}, t) = \bigg[ 1+ \sum_{\substack{M \leq N_1\\ M, \text{ dyadic}}} \sum_{M \leq 2^s \leq N}\alpha_{M, s} \bigg] \mathbf{K}(\mathbf{x}, t) \rho(t).
\label{E2}
\end{equation}

\noindent
Then, by \eqref{D4}, \eqref{D5}, and \eqref{D6} with $N_1 = \frac{1}{100}N$, we have
\begin{equation}
\|\mathbf{K} - \Lambda\|_{L^\infty(I\times\mathbb{T}^d)} \lesssim N^{d-\frac{1}{2}}.
\label{E3}
\end{equation}

\noindent
Hence, we have
\begin{align}
|\jb{f, f*(\mathbf{K} - \Lambda)}| \lesssim N^{d-\frac{1}{2}} \|f\|_{L^1}^2.
\label{E4}
\end{align}

Let $p \in (1, 2)$ such that
\begin{equation}
\frac 1p = \frac{1-\theta}{1}+\frac \theta 2
\label{E4a}
\end{equation}

\noindent
for some $\theta \in (0, 1)$. Note that $p' \theta = 2$.

\noindent
\textbf{Case (i):} $\theta < \frac{1}{5}$.

By interpolating \eqref{D9} and \eqref{D13a}, we have
\begin{align}
\| f * \Lambda_{M, s} \|_{L^{p'}(I \times \mathbb{T}^d)} \lesssim N^{(d-1)(1-\theta)} (2^s N)^{\frac12 - \frac 32\theta} M^{-\frac 12 + (\frac 52+)\theta} \| f \|_{L^{p}(I \times \mathbb{T}^d)}.
\label{E4b}
\end{align}

\noindent
Then, summing over dyadic $M \geq 1$ and $s$ with $2^s \leq N$, we have
\begin{align}
\| f *\Lambda \|_{L^{p'}(I \times \mathbb{T}^d)}
& \lesssim N^{d - (d+2)\theta} \| f \|_{L^{p}(I \times \mathbb{T}^d)},
\label{E4c}
\end{align}

\noindent
as long as $\theta > 0$ satisfies $-\frac 12 + (\frac 52+)\theta \leq 0$, i.e.
\begin{equation}
\theta < \frac 15.
\label{E4d}
\end{equation}

In the following, we prove the level set estimate \eqref{E0} for $ q > 10$. Given $f$ as in \eqref{C0}, let $F(\mathbf{x}, t) = e^{-it \Delta} f(\mathbf{x}, t)$.
Let $\Theta_\lambda(\mathbf{x}, t)$ be as in \eqref{C5a}, where $A_\lambda = \{(\mathbf{x}, t) \in \mathbb{T}^d \times I:\, |F(\mathbf{x}, t)| > \lambda\}$. Then, proceeding as in \eqref{C5b} with \eqref{E4} and \eqref{E4c}, we have
\begin{align}
\lambda^2|A_\lambda|^2
& \leq \sum_{\mathbf{n} \in S_N} \big|\widehat \Theta_\lambda(\mathbf{n}, Q(\mathbf{n}))\big|^2 \leq \jb{ \mathbf{K}* \Theta_\lambda, \Theta_\lambda}\notag \\
& \leq |\jb{ (\mathbf{K} - \Lambda) * \Theta_\lambda, \Theta_\lambda }|+ |\jb{ \Lambda*\Theta_\lambda, \Theta_\lambda }|\notag \\
& \lesssim N^{d-\frac{1}{2}} |A_\lambda|^2 + N^{d - (d+2) \theta} |A_\lambda|^\frac{2}{p}.
\label{E4e}
\end{align}

\noindent
For $\lambda \gg N^{\frac{d}{2}-\frac 14}$, \eqref{E4e} reduces to
\begin{align}
\lambda^2|A_\lambda|^2
& \lesssim N^{d - (d+2) \theta} |A_\lambda|^\frac{2}{p}.
\label{E4f}
\end{align}

\noindent
Noting that $p' \theta = 2$ by \eqref{E4a}, it follows from \eqref{E4f} that
\begin{align*}
|A_\lambda| \lesssim N^{\frac{d}{2}q - (d+2) } \lambda^{-q},
\end{align*}

\noindent
where $q := p' = \frac 2\theta > 10$. Note that we only needed to assume $\lambda \gg N^{\frac{d}{2}-\frac 14}$ and did not need the condition $\lambda \gtrsim N^{\frac d2 - \varepsilon}$ in this case.

\noindent
\textbf{Case (ii):} $\frac 15 \leq \theta < \frac{1}{3}$.

Let $M_j$, $j = 1, 2$, be dyadic numbers such that
\begin{equation}
M_1 \sim \bigg(\frac{N^\frac{d}{2}}{\lambda}\bigg)^{\delta_1} \lesssim N^{\varepsilon \delta_1} \quad \text{and} \quad M_2 \sim N^{\delta_2}.
\label{EE0}
\end{equation}

\noindent
Here, we choose $\delta_1, \delta_2>0$ such that $M_1 \ll M_2$.
We divide $\Lambda$ into three pieces: $\Lambda = \Lambda_1 + \Lambda_2 + \Lambda_3$ by setting
\begin{equation}
\Lambda_j = \sum_{\substack{M \in I_j \\M, \text{ dyadic}}} \sum_{M \leq 2^s \leq N} \Lambda_{M, s},
\label{EE0a}
\end{equation}

\noindent
where $I_1 = [1, M_1]$, $I_2 = (M_1, M_2]$, and $I_3 = (M_2, N_1]$ with $N_1 = \frac{1}{100}N$ as before. Then, summing \eqref{E4b} over dyadic $M \leq M_1$ and $2^s \leq N$, we have
\begin{align}
\| f *\Lambda_1 \|_{L^{p'}(I \times \mathbb{T}^d)}
& \lesssim M_1^{-\frac{1}{2} + (\frac 52 +)\theta} N^{d - (d+2)\theta} \| f \|_{L^{p}(I \times \mathbb{T}^d)},
\label{EE1}
\end{align}

\noindent
since $\theta \in [\frac 15, \frac 13)$. Similarly, by interpolating \eqref{D9} and \eqref{D18}, we have
\begin{align}
\| f * & \Lambda_{M, s} \|_{L^{p'}(I \times \mathbb{T}^d)} \lesssim N^{(d-1)(1-\theta)} (2^s N)^{\frac12 - \frac 32\theta} M^{-\frac{1}{2}+ (\frac{3}{2}+)\theta} D^\theta \| f \|_{L^{p}(I \times \mathbb{T}^d)}\notag \\
& + N^{(d-1)(1-\theta)} \bigg(\frac{2^s N}{M}\bigg)^{\frac12 (1- \theta)} N^{\frac{d}{2} \theta} \bigg(\frac{D^{-B}M^{2+\beta+}}{2^\frac{s}{2} N^\frac{1}{2}} + \frac{ M^{2+B+}}{2^sN } \bigg)^\theta \|f\|_{L^1(I\times \mathbb{T}^d)}.
\label{E5}
\end{align}

\noindent
Now, choose $D\sim M^\alpha$ for some small $\alpha > 0$. Then, set $\beta \ll 1$ and $ B \gg 1$ such that
\begin{equation}
\sigma := - \frac{5}{2} - \beta + \alpha B - > 0.
\label{E5a}
\end{equation}

\noindent
Then, summing \eqref{E5} over dyadic $M \in (M_1, M_2]$ and $s$ with $2^s \leq N$, we have
\begin{align}
\| f * \Lambda_2 \|_{L^{p'}(I \times \mathbb{T}^d)}
& \lesssim N^{d - (d+2) \theta} \| f \|_{L^{p}(I \times \mathbb{T}^d)} \notag \\
& + \Big( M_1^{-\frac{1}{2} - \sigma \theta} N^{d - (\frac d 2+1)\theta} + M_2^{-\frac{1}{2} + (\frac{5}{2}+ B+) \theta} N^{d - (\frac d 2+2) \theta} \Big) \|f\|_{L^1(I\times \mathbb{T}^d)},
\label{E6}
\end{align}

\noindent
as long as $-\frac{1}{2} + (\frac{3}{2}+\alpha+)\theta \leq 0$, i.e.~
\begin{equation}
\theta < \frac{1}{3+2\alpha +}.
\label{E6a}
\end{equation}

\noindent
Note that \eqref{E6a} can be satisfied as long as $\theta < \frac{1}{3}$ by choosing $\alpha$ sufficiently small. Lastly, from \eqref{D9} and \eqref{D13}, we have
\begin{align}
\| f * \Lambda_{M, s} \|_{L^{p'}(I \times \mathbb{T}^d)} \lesssim N^{(d-1)(1-\theta)} \bigg(\frac{2^s N}{M}\bigg)^{\frac12 - \frac 32\theta} N^{(0+) \theta} \| f \|_{L^{p}(I \times \mathbb{T}^d)}.
\label{E7}
\end{align}

\noindent
Then, summing over $M \geq M_2 \sim N^{\delta_2}$ and $s$ with $2^s \leq N$, we have
\begin{align}
\| f * \Lambda_3 \|_{L^{p'}(I \times \mathbb{T}^d)}
& \lesssim N^{(d-1)(1-\theta)} \bigg(\frac{N^2}{M_2}\bigg)^{\frac12 - \frac 32\theta} N^{(0+) \theta} \| f \|_{L^{p}(I \times \mathbb{T}^d)}\notag \\
& \lesssim N^{d - (d+2)\theta} \| f \|_{L^{p}(I \times \mathbb{T}^d)},
\label{E8}
\end{align}

\noindent
as long as $\theta < \frac 13$.
Putting \eqref{EE1}, \eqref{E6}, and \eqref{E8} together with \eqref{E4d}, we obtain
\begin{align}
\| f * \Lambda \|_{L^{p'}(I \times \mathbb{T}^d)}
& \lesssim M_1^{-\frac{1}{2} + (\frac 52 +)\theta} N^{d - (d+2) \theta} \| f \|_{L^{p}(I \times \mathbb{T}^d)} \notag \\
& + \Big( M_1^{- \frac 12 -\sigma \theta} N^{d - (\frac d 2+1)\theta} + M_2^{ -\frac{1}{2}+(\frac{5}{2}+B+) \theta} N^{d - (\frac d 2+2) \theta} \Big) \|f\|_{L^1(I\times \mathbb{T}^d)}.
\label{E9}
\end{align}

Now, we are ready to prove the level set estimate \eqref{E0} for $ q > 6$. Proceeding as in Case (i) with \eqref{E4} and \eqref{E9}, we have
\begin{align}
\lambda^2|A_\lambda|^2
& \leq |\jb{ (\mathbf{K} - \Lambda) * \Theta_\lambda, \Theta_\lambda }|+ |\jb{ \Lambda*\Theta_\lambda, \Theta_\lambda }|\notag \\
& \lesssim N^{d-\frac{1}{2}} |A_\lambda|^2 + M_1^{-\frac{1}{2} + (\frac 52 +)\theta} N^{d - (d+2) \theta} |A_\lambda|^\frac{2}{p} \notag \\
& \hphantom{XXX} + M_1^{- \frac 12- \sigma \theta} N^{d - (\frac{d}{2}+1) \theta}|A_\lambda|^{1+\frac{1}{p}} + M_2^{ -\frac{1}{2}+(\frac{5}{2}+B+) \theta} N^{d - (\frac d 2+2) \theta} |A_\lambda|^{1+\frac{1}{p}}.
\label{E10}
\end{align}

\noindent
Since $\lambda \gg N^{\frac{d}{2}-\frac 14}$, \eqref{E10} reduces to
\begin{align}
\lambda^2|A_\lambda|^2
& \lesssim M_1^{-\frac{1}{2} + (\frac 52 +)\theta} N^{d - (d+2) \theta} |A_\lambda|^\frac{2}{p} + M_1^{- \frac 12- \sigma \theta} N^{d - (\frac{d}{2}+1) \theta}|A_\lambda|^{1+\frac{1}{p}}\notag \\
& \hphantom{XXX} + M_2^{ -\frac{1}{2}+(\frac{5}{2}+B+) \theta} N^{d - (\frac d 2+2) \theta} |A_\lambda|^{1+\frac{1}{p}} \notag \\
& =: \text{I} + \text{II} + \text{III}.
\label{E11}
\end{align}

First, suppose that $\lambda^2|A_\lambda|^2 \lesssim \text{I}$ holds.
Recall from \eqref{E4a} that $p' \theta = 2$. Then, with \eqref{EE0}, we have
\begin{align}
|A_\lambda| \lesssim \bigg(\frac{N^\frac{d}{2}}{\lambda}\bigg)^{(-\frac{p'}{4}+\frac{5}{2}+)\delta_1} N^{\frac{d}{2}p' - (d+2) } \lambda^{-p'} \lesssim N^{\frac{d}{2}q - (d+2) } \lambda^{-q}
\label{E12}
\end{align}

\noindent
for $ q > p'$ by choosing $\delta_1 = \delta_1(q, p')$ sufficiently small.

Next, suppose that $\lambda^2|A_\lambda|^2 \lesssim \text{II}$ holds. Then, from \eqref{EE0}, we have
\begin{align}
|A_\lambda|
& \lesssim N^{\frac{d}{2}p' - (d+2) } \lambda^{-p'} \Big(N^{\frac{d}{2}p'} \lambda^{-p'}M_1^{-\frac{p'}{2} - 2\sigma}\Big) \lesssim N^{\frac d 2 p' - (d+2)} \lambda^{-p'},
\label{E13}
\end{align}

\noindent
by making $\sigma = \sigma(p', \delta_1)$ in \eqref{E5a} (and hence $B = B(p', \delta_1)$) sufficiently large.

Lastly, suppose that $\lambda^2|A_\lambda|^2 \lesssim \text{III}$ holds. By $\lambda \gtrsim N^{\frac{d}{2}-\varepsilon}$ and \eqref{EE0}, we have
\begin{align}
|A_\lambda|
& \lesssim N^{\frac d 2 p' - (d+2)} \lambda^{-p'} N^{-2 + \varepsilon p' +(-\frac{p'}{2} + 5+ 2B+)\delta_2} \lesssim N^{\frac d 2 p' - (d+2)} \lambda^{-p'}
\label{E15}
\end{align}

\noindent
as long as we have $\varepsilon p' \leq 1$ and $\delta_2 = \delta_2(p', B)$ is sufficiently small such that $(-\frac{p'}{2} + 5+ 2B+)\delta_2 \leq 1$.

Finally, given $q > 6$, we choose $\theta < \frac 13$ such that $ q > p' = \frac{2}{\theta} > 6$. Then, from \eqref{E11}, \eqref{E12}, \eqref{E13}, and \eqref{E15} with $\lambda \leq N^\frac{d}{2}$, we obtain
\begin{align*}
|A_\lambda| \lesssim N^{\frac d 2 q - (d+2)} \lambda^{-q}.
\end{align*}

\noindent
This completes the proof of Proposition \ref{PROP:level} (ii).
\section{Well-posedness in subcritical spaces}
\label{SEC:4}

In this section, we prove local well-posedness of NLS \eqref{eq:nls1} on irrational tori in subcritical Sobolev spaces (Theorem \ref{THM:3}). It turns out that the well-posedness theory of \eqref{eq:nls1} is very similar to that on the standard torus \cite{Bo2,Bo1, BoPCMI, Bo4}. In the seminal paper \cite{Bo2}, Bourgain introduced the $X^{s, b}$-space whose norm is given by
\begin{equation}
\| u \|_{X^{s, b}(\mathbb{R} \times \mathbb{T}^d)} = \| \jb{\mathbf{n}}^s \jb{\tau - |\mathbf{n}|^2}^b\widehat u(\mathbf{n}, \tau) \|_{L^2_\tau\ell^2_{\mathbf{n}}(\mathbb{R}\times \mathbb{Z}^d )},
\label{F1}
\end{equation}

\noindent
where $\jb{\, \cdot\, } = (1+|\cdot|^2)^\frac{1}{2}$. After establishing Strichartz estimates, he proved several well-posedness results for NLS on the standard torus $\mathbb{T}^d$. In our case, i.e.~on an irrational torus, we need to replace the weight $\jb{\tau - |\mathbf{n}|^2}^b$ in \eqref{F1} by $\jb{\tau - Q(\mathbf{n})}^b$, where $Q(\mathbf{n})$ is defined in \eqref{eq:Q}. Then, by the standard $X^{s,b}$-theory, it is known that certain multilinear Strichartz estimates imply well-posedness. More precisely, we have the following lemma.

\begin{lemma}\label{LEM:LWP1}
Let $s_0 > \max (0, s_c)$ and let $I\subset \mathbb{R}$ be a bounded interval. Suppose that the following multilinear Strichartz estimate holds for $s>s_0$:
\begin{align}\label{multi-est}
\bigg\|\prod_{j=1}^{k+1}e^{-it\Delta}\phi_j \bigg\|_{L^2_{t,\mathbf{x}}(I\times \mathbb{T}^d)} \lesssim N_{\max}^{-s} \prod_{j=1}^{k+1} N_j^{s}\|\phi_j\|_{L^2_{\mathbf{x}}(\mathbb{T}^d) },
\end{align}

\noindent
for all $\phi_j \in L^2(\mathbb{T}^d)$ with $\supp \widehat \phi_j \subset [-N_j,N_j]^d$, $j = 1, \dots, k+1$, and $N_{\max} := \max(N_1,\cdots,N_{k+1} )$. Then, the Cauchy problem \eqref{eq:nls1} is locally well-posed in $H^s(\mathbb{T}^d)$ for $s> s_0$.
\end{lemma}

\noindent
The proof of Lemma \ref{LEM:LWP1} is standard and we refer the readers to \cite{Bo2,Bo1,Bo4,CW} for details.

\begin{proof}[Proof of Theorem \ref{THM:3}]
In view of Lemma \ref{LEM:LWP1}, it suffices to prove the $(k+1)$-linear estimate \eqref{multi-est} for $ s > s_0$. Without loss of generality, assume that $N_1 \ge N_2\ge \cdots \ge N_{k+1}$.

\noindent
\textbf{Case (i):} $d=2$.

When $k=1$, the well-posedness result was already obtained in \cite{CW}. In the following, we first consider the case $k = 2, 3, 4, 5$. First, assume that $\supp \widehat \phi_1 \subset [-N_2, N_2]^2$. Then, by H\"older's inequality and \eqref{co-d2} with $\frac{4(7k+5) }{3(k+3)} \in ( 4, \frac{20}{3}]$, we have
\begin{align}
\bigg\|\prod_{j=1}^{k+1} e^{-it\Delta} \phi_j \bigg\|_{L^2_{t,\mathbf{x}}}
& \le \|e^{-it\Delta} \phi_1 \|_{L^\frac{4(7k+5) }{3(k+3)}} \|e^{-it\Delta} \phi_2 \|_{L^\frac{4(7k+5) }{3(k+3)}} \prod_{j=3}^{k+1} \|e^{-it\Delta} \phi_j \|_{L^\frac{7k + 5}{2}} \notag \\
& \lesssim N_2^{\frac{7k-3}{7k+5} + 2\varepsilon} \|\phi_1 \|_{L^2}\|\phi_2 \|_{L^2}\prod_{j=3}^{k+1} N^{\frac{7k-3}{7k+5}+\varepsilon}_j \|\phi_j \|_{L^2}\notag \\
& \leq N_{\max}^{-s} \prod_{j=1}^{k+1} N_j^{s}\|\phi_j\|_{L^2_{\mathbf{x}}},
\label{F2}
\end{align}

\noindent
for $s > s_0 = \frac{7k-3}{7k+5}$. In general, if $\supp \widehat \phi_1 \subset [-N_1, N_1]^2$, then we can write $\phi_1 = \sum_{|\mathbf{j}|\lesssim \frac{N_1}{N_2}} \phi_{1\mathbf{j}}$, where $\supp \widehat \phi_{1\mathbf{j}} \subset N_2\, \mathbf{j} + [-N_2, N_2]^2$. Letting $\psi_{1\mathbf{j}} (\mathbf{x}) = e^{ - 2\pi i N_2 \mathbf{j} \cdot \mathbf{x}} \phi_{1\mathbf{j}}(\mathbf{x}) $, we have $\supp \widehat \psi_{1\mathbf{j}} \subset [-N_2, N_2]^2$.
Then, by a change of variables and \eqref{co-d2} (see \cite{BoPCMI}), we obtain \begin{equation} \|e^{-it\Delta} \phi_{1{\bf j}} \|_{L^\frac{4(7k+5) }{3(k+3)}_{t, {\bf x}}} = \|e^{-it\Delta} \psi_{1{\bf j}} \|_{L^\frac{4(7k+5) }{3(k+3)}} \lesssim N_2^{\frac{7k-3}{2(7k+5)} + \varepsilon}\|\phi_{1{\bf j}}\|_{L^2_{\bf x}}. \label{F3} \end{equation} \noindent Then, by almost orthogonality with \eqref{F2} and \eqref{F3}, we have \begin{align*} \bigg\|\prod_{j=1}^{k+1}e^{-it\Delta}\phi_j \bigg\|_{L^2_{t,{\bf x}}}^2 \lesssim \sum_{|{\bf j}|\lesssim \frac{N_1}{N_2}} \bigg\|e^{-it\Delta}\phi_{1{\bf j}} \prod_{j=2}^{k+1}e^{-it\Delta}\phi_j \bigg\|_{L^2_{t,{\bf x}}}^2 \lesssim N_{\max}^{-2s} \prod_{j=1}^{k+1} N_j^{2s}\|\phi_j\|_{L^2_{\bf x}}^2. \end{align*} \begin{remark}\rm In view of the reduction above, we only prove \eqref{multi-est}, assuming $\supp \widehat \phi_1 \subset [-N_2, N_2]^d$ in the following. \end{remark} Next, we consider the case $k \geq 5$. By H\"older's inequality and \eqref{co-d2} with $\frac{8k}{k+1} \in [ \frac{20}{3}, 10)$, we have \begin{align*} \bigg\|\prod_{j=1}^{k+1} e^{-it\Delta} \phi_j \bigg\|_{L^2_{t,{\bf x}}} & \le \|e^{-it\Delta} \phi_1 \|_{L^\frac{8k}{k+1}} \|e^{-it\Delta} \phi_2 \|_{L^\frac{8k}{k+1}} \prod_{j=3}^{k+1} \|e^{-it\Delta} \phi_j \|_{L^{4k}} \notag \\ & \lesssim N_2^{1- \frac{1}{k} + 2\varepsilon} \|\phi_1 \|_{L^2}\|\phi_2 \|_{L^2}\prod_{j=3}^{k+1} N^{1- \frac{1}{k}}_j \|\phi_j \|_{L^2_{\bf x}}. \end{align*} \noindent Hence, \eqref{multi-est} holds for $s > s_0 = 1 - \frac{1}{k}$. \noindent {\bf Case (ii):} $d=3$. When $k=1$, the well-posedness result was obtained in \cite{Bo4}.
When $k=2$, by H\"older's inequality and \eqref{co-d3}, we have \begin{align*} \bigg\|\prod_{j=1}^{3} e^{-it\Delta} \phi_j \bigg\|_{L^2_{t,{\bf x}}} & \le \|e^{-it\Delta} \phi_1 \|_{L^{\frac{104}{21}}} \|e^{-it\Delta} \phi_2 \|_{L^{\frac{104}{21}}} \|e^{-it\Delta} \phi_3 \|_{L^{\frac{52}{5}}} \\ & \lesssim N_2^{\frac{53}{52} + 2 \varepsilon } N^{\frac{53}{52}}_3 \prod_{j = 1}^3 \|\phi_j \|_{L^2_{\bf x}}. \end{align*} \noindent Hence, \eqref{multi-est} holds for $s > s_0 = \frac{53}{52}$. When $k\ge 3$, by H\"older's inequality and \eqref{co-d3}, we have \begin{align*} \bigg\|\prod_{j=1}^{k+1} e^{-it\Delta} \phi_j \bigg\|_{L^2_{t,{\bf x}}} & \le \|e^{-it\Delta} \phi_1 \|_{L^{\frac{20k}{3k+2}}} \|e^{-it\Delta} \phi_2 \|_{L^{\frac{20k}{3k+2}}} \prod_{j=3}^{k+1} \|e^{-it\Delta} \phi_j \|_{L^{5k}} \\ & \lesssim N_2^{\frac32- \frac1{k}} \|\phi_1 \|_{L^2} \|\phi_2 \|_{L^2} \prod_{j=3}^{k+1} N^{\frac32- \frac1{k}}_j \|\phi_j \|_{L^2_{\bf x}}. \end{align*} \noindent Hence, \eqref{multi-est} holds for $s \geq s_0 = \frac32- \frac1{k}$. \noindent {\bf Case (iii):} $d\ge 4$ and $k\ge 1$. By H\"older's inequality with \eqref{co-d4} or \eqref{co-d5}, we have \begin{align*} \bigg\|\prod_{j=1}^{k+1} e^{-it\Delta} \phi_j \bigg\|_{L^2_{t,{\bf x}}} & \le \|e^{-it\Delta} \phi_1 \|_{L^{\frac{4(d+2)k}{dk+2}}} \|e^{-it\Delta} \phi_2 \|_{L^{\frac{4(d+2)k}{dk+2}}} \prod_{j=3}^{k+1} \|e^{-it\Delta} \phi_j \|_{L^{(d+2)k}} \\ & \lesssim N_2^{\frac d2- \frac1{k}+\varepsilon} \|\phi_1 \|_{L^2} \|\phi_2 \|_{L^2}\prod_{j=3}^{k+1} N^{\frac d2- \frac1{k}}_j \|\phi_j \|_{L^2_{\bf x}}. \end{align*} \noindent Hence, \eqref{multi-est} holds for $s > s_0 = \frac d2- \frac1{k}$.
\end{proof} \section{Well-posedness in critical spaces} \label{SEC:critical} \subsection{Function spaces} \label{SUBSEC:G1} In this section, we prove local well-posedness of NLS \eqref{eq:nls1} on irrational tori in critical Sobolev spaces $H^{s_c}(\mathbb{T}^d)$ (Theorem \ref{THM:4}). In the following, we use the $U^p$- and $V^p$-spaces, developed by Tataru, Koch, and their collaborators \cite{KochT, HHK, HTT11, HTT2}. These spaces have been very effective in establishing well-posedness of various dispersive PDEs in critical spaces. We briefly go over the basic definitions of function spaces and their properties. See Hadac-Herr-Koch \cite{HHK} and Herr-Tataru-Tzvetkov \cite{HTT11} for detailed proofs. Let $H$ be a separable Hilbert space over $\mathbb{C}$. In particular, it will be either $H^s(\mathbb{T}^d)$ or $\mathbb{C}$. Let $\mathcal{Z}$ be the collection of finite partitions $\{t_k\}_{k = 0}^K$ of $\mathbb{R}$: $-\infty < t_0 < \cdots < t_K \leq \infty$. If $t_K = \infty$, we use the convention $u(t_K) :=0$ for all functions $u:\mathbb{R}\to H$. We use $\mathbf 1_I$ to denote the sharp characteristic function of a set $I \subset \mathbb{R}$. \begin{definition} \label{DEF:X1}\rm Let $1\leq p < \infty$. \smallskip \noindent \textup{(i)} A $U^p$-atom is defined by a step function $a:\mathbb{R}\to H$ of the form \[ a = \sum_{k = 1}^K \mathbf 1_{[t_{k-1}, t_k)}\phi_{k - 1}, \] \noindent where $\{t_k\}_{k = 0}^K \in \mathcal{Z}$ and $\{\phi_k\}_{k = 0}^{K-1} \subset H$ with $\sum_{k = 0}^{K-1} \|\phi_k\|_H^p = 1$.
Then, we define the atomic space $U^p(\mathbb{R}; H)$ to be the collection of functions $u:\mathbb{R}\to H$ of the form \begin{equation} u = \sum_{j = 1}^\infty \lambda_j a_j, \quad \text{ where $a_j$'s are $U^p$-atoms and $\{\lambda_j\}_{j \in \mathbb{N}}\in \ell^1(\mathbb{N}; \mathbb{C})$}, \label{X1} \end{equation} \noindent with the norm \[ \|u\|_{U^p(\mathbb{R}; H)} : = \inf \Big\{ \|{\bf \lambda} \|_{\ell^1} : \eqref{X1} \text{ holds with } \lambda = \{\lambda_j \}_{j \in \mathbb{N}} \text{ and some $U^p$-atoms } a_j\Big\}.\] \smallskip \noindent \textup{(ii)} We define $V^p(\mathbb{R}; H)$ by the collection of functions $u : \mathbb{R} \to H$ with $\|u\|_{V^p(\mathbb{R}; H)} < \infty$, where the $V^p$-norm is defined by \[ \|u\|_{V^p(\mathbb{R}; H)} := \sup_{\{t_k\}_{k = 0}^K \in \mathcal{Z}} \bigg(\sum_{k = 1}^K\|u(t_k) - u(t_{k-1})\|_H^p\bigg)^\frac{1}{p}. \] \noindent We also define $V^p_\text{rc}(\mathbb{R}; H)$ to be the closed subspace of all right-continuous functions in $V^p(\mathbb{R}; H)$ such that $\lim_{t \to -\infty} u(t) = 0$. \smallskip \noindent \textup{(iii)} Let $s \in \mathbb{R}$. We define $U^p_\Delta H^s$ (and $V^p_\Delta H^s$, respectively) to be the spaces of all functions $u: \mathbb{R} \to H^s(\mathbb{T}^d)$ such that the following $U^p_\Delta H^s$-norm (and $V^p_\Delta H^s$-norm, respectively) is finite: \[ \|u \|_{U^p_\Delta H^s} := \|e^{it \Delta} u\|_{U^p(\mathbb{R}; H^s)} \quad \text{and} \quad \|u \|_{V^p_\Delta H^s} := \|e^{it \Delta} u\|_{V^p(\mathbb{R}; H^s)}.\] \noindent Here, the Laplacian $\Delta$ is defined in terms of $Q({\bf n})$ as in \eqref{eq:Q0}. \end{definition} \begin{remark}\label{REM:UpVp} \rm Note that the spaces $U^p(\mathbb{R}; H)$, $V^p(\mathbb{R}; H)$, and $V^p_\text{rc}(\mathbb{R}; H)$ are Banach spaces.
Moreover, we have the following embeddings: \begin{equation*} U^p(\mathbb{R}; H) \hookrightarrow V^p_\text{rc}(\mathbb{R}; H) \hookrightarrow U^q(\mathbb{R}; H) \hookrightarrow L^\infty(\mathbb{R}; H) \end{equation*} \noindent for $ 1\leq p < q < \infty$. Similar embeddings hold for $U^p_\Delta H^s$ and $V^p_\Delta H^s$. \end{remark} Next, we state a transference principle and an interpolation result. \begin{lemma}\label{LEM:Xinterpolate} \textup{(i)} {\rm (transference principle)} Suppose that we have \[ \big\| T(e^{-it \Delta} \phi_1, \dots, e^{-it \Delta} \phi_k)\big\|_{L^p_t L^q_{\bf x}(\mathbb{R}\times \mathbb{T}^d)} \lesssim \prod_{j = 1}^k \|\phi_j\|_{L^2_{\bf x}}\] \noindent for some $1\leq p, q \leq \infty$. Then, we have \[ \big\| T(u_1, \dots, u_k)\big\|_{L^p_t L^q_{\bf x}(\mathbb{R}\times \mathbb{T}^d)} \lesssim \prod_{j = 1}^k \|u_j\|_{U^p_\Delta L^2_{\bf x}}.\] \noindent \textup{(ii)} {\rm (interpolation)} Let $E$ be a Banach space. Suppose that $T: U^{p_1}\times \cdots \times U^{p_k} \to E$ is a bounded $k$-linear operator such that \[ \|T(u_1, \dots, u_k)\|_{E} \leq C_1 \prod_{j = 1}^k \|u_j\|_{U^{p_j}}\] \noindent for some $p_1, \dots, p_k > 2$. Moreover, assume that there exists $C_2 \in (0, C_1]$ such that \[ \|T(u_1, \dots, u_k)\|_{E} \leq C_2 \prod_{j = 1}^k \|u_j\|_{U^{2}}.\] \noindent Then, we have \[ \|T(u_1, \dots, u_k)\|_{E} \leq C_2 \Big(\ln \frac{C_1}{C_2}+ 1\Big)^k \prod_{j = 1}^k \|u_j\|_{V^2}\] \noindent for $u_j \in V^2_\textup{rc}$, $j = 1, \dots, k$. \end{lemma} \noindent A transference principle as above has been commonly used in the Fourier restriction norm method. See \cite[Proposition 2.19]{HHK} for the proof of Lemma \ref{LEM:Xinterpolate} (i). The proof of the interpolation result follows from extending the trilinear result in \cite{HTT11} to a general $k$-linear case. See also \cite[Proposition 2.20]{HHK}.
\smallskip Let $\eta: \mathbb{R} \to [0, 1]$ be an even smooth cutoff function supported on $[-\frac{8}{5}, \frac{8}{5}]$ such that $\eta \equiv 1$ on $[-\frac{5}{4}, \frac{5}{4}]$. Given a dyadic number $N \geq 1$, we set $\eta_1(\xi) = \eta(|\xi|)$ and \[\eta_N(\xi) = \eta\Big(\frac{|\xi|}{N}\Big) - \eta\Big(\frac{2|\xi|}{N}\Big)\] \noindent for $N \geq 2$. Then, we define the Littlewood-Paley projection operator $P_N$ as the Fourier multiplier operator with symbol $\eta_N$. Moreover, we define $P_{\leq N}$ by $P_{\leq N} = \sum_{1 \leq M \leq N} P_M$. More generally, given a set $R \subset \mathbb{Z}^d$, we define $P_R$ to be the Fourier multiplier operator with symbol $\mathbf 1_R$. \begin{definition} \label{DEF:X3} \textup{(i)} Let $s\in \mathbb{R}$. We define $X^s$ to be the space of all functions $u : \mathbb{R} \to H^s(\mathbb{T}^d)$ such that $\| u\|_{X^s} < \infty$, where the $X^s$-norm is defined by \[ \|u \|_{X^s} : = \bigg( \sum_{{\bf n} \in \mathbb{Z}^d} \jb{{\bf n}}^{2s} \big\| e^{- i t Q({\bf n})} \widehat{u}({\bf n}, t) \big\|_{U^2(\mathbb{R}_t; \mathbb{C})}^2\bigg)^\frac{1}{2}. \] \smallskip \noindent \textup{(ii)} Let $s\in \mathbb{R}$. We define $Y^s$ to be the space of all functions $u : \mathbb{R} \to H^s(\mathbb{T}^d)$ such that for every ${\bf n} \in \mathbb{Z}^d$, the map $t \mapsto e^{- it Q({\bf n})} \widehat{u}({\bf n}, t)$ is in $V^2_\textup{rc}(\mathbb{R}_t; \mathbb{C})$ and $\| u\|_{Y^s} < \infty$, where the $Y^s$-norm is defined by \[ \|u \|_{Y^s} : = \bigg( \sum_{{\bf n} \in \mathbb{Z}^d} \jb{{\bf n}}^{2s} \big\| e^{- i t Q({\bf n})} \widehat{u}({\bf n}, t) \big\|_{V^2(\mathbb{R}_t; \mathbb{C})}^2\bigg)^\frac{1}{2}. \] \end{definition} \noindent Recall the following embeddings: \begin{equation} U^2_\Delta H^s \hookrightarrow X^s \hookrightarrow Y^s \hookrightarrow V^2_\Delta H^s.
\label{X3} \end{equation} \noindent Given a time interval $I \subset \mathbb{R}$, we define the restrictions $X^s(I)$ and $Y^s(I)$ of these spaces in the usual manner. We now state the linear estimates. Given $f \in L^1_\text{loc}([0, \infty); L^2(\mathbb{T}^d))$, define $\mathcal{I}(f)$ by \[ \mathcal{I}(f)(t): = \int_0^t e^{-i(t- t') \Delta} f(t') dt'.\] \begin{lemma}[Linear estimates]\label{LEM:Xlinear} Let $s \geq 0$ and $T > 0$. Then, the following linear estimates hold: \begin{align*} \|e^{-it \Delta} \phi\|_{X^s([0, T))} & \le \|\phi\|_{H^s}, \\ \|\mathcal{I}(f)\|_{X^s([0, T))} & \leq \sup_{\substack{v \in Y^{-s}([0, T))\\ \|v\|_{Y^{-s}} = 1}} \bigg| \int_0^T \int_{\mathbb{T}^d} f({\bf x}, t) \overline{v({\bf x}, t)} d{\bf x} dt\bigg|, \end{align*} \noindent for all $\phi \in H^s(\mathbb{T}^d)$ and $f \in L^1([0, T); H^s(\mathbb{T}^d))$. \end{lemma} Next, we present the crucial multilinear estimate. \begin{proposition}\label{PROP:XLWP} Let $d$ and $k$ satisfy \begin{align}\label{admis1} \textup{(i) } d = 2, \ \, k \ge 6, \quad \textup{(ii) }d= 3, \ \, k\ge3, \quad \text{ or } \quad \textup{(iii) } d\ge 4, \ \, k\ge 2. \end{align} \noindent Then, the following multilinear estimate holds for all $T \in (0, 1]$: \begin{align} \bigg\| \mathcal{I} \bigg( \prod_{j=1}^{2k+1}u^*_j\bigg)\bigg\|_{X^{s_c}([0, T))} \lesssim \sum_{j = 1}^{2k+1} \bigg(\|u_j\|_{X^s([0, T))} \prod_{\substack{ \ell = 1\\ \ell \ne j}}^{2k+1} \|u_\ell\|_{X^{s_c}([0, T))}\bigg), \label{X0} \end{align} \noindent for $s \geq s_c = \frac d2 - \frac 1k >0$, where $u^*_j$ denotes either $u_j$ or $\overline u_j$. \end{proposition} \noindent Once we prove Proposition \ref{PROP:XLWP}, one can prove Theorem \ref{THM:4} by the fixed point argument as in \cite{HTT11,W}. We omit details. The remainder of this section is devoted to the proof of Proposition \ref{PROP:XLWP}.
Indeed, the multilinear estimate \eqref{X0} follows once we prove the following multilinear Strichartz estimate. \begin{proposition}\label{PROP:LWP2} Let $d$ and $k$ satisfy \eqref{admis1}. Then, there exists $\delta > 0$ such that the following multilinear Strichartz estimate holds: \begin{align} \bigg\|\prod_{j=1}^{k+1}P_{N_j} e^{-it \Delta} \phi_j\bigg\|_{L^2_{t, {\bf x}}(I \times\mathbb{T}^d)} \lesssim& \left(\frac{N_{k+1}}{N_1}+\frac1{N_2}\right)^{\delta}\|P_{N_1}\phi_1\|_{L^2_{\bf x}(\mathbb{T}^d)} \prod_{j=2}^{k+1}N^{s_c}_j \|P_{N_j}\phi_j\|_{L^2_{\bf x}(\mathbb{T}^d)}, \label{G1} \end{align} \noindent for any interval $I\subset [0, 1]$ and for all $\phi_j \in L^2(\mathbb{T}^d)$, $j = 1, \dots, k+1$, and $N_1\ge N_2 \ge \cdots \ge N_{k+1}\ge 1$. \end{proposition} \noindent In Subsection \ref{SUBSEC:G2}, we first present the proof of Proposition \ref{PROP:LWP2}. In Subsection \ref{SUBSEC:G3}, we then use the multilinear Strichartz estimate \eqref{G1} to prove Proposition \ref{PROP:XLWP}, thus yielding Theorem \ref{THM:4}. \subsection{Multilinear Strichartz estimate} \label{SUBSEC:G2} In this subsection, we use the sharp $L^p$-Strichartz estimates \eqref{P1} in Theorem \ref{THM:2} to prove the multilinear Strichartz estimate \eqref{G1}. The main idea is to refine the Strichartz estimate by considering frequency scales finer than the standard dyadic Littlewood-Paley localizations as in \cite{HTT11}. See Lemma \ref{LEM:Gstr2}. \begin{definition}\label{DEF:Gadmis} \rm We say that $(d, p)\in \mathbb{N}\times \mathbb{R}$ is an admissible pair if \begin{align}\label{admis} \textup{(i) } d = 2, \ \, p > \frac{20}{3}, \quad \textup{(ii) } d =3, \ \, p>\frac{16}3, \quad \text{ or} \quad \textup{(iii) } d \ge 4,\ \, p> 4. \end{align} \end{definition} \noindent Note that, by Theorem \ref{THM:2}, the Strichartz estimates with $(d,p)$ in this range are sharp.
Given dyadic $N\geq 1$, let $\mathcal{C}_N$ denote the collection of cubes $C\subset \mathbb{Z}^d$ of side length $\sim N$ with arbitrary center and orientation. Then, we can rewrite Theorem \ref{THM:2} in the following form. \begin{lemma}\label{LEM:Gstr} Let $(d, p)$ be admissible and $I\subset \mathbb{R}$ be a bounded interval. Then, for all dyadic $N\geq 1$, we have \begin{align}\label{G2} \|P_N e^{-it \Delta}\phi\|_{L^p_{t, {\bf x}}(I\times \mathbb{T}^d)} \lesssim N^{\frac{d}2-\frac{d+2}p} \|P_N \phi\|_{L^2_{\bf x}(\mathbb{T}^d)}. \end{align} \noindent More generally, for all $C \in \mathcal{C}_N$, we have \begin{equation}\label{G3} \|P_C e^{-it \Delta}\phi\|_{L^p_{t, {\bf x}}(I\times \mathbb{T}^d)} \lesssim N^{\frac{d}2-\frac{d+2}p} \|P_C \phi\|_{L^2_{\bf x} (\mathbb{T}^d)}. \end{equation} \end{lemma} \noindent The Strichartz estimate \eqref{G3} shows that the loss in \eqref{G3} depends only on the size of the frequency support, not the position. See \cite{BoPCMI, HTT11}. In order to exploit further orthogonality between different frequency pieces under the linear Schr\"odinger evolution, we need to decompose the frequency cubes $C_N$. Let $\mathcal{R}_{M}(N)$ be the collection of all sets in $\mathbb{Z}^d$ which are given as the intersection of a cube of side length $2N$ with a strip of ``width'' $2 M$, i.e.~the collection of all sets of the form \begin{equation} \big({\bf n}_0+[-N,N]^d\big)\cap \big\{ {\bf n} \in \mathbb{Z}^d: |{\bf a} \cdot_{\pmb{\theta}} {\bf n} -A|\leq M \big\} \label{G3a} \end{equation} with some ${\bf n}_0 \in \mathbb{Z}^d$, ${\bf a} \in \mathbb{R}^d$, $|{\bf a}|=1$, $A \in \mathbb{R}$.
Here, the dot product ${\bf a} \cdot_{\pmb{\theta}} {\bf n}$ is given by \begin{equation} {\bf a}\cdot_{\pmb{\theta}} {\bf n} = \sum_{j=1}^d \theta_j a_j n_j, \label{G3b} \end{equation} \noindent where $\pmb{\theta} = (\theta_1, \dots, \theta_d)$ is as in \eqref{eq:Q}. Then, we have the following refinement of the Strichartz estimate. \begin{lemma}\label{LEM:Gstr2} Let $(d, p)$ be admissible and $I\subset \mathbb{R}$ be a bounded interval. Then, for all $1\leq M\leq N$ and $R \in \mathcal{R}_M(N)$, we have \begin{equation}\label{G4} \|P_R e^{-it \Delta}\phi\|_{L^p_{t, {\bf x}} (I\times \mathbb{T}^d)} \lesssim N^{\frac{d}2-\frac{d+2}p} \left(\frac{M}{N}\right)^{\delta}\|P_R \phi\|_{L^2_{\bf x} (\mathbb{T}^d)}, \end{equation} \end{lemma} \noindent where $0<\delta <\frac 12-\frac{10}{3p}$ when $ d= 2$, $0<\delta <\frac 12-\frac8{3p}$ when $ d= 3$, and $0<\delta <\frac 12- \frac 2p$ when $d \geq 4$. \begin{proof} By Bernstein's inequality, we have \begin{equation}\label{G5} \|P_R e^{-it \Delta}\phi\|_{L^\infty_{t, {\bf x}} (I \times \mathbb{T}^d)} \lesssim M^{\frac12}N^{\frac{d-1}{2}} \|P_R \phi\|_{L^2_{\bf x} (\mathbb{T}^d)}. \end{equation} \noindent Given admissible $(d, p)$, write $\frac 1p = \frac {\theta}{q} + \frac{1-\theta}{\infty}$ for some $\theta \in [0, 1)$, where $q < p$ is given by $q = \frac{20}{3}+$ when $ d = 2$, $q = \frac{16}{3}+$ when $ d = 3$, and $q = 4+$ when $ d\geq 4$. Then, \eqref{G4} follows from interpolating \eqref{G3} (with $p = q$) and \eqref{G5}. \end{proof} Now, we are ready to prove the main multilinear Strichartz estimates \eqref{G1}. \begin{proof}[Proof of Proposition \ref{PROP:LWP2}] Let $u_j = e^{-it \Delta} \phi_j$.
Then, by almost orthogonality in spatial frequencies, it suffices to prove that there exists $\delta > 0$ such that \begin{align} \Big\|P_CP_{N_1}u_1\prod_{j=2}^{k+1}P_{N_j} u_j\Big\|_{L^2_{t, {\bf x}}} & \lesssim \left(\frac{N_{k+1}}{N_1}+\frac1{N_2}\right)^{\delta}\|P_C P_{N_1}\phi_1\|_{L^2_{\bf x}} \prod_{j=2}^{k+1}N^{s_c}_j \|P_{N_j}\phi_j\|_{L^2_{\bf x}}, \label{G5a} \end{align} \noindent for all cubes $C \in \mathcal{C}_{N_2}$. Fix a cube $C \in \mathcal{C}_{N_2}$ and let ${\bf n}_0$ be the center of $C$. Partition $C = \bigcup R_\ell$ into disjoint strips $R_\ell$ with width $M=\max(N_2^2/N_1,1)$, which are all orthogonal to ${\bf n}_0$ with respect to the dot product $\cdot_{\pmb{\theta}}$ in \eqref{G3b}, i.e.~$R_\ell$ is given by \[ R_\ell=\big\{{\bf n}\in C :\,{\bf n}\cdot_{\pmb{\theta}} {\bf n}_0 \in \big[|{\bf n}_0| M \ell, |{\bf n}_0| M(\ell+1)\big) \big\}, \qquad |\ell| \sim \frac{N_1}{M}. \] \noindent Note that we have $R_\ell \in \mathcal{R}_M(N_2)$. By writing \begin{equation}\label{G6} P_C P_{N_1} u_1 \prod_{j=2}^{k+1}P_{N_j} u_j = \sum_\ell P_{R_\ell} P_{N_1} u_1 \prod_{j=2}^{k+1}P_{N_j} u_j, \end{equation} \noindent we show that the sum is almost orthogonal in $L^2_{t, {\bf x}}$. Since $N_2^2 \lesssim M^2 \ell$, we have \[ Q({\bf n}_1) = \frac1{Q({\bf n}_0)}|{\bf n}_1 \cdot_{\pmb{\theta}} {\bf n}_0|^2 + Q({\bf n}_1-{\bf n}_0) - \frac1{Q({\bf n}_0)}|({\bf n}_1- {\bf n}_0) \cdot_{\pmb{\theta}} {\bf n}_0|^2 = M^2\ell^2 + O(M^2 \ell), \] \noindent for ${\bf n}_1 \in R_\ell$. Note that the multiplication by the factor $\prod_{j=2}^{k+1}P_{N_j} u_j$ in \eqref{G6} changes the time frequency at most by $O(N_2^2)$. Hence, $P_{R_\ell} P_{N_1} u_1 \prod_{j=2}^{k+1}P_{N_j} u_j$ in \eqref{G6} is localized at time frequency $M^2 \ell^2 + O(M^2 \ell) = O(M^2 \ell^2)$ for each $\ell$.
Therefore, the sum in \eqref{G6} is almost orthogonal and we have \begin{equation} \label{G6a} \Big\| P_C P_{N_1} u_1 \prod_{j=2}^{k+1}P_{N_j} u_j \Big\|_{L^2_{t, {\bf x}}}^2 \sim \sum_\ell \bigg\|P_{R_\ell} P_{N_1} u_1 \prod_{j=2}^{k+1}P_{N_j} u_j\bigg\|_{L^2_{t, {\bf x}}}^2. \end{equation} With $d$ and $k$ as in \eqref{admis1}, let $p_{d,k}=(d+2)k$. Then, by Lemma \ref{LEM:Gstr}, we have \begin{equation}\label{G7} \|P_N e^{-it \Delta}\phi\|_{L^{p_{d,k}}_{t, {\bf x}}} \lesssim N^{s_c} \|P_N \phi\|_{L^2_{\bf x}}. \end{equation} \noindent Now, choose $p$ such that \begin{equation}\label{G8} \begin{split} \textup{(i)}& \quad \frac{20}3<p<\frac{8k}{k+1} \quad \text{when } d =2, \\ \textup{(ii)}& \quad \frac{16}3<p<\frac{20k}{3k+2} \quad \text{when } d =3, \\ \textup{(iii)}& \quad 4<p<\frac{4k(d+2)}{dk+2}\quad \text{when } d\ge 4. \end{split} \end{equation} \noindent The existence of such $p$ is implied by \eqref{admis1}. Moreover, the lower bound on $p$ guarantees that each $(d,p)$ is admissible, while the upper bound on $p$ guarantees that \begin{align}\label{G9} d-\frac{2(d+2)}{p}-s_c <0. \end{align} \noindent Let $q$ be such that \begin{align}\label{G10} \frac2p+\frac{k-2}{p_{d,k}}+\frac1q=\frac12. \end{align} \noindent Then, it follows from \eqref{G8} that $(d,q)$ is also admissible.
By H\"older's inequality and Lemmata \ref{LEM:Gstr} and \ref{LEM:Gstr2}, we have \begin{align} \bigg\|P_{R_\ell} P_{N_1} u_1 & \prod_{j=2}^kP_{N_j} u_j\bigg\|_{L^2_{t, {\bf x}}} \lesssim\|P_{R_\ell} P_{N_1} u_1 \|_{L^p} \|P_{N_2}u_2\|_{L^p} \prod_{j=3}^k \|P_{N_j}u_j\|_{L^{p_{d,k}}} \|P_{N_{k+1}}u_{k+1}\|_{L^q} \notag \\ & \lesssim N_2^{d-\frac{2(d+2)}{p}-s_c} N_{k+1}^{\frac d2- \frac{d+2}q -s_c}\bigg(\frac{M}{N_2} \bigg)^{\delta} \|P_{R_\ell}P_{N_1}\phi_1\|_{ L^2_{\bf x}} \prod_{j=2}^{k+1} N^{s_{c}}_j \|P_{N_j}\phi_j\|_{L^2_{\bf x}}, \label{G11} \end{align} \noindent for some $\delta > 0$. In view of \eqref{G8} and \eqref{G9}, choose $p$ such that \[ - d +\frac{2(d+2)}{p}+s_c = \delta. \] \noindent Moreover, from \eqref{G9} and \eqref{G10}, we have \begin{equation} \frac d2-\frac{d+2}q-s_c = -d+\frac{2(d+2)}{p}+s_c = \delta> 0. \label{G12} \end{equation} \noindent Then, noting $\frac{M}{N_2} \sim \frac{N_2}{N_1}+\frac1{N_2} $, it follows from \eqref{G11} that \begin{align*} \bigg\| P_{R_\ell} P_{N_1} u_1 \prod_{j=2}^{k+1}P_{N_j} u_j \bigg\|_{L^2_{t, {\bf x}}} \lesssim\bigg( \frac{N_{k+1}}{N_1}+\frac1{N_2}\bigg)^{\delta} \|P_{R_\ell} P_{N_1}\phi_1\|_{L^2_{\bf x}} \prod_{j=2}^{k+1}N^{s_c}_j \|P_{N_j}\phi_j\|_{L^2_{\bf x}}. \end{align*} \noindent Finally, by summing up the squares in \eqref{G6a} with respect to $\ell$, we obtain \eqref{G5a} and hence \eqref{G1}. This completes the proof of Proposition \ref{PROP:LWP2}. \end{proof} \subsection{Proof of Proposition \ref{PROP:XLWP}} \label{SUBSEC:G3} First, we state and prove an auxiliary lemma (Lemma \ref{LEM:XmultiY}), using Proposition \ref{PROP:LWP2}. Let $\mathcal{C}_N$, $N \geq 1$, be the collection of cubes $C\subset \mathbb{Z}^d$ of side length $\sim N$ as before. Let $(d, p)$ be admissible in the sense of Definition \ref{DEF:Gadmis}.
Then, it follows from Lemma \ref{LEM:Gstr} with Lemma \ref{LEM:Xinterpolate} (i) that \begin{equation}\label{Y0} \|P_C e^{-it \Delta}\phi\|_{L^p_{t, {\bf x}} (I\times \mathbb{T}^d)} \lesssim N^{\frac{d}2-\frac{d+2}p} \|P_C \phi\|_{U^p_\Delta L^2_{\bf x}} \end{equation} \noindent for all $C \in \mathcal{C}_N$. \begin{lemma} \label{LEM:XmultiY} Let $d$ and $k$ satisfy \eqref{admis1}. Then, there exists $\delta' > 0$ such that \begin{align} \bigg\|\prod_{j=1}^{k+1}P_{N_j} u_j\bigg\|_{L^2_{t, {\bf x}}(I \times\mathbb{T}^d)} \lesssim \bigg(\frac{N_{k+1}}{N_1}+\frac1{N_2}\bigg)^{\delta'}\|P_{N_1}u_1\|_{Y^0} \prod_{j=2}^{k+1}\|P_{N_j}u_j\|_{Y^{s_c}}, \label{Y1} \end{align} \noindent for any interval $I \subset [0, 1]$ and for all $N_1\ge N_2 \ge \cdots \ge N_{k+1}\ge 1$. \end{lemma} \begin{proof} By almost orthogonality in spatial frequencies, it suffices to prove that there exists $\delta' > 0$ such that \begin{align} \bigg\|P_CP_{N_1} u_1 \prod_{j=2}^{k+1}P_{N_j} u_j\bigg\|_{L^2_{t, {\bf x}}} \lesssim \bigg(\frac{N_{k+1}}{N_1}+\frac1{N_2}\bigg)^{\delta'}\|P_C P_{N_1}u_1\|_{Y^0} \prod_{j=2}^{k+1} N_j^{s_c}\|P_{N_j}u_j\|_{Y^{0}}, \label{Y2} \end{align} \noindent for all cubes $C \in \mathcal{C}_{N_2}$. Moreover, by the embedding \eqref{X3}, it suffices to prove \eqref{Y2}, where the $Y^0$-norm is replaced by the $V^2_\Delta L^2$-norm.
Furthermore, it suffices to prove that there exists $\delta > 0$ such that the following two estimates hold: \begin{align} \text{LHS of \eqref{Y2} } & \lesssim \bigg(\frac{N_{k+1}}{N_1}+\frac1{N_2}\bigg)^{\delta}\|P_C P_{N_1}u_1\|_{U^2_\Delta L^2} \prod_{j=2}^{k+1} N_j^{s_c}\|P_{N_j}u_j\|_{U^2_\Delta L^2}, \label{Y3} \intertext{and} \text{LHS of \eqref{Y2} } & \lesssim \bigg(\frac{N_{k+1}}{N_2}\bigg)^{\delta}\|P_C P_{N_1}u_1\|_{U^p_\Delta L^2} \prod_{j=2}^{k+1} N_j^{s_c}\|P_{N_j}u_j\|_{U^p_\Delta L^2}, \label{Y4} \end{align} \noindent for some $ p > 2$. Indeed, if \eqref{Y3} and \eqref{Y4} hold, then it follows from Lemma \ref{LEM:Xinterpolate} (ii) that \eqref{Y2} holds with $\delta' < \delta$. The first estimate \eqref{Y3} directly follows from Proposition \ref{PROP:LWP2} and Lemma \ref{LEM:Xinterpolate} (i). Hence, it remains to prove the second estimate \eqref{Y4}. Let $p$, $p_{d, k}$, and $q$ be as in the proof of Proposition \ref{PROP:LWP2}. Then, by H\"older's inequality with \eqref{Y0}, we have \begin{align} \bigg\|P_C P_{N_1} u_1 & \prod_{j=2}^kP_{N_j} u_j\bigg\|_{L^2_{t, {\bf x}}} \lesssim\|P_C P_{N_1} u_1 \|_{L^p} \|P_{N_2}u_2\|_{L^p} \prod_{j=3}^k \|P_{N_j}u_j\|_{L^{p_{d,k}}} \|P_{N_{k+1}}u_{k+1}\|_{L^q} \notag \\ & \lesssim N_2^{d-\frac{2(d+2)}{p}-s_c} N_{k+1}^{\frac d2- \frac{d+2}q -s_c} \|P_C P_{N_1} u_1 \|_{U^p_\Delta L^2} \prod_{j=2}^{k+1} N^{s_{c}}_j \|P_{N_j}u_j\|_{U^{q_j}_\Delta L^2}, \label{Y5} \end{align} \noindent where $q_2 = p$, $q_j = p_{d, k}$ for $j = 3, \dots, k$, and $q_{k+1} = q$. From \eqref{G8} and \eqref{G10} with $p_{d, k} = (d+2) k$, we have $q > p_{d, k} > p > 2$. Therefore, \eqref{Y4} follows from \eqref{Y5} with Remark \ref{REM:UpVp} and \eqref{G12}. \end{proof} We conclude this section by presenting the proof of Proposition \ref{PROP:XLWP}. \begin{proof}[Proof of Proposition \ref{PROP:XLWP}] Let $I = [0, T)$.
In the following, we prove \begin{align*} \Bigg\| \mathcal{I} \Bigg( \mathbb{P}_{\le N} \bigg( \prod_{j=1}^{2k+1}u^*_j\bigg) \Bigg)\Bigg\|_{X^{s_c}(I)} \lesssim \sum_{j = 1}^{2k+1} \bigg(\|u_j\|_{X^s(I)} \prod_{\substack{ \ell = 1\\ \ell \ne j}}^{2k+1} \|u_\ell\|_{X^{s_c}(I)}\bigg) \end{align*} \noindent for all $N\geq 1$, where the implicit constant is independent of $N$. By Lemma \ref{LEM:Xlinear}, we have \begin{align} \Bigg\| \mathcal{I} \Bigg( \mathbb{P}_{\le N} \bigg( \prod_{j=1}^{2k+1}u^*_j\bigg) \Bigg)\Bigg\|_{X^{s_c}(I)} \le \sup_{\substack{v \in Y^{-s}([0, T))\\ \|v\|_{Y^{-s}} = 1}} \bigg| \int_{I \times \mathbb{T}^d} \prod_{j=1}^{2k+1}u^*_j({\bf x}, t) \mathbb{P}_{\le N} \overline{v({\bf x}, t)} d{\bf x} dt\bigg|. \label{Y6} \end{align} \noindent Hence, with $u_0 = \mathbb{P}_{\le N }v$, it suffices to show that \begin{align} \bigg| \int_{I \times \mathbb{T}^d} \prod_{j=0}^{2k+1}u^*_j({\bf x}, t) d{\bf x} dt\bigg| \lesssim \|u_0\|_{Y^{-s}(I)} \sum_{j = 1}^{2k+1} \bigg(\|u_j\|_{X^s} \prod_{\substack{ \ell = 1\\ \ell \ne j}}^{2k+1} \|u_\ell\|_{X^{s_c}}\bigg). \label{Y7} \end{align} Now, dyadically decompose $u_j^* = \sum_{N_j \geq 1} P_{N_j} u_j^* $. Without loss of generality, assume $N_1 \geq N_2 \geq \cdots \geq N_{2k+1}$. Then, in order to have a non-trivial contribution on the left-hand side of \eqref{Y7}, we must have $N_1 \sim \max(N_0, N_2)$. \noindent {\bf Case (i):} $N_0 \sim N_1$.
By Lemma \ref{LEM:XmultiY}, we have \begin{align} \bigg| & \int_{I \times \mathbb{T}^d} \prod_{j=0}^{2k+1} P_{N_j} u^*_j({\bf x}, t) d{\bf x} dt\bigg| \leq \bigg\|\prod_{j=0}^{k}P_{N_{2j}} u^*_{2j}\bigg\|_{L^2_{t, {\bf x}}} \bigg\|\prod_{j=0}^{k}P_{N_{2j+1}} u^*_{2j+1}\bigg\|_{L^2_{t, {\bf x}}} \notag\\ & \lesssim \bigg(\frac{N_{2k}}{N_0}+\frac1{N_2}\bigg)^{\delta'} \bigg(\frac{N_{2k+1}}{N_1}+\frac1{N_3}\bigg)^{\delta'} \| P_{N_0} u_0\|_{Y^{-s}} \| P_{N_1} u_1\|_{Y^{s}} \prod_{j=2}^{2k+1}\|P_{N_j}u_j\|_{Y^{s_c}}. \label{Y8} \end{align} \noindent Summing \eqref{Y8} over dyadic blocks $N_0\sim N_1\geq N_2 \geq \cdots \geq N_{2k+1}$ and by the Cauchy-Schwarz inequality, we have \begin{align*} \text{LHS of \eqref{Y7} } & \lesssim \sum_{N_0 \sim N_1} \| P_{N_0} u_0\|_{Y^{-s}} \| P_{N_1} u_1\|_{Y^{s}} \prod_{j=2}^{2k+1}\|u_j\|_{Y^{s_c}} \notag \\ & \lesssim \| u_0\|_{Y^{-s}} \| u_1\|_{Y^{s}} \prod_{j=2}^{2k+1}\|u_j\|_{Y^{s_c}}, \end{align*} \noindent yielding \eqref{Y7} in view of \eqref{X3}. \noindent {\bf Case (ii):} $N_2 \sim N_1 \gg N_0$.
By Lemma \ref{LEM:XmultiY} with $N_1\sim N_2$, we have \begin{align} \bigg| \int_{I \times \mathbb{T}^d} & \prod_{j=0}^{2k+1} P_{N_j} u^*_j({\bf x}, t) d{\bf x} dt\bigg| \notag\\ & \lesssim \bigg(\frac{N_0}{N_1}\bigg)^{s+s_c} \bigg(\frac{N_{2k+1}}{N_1}+\frac1{N_3}\bigg)^{\delta'} \| P_{N_0} u_0\|_{Y^{0}} \| P_{N_1} u_1\|_{Y^{s}} \prod_{j=2}^{2k+1}\|P_{N_j}u_j\|_{Y^{s_c}}. \label{Y9} \end{align} \noindent Summing \eqref{Y9} over dyadic blocks $N_0 (\ll N_1)$ and $ N_1 \sim N_2 \geq N_3 \geq \cdots \geq N_{2k+1}$ and by the Cauchy-Schwarz inequality, we have \begin{align*} \text{LHS of \eqref{Y7} } & \lesssim \| u_0\|_{Y^{-s}} \sum_{N_1 \sim N_2} \| P_{N_1} u_1\|_{Y^{s}} \| P_{N_2} u_2\|_{Y^{s_c}} \prod_{j=3}^{2k+1}\|u_j\|_{Y^{s_c}} \notag \\ & \lesssim \| u_0\|_{Y^{-s}} \| u_1\|_{Y^{s}} \prod_{j=2}^{2k+1}\|u_j\|_{Y^{s_c}}. \end{align*} \noindent This completes the proof of Proposition \ref{PROP:XLWP}. \end{proof} \appendix \section{On the Weyl sum estimate \eqref{A6}}\label{SEC:A} In this appendix, we present a proof of \eqref{A6}. We decided to include the proof for the convenience of readers, in particular for those in PDEs. Since $I = I(\eta)$ is a compact interval and the integrand is periodic with period 1, it suffices to show \begin{equation} \int_0^{1} | F(t) |^{r}dt \sim N^{r-2}, \label{Z1} \end{equation} \noindent for $ r > 4$, where $F(t)$ is the Weyl sum defined by \begin{align*} F(t) = \sum_{0\le n\le N} e^{2\pi i n^2 t}. \end{align*} For $a, q \in \mathbb{N}$ with $1 \leq a \leq q \leq N$ and $(a, q) = 1$, define a major arc $\mathfrak{M}(q, a)$ by \begin{equation} \mathfrak{M}(q, a) = \bigg\{ t \in [0, 1]: \, \Big\|t - \frac{a}{q}\Big\| \leq \frac{1}{100N^2}\bigg\}, \label{Z1a} \end{equation} \noindent where $\|x\| = \min_{n\in \mathbb{Z}}|x - n|$ denotes the distance of $x$ to the closest integer as before.
Let $\mathfrak{M} = \bigcup_{a, q} \mathfrak{M}(q, a)$. Note that we have $\big\|t - \frac{a}{q}\big\| \leq \frac{1}{q^2}$ for $t \in \mathfrak{M}$. Then, by Weyl's inequality we have \begin{equation} |F(t) | \lesssim \frac{N}{q^\frac{1}{2}} + N^\frac{1}{2}(\log q)^\frac{1}{2} + q^\frac{1}{2}(\log q)^\frac{1}{2}. \label{Z2} \end{equation} \noindent Hence, the contribution from the major arc $\mathfrak{M}$ is estimated by \begin{align*} \int_\mathfrak{M} | F(t) |^{r} dt \lesssim \sum_{q = 1}^N \sum_{\substack{a=1\\(a, q) = 1}}^q\frac{N^r}{q^\frac{r}{2}}(\log q)^\frac{r}{2} \frac{1}{N^2} \leq N^{r-2} \sum_{q = 1}^N q^{1 - \frac r2+} \lesssim N^{r-2}, \end{align*} \noindent since $r > 4$. \begin{remark} \rm Indeed, the contribution from the major arc $\mathfrak{M}$ provides the lower bound in \eqref{Z1}. We only need to consider the contribution from $\mathfrak{M}(q, a)$ for odd $q \leq N^\frac{1}{2}$. Let $S(q, a)$ be the Gauss sum given by $S(q, a) = \sum_{n = 1}^q e^{2\pi i n^2 \frac aq}$. We have $|S(q, a)| = \sqrt{q}$ for odd $q$. Now, suppose that $q$ is odd such that $1\leq q \leq N^\frac{1}{2}$. Then, by Van der Corput's method \cite{Vi} with $|S(q, a)| = \sqrt{q}$, one can show that $|F(t)| \gtrsim \frac{N}{q^\frac{1}{2}}$ for $t \in \mathfrak{M}(q, a)$. Noting that the $\mathfrak{M}(q, a)$ are disjoint, we have \begin{align*} \int_\mathfrak{M} | F(t) |^{r} dt \gtrsim \sum_{\substack{q = 1\\q \text{ odd}}}^{N^\frac{1}{2}} \sum_{\substack{a=1\\(a, q) = 1}}^q\frac{N^r}{q^\frac{r}{2}} \frac{1}{N^2} = N^{r-2} \sum_{\substack{q = 1\\q \text{ odd}}}^{N^\frac{1}{2}} \phi(q) q^{ - \frac r2} \sim N^{r-2}, \end{align*} \noindent where $\phi(q)$ denotes Euler's function. This shows the lower bound in \eqref{Z1}. \end{remark} Next, we estimate the contribution from the minor arc $\mathfrak{m} : = [0, 1] \setminus \mathfrak{M}$. Fix small $\varepsilon > 0$.
Then, Dirichlet's theorem \cite[Lemma 2.1]{V} states that given $t \in [0, 1]$, there exist integers $a, q$ with $1 \elleq a \elleq q \elleq N^{2-\varepsilon}$ and $(a, q) = 1$ such that $\betaig\|t - \frac{a}{q}\betaig\| \elleq \frac{1}{qN^{2-\varepsilon}}$. Define $I(q)$ by \[ I(q) = \betaigcup_{\sigmaubstack{a = 1\\(a, q) = 1}}^q I(q, a), \] \noindent where $I(q, a) = \betaig\{ t\in [0, 1]:\, \betaig\|t - \frac{a}{q}\betaig\| \elleq \frac{1}{qN^{2-\varepsilon}} \betaig\}$. Now, in view of \eqref{Z1a}, divide the minor arc $\mathfrak{m}$ into two pieces: $ \mathfrak{m} = \mathfrak{m}_1 \cup \mathfrak{m}_2$, where \[ \mathfrak{m}_1 = \mathfrak{m}\cap \betaigcup_{ 1\elleq q \elll N^\varepsilon} I(q) \quad \text{and} \quad \mathfrak{m}_2 = \mathfrak{m}\cap \betaigcup_{ N < q \elleq N^{2-\varepsilon}} I(q).\] Let $ t \in \mathfrak{m}_2$. From \eqref{Z2}, we have $|F(t)|\ellesssim N^{1-\frac{1}{2}\varepsilon} (\ellog N)^\frac{1}{2}$. Then, by Hua's inequality \cite[Lemma 2.5]{V}, we have \betaetagin{align*} \int_{\mathfrak{m}_2} | F(t) |^{r} dt & \elleq \Big(\sigmaup_{t \in \mathfrak{m}_2}|F(t)|\Big)^{r-4} \int_0^1 | F(t) |^4 dt \ellesssim \betaig[ N^{1-\frac{1}{2}\varepsilon} (\ellog N)^\frac{1}{2}\betaig]^{r-4} N^{2+} \\ & \elleq N^{r-2}. \end{align*} Let $t \in \mathfrak{m}_1$, i.e.~we have $\betaig\|t - \frac{a}{q}\betaig\| \elleq \frac{1}{qN^{2-\varepsilon}}$ for some $ q \elll N^\varepsilon$. Then, by Lemmata 2.7 and 2.8 in \cite{V} with $|S(q, a)| \ellesssim q^\frac{1}{2}$, we have \betaetagin{equation*} |F(t)| = q^{-1} S(q, a) v\betaig( t- \tfrac aq\betaig)+ O(N^\frac{2}{200}) \ellesssim N^{1-\frac{\varepsilon}{2}}, \end{equation*} \noindent where $v$ is defined in \cite[(2.9)]{V}. Applying Hua's inequality as before, we have \betaetagin{align*} \int_{\mathfrak{m}_1} | F(t) |^{r} dt & \elleq \Big(\sigmaup_{t \in \mathfrak{m}_1}|F(t)|\Big)^{r-4} \int_0^1 | F(t) |^4 dt \ellesssim \betaig[N^{1-\frac{\varepsilon}{2}} \betaig]^{r-4} N^{2+} \elleq N^{r-2}. 
\end{align*} \noindent This completes the proof of \eqref{Z1}. \section{On a partially irrational torus $\mathbb{T}^2 \times \mathbb{T}_{\alpha_3}$} \label{SEC:B} In this appendix, we present a sketch of the proof of Theorem \ref{THM:5}. Thus, we set $d = 3$ and assume that \eqref{eq:Q1} holds in the following. The main ingredient is an improvement of the Strichartz estimate on a partially irrational torus. \begin{proposition}\label{PROP:Bstrichartz} Let $d = 3$ and $I$ be a compact interval in $\mathbb{R}$. Suppose that \eqref{eq:Q1} holds. Then, the scaling-invariant Strichartz estimate \eqref{P1} holds for $p > \frac{14}3$. \end{proposition} \noindent The proof of Proposition \ref{PROP:Bstrichartz} is based on the following level set estimates under the assumption \eqref{eq:Q1}. \begin{lemma}\label{LEM:level2} Suppose that \eqref{eq:Q1} holds. Given a compact interval $I \subset \mathbb{R}$ and $f$ as in \eqref{C0}, let $A_\lambda = A_\lambda(f)$ be the distribution function defined by \eqref{C00a}. \noindent \textup{(i)} For any $\varepsilon > 0$, we have \begin{equation} |A_\lambda | \lesssim N^{\frac{1}{1+4\varepsilon} } \lambda^{-4 + \frac{8}{1+4\varepsilon}\varepsilon} \label{BB1} \end{equation} \noindent for $\lambda \gtrsim N^{1+\varepsilon}$. \noindent \textup{(ii)} Let $ q > 4$. Then, there exists small $\varepsilon >0$ such that \begin{equation} |A_\lambda |\lesssim N^{\frac{3}{2} q - 5} \lambda^{-q} \label{BB2} \end{equation} \noindent for $\lambda \gtrsim N^{\frac{3}{2}-\varepsilon}$. In \eqref{BB1} and \eqref{BB2}, the implicit constants depend on $\varepsilon > 0$, $q >4$, and $|I|$, but are independent of $f$. \end{lemma} We first prove Proposition \ref{PROP:Bstrichartz} assuming Lemma \ref{LEM:level2}. Then, we sketch the proof of Theorem \ref{THM:5}. We present the proof of Lemma \ref{LEM:level2} at the end of this appendix.
Given $f$ as in \eqref{C0}, let $F({\betaf x}, t) = e^{-it \Delta} f({\betaf x}, t)$. By Cauchy-Schwarz inequality, we have $\|F\|_{L^\infty_{t, {\betaf x}}} \ellesssim N^\frac{3}{2}. $ Given $p > \frac{14}{3}$, let $q \in (4, p)$. Then, by Lemma \ref{LEM:level2} and Theorem \ref{THM:1} (ii), we have \betaetagin{align*} & \int_{I \times \mathbb{T}^3 } |F({\betaf x}, t)|^p d{\betaf x} dt\\ & \hphantom{X} \elleq \betaigg(\int_{ N^{1+\varepsilon_1}\ellesssim |F| \ellesssim N^{\frac{3}{2}-\varepsilon_2}} + \int_{ N^{\frac 32 - \varepsilon_2}\ellesssim |F| \ellesssim N^{\frac{3}{2}}}\betaigg) |F({\betaf x}, t)|^p d{\betaf x} dt + N^{(1+\varepsilon_1)(p - 4)}\int |F({\betaf x}, t)|^4 d{\betaf x} dt\\ & \hphantom{X} \ellesssim N^{1-\frac{4}{1+4\varepsilon_1}\varepsilon_1} \int_{N^{1+\varepsilon_1}}^{N^{\frac{3}{2}-\varepsilon_2}}\ellambda^{p - 4+ \frac{8}{1+4\varepsilon_1}\varepsilon_1} d\ellambda + N^{\frac{3}{2}q - 5} \int_{N^{\frac{3}{2}-\varepsilon_2}}^{N^\frac{3}{2}}\ellambda^{p - 1 - q} d\ellambda + N^{(1+\varepsilon_1)(p - 4)+ \frac 43+}\\ & \hphantom{X} \ellesssim N^{\frac{3}{2}p - 5}, \end{align*} \noindent for sufficiently small $\varepsilon_1, \varepsilon_2 > 0$. Here, the condition $p > \frac{14}{3}$ is needed in the last inequality. This proves Proposition \ref{PROP:Bstrichartz}. Next, we briefly discuss how Theorem \ref{THM:5} follows from Proposition \ref{PROP:Bstrichartz}. As in Section \ref{SEC:critical}, the goal is to prove the multilinear estimate \eqref{X0} in Proposition \ref{PROP:XLWP} for $ d = 3$ and $k=2$ (with $s_c =1$) under the assumption \eqref{eq:Q1}. In view of the argument in Subsection \ref{SUBSEC:G3} (which holds without any change even in this case), it suffices to prove the multilinear Strichartz estimate \eqref{G1} in Proposition \ref{PROP:LWP2} for $d = 3$ and $k = 2$ (with $s_c = 1$). 
By repeating the proof of Lemma \ref{LEM:Gstr2} with Proposition \ref{PROP:Bstrichartz}, we see that \eqref{G4} holds for $ p> \frac{14}{3}$ with $\delta \in (0, \frac 12 -\frac7{3p})$. In the proof of Proposition \ref{PROP:LWP2}, the only change appears in \eqref{G8} and we can choose $p$ such that \[ \frac{14}{3} < p< \frac{20k}{3k+2}\] \noindent in this case. In particular, we can set $k = 2$ by choosing $p \in (\frac{14}{3}, 5)$. The rest of the argument follows exactly as in Section \ref{SEC:critical}. In the remaining part of the paper, we present the proof of Lemma \ref{LEM:level2}. \betaetagin{proof}[Proof of Lemma \ref{LEM:level2}] The proof follows the proof of Proposition \ref{PROP:level} with small modifications. In the following, we only point out these modifications. Given an interval $I \sigmaubset \mathbb{R}$, assume that $I $ is centered at $0$. \noindent (i) With $Q({\betaf n})$ in \eqref{eq:Q1}, define $\mathbf{R}$ as in \eqref{B4a}. Then, we have $\mathbf{R}({\betaf x}, t) = \prod_{j = 1}^2 R_1(x_j, t)\cdot R_{\theta_3}(x_3, t),$ where $R_\theta$ is defined in \eqref{B4b}. We also define $\mathbf{R}_1$ and $\mathbf{R}_2$ as in \eqref{C2a}. Then, by \eqref{C2b}, we have \betaetagin{equation} \|\mathbb{R}R_1\|_{L^\infty_{t, {\betaf x}}} \ellesssim \min\betaig( N M\ellog M, N^3\betaig). \ellabel{BB3} \end{equation} \noindent Proceeding as in \eqref{C6} with \eqref{BB3} and \eqref{C5}, we have \betaetagin{align} \ellambda^2 |A_\ellambda|^2 & \elleq \|\mathbb{R}R_1\|_{L^\infty_{t, {\betaf x}}} \|\mathbb{T}heta_\ellambda\|_{L^1_{t, {\betaf x}}}^2 + \|\widehat \mathbb{R}R_2\|_{L^\infty_\tau \ell^\infty_{\betaf n} } \|\mathbb{T}heta_\ellambda\|_{L^2_{t, {\betaf x}}}^2 \notag \\ & \elleq C_1 N M^{1+\varepsilon_1} |A_\ellambda|^2 + M^{-1+\varepsilon_2} |A_\ellambda| \ellabel{BB4} \end{align} \noindent for small $\varepsilon_1, \varepsilon_2 > 0$. 
Choose $M \gammaeq N$ such that $N M^{1+\varepsilon_1} \sigmaim \ellambda^2.$ This condition with $M \gammaeq N$ implies that $\ellambda \gammaes N^{1 + \frac{\varepsilon_1}{2}}$. Then, \eqref{BB4} yields \betaetagin{align*} |A_\ellambda| \ellesssim \Big(\frac{N}{\ellambda^2}\Big)^\frac{1 - \varepsilon_2}{1+\varepsilon_1} \ellambda^{-2} \ellesssim N^{\frac{1}{1+2\varepsilon_1} } \ellambda^{-4+\frac{4}{1+2\varepsilon_1}\varepsilon_1} \end{align*} \noindent by setting $\varepsilon_2 = \varepsilon_2(\varepsilon_1)$ such that $\frac{1 - \varepsilon_2}{1+\varepsilon_1} = \frac{1}{1+2\varepsilon_1}$. This proves \eqref{BB1} with $\varepsilon = \frac{\varepsilon_1}{2}$. \noindent (ii) Define $\mathbf{K}$ by \betaetagin{align} \mathbf{K}({\betaf x}, t) & = \chi(t) K(x_1, t) K(x_2, t) \sigmaum_{|n_3|\elleq N} e^{2\pi i (n_3 x_3 + \theta_3 n_3^2 t)} , \ellabel{BB5} \end{align} \noindent where $K(x, t)$ is as in \eqref{D0a}. Let $\mathcal{L}ambda_{M, s}$ be as in \eqref{D7}. Then, from Lemma \ref{LEM:Weyl}, \eqref{D4}, and \eqref{D5} with $M \elleq 2^s \elleq N$, we have \betaetagin{align} |\mathcal{L}ambda_{M, s}({\betaf x}, t)|\ellesssim N \betaigg(\frac{N}{M^\frac{1}{2} \betaig( 1 + (2^{-s} N)^\frac{1}{2}\betaig)}\betaigg)^2 + \frac{M^2}{2^sN} N^{2} \ellesssim \frac{2^s N^2 }{M}. \ellabel{BB6} \end{align} \noindent Hence, from \eqref{BB6}, we have \betaetagin{equation} \| f * \mathcal{L}ambda_{M, s} \|_{L^\infty(I \times \mathbb{T}^d)} \ellesssim \frac{2^s N^2 }{M}\|f\|_{L^1(I \times \mathbb{T}^d)}. \ellabel{BB7} \end{equation} \noindent Also, with $\mathcal{L}ambda$ as in \eqref{E1} and $\mathbf{K}$ as in \eqref{BB5}, it follows from \eqref{D4} and \eqref{D5} with $N_1 = \frac{1}{100}N$ that \betaetagin{equation} \|\mathbf{K} - \mathcal{L}ambda\|_{L^\infty(I\times\mathbb{T}^d)} \ellesssim N^{2}. \ellabel{BB8} \end{equation} Let $p$ be as in \eqref{E4a}. In the following, we consider the case $\theta \gammaeq \frac{1}{3}$. 
With $M_1$ and $M_2$ as in \eqref{EE0}, write $\mathcal{L}ambda = \mathcal{L}ambda_1 + \mathcal{L}ambda_2+ \mathcal{L}ambda_3$ as before. See \eqref{EE0a}. By interpolating \eqref{BB7} and \eqref{D13a} and summing over dyadic $M\elleq M_1$ and $2^s \elleq N$, we have \betaetagin{align} \| f *\mathcal{L}ambda_1 \|_{L^{p'}(I \times \mathbb{T}^d)} & \ellesssim M_1^{-1 + (3 +)\theta} N^{3 - 5\theta} \| f \|_{L^{p}(I \times \mathbb{T}^d)}, \ellabel{BB9} \end{align} \noindent since $\theta \in [\frac 13, \frac 12)$. Next, choose $D \sigmaim M^\alpha$ for some small $\alpha = \alpha(\theta) > 0$, and set $\betaetata \elll 1$ and $B \gammag1$ such that \[ \sigma : = -3 -\betaeta +\alpha B - > 0.\] \noindent Then, by interpolating \eqref{BB7} and \eqref{D18} and summing over dyadic $M \in ( M_1, M_2]$ and $2^s \elleq N$, we have \betaetagin{align} \| f * \mathcal{L}ambda_2 \|_{L^{p'}(I \times \mathbb{T}^d)} & \ellesssim N^{3 - 5 \theta} \| f \|_{L^{p}(I \times \mathbb{T}^d)} \notag \\ & + \Big( M_1^{-1 - \sigma \theta} N^{3 - \frac 52\theta} + M_2^{-1 + (3+ B+) \theta} N^{3 - \frac 72 \theta} \Big) \|f\|_{L^1(I\times \mathbb{T}^d)}, \ellabel{BB10} \end{align} \noindent as long as $\theta < \frac{1}{2+\alpha +}.$ This can be guaranteed by choosing $\alpha = \alpha(\theta) > 0$ sufficiently small for given $\theta < \frac{1}{2}$. Lastly, by interpolating \eqref{BB7} and \eqref{D13} and summing over $M \gammaeq M_2 \sigmaim N^{\delta_2}$ and $s$ with $2^s \elleq N$, we have \betaetagin{align} \| f * \mathcal{L}ambda_3 \|_{L^{p'}(I \times \mathbb{T}^d)} & \ellesssim N^{3 - 5\theta} \| f \|_{L^{p}(I \times \mathbb{T}^d)}, \ellabel{BB11} \end{align} \noindent as long as $\theta < \frac 12$. Now, we are ready to prove the level set estimate \eqref{BB2} for $ q > 4$. 
Then, proceeding as before with \eqref{BB8}, \eqref{BB9}, \eqref{BB10}, and \eqref{BB11}, we have \betaetagin{align} \ellambda^2|A_\ellambda|^2 & \elleq |\jb{ (\mathbf{K} - \mathcal{L}ambda) * \mathbb{T}heta_\ellambda, \mathbb{T}heta_\ellambda }|+ |\jb{ \mathcal{L}ambda*\mathbb{T}heta_\ellambda, \mathbb{T}heta_\ellambda }|\notag \\ & \ellesssim N^{2} |A_\ellambda|^2 + M_1^{-1 + (3+) \theta} N^{3 - 5 \theta} |A_\ellambda|^\frac{2}{p} \notag \\ & \hphantom{XXX} + M_1^{- 1- \sigma \theta} N^{3 - \frac 52 \theta}|A_\ellambda|^{1+\frac{1}{p}} + M_2^{ -1+(3+B+) \theta} N^{3 - \frac 72 \theta} |A_\ellambda|^{1+\frac{1}{p}}. \ellabel{BB12} \end{align} \noindent Since $\ellambda \gammag N$, \eqref{BB12} reduces to \betaetagin{align} \ellambda^2|A_\ellambda|^2 & \ellesssim M_1^{-1 + (3 +)\theta} N^{3 - 5 \theta} |A_\ellambda|^\frac{2}{p} + M_1^{- 1- \sigma \theta} N^{3 - \frac{5}{2} \theta}|A_\ellambda|^{1+\frac{1}{p}}\notag \\ & \hphantom{XXX} + M_2^{ -1+(3+B+) \theta} N^{ 3- \frac 7 2 \theta} |A_\ellambda|^{1+\frac{1}{p}} \notag \\ & = : \hspace{0.5mm}\text{I}\hspace{0.5mm} + \hspace{0.5mm}\text{I}\hspace{0.5mm}I + \hspace{0.5mm}\text{I}\hspace{0.5mm}II. \ellabel{BB13} \end{align} First, suppose that $\ellambda^2|A_\ellambda|^2 \ellesssim \hspace{0.5mm}\text{I}\hspace{0.5mm}$ holds. With $p' \theta = 2$ and \eqref{EE0}, we have \betaetagin{align} |A_\ellambda| \ellesssim \betaigg(\frac{N^\frac{3}{2}}{\ellambda}\betaigg)^{(-\frac{p'}{2}+3+)\delta_1} N^{\frac{3}{2}p' - 5 } \ellambda^{-p'} \ellesssim N^{\frac{3}{2}q - 5 } \ellambda^{-q} \ellabel{BB14} \end{align} \noindent for $ q > p'$ by choosing $\delta_1 = \delta_1(q, p')$ sufficiently small. Next, suppose that $\ellambda^2|A_\ellambda|^2 \ellesssim \hspace{0.5mm}\text{I}\hspace{0.5mm}I$ holds. 
Then, from \eqref{EE0}, we have \betaetagin{align} |A_\ellambda| & \ellesssim N^{\frac{3}{2}p' - 5 } \ellambda^{-p'} \Big(N^{\frac{3}{2}p'} \ellambda^{-p'}M_1^{-p' - 2\sigma}\Big) \ellesssim N^{\frac 3 2 p' - 5} \ellambda^{-p'}, \ellabel{BB15} \end{align} \noindent by making $\sigma = \sigma(p', \delta_1)$ in \eqref{E5a} (and hence $B = B(p', \delta_1)$) sufficiently large. Lastly, suppose that $\ellambda^2|A_\ellambda|^2 \ellesssim \hspace{0.5mm}\text{I}\hspace{0.5mm}II$ holds. By $\ellambda \gammaes N^{\frac{3}{2}-\varepsilon}$ and \eqref{EE0}, we have \betaetagin{align} |A_\ellambda| & \ellesssim N^{\frac 3 2 p' - 5} \ellambda^{-p'} N^{-2 + \varepsilon p' +(-p' + 6+ 2B+)\delta_2} \ellesssim N^{\frac 3 2 p' - 5} \ellambda^{-p'} \ellabel{BB16} \end{align} \noindent as long as we have $\varepsilon p' \elleq 1$ and $\delta_2 = \delta_2(p', B)$ is sufficiently small. Finally, given $q > 4$, we choose $\theta < \frac{1}{2}$ such that $ q > p' = \frac{2}{\theta} > 4$. Then, \eqref{BB2} follows from \eqref{BB13}, \eqref{BB14}, \eqref{BB15}, and \eqref{BB16} with $\ellambda \elleq N^\frac{3}{2}$. \end{proof} \betaetagin{thebibliography}{99} \betaibitem{BCT} J.~Bennett, A.~Carbery, T.~Tao, {\it On the multilinear restriction and Kakeya conjectures,} Acta Math. {\betaf 196} (2006), no. 2, 261--302. \betaibitem{Bo0} J.~Bourgain, {\it On $\mathcal{L}ambda(p)$-subsets of squares, } Israel J. Math. {\betaf 67} (1989), no. 3, 291--311. \betaibitem{Bo2} J.~Bourgain, {\it Fourier transform restriction phenomena for certain lattice subsets and applications to nonlinear evolution equations. I. Schr\"{o}dinger equations}, Geom. Funct. Anal. {\betaf{3}} (1993), 107--156. \betaibitem{Bo1} J.~Bourgain, {\it Exponential sums and nonlinear Schr\"{o}dinger equations,} Geom. Funct. Anal. {\betaf{3}} (1993), 157--178. \betaibitem{BoPCMI} J.~Bourgain, {\it Nonlinear Schr\"odinger equations,} Hyperbolic equations and frequency interactions (Park City, UT, 1995), 3--157, IAS/Park City Math. 
Ser., 5, Amer. Math. Soc., Providence, RI, 1999. \betaibitem{Bo4} J.~Bourgain, {\it On Strichartz's inequalities and the nonlinear Schr\"odinger equation on irrational tori}, Mathematical aspects of nonlinear dispersive equations, 1--20, Ann. of Math. Stud., 163, Princeton Univ. Press, Princeton, NJ, 2007. \betaibitem{Bo5} J.~Bourgain, {\it Moment inequalities for trigonometric polynomials with spectrum in curved hypersurfaces}, Israel J. Math. {\betaf 193} (2013), no. 1, 441--458. \betaibitem{BG} J.~Bourgain, L.~Guth, {\it Bounds on oscillatory integral operators based on multilinear estimates,} Geom. Funct. Anal. {\betaf 21} (2011), no. 6, 1239--1295. \betaibitem{BGT1} N.~Burq, P.~G\'erard, N.~Tzvetkov, {\it Strichartz inequalities and the nonlinear Schr\"odinger equation on compact manifolds,} Amer. J. Math. {\betaf 126} (2004), no. 3, 569--605. \betaibitem{BGT2} N.~Burq, P.~G\'erard, N.~Tzvetkov, {\it Bilinear eigenfunction estimates and the nonlinear Schr\"odinger equation on surfaces,} Invent. Math. {\betaf 159} (2005), no. 1, 187--223. \betaibitem{CW} F.~Catoire, W.-M. Wang, {\it Bounds on Sobolev norms for the defocusing nonlinear Schr\"odinger equation on general flat tori}, Commun. Pure Appl. Anal. {\betaf{9}} (2010), 483--491. \betaibitem{Caz} T.~Cazenave, {\it Semilinear Schr\"odinger equations}, Courant Lecture Notes in Mathematics, 10. New York University, Courant Institute of Mathematical Sciences, New York; American Mathematical Society, Providence, RI, 2003. xiv+323 pp. \betaibitem{CazW2} T.~Cazenave, F.~Weissler, {\it The Cauchy problem for the critical nonlinear Schr\"odinger equation in $H^s$}, Nonlinear Anal. {\betaf 14} (1990), no. 10, 807--836. \betaibitem{Demeter} C.~Demeter, {\it Incidence theory and restriction estimates}, arXiv:1401.1873 [math.CA]. \betaibitem{Demirbas} S.~Demirbas, {\it Local well-posedness for 2-$d$ Schr\"odinger equation on irrational tori and bounds on Sobolev norms}, arXiv:1307.0051 [math.AP]. 
\betaibitem{GV2} J.~Ginibre, G.~Velo, {\it On the global Cauchy problem for some nonlinear Schr\"odinger equations,} Ann. Inst. H. Poincar\'e Anal. Non Lin\'eaire {\betaf 1} (1984), no. 4, 309--323. \betaibitem{GV} J.~Ginibre, G.~Velo, {\it Smoothing properties and retarded estimates for some dispersive evolution equations.} Comm. Math. Phys. \textbf{144} (1992), no. 1, 163--188. \betaibitem{HHK} M.~Hadac, S.~Herr, H.~Koch, {\it Well-posedness and scattering for the KP-II equation in a critical space,} Ann. Inst. H. Poincar\'e Anal. Non Lin\'eaire {\betaf 26} (2009), no. 3, 917--941. {\it Erratum to ``Well-posedness and scattering for the KP-II equation in a critical space''}, Ann. Inst. H. Poincar\'e Anal. Non Lin\'eaire {\betaf 27} (2010), no. 3, 971--972. \betaibitem{HW} G.~Hardy, E.~Wright, {\it An introduction to the theory of numbers,} Fifth edition. The Clarendon Press, Oxford University Press, New York, 1979. xvi+426 pp. \betaibitem{Herr} S.~Herr. {\it The quintic nonlinear Schr\"odinger equation on three-dimensional Zoll manifolds,} Amer. J. Math. {\betaf 135} (2013), no. 5, 1271--1290. \betaibitem{HTT11}S.~Herr, D.~Tataru, N.~Tzvetkov, {\it Global well-posedness of the energy critical Nonlinear Schr\"odinger equation with small initial data in $H^1(\mathbb{T}^3)$}, Duke Math. J., {\betaf {159}} (2011) 329--349. \betaibitem{HTT2} S.~Herr, D.~Tataru, N.~Tzvetkov, {\it Strichartz estimates for partially periodic solutions to Schr\"odinger equations in 4$d$ and applications}, J. Reine Angew. Math. doi 10.1515/crelle--2012--0013. \betaibitem{Hu-Li} Y. Hu, X. Li, {\it Discrete Fourier restriction associated with Schr\"odinger Equations}, arXiv:1108.5164v1. \betaibitem{Huxley} M.~Huxley, {\it Exponential sums and lattice points. III}, Proc. London Math. Soc. (3) {\betaf 87} (2003), no. 3, 591--609. \betaibitem{IK} H.~Iwaniec, E.~Kowalski, {\it Analytic number theory,} American Mathematical Society Colloquium Publications, 53. 
American Mathematical Society, Providence, RI, 2004. xii+615 pp. \betaibitem{IP} A.~Ionescu, B.~Pausader, {\it The energy-critical defocusing NLS on $\mathbb{T}^3$}, Duke Math. J. {\betaf 161} (2012), no. 8, 1581--1612. \betaibitem{Jarnik} V.~ Jarn\'ik, {\it \"Uber die Gitterpunkte auf konvexen Kurven,} (German) Math. Z. {\betaf 24} (1926), no. 1, 500--518. \betaibitem{Kato} T.~Kato, {\it On nonlinear Schr\"odinger equations,} Ann. Inst. H. Poincar\'e Phys. Th\'eor. {\betaf 46} (1987), no. 1, 113--129. \betaibitem{KeelTao} M.~Keel, T.~Tao, {\it Endpoint Strichartz estimates.} Amer. J. Math. \textbf{120} (1998), no. 5, 955--980. \betaibitem{KochT} H.~Koch, D.~Tataru, {\it A priori bounds for the 1D cubic NLS in negative Sobolev spaces,} Int. Math. Res. Not. (2007), no. 16, Art. ID rnm053, 36 pp. \betaibitem{M} H.~Montgomery, {\it Ten lectures on the interface between analytic number theory and harmonic analysis,} CBMS Regional Conference Series in Mathematics, 84. Published for the Conference Board of the Mathematical Sciences, Washington, DC; by the American Mathematical Society, Providence, RI, 1994. xiv+220 pp. \betaibitem{PTW} B.~Pausader, N.~Tzvetkov, X.~Wang, {\it Global regularity for the energy-critical NLS on $\mathbb{S}^3$,} arXiv:1210.3842 [mathAP], to appear in Ann. Inst. H. Poincar\'e Anal. Non Lin\'eaire. \betaibitem{R} N.~Rogovskaya, {\it An asymptotic formula for the number of solutions of a system of equations,} (Russian) Diophantine approximations, Part II (Russian), 78--84, Moskov. Gos. Univ., Moscow, 1986. \betaibitem{Strichartz} R.~S.~Strichartz, {\it Restrictions of Fourier transforms to quadratic surfaces and decay of solutions of wave equations.} Duke Math. J. \textbf{44} (1977), no. 3, 705--714. \betaibitem{Strunk} N.~Strunk, {\it Strichartz estimates for Schr\"odinger equations on irrational tori in two and three dimensions} arXiv:1401.6080 [math.AP]. \betaibitem{SULEM} C.~Sulem, P.L.~Sulem, {\it The nonlinear Schr\"odinger equation. 
Self-focusing and wave collapse,} Applied Mathematical Sciences, 139. Springer-Verlag, New York, 1999. xvi+350 pp. \betaibitem{TT} H.~Takaoka, N.~Tzvetkov, {\it On 2$D$ nonlinear Schr\"odinger equations with data on $\mathbb{R}\times\mathbb{T}$,} J. Funct. Anal. {\betaf 182} (2001), no. 2, 427--442. \betaibitem{TAO} T.~Tao, {\it Nonlinear dispersive equations. Local and global analysis,} CBMS Regional Conference Series in Mathematics, 106. Published for the Conference Board of the Mathematical Sciences, Washington, DC; by the American Mathematical Society, Providence, RI, 2006. xvi+373 pp. \betaibitem{Tsutsumi} Y.~Tsutsumi, {\it $L^2$-solutions for nonlinear Schr\"odinger equations and nonlinear groups,} Funkcial. Ekvac. {\betaf 30} (1987), no. 1, 115--125. \betaibitem{V} R.~Vaughan, {\it The Hardy-Littlewood method. Second edition}, Cambridge Tracts in Mathematics, 125. Cambridge University Press, Cambridge, 1997. xiv+232 pp. \betaibitem{Vi} I.M.~Vinogradov, {\it The method of trigonometrical sums in the theory of numbers,} Translated from the Russian, revised and annotated by K. F. Roth and Anne Davenport. Reprint of the 1954 translation. Dover Publications, Inc., Mineola, NY, 2004. x+180 pp. \betaibitem{W} Y.~Wang, {\it Periodic nonlinear Schr\"odinger equation in critical $H^s(\mathbb{T}^n)$ spaces}, SIAM J. Math. Anal. {\betaf 45} (2013), no. 3, 1691--1703. \betaibitem{Wo} T.~Wolff, {\it Lectures on harmonic analysis,} University Lecture Series, 29. American Mathematical Society, Providence, RI, 2003. x+137 pp. \betaibitem{Yajima} K.~Yajima, {\it Existence of solutions for Schr\"odinger evolution equations.} Comm. Math. Phys. \textbf{110} (1987), no. 3, 415--426. \end{thebibliography} \end{document}
\begin{document} \title{Evaluating Gaussian processes for sparse irregular spatio-temporal data} \author{\name Mehmet S{\"u}zen \thanks{Correspondence E-mail: [email protected]} and Abed Ajraou} \editor{na} \maketitle \begin{abstract} A practical approach to evaluate the performance of Gaussian process regression (GPR) models for irregularly sampled sparse time-series is introduced. The approach entails construction of a secondary autoregressive model using the fine scale predictions to forecast a future observation used in GPR. We build different GPR models for Ornstein-Uhlenbeck and fractional processes for simulated toy data with different sparsity levels to assess the utility of the approach. \end{abstract} \begin{keywords} Gaussian Processes, autoregressive models. \end{keywords} \section{Introduction} Time-series analysis is a very common subject that manifests itself in the sciences and in industry [\cite{hamilton1994time}]. Temporal data is rarely available at regular intervals and with sufficient sample size, i.e., it is sparse, irregularly occurring and noisy [\cite{richards2011a}]. In these circumstances, standard modelling techniques would not be appropriate. Gaussian processes provide a powerful alternative [\cite{williams2006a}], where prior knowledge can be used without any restrictions on regularity or sparsity of the temporal data [\cite{roberts2013a}]. But a measure of goodness of fit would be an issue in this setting. A usual approach is to measure goodness of fit by comparing the results against a gold standard result. Here we propose an approach to measure the performance of a GP for sparse time-series without resorting to a gold standard result, via building a secondary model based on the resulting reconstruction. \section{Gaussian Processes} A short sketch of the machinery of Gaussian processes [\cite{williams2006a}] is presented as follows.
The starting point for Gaussian process is to define an arbitrary function that explains the outcome with a variance $\sigma_{y}$ and noise $\epsilon_{t}$, $$y(t) = f(t) + \sigma_{y} \epsilon_{t}$$ The primary approach in GP is that training time-series $(y,x)$ and $(y^{*},x^{*})$ the time-series that is to be learned can be expressed in a joint distribution, which is a multivariate Gaussian distribution, \begin{displaymath} p(y,y^{*}) = \mathcal{N} \Big( \begin{bmatrix} x \\ x^{*} \end{bmatrix}, \begin{bmatrix} K_{xx} & K_{xx^{*}} \\ K_{x^{*}x} & K_{x^{*}x^{*}} \end{bmatrix} \Big) \end{displaymath} where $x \in \mathbb{R}^{n x b}$ and $x^{*} \in \mathbb{R}^{m x d}$, where $n$ and $m$ are number of rows and $d$ is the number of predictors, the Kernel matrices can be computed as follows, in multivariate setting, \begin{displaymath} K_{i,j}(x_{i}, x_{j}) = - \sigma^{2} \exp\Bigg(-\beta \sum_{k=1}^{d} \Big( \frac{x_{i}^{k}-x_{j}^{k}}{l_{d}} \Big)^{\alpha_{d}}\Bigg) \end{displaymath} $i=1,..,n$ and $j=1,..,m$, where $d$ is the number of features or variates at each time point. Kernel choice here is not unique but this is a generalised form of the square exponential kernel. Hyperparameters $(\sigma^{2}, \beta, l, \alpha )$ can be interpreted as signal variance, scaling factor, length scale and roughnes on the time-series respectively. Hence, GP as a function approximation can be obtained in a closed form, the mean function and the covariance matrix, \begin{align*} L & = (K_{xx} + \sigma^{2}\mathbb{I})^{-1} \\ \bar{y^{*}} & = K_{x^{*}x} L^{-1} y \\ y^{*}_{cov} & = K_{x^{*}x^{*}}-K_{x^{*}x} L^{-1} K_{xx^{*}} \end{align*} Note that $y^{*}$ is the observations that is to be learned from the training data. \section{Evaluation Technique} A variable of interest $y$ appears regularly over time. A temporal evolution of $y$ can be expressed with ordered set $\Omega^{*}=(t^{*}_{i}, y^{*}_{i})$, where $i=1,..,n$. 
Only a subset of $\Omega^{*}$ may be observed, at irregularly spaced intervals and not too frequently, i.e., it is sparse. These sparse observations of $y$ appear in the subset $\Omega=(t_{j}, y_{k})$, where $k \in \{1,\dots,n\}$, as an ordered but irregular sequence, i.e., an irregular time-series. Building a Gaussian Process model to reconstruct the original time series $\Omega^{*}$ using the partial information $\Omega$ is one of the most promising approaches for sparse irregular temporal data [\cite{richards2011a}]. The resulting series can be denoted by $\Omega^{*}_{gp}=(t^{*}_{i}, Y^{*}_{i})$. Performance of the resulting reconstruction $\Omega^{*}_{gp}$ is usually measured against a gold standard method [\cite{roberts2013a}]. We propose using a secondary autoregressive (AR) model based on the resulting set $\Omega^{*}_{gp}$. We construct $m-1$ different autoregressive models to predict the observation points $y_{j}$. The procedure is as follows. \begin{enumerate} \item Fix a horizon $h$ for the autoregressive model prediction. \item Select the $m-1$ subsets of $\Omega^{*}_{gp}$, each up to a point $y_{j}$, $j_{k}-h$, denoting each subset $(\Omega^{*}_{gp})_{k}$, where $k=2,\dots,m$. \item Build an AR model for each $(\Omega^{*}_{gp})_{k}$ and predict the next $y_{k}$ as $y_{k}^{ar}$. \item The Mean Absolute Percent Error (MAPE-AR) can be computed, $M = \frac{1}{m-1} \sum_{k=2}^{m} \left| (y_{k}-y_{k}^{ar})/y_{k} \right|$. \end{enumerate} The MAPE-AR value quantifies the goodness-of-fit of the GP regression without resorting to a gold standard method. \subsection{Simulated Data} \begin{figure} \caption{Simulated Ornstein-Uhlenbeck and fractional processes with $3$ percent sparsity level.
Two functions are drawn from the resulting GP regression.} \end{figure} A pair of toy data sets is generated, using the generalised form of the square exponential kernel, for an Ornstein-Uhlenbeck and a fractional process with kernel hyperparameters $(\alpha, \beta, l, \sigma)$ equal to $(1.0, 1.0, 2.0, 1.0)$ and $(1.3, 1.0, 2.0, 1.0)$ respectively. The simulated data contains $351$ observation points with a regular time spacing of $0.02$. New subsets of observations are generated by randomly selecting $3$, $5$ and $7$ percent of the simulated data, at least $5$ time-steps apart. This yields different sparsity levels. \subsection{Experiments} We fit a Gaussian process on these sparse data sets using the square exponential kernel. Results for the $3$ percent sparse data sets are shown in Figure 1. Using a seasonal $\mathrm{ARIMA}(1,1,1)(1,1,1)$ as a secondary autoregressive model, the MAPE-AR measure is summarized in Figure 2, for demonstration purposes. \begin{figure} \caption{MAPE-AR measure of goodness-of-fit results for different sparsity levels.} \end{figure} \section{Summary} A technique to measure goodness-of-fit of Gaussian processes for sparse temporal data is proposed, based on building a secondary autoregressive model on the reconstructed regularly spaced data. In our empirical investigation we have demonstrated the utility of the approach using simulated data. \end{document}
\begin{document} \makeRR \tableofcontents \section{Introduction} The concept of random dynamical systems has appeared to be a useful tool for modelers in many different fields, especially in computational neuroscience. Indeed, introducing noise in a system can be either a way to describe reality more sharply or to generate new behaviors absent from the deterministic world. Naively, the definition of such systems generalizes the usual deterministic one as follows, \begin{equation} \label{eq:random system} {\bf x}_{i+1} = {\bf f}_i({\bf x}_i,i,\mbox{\boldmath$\xi$}_i(\omega)) \qquad \mbox{or} \qquad \dot{\bf x}_t = {\bf f}({\bf x}_t,\mbox{\boldmath$\xi$}_t(\omega),t) \end{equation} where $\mbox{\boldmath$\xi$}_i$ (resp. $\mbox{\boldmath$\xi$}_t$) defines a discrete (resp. continuous) stochastic process. Unfortunately, it turns out that whereas the extension of the definition in the discrete time case is rather straightforward, things get much harder for continuous time. Indeed, equation~\ref{eq:random system} does not make sense for every continuous stochastic process. There are basically two different ways of fixing this problem: \begin{enumerate} \item One can restrict the definition to sufficiently smooth stochastic processes, namely those whose trajectories are \emph{c\`adl\`ag} (see subsection \ref{sct:continuous}). This gives rise to \emph{random differential equations}. \item Or, as important stochastic processes like \emph{white noise} do not satisfy this property, one can switch to stochastic differential equations and It\^o calculus. \end{enumerate} When first looking at it, It\^o calculus seems more accurate as it just generalizes the usual differential calculus. But the problem of this formalism lies in the absence of ``realisability'' of the systems so defined. By realisability, we mean the ability to simulate the behavior of a system by means of a computer.
Hence, all results we can obtain are purely theoretical, and it is hard to build up an intuition of how such a system evolves. From a modeling point of view, this lack of manageability makes this formalism unusable in most fields, as we are hardly able to find explicit solutions of a huge dynamical system as encountered in biology, and this even in the deterministic case. It follows that we need our system to be realizable, and so we must restrict our attention to random differential equations. Once this restriction is performed, we can really solve our system with the traditional tools of differential analysis. Indeed, it now makes sense to fix a draw in the probability space and solve the equation, treating the noise as an external input. We can thus describe the trajectory of the system given a sample path of the stochastic process. As a result, we are also able to use deterministic contraction theory to study stability when the noise never puts the system out of contraction bounds. But this implies that every trajectory is contracting, and so noise does not really matter. In this paper, we will investigate new definitions of stochastic stability together with sufficient conditions to guarantee that a random differential system has a nice behavior even if the noise can induce partial divergence in the trajectories. The notations of this paper follow those of the keystone reference \cite{arnold98}. \section{Part 1: State's dependency} \subsection{Nonlinear random systems: the discrete time case} \label{sct:discrete} \subsubsection{Almost sure contraction: asymptotic exponential convergence $\mbox{a.s.}$} \label{sct:almost} As a first step, we define stochastic contraction in the field of discrete systems. In that case, there is no problem regarding the equation generating the dynamical system as it is just the iterated application of possibly different functions.
We are dealing with stochastic processes of the form: $$ \mbox{\boldmath$\xi$} : \Omega \times \mathbb{N} \rightarrow \mathbb{R}^n $$ i.e.\ we assume $\mbox{\boldmath$\xi$}$ to be equivalent to a sequence of random variables $\mbox{\boldmath$\xi$}_i : \Omega \rightarrow \mathbb{R}^n$. The definition of contraction in the discrete-time case is a direct extension of the deterministic case and makes use of the notion of \emph{discrete stochastic differential systems} (\cite{jaswinski70}) of the form $${\bf x}_{i+1} = {\bf f}_i({\bf x}_i,i,\mbox{\boldmath$\xi$}_i(\omega))$$ where the ${\bf f}_i$ are continuously differentiable (a condition that will be assumed in the rest of this paper). This definition is the natural extension of the definition of \cite{Lohmiller98}, which asserted that the difference between two trajectories tends to zero exponentially fast. In the stochastic case, we look for similar conditions satisfied almost surely. First, we have to reformulate the traditional property satisfied by the metric allowing a space deformation. In the deterministic case, the metric $M_i({\bf x}_i,i)$ has to be uniformly positive definite with respect to ${\bf x}_i$ and $i$. This property of uniformity for a metric $M_i({\bf x},i,\mbox{\boldmath$\xi$}_i)$ depending also on noise can be written $$ \exists \lambda \quad \forall {\bf x},i \quad M({\bf x},i,{\bf \mbox{\boldmath$\xi$}}_i) \geq \lambda {\bf I} \quad \mbox{a.s.} $$ But as we want the noise to introduce local bad behaviors, we need to relax the property in the sense that contraction can only be guaranteed asymptotically. This introduces a slight difficulty as the naive formulation $$ \exists 0 \leq \alpha < 1 \qquad \lim_{n \rightarrow \infty}(\| \delta {\bf z} \| -\| \delta {\bf z}_o \| \alpha^n) = 0 $$ just says that $\| \delta {\bf z} \|$ tends to zero, which is not what we want. We thus have to switch to the following refined formulation.
\begin{defn} The random system ${\bf x}_{i+1} = {\bf f}_i({\bf x}_i,i,\mbox{\boldmath$\xi$}_i(\omega))$ is said to be \emph{almost surely contracting} if there exists a uniformly positive definite metric $M_i({\bf x},i,\mbox{\boldmath$\xi$}_i)$ and $\eta < 0$ such that: $$ \mathbb{P}\{\omega \in \Omega, \ \limsup_{n \rightarrow \infty} \frac{1}{n} \log(\| \delta {\bf z} \|) \leq \eta \} = 1 $$ i.e.\ the difference between two trajectories tends almost surely to zero exponentially. \end{defn} \begin{rem} \begin{enumerate} \item The notion of contracting region cannot be extended to the stochastic case as it is hardly possible to guarantee that a stochastic trajectory stays in a ball without requiring strong bounds on the noise. And in that case, the noise can be treated as a bounded perturbation by analyzing the worst case. \item The definition makes use of the logarithm whereas $\| \delta {\bf z} \|$ can be equal to $0$. Nevertheless, the reader should not be deterred and each time such a case appears, the equation is also satisfied if we allow infinite values and basic analytic extension (for example, $\mathbb{E}( \log \alpha) =- \infty$ as soon as $\alpha(\omega) = 0$ for some $\omega$). \end{enumerate} \end{rem} We can now state the first theorem of this paper. Remark that in the definition of the system below, the dependence on the stochastic perturbation is almost linear, as we cannot control a malicious non-linear system which strongly uses the ``bad draw'' of the stochastic process to diverge. In a sense, the system must satisfy a notion of uniformity with respect to the stochastic process. \begin{thm}\label{thm:ascDiscret} Given the random system ${\bf x}_{i+1} = {\bf f}_i({\bf x}_i,i,\mbox{\boldmath$\xi$}_i(\omega))$, denote by $\sigma_{f_i}({\bf x}_i,i,\mbox{\boldmath$\xi$}_i)$ the largest singular value of the generalized Jacobian of ${\bf f}_i$ at time $i$ according to some metric $M_i({\bf x},i,\mbox{\boldmath$\xi$}_i)$.
\\ A sufficient condition for the system to be \emph{almost surely contracting} is that \begin{itemize} \item the random process $\sigma_{f_i}({\bf x},i,\mbox{\boldmath$\xi$}_i)$ can be bounded independently from ${\bf x}$, ie there exists a stochastic process $\eta_i$ such that $$ \forall {\bf x} \quad \sigma_{f_i}({\bf x},i,\mbox{\boldmath$\xi$}_i) \leq \eta_i \qquad \mbox{a.s.} $$ \item the stochastic process $\log(\eta_i)$ follows the strong law of large number (eg. i.i.d.) $$ \frac{1}{n} \sum_{i=1}^n \log(\eta_i) \rightarrow \frac{1}{n} \sum_{i=1}^n \mathbb{E}(\log(\eta_i)) \qquad \mbox{a.s.} $$ \item the expectation of the random variables $\log(\eta_i)$ can be uniformly bounded by some $\eta < 0$ $$ \forall i \quad \mathbb{E}(\log(\eta_i)) \leq \eta $$ \end{itemize} \end{thm} \begin{pf*}{Proof.} We make a strong use of the basic idea of the original proof. Note $F_i$ the discrete generalized Jacobian of $f_i$ : $ F_i({\bf x},i,\mbox{\boldmath$\xi$}_i) \ = \ \Theta_{i+1} \frac{\partial}{\partial {\bf x}} {\bf f}_i({\bf x},i,{\bf \mbox{\boldmath$\xi$}}_i) \Theta^{-1}_i$ we have: $$ \begin{array}{lrcl} & {\bf \delta {\bf z}}_{i+1}^T {\bf \delta {\bf z}}_{i+1} & = & {\bf \delta {\bf z}}_i^T ( F_i^T F_i) \ {\bf \delta {\bf z}}_i \\ \\ \Rightarrow \quad & {\bf \delta {\bf z}}_{i+1}^T {\bf \delta {\bf z}}_{i+1} & \le & \sigma^2_{f_i}({\bf x}_i,i,\mbox{\boldmath$\xi$}_i) \ {\bf \delta {\bf z}}_i^T {\bf \delta {\bf z}}_i \qquad \mbox{a.s.} \end{array} $$ and hence, $$ \| \delta {\bf z}_n \|^2 \le \| \delta {\bf z}_o \|^2 \prod_{i=0}^n \sigma^2_{f_i}({\bf x}_i,i,\mbox{\boldmath$\xi$}_i) \qquad \mbox{a.s.} $$ So by monotony of logarithm and the two required properties, we can deduce that for almost every $\omega$ $$ \begin{array}{rcl} \limsup_{n \rightarrow \infty} \frac{1}{n} \log(\| \delta {\bf z}_n \|) & \leq & \limsup_{n \rightarrow \infty} \frac{1}{n} \log(\prod_{i=0}^n \sigma_{f_i}({\bf x}_i,i,{\bf \mbox{\boldmath$\xi$}}_i)) \\ & \leq & \limsup_{n \rightarrow 
\infty} \frac{1}{n} \sum_{i=0}^n \log(\eta_i) \\ & = & \limsup_{n \rightarrow \infty} \frac{1}{n} \sum_{i=0}^n \mathbb{E}(\log(\eta_i)) \\ & \leq & \eta \\ \end{array} $$ That is $$ \mathbb{P}\{\omega \in \Omega, \ \limsup_{n \rightarrow \infty} \frac{1}{n} \log(\| \delta {\bf z} \|) \leq \eta \} = 1 $$ $\square$ \end{pf*} \subsubsection{Contraction in mean square: asymptotic exponential convergence in mean square} We have seen in the subsection above sufficient conditions to guarantee almost sure asymptotic exponential convergence. But we could also be interested in looking for conditions to guarantee exponential convergence in mean square. That's what we are trying to capture with the notion of \emph{contraction in mean square}. \begin{defn} The random system ${\bf x}_{i+1} = {\bf f}_i({\bf x}_i,i,\mbox{\boldmath$\xi$}_i(\omega))$, is said to be \emph{contracting in mean square} if there exists an uniformly positive definite metric $M_i({\bf x},i,\mbox{\boldmath$\xi$}_i)$ such that: $$ \exists 0 \leq \eta < 1 \quad \mathbb{E}( \| \delta {\bf z}(i,\mbox{\boldmath$\xi$}_i) \|^2 ) \leq \|\delta {\bf z}_o\|^2 \eta^i $$ \end{defn} \begin{thm}\label{thm:expectDiscret} Given the random system , $ {\bf x}_{i+1} = {\bf f}_i({\bf x}_i,i,\mbox{\boldmath$\xi$}_i(\omega))$ note $\sigma_{f_i}({\bf x},i,\mbox{\boldmath$\xi$}_i)$ the largest singular value of the generalized Jacobian of ${\bf f}_i$ at time i according to some metric $M$. 
\\ A sufficient condition for the system to be \emph{contracting in mean square} is that \begin{itemize} \item the random process $\sigma_{f_i}({\bf x},i,\mbox{\boldmath$\xi$}_i)$ can be bounded independently from ${\bf x}$, i.e.\ there exists a stochastic process $\eta_i$ such that $$ \forall {\bf x} \quad \sigma_{f_i}({\bf x},i,\mbox{\boldmath$\xi$}_i) \leq \eta_i \qquad \mbox{a.s.} $$ \item the stochastic process $\eta_i$ consists of independent random variables \item the expectation of the random variables $\eta_i^2$ can be uniformly bounded by some $0 \leq \eta < 1$ $$ \forall i \quad \mathbb{E}(\eta_i^2) \leq \eta $$ \end{itemize} \end{thm} \begin{pf*}{Proof.} Denoting by $F_i$ the discrete generalized Jacobian of $f_i$, we have again $$ {\bf \delta {\bf z}}_{i+1}^T {\bf \delta {\bf z}}_{i+1} \le \sigma^2_{f_i}({\bf x}_i,i,\mbox{\boldmath$\xi$}_i) \ {\bf \delta {\bf z}}_i^T {\bf \delta {\bf z}}_i \leq \eta_i^2 \ {\bf \delta {\bf z}}_i^T {\bf \delta {\bf z}}_i \qquad \mbox{a.s.} $$ Introducing the expectation value of $\| \delta {\bf z} \|^2$, we use that independence between $X$ and $Y$ is defined as the uncorrelation of $f(X)$ and $g(Y)$ for all measurable functions $f$ and $g$. $$ \mathbb{E}(\| {\bf \delta {\bf z}}_{i+1} \|^2 ) \ \leq \ \mathbb{E}(\eta_i^2 \| {\bf \delta {\bf z}}_i \|^2) = \mathbb{E}(\eta_i^2) \mathbb{E}(\| {\bf \delta {\bf z}}_i \|^2) $$ and hence \begin{equation*} \mathbb{E}( \| \delta {\bf z}_i \|^2) \le \| \delta {\bf z}_o \|^2 \eta^i \end{equation*} $\square$ \end{pf*} \subsection{Stochastic gradient} Let us have a look at a stochastic way of minimizing a function, highly used in the computational neuroscience community, called \emph{stochastic gradient}. The idea is to use the traditional minimization by \emph{gradient descent} but we want to avoid explicit computation of the gradient as it is generally of high cost or even infeasible. For that, we introduce a stochastic perturbation which has the role of a ``random differentiation''.
Let $\hat{P} = P + \Pi$ be the perturbation of the state $P$ with respect to the vector $\Pi$ of stochastic processes $\Pi_i$. Define the discrete system $$ P_{n+1} = P_n - \mu.(\mathcal{E}(\hat{P_n}) -\mathcal{E}(P_n))\Pi $$ with $\mu > 0$. Provided that the $\Pi_i$'s are mutually uncorrelated and of auto-correlation $\sigma^2$ (i.e.\ $\mathbb{E}(\Pi_i.\Pi_j) = \sigma^2.\delta_ {i,j}$), the system satisfies: $$ \delta P_{n+1} = \delta P_{n} - \mu. (\sum_k \frac{\partial^2 \mathcal{E}}{\partial p_k \partial p_i}(P^*) \Pi_k.\Pi_j)_{i,j}.\delta P_{n} $$ where $P^*$ is given by the finite difference theorem. So, by taking the expectation: $$ \mathbb{E}(\delta P_{n+1}) = (I - \mu.\sigma^2.\frac{\partial^2 \mathcal{E}} {\partial P_n^2}).\mathbb{E}(\delta P_{n}) $$ So the system is contracting in mean square if \begin{itemize} \item $\frac{\partial^2 \mathcal{E}}{\partial P_n^2} > 0$ that is $\mathcal{E}$ is strictly convex. \item $\mu.\sigma^2\frac{\partial^2 \mathcal{E}}{\partial P_n^2} < I$ (that is $\mu.\sigma^2$ sufficiently small) \end{itemize} \subsection{Nonlinear random system : the continuous time case} \label{sct:continuous} We have seen in subsection \ref{sct:discrete} that the notion of contraction for discrete-time varying systems harmonizes well with stochastic calculus. Unfortunately the story is less straightforward in the continuous-time case. Nevertheless, as outlined in the introduction, for some practical reasons, we can restrict our attention to the case of random differential systems as defined in \cite{arnold98}. Let us briefly summarize the technical background. We want to define the stochastic extension of deterministic differential systems as follows.
\begin{equation} \label{eq:continuous} \dot{\bf x}_t = {\bf f}({\bf x}_t,t,\mbox{\boldmath$\xi$}_t(\omega)) \end{equation} where $\mbox{\boldmath$\xi$}_t$ is a continuous stochastic process and ${\bf f}$ is a sufficiently smooth function, namely continuously differentiable with respect to ${\bf x}_t$ (the condition on ${\bf f}$ can be reduced to a lipschitz condition, but as we need differentiability in the rest of this paper, we prefer to assume it right now). But this formulation does not make sense for every kind of continuous processes. Typically, when dealing with White noise process, the right-hand part of equation \ref{eq:continuous} does not present finite variation. In order to overcome this difficulty, we will assume that $\mbox{\boldmath$\xi$}_t$ is a ``nice'' stationary stochastic process whose trajectories are cadlag (for the french ``continue à droite et avec des limites à gauche''), ie. are right continuous with left-hand limits. Arnold proved in \cite{arnold98} that under some assumption on ${\bf f}$, equation \ref{eq:continuous} admits a unique solution which is a global flow, whereas in general it is just a local flow. \begin{thm}[\cite{arnold98}] Suppose that $\mbox{\boldmath$\xi$}$ is cadlag and ${\bf f} \in \mathcal{C}^1$. Then, equation \ref{eq:continuous} admit a unique maximal solution which is a local random dynamical system continuously differentiable. If furthermore ${\bf f}$ satisfies $$ \| {\bf f}(\mbox{\boldmath$\xi$},{\bf x})\| \leq \alpha(\mbox{\boldmath$\xi$}) \|{\bf x}\| + \beta(\mbox{\boldmath$\xi$}) $$ where $t \rightarrow \alpha(\mbox{\boldmath$\xi$}_t(\omega))$ and $t \rightarrow \beta(\mbox{\boldmath$\xi$}_t(\omega))$ are locally integrable, then the solution is a global RDS. \end{thm} Thus, we cannot assume that random differential system defines a unique continuous trajectory for every $\omega$. 
This problem is also known in the deterministic case where Vidyasagar has shown the prevalence of differential equations despite our knowledge of only very restrictive characterizations. Indeed, the set of equations admitting a unique solution is non-meager, whereas the set of equations we are able to exhibit is meager. That is, ``practically all'' equations admit a unique solution whereas we can characterize ``practically none'' of them! So we will assume in the rest of this paper that the solution of the differential equation exists and is a unique continuously differentiable RDS. All those restrictions are rather technical and we refer the interested reader to \cite{arnold98} for further explanations. We can yet give two slogans reformulating intuitions coming from those restrictions: \begin{description} \item[The perturbation is memoryless] The noise appearing in the right-hand side of equation \ref{eq:continuous} is \emph{memoryless} in the sense that only the value of the perturbation at time $t$ enters into the generator ${\bf f}$. \item[The perturbation does have small variations] The cadlag condition is a nice way to avoid problems generated by dramatically varying processes like the White noise process while allowing interesting discontinuous processes such as jump Markov processes. \end{description} From now on, when we talk about a \emph{random differential system}, we assume that the solution of equation \ref{eq:continuous} exists and is a unique continuously differentiable RDS. We also suppose that all the processes we are dealing with are stationary and have cadlag trajectories. \subsubsection{Almost sure contraction} We can now define the notion of almost sure contraction for the continuous-time case.
\begin{defn} A random differential system $\dot{\bf x}_t = {\bf f}({\bf x}_t,t,{\bf \mbox{\boldmath$\xi$}}_t(\omega))$ is said to be \emph{almost surely contracting} if there exists an uniformly positive definite metric $ M({\bf x},t,\mbox{\boldmath$\xi$}_t)$ and $\eta < 0$ such that: $$ \mathbb{P}\{\omega \in \Omega, \ \limsup_{t \rightarrow \infty} \frac{1}{t} \log(\| \delta {\bf z} \|) \leq \eta \} = 1 $$ ie. the difference between two trajectories tends almost surely to zero exponentially. \end{defn} We now state conditions for a system to be almost surely contracting. The traditional contraction analysis requires that the largest eigenvalue of the general Jacobian is uniformly bounded by a negative constant. The stochastic version of it mainly requires that the largest eigenvalue which define a process, is bounded by a process which follows the law of large number and of expectation uniformly negative. \begin{thm}\label{thm:asc} Given the system equations , $ \dot{\bf x}_t = {\bf f}({\bf x}_t,t,{\bf \mbox{\boldmath$\xi$}}_t(\omega))$ note $\lambda_f({\bf x},t,\mbox{\boldmath$\xi$}_t)$ the largest eigenvalue of the generalized Jacobian of ${\bf f}$ at time t according to some metric $M$. 
A sufficient condition for the system to be \emph{almost surely contracting} is that \begin{itemize} \item the random process $\lambda_f({\bf x},t,\mbox{\boldmath$\xi$}_t)$ can be bounded independently from ${\bf x}$, ie there exists a stochastic process $\eta_t$ such that $$\forall {\bf x} \quad \lambda_f({\bf x},t,\mbox{\boldmath$\xi$}_t) \leq \eta_t \qquad \mbox{a.s.} $$ \item the stochastic process $\eta_t$ follows the strong law of large number $$ \frac{1}{t} \int_0^t \eta_t \rightarrow \frac{1}{t} \int_0^t \mathbb{E}(\eta_t) \qquad \mbox{a.s.} $$ \item We can uniformly bound the expectation of the $\eta_t$ with some $\eta < 0$ $$\forall t \quad \mathbb{E}(\eta_t) \leq \eta $$ \end{itemize} \end{thm} \begin{pf*}{Proof.} We make a strong use of the basic idea of the original proof. $$ \frac{1}{2} \ \frac{d}{dt} ({\bf \delta {\bf z}}^T {\bf \delta {\bf z}}) \le \lambda_f({\bf x}_t,t,\mbox{\boldmath$\xi$}_t) \ {\bf \delta {\bf z}}^T {\bf \delta {\bf z}} \qquad \mbox{a.s.} $$ and hence $$ \| \delta {\bf z} \| \le \| \delta {\bf z}_o \| \ e^{\ \int\limits_o^t \eta_t dt} \qquad \mbox{a.s.} $$ Since $\eta_t$ verifies the law of large numbers, we have almost surely $$ \begin{array}{rcl} \limsup \frac{1}{t} \log \| \delta {\bf z} \| & \leq & \limsup \frac{1}{t} \int\limits_o^t \eta_t \ dt \\ & = &\limsup \frac{1}{t} \int\limits_o^t \mathbb{E}(\eta_t) dt \\ & \leq & \eta \end{array} $$ $\square$ \end{pf*} \begin{rem} It is reassuring that if we take a continuous random system satisfying conditions above, then the ``discrete envelop'' defined by $X_{n+1} = \exp [\int_{P_n} \lambda_f({\bf x}_t,t,\mbox{\boldmath$\xi$}_t)] X_{n}$ is a discrete almost surely contracting system. 
\end{rem} \subsubsection{Contraction in mean square} As in the discrete-time case, we would like to find sufficient conditions that guarantee the contraction in mean square of our system \begin{equation} \label{eq:continuous_average} \mathbb{E}(\| \delta {\bf z}(t,{\mbox{\boldmath$\xi$}_t}) \|^2) \leq \|\delta {\bf z}_o\|^2 e^{\eta t} \end{equation} Unfortunately, we have seen that this property requires discrete independent stochastic processes, whose continuous counterparts are processes like the White noise process. As we have refused to deal with that kind of processes, we need to find a stronger condition that nevertheless ensures a similar constraint on the average trajectory. That is why we are moving to a coarse-grained version of equation \ref{eq:continuous_average}, namely where the property is guaranteed only for a discrete sample of the average trajectory. This property will be assessed when dealing with stochastic processes which are coarse-grain independent, as defined below. \begin{defn} A random differential system $\dot{\bf x}_t = {\bf f}({\bf x}_t,t,\mbox{\boldmath$\xi$}_t(\omega))$ is \emph{coarse-grain contracting} if there exists a metric $ M({\bf x},t,\mbox{\boldmath$\xi$}_t)$ and a partition $t_1 < t_2 < \ldots$ such that: $$ \exists 0 \leq \eta < 1 \ , \ \forall i \quad \mathbb{E}(\| \delta {\bf z}_{t_i} \|^2) \leq \|\delta {\bf z}_o\|^2 \eta^i $$ \end{defn} To guarantee this property, we have to deal with a particular kind of continuous stochastic processes which satisfy a condition of independence on a coarse-grain scale.
\begin{defn}[coarse-grain independence] A continuous stochastic process $\eta_t$ is said to be \emph{coarse grain independent with respect to a partition} $(P_i)$ of $\mathbb{R}^+$ if $$ \mbox{the random variables } \eta_i = \int_{P_i} \eta_t \ dt \mbox{ are independent} $$ \end{defn} \begin{rem} \begin{enumerate} \item By a partition $(P_n)$, we mean equivalently a strictly increasing infinite sequence $t_1 < t_2 < \ldots$ or a sequence of intervals $P_0 = [0,t_1], P_i = (t_i,t_{i+1}]$ for $i\geq1$. \begin{comment} \item This condition is satisfied if for all $i \neq j$ and all $n \geq 1$ we have $$ \forall s_1,\ldots,s_n \in P_i, s'_1,\ldots,s'_n \in P_j \quad \sum_{k=1}^n \eta_{s_k}(\omega) \independent \sum_{k=1}^n \eta_{s'_k}(\omega) $$ \end{comment} \item In case of Gaussian or uniform random variables, the condition is satisfied if two random variables lying in two different sets of the partition are always independent. \end{enumerate} \end{rem} \paragraph*{Example of coarse grain independent process} We will now define the typical type of coarse grain independent process we have in mind. Take a partition $(P_n)$ of $\mathbb{R}^+$ and an independent stochastic process $G_n(\omega)$. Define the process $\gamma_t(\omega) = G_n(\omega)$ for $t \in P_n$. Then $\gamma_t(\omega)$ is a coarse grain independent process. In that case, each trajectory is piecewise constant and we have $$ \int_{P_n} \gamma_{t} dt = |P_n|.G_n \mbox{ define an independent stochastic process} $$ \begin{figure} \caption{Example of a trajectory of a coarse grain independent process} \label{fig:coarse-grain} \end{figure} \begin{thm}\label{thm:expect} Given the system equations , $ \dot{\bf x}_t = {\bf f}({\bf x}_t,t,{\bf \mbox{\boldmath$\xi$}}_t(\omega))$ note $\lambda_f({\bf x},t,\mbox{\boldmath$\xi$}_t)$ the largest eigenvalue of the generalized Jacobian of ${\bf f}$ at time t according to some metric $M$. 
A sufficient condition for the system to be \emph{coarse-grain contracting} is that \begin{itemize} \item the random process $\lambda_f({\bf x},t,\mbox{\boldmath$\xi$}_t)$ can be bounded independently from ${\bf x}$, ie there exists a stochastic process $\eta_t$ such that $$ \forall {\bf x} \quad \lambda_f({\bf x}_t,t,\mbox{\boldmath$\xi$}_t) \leq \eta_t \qquad \mbox{a.s.} $$ \item the process $\eta_t$ is a coarse-grain independent stochastic process with respect to a partition $(P_n)$. \item We can uniformly bound the expectation of the $e^{\int_{P_n} \eta_t}$ with some $0\leq \eta < 1$ $$\forall n \quad \mathbb{E}((e^{\int_{P_n} \eta_t})^2) \leq \eta $$ \end{itemize} \end{thm} \begin{pf*}{Proof.} $$ \frac{1}{2} \ \frac{d}{dt} ({\bf \delta {\bf z}}^T {\bf \delta {\bf z}}) \le \lambda_f({\bf x}_t,t,\mbox{\boldmath$\xi$}_t) \ {\bf \delta {\bf z}}^T {\bf \delta {\bf z}} \le \eta_t \ \delta {\bf z}^T \delta {\bf z} \qquad \mbox{a.s.} $$ Which leads to $$ \|\delta {\bf z}\| \leq \| \delta {\bf z}_0 \| \ e^{\int_0^t \eta_t dt} \qquad \mbox{a.s.} $$ Thus, we can define the system $Z_{n+1} = e^{\int_{t_n}^{t_{n+1}} \eta_t dt} Z_n$ and $Z_0 = \| \delta {\bf z}_0 \|$, which satisfies $$ \|\delta {\bf z}_{t_n}\| \leq Z_n $$ By definition of coarse-grain stochastic process and as $\mathbb{E}((e^{\int_{P_n} \eta_t})^2) \leq \eta$, we can applied theorem \ref{thm:expectDiscret} to conclude on the contraction in mean square of $Z_n$ with rate $\eta$ $$ \mathbb{E} (\| {\bf \delta {\bf z}}_{t_i} \|^2) \leq \mathbb{E}(Z_i^2) \leq \| \delta {\bf z}_0 \|^2 \eta^i $$ $\square$ \end{pf*} \begin{rem} The condition imposed on the process $\eta_t$, namely $\mathbb{E}((e^{\int_{P_n} \eta_t})^2) \leq \eta < 1$, is really different from the condition we have seen for the almost sure contraction $\mathbb{E}(\eta_t) \leq \eta < 0$. 
\end{rem} \section{Part 2 : Noise's dependency} Let us now turn on a very special case of perturbed systems, namely when the impact of the noise does not depend on the state space. \begin{prop} Consider a contracting system $ \dot{\bf \widehat{x}} = {\bf f}({\bf \widehat{x}},t)$ and take a perturbed version of it $ \dot{\bf x}_t = {\bf f}({\bf x}_t,t) + {\bf \mbox{\boldmath$\xi$}}_t(\omega)$. The system is automatically both contracting on average and almost surely contracting. \end{prop} The proof is obvious as it is the case mention above of a system which is contracting for every $\omega$. Let us now study the mean and the variance of the unique solution. \subsection{Study of the average trajectory} \label{sct:average trajectory} Consider a contracting system $ \dot{\bf \widehat{x}} = {\bf f}({\bf \widehat{x}},t)$ in the metric $M = \mathnormal{I}$, take a perturbed version of it $ \dot{\bf x}_t = {\bf f}({\bf x}_t,t) + {\bf \mbox{\boldmath$\xi$}}_t(\omega)$. Then, assuming that \begin{itemize} \item $\mathbb{E}(\mbox{\boldmath$\xi$}_t) = 0$ \item $\forall t \ \|\mbox{\boldmath$\xi$}_t\| < \alpha$ almost surely with $\mathbb{E}(\alpha) < \infty$ \end{itemize} we have that $$ \forall t \quad \mathbb{E}({\bf x}_t) = {\bf \widehat{x}}_t $$ \begin{pf*}{Proof.} Let ${\bf x_\Delta} = {\bf \widehat{x}} - \mathbb{E}({\bf x}_t)$ and consider \begin{eqnarray*} {\bf x}_\Delta^T \frac{d}{dt}({\bf x}_\Delta) & = & {\bf x}_\Delta^T ({\bf f}({\bf \widehat{x}},t) - \mathbb{E}({\bf f}({\bf x}_t,t))) \quad \mbox{by dominated convergence theorem}\\ & = & {\bf x}_\Delta^T \mathbb{E}({\bf f}({\bf \widehat{x}},t) - {\bf f}({\bf x}_t,t)) \\ & = & {\bf x}_\Delta^T \mathbb{E}(\int_0^1 \frac{\partial f}{\partial {\bf x}} ({\bf \widehat{x}} + c {\bf x}_\Delta,t) dc \ {\bf x_\Delta}) \\ & \leq & \mathbb{E}(\int_0^1 \lambda_f^{max}{\bf x}_\Delta^T {\bf x_\Delta}) \\ & = & \lambda_f^{max} \| {\bf x}_\Delta \| ^2 \end{eqnarray*} So we have that $\| {\bf x}_\Delta \| \leq \| {\bf x}_{\Delta_0} 
\| e^{2 \lambda_f^{max} t}$. But $\| {\bf x}_{\Delta_0} \| = 0$. $\square$ \end{pf*} \subsection{Study of deviation} \begin{thm} Take random system $ \dot{\bf x}_t = {\bf f}({\bf x}_t,t) + {\mbox{\boldmath$\xi$}_t(\omega)}$ satisfying the conditions of the subsection \ref{sct:average trajectory}. Suppose now that the deviations of the $\mbox{\boldmath$\xi$}_t$ are uniformly bounded $$ \mathbb{E}(\|\mbox{\boldmath$\xi$}_t\|) \leq \sigma $$ The deviation of ${\bf x}_t$ is then majored by the maximum deviation $\sigma$ in the following way: $$ \mathbb{E}(\| {\bf x_1} - {\bf x_2} \|) - \mathbb{E}(\| {\bf {\bf x_1} - {\bf x_2}} \|_0) e^{\lambda_f^{max} \ t} \leq \frac{2 \sigma}{|\lambda_f^{max}|} (1 - e^{\lambda_f^{max} \ t})) $$ \end{thm} \begin{pf*}{Proof.} Let ${\bf \widetilde{x}} = {\bf x_1} - {\bf x_2}$ Let us look at $\|{\bf \widetilde{x}}\|$ $$ \left. \begin{array}{l} \dot{\bf x_1} = {\bf f}({\bf x_1},t) + {\mbox{\boldmath$\xi$}_1} \\ \dot{\bf x_2} = {\bf f}({\bf x_2},t) + {\mbox{\boldmath$\xi$}_2} \\ \end{array} \right\} \Rightarrow \frac{d}{dt}{\bf \widetilde{x}} = \int_0^1 \frac{\partial f}{\partial {\bf x}} ({\bf x_2} + c {\bf \widetilde{x}},t) dc \ {\bf \widetilde{x}} + {\mbox{\boldmath$\xi$}_1} - \mbox{\boldmath$\xi$}_2 $$ Multiply by ${\bf \widetilde{x}^T}$, it becomes: $$ {\bf \widetilde{x}^T} \frac{d}{dt}{\bf \widetilde{x}} = {\bf \widetilde{x}^T} \int_0^1 \frac{\partial f}{\partial {\bf x}} ({\bf x_2} + c {\bf \widetilde{x}},t) dc \ {\bf \widetilde{x}} + {\bf \widetilde{x}^T} (\mbox{\boldmath$\xi$}_1 - \mbox{\boldmath$\xi$}_2) $$ $$ \frac{1}{2} \frac{d}{dt}{\| \bf \widetilde{x} \| ^2} \leq \lambda_f^{max} \|{\bf \widetilde{x}}\|^2 + \|{\bf \widetilde{x}}\| \ \|\mbox{\boldmath$\xi$}_1 - \mbox{\boldmath$\xi$}_2\| $$ So, by dominated convergence theorem again $$ \frac{d}{dt} \mathbb{E}(\|{ \bf \widetilde{x}} \|) \leq \lambda_f^{max} \mathbb{E}(\|{\bf \widetilde{x}}\|) + \mathbb{E}(\|\mbox{\boldmath$\xi$}_1 - \mbox{\boldmath$\xi$}_2\|) \leq \lambda_f^{max} 
\mathbb{E}(\|{\bf \widetilde{x}}\|) + 2 \sigma $$ Solving $\frac{d}{dt} \mathbb{E}(\|{ \bf \widetilde{x}} \|) = \lambda_f^{max} \mathbb{E}(\|{\bf \widetilde{x}}\|) + 2 \sigma$ and using the positiveness of all terms in the equation (which means that replacing $=$ by $\leq$ just make the slope of increasement smaller), we have : $$ \mathbb{E}(\| {\bf \widetilde{x}} \|) - \mathbb{E}(\| {\bf \widetilde{x}} \|_0) e^{\lambda_f^{max} \ t} \leq \frac{2 \sigma}{|\lambda_f^{max}|} (1 - e^{\lambda_f^{max} \ t})) $$ $\square$ \end{pf*} \begin{comment} \subsection{Study of deviation} \begin{thm} Take random system $ \dot{\bf x}_t = {\bf f}({\bf x}_t,t) + {\mbox{\boldmath$\xi$}_t(\omega)}$ satisfying the conditions of the subsection \ref{sct:average trajectory}. Suppose now that the deviations of the $\mbox{\boldmath$\xi$}_t$ are uniformly bounded $$ \mathbb{E}(\|\mbox{\boldmath$\xi$}_t\|) \leq \sigma $$ The deviation of ${\bf x}_t$ is then majored by the maximum deviation $\sigma$ in the following way: $$ \forall t \quad \mathbb{E}(\|{\bf x}_t - \mathbb{E}({\bf x}_t) \|) \leq \frac{\sigma}{|\lambda_f^{max}|} $$ \end{thm} \begin{pf*}{Proof.} Let ${\bf \widehat{x}}$ be the state define by the deterministic part of the above system, that is $ \dot{\bf \widehat{x}} = {\bf f}({\bf \widehat{x}},t)$, and ${\bf \widetilde{x}} = {\bf x}_t - {\bf \widehat{x}}$. From subsection \ref{sct:average trajectory}, we know that $\mathbb{E}({\bf x}_t) = {\bf \hat{x}}$. So $$\mathbb{E}(\|{\bf x}_t - \mathbb{E}({\bf x}_t) \|) = \mathbb{E}(\|{\bf x}_t - {\bf \widehat{x}} \|) = \mathbb{E}(\|{\bf \widetilde{x}}\|) $$ Let us look at $\|{\bf \widetilde{x}}\|$ $$ \left. 
\begin{array}{l} \dot{\bf x}_t = {\bf f}({\bf x}_t,t) + {\mbox{\boldmath$\xi$}_t} \\ \dot{\bf \widehat{x}} = {\bf f}({\bf \widehat{x}},t) \end{array} \right\} \Rightarrow \frac{d}{dt}{\bf \widetilde{x}} = \int_0^1 \frac{\partial f}{\partial {\bf x}} ({\bf \widehat{x}} + c {\bf \widetilde{x}},t) dc \ {\bf \widetilde{x}} + {\mbox{\boldmath$\xi$}_t} $$ Multiply by ${\bf \widetilde{x}^T}$, it becomes: $$ {\bf \widetilde{x}^T} \frac{d}{dt}{\bf \widetilde{x}} = {\bf \widetilde{x}^T} \int_0^1 \frac{\partial f}{\partial {\bf x}} ({\bf \widehat{x}} + c {\bf \widetilde{x}},t) dc \ {\bf \widetilde{x}} + {\bf \widetilde{x}^T} {\mbox{\boldmath$\xi$}_t} $$ $$ \frac{1}{2} \frac{d}{dt}{\| \bf \widetilde{x} \| ^2} \leq \lambda_f^{max} \|{\bf \widetilde{x}}\|^2 + \|{\bf \widetilde{x}}\| \ \|{\mbox{\boldmath$\xi$}_t}\| $$ So, by dominated convergence theorem again $$ \frac{d}{dt} \mathbb{E}(\|{ \bf \widetilde{x}} \|) \leq \lambda_f^{max} \mathbb{E}(\|{\bf \widetilde{x}}\|) + \mathbb{E}(\|{\mbox{\boldmath$\xi$}_t}\|) \leq \lambda_f^{max} \mathbb{E}(\|{\bf \widetilde{x}}\|) + \sigma $$ Solving $\frac{d}{dt} \mathbb{E}(\|{ \bf \widetilde{x}} \|) = \lambda_f^{max} \mathbb{E}(\|{\bf \widetilde{x}}\|) + \sigma$ and using the positiveness of all terms in the equation (which means that replacing $=$ by $\leq$ just make the slope of increasement smaller), we have : $$ \mathbb{E}(\| {\bf \widetilde{x}} \|) \leq \frac{\sigma}{|\lambda_f^{max}|} ( 1 - e^{\lambda_f^{max} \ t}) $$ This tells us that $$ \mathbb{E}(\| {\bf \widetilde{x}} \|) \leq \frac{\sigma}{|\lambda_f^{max}|} $$ which gives the desired result. $\square$ \end{pf*} \begin{rem} Suppose now that the system is contracting in a metric $$ M(t) = \Theta^T \Theta \geq d \ {\bf I} $$ that does not depend on ${\bf x}$. Then, the change of coordinate is explicit and we can applied the previous result on the state ${\bf z}_t = \Theta(t) {\bf x}_t$. 
We can then come back again to ${\bf x}$ by the reverse change of coordinate ${\bf x}_t = \Theta^{-1} {\bf z}_t$ from which we deduce $$ \mathbb{E}({\bf x}_t) = {\bf \widehat{x}}_t \quad \mbox{and} \quad \mathbb{E}(\|{\bf x}_t - \mathbb{E}({\bf x}_t) \|) \leq \frac{\sigma}{d \ |\lambda_f^{max}|} $$ \end{rem} \end{comment} \subsection{Oscillator Synchronization} Consider two identical Van der Pol oscillators coupled as $$ \ddot{\bf x}_1 + \alpha({\bf x}_1^2 - 1)\dot{\bf x}_1 + w^2 {\bf x}_1 = \alpha {\epsilon_1}(\dot{\bf x}_2 - \dot{\bf x}_1) $$ $$ \ddot{\bf x}_2 + \alpha({\bf x}_2^2 - 1)\dot{\bf x}_2 + w^2 {\bf x}_2 = \alpha {\epsilon_2} (\dot{\bf x}_1 - \dot{\bf x}_2) $$ where \begin{itemize} \item $\alpha > 0$, $w >0$ \item {$\epsilon_1$}, {$\epsilon_2$} are stationary processes \end{itemize} Using (Combescot, Slotine 2000), we can show that $x_1 \xrightarrow[t \rightarrow \infty]{} x_2$ when $$ \mathbb{E}(\epsilon_1) + \mathbb{E}(\epsilon_2) > 1 $$ \begin{figure} \caption{A comparison between stochastic synchronization of two Van der Pol oscillators with and without noise ($\epsilon_1,\epsilon_2 \in [-40,40]$)} \end{figure} Remark that if we add noise in the input of both oscillators, the synchronization still occurs on average (fig. \ref{fig:inputNoise}). \begin{figure} \caption{Stochastic synchronization with noise in input. Left part obtained with $\epsilon_1,\epsilon_2 \in [-80,80]$, right part with $\epsilon_1,\epsilon_2 \in [-400,400]$} \label{fig:inputNoise} \end{figure} \begin{comment} \subsection{Memo} \begin{itemize} \item The redundancy of the calculus may allow to compute directly the expectation of the trajectory. And so if the contraction on average of the system is proved, the redundant system will be contracting. \item Showing the various possibilities offered by the noise : as an input, in the gains \ldots \item Looking for the link between Yoccoz vision of noise. \end{itemize} \end{comment} \end{document}
\begin{document} \setcounter{page}{1} \publyear{2021} \papernumber{0001} \volume{XXX} \issue{X} \title{Construction of networks by associating with submanifolds of almost Hermitian manifolds} \address{Department of Mathematics, Science Faculty, Ege University, Bornova, 35100, Izmir, Turkey} \author{Arif Gursoy\thanks{Corresponding Author}\\ Department of Mathematics \\ Ege University\\ Bornova, 35100 Izmir, Turkey\\ arif.gursoy{@}ege.edu.tr} \maketitle \runninghead{A. Gursoy}{Construction of networks by associating with submanifolds of almost Hermitian manifolds} \begin{abstract} The idea that data lies on a non-linear space has brought up the concept of manifold learning \cite{ZX} as a part of machine learning and such notion is one of the most important research field of today. \end{abstract} \begin{keywords} Graph theory, digraphs, almost Hermitian manifold, submanifolds, manifold learning, machine learning, artificial intelligence \end{keywords} \section{Introduction} Graph theory can be used to model computer networks, social networks, communications networks, information networks, software design, transportation networks, biological networks, etc. So this theory is applicable in many real-world mathematical modeling. Therefore, this theory is the most active areas of mathematical research. On the other hand, one of the most active research area of differential geometry is the submanifold theory of complex manifolds. A submanifold of an almost Hermitian manifold is characterized by the behavior of tangent space of the submanifold of almost Hermitian manifold under the complex structure of the ambient manifold. In this way, we have various submanifolds titled as holomorphic, totally real, CR, slant, semi slant, hemi-slant, bi-slant for almost Hermitian manifolds. In fact, the theory of submanifolds of almost Hermitian manifolds is still main active area of complex differential geometry, see:\cite{ALVY, Lee2015, Vilcu2018} for recent results. 
Manifold learning method is one of the most exciting developments in machine learning recently. Manifold learning has been applied in utilizing semi-supervised learning \cite{ZX}. Moreover, Vakulenko and Radulescu have used the theory of invariant and inertial manifold to prove the realization of prescribed dynamics by networks in patterning by centralized gene networks \cite{Vakulenko}. Furthermore, manifolds also play an important role in public health. Fiorini has defined the Riemannian manifold, which is isomorphic to traditional information geometry Riemannian manifold, for noise reduction in theoretical computerized tomography providing many competitive computational advantages over the traditional Euclidean approach \cite{Fiorini}. Besides, Monti et. al. have introduced a general framework, geometric deep learning, enabling to design of convolutional deep architectures on manifolds and graphs \cite{MBMRSB}. Also, Carriazo and Fernandez \cite{carriazo2004submanifolds} have constructed a relation between slant surface and graph theory. Later, they have related graph theory with vector spaces of even dimension \cite{carriazo2009submanifolds, boza2012graphs}. Their work was restricted to slant submanifolds. We believe that further use of graph theory is possible in the theory of submanifolds. By considering vast literature of graph theory and submanifold theory, one expects more relations between these research areas. In this direction, the aim of this paper is to examine the relation among various submanifolds of almost Hermitian manifolds by using graph theory. We note that our approach is different from the approach considered in \cite{carriazo2004submanifolds} and \cite{carriazo2009submanifolds}. They only considered adapted frame of slant surface and they used them to characterize CR-submanifolds by means of trees. Later they have extended this approach for weakly associated graph. 
In this paper, we give relations between submanifolds of Hermitian manifolds in terms of graph theory notions. \section{Preliminaries} In this section, we are going to recall certain notions used graph theory to be used in this paper from \cite{bang2008digraphs, belmonte2019new, bondy1976graph, chartrand2010graphs, cormen2009introduction, rosen2013discrete}. For those who are not familiar with the theory of graphs (especially for readers working with the submanifolds theory), we specifically recall the basic definitions from graph theory. A graph $G = (V, E)$ consists of a nonempty set $V$ of vertices and a set $E$ of edges. Each edge has either one or two vertices connected with it, called its endpoints. An edge connects its endpoints. Two distinct vertices $u, v$ in a graph G are called adjacent (or neighbors) in $G$ if there is an edge e between $u$ and $v$, where the edge $e$ is called incident with the vertices $u$ and $v$ and $e$ connects $u$ and $v$. The set of all neighbors of a vertex $v$ of $G = (V, E)$ is denoted by $N(v)$. If $A \subset V$, we denote by $N(A)$ the set of all vertices in $G$ that are adjacent to at least one vertex in $A$. The degree of a vertex in a graph is the number of edges incident with it. The degree of the vertex $v$ is denoted by $d(v)$ and $d(v) = \vert N(v)\vert$. The graph theory can be divided into two branches as undirected and directed graphs. \cite{rosen2013discrete} A directed graph (digraph) $D$ is a finite nonempty set of objects called vertices together with a set of ordered pairs of distinct vertices of $D$ called directed edges or arcs. For a digraph $D=(V, A)$, the vertex set of $D$ is denoted by $V(D)$ or simply $V$ and the arc set of $D$ is denoted by $A(D)$ or $A$. Each arc is an ordered pair of vertices. The arc $(u,v)$ is said to start at $u$ and end at $v$. The in-degree of a vertex v, $d^-(v)$, is the number of edges which end at $v$. 
The out-degree of $v$, $d^+(v)$, is the number of edges with $v$ as their initial vertex. Also, for a vertex $v \in V(D)$, $N^{-}_D(v)$ and $N^{+}_D(v)$ are respectively called in-neighbors and out-neighbors where $N^{-}_D(v)=\{u \vert (u,v) \in A(D), u \in V(D)\}$ and $N^{+}_D(v)=\{u | (v,u) \in A(D), u \in V(D)\}$. \cite{chartrand2010graphs, bang2008digraphs, sedgewick2015algorithms, rosen2013discrete} In a digraph $D = (V, A)$, given a pair of vertices $u$ and $v$, whether or not there is a path from $u$ to $v$ in the digraph is useful to know. The transitive closure of $D$ is to construct a new digraph, $D^* = (V, A^*)$, such that there is an arc $(u, v)$ in $D^*$ if and only if there is a path from $u$ to $v$ in $D$. \cite{cormen2009introduction} A walk $W = x_1 a_1 x_2 a_2 x_3 ... x_{k-1} a_{k-1}x_k$ is a sequence of vertices $x_i$ and arcs $a_j$ in $D$ such that the tail and head of $a_i$ is $x_i$ and $x_{i+1}$ for every $i \in [k - 1]$, respectively. The set of vertices and arcs of the walk $W$ are denoted $V(W)$ and $A(W)$, respectively. $W$ is denoted without arcs as $x_1x_2...x_k$ and shortly $(x_1,x_k)$-walk. If $x_1 = x_k$ then $W$ is a closed walk, and otherwise $W$ is an open walk. If $W$ is an open walk, the vertices $x_1$ and $x_k$ are end-vertices and named as the initial and the terminal vertex of $W$, respectively. The length of a walk is the number of its arcs and the walk $W$ above has length $k-1$. \cite{bang2008digraphs} A trail is a walk in which all arcs are distinct. $W$ is called a path if the vertices of a trail $V(W) \subset V(D)$ are distinct. If the vertices $x_1, x_2, ..., x_{k-1}$ are distinct, $k \geq 3$ and $x_1 = x_k$, then $W$ is a cycle. The longest path in $D$ is a path of maximum length in $D$. \cite{bang2008digraphs} \begin{proposition} \label{prop1} \cite{bang2008digraphs} Let $D$ be a digraph and let $ x, y$ be a pair of distinct vertices in $D$.
If $D$ has an $(x, y)$-walk $W$, then $D$ contains an $(x, y)$-path $P$ such that $A(P) \subseteq A(W)$. If $D$ has a closed $(x, x)-$walk $W$, then $D$ contains a cycle $C$ through $x$ such that $A(C) \subseteq A(W)$. \end{proposition} An oriented graph is a digraph with no cycle of length two \cite{bang2008digraphs}. For a digraph $D$, the Underlying Graph of $D$ is the undirected graph engendered utilizing all vertices in $V(D)$, and superseding all of the arcs in $A(D)$ with undirected edges. \cite{bondy1976graph} If a digraph $D$ has an $(x,y)$-walk, then the vertex $y$ is reachable from the vertex $x$. Every vertex is reachable from itself specifically. By Proposition \ref{prop1}, $y$ is reachable from $x$ if and only if $D$ contains an $(x, y)$-path. If every pair of vertices in digraph $D$ is mutually reachable then $D$ is strongly connected (or shortly strong). A strong component of digraph $D$ is a maximal induced strong subdigraph in $D$. If $D_1, ... ,D_t$ are the strong components of $D$, then precisely $V(D_1) \cup ... \cup V(D_t) = V(D)$. If a digraph $D$ is not strongly connected and if the underlying graph of $D$ is connected, then $D$ is said to be weakly connected. \cite{bang2008digraphs, sedgewick2015algorithms} Pseudograph is a graph having parallel edges and loops, and multigraph is a pseudograph with no loops. If every pair of distinct vertices are adjacent in a multigraph then the multigraph is complete. A multigraph $H$ is called as $p-$partite if there is a partition into p sets $V(H) = V_1 \cup V_2 \cup ... \cup V_p$ where $V_i \cap V_j = \O$ for every $i \neq j$. In particular, when $p = 2$ the graph is called a bipartite graph. A bipartite graph $B$ is denoted by $B = (V_1, V_2;E)$. If the edge $(x,y)$ is in $p-$partite multigraph $H$ where all $x \in V_i$, $y \in V_j$ for $i \neq j$ then $H$ is complete $p-$partite. \cite{bang2008digraphs} A digraph $D = (V,A)$ is symmetric if arc $(x,y) \in A$ implies arc $(y,x) \in A$. 
A matching $M$ is an arc set having no common end-vertices and loops in $D$. Also, the arcs of $M$ are independent if $M$ is a matching. If a matching $M$ implicates the highest number of arcs in $D$, then $M$ is maximum. Besides, a maximum matching is perfect if it has $\frac{|A(D)|}{2}$ arcs. A set $Q$ of vertices in a directed pseudograph $H$ is independent if there are no arcs between vertices in $Q$. The independence number of $H$ is the size of the independent set having maximum cardinality in $H$. A coloring of a digraph $H$ is a partition of $V(H)$ into disjoint independent sets. The minimum number of independent sets in the coloring of $H$ is the chromatic number of $H$. A simple directed graph is a digraph that has no multiple arcs or loops. if a digraph contains no cycle, then it is acyclic and called acyclic digraph. \cite{bang2008digraphs} The eccentricity $e(v)$ of a vertex $v$ is the distance from $v$ to the farthest vertex from itself. The radius ($rad$) of $D $ is the minimum eccentricity, and the diameter ($diam$) is the maximum eccentricity. Besides, a vertex $v$ is central if $e(v) = rad(D)$, and $v$ is peripheral if $e(v) = diam(D)$. \cite{chartrand1997distance} Let $D=(V,A)$ be a digraph, $V(D)=n$ and $S$ $\subset$ $V(D)$. $S$ is a dominating set of $D$ if each vertex $v \in V(D) - S$ is dominated by at least a vertex in $S$. A dominating set of $D$ having the smallest cardinality is called the minimum dominating set of $D$. Also, the cardinality of the minimum dominating set is called the domination number of $D$ \cite{lee1998domination, pang2010dominating} Let $r$ be a root vertex in $D$. A directed spanning tree $T$ starting from $r$ is a subdigraph of $D$ such that the undirected form of $T$ is a tree and there is a directed unique $(r,v)$-path in $T$ for each $v \in V(T)-r$. 
\cite{bang2008digraphs} The vertex-integrity of a digraph $D$ is defined by $I(D) = min\{|F| + m(D - F): F\subseteq V(D)\}$, where $m(D - F)$ indicates the maximum order of a strong component of $D - F$. If $I(D) = |F| + m(D - F)$ then $F$ is called as an $I$-set of $D$. In addition, the arc-integrity of a digraph $D$, shortly $I^{'}(D)$, is described as the minimum value of $\{|F| + m(D - F): F\subseteq A(D)\}$. The set $F$ is called as an $I^{'}$-set of $D$ if $I^{'}(D) = |F| + m(D - F)$. \cite{Vandell1996} \begin{proposition}\label{prop.integrity} \cite{Vandell1996} If $S$ is a subdigraph of $D$ then $I(S) \leq I(D)$ and $I^{'}(S) \leq I^{'}(D)$. \end{proposition} \section{Construction of digraphs by relations among submanifolds of almost Hermitian manifolds} Let $( M,g)$ be a Riemannian manifold. $(M,g)$ is called an almost Hermitian manifold if there is a (1,1) tensor field on $M$ such that $J^2=-I$, where $I$ is the identity map on the tangent bundle of $M$, and $g(JX,JY)=g(X,Y)$ for vector fields $X,Y$ on $M$. Moreover if $J$ is parallel with respect to any vector field $X$, then $(M,J,g)$ is called a Kaehler manifold \cite{yano1985manifolds}. There are various submanifolds of an almost Hermitian manifold based on the behavior of the tangent space of the submanifold at a point under the almost complex structure $ J$. Let $N$ be a submanifold of an almost Hermitian manifold and $T_pN$ the tangent space at a point p belongs to $N$. Then if $T_pN$ is invariant with respect to $J_p$ for any point $p$, then $N$ is called holomorphic (or complex) submanifold \cite{yano1985manifolds}. We denote the normal space at $p$ by $T_pN^\perp$. A submanifold of an almost Hermitian manifold is called an anti-invariant submanifold if $JT_p N \subseteq T_pN^\perp$ \cite{yano1985manifolds}. 
As a generalization of holomorphic submanifold and anti-invariant submanifolds, a submanifold $M$ of a Kaehler manifold $N$ is called CR-submanifold \cite{bejancu2012geometry} if there are two orthogonal complementary distributions $\mathcal{D}_1$ and $\mathcal{D}_2$ such that $\mathcal{D}_1$ is invariant with respect to $J$ and $\mathcal{D}_2$ is anti-invariant with respect to $J$ for every point $p \in M$. It is clear that if $\mathcal{D}_1=\{0\}$, then a CR-submanifold becomes an anti-invariant submanifold. If $\mathcal{D}_2=\{0\}$, then $M$ becomes a holomorphic submanifold. Another generalization of holomorphic submanifolds and anti-invariant submanifolds is slant submanifolds. Let $N$ be a submanifold of an almost Hermitian manifold $M$. The submanifold $N$ is called slant \cite{chen1990geometry} if for each non-zero vector $X$ tangent to $N$ the angle $\theta(X)$ between $JX$ and $T_pN$ is a constant, i.e., it does not depend on the choice of $p \in N$ and $X \in T_p N$. $\theta$ is called the slant angle. It is clear that if $\theta(X)=0$ then $N$ becomes a holomorphic submanifold. If $\theta(X) = \pi / 2$, $N$ becomes an anti-invariant submanifold. We will use $v_1$, $v_2$, $v_3$, and $v_4$ to represent the submanifolds holomorphic, CR, anti-invariant and slant, respectively. Digraph $D_1 = (V,A)$ has four vertices, $V(D_1) = \{v_1,v_2,v_3,v_4\}$, and four arcs, $A(D_1)=\{(v_2,v_1),(v_2,v_3),(v_4,v_1),(v_4,v_3)\}$ in Fig. \ref{fig1}. $D_1$ has the maximum length of one as the longest path. $D_1$ has 2 vertices ($v_2$ and $v_4$) which are not reachable. Topological sort of $D_1$ is $v_4-v_2-v_3-v_1$. $rad(D_1)=1$, the radius of $D_1$ is $v_2 \to v_1$. $diam(D_1)=1$, the diameter of $D_1$ is the same as the radius. Also, in $D_1$, there is no center vertex, but two peripheral vertices such as $v_2$ and $v_4$.
\begin{figure} \caption{Digraph $D_1$ built by submanifolds holomorphic, CR, anti-invariant and slant} \label{fig1} \end{figure} \begin{theorem} \label{teo31} For a digraph $D_1$ constructed by the four submanifolds holomorphic, CR, anti-invariant and slant considering as the vertices $v_1$, $v_2$, $v_3$, and $v_4$, respectively, \begin{enumerate}[i] \item $D_1$ is a bipartite digraph as well as a complete bipartite digraph. \item $D_1$ has a perfect matching. \item The independence number of $D_1$ is 2. \item The chromatic number of $D_1$ is 2. \item $D_1$ has no directed spanning tree. \item The domination number of $D_1$ is 2. \end{enumerate} \end{theorem} \begin{proof} \begin{enumerate}[i] \item There exists a partition $V_1$ and $V_2$ of $V(D_1)$ into two partite sets for the submanifolds in $D_1$: $V_1=\{v_1,v_3\}$ and $V_2=\{v_2,v_4\}$. Owing to $V(D_1) = V_1 \cup V_2$ and $V_1 \cap V_2 = \O$, then $D_1$ is a bipartite digraph. \\ Besides, for every submanifold, $x \in V_1, y \in V_2$, a connection from x to y (i.e. an arc $(x,y)$) is in $D_1$. Therefore, $D_1$ is a complete bipartite digraph. \item There is a matching $M=\{(v_2,v_1),(v_4,v_3)\} \subset A(D_1)$ in $D_1$. Each element (arc or connection between two submanifolds) in $M$ is independent, i.e. no common vertices, and $M$ is maximum. Also, $M$ is perfect so that $|M|=\frac{|A(D_1)|}{2}$. It is obvious that $D_1$ has a perfect matching. \item The subset $\widetilde{V}=\{v_2, v_4\} \subset V(D_1)$ is one of the independent sets having maximum cardinality and the size of maximum independent submanifolds set is two. This also means that there is no relation between submanifolds $v_2$ and $v_4$. Then, the independence number of $D_1$ is two. \item $V_1=\{v_2, v_4\}$ and $V_2=\{v_1, v_3\}$ are two subsets of $V(D_1)$. $V_i (i=1,2)$ are all independent sets providing the minimum number of cardinality at the same time. Hence, the minimum number of independent sets of $D_1$ is two. 
Then, the chromatic number of $D_1$ is two. \item There is no root vertex where a subdigraph $T$ of $D_1$ contains a directed path from the root to any other vertex in $V(D_1)$. Then, $D_1$ has no directed spanning tree. \item There is a subset $\widetilde{V}=\{v_2, v_4\} \subset V(D_1)$ that has the minimum cardinality of vertices in $D_1$. Considering this subset, for each vertex $v \in \widetilde{V}$ and $u \in V(D_1)-\widetilde{V}$, (v,u) is an arc in $D_1$. The domination number is two, because of no smaller cardinality of dominating sets in $D_1$. \end{enumerate} \end{proof} \begin{corollary}\label{cor31} In the submanifold network represented by $D_1$ in Fig. \ref{fig1}, the submanifolds, CR $(v_2)$ and slant $(v_4)$, cannot be derived by the other submanifolds, because the in-degrees of these vertices (submanifolds) are zero in $D_1$, $d^{-}(v_2) = d^{-}(v_4) = 0$. In addition, whereas CR and slant submanifolds cannot be mutually derived, as between holomorphic $(v_1)$ and anti-invariant $(v_3)$, holomorphic and anti-invariant submanifolds can be derived separately from CR and slant from $N^{-}_{D_1}(v_1) = N^{-}_{D_1}(v_3) = \{v_2, v_4 \}$. \end{corollary} We now recall the notion of hemi-slant submanifolds of an almost Hermitian manifold. Let $M$ be an almost Hermitian manifold and $N$ a real submanifold of $M$. Then we say that $N$ is a hemi-slant submanifold \cite{carriazoalfonso2000,sahin2009warped} if there exist two orthogonal distributions $\mathcal{D}^\perp$ and $\mathcal{D}^\theta$ on $N$ such that \begin{enumerate} \item $TN$ admits the orthogonal direct decomposition $TN = \mathcal{D}^\perp \oplus \mathcal{D}^\theta$. \item The distribution $\mathcal{D}^\perp$ is an anti-invariant distribution, i.e., $J\mathcal{D}^\perp \subset TM^\perp$. \item The distribution $\mathcal{D}^\theta$ is slant with slant angle $\theta$. \end{enumerate} It is easy to see that if $\mathcal{D}^\perp=\{0\}$, $N$ becomes a slant submanifold with a slant angle $\theta$.
If $\mathcal{D}^\theta=\{0\}$, then $N$ becomes an anti-invariant submanifold. Moreover if $\theta=0$, then $N$ becomes a CR-submanifold. Furthermore, if $\mathcal{D}^\perp=\{0\}$ and $\theta=0$, then $N$ becomes a holomorphic submanifold. We denote hemi-slant submanifolds by $v_6$. Digraph $D_2 = (V,A)$ is an extension of $D_1$, and has five vertices, $V(D_2) = \{v_1,v_2,v_3,v_4,v_6\}$, and seven arcs, $A(D_2)=\{(v_2,v_1), (v_2,v_3), (v_4,v_1), (v_4,v_3), (v_6,v_1), (v_6,v_2), (v_6,v_3)\}$ in Fig. \ref{fig2}. $D_2$ has the maximum length of two as the longest path. It has 2 vertices ($v_4$ and $v_6$) which are not reachable. Topological sort of $D_2$ is $v_6-v_4-v_2-v_3-v_1$. $rad(D_2)=1$, the radius of $D_2$ is $v_2 \to v_1$. $diam(D_2)=1$, the diameter of $D_2$ is the same as the radius. Also, there is no center vertex but three peripheral vertices such as $v_2$, $v_4$ and $v_6$. \begin{figure} \caption{Digraph $D_2$ built by submanifolds in $D_1$ and the hemi-slant submanifold} \label{fig2} \end{figure} \begin{theorem} \label{teo32} For the digraph $D_2$ created by adding the hemi-slant submanifolds as vertex $v_6$ to the $D_1$, \begin{enumerate}[i] \item $D_2$ is a three-partite digraph. \item The maximum matching is 2. \item The independence number is 2. \item The chromatic number is 3. \item $D_2$ has no directed spanning tree. \item The domination number is 2. \end{enumerate} \end{theorem} \begin{proof} \begin{enumerate}[i] \item There exists a partition $V_1=\{v_1, v_3\}$, $V_2=\{v_2\}$ and $V_3=\{v_4,v_6\}$ of $V(D_2)$. These three subsets are three partite sets because of following attributes: $V(D_2) = \bigcup\limits_{i=1}^{3} V_{i}$ and $V_i \cap V_j = \O$ ($i,j=1,2,3$ and $i \neq j$). Then, $D_2$ is a three-partite digraph. \item There is an arc subset $M=\{(v_6,v_1),(v_4,v_3)\}$ in $D_2$, and $|M| = 2$. In $M$, there is no common vertices and loops, that is $M$ is a matching. Also, there is no arc subset having greater cardinality than $M$. 
Therefore, $M$ is maximum matching in $D_2$. \item The maximum independent set and independence number of $D_2$ is the same as $D_1$. See Theorem \ref{teo31}-iii. \item The minimum number of disjoint independent sets of $D_2$ is three: $V_1=\{v_1, v_3\}$, $V_2=\{v_2\}$ and $V_3=\{v_4,v_6\}$. Then, chromatic number of $D_2$ is three. \item No root vertex that contains a directed path from the root to any other vertex in $V(D_2)$. Then, $D_2$ has no directed spanning tree. \item There is a subset $\widetilde{V}=\{v_4, v_6\} \subset V(D_2)$. Considering this subset, that including the minimum cardinality of vertices in $D_2$ as a dominating set, for each vertex $v \in \widetilde{V}$ and $u \in V(D_2)-\widetilde{V}$, (v,u) is an arc in $D_2$. Clearly, the domination number is two. \end{enumerate} \end{proof} \begin{corollary}\label{cor32} In the submanifold network represented by $D_2$ in Fig. \ref{fig2}, the submanifolds, slant $(v_4)$ and hemi-slant $(v_6)$, cannot be derived by the other submanifolds, because $d^{-}(v_4) = d^{-}(v_6) = 0$ in $D_2$. Also, holomorphic $(v_1)$ and anti-invariant $(v_3)$ submanifolds can be derived separately by CR $(v_2)$, slant and hemi-slant since $N^{-}_{D_2}(v_1)=N^{-}_{D_2}(v_3)=\{ v_2, v_4, v_6 \}$. \end{corollary} We now recall the notion of semi-slant submanifolds of an almost Hermitian manifold. Let $M$ be an almost Hermitian manifold and $N$ a real submanifold of $M$. Then we say that $N$ is a semi-slant submanifold \cite{papaghiuc1994} if there exist two orthogonal distributions $\mathcal{D}$ and $\mathcal{D}^\theta$ on $N$ such that \begin{enumerate} \item $TN$ admits the orthogonal direct decomposition $TN = \mathcal{D} \oplus \mathcal{D}^\theta$. \item The distribution $\mathcal{D}$ is an invariant distribution, i.e., $J(\mathcal{D})=\mathcal{D}$. \item The distribution $\mathcal{D}^\theta$ is slant with slant angle $\theta$. 
\end{enumerate} It is easy to see that if $\mathcal{D}=\{0\}$, $ N$ becomes a slant submanifold with a slant angle $\theta$. If $\mathcal{D}^\theta=\{0\}$, then $ N$ becomes a holomorphic submanifold. Moreover if $\theta=\frac{\pi}{2}$, then $ N$ becomes a CR-submanifold. Furthermore, if $\mathcal{D}=\{0\}$ and $\theta=\frac{\pi}{2}$, then $ N$ becomes an anti-invariant submanifold. We denote semi-slant submanifolds by $v_5$. Digraph $D_3 = (V,A)$ is another extension of $D_1$, and has five vertices, $V(D_3) = \{v_1,v_2,v_3,v_4,v_5\}$, and seven arcs, $A(D_3)=\{(v_2,v_1),(v_2,v_3),(v_4,v_1),(v_4,v_3),(v_5,v_2),(v_5,v_3),(v_5,v_4)\}$ in Fig. \ref{fig3}. $D_3$ has the maximum length of two as the longest path. It has a vertex ($v_5$) which is not reachable. Using transitive closure, $D_3$ has only one new direct connection such as $v_5 \to v_1$. Topological sort of $D_3$ is $v_5-v_4-v_2-v_3-v_1$. $rad(D_3)=1$, the radius of $D_3$ is $v_2 \to v_1$. $diam(D_3)=2$, the diameter of $D_3$ is $v_5 \to v_2 \to v_1$. Also, in $D_3$, there are two center vertices as $v_2$ and $v_4$, and one peripheral vertex as $v_5$. \begin{figure} \caption{Digraph $D_3$ built by submanifolds in $D_1$ and the semi-slant submanifold} \label{fig3} \end{figure} \begin{theorem} \label{teo33} For the digraph $D_3$ created by adding the semi-slant submanifolds as vertex $v_5$ to the $D_1$, \begin{enumerate}[i] \item $D_3$ is a three-partite digraph. \item The maximum matching is 2. \item The independence number is 2. \item The chromatic number is 3. \item $D_3$ has a directed spanning tree. \item The domination number is 2. \end{enumerate} \end{theorem} \begin{proof} \begin{enumerate}[i] \item There exists a partition $V_1=\{v_1,v_3\}$, $V_2=\{v_2, v_4\}$ and $V_3=\{v_5\}$ of $V(D_3)$ as three partite sets in $D_3$, and the subsets provide following properties: $V(D_3) = \bigcup\limits_{i=1}^{3} V_{i}$ and $V_i \cap V_j = \O$ ($i,j=1,2,3$ and $i \neq j$). 
In that case, $D_3$ is a three-partite digraph. \item There is an arc subset $M=\{(v_2,v_1),(v_4,v_3)\}$ in $D_3$, and $|M| = 2$. Because of no common vertices and no loops in $M$, $M$ is a matching. Furthermore, $M$ has the maximum cardinality so that $M$ is the maximum matching in $D_3$. \item The maximum independent set and independence number of $D_3$ is the same as $D_1$. See Theorem \ref{teo31}-iii. \item The minimum number of disjoint independent sets of $D_3$ is three: $V_1=\{v_1, v_3\}$, $V_2=\{v_2, v_4\}$ and $V_3=\{v_5\}$. It follows that the chromatic number of $D_3$ is three. \item $D_3$ has a unique directed spanning tree of length 4 and rooted at $v_5$ such as in Fig. \ref{fig4}. It also means that there is a transformation from submanifolds $v_5$ to all other submanifolds in $D_3$. \begin{figure} \caption{Directed spanning tree in $D_3$} \label{fig4} \end{figure} \item There is a subset $\widetilde{V}=\{v_4, v_5\} \subset V(D_3)$. According to this subset, that having the minimum cardinality, and for each vertex $v \in \widetilde{V}$ and $u \in V(D_3)-\widetilde{V}$, (v,u) is an arc in $D_3$, the domination number is two. \end{enumerate} \end{proof} \begin{corollary}\label{cor33} In the submanifold network represented by $D_3$ in Fig. \ref{fig3}, while no submanifolds can be transformed to semi-slant $(v_5)$ submanifold since $N^{-}_{D_3}(v_5)=\emptyset$, all other submanifolds (holomorphic $(v_1)$, CR $(v_2)$, anti-invariant $(v_3)$ and slant $(v_4)$) can be obtained from semi-slant submanifold because of existence of a directed spanning tree with a root vertex $v_5$ (Fig. \ref{fig4}). \end{corollary} Digraph $D_4 = (V,A)$ has six vertices, $V(D_4) = \{v_1,v_2,v_3,v_4,v_5,v_6\}$, and 10 arcs, $A(D_4)=\{(v_2,v_1),(v_2,v_3),(v_4,v_1),(v_4,v_3),(v_5,v_2),(v_5,v_3),(v_5,v_4),(v_6,v_1),(v_6,v_2),(v_6,v_3)\}$ in Fig. \ref{fig5}. $D_4$ has the maximum length of two as the longest path. It has 2 vertices ($v_5$ and $v_6$) which are not reachable. 
Using transitive closure, $D_4$ has only one new direct connection such as $v_5 \to v_1$. The topological sort of $D_4$ is $v_6-v_5-v_4-v_2-v_3-v_1$. $rad(D_4)=1$, the radius of $D_4$ is $v_2 \to v_1$. $diam(D_4)=2$, the diameter of $D_4$ is $v_5 \to v_2 \to v_1$. Also, in $D_4$, there are three center vertices as $v_2$, $v_4$ and $v_6$, and one peripheral vertex as $v_5$. \begin{figure} \caption{Digraph $D_4$ built by submanifolds in $D_3$ and the hemi-slant submanifold} \label{fig5} \end{figure} \begin{theorem} \label{teo34} For the digraph $D_4$ created by adding the hemi-slant submanifolds as vertex $v_6$ to the $D_3$, \begin{enumerate}[i] \item $D_4$ is a three-partite digraph. \item $D_4$ has a perfect matching. \item The independence number is 2. \item The chromatic number is 3. \item $D_4$ has no directed spanning tree. \item The domination number is 2. \end{enumerate} \end{theorem} \begin{proof} \begin{enumerate}[i] \item There exists a partition $V_1=\{v_1,v_3\}$, $V_2=\{v_2, v_4\}$ and $V_3=\{v_5, v_6\}$ of $V(D_4)$ as three subsets, and these subsets provide that $V(D_4) = \bigcup\limits_{i=1}^{3} V_{i}$ and $V_i \cap V_j = \O$ ($i,j=1,2,3$ and $i \neq j$). Under these conditions, $D_4$ is a three-partite digraph. \item There is an arc subset $M=\{(v_2,v_1),(v_5,v_4),(v_6,v_3)\}$ in $D_4$, and $|M| = 3$. On conditions that no common vertices and no loops in $M$ and $|M|=\frac{|A(D_4)|}{2}$, $M$ is perfect matching that's why $D_4$ has a matching also perfect. \item The maximum independent set and the independence number of $D_4$ is the same as $D_1$. See Theorem \ref{teo31}-iii. \item The minimum number of disjoint independent sets of $D_4$ is three: $V_1=\{v_1, v_3\}$, $V_2=\{v_2, v_4\}$ and $V_3=\{v_5, v_6\}$. Then, the chromatic number of $D_4$ is three. \item No root vertex that contains a directed path from the root to any other vertex in $V(D_4)$. Then, $D_4$ has no directed spanning tree. 
\item There is a subset $\widetilde{V}=\{v_5, v_6\} \subset V(D_4)$. According to this subset, which has the minimum cardinality, and for each vertex $v \in \widetilde{V}$ and $u \in V(D_4)-\widetilde{V}$, (v,u) is an arc in $D_4$ so that the domination number is two. \end{enumerate} \end{proof} \begin{corollary}\label{cor34} In the submanifold network represented by $D_4$ in Fig. \ref{fig5}, semi-slant $(v_5)$ and hemi-slant $(v_6)$ submanifolds cannot be obtained by any other submanifolds because $d^{-}(v_5) = d^{-}(v_6) = 0$. Besides, no submanifolds can be derived from holomorphic $(v_1)$ and anti-invariant $(v_3)$ submanifolds since $N^{+}_{D_4}(v_1) = N^{+}_{D_4}(v_3) = \emptyset$. \end{corollary} We now recall the notion of bi-slant submanifolds of an almost Hermitian manifold. Let $M$ be an almost Hermitian manifold and $N$ a real submanifold of $M$. Then we say that $N$ is a bi-slant submanifold \cite{carriazoalfonso2000} if there exist two orthogonal distributions $\mathcal{D}^{\theta_1}$ and $\mathcal{D}^{\theta_2}$ on $N$ such that \begin{enumerate} \item $TN$ admits the orthogonal direct decomposition $TN=\mathcal{D}^{\theta_1} \oplus \mathcal{D}^{\theta_2}$. \item The distributions $\mathcal{D}^{\theta_1}$ and $\mathcal{D}^{\theta_2}$ are slant distributions with slant angles $\theta_1$ and $\theta_2$. \end{enumerate} It is easy to see that if $\mathcal{D}^{\theta_1}=\{0\}$ (or $\mathcal{D}^{\theta_2}=\{0\}$), $N$ becomes a slant submanifold with slant angle $\theta_2$ (respectively $\theta_1$). If $\theta_1=\theta_2=0$, then $N$ becomes a holomorphic submanifold. If $\theta_1=\theta_2=\frac{\pi}{2}$, then $N$ becomes an anti-invariant submanifold. Moreover if $\theta_1=\frac{\pi}{2}$ and $\theta_2=0$, then $N$ becomes a CR-submanifold. Furthermore, if $\theta_1=\frac{\pi}{2}$ or $\theta_1=0$, then $N$ becomes a hemi-slant submanifold and semi-slant submanifold, respectively. We denote bi-slant submanifolds by $v_7$.
Digraph $D_5 = (V,A)$ has seven vertices, $V(D_5) = \{v_1,v_2,v_3,v_4,v_5,v_6,v_7\}$, and 12 arcs, $A(D_5)=\{(v_2,v_1),(v_2,v_3),(v_4,v_1),(v_4,v_3),(v_5,v_2),(v_5,v_3),\\(v_5,v_4),(v_6,v_1),(v_6,v_2),(v_6,v_3),(v_7,v_5),(v_7,v_6)\}$ in Fig. \ref{fig6}. $D_5$ has the maximum length of three as the longest path. It has a vertex ($v_7$) which is not reachable. Using transitive closure, $D_5$ has five new direct connections such as $v_5 \to v_1$, $v_7 \to v_1$, $v_7 \to v_2$, $v_7 \to v_3$ and $v_7 \to v_4$. Topological sort of $D_5$ is $v_7-v_6-v_5-v_4-v_2-v_3-v_1$. $rad(D_5)=1$, the radius of $D_5$ is $v_2 \to v_1$. $diam(D_5)=2$, the diameter of $D_5$ is $v_5 \to v_2 \to v_1$. Also, in $D_5$, there are three center vertices as $v_2$, $v_4$ and $v_6$, and two peripheral vertices as $v_5$ and $v_7$. \begin{figure} \caption{Digraph $D_5$ built by submanifolds in $D_4$ and the bi-slant submanifold} \label{fig6} \end{figure} \begin{theorem} \label{teo35} For the digraph $D_5$ created by adding the bi-slant submanifolds as vertex $v_7$ to the $D_4$, \begin{enumerate}[i] \item $D_5$ is a three-partite digraph. \item The maximum matching is 3. \item The independence number is 3. \item The chromatic number is 3. \item $D_5$ has a directed spanning tree. \item The domination number is 3. \end{enumerate} \end{theorem} \begin{proof} \begin{enumerate}[i] \item There is a partition $V_1=\{v_1,v_3\}$, $V_2=\{v_2, v_4, v_7\}$ and $V_3=\{v_5, v_6\}$ of $V(D_5)$ as three subsets, and these subsets support that $V(D_5) = \bigcup\limits_{i=1}^{3} V_{i}$ and $V_i \cap V_j = \O$ ($i,j=1,2,3$ and $i \neq j$). Then, $D_5$, containing the subsets, is actually a three-partite digraph. \item $M=\{(v_2,v_1),(v_5,v_4),(v_6,v_3)\}$ is an arc subset in $D_5$, and $|M| = 3$. According to this, $M$, which includes no common vertices and no loops, is a matching. Since no other such subset has greater cardinality than $M$, $D_5$ has a maximum matching called $M$.
\item The subset $\widetilde{V}=\{v_2, v_4, v_7\}$ is an independent set having maximum cardinality. It also means that there is no direct relationship between any two elements, i.e. submanifolds, in $\widetilde{V}$. Then, the independence number of $D_5$ is three, because $|\widetilde{V}|=3$. \item The minimum number of disjoint independent sets of $D_5$ is three: $V_1=\{v_1, v_3\}$, $V_2=\{v_2, v_4, v_7\}$ and $V_3=\{v_5, v_6\}$. According to that, three different colors are needed to color $D_5$, and that is why the chromatic number of $D_5$ is three. \item $D_5$ has a directed spanning tree of length 6 with root at $v_7$, as in Fig. \ref{fig7}. It also means that there is a transformation from the submanifold $v_7$ to all other submanifolds in $D_5$ in at most two steps. \begin{figure} \caption{A directed spanning tree in $D_5$} \label{fig7} \end{figure} \item There is a subset $\widetilde{V}=\{v_5, v_6, v_7\} \subset V(D_5)$. This subset has the minimum cardinality such that, for each vertex $v \in \widetilde{V}$ and $u \in V(D_5)-\widetilde{V}$, $(v,u)$ is an arc in $D_5$. The domination number is three. \end{enumerate} \end{proof} \begin{corollary}\label{cor35} In the submanifold network represented by $D_5$ in Fig. \ref{fig6}, all other submanifolds can be derived from the bi-slant $(v_7)$ submanifold since $v_7$ is the root vertex of the directed spanning tree of $D_5$ and $N^{+}_{D_5}(v_7) = \{v_5, v_6\}$ in Fig. \ref{fig7}. Also, no submanifolds can be transformed to bi-slant because $N^{-}_{D_5}(v_7) = \emptyset$. \end{corollary} Digraph $D_6 = (V,A)$ has seven vertices, as does $D_5$, $V(D_6) = \{v_1,v_2,v_3,v_4,v_5,v_6,v_7\}$, and 14 arcs, $A(D_6)=\{(v_2,v_1),(v_2,v_3),(v_4,v_1),(v_4,v_3),(v_5,v_1),(v_5,v_2),(v_5,v_3),(v_5,v_4),(v_6,v_1),\\(v_6,v_2),(v_6,v_3),(v_6,v_4),(v_7,v_5),(v_7,v_6)\}$ in Fig. \ref{fig8}. $D_6$ has the maximum length of three as the longest path. It has a vertex ($v_7$) which is not reachable from any other vertex. 
Using transitive closure, $D_6$ has four new direct connections such as $v_7 \to v_1$, $v_7 \to v_2$, $v_7 \to v_3$ and $v_7 \to v_4$. Topological sort of $D_6$ is $v_7-v_6-v_5-v_4-v_2-v_3-v_1$. $rad(D_6)=1$, the radius of $D_6$ is $v_2 \to v_1$. $diam(D_6)=2$, the diameter of $D_6$ is $v_7 \to v_5 \to v_1$. Also, in $D_6$, there are four center vertices as $v_2$, $v_4$, $v_5$ and $v_6$, and one peripheral vertex as $v_7$. \begin{figure} \caption{Digraph $D_6$ built by $D_5$ with arcs $(v_5,v_1)$ and $(v_6,v_4)$} \label{fig8} \end{figure} \begin{theorem} \label{teo36} For the digraph $D_6$ created by adding two more relations from semi-slant to holomorphic and from hemi-slant to slant as arcs to the $D_5$, \begin{enumerate}[i] \item $D_6$ is a three-partite digraph. \item The maximum matching is 3. \item The independence number is 3. \item The chromatic number is 3. \item $D_6$ contains a directed spanning tree. \item The domination number is 2. \end{enumerate} \end{theorem} \begin{proof} \begin{enumerate}[i] \item See Theorem \ref{teo35}-i. \item See Theorem \ref{teo35}-ii. \item See Theorem \ref{teo35}-iii. \item See Theorem \ref{teo35}-iv. \item $D_6$ has a directed spanning tree having the same structure as in Fig. \ref{fig7}. See Theorem \ref{teo35}-v. \item There is a subset $\widetilde{V}=\{v_5, v_7\} \subset V(D_6)$. This subset has the minimum cardinality while dominating all other vertices, and for each vertex $v \in \widetilde{V}$ and $u \in V(D_6)-\widetilde{V}$, $(v,u)$ is an arc in $D_6$. The domination number is two. \end{enumerate} \end{proof} \begin{corollary}\label{cor36} In the most comprehensive submanifold network represented by $D_6$ in Fig. \ref{fig8}, just two submanifolds, holomorphic $(v_1)$ and anti-invariant $(v_3)$, are not generative since $d^{+}(v_1) = d^{+}(v_3) = 0$. Besides, bi-slant $(v_7)$ is the most productive submanifold, since it can be transformed into all other submanifolds. 
\end{corollary} Using the seven submanifolds, named holomorphic, CR, anti-invariant, slant, hemi-slant, semi-slant and bi-slant, six digraphs are constructed, called $D_1, D_2, D_3, D_4, D_5$ and $D_6$, whose vertices are submanifolds and arcs are connections among submanifolds from one to another. \begin{theorem} \label{teo37} Let $D$ be a digraph which indicates digraphs from $\{D_1, D_2, D_3, D_4, D_5, D_6\}$. $D$ provides the following properties: \begin{enumerate}[i] \item Simple directed graph. \item Directed acyclic graph. \item Weakly connected. \end{enumerate} \end{theorem} \begin{proof} \begin{enumerate}[i] \item In digraph $D$, there is no more than one relationship between any two submanifolds and no transformations from a submanifold to itself. According to that, $D$ is a simple directed graph. \item Consider a transition sequence among submanifolds, say $v_1 v_2 \dots v_k$, where $v_1$ is the source submanifold and $v_k$ is the sink submanifold. Since $D$ has no transition sequence in which the same submanifold is both the source and the sink, $D$ contains no directed cycle. That is why $D$ is a directed acyclic graph. \item $D$ has at least one pair of submanifolds that cannot be mutually transformed into each other. Hence, $D$ is not strongly connected. However, the underlying graph of $D$, obtained by ignoring the directions of the transformations, is connected, since there are no isolated submanifolds; this is the connectedness of the underlying graph. For this reason, $D$ is weakly connected. \end{enumerate} \end{proof} \begin{corollary} \label{cor38} Among all digraphs $D_1$, $D_2$, $D_3$, $D_4$, $D_5$, and $D_6$, the digraph $D_6$ has \begin{enumerate} \item[•] the maximum vertex-integrity, and \item[•] the maximum edge-integrity \end{enumerate} as well as the maximum size by Proposition \ref{prop.integrity}. 
\end{corollary} \section{Conclusion} Manifold learning plays an important role in analyzing data lying on a non-linear space as a part of machine learning. Moreover, geometric deep learning yields using the concepts of manifolds and graphs together in building convolutional deep structures. In this paper, using holomorphic submanifolds, anti-invariant submanifolds, CR-submanifolds, slant submanifolds, semi-slant submanifolds, hemi-slant submanifolds and bi-slant submanifolds in almost Hermitian manifolds, relations among them are given, six different digraphs are created as a network of these submanifolds, and their main properties are examined for the first time in terms of digraphs. We note that there is a much wider class that includes slant submanifolds. This class was first defined in \cite{Etayo} by Etayo as quasi-slant submanifolds. Later, these submanifolds were called pointwise slant submanifolds in \cite{Chen-Garay} by Chen and Garay. Although we have excluded such submanifolds in this article, our next research will be to examine the connections between these submanifolds and graph theory. \end{document}
\begin{document} \title{On the structure of the commutator subgroup of certain homeomorphism groups} \begin{abstract} An important theorem of Ling states that if $G$ is any factorizable non-fixing group of homeomorphisms of a paracompact space then its commutator subgroup $[G,G]$ is perfect. This paper is devoted to further studies on the algebraic structure (e.g. uniform perfectness, uniform simplicity) of $[G,G]$ and $[\tilde G,\tilde G]$, where $\tilde G$ is the universal covering group of $G$. In particular, we prove that if $G$ is a bounded factorizable non-fixing group of homeomorphisms then $[G,G]$ is uniformly perfect (Corollary 3.4). The case of open manifolds is also investigated. Examples of homeomorphism groups illustrating the results are given. \end{abstract} \section{Introduction} Given groups $G$ and $H$, by $G\leq H$ (resp. $G\lhd H$) we denote that $G$ is a subgroup (resp. normal subgroup) of $H$. Throughout by $\mathcal{H}(X)$ we denote the group of all homeomorphisms of a topological space $X$. Let $U$ be an open subset of $X$ and let $G$ be a subgroup of $\mathcal{H}(X)$. The symbol $\mathcal{H}_U(X)$ (resp. $G_U$) stands for the subgroup of elements of $\mathcal{H}(X)$ (resp. $G$) with support in $U$. For $g\in \mathcal{H}(X)$ the support of $g$, $\supp(g)$, is the closure of $\{x\in X:\, g(x)\neq x\}$. Let $\mathcal{H}_c(M)$ (resp. $G$) denote the subgroup of $\mathcal{H}(M)$ (resp. $G$) of all its compactly supported elements. \begin{dff}\label{fac} Let $\mathcal{U}$ be an open cover of $X$. A group of homeomorphisms $G$ of a space $X$ is called \emph{$\mathcal{U}$-factorizable} if for every $g\in G$ there are $g_1,\ldots, g_r\in G$ with $g=g_1\cdots g_r$ and such that $\supp(g_i)\subset U_i$, $i=1,\ldots, r$, for some $U_1,\ldots, U_r\in\mathcal{U}$. $G$ is called \emph{factorizable} if for every open cover $\mathcal{U}$ of $X$ it is $\mathcal{U}$-factorizable. 
Next $G$ is said to be \wyr{non-fixing} if $G(x)\neq \{ x \}$ for every $x \in X$, where $G(x):= \{ g(x)|g \in G \}$ is the orbit of $G$ at $x$. \end{dff} Given a group $G$, denote by $[f,g]=fgf^{-1}g^{-1}$ the commutator of $f,g\in G$, and by $[G,G]$ the commutator subgroup. Now the theorem of Ling can be formulated as follows. \begin{thm} \cite{li}\label{ling} Let $X$ be a paracompact topological space and let $G$ be a factorizable non-fixing group of homeomorphisms of $X$. Then the commutator subgroup $[G,G]$ is perfect, that is $[[G,G],[G,G]]=[G,G]$. \end{thm} Recall that a group $G$ is called \wyr{uniformly perfect} \cite{bip} if $G$ is perfect (i.e. $G=[G,G]$) and there exists a positive integer $r$ such that any element of $G$ can be expressed as a product of at most $r$ commutators of elements of $G$. For $g\in [G,G]$, $g\neq e$, the least $r$ such that $g$ is a product of $r$ commutators is called the \wyr{commutator length} of $g$ and is denoted by $\cl_G(g)$. By definition we put $\cl_G(e)=0$. Throughout we adopt the following notation. Let $M$ be a paracompact manifold of class $C^r$, where $r=0,1,\ldots,\infty$. Then $\mathcal{D}^r(M)$ (resp. $\mathcal{D}^r_c(M)$) denotes the group of all $C^r$-diffeomorphisms of $M$ which can be joined with the identity by a (resp. compactly supported) $C^r$-isotopy. For simplicity by $C^0$-diffeomorphism we mean a homeomorphism. Observe that in view of recent results (Burago, Ivanov and Polterovich \cite{bip}, Tsuboi \cite{Tsu2}) the diffeomorphism groups $\mathcal{D}^{\infty}_c(M)$ are uniformly perfect for most types of manifolds $M$, though some open problems are left. Our first aim is to prove the following generalization of Theorem 1.2. \begin{thm} Let $X$ be a paracompact topological space and let $G$ be a factorizable non-fixing group of homeomorphisms of $X$. 
Assume that $\cl_G$ is bounded on $[G,G]$ and that $G$ is bounded with respect to all fragmentation norms $\frag^{\mathcal{U}}$ (c.f. section 2), where $\mathcal{U}$ runs over all open covers of $X$. Then the commutator subgroup $[G,G]$ is uniformly perfect. \end{thm} The proof of Theorem 1.3 and further results concerning the uniform perfectness of $[G,G]$ will be given in section 3. Ling's theorem (Theorem 1.2) constitutes an essential amelioration of the simplicity theorem of Epstein \cite{ep} in at least two aspects. First, contrary to \cite{ep}, it provides algebraic information on nontransitive homeomorphism groups. Second, it enables one to strengthen the theorem of Epstein itself. We will recall Epstein's theorem and Ling's improvement of it in section 4. Also in section 4 we formulate conditions which ensure the uniform simplicity of $[G,G]$ (Theorem 4.3). As usual $\tilde G$ stands for the universal covering group of $G$. In section 5 we will prove the following \begin{thm} Suppose that $G\leq\mathcal{H}(X)$ is isotopically factorizable (Def. 5.2) and that $G_0$, the identity component of $G$, is non-fixing. Then the commutator group $[\tilde G,\tilde G]$ is perfect. \end{thm} In section 6 we will consider the case of a noncompact manifold $M$ such that $M$ is the interior of a compact manifold $\bar M$, and groups of homeomorphisms on $M$ with no restriction on support. Consequently such groups are not factorizable in the usual way but only in a wider sense (Def. 6.1). It is surprising that for a large class of homeomorphism or diffeomorphism groups of an open manifold the assertions of Theorems 1.2 and 1.3 still hold (see Theorems 6.9 and 6.10). In the final section we will present some examples and open problems which are of interest in the context of the above results. {\bf Acknowledgments.} A correspondence with Paul Schweitzer and his recent paper \cite{Sch} were helpful when we were preparing section 6. 
We would like to thank him very much for his kind help. \section{Conjugation-invariant norms} The notion of the conjugation-invariant norm is a basic tool in studies on the structure of groups. Let $G$ be a group. A \wyr{conjugation-invariant norm} (or \emph{norm} for short) on $G$ is a function $\nu:G\rightarrow[0,\infty)$ which satisfies the following conditions. For any $g,h\in G$ \begin{enumerate} \item $\nu(g)>0$ if and only if $g\neq e$; \item $\nu(g^{-1})=\nu(g)$; \item $\nu(gh)\leq\nu(g)+\nu(h)$; \item $\nu(hgh^{-1})=\nu(g)$. \end{enumerate} Recall that a group is called \emph{bounded} if it is bounded with respect to any bi-invariant metric. It is easily seen that $G$ is bounded if and only if any conjugation-invariant norm on $G$ is bounded. Observe that the commutator length $\cl_G$ is a conjugation-invariant norm on $[G,G]$. In particular, if $G$ is a perfect group then $\cl_G$ is a conjugation-invariant norm on $G$. For any perfect group $G$ denote by $\cld_G$ the commutator length diameter of $G$, i.e. $\cld_G:=\sup_{g\in G}\cl_G(g)$. Then $G$ is uniformly perfect iff $\cld_G<\infty$. Assume now that $G\leq\mathcal{H}(X)$ is $\mathcal{U}$-factorizable (Def.1.1), and that $\mathcal{U}$ is a $G$-invariant open cover of $X$. The latter means that $g(U)\in\mathcal{U}$ for all $g\in G$ and $U\in\mathcal{U}$. Then we may introduce the following conjugation-invariant norm $\frag^{\mathcal{U}}$ on $G$. Namely, for $g\in G$, $g\neq\id$, we define $\frag^{\mathcal{U}}(g)$ to be the least integer $r>0$ such that $g=g_1\cdots g_{r}$ with $\supp(g_i)\subset U_i$ for some $U_i\in\mathcal{U}$, where $i=1,\ldots, r$. By definition $\frag^{\mathcal{U}}(\id)=0$. Define $\fragd^{\mathcal{U}}_G:=\sup_{g\in G}\frag^{\mathcal{U}}(g)$, the diameter of $G$ in $\frag^{\mathcal{U}}$. Consequently, $\frag^{\mathcal{U}}$ is bounded iff $\fragd^{\mathcal{U}}_G<\infty$. 
Observe that $\varphirag^{\{X\}}$ is the trivial norm on $G$, i.e. equal to 1 for all $g\in G\subsetetminus\{\id\}$. Observe as well that $\varphirag^{\mathcal{V}}\geq\varphirag^{\mathcal{U}}$ provided $\mathcal{V}$ is finer than $\mathcal{U}$. The significance of $\varphirag^{\mathcal{U}}$ consists in the following version of Proposition 1.15 in \circte{bip}. \betaegin{prop} Let $M$ be a $C^r$-manifold, $r=0,1,\ldots,\infty$. Then $\mathcal{D}^r_c(M)$ is bounded if and only if $\mathcal{D}^r_c(M)$ is bounded with respect to $\varphirag^{\mathcal{U}}$, where $\mathcal{U}$ is some cover by embedded open balls. \end{prop} Indeed, it is a consequence of Theorem 1.18 in \circte{bip} stating that for a portable manifold $M$ the group $\mathcal{D}^r_c(M)$ is bounded, and the fact that $\rightarrowz^n$ is portable. \subsetection{Uniform perfectness of $[G,G]$} In Theorems 3.5 and 3.8 below we also need stronger notions than that of non-fixing group (Def. 1.1). \betaegin{dff} Let $\mathcal{U}$ be an open cover of $X$, $G\leq\mathcal{H}(X)$ and let $r\in\mathbb N$. \betaegin{enumerate} \item $G$ is called \emph{$r$-non-fixing} if for any $x\in X$ there are $f_1,\ldots, f_r,g_1,\ldots, g_r\in G$ (possibly $=\id$) such that $([f_r,g_r],\ldots,ots[f_1,g_1])(x)\neq x$. \item $G$ is said to be \emph{$\mathcal{U}$-moving} if for every $U\in\mathcal{U}$ then there is $g\in G$ such that $g(U)\cap U=\emptyset$. \item $G$ is said to be \emph{$r$-$\mathcal{U}$-moving} if for any $U\in \mathcal{U}$ there are $2r$ elements of $G$ (possibly $=\id$), say $f_1,\ldots, f_r, g_1,\ldots, g_r$, such that the sets $U$ and $([f_r,g_r],\ldots,ots[f_1,g_1])(U)$ are disjoint. \item $G$ is said to be \emph{strongly $\mathcal{U}$-moving} if for every $U, V\in\mathcal{U}$ there is $g\in G$ such that $g(U)\cap (U\cup V)=\emptyset$. \item $G$ is called \emph{locally moving} if for any open set $U\subset X$ and $x\in U$ there is $g\in G_U$ such that $g(x)\neq x$. 
\end{enumerate} \end{dff} Of course, if $G$ is either $r$-non-fixing, or $\mathcal{U}$-moving, or locally moving then it is non-fixing. Likewise, if $G$ is $r$-$\mathcal{U}$-moving then it is $s$-$\mathcal{U}$-moving for $r<s$ and $\mathcal{U}$-moving. Notice that if $\mathcal{V}$ is finer than $\mathcal{U}$ and $G$ is (resp. strongly) $\mathcal{U}$-moving then $G$ is (resp. strongly) $\mathcal{V}$-moving. \betaegin{prop} Let $X$ be paracompact and let $G\leq\mathcal{H}(X)$.\betaegin{enumerate} \item If $G$ is non-fixing and factorizable (Def. 1.1) then $G$ is locally moving. \item If $G$ is locally moving then so is $[G,G]$. \item If $G$ is non-fixing and factorizable then $[G,G]$ is $1$-non-fixing (Def. 3.1(1)). \end{enumerate} \end{prop} \betaegin{proof} (1) Let $x\in U$ and $g\in G$ such that $g(x)=y\neq x$. Choose $\mathcal{U}=\{U_1, U_2\}$, where $x\in U_1\subsetetminus U_2$, $y\in U_2\subsetetminus U_1$, $U_1\subset U$ and $X=U_1\cup U_2$. By assumption we may write $g=g_r,\ldots,ots g_1$, where all $g_i$ are supported in elements of $\mathcal{U}$. Let $s:=\min\{i\in\{1,\ldots, r\}:\; \subsetupp(g_i)\subset U_1 \timesext{\; and\;} g_i(x)\neq x\}$. Then $g_s\in G_U$ satisfies $g_s(x)\neq x$. (2) Let $x\in U$. There is $g\in G_U$ with $g(x)\neq x$. Take an open $V$ such that $x\in V\subset U$ and $g(x)\not\in V$. Choose $f\in G_V$ with $f(x)\neq x$. It follows that $f(g(x))=g(x)\neq g(f(x))$ and, therefore, $[f,g](x)\neq x$. (3) follows from (1) and the proof of (2). \end{proof} The following property of paracompact spaces is well-known. \betaegin{lem}\langlebel{cover} If $X$ is a paracompact space and $\mathcal{U}$ is an open cover of $X$, then there exists an open cover $\mathcal{V}$ star finer than $\mathcal{U}$, that is for all $V\in \mathcal{V}$ there is $U\in\mathcal{U}$ such that $\subsetta^{\mathcal{V}}(V)\subset U$. Here $\subsetta^{\mathcal{V}}(V):=\betaigcup\{V'\in\mathcal{V}:\; V'\cap V\neq\emptyset\}$. 
In particular, for all $V_1, V_2\in \mathcal{V}$ with $V_1\cap V_2\neq\emptyset$ there is $U\in\mathcal{U}$ such that $V_1\cup V_2\subsetubset U$. \end{lem} If $\mathcal{V}$ and $\mathcal{U}$ are as in Lemma 3.3 then we will write $\mathcal{V}\partialrec\mathcal{U}$. For an open cover $\mathcal{U}$ let $\mathcal{U}^G:=\{g(U):\; g\in G \timesext{\; and \;} U\in\mathcal{U}\}$. \betaigskip \emph{Proof of Theorem 1.3.} In view of Proposition 3.2 and the assumption, for any $x\in X$ there is $f,g\in[G,G]$ such that $[f,g](x)\neq x$. It follows the existence of an open cover $\mathcal{U}$ such that for any $U\in\mathcal{U}$ there are $f,g\in[G,G]$ such that $[f,g](U)\cap U=\emptyset$. Hence we have also that for any $U\in\mathcal{U}^G$ there is $f,g\in[G,G]$ such that $[f,g](U)\cap U=\emptyset$. In fact, if $N\lhd G$ and $U\in\mathcal{U}$ such that $n(U)\cap U=\emptyset$ for some $n\in N$, then for $g\in G$ we get $(\betaar ng)(U)\cap g(U)=\emptyset$, where $\betaar n=gng^{-1}\in N$. Due to Lemma 3.3 we can find $\mathcal{V}$ such that $\mathcal{V}\partialrec\mathcal{U}$. We denote \betaegin{equation*}G^{\mathcal{U}}=\partialrod\limits_{U\in \mathcal{U}^G}[G_U,G_U].\end{equation*} Assume that $G$ is $\mathcal{V}$-factorizable and $\varphiragd^{\mathcal{V}}_G= \rightarrowr$. First we show that $[G,G]\subset G^{\mathcal{U}}$ and that any $[g_1,g_2]\in[G,G]$ can be expressed as a product of at most $\rightarrowr^2$ elements of $G^{\mathcal{U}}$ of the form $[h_1,h_2]$, where $h_1,h_2\in G_U$ for some $U$. In fact, it is an immediate consequence of the following commutator formulae for all $f,g,h\in G$ \betaegin{equation} [fg,h]=f[g,h]f^{-1}[f,h],\quad [f,gh]=[f,g]g[f,h]g^{-1}, \end{equation} and the fact that $\mathcal{V}\partialrec\mathcal{U}$. Now if $\cld_G=d$, then every element of $[G,G]$ is a product of at most $d\rightarrowr^2$ elements of $G^{\mathcal{U}}$ of the form $[h_1,h_2]$, where $h_1,h_2\in G_U$ for some $U$. Next, fix arbitrarily $U\in \mathcal{U}$. 
We have to show that for every $f,g\in G_U$ the bracket $[f,g]$ can be represented as a product of four commutators of elements of $[G,G]$. By assumption on $\mathcal{U}^G$, there are $h_1,h_2\in [G,G]$ such that $h(U)\cap U=\emptyset$, where $h=[h_1,h_2]$. It follows that $[hfh^{-1}, g]=\id$. Therefore, $[[h,f],g]=[f,g]$. Observe that indeed $[[h,f],g]$ is a product of four commutators of elements of $[G,G]$. Thus any element of $[G,G]$ is a product of at most $4d\rightarrowr^2$ commutators of elements of $[G,G]$. \quad $\subsetquare$ \betaegin{cor} Let $X$ be a paracompact space and let $G\leq\mathcal{H}(X)$ be a bounded, factorizable and non-fixing group. Then the commutator subgroup $[G,G]$ is uniformly perfect. \end{cor} \betaegin{proof} The only thing we need is that $\cl_G$ should be bounded (on $[G,G]$), and this fact is a consequence of Proposition 1.4 in \circte{bip}. \end{proof} A more refined version of Theorem 1.3 is the following \betaegin{thm} Let $X$ be a paracompact topological space, let $G\leq\mathcal{H}(X)$ with $\cl_G$ bounded (as the norm on $[G,G]$) and let $\mathcal{U}$ be a $G$-invariant open cover of $X$ such that \betaegin{enumerate} \item $G$ is strongly $\mathcal{U}$-moving (Def. 3.1(4)), and \item there is an open cover $\mathcal{V}$ satisfying $\mathcal{V}\partialrec\mathcal{U}$ such that $G$ is $\mathcal{V}$-factorizable and $G$ is bounded with respect to the fragmentation norm $\varphirag^{\mathcal{V}}$.\end{enumerate} Then the commutator subgroup $[G,G]$ is uniformly perfect. Furthermore, if $\varphiragd^{\mathcal{V}}_G= \rightarrowr$ and $\cld_G=d$ then $\cld_{[G,G]}\leq d\rightarrowr^2$. \end{thm} \betaegin{proof} Let $\mathcal{U}$ and $\mathcal{V}$ satisfy the assumption. 
We denote $$G^{\mathcal{U}}=\partialrod\limits_{U\in \mathcal{U}}[G_U,G_U].$$ As in the proof of 1.3, first we show, due to (3.1) and $G$-invariance of $\mathcal{U}$, that $[G,G]\subset G^{\mathcal{U}}$ and that any $[f,g]\in[G,G]$ can be written as a product of at most $\rightarrowr^2$ elements of $G^{\mathcal{U}}$ of the form $[h_1,h_2]$, where $h_1,h_2\in G_U$ for some $U$. This implies that every element of $[G,G]$ is a product of at most $d\rightarrowr^2$ elements of $G^{\mathcal{U}}$ of the form $[h_1,h_2]$, where $h_1,h_2\in G_U$ for some $U$. For $U\in \mathcal{U}$ we will show that for every $f,g\in G_U$ the bracket $[f,g]$ is a commutator of two elements of $[G,G]$. By assumption and Def. 3.2(4), there is $h\in G$ such that $h(U)\cap U=\emptyset$. It follows that $[hfh^{-1}, g]=\id$. Next, for $U, h(U)\in\mathcal{U}$ there is $k\in G$ such that $k(U)\cap(U\cup h(U))=\emptyset$. Consequently, $[f, kgk^{-1}]=\id$ and $[hfh^{-1}, kgk^{-1}]=\id$. Therefore, in view of (3.1), $[f,g]=[[f,h],[g,k]]$, that is $[f,g]$ is a commutator of elements of $[G,G]$. Thus $[G,G]$ is uniformly perfect and $\cld_{[G,G]}\leq d\rightarrowr^2$, as required. \end{proof} From the proof of Theorem 3.5 we get \betaegin{cor} If $\mathcal{U}$ is a $G$-invariant open cover of $X$ such that $G$ is strongly $\mathcal{U}$-moving and $\mathcal{V}$-factorizable for some open cover $\mathcal{V}$ satisfying $\mathcal{V}\partialrec\mathcal{U}$ then $[G,G]$ is perfect. \end{cor} \betaegin{prop} (1) Let $G$ be $\mathcal{U}$-moving. Assume that $\mathcal{V}$ is a $G$-invariant open cover such that $\mathcal{V}\partialrec\mathcal{U}$, $G$ is $\mathcal{V}$-factorizable and $\varphiragd^{\mathcal{V}}_G=\rightarrowr$. Then $G$ is $\rightarrowr$-$\mathcal{V}$-moving. (2) Let $\mathcal{U}$, $\mathcal{V}$, $\mathcal{W}$ and $\mathcal{T}$ be such that $\mathcal{T}\partialrec\mathcal{W}\partialrec\mathcal{V}\partialrec\mathcal{U}$, and $\mathcal{V}$, $\mathcal{W}$ and $\mathcal{T}$ are $G$-invariant. 
If $G$ is $\mathcal{U}$-moving and $\mathcal{T}$-factorizable with $\varphiragd^{\mathcal{T}}_G=\rightarrowr$, then $[G,G]$ is $\rightarrowr^2$-$\mathcal{W}$-moving. \end{prop} \betaegin{proof} (1) Suppose that $\mathcal{V}\partialrec\mathcal{U}$ and let $V\in\mathcal{V}$. Then there is $g\in G$ such that $g(V)\cap V=\emptyset$. By assumption there exist $V_1,\ldots, V_{\rightarrowr}\in\mathcal{V}$ and $g_1,\ldots, g_{\rightarrowr}\in G$ such that $g=g_r,\ldots,ots g_1$ and $\subsetupp(g_i)\subset V_i$, $i=1,\ldots, \rightarrowr$ (possibly $g_i=\id$). Let us consider two cases: $(a)$ $g_1(V)\cap V=\emptyset$ and $(b)$ $g_1(V)\cap V\neq\emptyset$. In case $(a)$ we have $ g_1(V)\cup V\subset\subsetupp(g_1)\subset U\in\mathcal{U}$. Choose $f_1\in G$ such that $f_1(U)\cap U=\emptyset$. Then $[g_1,f_1](V)=g_1(V)$ and we are done. In case $(b)$ $V\cup g_1(V)\subset U_1\in\mathcal{U}$ such that $f_1(U_1)\cap U_1=\emptyset$ for some $f_1\in G$. Again $[g_1,f_1](V)=g_1(V)$. Now we continue as before. In case $(g_2g_1)(V)\cap g_1(V)=\emptyset$ we get $V\cap\betaar g_2(V)=\emptyset$, where $\betaar g_2=g_1^{-1}g_2^{-1}g_1$, and we are done as in $(a)$. Otherwise, $(g_2g_1)(V)\cup g_1(V)\subset U_2\in\mathcal{U}$ such that $f_2(U_2)\cap U_2=\emptyset$ for some $f_2\in G$. Therefore, $[g_2,f_2](g_1(V))=(g_2g_1)(V)$. Proceeding by induction we get $$ ([g_{\rightarrowr},f_{\rightarrowr}],\ldots,ots [g_1,f_1])(V)=(g_{\rightarrowr},\ldots,ots g_1)(V)=g(V),$$ and the claim follows. (2) It follows from the hypotheses that $G$ is $\mathcal{V}$-factorizable and $\varphiragd^{\mathcal{V}}_G\leq \rightarrowr$. Moreover, as in the proof of Theorem 1.3 we get that $[G,G]$ is $\mathcal{W}$-factorizable and $\varphiragd^{\mathcal{W}}_{[G,G]}\leq \rightarrowr^2$. Hence by (1) $G$ is $\rightarrowr$-$\mathcal{V}$-moving. In particular $[G,G]$ is $\mathcal{V}$-moving. Then again (1) implies that $[G,G]$ is $\rightarrowr^2$-$\mathcal{W}$-moving. 
\end{proof} In the following version of Theorem 1.3 we avoid the assumption that $G$ is strongly $\mathcal{U}$-moving. \betaegin{thm} Let $X$ be a paracompact topological space, let $G\leq\mathcal{H}(X)$ with $\cl_G$ bounded, and let $\mathcal{U}$ be an open cover of $X$ such that \betaegin{enumerate} \item $G$ is $\mathcal{U}$-moving, and \item there are $G$-invariant open covers $\mathcal{V}$, $\mathcal{W}$, and $\mathcal{T}$ fulfilling the relation $\mathcal{T}\partialrec\mathcal{W}\partialrec\mathcal{V}\partialrec\mathcal{U}$, and such that $G$ is $\mathcal{T}$-factorizable and it is bounded with respect to $\varphirag^{\mathcal{T}}$.\end{enumerate} Then $[G,G]$ is uniformly perfect and $\cld_{[G,G]}\leq 4d\rightarrowr^4$ provided $\varphiragd^{\mathcal{T}}_G= \rightarrowr$ and $\cld_G=d$. \end{thm} \betaegin{proof} Let $\varphiragd_G^{\mathcal{T}}=\rightarrowr$. Then a fortiori $\varphiragd_G^{\mathcal{W}}\leq \rightarrowr$. In view of Proposition 3.7, $[G,G]$ is $\rightarrowr^2$-$\mathcal{W}$-moving. Let $[f,g]\in[G,G]$. By applying for $\mathcal{T}\partialrec\mathcal{W}$ the same reasoning as in the proof of Theorem 1.3 for $\mathcal{V}\partialrec\mathcal{U}$, $[f,g]$ can be written as a product of at most $\rightarrowr^2$ elements from $G^{\mathcal{W}}=\partialrod_{W\in\mathcal{W}}[G_W,G_W]$ of the form $[h_1,h_2]$, where $h_1,h_2\in G_W$ for some $W\in \mathcal{W}$. Consequently, every element of $[G,G]$ can be expressed as a product of at most $d\rightarrowr^2$ elements of $G^{\mathcal{W}}$ of the form $[h_1,h_2]$, where $h_1,h_2\in G_W$ for some $W\in\mathcal{W}$. Now take arbitrarily $W\in\mathcal{W}$ and $f,g\in G_W$. Since $[G,G]$ is $\rightarrowr^2$-$\mathcal{W}$-moving, there are $h_1,\ldots, h_{\rightarrowr^2},h'_1,\ldots, h'_{\rightarrowr^2}\in [G,G]$ such that for $h=[h_1,h'_1],\ldots,ots [h_{\rightarrowr^2},h'_{\rightarrowr^2}]$ we have $h(W)\cap W=\emptyset$ and, consequently, $[[h,f],g]=[f,g]$. 
It is easily seen that $[[h,f],g]$ is a product of $4\rightarrowr^2$ commutators of elements of $[G,G]$. Thus any element of $[G,G]$ is a product of at most $4d\rightarrowr^4$ commutators of elements of $[G,G]$. \end{proof} As a consequence of the above proof we have \betaegin{cor} If $G$ is $\mathcal{U}$-moving and $\mathcal{T}$-factorizable for some $G$-invariant open covers $\mathcal{V}$, $\mathcal{W}$, and $\mathcal{T}$ such that $\mathcal{T}\partialrec\mathcal{W}\partialrec\mathcal{V}\partialrec\mathcal{U}$, then $[G,G]$ is perfect. \end{cor} \subsetection{Simplicity and uniform simplicity of $[G,G]$} Let us recall Epstein's theorem. \betaegin{thm}\circte{ep}\langlebel{eps} Let $X$ be a paracompact space, let $G$ be a group of~homeomorphisms of $X$ and let $\,\mathcal{B}$ be a basis of open sets of $X$ satisfying the following axioms:\\ \noindent{Axiom 1.} If $U\in \mathcal{B}$ and $g\in G$, then $g(U)\in \mathcal{B}$.\\ \noindent{Axiom 2.} $G$ acts transitively on $\mathcal{B}$ (i.e. $\varphiorall\, U,V \in \mathcal{B} \; \exists\, g \in G : g(U)=V$).\\ \noindent{Axiom 3.} Let $g\in G$, $U\in \mathcal{B}$ and let $\mathcal{U}\subsetubset \mathcal{B}$ be a cover of $X$. Then there exist an integer $n$, elements $g_1,\dots ,g_n\in G$ and $V_1,\dots ,V_n\in \mathcal{U}$ such that $g=g_ng_{n-1}\dots g_1$, $\subsetupp (g_i)\subsetubset V_i$ and $$\subsetupp (g_i)\cup (g_{i-1}\dots g_1(\overline{U}))\neq X\; \timesext{for}\; 1\leqslant i\leqslant n.$$ Then $[G,G]$, the commutator subgroup of $G$, is simple. \end{thm} It is worth noting that Theorem 4.1 was an indispensable ingredient in the proofs of celebrated simplicity theorems on diffeomorphism groups and their generalizations (c.f. \circte{Thu}, \circte{Mat}, \circte{ban1}, \circte{ban2}, \circte{ha-ry}, \circte{ry1}). We say that $G\leq\mathcal{H}(X)$ acts \emph{transitively inclusively} (c.f. 
\circte{li}) on a topological basis $\mathcal{B}$ if for all $U,V\in\mathcal{B}$ there is $g\in G$ such that $g(U)\subset V$. It is not difficult to derive from Theorem 1.2 the following amelioration of Theorem 4.1, see \circte{li}. \betaegin{thm}\circte{li} Let $X$ be a paracompact space, let $G\leq \mathcal{H}(X)$ and let $\,\mathcal{B}$ be a basis of open sets of $X$ satisfying the following axioms:\\ \noindent{Axiom 1.} $G$ acts transitively inclusively on $\mathcal{B}$.\\ \noindent{Axiom 2.} $G$ is $\mathcal{U}$-factorizable (Def. 1.1) for all covers $\mathcal{U}\subset\mathcal{B}$.\\ Then $[G,G]$ is a simple group. \end{thm} Now we wish to provide conditions ensuring that the commutator group of a homeomorphism group is uniformly simple. Recall that a group $G$ is called \emph{uniformly simple} if there is $d>0$ such that for all $f,g\in G$ with $f\neq e$ we have $g=h_1fh_1^{-1},\ldots,ots h_sfh_s^{-1}$, where $s\leq d$ and $h_1,\ldots, h_s\in G$. Given a uniformly simple group $G$, denote by $\usd_G$ the least $d$ as above. Note that recently Tsuboi \circte{Tsu3} showed that $\mathcal{D}^{\infty}_c(M)$ is uniformly simple for many types of manifolds $M$. However, for some types of $M$ the problem is still unsolved. \betaegin{thm} Let $\mathcal{B}$ be a topological basis of $X$. Suppose that $G\leq\mathcal{H}(X)$ satisfies the following conditions:\betaegin{enumerate} \item $\cl_G$ is bounded; \item $G$ acts transitively inclusively on $\mathcal{B}$; \item there is an open cover $\mathcal{U}\partialrec\mathcal{B}$ such that $G$ is $\mathcal{U}$-factorizable and $G$ is bounded w.r.t. the fragmentation norm $\varphirag_G^{\mathcal{U}}$.\end{enumerate} Then the group $[G,G]$ is uniformly simple. Moreover, if $\cld_G=d$ and $\varphiragd_G^{\mathcal{U}}=\rightarrowr$ then $\usd_G\leq 4d\rightarrowr^2$. \end{thm} \betaegin{proof} In view of Theorem 4.2, $[G,G]$ is simple. Let $f,g\in [G,G]$ such that $f\neq e$. 
There is $x\in X$ with $f(x)\neq x$ and $B\in\mathcal{B}$ satisfying $f(B)\cap B=\emptyset$. First we assume that $g=[g_1,g_2]\in[G,G]$. Then, if $\fragd_G^{\mathcal{U}}=r$ then $g$ can be expressed as a product of at most $r^2$ elements of $G^{\mathcal{B}}=\prod_{U\in \mathcal{B}^G}[G_U,G_U]$ of the form $[h_1,h_2]$, where $h_1,h_2\in G_U$ for some $U\in\mathcal{B}^G$. Here $\mathcal{B}^G=\{g(U)|\; g\in G,\; U\in\mathcal{B}\}$. In fact, we repeat the use of (3.1) as in the proof of Theorem 3.1. Now if $\cld_G=d$, then every $g\in[G,G]$ is a product of at most $dr^2$ elements of $G^{\mathcal{B}}$ of the form $[h_1,h_2]$, where $h_1,h_2\in G_U$ for some $U\in\mathcal{B}^G$. Since $G$ acts transitively inclusively on $\mathcal{B}$ (and, consequently, on $\mathcal{B}^G$), any $[h_1,h_2]$ as above is conjugate to $[k_1,k_2]$ with $k_1,k_2\in G_B$. Then $[k_1,k_2]=[[f,k_1],k_2]$. Hence $[k_1,k_2]$ is a product of four conjugates of $f$ and $f^{-1}$. It follows that $g$ is a product of at most $4dr^2$ conjugates of $f$ and $f^{-1}$, as claimed. \end{proof} \begin{cor} If $G\leq\mathcal{H}(X)$ is factorizable and bounded, and $G$ acts transitively inclusively on some basis $\mathcal{B}$ of $X$, then $[G,G]$ is uniformly simple. \end{cor} In fact, in view of Proposition 1.4 \cite{bip} $[G,G]$ is then bounded in $\cl_G$, and the remaining hypotheses of Theorem 4.3 are fulfilled too. \subsection{Perfectness of $[\tilde G,\tilde G]$} Let $G$ be a topological group. By $\mathcal{P}G$ we will denote the totality of paths (or isotopies) $\gamma:I\rightarrow G$ with $\gamma(0)=e$ (where $I=[0,1]$). Then $\mathcal{P}G$ endowed with the pointwise multiplication is a topological group. Next, $\tilde G$ will stand for the universal covering group of $G$, that is $\tilde G=\mathcal{P}G/_{\sim}$, where $\sim$ denotes the relation of the homotopy rel. endpoints.
We introduce the following two operations on the space of paths $\mathcal{P} G$. Let $\mathcal{P}^{\star}G=\{ \gamma \in \mathcal{P}G: \gamma (t)=e \quad \text{for} \quad t \in [0,\frac{1}{2}] \}$. For all $\gamma \in \mathcal{P} G$ we define $\gamma^{\star}$ as follows: \begin{equation}\nonumber \gamma^{\star}(t)= \left\{ \begin{array}{lcl} e& \text{for} & t \in [0,\frac{1}{2}]\\ \gamma(2t-1)& \text{for}& t \in [\frac{1}{2},1] \end{array} \right. \end{equation} Then $\gamma^{\star}\in\mathcal{P}^{\star}G$ and the subgroup $\mathcal{P}^{\star}G$ is the image of $\mathcal{P} G$ by the mapping $\star:\gamma\mapsto \gamma^{\star}$. The elements of $\mathcal{P}^{\star}G$ are said to be \wyr{special} paths in $G$. Clearly, the group of special paths is preserved by conjugations, i.e. for every $g\in\mathcal{P} G$ we have $\conj_g(\mathcal{P}^{\star}G)\subset\mathcal{P}^{\star}G$, where $\conj_g(h)=ghg^{-1}$, $h\in\mathcal{P} G$. Next, let $\mathcal{P}^{\square}G=\{ \gamma \in \mathcal{P}G: \gamma (t)=\gamma(1) \quad \text{for} \quad t \in [\frac{1}{2}, 1] \}$. For all $\gamma \in \mathcal{P} G$ we define $\gamma^{\square}$ by: \begin{equation}\nonumber \gamma^{\square}(t)= \left\{ \begin{array}{lcl} \gamma(2t)& \text{for} & t \in [0,\frac{1}{2}]\\ \gamma(1)& \text{for}& t \in [\frac{1}{2},1] \end{array} \right. \end{equation} As before $\gamma^{\square}\in\mathcal{P}^{\square}G$ and the subgroup $\mathcal{P}^{\square}G$ coincides with the image of $\mathcal{P} G$ by the mapping $\square:\gamma\mapsto \gamma^{\square}$. \begin{lem}\label{zero} For any $\gamma \in \mathcal{P} G$ we have $\gamma \sim \gamma^{\star}$ and $\gamma\sim\gamma^{\square}$. \end{lem} \begin{proof} We have to find a homotopy $\Gamma$ rel. endpoints between $\gamma$ and $\gamma^{\star}$.
For all $s\in I$ define $\Gamma$ as follows: \begin{equation}\nonumber \Gamma(t,s)= \left\{ \begin{array}{lcl} e& \text{for} & t\in [0,\frac{s}{2}]\\ \gamma(\frac{2t-s}{2-s})& \text{for}& t\in [\frac{s}{2},1] \end{array} \right. \end{equation} It is easy to check that such $\Gamma$ fulfils all the requirements. Analogously the second claim follows. \end{proof} After these prerequisites let us return to homeomorphism groups. Let $X$ be a paracompact space and let $G\leq \mathcal{H}(X)$. Here $\mathcal{H}(X)$ is endowed with the compact-open topology and $G$ with the induced topology. If $f\in\mathcal{P} G$ then we define $\supp(f):=\bigcup_{t\in[0,1]}\supp(f_t)$. By $G_0$ we denote the subgroup of all $g\in G$ such that there is $f\in\mathcal{P} G$ such that $f_1=g$. $G_0$ is called the \emph{identity component} of $G$. Clearly $G_0\lhd G$. \begin{dff} We say that $G$ is \emph{isotopically factorizable} if for every open cover $\mathcal{U}$ and every isotopy $f\in\mathcal{P} G$ there are $U_1,\ldots, U_r\in\mathcal{U}$ and $f_1,\ldots, f_r\in\mathcal{P} G$ such that $f=f_1\dots f_r$ and $\supp(f_i)\subset U_i$ for all $i$. \end{dff} Clearly, if $G$ is isotopically factorizable then $G_0$ is factorizable. \emph{Proof of Theorem 1.4} For $f\in\mathcal{P} G$ by $\langle f\rangle_{\sim}$ denote the homotopy rel. endpoints class of $f$. Due to Proposition 3.2 and the assumption, for any $x\in X$ there are $g,\bar g\in[G_0,G_0]$ such that $[g,\bar g](x)\neq x$. Consequently, there exists an open cover $\mathcal{U}$ such that for all $U\in\mathcal{U}$ there are $g,\bar g\in[G_0,G_0]$ such that $[g,\bar g](U)\cap U=\emptyset$. Since $G_0\lhd G$, the same holds for $\mathcal{U}^G$ instead of $\mathcal{U}$. In view of Lemma 5.1, there are $f, \bar f\in\mathcal{P}^{\square}G$ such that $f_1=g$ and $\bar f_1=\bar g$.
Choose $\mathcal{V}$ such that $\mathcal{V}\prec\mathcal{U}$ (Lemma 3.3) and denote \begin{equation*}\mathcal{P} G^{\mathcal{U}}=\prod\limits_{U\in \mathcal{U}^G}[\mathcal{P} G_U,\mathcal{P} G_U].\end{equation*} First we notice that $[\mathcal{P} G,\mathcal{P} G]\subset \mathcal{P} G^{\mathcal{U}}$. As in the proof of Theorem 1.3 we use (3.1) for elements of $\mathcal{P} G$ and the fact that $\mathcal{P} G$ is $\mathcal{V}$-factorizable. Next, fix arbitrarily $U\in \mathcal{U}$ and let $f, \bar f\in\mathcal{P}^{\square}G$ be as above. Put $\hat f=[f,\bar f]$. Then $\hat f_t(U)\cap U=\emptyset$ for all $t\in [\frac{1}{2}, 1]$. We will show that for every $h, \bar h\in \mathcal{P} G_U$ the bracket $[\langle h\rangle_{\sim},\langle\bar h\rangle_{\sim}]$ is represented as a product of four commutators of elements of $[\tilde G,\tilde G]$. In view of Lemma 5.1 choose $k,\bar k\in\mathcal{P}^{\star}G$ such that $\langle k\rangle_{\sim}=\langle h\rangle_{\sim}$ and $\langle \bar k\rangle_{\sim}=\langle \bar h\rangle_{\sim}$. It follows that $[\hat fk\hat f^{-1}, \bar k]=\id$ and $[[\hat f,k],\bar k]=[k,\bar k]$. Therefore, $[\langle h\rangle_{\sim},\langle\bar h\rangle_{\sim}]$ is a product of four commutators of elements of $[\tilde G,\tilde G]$. \quad $\square$ \begin{rem} (1) Observe that one can formulate some results for $[\tilde G,\tilde G]$, analogous to Theorems 1.3, 3.5 and 3.8, by assuming that $G$ is isotopically factorizable, $G_0$ satisfies some conditions in Def. 3.1, $\cl_{\mathcal{P} G}$ is bounded, and $\mathcal{P} G$ is bounded in $\frag^{\mathcal{U}}$.
(2) Obviously, $\tilde G$ and $[\tilde G,\tilde G]$ are not simple, since $\pi(G)\lhd\tilde G$ and $[\pi(G),\pi(G)]\lhd[\tilde G,\tilde G]$, where $\pi(G)$ is the fundamental group of $G$. \end{rem} \subsection{The commutator subgroup of a diffeomorphism group on open manifold} Assume $r=0,1,\ldots,\infty$. Let a manifold $M$ be the interior of a compact, connected manifold $\bar M$ of class $C^r$ with non-empty boundary $\partial$. By a \wyr{product neighborhood} of $\partial$ we mean a closed subset $P=\partial\times[0,1)$ of $M$ such that $\partial\times[0,1]$ is embedded in $\bar M$, and $\partial\times\{1\}$ is identified with $\partial$. A \wyr{translation system} on the product manifold $N\times[0,\infty)$ (c.f. \cite{li1}, p.168) is a family $\{P_j\}_{j=1}^{\infty}$ of closed product neighborhoods of $N\times\{\infty\}$ such that $P_{j+1}\subset\intt P_j$ and $\bigcap_{j=1}^{\infty}P_j=\emptyset$. By a \emph{ball} we mean an open ball with its closure compact and contained in a chart domain. Let $G\leq\mathcal{D}^r(M)$, where $r=0,1,\ldots,\infty$. For a subset $U\subset M$ denote by $G(U)$ the subgroup of all elements of $G$ which can be joined with the identity by an isotopy in $G$ compactly supported in $U$. \begin{dff} Let $\mathcal{B}$ be a cover of $M$ by balls. $G$ is called \wyr{$\mathcal{B}$-factorizable} if for any $f\in G$ there are a product neighborhood $P=\partial\times[0,1)$, and a family of diffeomorphisms $g,g_1,\dots, g_{r}\in G$ such that: (1) $f=g g_1\cdots g_{r}$ with $g\in G(P)$ and $g_j\in G(B_j)$, where $B_j\in\mathcal{B}$ for $j=1,\dots, r$.
Furthermore, for any product neighborhood $P$ and for any $g\in G(P)$ there is a sequence of reals from $(0,1)$ tending to 1 \[0<a_1<\bar a_1<\bar b_1<b_1<a_2<\dots<a_n<\bar a_n<\bar b_n<b_n<\dots<1\] and $h\in G(P)$ such that (2) $h=g$ on $\bigcup_{n=1}^{\infty} \partial\times[\bar a_n,\bar b_n]$; (3) $h=\id$ if $g=\id$. Put $D_n:=\partial\times(a_n,b_n)$ and $D:=\bigcup_{n=1}^{\infty}D_n$. Then we also assume that: (4) $\supp(h)\subset D$; (5) for the resulting decomposition $h=h_1h_2\dots$ with respect to $D=\bigcup_{n=1}^{\infty}D_n$ we have $h_n\in G(D_n)$ for all $n$. $G$ is called \wyr{factorizable (in the wider sense)} if it is $\mathcal{B}$-factorizable for every cover $\mathcal{B}$ of $M$ by balls. Finally, if $G$ is factorizable, for any $f\in G$ we define $\Frag_{G}(f)$ as the smallest $r$ such that there are a family of balls $\{B_{j}\}$, a product neighborhood $P$ and a decomposition of $f$ as in (1). Then $\Frag_{G}$ is a conjugation-invariant norm on $G$, called the \wyr{fragmentation norm}. In fact, since $G\leq\mathcal{D}^r(M)$, any $g\in G$ does not change the ends of $M$ so that it takes (by conjugation) any decomposition as in (1) into another such a decomposition. Define $\Fragd_{G}:=\sup_{g\in G}\Frag_{G}(g)$, the diameter of $G$ in $\Frag_{G}$. Consequently, $\Frag_{G}$ is bounded iff $\Fragd_{G}<\infty$. \end{dff} \begin{rem} The reason for introducing Def. 6.1 is the absence of isotopy extension theorems or fragmentation theorems for some geometric structures. Roughly speaking, $G$ satisfies Def. 6.1 if all its elements can be joined with id by an isotopy in $G$ and appropriate versions of the above mentioned theorems are available. \end{rem} Let $\diff^r(M)$ (resp. $\diff^r_c(M)$) be the group of all $C^r$ diffeomorphisms of $M$ (resp. with compact support). To illustrate Def.
6.1 we consider the following \begin{exa} The group $\diff^r(\mathbb{R}^n)$ does not satisfy Def. 6.1. The reason is that in this case any $f\in\diff^r(\mathbb{R}^n)$ would be isotopic to id due to 6.1(1), which is not true. Next, any $f\in\diff^r_c(\mathbb{R}^n)$ is isotopic to the identity but the isotopy need not be compactly supported. It follows that $\diff^r_c(\mathbb{R}^n)$ does not fulfil Def. 6.1(1). The exception is $r=0$, when the Alexander trick is in use (see e.g. \cite{ed-ki}, p.70) and any compactly supported homeomorphism on $\mathbb{R}^n$ is isotopic to id by a compactly supported isotopy. It follows that $\diff^0_c(\mathbb{R}^n)$ is factorizable in view of \cite{ed-ki}. Let $C=\mathbb{R}\times\mathbb{S}^1$ be the annulus. Then there is the twisting number epimorphism $\diff^r_c(C)\rightarrow\mathbb{Z}$. It follows that $\diff^r_c(C)$ is unbounded in view of Lemma 1.10 in \cite{bip}. On the other hand, $\diff^r_c(C)$ is not factorizable. \end{exa} \begin{dff} \begin{enumerate} \item $G$ is said to be \wyr{determined on compact subsets} if the following is satisfied. Let $f\in\mathcal{D}^r(M)$. If there are a sequence of relatively compact subsets $U_1\subset\overline U_1\subset U_2\subset\dots\subset U_n\subset\overline{U}_n\subset U_{n+1}\subset\dots$ with $\bigcup U_n=M$ and a sequence $\{g_n\}$, $n=1,2,\ldots,$ of elements of $G$ such that $f|_{U_n}=g_n|_{U_n}$ for $n=1,2,\ldots,$ then we have $f\in G$. \item We say that $G$ \wyr{admits translation systems} if for any sequence $\{\lambda_n\}$, $n=0,1,\dots$, with $\lambda_n\in(0,1)$, tending increasingly to 1, there exists a $C^r$-mapping $[0,\infty)\ni t\mapsto f_t\in G$ supported in the interior of $P$, with $f_0=\id$, $f_j=(f_1)^j$ for $j=2,3,\dots$, and such that for the translation system $P_n=\partial\times[\lambda_n,1)$ one has $f_1(P_n)=P_{n+1}$ for $n=0,1,2,\dots$.
\end{enumerate} \end{dff} By using suitable isotopy extension theorems (c.f. \cite{ed-ki}, \cite{hir}, \cite{ban2}) we have \begin{prop} \cite{ry6} The groups $\mathcal{D}^r(M)$, $r=0,1,\ldots,\infty$, satisfy Definitions 6.1 and 6.4. \end{prop} The following result is essential to describe the structure of $[G,G]$. Though it was proved in \cite{ry6}, we give the proof of it for the sake of completeness. \begin{lem} If $G$ satisfies Definitions 6.1 and 6.4, then any $g\in G(P)$, where $P$ is a product neighborhood of $\partial$, can be written as a product of two commutators of elements of $G(P)$. \end{lem} \begin{proof} We may assume that $g\in G(\intt( P))$. Choose as in Def. 6.1 a sequence $0<a_1<\bar a_1<\bar b_1<b_1<a_2<\dots<a_n<\bar a_n<\bar b_n<b_n<\dots<1$ and $h\in G(P)$ such that conditions (2)-(5) in Def. 6.1 are fulfilled. Put $\bar h=h^{-1}g$, that is $g=h\bar h$. Then $\supp(\bar h)$ is in $(0,\bar a_1)\cup\bigcup_{n=1}^{\infty}(\bar b_n,\bar a_{n+1})$, and $\bar h=g$ on $[0, a_1]\cup\bigcup_{n=1}^{\infty}[ b_n, a_{n+1}]$. We show that $h$ is a commutator of elements in $G(\intt(P))$. Choose arbitrarily $\lambda_0\in (0,a_1)$ and $\lambda_n\in(b_n,a_{n+1})$ for $n=1,2,\dots$. In light of Def. 6.4(2) there exists an isotopy $[0,\infty)\ni t\mapsto f_t\in G$ supported in $\partial\times(0,1)$, such that $f_0=\id$ and $f_j(P_n)=P_{n+j}$ for $j=1,2,\dots$ and for $n=0,1,2,\dots$, where $P_n=\partial\times[\lambda_n,1)$ for $n=0,1,\dots$. Now define $\tilde h\in G(\intt(P))$ as follows. Set $\tilde h=h$ on $\partial\times[0,\lambda_1)$, and $\tilde h=h(f_1hf_1^{-1})\dots(f_nhf_n^{-1})$ on $\partial\times[0,\lambda_{n+1})$ for $n=1,2,\dots$. Here $f_n=(f_1)^n$.
Then $\tilde h|_{\partial\times[0,\lambda_n)}$ is a consistent family of functions, and $\tilde h=\bigcup_{n=1}^{\infty} \tilde h|_{\partial\times[0,\lambda_n)}$ is a local diffeomorphism. It is easily checked that $\tilde h$ is a bijection. Due to Def. 6.4(1) $\tilde h\in G(\intt (P))$. By definition we have the equality $\tilde h=hf_1\tilde h f_1^{-1}$. It follows that $h=\tilde h f_1 \tilde h^{-1}f_1^{-1}=[\tilde h,f_1]$. Similarly, $\bar h$ is a commutator of elements of $G(P)$. The claim follows. \end{proof} \begin{dff} Let $G$ satisfy Def. 6.1. Then \begin{enumerate} \item the symbol $G_c$ stands for the subgroup of all $f\in G$ such that there is a decomposition $f=gg_1\dots g_{r}$ as in Def. 6.1(1) with $g=\id$; \item $G$ is said to be \emph{localizable} if for any $f\in G$ and any compact $C\subset M$ there is $g\in G_c$ such that $f=g$ on $C$. \end{enumerate} \end{dff} Clearly $G_c$ is a subgroup of the group of compactly supported members of $G$. However, the converse is not true: for $G=\mathcal{D}^r(C)$ take a compactly supported diffeomorphism of $C$ with nonzero twisting number (Example 6.3). For the reason of introducing localizable groups, see Remark 6.2. It follows from the isotopy extension theorems (\cite{ed-ki}, \cite{hir}) that $\mathcal{D}^r(M)$ is localizable. \begin{prop} Let $\frag_G=\frag_G^{\mathcal{B}}$, where $\mathcal{B}$ is the family of all balls on $M$ (c.f. section 2). We have $\fragd_{G_c}=\Fragd_{G_c}$. \end{prop} \begin{proof} If $g\in G_c$ then $\Frag_{G_c}(g)\leq\frag_{G_c}(g)$, since any fragmentation of $g$ supported in balls is of the form from Def. 6.1(1). On the other hand, if $g=g_0g_1\dots g_{r'}$ with $r'<r=\frag_{G_c}(g)$ is as in 6.1(1), then $g_0^{-1}g\in G_c$ and $\frag_{G_c}(g_0^{-1}g)\leq r'$.
Thus, $\fragd_{G_c}=\Fragd_{G_c}$. \end{proof} For any $M$ as above a theorem of McDuff \cite{md} states that $\mathcal{D}^r(M)$ is perfect. We generalize it as follows. \begin{thm} Let $M$ be an open $C^r$-manifold ($r=0,1,\ldots,\infty$) such that $M=\intt\bar M$, where $\bar M$ is a compact manifold. Suppose that $G\leq\mathcal{D}^r(M)$ satisfies Definitions 6.1, 6.4 and 6.7, and that $G_c$ is non-fixing. Then $[G,G]$ is perfect. \end{thm} \begin{proof} In view of Def. 6.1 for an arbitrary $f\in G$ we can write $f=gh$, where $g\in G(P)$ and $h\in G_c$. Let $[f_1,f_2]\in [G,G]$ with $f_1=g_1h_1$ and $f_2=g_2h_2$ as above. Since $G_c$ is localizable we have $[g_1,h_2], [g_2,h_1]\in [G_c,G_c]$. Due to Lemma 6.6 $G(P)$ is perfect, that is $g_1,g_2\in[G,G]$. It follows from (3.1) that $[f_1,f_2]=\varphi[k_1,k_2][k'_1,k'_2][k''_1,k''_2]$, where $\varphi\in[[G,G],[G,G]]$ and $k_1,k_2,k'_1,k'_2,k''_1,k''_2\in G_c$. But by Theorem 1.2 $[G_c,G_c]$ is also perfect. It follows that $[G,G]$ is perfect too. \end{proof} \begin{thm} Under the assumptions of Theorem 6.9, if $\cl_{G_c}$ and $\frag_G^{\mathcal{V}}$ are bounded, where $\mathcal{V}$ is an arbitrary open cover with $\mathcal{V}\prec\mathcal{B}$, then $[G,G]$ is uniformly perfect. \end{thm} \begin{proof} By Theorem 6.9, $[G,G]$ is perfect. In view of Proposition 3.2, $[G,G]$ is 1-non-fixing. Due to this fact and Lemma 3.3 we can find an open cover $\mathcal{U}$ such that $\mathcal{U}\prec\mathcal{B}$ and such that for each $U\in\mathcal{U}$ there are $h_1,h_2\in[G,G]$ with $U\cap [h_1,h_2](U)=\emptyset$. We denote \begin{equation*}G^{\mathcal{U}}=\prod\limits_{U\in \mathcal{U}^G}[G_U,G_U].\end{equation*} Here $\mathcal{U}^G:=\{g(U):\; g\in G \text{ and } U\in\mathcal{U}\}$. Then also for each $U\in\mathcal{U}^G$ there are $h_1,h_2\in[G,G]$ with $U\cap [h_1,h_2](U)=\emptyset$.
Assume that $\mathcal{V}\prec\mathcal{U}$ and $\fragd^{\mathcal{V}}_G=r$. Let $[f_1,f_2]\in[G,G]$. As in the proof of Theorem 6.9 we have \[[f_1,f_2]=[g_1,g_2][h_1,h_2][h'_1,h'_2][h''_1,h''_2],\] where $g_1,g_2\in G(P)$ and $h_1,\ldots, h''_2\in G_c$. By Lemma 6.6 and (3.1), $[g_1,g_2]$ is a product of four commutators of elements of $[G,G]$. Next, any $[h_1,h_2]\in[G_c,G_c]$ can be expressed as a product of at most $r^2$ elements of $G^{\mathcal{U}}$ of the form $[k_1,k_2]$, where $k_1,k_2\in G_U$ for some $U$. In fact, it is a consequence of (3.1) and the fact that $\mathcal{V}\prec\mathcal{U}$. Now if $\cld_{G_c}=d$, then every element of $[G_c,G_c]$ is a product of at most $dr^2$ elements of $G^{\mathcal{U}}$ of the form $[k_1,k_2]$, where $k_1,k_2\in G_U$ for some $U$. Finally, fix arbitrarily $U\in \mathcal{U}^G$. We wish to show that for every $k_1,k_2\in G_U$ the bracket $[k_1,k_2]$ can be represented as a product of four commutators of elements of $[G,G]$. By assumption on $\mathcal{U}^G$, there are $h_1,h_2\in [G,G]$ such that $h(U)\cap U=\emptyset$ for $h=[h_1,h_2]$. It follows that $[hk_1h^{-1}, k_2]=\id$. Therefore, $[[h,k_1],k_2]=[k_1,k_2]$. Observe that indeed $[[h,k_1],k_2]$ is a product of four commutators of elements of $[G,G]$. Thus any element of $[G,G]$ is a product of at most $4d(1+r^2)$ commutators of elements of $[G,G]$. \end{proof} \begin{cor} Suppose that the assumptions of Theorem 6.9 are fulfilled and that $G$ is bounded. Then $[G,G]$ is uniformly perfect. \end{cor} In fact, $\cl_G$ is bounded in view of Proposition 1.4 in \cite{bip}, and $\frag_{G_c}$ is bounded in view of Proposition 6.8. \begin{rem} By using Theorems 3.5 and 3.8, Lemma 6.6 and (3.1) we can obtain some estimates on $\cl_{[G,G]}$. \end{rem} \subsection{Examples and open problems} Let $M$ be a paracompact manifold, possibly with boundary, of class $C^{r}$, $r=0,1,\dots,\infty$.
\textbf{1.} Let $M$ be a manifold with a boundary, $\mathrm{dim}(M)=n\geqslant 2$. Then $G=\mathcal{D}^{r}_{c}(M)$, where $r=0,1,\dots ,\infty$, $r\neq n$ and $r\neq n+1$, is perfect (\cite{ry2}, \cite{ry}) and non-simple. Recently, Abe and Fukui \cite{af09}, using results of Tsuboi \cite{Tsu2} and their own methods, showed that $G$ is also uniformly perfect for many types of $M$. In the remaining cases, where we do not know whether $G$ is perfect or uniformly perfect, our results are of use. \textbf{2.} Let $N$ be a submanifold of $M$ of class $C^r$, $r=0,1,\dots ,\infty$, and $\dim N\geq 1$. It was proved in \cite{ry3} that $G_c$, where $G=\mathcal{D}^r(M,N)$ is the identity component of the group of $C^r$-diffeomorphisms preserving $N$, is perfect. The same was proved in the Lipschitz category in \cite{af}. All these groups are clearly non-simple. It follows from \cite{af09} that $G_c$ is also uniformly perfect for many types of pairs $(M,N)$. Several results of the present paper give new information on the structure of $G$ and $G_c$. \textbf{3.} Given a foliation $\mathcal{F}$ of dimension $k$ on a manifold $M$, let $G=\mathcal{D}^{r}(M,\mathcal{F})$ be the identity component group of all diffeomorphisms of class $C^r$ taking each leaf to itself. Due to results of Rybicki \cite{ry1}, Fukui and Imanishi \cite{fi} and Tsuboi \cite{Tsu1}, the group $G_c$ is perfect provided $r=0,1,\dots ,k$ or $r=\infty$. It is very likely that for large (but finite) $r$ the group $\mathcal{D}_c^{r}(M,\mathcal{F})$ is not perfect (c.f. a discussion on this problem in \cite{le-ry}). It is a highly non-trivial problem whether $G_c$ is uniformly perfect. Several results of the present paper apply to $G_c$ or $G$. \textbf{4.} Let $\mathcal{F}$ be a foliation of dimension $k$ on the Lipschitz manifold $M$ and let $G=\mathrm{Lip}(M,\mathcal{F})$ be the group of all Lipschitz homeomorphisms taking each leaf of $\mathcal{F}$ to itself.
In view of results of Fukui and Imanishi \cite{fi1}, the group $G_c$ is perfect. Further results may be concluded from our paper. \textbf{5.} Assume now that $\mathcal{F}$ is a singular foliation, i.e. the dimensions of its leaves need not be equal (see \cite{st}). One can consider the group of~leaf-preserving diffeomorphisms of $\mathcal{F}$, $G=\mathcal{D}^{\infty}(M,\mathcal{F})$. However, it is hopeless to obtain any perfectness results for this group. On the other hand, Theorem 1.2 still works in this case and we know that the commutator group $[G_c,G_c]$ is perfect. We do not know whether $[G_c,G_c]$ is uniformly perfect. \textbf{6.} Let us recall the definition of Jacobi manifold (see \cite{dlm}). Let $M$ be a $C^{\infty}$ manifold, let $\mathfrak{X}(M)$ be the Lie algebra of the vector fields on $M$ and denote by $C^{\infty}(M,\mathbb{R})$ the algebra of $C^{\infty}$ real-valued functions on $M$. A \emph{Jacobi structure} on $M$ is a pair $(\Lambda, E)$, where $\Lambda$ is a 2-vector field and $E$ is a vector field on $M$ satisfying \[[\Lambda, \Lambda]=2E \wedge \Lambda,\quad [E,\Lambda]=0.\] Here, $[\, ,\,]$ is the Schouten-Nijenhuis bracket. The manifold $M$ endowed with the Jacobi structure is called a \emph{Jacobi manifold}. If $E=0$ then $(M,\Lambda)$ is a Poisson manifold. Observe that the notion of Jacobi manifold generalizes also symplectic, locally conformal symplectic and contact manifolds. Now, let $(M,\Lambda,E)$ be a Jacobi manifold. A diffeomorphism $f$ on $M$ is called a \emph{hamiltonian diffeomorphism} if, by definition, there exists a hamiltonian isotopy $f_t$, $t\in [0,1]$, such that $f_0=\id$ and $f_1=f$. An isotopy $f_t$ is \emph{hamiltonian} if the corresponding time-dependent vector field $X_t=\dot{f}_t\circ f_{t}^{-1}$ is hamiltonian.
Let $G=\mathcal{H}(M,\Lambda,E)$ be the compactly supported identity component of all hamiltonian diffeomorphisms of class $C^{\infty}$ of $(M,\Lambda,E)$. It is not known whether $G$ is perfect, even in the case of regular Poisson manifold (\cite{ry4}). However, by Theorem 1.2 the commutator group $[G,G]$ is perfect. It is an interesting and difficult problem to answer when $[G,G]$ is uniformly perfect. In the transitive cases, the compactly supported identity components of the hamiltonian symplectomorphism group and the contactomorphism group are simple (\cite{ban1}, \cite{ha-ry}, \cite{ry5}). In general, $G$ and $\tilde G$ are not uniformly perfect in the symplectic case, see \cite{bip}. An obstacle for the uniform simplicity of the first group is condition (2) in Theorem 4.3. On the other hand, the contactomorphism group satisfies this condition and it is likely that for some contact manifolds it is uniformly simple. \begin{thebibliography}{99} \bibitem{af} K.Abe, K.Fukui, \emph{On the structure of the group of Lipschitz homeomorphisms and its subgroups}, J. Math. Soc. Japan 53(2001), 501-511. \bibitem{af09} K. Abe, K. Fukui, Commutators of $C^{\infty}$-diffeomorphisms preserving a submanifold, \wyr{J. Math. Soc. Japan} 61(2009), 427-436. \bibitem{ban1} A. Banyaga, \emph{Sur la structure du groupe des diff\'eomorphismes qui pr\'eservent une forme symplectique}, Comment. Math. Helv. 53 (1978), 174-227. \bibitem{ban2} A.Banyaga, \emph{The structure of classical diffeomorphism groups}, Mathematics and its Applications, 400, Kluwer Academic Publishers Group, Dordrecht, 1997. \bibitem{bip} D.Burago, S.Ivanov and L.Polterovich, \emph{Conjugation invariant norms on groups of geometric origin}, Advanced Studies in Pure Math. 52, Groups of Diffeomorphisms (2008), 221-250. \bibitem{dlm} P. Dazord, A. Lichnerowicz, C.M. Marle, \emph{Structure locale des vari\'et\'es de Jacobi}, J. Math. Pures et Appl. 70(1991), 101-152.
\bibitem{ed-ki} R.D.Edwards, R.C.Kirby, \emph{Deformations of spaces of imbeddings}, Ann. Math. 93 (1971), 63-88. \bibitem{ep} D.B.A.Epstein, \emph{The simplicity of certain groups of homeomorphisms}, Compositio Mathematica 22, Fasc.2 (1970), 165-173. \bibitem{fi} K.Fukui, H.Imanishi, \emph{On commutators of foliation preserving homeomorphisms}, J. Math. Soc. Japan, 51-1 (1999), 227-236. \bibitem{fi1} K.Fukui, H.Imanishi, \emph{On commutators of foliation preserving Lipschitz homeomorphisms}, J. Math. Kyoto Univ., 41-3 (2001), 507-515. \bibitem{ha-ry} S. Haller, T. Rybicki, \wyr{On the group of diffeomorphisms preserving a locally conformal symplectic structure}, Ann. Global Anal. and Geom. 17 (1999), 475-502. \bibitem{hir} M. W. Hirsch, \wyr{Differential Topology}, Graduate Texts in Mathematics 33, Springer 1976. \bibitem{le-ry} J.Lech, T.Rybicki, \emph{Groups of $C^{r,s}$-diffeomorphisms related to a foliation}, Banach Center Publ.; vol. 76 (2007), 437-450. \bibitem{li1} W. Ling, Translations on $M\times\mathbb{R}$, \wyr{Amer. Math. Soc. Proc. Symp. Pure Math.} 32, 2 (1978), 167-180. \bibitem{li} W.Ling, \emph{Factorizable groups of homeomorphisms}, Compositio Mathematica, 51 no. 1 (1984), p. 41-50. \bibitem{Mat} J. N. Mather, \wyr{Commutators of diffeomorphisms}, Comment. Math. Helv. I 49 (1974), 512-528; II 50 (1975), 33-40; III 60 (1985), 122-124. \bibitem{md} D. McDuff, The lattice of normal subgroups of the group of diffeomorphisms or homeomorphisms of an open manifold, \wyr{J. London Math. Soc.} (2), 18(1978), 353-364. \bibitem{ry1} T.Rybicki, \emph{The identity component of the leaf preserving diffeomorphism group is perfect}, Monatsh. Math. 120 (1995), 289-305. \bibitem{ry} T.Rybicki, \emph{Commutators of homeomorphisms of a manifold}, Univ. Iagel. Acta Math. 33(1996), 153-160. \bibitem{ry2} T.Rybicki, \emph{Commutators of diffeomorphisms of a manifold with boundary}, Ann. Pol. Math. 68, No.3 (1998), 199-210.
\bibitem{ry3} T.Rybicki, \emph{On the group of diffeomorphisms preserving a submanifold}, Demonstratio Math. 31(1998), 103-110. \bibitem{ry4} T.Rybicki, \wyr{On foliated, Poisson and Hamiltonian diffeomorphisms}, Diff. Geom. Appl. 15(2001), 33-46. \bibitem{ry5} T.Rybicki, \emph{Commutators of contactomorphisms}, Advances in Math. (2010), doi:10.1016/j.aim.2010.06.004 \bibitem{ry6} T.Rybicki, \emph{Boundedness of certain automorphism groups of an open manifold}, arXiv 0912.4590v3, 2009 \bibitem{Sch} P.A.Schweitzer, \wyr{Normal subgroups of diffeomorphism and homeomorphism groups of $\mathbb{R}^n$ and other open manifolds}, preprint (2009). \bibitem{st} P.Stefan, \emph{Accessible sets, orbits and foliations with singularities}, Proc. London Math. Soc. {29} (1974), 699-713. \bibitem{Thu} W.Thurston, \emph{Foliations and groups of diffeomorphisms}, Bull. Amer. Math. Soc. 80 (1974), 304-307. \bibitem{Tsu1} T.Tsuboi, On the group of foliation preserving diffeomorphisms, (ed. P. Walczak et al.) \wyr{Foliations 2005}, World Scientific, Singapore (2006), 411-430. \bibitem{Tsu2} T.Tsuboi, \emph{On the uniform perfectness of diffeomorphism groups}, Advanced Studies in Pure Math. 52, Groups of Diffeomorphisms (2008), 505-524. \bibitem{Tsu3} T. Tsuboi, \emph{On the uniform simplicity of diffeomorphism groups}, Differential Geometry, World Sci. Publ., Hackensack NJ, 2009, 43-55. \end{thebibliography} \end{document}
\begin{document} \title{Collatz Conjecture: Patterns Within} \author{H. Nelson Crooks, Jr. \\ Chigozie Nwoke} \date{May 2022} \maketitle \begin{abstract} Collatz Conjecture sequences increase and decrease in seemingly random fashion. By identifying and analyzing the \emph{forms} of numbers, we discover that Collatz sequences are governed by very specific, well-defined rules, which we call \emph{cascades}. \end{abstract} \tableofcontents \listoftables \listoffigures \section{Introduction} \subsection{Background and Approach} The Collatz Conjecture was posed by Lothar Collatz c.~1937. The Conjecture asks whether the iterative algorithm \[ C_{i+1}= \begin{cases} 3C_{i}+1, & \text{for } C_{i} \text{ odd} \\ C_{i}/2, & \text{for } C_{i} \text{ even} \end{cases} \] converges to 1 for all natural starting numbers $C_{0}$. The Collatz Conjecture has been checked by computer and found to reach 1 for all numbers $C_{0} \leq 2^{68}\approx 2.95 \times 10^{20}$ [1], but has not been proven to be true for all natural numbers. In this paper, we develop a unique method of describing natural numbers and analyzing their behavior under the Collatz algorithm. This methodology allows us to draw conclusions about large groups of numbers. We seek to understand the structure of the random increase and decrease of a Collatz sequence. We will identify a number of interesting patterns in Collatz sequences we have found during our analysis. \subsection{Hypothesis} The sequence for every number iterates through a value that is smaller than the initial value. The iteration after which a value smaller than the starting number is reached for the first time is called the \emph{stopping time}. If we can prove that all natural numbers have a finite stopping time, we can prove by induction that the Collatz Conjecture is true. We analyze Collatz sequences for groups of starting numbers using forms, cascades, and columns.
\section{Number Forms} \subsection{Forms} This analysis of the Collatz Conjecture is based on the concept of number \emph{forms}, which consist of a \emph{base} (a natural number) times an \emph{index} (a whole number) plus an \emph{offset} (a whole number). We define \begin{table}[!htbp] \begin{tabular}{l l l} \emph{standard form} as & $C=2^{p}n+2^{p-1}-1$; & $p \in N,n\in W$\\ \emph{non-standard form} as & $C=2^{p}n+2^{p}-1$; & $p \in N,n \in W$\\ \emph{composite form} as & $C=2^{p}n+f$; & $p \in N, n \in W, f \in W,$\\ & & $f <2^{p},f \neq 2^{p-1}-1,f \neq 2^{p}-1$ \\ \emph{mixed form} as & $ C=kn+f$; & $k \in N, k\neq \text{a power of 2}, n \in W,$\\ & &$ f \in W, f<k.$\\ \end{tabular} \end{table} \textbf{Lemma 1:} All natural numbers can be expressed in standard form by the equation: \begin{table}[!htbp] \centering \begin{tabular}{l l r} $C=2^{p} n+2^{p-1}-1;$ & $p \in N, n \in W (N \cup \{0\})$ & (1)\\ \end{tabular} \end{table} This equation was developed independently by the authors, and was previously identified by Mehendale [5] and Cadogan [2], and also describes even numbers when p=1. See Appendix I for proof of Lemma 1. The standard form of a natural number is determined by the value of p in equation (1). Small values of p and corresponding bases, offsets, and standard forms are shown in Table \ref{table:StdForms}. \begin{table}[!htbp] \caption{Standard Number Forms} \centering \begin{tabular}{c c c c} \hline\hline \rule{0pt}{4mm} p & Base &Offset & Standard Form \\ \hline 1 & 2 & 0 & 2n \\ 2 & 4 & 1 & 4n+1 \\ 3 & 8 & 3 & 8n+3 \\ 4 & 16 & 7 & 16n+7 \\ 5 & 32 & 15 & 32n+15 \\ \hline \end{tabular} \label{table:StdForms} \end{table} Cadogan identified standard form numbers in Table 1 of [2] with\newline $A_{1}=4n+1, A_{2}=8n+3, etc.$ We define a \emph{\#-form} as a number for which the $base = \#$, e.g., a 4-form number is of the form $4n+1$. 
We call the sequence of form numbers (the bases) for natural numbers (4, 2, 8, 2, 4, 2, 16, 2, 4, 2, 8, 2, 4, 2, 32, …) a \emph{form pattern}. We’ll see this form pattern again later in our analysis. See Appendix II for a method to determine the standard form of a number. \textbf{Lemma 2:} Every natural number can be described in standard form by a unique combination of p and n. Conversely, the combination of p and n for each natural number is unique. See Appendix III for proof of Lemma 2. \subsection{Collatz Steps} The Collatz algorithm requires that if a number is odd we multiply the number by 3 and add 1. We call this an \emph{odd step}. The result of an odd step is always an even natural number of the mixed form $3t+1, t \in \text{\emph{odd }} N$. We note that the result of an odd step cannot be a multiple of 3. If a number is even, we divide the number by two. We call this an \emph{even step}. The result of an even step can be any natural number, odd or even. Because multiples of 3 cannot result from an odd step, multiples of 3 in a Collatz sequence can only be the first number(s) of the sequence. \subsection{Collatz Cycles} Since an odd step must always be followed by an even step, we now define an \emph{odd cycle} as an odd step followed by an even step: multiply by 3 and add 1, then divide by 2. We define an \emph{even cycle} as an even step. A \emph{general cycle} is an odd or even cycle as appropriate. We further define a \emph{\#-cycle} as a cycle that starts with a number of the form \emph{\#}. For example, a 4-cycle starts with a number that is a 4-form number, such as $13 (= 4*3 +1)$. All odd cycles increase the value of a number by slightly more than half. A 2-cycle (the only even cycle) reduces the value of a number by half (we call it a 2-cycle to distinguish it from the even step at the end of all odd cycles). 
\section{Cascades} \subsection{Analysis of a general cycle} We now analyze what happens to an odd number of the standard form \newline $C=2^{p} n+2^{p-1}-1$ when a general cycle is applied. $C_{i}, C_{i+1}, C_{i+2},\dotsc$ represent successive steps in the Collatz algorithm. \begin{table} [!htbp] \caption{Analysis of a General Cycle} \centering \begin{tabular}{l l l l} \hline\hline \rule{0pt}{4mm} & $C=2^{p} n+2^{p-1}-1$ & $\text{for odd } C, p \in N,$ & (1) \\ & & $p \geq 2, n \in W$ &\\ Step 1 & $C_{i} \text{ is odd by definition here } \dotsc$ & so apply an odd step & \\ & $C_{i+1}=3C_{i}+1$ & & \\ & $C_{i+1}=3(2^{p}n+2^{p-1}-1)+1$ & Substitute (1) for $C_{i}$ & \\ & $C_{i+1}=3(2^{p}n)+3(2^{p-1})-3(1)+1$ & Distribute the 3 & \\ & $C_{i+1}=3(2^{p}n)+3(2^{p-1})-2$ & Combine like terms & \\ & $C_{i+1}=2(3(2^{p-1}n)+3(2^{p-2})-1)$ & Factor out 2: & (2) \\ & & $\text{after 1 iteration }=C_{i+1}$ & \\ Step 2 & $C_{i+1} \text{ is even, }\dotsc$ & so apply an even step & \\ & $C_{i+2}=C_{i+1}/2$ & & \\ & $C_{i+2}=2(3(2^{p-1}n)+3(2^{p-2})-1)/2$ & Substitute (2) for $C_{i+1}$ & \\ & $C_{i+2}=3(2^{p-1}n)+3(2^{p-2})-1$ & Cancel the 2's & \\ & Manipulating the result $\dotsc$ & & \\ & $C_{i+2}=3(2^{p-1}n)+2(2^{p-2})+1(2^{p-2})-1$ & Split $3(2^{p-2})$ into & \\ & & $2(2^{p-2}) + 1(2^{p-2})$ & \\ & $C_{i+2}=3(2^{p-1}n)+1(2^{p-1})+1(2^{p-2})-1$ & Move the 2 into the & \\ & & exponent & \\ & $C_{i+2}=2^{p-1}(3n+1)+2^{p-2}-1$ & Combine like terms; & (3) \\ & & after 2 iterations $=C_{i+2}$ & \\ \hline \end{tabular} \label{table:GenCycle} \end{table} \textbf{Applying a general cycle to an odd number results in a number of the next lower form (4-form is lower than 8-form) with the index of the initial number multiplied by 3 and increased by 1!} (Cadogan identified this as Theorem 1 [2]). See Appendix IV for an example. 
\subsection{Cascades} Because we evaluated a general cycle, it is clear that a subsequent cycle will reduce the form and increase the value of the result of the previous cycle. This will continue until a 2-form number is reached (an even number) at which time a 2-cycle will divide the number by 2. We call this pattern of successive cycles from the starting form through and including the 2-cycle a \emph{cascade}. We further identify a \emph{\#-cascade} as a cascade that starts with a \#-form number. Once a cascade starts, the sequence is rigidly defined by alternating odd and even steps until a 2-form number is reached, at which time another even step is required. Each cascade is comprised of $2p-1$ steps (p-1 odd steps and p even steps), where p is the power of 2 in the base of the starting number. We see that the base of the number at each step of the cascade is even through the 4-cycle of the cascade and odd after the final 2-cycle. Thus, the odd/even parity of the result of the cascade is indeterminate and depends on the parity of n. Based on values of n, half of all cascades result in an even number and half in an odd number. The result of a cascade with an odd value of p has the same parity as n. The result of a cascade with an even value of p has the opposite parity of n. \textbf{The seemingly random behavior of a Collatz sequence can now be seen as successive odd cascades interspersed with stand-alone 2 cascades.} (We call them \emph{2-cascades}, even though they involve only an even step, to distinguish them from the \emph{2-cycle} at the end of each higher-form cascade.) The distinctive sawtooth increase/decrease portions of a plot of a Collatz sequence are the visual representations of cascades. \textbf{The specific form (values of p and n) of the number that results from a cascade determines whether the sequence will then go up or down, and by how much. 
This, we believe, is the key to understanding the Collatz Conjecture.} \subsection{Cascade Transforms} Since the steps in a cascade are rigidly constrained, we can shortcut our sequence calculations by substituting the result of a \#-cascade for the calculations of the individual cycles within the cascade. Table \ref{table:CascadeTransforms} shows the cascade transforms for some small standard forms. \begin{table}[!htbp] \caption{Cascade Transforms} \centering \begin{tabular}{c c c c} \hline\hline \rule{0pt}{4mm} p & Base & Standard Form & Transform \\ \hline 1 & 2 & 2n & n \\ 2 & 4 & 4n+1 & 3n+1 \\ 3 & 8 & 8n+3 & 9n+4 \\ 4 & 16 &16n+7 & 27n+13 \\ 5 & 32 & 32n+15 & 81n+40 \\ 6 & 64 & 64n+31 & 243n+121 \\ 7 & 128 &128n+63 &729n+364 \\ \hline \end{tabular} \label{table:CascadeTransforms} \end{table} Note the pattern in these cascade transforms; $2^{p}n + 2^{p-1} - 1$ transforms to $3^{p-1}n + (3^{p-1} - 1)/2.$ Cascade transforms are always mixed-form numbers, where the base is not a power of 2. \subsection{Form Patterns in Cascade Transforms} When we analyze the form pattern of cascade transforms for 8-cascades we see a familiar pattern. 
\begin{table}[!htbp] \caption{Forms of 8-Cascade Transforms} \centering \begin{tabular}{c c c c c} \hline\hline \rule{0pt}{4mm} & Input & Transform & & \\ & Standard & Mixed & Standard & Standard \\ Index & Form & Form & Form of & Base of \\ n & 8n+3 & 9n+4 & Transform & Transform \\ \hline 0 & 3 & 4 & 2(2) & 2 \\ 1 & 11 & 13 & 4(3)+1 & 4 \\ 2 & 19 & 22 & 2(11) & 2 \\ 3 & 27 & 31 & 64(0)+31 & 64 \\ 4 & 35 & 40 & 2(20) & 2 \\ 5 & 43 & 49 & 4(12)+1 & 4 \\ 6 & 51 & 58 & 2(29) & 2 \\ 7 & 59 & 67 & 8(8)+3 & 8 \\ 8 & 67 & 76 & 2(38) & 2 \\ 9 & 75 & 85 & 4(21)+1 & 4 \\ 10 & 83 & 94 & 2(47) & 2 \\ 11 & 91 & 103 & 16(6)+7 & 16 \\ 12 & 99 & 112 & 2(56) & 2 \\ 13 & 107 & 121 & 4(30)+1 & 4 \\ 14 & 115 & 130 & 2(65) & 2 \\ 15 & 123 & 139 & 8(17)+3 & 8 \\ 16 & 131 & 148 & 2(74) & 2 \\ 17 & 139 & 157 & 4(39)+1 & 4 \\ \hline \end{tabular} \label{table:8CascadeTransforms} \end{table} The form pattern of the 8-transforms (2, 4, 2, 64, 2, 4, 2, 8, 2, 4, 2, 16, 2, 4, 2, 8, 2, 4,…) looks like the form pattern for standard forms of sequential numbers, shifted by some amount. The form patterns of cascade transforms match the form patterns of natural numbers for small standard forms when the index is shifted by values shown in Table \ref{table:PatternShifts}. This behavior appears to apply to all 8- and higher transforms. 
\begin{table}[!htbp] \caption{Matching Form Patterns in Cascade Transforms} \centering \begin{tabular}{ c c c c c c} \hline\hline \rule{0pt}{4mm} In & From & To & Cascade & From &To \\ Form & Index \# & Index \# & Transform & Index \# & Index \# \\ \hline 4n+1 & 0 & 524,286 & 3n+1 & 174,763 & 699,049 \\ 8n+3 & 0 & 524,286 & 9n+4 & 407,780 & 932,066 \\ 16n+7 & 0 & 524,286 & 27n+13 & 135,927 & 660,213 \\ 32n+15 & 0 & 779,928 & 81n+40 & 220,072 & 1,000,000 \\ 64n+31 & 0 & 577,117 & 243n+121 & 422,883 & 1,000,000 \\ 128n+63 & 0 & 524,286 & 729n+364 & 315,724 & 840,010 \\ \hline \end{tabular} \label{table:PatternShifts} \end{table} \subsection{Maximum Cascade Starts (MCS)} Every natural number can be the result of a cascade, and every natural number has a \emph{maximum cascade start} (maximum form, minimum value) that transforms to that number. Trümper calls them \emph{Collatz Backward Series} in [7]. We remember that each odd cycle in a cascade results in a number of the next-lower-form with the index of the previous number multiplied by 3 and increased by 1. We find the maximum cascade start (MCS) for a number by working backwards from the number. We call this reverse cascade a \emph{ladder}. \begin{table}[!htbp] \caption{Finding Maximum Cascade Start} \centering \begin{tabular}{l p{10cm}} \hline\hline Step 1 & Every cascade ends with a 2-cycle, so we start by multiplying the number by 2. \\ & \\ Step 2 & If the index of this number is of the mixed form $3t+1, t \in W$, then there is a higher form number that will cycle to this number. We use t from the index in a form 1 higher than the form of the number to generate the next-higher-form number in the ladder.\\ &\\ Step 3 & Repeat Step 2 until the index is not of the mixed form $3t+1, t \in W$, then go to Step 4. 
\\ & \\ Step 4 & If the index of this number is NOT of the mixed form $3t+1, t \in W$, then there is no higher-form number that will cycle to this number, and we have found the maximum cascade start.\\ \hline \end{tabular} \label{table:FindMCS} \end{table} Since the cascade structure is rigidly defined, we can use this method to find all odd values in the ladder. We don’t need to find the even numbers in the odd cycles of the reverse cascade. Just as there is only one path for each cascade, there is only one path for each ladder. The example below helps understand the process to find the maximum cascade start for a number. \begin{table}[!htbp] \caption{Finding Maximum Cascade Start Example} \centering \begin{tabular}{l c c c c} \hline\hline \rule{0pt}{4mm} & Form & Number & Index & Next \\ \hline \rule{0pt}{4mm} & $64(0)+31=$ &$31$ & &\\ &&&&\\ Step 1 & $2(31)+ 0=$ & $62$ & $31=3(10)+1$ &up to next-\\ &&&&higher form\\ &&&&\\ Step 2 & \multicolumn{4}{l}{The index of 62 is of the form $3t+1$, so there is a higher-form}\\ &\multicolumn{4}{l}{number that will cycle to 31.}\\ &&&&\\ Step 2 & \multicolumn{4}{l}{Using 10 as the index for the next number in the ladder results in}\\ &&&&\\ & $4(10)+ 1=$ & $41$ & $10=3(3)+1$ &up to next-\\ &&&&higher form\\ &&&&\\ Step 2 & $8(3)+ 3=$ & $27$ & $3=3(1)+0$ & end of\\ &&&&reverse cascade\\ &&&&\\ Step 3 & \multicolumn{4}{l}{The index of 27 is not of the form $3t+1$, so there is no higher-form}\\ &\multicolumn{4}{l}{number that will cycle to 27.}\\ &&&&\\ Step 4 & \multicolumn{4}{l}{Thus, 27 is the maximum cascade start of 31.}\\ &&&&\\ &\multicolumn{4}{l}{Checking that the 27 cascade does lead to 31:}\\ &\multicolumn{4}{l}{$27=8(3)+3$}\\ &\multicolumn{4}{l}{$82=2(41)$}\\ &\multicolumn{4}{l}{$41=4(10)+1$}\\ &\multicolumn{4}{l}{$124=2(62)$}\\ &\multicolumn{4}{l}{$62=2(31)$}\\ &\multicolumn{4}{l}{$31=64(0)+31$ (end of cascade)}\\ \hline \end{tabular} \label{table:FindMCSExample} \end{table} \noindent Maximum cascade start values 
for some small numbers are shown in Table \ref{table:MCSValues}. \begin{table}[!htbp] \caption{Maximum Cascade Starts for Some Small Numbers} \centering \begin{tabular}{ccc} \hline\hline \rule{0pt}{4mm} &Maximum Cascade &Standard Base of\\ Cascade Ending Value &Starting Value &Maximum Cascade\\ (Mixed Form) &(Max Form, Min Value) &Starting Value\\ \hline \rule{0pt}{4mm} $28 = 3(9) + 1 $ & $37 = 4(9) + 1$ & $ 4$ \\ $29 = 1(29)$ & $ 58 = 2(29) $ & $2$ \\ $30 = 1(30)$ & $ 60 = 2(30)$ & $ 2$ \\ $31 = 9(3) + 4 $ & $ 27 = 8(3) + 3$ & $ 8$ \\ $32 = 1(32)$ & $ 64 = 2(32) $ & $2$ \\ $33 = 1(33)$ & $ 66 = 2(33)$ & $ 2$ \\ $34 = 3(11) + 1$ & $ 45 = 4(11) + 1$ & $ 4$ \\ $35 = 1(35)$ & $ 70 = 2(35) $ & $2$ \\ $36 = 1(36)$ & $ 72 = 2(36)$ & $ 2$ \\ $37 = 3(12) + 1$ & $ 49 = 4(12) + 1$ & $ 4$ \\ $38 = 1(38)$ & $ 76 = 2(38)$ & $ 2$ \\ $39 = 1(39)$ & $ 78 = 2(39)$ & $ 2$ \\ $40 = 81(0) + 40$ & $ 15 = 32(0) + 15$ & $ 32$\\ $41 = 1(41)$ & $ 82 = 2(41)$ & $ 2$ \\ $42 = 1(42)$ & $ 84 = 2(42)$ & $ 2$ \\ $43 = 3(14) + 1$ & $ 57 = 4(14) + 1$ & $ 4$ \\ \hline \end{tabular} \label{table:MCSValues} \end{table} We can see a modified form pattern in these maximum values (4, 2, 2, 8, 2, 2, 4, 2, 2, 4, 2, 2, 32, 2, 2, 4,…). An interesting characteristic of the maximum cascade start values is that the index of each is NOT of the form 3n+1. If it were in that form there would be a higher cascade start. \subsection{Primary Maximum Cascade Starts (PMCS)} If successive maximum cascade starts (MCS) are applied to any number that is not a multiple of 3 (find the MCS of the MCS, etc.) it appears that an odd multiple of 3 is eventually reached. Proof of this is offered by Trümper in [7]. For example, successive MCS of 28 are: 28, 37, 49, 43, 57. This appears to be true for any natural number that is not a multiple of 3 and has been verified by the authors for all numbers up to 1,700,000. 
Because a multiple of 3 can only be the initial value(s) of a cascade, we call the cascade that starts with an odd multiple of 3 the “primary maximum cascade start” for a given number. \begin{table}[!htbp] \caption{Primary Maximum Cascade Starts for Some Small Numbers} \centering \begin{tabular}{ccc} \hline\hline \rule{0pt}{4mm} & &Standard Base of\\ Cascade Ending Value &Primary Maximum &Maximum Cascade\\ (Mixed Form) &Cascade Starting Value &Starting Value\\ \hline \rule{0pt}{4mm} $28 = 3(9) + 1 $ & $57 = 4(14) + 1$ & $ 4$ \\ $29 = 1(29)$ & $ 51 = 8(6)+3 $ & $8$ \\ $30 = 1(30)$ & see note & see note \\ $31 = 9(3) + 4 $ & $27 = 8(3) + 3$ & $ 8$ \\ $32 = 1(32)$ & $ 75 = 8(9)+3 $ & $8$ \\ $33 = 1(33)$ & see note & see note \\ $34 = 3(11) + 1$ & $ 45 = 4(11) + 1$ & $ 4$ \\ $35 = 1(35)$ & $ 93 = 4(23)+1 $ & $4$ \\ $36 = 1(36)$ & see note & see note \\ $37 = 3(12) + 1$ & $ 57 = 4(14) + 1$ & $ 4$ \\ $38 = 1(38)$ & $ 39 = 16(2)+7$ & $ 16$ \\ $39 = 1(39)$ & see note & see note \\ $40 = 81(0) + 40$ & $ 15 = 32(0) + 15$ & $ 32$ \\ $41 = 1(41)$ & $ 171 = 8(21)+3$ & $ 8$ \\ $42 = 1(42)$ & see note & see note \\ $43 = 3(14) + 1$ & $ 57 = 4(14) + 1$ & $ 4$ \\ \multicolumn{3}{c}{Note: Multiples of 3 cannot result from an odd cascade.}\\ \hline \end{tabular} \label{table:PMCSValues} \end{table} \section{Seeds} The only way for the Collatz algorithm to reach 1 is through a power of 2, which goes to 1 via repeated 2 cycles. We call the number that leads to a power of 2 a \emph{seed}. The first few seeds and the power of 2 they lead to are shown below. 
\begin{table}[!htbp] \caption{Seeds (Small Values)} \centering \begin{tabular}{p{15mm}ll} \hline\hline \rule{0pt}{4mm} &$1 \Rightarrow 4 = 2^{2}$ &\\ &$5 \Rightarrow 16 = 2^{4}$ &\\ &$21 \Rightarrow 64 = 2^{6}$ &\\ &$85 \Rightarrow 256 = 2^{8}$ &\\ &$341 \Rightarrow 1024 = 2^{10}$ &\\ \hline &&\\ \multicolumn{3}{l}{All seeds are 4-form numbers, and successive seeds are determined by using} \\ \multicolumn{3}{l}{the previous seed as the index of a 4-form number.} \\ &$5 = 4(1) + 1$ &\\ &$21 = 4(5) + 1$ &\\ &$85 = 4(21) + 1$ &\\ &$341 = 4(85) + 1$ &\\ &$K_{i+1} = 4K_{i} + 1 $ & $ K \in N, K_{1} = 1$\\ \hline \end{tabular} \label{table:Seeds} \end{table} Cadogan identified seeds as \emph{Set A} in (2.6) of [2]. \section{Columns} Another method of analyzing Collatz sequences is based on assigning the natural numbers into a table of 12 columns, with each column being the value of the number Mod 12 (column = C mod 12). We identify mod 12 results of zero as column 12 for ease of analysis and discussion. The characteristics of a certain column of the table apply to all numbers in that column. Applying the column concept to the standard number forms: \newline\indent 2-form numbers (evens) fall in columns 2, 4, 6, 8, 10, and 12 \newline\indent4-form numbers (4n+1) fall in columns 1, 5, and 9 \newline\indent8- and higher form numbers (non-standard form 4n+3) fall in columns 3, 7, and 11. We can see that the number forms are mutually exclusive and collectively exhaustive with respect to the 12 columns. All natural numbers can be defined in \emph{column form} as $12r + column$, where r is the row of the table, starting with 0. For example, 27 is column form $12*2 + 3$. 
Applying the appropriate Collatz step to each column form: \begin{table}[!htbp] \caption{Column Analysis of Collatz Steps} \centering \begin{tabular}{ccccc} \hline\hline \rule{0pt}{4mm} &Std or & Column-form & Mixed Column Form & Resulting \\ Column &Non-Std Form & Before Step & After Step & Column\\ \hline \rule{0pt}{4mm} 1 & 4n+1 & 12r+1 & 36r+4 & 4\\ 2 & 2n & 12r+2 & 6r+1 & 1 or 7\\ 3 & 4n+3 & 12r+3 & 36r+10 & 10\\ 4 & 2n & 12r+4 & 6r+2 & 2 or 8\\ 5 & 4n+1 & 12r+5 & 36r+16 & 4\\ 6 & 2n & 12r+6 & 6r+3 & 3 or 9\\ 7 & 4n+3 & 12r+7 & 36r+22 & 10\\ 8 & 2n & 12r+8 & 6r+4 & 4 or 10\\ 9 & 4n+1 & 12r+9 & 36r+28 & 4\\ 10 & 2n & 12r+10 & 6r+5 & 5 or 11\\ 11 & 4n+3 & 12r+11 & 36r+34 & 10\\ 12 & 2n & 12r+12 & 6r+6 & 6 or 12\\ \hline \end{tabular} \label{table:ColumnAnalysis} \end{table} Note that the results of Collatz steps on all 4n+1 numbers end up in column 4 and the Collatz steps on all 4n+3 numbers end up in column 10. A map of the column analysis will help us understand the process. Numbers in the circles represent columns and arrows represent steps. \begin{figure} \caption{Column Steps Map} \label{figure:ColumnMap} \end{figure} It’s clear from the diagram that multiples of 3 can only be initial values of Collatz sequences. 
Applying the column concept to a 64-cascade: \begin{table}[!htbp] \caption{Column Analysis of a General 64-Cascade} \centering \begin{tabular}{ccccc} \hline\hline \rule{0pt}{4mm} &Composite & &&\\ Step&Form&Standard Form&Column Form&Column\\ \hline \rule{0pt}{4mm} 0 & $64n+31$ & $64n+31$ & Indeterminate & 3, 7, or 11 \\ 1 & $192n+94$ & $2(96n+47)$ &$ 12(16n+7)+10$ &$ 10$ \\ 2 & $96n+47$ & $32(3n+1)+15$ &$ 12(8n+3)+11$ &$ 11$ \\ 3 & $288n+142$ & $2(144n+71)$ &$ 12(24n+11)+10$ &$ 10$ \\ 4 & $144n+71$ & $16(9n+4)+7$ &$ 12(12n+5)+11$ &$ 11$ \\ 5 & $432n+214$ & $2(216n+107)$ &$ 12(36n+17)+10$ &$ 10$ \\ 6 & $216n+107$ & $8(27n+13)+3$ &$ 12(18n+8)+11$ &$ 11$ \\ 7 & $648n+322$ & $2(324n+161)$ &$ 12(54n+26)+10$ &$ 10$ \\ 8 & $324n+161$ & $4(81n+40)+1$ &$ 12(27n+13)+5$ &$ 5$ \\ 9 & $972n+484$ & $2(486n+242)$ &$ 12(81n+40)+4$ &$ 4$ \\ 10 & $486n+242$ & $2(243n+121)$ & indeterminate & 2 or 8 \\ 11 & $243n+121$ & indeterminate & indeterminate & 1, 7, 4, or 10 \\ \hline \end{tabular} \label{table:64CascadeAnalysis} \end{table} We see that the bulk of the action of the cascade alternates between columns 10 and 11, and that the cascade ends in column 1, 7, 4, or 10 (the four 3t+1 columns). In fact, this is true for all 4n+3 cascades. 
Applying the column concept to a plummet from a power of 2: \begin{table}[!htbp] \caption{Column Analysis of a Plummet} \centering \begin{tabular}{ccccc} \hline\hline \rule{0pt}{4mm} Step &Value & Standard Form &Column Form &Column\\ \hline \rule{0pt}{4mm} 0 & $85 \text{(a seed)}$ & $4(21)+1$ & $ 12(7)+1$ & $ 1$\\ 1 & $ 256 $ & $2(128)$ & $ 12(21)+4$ & $ 4$\\ 2 & $ 128 $ & $2(64)$ & $ 12(10)+8$ & $ 8$\\ 3 & $ 64 $ & $2(32)$ & $ 12(5)+4$ & $ 4$\\ 4 & $ 32 $ & $2(16)$ & $ 12(2)+8$ & $ 8$\\ 5 & $ 16 $ & $2(8)$ & $ 12(1)+4$ & $ 4$\\ 6 & $ 8 $ & $2(4)$ & $ 12(0)+8$ & $ 8$\\ 7 & $ 4 $ & $2(2)$ & $ 12(0)+4$ & $ 4$\\ 8 & $ 2 $ & $2(1)$ & $ 12(0)+2$ & $ 2$\\ 9 & $ 1 $ & $1 $ & $12(0)+1$ & $ 1$\\ \hline \end{tabular} \label{table:PlummetAnalysis} \end{table} In the case of a plummet, the bulk of the action takes place between columns 4 and 8, and the plummet always leads to a value of 1. The number of steps to reach a value of 1 is called the \emph{total stopping time}. The maximum value in an odd cascade is the result of the odd step applied to the 4-form number near the end of the cascade (for example, after step 8 in Table \ref{table:64CascadeAnalysis}). The maximum value of the cascade is four times the value of the cascade transform and is always a column 4 number. \section{Stopping Times} \subsection{Stopping Times of Standard Forms} Standard 2-form numbers have a stopping time of 1 because they immediately result in a smaller number. 
\begin{table}[!htbp] \caption{Stopping Times of Standard Forms} \centering \begin{tabular}{c l p{65mm}} \hline\hline \rule{0pt}{4mm} Step & Form & \\ \hline \rule{0pt}{4mm} 0 & $2n$ & Even \\ 1 & $2n/2=n$ & Odd or even, depending on n, \\ & & but smaller value than $2n\text{ for all }n \in N$ \\ && \\ \multicolumn{3}{l}{Standard 4-form numbers have a stopping time of 3.} \\ Step & Form & \\ 0 & $4n+1$ & Odd \\ 1 & $3(4n+1)+1=12n+4$ & Even \\ 2 & $(12n+4)/2=6n+2$ & Even \\ 3 & $(6n+2)/2=3n+1$ & Odd or even, depending on n, \\ & & but smaller value than $4n+1\text{ for all }n \in N$ \\ & & ($n=0$ is the trivial case for the number 1) \\ \hline \end{tabular} \label{table:StdFormsStopTimes} \end{table} \begin{table}[!htbp] \caption*{Table 14: Stopping Times of Standard Forms (continued)} \centering \begin{tabular}{c l p{5cm}} \hline\hline && \\ \multicolumn{3}{p{12cm}}{Standard 8-form numbers are indeterminate after 5 steps (their parity depends on n), and as a group do not have a stopping time.}\\ && \\Step& Form&\\ \hline \rule{0pt}{4mm} 0 & $8n+3$ & Odd, 8-form \\ 1 & $3(8n+3)+1=24n+10$ & Even \\ 2 & $(24n+10)/2=12n+5$ & Odd, 4-form $=4(3n+1)+1$\\ 3 & $3(12n+5)+1=36n+16$ & Even \\ 4 & $(36n+16)/2=18n+8$ & Even \\ 5 & $(18n+8)/2=9n+4$ & Odd or even, depending on n, \\ & & but larger value than \\ & & $8n+3\text{ for all }n \in W$ \\ && \\ \multicolumn{3}{l}{Standard 32-form numbers are indeterminate after 9 steps (their parity}\\ \multicolumn{3}{l}{ depends on n), and as a group do not have a stopping time.}\\ Step & Form & \\ \hline \rule{0pt}{4mm} 0 & 32n + 15 & Odd, 32-form \\ 1 & 3(32n + 15) + 1 = 96n + 46 & Even \\ 2 & (96n + 46) / 2 = 48n + 23 & Odd, 16-form $= 16(3n + 1) + 7$ \\ 3 & 3(48n + 23) + 1 = 144n + 70 & Even \\ 4 & (144n + 70) / 2 = 72n + 35 & Odd, 8-form $= 8(9n + 4) + 3$ \\ 5 & 3(72n + 35) + 1 = 216n + 106 & Even \\ 6 & (216n + 106) / 2 = 108n + 53 & Odd, 4-form $= 4(27n + 13) + 1$ \\ 7 & 3(108n + 53) + 1 = 324n + 160 & Even \\ 8 & (324n + 160) / 2 
= 162n + 80 & Even \\ 9 & (162n + 80) / 2 = 81n + 40 & Odd or even, depending on n, \\ & & but larger value than \\ & & $32n + 15 \text{ for all }n \in W$ \\ \hline && \\ \multicolumn{3}{p{12cm}}{We find that 8- and higher standard forms, as a group, are indeterminate and do not have a stopping time because the base becomes odd before the stopping time of the offset is reached.} \\ && \\ \multicolumn{3}{p{12cm}}{All 8- and higher standard forms are indeterminate after $2p-1$ steps (i.e, at the end of the cascade).} \\ \end{tabular} \end{table} \subsection{Stopping Times of Composite Forms} The odd/even parity of the result of each step in the Collatz sequence of a composite form is the same as the parity of the offset, as long as the base of the composite form at that step is even. When the base is odd (a mixed form), the parity of the number is indeterminate (depends on the value of n). Standard form $8n+3$ is indeterminate after 5 steps, having a mixed form of $9n+4$, with a parity that depends on n and a value larger than $8n+3$ for all $n \in W$. The offset of this form, 3, has a stopping time of 6, which is found by applying the Collatz algorithm. If the base were even after step 5, the next step would result in an offset of 2, which is less than the starting offset. If we multiply the initial base (8) by 2, we get the composite form $16n+3$. Analyzing this form we find that it has a stopping time of 6 with a mixed form $9n+2$, which is smaller than $16n+3$ for all $n \in W$. Standard form $32n+15$ is indeterminate after 9 steps, having a mixed form of $81n+40$, with a parity that depends on n and a value larger than $32n+15$ for all $n \in W$. The offset of this form, 15, has a stopping time of 11, which is found by applying the Collatz algorithm. If the base were even through the step just prior to the stopping time of the offset (in this case step 10), the next step would result in an offset of 10, which is less than the starting offset. 
If we multiply the initial base (32) by 4 (we’ll describe how to determine this factor later), we get the composite form $128n+15$. Analyzing this form (see Table \ref{table:128CascadeAnalysis}) we find that it has a stopping time of 11 with a mixed form $81n+10$, which is smaller than $128n+15 \text{ for all }n \in W$. \begin{table}[!htbp] \caption{Collatz Sequence for Composite Form $128n+15$} \centering \begin{tabular}{c l p{5cm}} \hline\hline \rule{0pt}{4mm} Step& Form \\ 0 & $128n + 15$ & Odd, 32-form $= 32(4n) + 15$ \\ 1 & $3(128n + 15) + 1 = 384n + 46$ & Even $(\#1)$ \\ 2 & $(384n + 46) / 2 = 192n + 23$ & Odd, 16-form $= 16(12n + 1) + 7$ \\ 3 & $3(192n + 23) + 1 = 576n + 70$ & Even $(\#2)$ \\ 4 & $(576n + 70) / 2 = 288n + 35$ & Odd, 8-form $= 8(36n + 4) + 3$ \\ 5 & $3(288n + 35) + 1 = 864n + 106$ & Even $(\#3)$ \\ 6 & $(864n + 106) / 2 = 432n + 53$ & Odd, 4-form $= 4(108n + 13) + 1$ \\ 7 & $3(432n + 53) + 1 = 1296n + 160$ & Even $(\#4)$ \\ 8 & $(1296n + 160) / 2 = 648n + 80$ & Even $(\#5)$ \\ 9 & $(648n + 80) / 2 = 324n + 40$ & Even $(\#6)$\\ 10 & $(324n + 40) / 2 = 162n + 20$ & Even $(\#7)$ \\ 11 & $(162n + 20) / 2 = 81n + 10$ & Odd or even, depending on n, \\ & & but smaller value than \\ & & $128n + 15 \text{ for all } n \in W$ \\ \hline \end{tabular} \label{table:128CascadeAnalysis} \end{table} \textbf{In order for a number in a composite form sequence to be smaller than the starting number, the power of 2 in the base of the starting number must be at least equal to the number of even steps in the Collatz sequence of the offset through its stopping time.} Looking at the sequence for composite form $128n+15$ (Table \ref{table:128CascadeAnalysis}), the number of even steps until the resulting offset is less than the starting offset is 7. This means that the smallest base for a composite form with an offset of 15 that ensures a finite stopping time is $2^{7} = 128$. 
All composite forms with an offset of 15 and a power of 2 greater than 128 in the base also have a stopping time of 11. This is true for all sequences. As long as the power of 2 in the base is equal to or larger than the number of even steps in the Collatz sequence of the offset through its stopping time, the sequence has a finite stopping time. \textbf{This means that every number represented by a composite form that has a stopping time also has that stopping time.} This was also identified by Garner in [3]. \subsection{Calculating the Minimum Base That Ensures a Stopping Time for a Particular Offset} There is a direct relationship between stopping time and number of even steps to the stopping time in a Collatz sequence. We can, in fact, calculate the minimum power of 2 in the base that is required to ensure that a composite form will have a finite stopping time, based on the stopping time of the offset. In order to reach a number smaller than the starting number, the ratio of the effect of odd steps (multiply by 3) to even steps (divide by 2) must be between 0.5 and 1. (We can disregard the $+ 1$ term in the odd steps for now – it doesn’t seem to affect these calculations.) 
This means that \begin{table}[!htbp] \caption{Calculating Minimum Base to Ensure Stopping Time} \centering \begin{tabular}{l p{5cm}r} \hline\hline \rule{0pt}{4mm} $0.5 < 3^{S-E}/2^{E} < 1$ & where S is the stopping time of the offset, E is the number of even steps to the stopping time, and $S-E$ is the number of odd steps to the stopping time.&(5)\\ $0.5 < 3^{S}/(3^{E}*2^{E}) < 1$ & rearrange $3^{-E}$ &\\ $0.5 < 3^{S}/6^{E} < 1$ & combine $3^{E} *2^{E}$&\\ $2 > 6^{E}/3^{S} > 1$ & take reciprocals&\\ $1 < 6^{E}/3^{S} < 2$ & reorder&\\ $3^{S} < 6^{E} < 2*3^{S}$ & multiply by $3^{S}$&\\ $S*ln(3) < E*ln(6) < ln(2) + S*ln(3)$ & take natural logs&\\ & &\\ \multicolumn{2}{p{10cm}}{$S*ln(3)/ln(6) < E < S*ln(3)/ln(6)+ln(2)/ln(6)$}&(6)\\ & divide by $ln(6) \approx 1.792$ &\\ \multicolumn{2}{p{10cm}}{or approximately $0.613*S < E < 0.613*S+0.387$} &\\ \end{tabular} \label{table:CalcMinimumBase} \end{table} Since E must be a natural number there is at most one value of E that will satisfy inequality (6) for any value of S; the integer portion of $0.613*S$ must be smaller than the integer portion of $0.613*S + 0.387$. We can calculate E for a given value of S using the following equation: \begin{table}[!htbp] \centering \begin{tabular}{p{10cm}r} $E=[int(S*A+B)-int(S*A)]*int(S*A+B)$ &(7)\\ where $A=ln(3)/ln(6)$ and $B=ln(2)/ln(6)$&\\ \end{tabular} \end{table} For many values of S there is no value of E that satisfies inequality (6). This should mean that there is no sequence that has that value of S as its stopping time, and the authors have found this to be true for all numbers tested (for example, when S = 4, 5, or 7). Table \ref{table:EvensToStopTime} shows number of even steps to the stopping time and power of 2 in the base to ensure composite forms have stopping times for some small values of S. Numbers omitted from the stopping time column do not exist as the stopping time of any Collatz sequence. (Identified by Terras with different nomenclature, Table B, p. 248 [6].) 
\begin{table}[!htbp] \caption{Even Steps Required to Stopping Time} \centering \begin{tabular}{ccc} \hline\hline \rule{0pt}{4mm} & &Minimum\\ Stopping Time & Even Steps Required & Value of Base \\ S & E & $2^{E}$ \\ \hline \rule{0pt}{4mm} 1 & 1 & 2 \\ 3 & 2 & 4 \\ 6 & 4 & 16 \\ 8 & 5 & 32 \\ 11 & 7 & 128 \\ 13 & 8 & 256 \\ 16 & 10 & 1024 \\ 19 & 12 & 4096 \\ 21 & 13 & 8192 \\ 24 & 15 & 32768 \\ 26 & 16 & 65536 \\ … & … & … \\ \end{tabular} \label{table:EvensToStopTime} \end{table} For example, this means that all sequences that have a stopping time of 8 are part of a group of numbers that have a base of 32. It turns out that only two composite forms have a stopping time of 8: $32n+11$ and $32n+23$. This also allows us to determine the group in which a given offset will have a finite stopping time. For example, the Collatz sequence for 27 has a stopping time of 96. When we calculate E for $S=96$ using equation (7), we get a value of 59, which is the number of even steps in the sequence for 27 until the stopping time. Thus, all numbers of the composite form $2^{59}n+27$ have a stopping time of 96. \subsection{Principal Composite Forms} As a group, no standard form higher than 4 has a stopping time. As we analyze larger and larger numbers, we find that most numbers can be described by composite forms having finite stopping times, and offsets that are smaller than the number being analyzed. For example, 41 is part of group $4n+1$ with a stopping time of 3, and 43 is part of group $32n+11$ with a stopping time of 8 [3]. (Kannan and Moorthy [4] identified similar results, with some of the results in other than lowest form.) However, some numbers are not part of a group with a stopping time, and an offset smaller than the number, such as 47, which is part of group $2^{54}n+47$; or 27, which is part of group $2^{59}n+27$. 
We call a composite form that has a stopping time, and an offset equal to the number being analyzed a \emph{principal form}, because no numbers smaller than the offset are described by that form. Table \ref{table:PrincipalForms} shows principal forms required to describe all numbers up to 100. \begin{table}[!htbp] \caption{Principal Forms} \centering \begin{tabular}{c} required to describe \\ all numbers up to 100\\ \hline \rule{0pt}{4mm} $2 * n$ \\ $4 * n + 1$ \\ $16 * n + 3$ \\ $128 * n + 7$ \\ $32 * n + 11$ \\ $128 * n + 15$ \\ $32 * n + 23$ \\ $2^{59} * n + 27$ \\ $2^{56} * n + 31$ \\ $256 * n + 39$ \\ $2^{54} * n + 47$ \\ $128 * n + 59$ \\ $2^{54} * n + 63$ \\ $2^{51} * n + 71$ \\ $256 * n + 79$ \\ $2^{45} * n + 91$ \\ $256 * n + 95$ \\ \end{tabular} \label{table:PrincipalForms} \end{table} An additional 10 principal forms are required to describe all numbers between 101 and 200, and 9 more are required for all numbers between 201 and 300. Garner identified a number of these Principal Composite Forms in [3]. If there exists a number M such that no additional principal forms are required to ensure a stopping time for all numbers greater than M, the Collatz Conjecture can be proven true. We find, however, that the number of principal forms per 10,000 numbers analyzed behaves roughly as shown in Figure \ref{figure:FormsPer10K}. \begin{figure} \caption{Average Principal Forms per 10,000 Numbers} \label{figure:FormsPer10K} \end{figure} The line in Figure \ref{figure:FormsPer10K} appears to reach a constant value, but there is actually some variation in the average values of principal forms per 10,000 numbers tested. Table \ref{table:FormsPer10K} shows statistics for some interesting points in Figure \ref{figure:FormsPer10K}. \begin{table}[!htbp] \caption{Principal Forms per 10,000 Numbers} \centering \begin{tabular}{ccccc} \hline\hline \rule{0pt}{4mm} Million* &Average &Std. Dev. 
& Max & Min\\ \hline \rule{0pt}{4mm} 1 & 303.4 & 43.6 & 589 & 268 \\ 2 & 261.7 & 9.2 & 297 & 245 \\ 3 & 225.9 & 14.5 & 270 & 200 \\ 4 & 222.4 & 7.4 & 239 & 197 \\ 5 & 222.2 & 8.0 & 246 & 196 \\ 6 & 222.2 & 7.8 & 237 & 199 \\ 7 & 222.4 & 7.3 & 240 & 201 \\ 8 & 222.2 & 8.1 & 242 & 201 \\ 9 & 209.2 & 13.1 & 236 & 183 \\ 10 & 201.4 & 8.0 & 216 & 182 \\ … & & & & \\ 16 & 201.3 & 7.0 & 216 & 186 \\ 17 & 194.8 & 15.3 & 216 & 153 \\ 18 & 170.6 & 8.6 & 195 & 148 \\ 19 & 170.5 & 7.4 & 186 & 148 \\ … & & & & \\ 66 & 170.4 & 6.8 & 186 & 152 \\ 67 & 170.7 & 7.0 & 187 & 151 \\ 68 & 157.0 & 9.8 & 190 & 135 \\ 69 & 153.6 & 7.6 & 170 & 135 \\ 70 & 154.4 & 9.2 & 177 & 133 \\ … & & & & \\ 133 & 154.6 & 7.9 & 178 & 135 \\ 134 & 154.1 & 7.6 & 174 & 135 \\ 135 & 137.3 & 12.8 & 172 & 113 \\ 136 & 129.3 & 8.3 & 151 & 112 \\ … & & & & \\ 250 & 131.5 & 7.9 & 149 & 115 \\ … & & & & \\ 500 & 131.4 & 8.2 & 150 & 110 \\ \multicolumn{5}{l}{* the one million numbers ending at}\\ \multicolumn{5}{l}{this number times 1,000,000}\\ \end{tabular} \label{table:FormsPer10K} \end{table} This appears to be a downward trend which may continue, indicating that the number M may exist, or may reach a limit of some number of principal forms per 10,000 numbers which would indicate that this approach cannot be used to confirm the Collatz Conjecture. \indent Continuing analysis of Collatz sequences of selected groups of very large numbers shows that principal composite forms decrease to 0 or 1 per 10,000 for those groups of numbers. The authors have found an astonishing string of 50,000 consecutive odd numbers with the same total stopping time (number of steps to reach 1). The numbers are in the range $10^{142} - 10^{6} + 1$ through 99,999. Analysis of principal composite forms for very large numbers continues. \subsection{Generating and Analyzing Composite Forms} We can generate composite forms by replacing the index of a standard form with expressions for $\#$-forms. 
For example, if we replace the index n in the standard 8-form with the 2-form expression ($2n$) we get a new expression for an 8-form with a 2-form base: $8(2n) + 3 = 16n + 3$. We identify this composite form as $8.2$. Another example: form $16.4.8$ is $16(4(8n + 3) + 1) + 7 = 512n + 215$. As stated earlier in this paper, the form of the number that results from a cascade determines whether the sequence will go up or down and by how much. If we could determine the forms of cascade results we might gain some insight into the overall behavior of Collatz sequences. The 4-cascade transforms the starting value of $4n+1$ into a cascade result of $3n+1$. The values of $3n+1$ are a \emph{mix} of forms that depend on the value of n. The 4.2-cascade transform is $8n+1$ to $6n+1$, which is also a mix of different forms, depending on the value of n. However, the 4.4 and higher 4.x cascade transforms are all 2-form numbers. Form 4.4 can be described as $4(4n+1)+1$, and a cascade transforms it to $3(4n+1)+1$, which is $12n+4$, which is always even (2-form). Form 4.8 and higher can be described as $4(4n+3)+1$, and a cascade transforms it to $3(4n+3)+1$, which is $12n+10$, which is always even (2-form). Tables \ref{table:Level1} through \ref{table:Level3} show forms of cascade results for a number of composite forms. Note that each row of the tables is one of two general types: \begin{adjustwidth}{1cm}{0cm} \hspace{5 mm}For Type 1, the value shown in the .2 column is \emph{Mix}, followed by a particular form for .4 and higher columns. For Type 2, the value in the .2 column is a particular form, followed by increasing forms until a \emph{Mix} results, followed by the next higher form after the one just preceding the \emph{Mix} for all higher columns. \end{adjustwidth} In each row of each table, the smallest form of numbers in the mix column is 1 more than the highest form in the other columns of the row. 
The forms for the \emph{Mix} values are shown in the rightmost column of each row. Tables \ref{table:Level2} and \ref{table:Level3} show results for the \emph{Mix} values from the previous table. While these results are not conclusive, they indicate yet another interesting pattern in Collatz sequences. \begin{table}[!htbp] \caption{Standard Forms of Cascade Results - Level 1} \centering \begin{tabular}{ccccccccc} \multicolumn{9}{c}{(Standard forms of numbers resulting from applicable cascades)}\\ \hline\hline \rule{0pt}{4mm} &\multicolumn{8}{c}{<========== Level 2 ==========>}\\ Level 1& .2& .4& .8& .16& .32& .64& .128& Mix\\ \hline \rule{0pt}{4mm} 4 &Mix &2 &2 &2 &2 &2 &2 &4+ \\ 8 &2 &4 &Mix &8 &8 &8 &8 &16+ \\ 16 &Mix &2 &2 &2 &2 &2 &2 &4+ \\ 32 &2 &4 &8 &Mix &16 &16 &16 &32+ \\ 64 &Mix &2 &2 &2 &2 &2 &2 &4+ \\ 128 &2 &4 &Mix &8 &8 &8 &8 &16+ \\ 256 &Mix &2 &2 &2 &2 &2 &2 &4+ \\ 512 &2 &4 &8 &16 &Mix &32 &32 &64+ \\ 1024 &Mix &2 &2 &2 &2 &2 &2 &4+ \\ 2048 &2 &4 &Mix &8 &8 &8 &8 &16+ \\ 4096 &Mix &2 &2 &2 &2 &2 &2 &4+ \\ 8192 &2 &4 &8 &Mix &16 &16 &16 &32+ \\ 16384 &Mix &2 &2 &2 &2 &2 &2 &4+ \\ 32768 &2 &4 &Mix &8 &8 &8 &8 &16+ \\ 65536 &Mix &2 &2 &2 &2 &2 &2 &4+ \\ 131072 &2 &4 &8 &16 &32 &Mix &64 &128+ \\ 262144 &Mix &2 &2 &2 &2 &2 &2 &4+ \\ 524288 &2 &4 &Mix &8 &8 &8 &8 &16+ \\ 1048576 &Mix &2 &2 &2 &2 &2 &2 &4+ \\ 2097152 &2 &4 &8 &Mix &16 &16 &16 &32+ \\ \end{tabular} \label{table:Level1} \end{table} \begin{table}[!htbp] \caption{Standard Forms of Cascade Results - Level 2} \centering \begin{tabular}{ccccccccc} \multicolumn{9}{c}{(Standard forms of numbers resulting from applicable cascades)}\\ \multicolumn{9}{c}{Entries shown only for \emph{Mix} Values in Table \ref{table:Level1}}\\ \hline\hline \rule{0pt}{4mm} &\multicolumn{8}{c}{<========== Level 3 ==========>}\\ Level 2 & .2 & .4 & .8 & .16 & .32 & .64 & .128& Mix\\ \hline \rule{0pt}{4mm} 4.2 &4 &Mix &8 &8 &8 &8 &8 &16+ \\ 8.8 &Mix &16 &16 &16 &16 &16 &16 &32+ \\ 16.2 &4 &8 &Mix &16 &16 &16 &16 &32+ \\ 
32.16 &Mix &32 &32 &32 &32 &32 &32 &64+ \\ 64.2 &4 &Mix &8 &8 &8 &8 &8 &16+ \\ 128.8 &16 &Mix &32 &32 &32 &32 &32 &64+ \\ 256.2 &4 &8 &16 &Mix &32 &32 &32 &64+ \\ 512.32 &Mix &64 &64 &64 &64 &64 &64 &128+ \\ 1024.2 &4 &Mix &8 &8 &8 &8 &8 &16+ \\ 2048.8 &Mix &16 &16 &16 &16 &16 &16 &32+ \\ 4096.2 &4 &8 &Mix &16 &16 &16 &16 &32+ \\ 8192.16 &32 &64 &128 &Mix &256 &256 &256 &512+ \\ 16384.2 &4 &Mix &8 &8 &8 &8 &8 &16+ \\ 32768.8 &16 &32 &Mix &64 &64 &64 &64 &128+ \\ 65536.2 &4 &8 &16 &32 &Mix &64 &64 &128+ \\ 131072.64 &Mix &128 &128 &128 &128 &128 &128 &256+ \\ 262144.2 &4 &Mix &8 &8 &8 &8 &8 &16+ \\ 524288.8 &Mix &16 &16 &16 &16 &16 &16 &32+ \\ 1048576.2 &4 &8 &Mix &16 &16 &16 &16 &32+ \\ 2097152.16 &Mix &32 &32 &32 &32 &32 &32 &64+ \\ \end{tabular} \label{table:Level2} \end{table} \begin{table}[!htbp] \caption{Standard Forms of Cascade Results - Level 3} \centering \begin{tabular}{ccccccccc} \multicolumn{9}{c}{(Standard forms of numbers resulting from applicable cascades)}\\ \multicolumn{9}{c}{Entries shown only for \emph{Mix} Values in Table \ref{table:Level2}}\\ \hline\hline \rule{0pt}{4mm} &\multicolumn{8}{c}{<========== Level 4 ==========>}\\ Level 3 &.2 &.4 &.8 &.16 &.32 &.64 &.128 &Mix\\ \hline \rule{0pt}{4mm} 4.2.4 &16 &Mix &32 &32 &32 &32 &32 &64+ \\ 8.8.2 &Mix &32 &32 &32 &32 &32 &32 &64+ \\ 16.2.8 &32 &64 &128 &256 &Mix &512 &512 &1024+ \\ 32.16.2 &64 &Mix &128 &128 &128 &128 &128 &256+ \\ 64.2.4 &Mix &16 &16 &16 &16 &16 &16 &32+ \\ 128.8.4 &Mix &64 &64 &64 &64 &64 &64 &128+ \\ 256.2.16 &64 &128 &Mix &256 &256 &256 &256 &512+ \\ 512.32.2 &128 &256 &Mix &512 &512 &512 &512 &1024+ \\ 1024.2.4 &16 &32 &Mix &64 &64 &64 &64 &128+ \\ 2048.8.2 &32 &64 &Mix &128 &128 &128 &128 &256+ \\ 4096.2.8 &Mix &32 &32 &32 &32 &32 &32 &64+ \\ 8192.16.16 &512 &Mix &1024 &1024 &1024 &1024 &1024 &2048+ \\ 16384.2.4 &Mix &16 &16 &16 &16 &16 &16 &32+ \\ 32768.8.8 &Mix &128 &128 &128 &128 &128 &128 &256+ \\ 65536.2.32 &128 &256 &Mix &512 &512 &512 &512 &1024+ \\ 131072.64.2 &256 
&512 &1024 &Mix &2048 &2048 &2048 &4096+ \\ 262144.2.4 &16 &Mix &32 &32 &32 &32 &32 &64+ \\ 524288.8.2 &Mix &32 &32 &32 &32 &32 &32 &64+ \\ 1048576.2.8 &32 &Mix &64 &64 &64 &64 &64 &128+ \\ 2097152.16.2 &Mix &64 &64 &64 &64 &64 &64 &128+ \\ &&&&&&&&\\ \multicolumn{9}{c}{ It appears that this pattern continues at higher levels of detail.}\\ \end{tabular} \label{table:Level3} \end{table} \section{Conclusions} Forms help us analyze large groups of numbers having the same characteristics, and cascades help us see the rigid structure within Collatz sequences, providing important insights into what appear to be random ups and downs. Cascades also help us understand the next steps after a cascade – namely the start of another cascade. Understanding results of cascades is key to understanding the Collatz Conjecture. Unfortunately, any cascade can result in a number of any form. Column analysis exposes hidden patterns within cascades. Successive reverse cascades of non-multiples of 3 all seem to lead to an odd multiple of 3, providing another interesting approach to possibly confirming the Collatz Conjecture. The relationship between stopping time and number of even steps to stopping time allows us to group many numbers having the same principal composite form – all of which have the same stopping time. Analysis of principal composite forms for very large numbers continues. Total stopping time is not related to the form of a number or its stopping time. The authors hope that the patterns we have discovered in Collatz sequences may lead to a proof of the Collatz Conjecture. [1] Barina, David (2020). ``Convergence verification of the Collatz problem.'' The Journal of Supercomputing. doi:10.1007/s11227-020-03368-x. S2CID 220294340.\vspace*{3mm}\newline [2] Cadogan, Charles C (1991). ``Some Observations on the 3x + 1 Problem.'' Proc. Sixth Caribbean Conference on Combinatorics \& Computing, University of the West Indies: St. Augustine Trinidad (C. C. Cadogan, Ed.) 
Jan 1991, 84-91.\vspace*{3mm}\newline [3] Garner, Lynn E. (1981). “On the Collatz 3n+1 Algorithm.” Proceedings of the American Mathematical Society, Volume 82, Number 1, May 1981, 19-22.\vspace*{3mm}\newline [4] Kannan, T. and Moorthy, C. Ganesa (2016). “Two Methods to Approach Collatz Conjecture.” International Journal of Mathematics And its Applications, Volume 4, Issue 1-B (2016), 161-167.\vspace*{3mm}\newline [5] Mehendale, Dhananjay P. (2005). “Some Observations on the 3x+1 Problem.” arXiv:math/0504355 [math.GM] 18 Apr 2005.\vspace*{3mm}\newline [6] Terras, Riho (1976). “A stopping time problem on the positive integers.” Acta Arithmetica 30 (1976), 241-252.\vspace*{3mm}\newline [7] Trümper, Manfred (2006). “Handles, Hooks, and Scenarios: A fresh Look at the Collatz Conjecture.” arXiv:Math/0612228v1 [math.GM] 9 Dec 2006. \section{Appendices} \subsection*{Appendix I - Proof of Lemma 1} \textbf{Lemma 1:} All natural numbers can be expressed in standard form by the equation: \begin{table}[!htbp] \centering \begin{tabular}{l l r} $C=2^{p} n+2^{p-1}-1;$ & $p \in N, n \in W (N \cup \{0\})$ & (1)\\ \end{tabular} \end{table} This equation was developed independently by the authors, and was previously identified by Mehendale as Obs. 2 in [5] and by Cadogan as (3.2) of [2], and also describes even numbers when p=1. 
\textbf{Proof of Lemma 1:} \begin{table}[!htbp] \begin{tabular}{lllr} \multicolumn{4}{l}{All natural numbers are either even or odd.}\\ \multicolumn{4}{l}{Case 1: Even natural numbers $C = 2t$; $t \in N$,}\\ & $C = 2t$ & Standard 2-form, & (8) \\ & $C = 2t + 1 - 1$ & & \\ & $C = 2^{p}n + 2^{p-1} - 1$ & with $p = 1$ and $n = t$ & \\ \multicolumn{4}{l}{All even natural numbers are standard form $2^{p}n + 2^{p-1} - 1$}\\ \multicolumn{4}{l}{with $p = 1$ and $n \in N$.}\\ &&&\\ \multicolumn{4}{l}{Case 2: Odd natural numbers $C = 2t+1$; $t \in W$,}\\ & $C = 2t+1$ & Non-standard 2-form, & (9) \\ & $C = 2t + 2 - 1$ & & \\ & $C = 2^{p}n + 2^{p} - 1$ & with $p = 1$ and $n = t$ & \\ \multicolumn{4}{l}{All natural numbers are either standard form ($2n$, even) }\\ \multicolumn{4}{l}{or non-standard 2-form ($2n+1$, odd).}\\ &&&\\ \multicolumn{4}{l}{Continuing, $t$ in equation (9) must be even or odd} \\ \multicolumn{2}{l}{If $t$ is even, let $t=2m$} &$m \in W$ & \\ From (9)& $C = 2(2m)+1$ & & \\ & $C = 4m+1$ & Standard 4-form & (10) \\ & $C = 4m+2-1$ & & \\ & $C = 2^{p}n + 2^{p-1} - 1$ & with $p = 2$ and $n = m$ & \\ &&&\\ \multicolumn{2}{l}{If $t$ is odd, let $t=2m+1$} &$m \in W$ & \\ From (9)& $C = 2(2m+1)+1$ & & \\ & $C = 4m+2+1$ & & \\ & $C = 4m+3$ & Non-standard 4-form & (11) \\ & $C = 4m+4-1$ & & \\ & $C = 2^{p}n + 2^{p} - 1$ & with $p = 2$ and $n = m$ & \\ \multicolumn{4}{l}{All natural numbers of non-standard 2-form ($2n+1$, odd) are either}\\ \multicolumn{4}{l}{standard 4-form ($4n+1$) or non-standard 4-form ($4n+3$).}\\ &&&\\ \end{tabular} \end{table} \begin{table}[!htbp] \centering \begin{tabular}{lllr} \multicolumn{4}{l}{Continuing, $m$ in equation (11) must be even or odd} \\ \multicolumn{2}{l}{If $m$ is even, let $m=2q$} &$q \in W$ & \\ From (11)& $C = 4(2q)+3$ & & \\ & $C = 8q+3$ & Standard 8-form & (12) \\ & $C = 8q+4-1$ & & \\ & $C = 2^{p}n + 2^{p-1} - 1$ & with $p = 3$ and $n = q$ & \\ &&&\\ \multicolumn{2}{l}{If $m$ is odd, let $m=2q+1$}&$q \in W$ & \\ From 
(11)& $C = 4(2q+1)+3$ & & \\ & $C = 8q+4+3$ & & \\ & $C = 8q+7$ & Non-standard 8-form & (13) \\ & $C = 8q+8-1$ & & \\ & $C = 2^{p}n + 2^{p} - 1$ & with $p = 3$ and $n = q$ & \\ \multicolumn{4}{l}{All natural numbers of non-standard 4-form ($4n+3$) are either}\\ \multicolumn{4}{l}{standard 8-form ($8n+3$) or non-standard 8-form ($8n+7$).}\\ &&&\\ \multicolumn{4}{l}{Continuing, $q$ in equation (13) must be even or odd} \\ \multicolumn{2}{l}{If $q$ is even, let $q=2r$} &$r \in W$ & \\ From (13)& $C = 8(2r)+7$ & & \\ & $C = 16r+7$ & Standard 16-form & (14) \\ & $C = 16r+8-1$ & & \\ & $C = 2^{p}n + 2^{p-1} - 1$ & with $p = 4$ and $n = r$ & \\ &&&\\ \multicolumn{2}{l}{If $q$ is odd, let $q=2r+1$}&$r \in W$ & \\ From (13)& $C = 8(2r+1)+7$ & & \\ & $C = 16r+8+7$ & & \\ & $C = 16r+15$ & Non-standard 16-form & (15) \\ & $C = 16r+16-1$ & & \\ & $C = 2^{p}n + 2^{p} - 1$ & with $p = 4$ and $n = r$ & \\ \multicolumn{4}{l}{All natural numbers of non-standard 8-form ($8n+7$) are either}\\ \multicolumn{4}{l}{standard 16-form ($16n+7$)or non-standard 16-form ($16n+15$).}\\ &&&\\ \end{tabular} \end{table} In general, any number of a non-standard form $2^{p}n + 2^{p} - 1$ is either the next-higher standard form $2^{p+1}n + 2^{p} - 1$ or the next higher non-standard form $2^{p+1}n + 2^{p+1} - 1$. This process continues indefinitely (p increases with each step) and will generate all odd numbers in the form $2^{p}n + 2^{p-1} - 1$ (standard form). Standard 2-form and non-standard 2-form include all natural numbers (evens and odds, respectively). Similarly, standard 4-form and non-standard 4-form include all odd numbers (developed from equation 9). Non-standard 4-form includes all odd numbers that are not standard 4-form (numbers of a higher form), and non-standard 8-form includes all odd numbers that are not standard 4-form or standard 8-form. This continues to infinity, as illustrated below. 
\newline\noindent |-------------- even form ------------------|--------------------- odd forms ------------------|\newline |------------------- 2n ----------------------|------------------------ 2n+1 ---------------------|\newline .\hspace{54mm}|----------- 4n+1 ----------|-------- 4n+3 --------|\newline .\hspace{90mm}|-- 8n+3 -|-- 8n+7-| \newline …and so forth, to infinity. All odd numbers can be described in the form $2^{p}n + 2^{p-1} - 1$, with $p \geq 2$ and $n \in W$. \textbf{Thus, Lemma 1 is proved: All natural numbers can be described in the form $2^{p}n + 2^{p-1} - 1$, with $p \in N$, $p \geq 1$ and $n \in W$.} \subsection*{Appendix II - Determining the Standard Form of a Number} The standard form of a particular number can be calculated as follows: \begin{table}[!htbp] \centering \begin{tabular}{lllr} \multicolumn{4}{l}{Starting with equation (1), repeated here for reference:} \\ &\multicolumn{2}{l}{$C=2^{p}n + 2^{p-1} - 1$} & (1) \\ & $C+1 = 2^{p}n + 2^{p-1}$ & Add 1 & \\ & $C+1 = 2^{p-1}(2n+1)$ & Factor out $2^{p-1}$ & (16) \\ & $2^{p-1}=gcd(C+1,2^{30})$ & gcd is greatest common divisor; & \\ & & $2^{30}$ is simply a & \\ & & sufficiently large power of 2 & \\ & $p-1 = log_{2}(gcd(C+1,2^{30}))$ & Take $log_{2}$ of each side & \\ & $p = log_{2}(gcd(C+1,2^{30}))+1$ & Add 1 &(17) \\ From (16)&$2n+1=(C+1)/2^{p-1}$ & also identified by Cadogan [2] & \\ & $2n=(C+1)/2^{p-1}-1$ & Subtract 1 & \\ & $2n=(C+1-2^{p-1})/2^{p-1}$ & Rearrange & \\ & $n=(C+1-2^{p-1})/2^{p} $ & Divide by 2 & (18) \\ \end{tabular} \end{table} \subsection*{Appendix III - Proof of Lemma 2} \textbf{Lemma 2:} Every natural number can be described in standard form by a unique combination of p and n. Conversely, the combination of p and n for each natural number is unique. \textbf{Proof of Lemma 2:}\newline Assume that there is another pair of numbers, say q and m, that generates the same natural number as p and n, respectively. 
\begin{table}[!htbp] \centering \begin{tabular}{lllr} Then, & $2^{p}n + 2^{p-1} - 1 = 2^{q}m + 2^{q-1} - 1$ &Assume $q \neq p$ and $m \neq n$ &\\ & $2^{p}n + 2^{p-1} = 2^{q}m + 2^{q-1}$ & &\\ & $2^{p-1}(2n+1) = 2^{q-1}(2m+1)$ & Factor out $2^{p-1}$ and $2^{q-1}$ &\\ & $2^{p-q} = (2m+1)/(2n+1)$ & & (19)\\ But, &\multicolumn{3}{l}{$(2m+1)/(2n+1)$ must be odd, since both $(2m+1)$ and}\\ &\multicolumn{3}{l}{$(2n+1)$ are odd}\\ Therefore, &$2^{p-q}$ must also be odd & & \\ &\multicolumn{3}{l}{The only power of 2 that is odd is $2^{0}=1$, so $p-q$ must equal 0,}\\ &\multicolumn{3}{l}{and q must equal p}\\ Thus, &$2^{0}=1=(2m+1)/(2n+1)$ & & \\ from (19) & & &\\ &$2n+1=2m+1$ & &\\ And &$n=m$ & &\\ &&&\\ \multicolumn{4}{l}{Since q must equal p and m must equal n, the original assumption is false.}\\ \end{tabular} \end{table} \textbf{Thus, Lemma 2 is proved: Each unique combination of p and n generates a unique natural number, and vice versa.} \subsection*{Appendix IV - Example of an Odd Cycle} \begin{table}[!htbp] \centering \begin{tabular}{llll} & \multicolumn{3}{l}{Applying an odd cycle to 87:} \\ $C_{i}$ & $ 87$ & Odd, standard 16-form & $=16(5)+7$ \\ $C_{i+1}$ & $ 3*87+1=262$ & Even, standard 2-form & $=2(131)$ \\ $C_{i+2}$ & $ 262/2=131$ & Odd, standard 8-form & $=8(16)+3$ \\ & & & $=8(3*5+1)+3$ \\ \end{tabular} \end{table} In this cycle the base of the number decreases from 16 to 8, the index increases from 5 to 16, and the value of the number increases from 87 to 131. 
\subsection*{Appendix V - Analysis of General 16-Cascade} \begin{table}[!htbp] \centering \begin{tabular}{lllp{5cm}} Step & Form & Resulting form & Resulting parity \\ \hline 0 & $16n + 7$ & $= 16n + 7$ & Odd, standard 16-form \\ 1 & $3(16n + 7) + 1= 48n + 22$ & & Even \\ 2 & $(48n + 22) / 2= 24n + 11$ & $= 8(3n + 1) + 3$ & Odd, standard 8-form \\ 3 & $3(24n + 11) + 1= 72n + 34$ & Even \\ 4 & $(72n + 34) / 2= 36n + 17$ & $= 4(9n + 4) + 1$ & Odd, standard 4-form Note: $9n + 4 = 3(3n + 1) + 1$ \\ 5 & $3(36n + 17) + 1= 108n + 52$ & & Even \\ 6 & $(108n + 52) / 2 = 54n + 26$ & $=2(27n + 13)$ & Even, standard 2-form Note: $27n + 13 = 3(9n + 4) + 1$ \\ 7 & $(54n + 26) / 2 = 27n + 13$ & $= 27n + 13$ & Mixed form, Odd or even depending on n. \\ \end{tabular} \end{table} \end{document}
\begin{document} \date{} \maketitle \centerline{\scshape Dmitrii Rachinskii\footnote{The author was supported by National Science Foundation grant DMS-1413223.}} {\footnotesize \centerline{Department of Mathematical Sciences} \centerline{The University of Texas at Dallas, Richardson, TX, USA} } {\bf AMS subject classification:} {Primary: 34C55; Secondary: 37J45} {\bf Keywords:} {Multi-stability, hysteresis, gradient system, heteroclinic connection} \begin{abstract} We consider gradient systems with an increasing potential that depends on a scalar parameter. As the parameter is varied, critical points of the potential can be eliminated or created through saddle-node bifurcations causing the system to transit from one stable equilibrium located at a (local) minimum point of the potential to another minimum along the heteroclinic connections. These transitions can be represented by a graph. We show that any admissible graph has a realization in the class of two dimensional gradient flows. The relevance of this result is discussed in the context of genesis of hysteresis phenomena. The Preisach hysteresis model is considered as an example. \end{abstract} \section{Introduction} Hysteresis is a complex type of relationship between variables. The term was first introduced by James Ewing in his studies of properties of magnetic materials and may be translated as ``to lag behind'' from ancient Greek. The key feature of hysteresis is the dependence of the value of an output variable on some past values of the input variable, that is a type of memory. This dependence is mediated by the internal state that changes in response to variations of the input and is affected by the history of these variations (hence, to predict future outputs, one needs to know either the current internal state or the history of input variations). 
The term hysteresis is associated with a specific type of memory that can be characterized as persistent memory (as opposed to fading or scale-dependent memory such as described by the convolution integrals, delayed arguments, etc.). A mathematical idealization that is used to describe hysteresis is a so-called rate-independent input-output operator, that is an operator that commutes with the group of all increasing transformations of time \cite{Vis}. A consequence of the rate-independence (in case of scalar-valued inputs) is that only certain (local) maximum and minimum values of the input achieved in the past can affect the future. Hysteresis occurs in many phenomena, for example, it is encountered in magnetism \cite{May, Mal}, plasticity \cite{Lub,mroz}, friction \cite{Lam, Rud}, mechanical damage and fatigue \cite{Mic}, constitutive relationships of ``smart'' materials (shape memory alloys, piezo-electric and magnetostrictive materials \cite{Kuh, Cir}), porous media filtration \cite{Par,Rah,Bot}, phase transitions \cite{BroSpr,Dah} and engineering (thermostat, non-ideal switch and backlash nonlinearity are usual examples \cite{Mih,Gur}). The simplest example of hysteresis is probably a bi-stable system (in dynamical systems theory the two terms, bi-stability and hysteresis, are sometimes used almost as synonyms). Suppose that at an equilibrium the system minimizes some potential function (such as Helmholtz free energy density in Landau's theory of phase transitions and critical phenomena \cite{Lan}). Fig.~\ref{Fig1} shows a double well potential perturbed by a linear term. Let us consider the coefficient $u$ of this term as a control parameter (input) of the system. Suppose the system occupies the state corresponding to the right minimum point of the potential at some time. 
As $u$ varies, the profile of the potential changes, and the right minimum can be eliminated, in which case the system has to make a transition to the other (remaining) minimum point, see Fig.~\ref{Fig1}. A similar transition from the left to the right minimum point occurs at a different threshold value of the control parameter $u$, at which the left minimum point gets eliminated. Thus, we observe co-existence of two equilibrium states for some interval $[u_-,u_+]$ of the input values with transitions from one state to the other happening when $u$ reaches the value $u_-$ and the opposite transitions happening at the other end $u_+$ of the bi-stability interval. This is exactly a description of the simple hysteretic switch known as the non-ideal two-threshold relay, see Fig.~\ref{Fig2} (left). It is important to note that the applicability of the bi-stable relay model hinges on certain assumptions about characteristic time scales of several processes. In statistical physics, the global minimum of the energy potential corresponds to a persistent stable equilibrium, while local (relative) minima are interpreted as metastable states that can persist for limited characteristic time only before decaying eventually to the global minimum due to thermal fluctuations. The non-ideal relay model assumes that the characteristic time that passes between the transitions of the system from one state to another due to variations of the input $u$ is much shorter than the characteristic time that the system would spend in a metastable state before thermal fluctuations would guide it to the absolute minimum. On the other hand, the process of relaxation to a new state after the system has left some metastable state (that has been eliminated by a variation of the input) is assumed to be much faster than the input process, that is transitions are almost instantaneous compared to the time that the system spends in the stable and metastable states. 
Let us also remark that a mechanism of switching between states of a bi-stable system is not necessarily related to minimization of some functional. Two further important examples of multi-stable systems are presented by slow-fast systems \cite{Oma} and systems with feedback (such as, for instance, in Weiss mean field model of magnetization \cite{Wei}). Fig.~\ref{Fig1} presents the simple hysteresis effect. More complex hysteresis phenomena (that have been well known in magnetism, plasticity and sorption for decades) are described by input-output diagrams with a continual family of ascending and descending curves and nested hysteresis loops, see Fig.~\ref{Fig2} (right). A schematic input-output diagram on the right panel of Fig.~\ref{Fig2} may represent a complex dependence of magnetization of a ferromagnetic material on the applied magnetic field, or a stress-strain relationship in an elasto-plastic body, or moisture content of a porous medium vs applied pressure, etc. Various models of complex hysteresis relationships have been proposed and used by physicists and engineers. A few most prominent models include the Preisach model of magnetic materials, the Prandtl-Ishlinskii model of plasticity and friction and the Ising spin-interaction model of phase transitions in statistical physics (that have been also adapted to model sorption hysteresis \cite{OKa,Hyd,Iye,energy}; damage accumulation \cite{Ryc}; constitutive laws of ``smart'' materials; hysteresis in economics \cite{Fin,Pok, Cro}, social dynamics and population biology \cite{Mel,Bio,Bio1,Bio2}; see \cite{Sci, Siam} for further examples). Mathematical tools that have been developed in the area of modeling hysteresis phenomena and systems with hysteretic components are diverse and include the method of hysteresis operators \cite{KraPok}, differential inclusions \cite{Kre}, variational inequalities \cite{KreBoo}, variational approach based on $\Gamma$-convergence \cite{Mie} and switched systems \cite{Ast}. 
The above mentioned models of hysteresis are phenomenological and either describe hysteresis effects on a macroscopic level (for example, the Preisach and Prandtl-Ishlinskii models) or use an extremely simplified microscopic model of reality (such as in the Ising model). Derivation of macroscopic models of hysteresis from first principles, or microscopic physics, remains a daunting challenge \cite{Sci}. One idea is that in a complex energy landscape with a large number of metastable states, transitions between these states caused by variation of the control parameter (such as shown in Fig.~\ref{Fig1} for a simple double well potential) may result in a complex hysteresis relationship between macroscopic variables. Indeed, this idea has been entertained by researchers in hysteretic systems for a period of time. However, to the best of our knowledge, no precise results validating this idea have been obtained so far. A question that may be asked in this relation is whether any given ``map'' of transitions between metastable states can be realized by a gradient system that transits from one equilibrium to another in response to variations of the input parameter. In this paper, we give a positive answer to this question under the assumptions that (a) the hysteretic system has a discrete set of states (and hence can be described as a directed graph); and, (b) the input of the system is a scalar variable. It turns out that any system of this type can be realized by a gradient system with two degrees of freedom. We do not impose restrictions on the functional of the gradient system. Whether it is possible to use functionals from a certain class may be a subject of future work. Another question which goes beyond the scope of the present paper is whether continuous hysteresis relationships could be thought of as some continuous limit of the gradient systems. The paper is organized as follows. 
In the next section we discuss the setting of the problem and present the main result. In Section 3, a graph describing the dynamics of the discrete Preisach nonlinearity is considered as an example. Section 4 contains the proofs. We use an explicit construction for the functional of the gradient system in order to obtain a realization of any given graph that encodes transitions between the states of a hysteretic system. Finally, a brief discussion of multi-scale dynamics and their adiabatic limit is presented in the Appendix. \begin{figure} \caption{Double well potential depending on a control parameter $u$. Black point indicates the state of the system. When $u$ increases, the right minimum collides with the local maximum of the potential and disappears. As a result of this bifurcation, the black point that was sitting in the right minimum (left panel) makes a transition to the remaining minimum (right panel). } \label{Fig1} \end{figure} \section{Main result} The problem can be formally stated as follows. Consider a directed graph $\Gamma$ with the following properties. (i) The set $S$ of all vertices is a union of $n+1$ non-empty disjoint subsets: $$ S=\cup_{i=0}^n S_i;\qquad S_i\cap S_j=\emptyset, \ i\ne j;\qquad S_i\ne\emptyset. $$ (ii) For every $1\le i\le n-1$ and every vertex $s\in S_i$ there are exactly two edges $s s'$ and $s s''$ emanating from $s$. The end point $s'$ of one edge belongs to $S_{i-1}$, the end point $s''$ of the other edge belongs to $S_{i+1}$. Furthermore, for every $s\in S_0\cup S_{n}$ there is exactly one edge $ss'$ emanating from $s$; the end point $s'$ of this edge belongs to $S_1$ if $s\in S_0$ and to $S_{n-1}$ if $s\in S_n$. Graphs with these two properties will be called {\em admissible}. Dynamics on an admissible graph $\Gamma$ is defined as follows. It is assumed that the input $u$ of the hysteretic system takes values from a finite set $$ U=\{u^0,\ldots,u^n\},\qquad u^0< u^1<\ldots <u^n. 
$$ Any input sequence $u_t$ should satisfy $u_t\in U$ and either $u_{t+1}=u^{i+1}$ or $u_{t+1}=u^{i-1}$ if $u_t=u^i$ for all $t$. Vertices $s$ from the set $S_i$ represent possible states of the hysteretic system at any moment when $u_t=u^i$. Edges of the graph define transitions between the states induced by the input sequence. If $u_{t-1}=u^i$, $u_t=u^{i+ \sigma}$ with $\sigma=\pm1$, and $s_{t-1}=s'\in S_i$, then $s_t=s''$ where $s's''$ is a unique directed edge with $s''\in S_{i+\sigma}$. Now, let us consider the gradient system \begin{equation}\label{3} \dot {\bm x} = -\nabla_x V(\bm{x},u) \end{equation} with $\bm{x}\in \mathbb{R}^d,\ u\in \mathbb{R}$. The question is whether for any given admissible graph $\Gamma$ it is possible to construct a functional $V(\bm{x},u)$ such that the anti-gradient dynamics defined by $V$ agrees with the dynamics on $\Gamma$. A few more definitions are in order to formalize this question. We will assume everywhere that $V$ is continuously differentiable in $\bm{x}$ and continuous with respect to $(\bm{x},u)$. Suppose that the functional $V(\cdot,u)$ has an isolated critical point $\bm{x}^*(u)$ for each $u\in [u_-,u_+]$ and that this point continuously depends on $u$. Suppose that $\bm{x}^{*}(u)$ is a point of local minimum for $u\in [u_-,u_+)$, which undergoes a saddle-node bifurcation at $u=u_+$. That is, $\bm{x}^*(u_+)$ is a saddle-node point of the gradient field $-\nabla_x V(\cdot,u_+)$ and a ball $|\bm{x}-\bm{x}^*(u_+)|<\rho$ does not contain critical points of $V(\cdot,u)$ for some interval $u\in (u_+,u_++\delta)$. Assume that $\bm{x}^{**}(u_+)$ is an isolated local minimum point of $V(\cdot,u_+)$. 
We say that the local minimum point $\bm{x}^*(u_-)$ of $V(\cdot,u_-)$ {\em connects to} the local minimum point $\bm{x}^{**}(u_+)$ of $V(\cdot,u_+)$ if the unstable manifold of the saddle-node equilibrium point $\bm{x}^*(u_+)$ of the gradient field $-\nabla_x V(\cdot,u_+)$ belongs to the basin of attraction of the stable equilibrium $\bm{x}^{**}(u_+)$ of this field. \begin{figure} \caption{{\bf Left:} \label{Fig2} \end{figure} If $u$ changes adiabatically (very slowly) from $u_-$ to $u_+$ and gradient system \eqref{3} is at the equilibrium $\bm{x}^*(u_-)$ for $u=u_-$, then this system will remain at the equilibrium $\bm{x}^*(u)$ until the adiabatic input $u$ crosses the value $u=u_+$. At this point, the equilibrium $\bm{x}^*(u)$ will disappear in the saddle-node bifurcation and the system will switch to another equilibrium $\bm{x}^{**}(u_+)$ along the unstable manifold of $\bm{x}^*(u_+)$. Some brief discussion of multi-scale systems that lead to equation \eqref{3} in the adiabatic limit is presented in the Appendix. Similarly to the above definition, we say that a local minimum point $\bm{x}^*(u_+)$ of $V(\cdot,u_+)$ {\em connects to} a local minimum point $\bm{x}^{**}(u_-)$ of $V(\cdot,u_-)$ if $\bm{x}^*(u)$ undergoes the saddle-node bifurcation at $u=u_-$ and the unstable manifold of $\bm{x}^*(u_-)$ belongs to the basin of attraction of the stable equilibrium $\bm{x}^{**}(u_-)$ of the gradient flow. We will also say that $\bm{x}^*(u_-)$ and $\bm{x}^{*}(u_+)$ are {\em reversibly connected} if $\bm{x}^*(u)$ is an isolated local minimum point of $V(\cdot,u)$ for all $u\in[u_-,u_+]$ and $\bm{x}^*(u)$ continuously depends on $u$. For such equilibrium points, the gradient system remains at the equilibrium state $\bm{x}^*(u)$ at all times as long as the input is varied adiabatically within the range $[u_-,u_+]$ and provided that the system was at this state initially. 
Let us extend the above definitions to the case of several transitions between the equilibrium points. Suppose that a local minimum point $\bm{x}^*_{k-1}$ of the functional $V(\cdot,v_{k-1})$ connects to the local minimum point $\bm{x}_{k}^*$ of the functional $V(\cdot,v_k)$ for every $k=1,\ldots,\ell$, where $u_-=v_0<v_1<\cdots<v_\ell=u_+$ is a partition of the interval $[u_-,u_+]$. Then we say that $\bm{x}_0^*$ {\em connects to} $\bm{x}_\ell^*$. Similarly, we say that $\bm{x}_\ell^*$ {\em connects to} $\bm{x}_0^*$ if $\bm{x}_{k}^*$ connects to $\bm{x}^*_{k-1}$ for every $k=1,\ldots,\ell$. We call a gradient system $\dot {\bm{x}}=-\nabla_x V(\bm{x},u)$, $\bm{x}\in \mathbb{R}^d$, a {\em realization} of an admissible graph $\Gamma$ if (i) The functional $V(\cdot,u)$ is radially increasing for each $u\in [u^0,u^n]$: $$ \lim_{|\bm{x}|\to \infty}V(\bm{x},u)=\infty. $$ (ii) The set of the points of local minimum of the functional $V(\cdot,u^i)$ is in one-to-one correspondence $\bm{x}^*=X_i(s)$ with the subset $S_i\ni s$ of the vertices of $\Gamma$ for every $i=0,\ldots,n$. (iii) A local minimum point $\bm{x}^*$ of the functional $V(\cdot,u^i)$ connects to a local minimum point $\bm{x}^{**}$ of the functional $V(\cdot,u^{i+\sigma})$ with $\sigma=\pm1$ iff there is an edge $s's''$ of $\Gamma$ such that $\bm{x}^*=X_i(s')$, $\bm{x}^{**}=X_{i+\sigma}(s'')$. Adiabatic variation of the input $u$ of the gradient system over the interval $[u^0,u^n]$ results in transitions from one to another equilibrium state. If we restrict observations to the instants when the input value belongs to the finite set $U=\{u^0,\ldots, u^n\}$, then according to the above definition, the dynamics of transitions between the equilibrium states of the gradient system realizing the graph $\Gamma$ will be identical to dynamics on the graph $\Gamma$. The following statement is the main result of this paper. 
\begin{theorem}\label{theorem1} There is a planar realization for every admissible graph $\Gamma$. \end{theorem} A constructive proof of this theorem is presented in Section \ref{proof}. \section{Example: Realization of the Preisach hysteresis model} Fig.~\ref{Fig1} defines one of the simplest hysteresis operators called the non-ideal relay. The state of the relay corresponding to the right minimum of the double well potential is denoted by $0$, the other state corresponding to the left minimum is denoted by $1$. The relay switches from state $0$ to state $1$ when the input $u=u_t$ exceeds a threshold value $u_+$ and from state $1$ to state $0$ when the input becomes smaller than a different threshold value $u_-$ which satisfies $u_-<u_+$. Therefore $[u_-,u_+]$ is a bi-stability interval for the relay, see Fig.~\ref{Fig2} (left). For a given input sequence $u_t$ we will denote the time series of the varying state of the non-ideal relay by $$ r_t={\mathcal R}_{u_{-},u_+}[u_t]. $$ The finite (discretized) Preisach model is a superposition of $N(N+1)/2$ relays that have a common input $u=u_t$ which often is also discretized. The relays have different thresholds. The output of the model is defined by the formula $$ p_t=\sum_{i=1}^{N}\sum_{j=1}^{i} {\mathcal R}_{\alpha_{ij},\beta_{ij}}[u_t] $$ where the thresholds of the relays, $(\alpha_{ij},\beta_{ij})$, typically sit on a lattice of step $\varepsilon$ in the half-plane $\beta\ge \alpha$ of a plane $(\alpha,\beta)$ and form a mesh in an isosceles right triangle; and, the input takes values $\{u^0,\ldots,u^N\}$ at the nodes situated between the threshold values, see Fig.~\ref{Fig3} (left). States of the model can be identified with the polylines $L$ that connect the vertex $O$ at the right angle of the triangle with its hypotenuse on the line $\alpha=\beta$, see Fig.~\ref{Fig3} (left). Each polyline has $N$ links that have length $\varepsilon$ and are either horizontal or vertical. 
Therefore a state can be encoded by, and identified with, an $N$-tuple $(a_1,\ldots,a_N)$ where each $a_i$ equals either 0 or 1 and we write 0 for a vertical link and 1 for a horizontal link; $a_1$ defines the direction (vertical or horizontal) for the link starting at the point $O$, $a_2$ correspond to the next link and so on. The set $S_i$ corresponding to the input value $u^i$ consists of the states $L$ that have exactly $i$ entries 1. \begin{figure} \caption{{\bf Left:} \label{Fig3} \end{figure} A detailed description of the Preisach model can be found, for example, in \cite{BroSpr, Pre, Pre1, Pre2, Pr, Pre3}. In particular, transitions between the states are defined by simple rules. Adapting these rules for the discretized model considered here, we obtain the following description of transitions between the states. If the system is at a state $(a_1,\ldots,a_N)\in S_i$ and the input changes from $u^i$ to $u^{i+1}$, then the system transits to the state $(b_1,\ldots,b_N)$ defined by the formulas $$ b_k=1 \quad {\rm if} \quad a_{k}=0 \ {\rm and\ either} \ k=N \ {\rm or} \ a_{k+1}=\cdots=a_N=1;\quad b_j=a_j \ {\rm for} \ j\ne k. $$ That is, the last 0 in the $N$-tuple $(a_1,\ldots,a_N)$ changes to 1. Similarly, when the input changes from $u^i$ to $u^{i-1}$, the system transits from a state $(a_1,\ldots,a_N)\in S_i$ to the state $(b_1,\ldots,b_N)$ defined by $$ b_k=0 \quad {\rm if} \quad a_{k}=1 \ {\rm and\ either} \ k=N \ {\rm or} \ a_{k+1}=\cdots=a_N=0;\quad b_j=a_j \ {\rm for} \ j\ne k, $$ that is, the last 1 in the $N$-tuple $(a_1,\ldots,a_N)$ changes to 0. These rules define the edges of the graph $\Gamma$. Fig.~\ref{Fig3} (right) shows $\Gamma$ for $N=4$. 
\section{Proofs}\label{proof} \subsection{Proof of Theorem \ref{theorem1}} Realization of the system for $u=u^i$ will be a smooth functional $$ V(\bm{x},u^i)=f_i(x_1)+x_2^2, \qquad \bm{x}=(x_1,x_2)\in \mathbb{R}^2, $$ which has $N_i$ local minimum points $\bm{x}_1^m=(x^m_1,0),\ldots,\bm{x}_{N_i}^m=(x^m_{N_i},0)$, where $x^m_1<\cdots<x^m_{N_i}$ are the local minima of the scalar function $f_i: \mathbb{R}\to \mathbb{R}$; $N_i$ is the number of vertices in the set $S_i$; and, $f_i(x_1)=x_1^2$ for sufficiently large $|x_1|$, see Fig.~\ref{Fig4}. We require for convenience that $f_i(x^m_1)=\cdots=f_i(x^m_{N_i})=m_*$ as well as $f_i(x^M_1)=\cdots=f_i(x^M_{N_i-1})=M_*>m_*$ for the local maximum points of $f_i$ that separate its local minima (although this is not necessary for the following construction), see Fig.~\ref{Fig4}. \begin{figure} \caption{ Function $f_i:\mathbb{R} \label{Fig4} \end{figure} For the intermediate value $u^{i+1/2}=(u^i+u^{i+1})/2$, let us define the functional $V(\cdot,u^{i+1/2})$ in a similar fashion so that it would have $N_i+N_{i+1}$ minimum points. That is, $V(\cdot,u^{i+1/2})=f_{i+1/2}(x_1)+x_2^2$ where $f_{i+1/2}(x^m_1)=\cdots=f_{i+1/2}(x^m_{N_i})=f_{i+1/2}(\tilde x^m_1)=\cdots=f_{i+1/2}(\tilde x^m_{N_{i+1}})=m_*$ at the local minimum points and $f_{i+1/2}(x^M_1)=\cdots=f_{i+1/2}(x^M_{N_i})=f_{i+1/2}(\tilde x^M_1)=\cdots=f_{i+1/2}(\tilde x^M_{N_{i+1}-1})=M_*$ at the local maximum points of $f_{i+1/2}$ as well as $f_{i+1/2}(x_1)=x_1^2$ for sufficiently large $|x_1|$. The main step of the proof is the following lemma. \begin{lemma}\label{lemma1} Suppose that a smooth functional $V_0(\bm{x})=V_0(x_1,x_2)=f(x_1)+x_2^2$ has $N$ isolated local minimum points and $f(x_1)=x_1^2$ for sufficiently large $|x_1|$. Let $(j_1,\ldots,j_N)$ be a permutation of the numbers $(1,\ldots,N)$. 
Then there is a smooth deformation $V(\cdot,u)$, $u\in[u_-,u_+]$, such that $$ V(\cdot,u_-)=V(\cdot,u_+)=V_0(\cdot) $$ and the functional $V(\cdot,u)$ has $N$ isolated local minimum points $\bm{x}_i^*(u)$ for each $u\in[u_-,u_+]$. Moreover, the continuous functions $\bm{x}_i^*(u): [u_-,u_+]\to \mathbb{R}^2$ satisfy $$ \bm{x}_k^*(u_-)=\bm{x}_{j_k}^*(u_+),\quad \quad k=1,\ldots,N. $$ \end{lemma} A constructive proof of this statement is presented in the next section. We will say that the deformation defined in Lemma \ref{lemma1} {\em permutes} the minimum points. Indeed, for every $k$, the minimum point $\bm{x}_k^*(u_-)$ of $V(\cdot,u_-)$ and the minimum point $\bm{x}_{j_k}^*(u_+)$ of $V(\cdot,u_+)$ are reversibly connected. Assume that a smooth function $f:\mathbb{R}\to \mathbb{R}$ has $N$ local minimum points $x^m_k$ separated by $N-1$ local maximum points $x^M_k$, \begin{equation}\label{n1} x_1^m<x_1^M<x_2^m<x_2^M<\dots<x_{N-1}^m<x_{N-1}^M<x_N^m, \end{equation} and grows at infinity as in Lemma \ref{lemma1}. Assume that a smooth function $\tilde f: \mathbb{R}\to \mathbb{R}$ has $N-1$ local minimum points $\tilde x^m_k$ separated by points of local maximum $\tilde x^M_k$, \begin{equation}\label{n2} \tilde x_1^m<\tilde x_1^M<\tilde x_2^m<\tilde x_2^M<\dots<\tilde x_{N-2}^M<\tilde x_{N-1}^m, \end{equation} and grows at infinity in the same way. Consider a smooth deformation $f(\cdot,u)$, $u\in[u_-,u_+]$, with $f(\cdot,u_-)=f(\cdot)$, $ f(\cdot,u_+)=\tilde f(\cdot)$ such that the maximum point $x_{N-1}^M$ and the minimum point $x_N^m$ collide and disappear in the saddle-node bifurcation for some $u_{SN}\in (u_-,u_+)$, while the other extremum points continue from $u_-$ to $u_+$, see Fig.~\ref{Fig1}. 
That is, $f(\cdot,u)$ has $2N-1$ critical points for $u\in[u_-,u_{SN})$ and $2N-3$ critical points for $u\in(u_{SN},u_+]$, each critical point $x_k^m=x_k^m(u)$, $x_k^M=x_k^M(u)$ is isolated, and they satisfy relations \eqref{n1} for $u\in[u_-,u_{SN})$, relations \eqref{n2} for $u\in(u_{SN},u_+]$ and the relation $x_{N-1}^M(u_{SN})=x_N^m(u_{SN})$ at the bifurcation point $u=u_{SN}$. Furthermore, all the critical points $x_1^m(u),x_1^M(u),\ldots,x_{N-2}^M(u), x_{N-1}^m(u)$ continuously depend on $u$ on the interval $[u_-,u_+]$ and the critical points $x_{N-1}^M(u), x_N^m(u)$ continuously depend on $u$ in their domain $[u_-,u_{SN}]$. We will say that a deformation satisfying these conditions {\em connects} the local minimum $x_N^m=x_N^m(u_-)$ of $f$ to the local minimum $\tilde x_{N-1}^m=x_{N-1}^m(u_+)$ of $\tilde f$. Without loss of generality, we will impose an extra condition $\partial V/\partial u (\cdot,u_-)=\partial V/\partial u (\cdot,u_+)=0$. Such deformations can be easily constructed explicitly. The above deformation $f(\cdot,u)$ induces the homotopy $$ V(\bm{x},u)=f(x_1,u)+x_2^2,\qquad \bm{x}=(x_1,x_2)\in \mathbb{R}^2,\ u\in[u_-,u_+], $$ that connects the local minimum point $\bm{x}_N^*(u_-)=(x_N^m(u_-),0)$ of the functional $V(\cdot,u_-)$ to the local minimum $\bm{x}_{N-1}^*(u_+)=(x_{N-1}^m(u_+),0)$ of the functional $V(\cdot,u_+)$. Every other local minimum $\bm{x}_k^*(u_-)=(x_k^m(u_-),0)$ of $V(\cdot,u_-)$ is reversibly connected with the local minimum $\bm{x}_k^*(u_+)=(x_k^m(u_+),0)$ of $V(\cdot,u_+)$ ($k=1,\ldots,N-1$). A deformation $V(\cdot,u)$, $u\in[u_-,u_+]$, will be called a {\em concatenation} of deformations $V_k(\cdot,u)$, $u\in[v^k,v^{k+1}]$ if $V(\cdot,u)=V_k(\cdot,u)$ for $u\in[v^k,v^{k+1}]$, where $u_-=v^0<v^1<\cdots<v^{K-1}<v^K=u_+$ is a partition of the interval $[u_-,u_+]$. 
We now define the function $V(\cdot,\cdot)$ for $\bm{x}\in\mathbb{R}^2$, $u\in [u^i,u^{i+1}]$ as a concatenation of deformations $V(\cdot,u)$ that permute minima and deformations that eliminate minima via a saddle-node bifurcation. Assume that the set $S_i$ of the graph $\Gamma$ has $N$ vertices $s_k$ and the set $S_{i+1}$ has $N'$ vertices $s_k'$. Using the construction described above, we define the functionals $V(\cdot, u^i)$, $V(\cdot, u^{i+1})$, $V(\cdot, u^{i+1/2})$ with $N$, $N'$ and $N+N'$ minimum points, respectively. Now, consider an arbitrary partition of the segment $[u^{i+1/2},u^{i+1}]$ into $2N+1$ intervals, $u^{i+1/2}=v_0<v_1<\cdots<v_{2N+1}=u^{i+1}$, and a partition of the segment $[u^{i},u^{i+1/2}]$ into $2N'+1$ intervals, $u^{i}=w_{2N'+1}<\cdots<w_1<w_0=u^{i+1/2}$. Let us associate a local minimum $\bm{x}_{k}^*$ of the functional $V(\cdot,u^i)$ with a vertex $s_k\in S_i$ of $\Gamma$ (as one-one correspondence), each minimum $\bm{x}_{j}^{**}$ of $V(\cdot,u^{i+1})$ with a vertex $s_j'\in S_{i+1}$, and each minimum $\bm{x}^\dagger_{\ell}$ of $V(\cdot,u^{i+1/2})$ with one point of $S_i\cup S_{i+1}$. Next, let us associate the point $v_{2k}$, $k=1,\ldots,N$, with the edge $s_k s_{j_k}'$ of $\Gamma$ that originates at the vertex $s_k\in S_i$ and the points $w_{2k}$ with the edge $s_k's_{\ell_k}$ where $s_k'\in S_{i+1}$. Consider a deformation $V(\cdot,u)$, $u\in [v_0,v_1]$, that permutes the local minima and connects the minimum $\bm{x}^\dagger_1$ of $V(\cdot,v_0)$ associated with the vertex $s_1$ with the largest local minimum of $V(\cdot,v_1)$ (with respect to the ordering defined by the $x_1$-coordinate of the minimum) and the minimum $\bm{x}^\dagger_{N+j_1}$ of $V(\cdot,v_0)$ associated with the vertex $s_{j_1}'$ with the second largest local minimum of $V(\cdot,v_1)$. Then consider a deformation $V(\cdot,u)$, $u\in [v_1,v_2]$, that eliminates the largest minimum of $V(\cdot,v_1)$ via the saddle-node bifurcation. 
A concatenation of these two deformations reversibly connects the minimum point $\bm{x}^\dagger_{N+j_1}$ of $V(\cdot,v_0)$ associated with $s_{j_1}'$ with the largest minimum point of $V(\cdot,v_2)$ as well as connects the minimum point $\bm{x}^\dagger_{1}$ of $V(\cdot,v_0)$ associated with $s_1$ to the same largest minimum point of $V(\cdot,v_2)$. Thus, this concatenation realizes the transition $s_1s_{j_1}'$ as $u$ changes from $v_0=u^{i+1/2}$ to $v_2$. Furthermore, every other minimum of $V(\cdot,v_0)$ is reversibly connected with one and only one minimum of $V(\cdot,v_2)$. The total number of minima of $V(\cdot,v_2)$ is $N+N'-1$, one less than $V(\cdot,v_0)$ has. Now, we proceed by induction to extend the deformation from an interval $[v_0,v_{2k-2}]$ to the interval $[v_0,v_{2k}]$. The functional $V(\cdot,v_{2k-2})$ has a minimum $\bm{x}^\ddagger_k$ reversibly connected with the minimum $\bm{x}_k^\dagger$ of $V(\cdot,v_0)$, which is associated with the vertex $s_k$. Also, $V(\cdot,v_{2k-2})$ has a minimum $\bm{x}^\ddagger_{N+j_k}$ reversibly connected with the minimum $\bm{x}_{N+j_k}^\dagger$ of $V(\cdot,v_0)$, which is associated with the vertex $s_{j_k}'$ (recall that $v_{2k}$ is associated with the edge $s_k s_{j_k}'$ of $\Gamma$). We use a deformation $V(\cdot,u)$, $u\in[v_{2k-2},v_{2k-1}]$, that permutes the minima and connects the minimum points $\bm{x}^\ddagger_k$ and $\bm{x}^\ddagger_{N+j_k}$ of $V(\cdot,v_{2k-2})$ with the largest minimum and the second largest minimum of the functional $V(\cdot,v_{2k-1})$, respectively. We further use a deformation $V(\cdot,u)$, $u\in[v_{2k-1},v_{2k}]$ that eliminates the largest minimum. A concatenation of these deformations with the deformation defined on the interval $v_0\le u\le v_{2k-2}$ completes the definition of $V$ on the interval $v_0\le u\le v_{2k}$ and the inductive step. After $N$ steps, we obtain the deformation defined for $u\in [v_0,v_{2N}]$. 
Finally, on the last interval $v_{2N}\le u\le v_{2N+1}=u^{i+1}$ we can use any deformation $V(\cdot,u)$ that permutes $N'$ local minima $\bm{z}_1^*,\ldots, \bm{z}_{N'}^*$ of $V(\cdot,v_{2N})$ and reversibly connects a local minimum $\bm{x}^{**}_{k}$ of $V(\cdot,v_{2N+1})$ with those (unique) local minimum $\bm{z}^*_j$ of $V(\cdot,v_{2N})$ that is reversibly connected with the minimum $\bm{x}^\dagger_{N+k}$ of $V(\cdot,v_{0})$ (for every $k=1,\ldots,N'$). Now, the concatenation of this deformation with the deformation $V(\cdot,u)$, $u\in[v_0,v_{2N}]$, realizes all the edges $s_ks_{j_k}'$ of $\Gamma$ for the input increasing from $u^{i+1/2}$ to $u^{i+1}$ and, simultaneously, establishes the reversible connection between the minimum points $\bm{x}^\dagger_{N+k}$ and $\bm{x}^{**}_k$ of $V(\cdot,u^{i+1/2})$ and $V(\cdot,u^{i+1})$, respectively, for each $k=1,\ldots,N'$. Using the same construction, we define a deformation $V(\cdot,\cdot)$ on the interval $[u^i,u^{i+1/2}]$ that realizes all the edges $s_k' s_{\ell_k}$ of $\Gamma$ with $s_k'\in S_{i+1}$, $s_{\ell_k}\in S_i$ for the input decreasing from $u^{i+1/2}$ to $u^{i}$ and, simultaneously, establishes the reversible connection between the minimum points $\bm{x}^\dagger_{j}$ and $\bm{x}^{*}_j$ of $V(\cdot,u^{i+1/2})$ and $V(\cdot,u^{i})$, respectively, for each $j=1,\ldots,N$. By construction, the concatenation of the two deformations $V(\cdot,\cdot)$ defined on the intervals $[u^i,u^{i+1/2}]$ and $[u^{i+1/2},u^{i+1}]$ realizes all the edges $s_k s_{j_k}'$ with $s_k\in S_i$, $s_{j_k}'\in S_{i+1}$ and all the edges $s_k's_{\ell_k}$ with $s_k'\in S_{i+1}$, $s_{\ell_k}\in S_i$ of the graph $\Gamma$. Finally, the concatenation of such deformations defined on all the intervals $[u^i,u^{i+1}]$ realizes the whole graph $\Gamma$. This completes the proof of Theorem \ref{theorem1}. 
\subsection{Proof of Lemma \ref{lemma1}} Suppose that the functional $V_0(\bm{x})=f(x_1)+x_2^2$ has exactly $N$ local minimum points $\bm{x}_1^*=({x}_1^m,0),\ldots, \bm{x}_N^*=({x}_N^m,0)$. Here the isolated local minimum points $x_i^m$ of the scalar function $f$ are separated by the isolated local maximum points $$x_1^m<x_1^M<x_2^m<\cdots<x_{N-1}^M<x_N^m$$ and we assume that $f(x_1^m)=\cdots=f(x_N^m)=m_*<M_*=f(x_1^M)=\cdots=f(x_{N-1}^M)$. It suffices to prove the lemma for a permutation that interchanges the points $\bm{x}_j^*$ and $\bm{x}_{j+1}^*$ because any permutation can be obtained as a finite sequence of such ``elementary'' permutations and therefore a realization of any permutation can be obtained as a concatenation of the realizations of the ``elementary'' permutations each interchanging two neighboring minimum points. Starting with the functional $ V(\bm{x},u_-)=V_0(\bm{x}) $ we will concatenate several deformations to obtain the functional $V(\bm{x},u_+)=V(\bm{x},u_-)$ with $\bm{x}^*_j(u_+)= \bm{x}^*_{j+1}(u_-)$, $\bm{x}^*_{j+1}(u_+)= \bm{x}^*_{j}(u_-)$ and $\bm{x}^*_i(u_+)= \bm{x}^*_{i}(u_-)$ for $i\ne j, j+1$. 
\begin{figure} \caption{Function $\tilde f$ and points $x_{j-1} \label{Fig5} \end{figure} First, define a smooth function $\tilde f(x_1)$ such that $\tilde f$ and $f$ have the same extremum points $x_i^m, x_i^M$; the maximum value of $\tilde f$ at the point $x_j^M$ satisfies $\tilde f(x_j^M)<M_*$ where $M_*$ is the value of $f$ at all its maximum points; there are points $r_-,r_+$ satisfying $x_{j-1}^M<r_-<x_j^m<x_j^M<x_{j+1}^m<r_+<x^M_{j+1}$ such that $\tilde f(x_1)=f(x_1)$ for $x_1\le r_- $ and $x_1\ge r_+$ (we agree that $x_{0}^M$, $x_{N}^M$ are the non-critical points at which $f$ equals $M_*$) and $\tilde f(r_-)>\tilde f (x_j^M)$, $\tilde f(r_+)>\tilde f (x_j^M)$; and, there are two intervals $[r_-^D,r_-^d]\subset (r_-,x_j^m)$ and $[r_+^d,r_+^D]\subset (x_{j+1}^m,r_+)$ such that $r^d_- -r^D_-=r^D_+-r^d_+$ and \begin{equation}\label{sym} \tilde f(r_-^D+s)=\tilde f(r_+^D-s),\quad \ \ s\in[0,r^d_- -r^D_-], \end{equation} \begin{equation}\label{condi} \tilde f(r_\pm^d)>\tilde f (x_j^M), \end{equation} see Fig.~\ref{Fig5}. Without loss of generality we can assume that $$ |x_j^m-x_j^M|=|x_{j+1}^m-x_j^M|,\quad |r_-^d-x_j^M|=|r_+^d-x_j^M|,\quad |r_-^D-x_j^M|=|r_+^D-x_j^M|. $$ The first deformation we use is the linear deformation $$ V(x_1,x_2,u)=(1-a(u)) f(x_1)+a(u) \tilde f(x_1)+x_2^2,\quad u\in[u_-,v^1], $$ with $v^1<u_+$, where $a(u_-)=0$, $a(v^1)=1$ and $\frac{d a}{du}(u_-)=\frac{d a}{du}(v^1)=0$ (the latter condition will be used to smoothly concatenate this deformation with the next one). Next, we introduce two more points $r^0_\pm$ such that $$ x_{j-1}^M<r_-<r_-^D<r_-^d<r_-^0< x_j^m<x_j^M<x_{j+1}^m< r_+^0<r_+^d<r_+^D<r_+<x^M_{j+1}. 
$$ Let us define two concentric circles $\partial D_L$ and $\partial D_S$ with the center $(x_j^M,0)$ and the radii $(r_+^D-r_-^D)/2=x_j^M-r_-^D=r^D_+-x_j^M$ and $(r_+^d-r_-^d)/2=x_j^M-r_-^d=r^d_+-x_j^M$, respectively; a narrow ellipse $\partial E_S$ elongated along the $x_1$-axis that intersects this axis at the points $(r_\pm^0,0)$; and a narrow ellipse $\partial E_L$ elongated along the $x_2$-axis that intersects the $x_1$-axis at the points $(r_\pm,0)$. We denote by $E_S$ the closed domain bounded by the smaller ellipse, by $E_L$ the closed domain bounded by the larger ellipse, and by $D_L$ and $D_S$ the discs bounded by the circles $\partial D_L$ and $\partial D_S$, see Fig.~\ref{Fig6} (left). That is, $E_S\subset D_S\subset D_L\subset E_L$. By construction, the domain $E_S$ contains the critical points $(x_j^m,0), (x_j^M,0), (x_{j+1}^m,0)$ of the functional $V(\cdot,v^1)$, while all the other critical points of this functional lie outside $E_L$. Let us denote by $J$ the segment $J=\{\bm{x}=(x_1,0): r_-\le x_1\le r_+\}$ and define the functional $\Phi$ by the formulas \begin{equation}\label{new} \Phi(x_1,x_2)=V(x_1,x_2,v^1) \quad {\rm for}\quad \bm{x}=(x_1,x_2)\in (\mathbb{R}^2\setminus E_L)\cup E_S \cup J, \end{equation} \begin{equation}\label{new1} \Phi(x_1,x_2)=V\bigl(x_j^M+\sqrt{(x_1-x_j^M)^2+x_2^2},0,v^1\bigr) \quad {\rm for}\quad \bm{x}=(x_1,x_2)\in D_L\setminus D_S \end{equation} (the latter formula is consistent with \eqref{sym}) in the union of the ellipse $E_S$, the annulus $D_L\setminus D_S$, the segment $J$ and outside the ellipse $E_L$. We extend this functional to the rest of the plane $(x_1,x_2)$ along vertical segments as follows. 
For every vertical segment (of positive length) that belongs either to the upper half-plane $x_2\ge 0$ or the lower half-plane $x_2\le 0$ and that has one end $(x_1,x_2^e)$ on the circle $\partial D_S$ and the other end $(x_1,x_2^f)$ either on the curve $\partial E_S$ or the interval $\{(x_1,0): r_-^d\le x_1\le r_-^0\}$ or the interval $\{(x_1,0): r_+^0\le x_1\le r_+^d\}$, the functional is defined by \begin{equation}\label{phi3} \Phi(x_1,x_2)=\frac{|x_2^2-(x_2^e)^2|}{|(x_2^e)^2-(x_2^f)^2|}\Phi(x_1,x_2^f)+\frac{|x_2^2-(x_2^f)^2|}{|(x_2^e)^2-(x_2^f)^2|}\Phi(x_1,x_2^e). \end{equation} The same formula defines $\Phi$ on every vertical segment that belongs either to the upper half-plane $x_2\ge 0$ or the lower half-plane $x_2\le 0$ and that has one end $(x_1,x_2^e)$ on the curve $\partial E_L$ and the other end $(x_1,x_2^f)$ either on the circle $\partial D_L$ or the interval $\{(x_1,0): r_-\le x_1\le r_-^D\}$ or the interval $\{(x_1,0): r_+^D\le x_1\le r_+\}$. By construction, the continuous and piecewise smooth functional $\Phi$ satisfies $\Phi(x_1,-x_2)=\Phi(x_1,x_2)$. Furthermore, we assume that the ellipse $E_S$ has a sufficiently short vertical semi-axis $b_S$; then relation \eqref{condi} implies that $\Phi(x_1,x_2)$ is increasing in $x_2$ in the domain $\{(x_1,x_2): x_2\ge0\}\cap (D_S\setminus E_S)$. We also assume that the ellipse $E_L$ has a sufficiently long vertical semi-axis $b_L$ so that $\Phi(x_1,x_2)$ is increasing in $x_2$ in the domain $\{(x_1,x_2): x_1\in [r^D_-,r^D_+], x_2\ge0\}\cap (E_L\setminus D_L)$. Since the functional $V(x_1,x_2,v^1)$ increases in $x_2$ in the upper half-plane and \eqref{new} holds, we conclude that $\Phi(x_1,x_2)$ increases in $x_2$ in the whole upper half-plane $x_2\ge0$ too. 
\begin{figure} \caption{{\bf Left:} \label{Fig6} \end{figure} Now we smoothen $\Phi$ using averaging over a small disc of radius $\rho$, that is we define \begin{equation}\label{phit} \tilde \Phi(x_1,x_2)=\frac{1}{\pi \rho^2} \iint\limits_{\xi_1^2+\xi_2^2\le \rho^2} \Phi(x_1+\xi_1,x_2+\xi_2)\,d\xi_1d\xi_2. \end{equation} This is a smooth functional with the derivatives defined by \begin{equation}\label{derivatives} \frac{\partial \tilde\Phi}{\partial x_1}(x_1,x_2)=\frac{1}{\pi \rho^2}\oint\limits_{\xi_1^2+\xi_2^2=\rho^2} \Phi(x_1+\xi_1,x_2+\xi_2)\,d\xi_2, \end{equation} \begin{equation}\label{derivatives'} \frac{\partial \tilde\Phi}{\partial x_2}(x_1,x_2)=- \frac{1}{\pi \rho^2}\oint\limits_{\xi_1^2+\xi_2^2=\rho^2} \Phi(x_1+\xi_1,x_2+\xi_2)\,d\xi_1 \end{equation} (with the integrals along the circle taken counterclockwise). Since $\Phi$ is an even functional with respect to $x_2$, so is $\tilde \Phi$, that is $\tilde \Phi(x_1,-x_2)=\tilde \Phi(x_1,x_2)$. Therefore, \begin{equation}\label{11} \frac{\partial \tilde\Phi}{\partial x_2}=0\quad \ {\rm for}\quad \ x_2=0. \end{equation} Let us show that \begin{equation}\label{12} \frac{\partial \tilde\Phi}{\partial x_2}>0\quad \ {\rm for}\quad \ x_2>0. \end{equation} Indeed, if the circle of radius $\rho$ centered at the point $(x_1,x_2)$ belongs to the upper half-plane $x_2\ge 0$, then relation \eqref{12} follows from \eqref{derivatives'} because the functional $\Phi$ strictly increases in $x_2$ in this half-plane. If part of the circle lies in the lower half-plane, then \eqref{12} follows from the same equality due to the fact that the functional $\Phi$ strictly increases in $x_2$ and satisfies $\Phi(x_1,-x_2)=\Phi(x_1,x_2)$. Relations \eqref{11}, \eqref{12} imply that all the critical points of the functional $\tilde \Phi$ belong to the line $x_2=0$ and are defined by the equation $\frac{\partial \tilde\Phi}{\partial x_1}=0$. 
Note that by construction $\tilde \Phi=\Phi$ in some neighborhood $\Omega$ of the critical points of the functional $\Phi$. Let us choose a sufficiently small parameter $\rho$ of smoothening in \eqref{phit} so that the closed disc of radius $\rho$ centered at every critical point belongs to $\Omega$ and $\rho<r^D_+-r^d_+$. Now, we prove that $\Phi$ and $\tilde \Phi$ have the same critical points. It suffices to show that $\frac{\partial \tilde\Phi}{\partial x_1}(x_1,0)\ne 0$ on the intervals $[r_-,r^0_-]$ and $[r_+^0,r_+]$ (as on the rest of the $x_1$ axis $\Phi=\tilde \Phi$). To this end, let us take a $h\in[0,\rho]$ and consider the horizontal segment $J_h=\{(x_1,x_2): r_--\rho\le x_1\le r^0_-+\rho,\ x_2=h\}$. We are going to show that $\Phi$ strictly decreases in $x_1$ along this segment and therefore equality \eqref{derivatives} (together with the relation $\Phi(x_1,-x_2)=\Phi(x_1,x_2)$) implies that $\frac{\partial \tilde\Phi}{\partial x_1}(x_1,0)< 0$ for $x_1\in [r_-,r^0_-]$. Consider the intersections $J_1$, $J_2$, $J_3$, $J_4$, $J_5$ of the interval $J_h$ with the sets $\mathbb{R}^2\setminus E_L$, $E_L\setminus D_L$, $D_L\setminus D_S$, $D_S\setminus E_S$ and $E_S$, respectively (see Fig.~\ref{Fig6}). On the intervals $J_1$ and $J_5$, the functional $\Phi$ decreases with the increasing $x_1$ because $\Phi=V(\cdot,v^1)=\tilde f(x_1)+h^2$ and $\tilde f'(x_1)<0$ in a neighborhood of the interval $[r_-,r_-^0]$. On the interval $J_3$, according to \eqref{new1}, we have $\Phi(x_1,h)=V\bigl(x_j^M+\sqrt{(x_1-x_j^M)^2+h^2},0,v^1\bigr)=\tilde f(x_j^M+\sqrt{(x_1-x_j^M)^2+h^2})$, which also implies $\frac{\partial \tilde\Phi}{\partial x_1}< 0$. It remains to consider the segments $J_2$ and $J_4$. 
The segment $J_2$ can be divided into two parts, $$ J_2^\ell=\bigl\{(x_1,h): x_j^M-a_L\sqrt{1-h^2/b_L^2}\le x_1 \le r_-^D\bigr\}, $$ $$ J_2^r=\bigl\{(x_1,h): r_-^D \le x_1 \le x_j^M-\sqrt{R_L^2-h^2}\bigr\}, $$ where $(x_1-x_j^M)^2/a_L^2+x_2^2/b_L^2=1$ is the equation of the ellipse $\partial E_L$ and $(x_1-x_j^M)^2+x_2^2=R_L^2$ is the equation of the circle $\partial D_L$ (see Fig.~\ref{Fig6}). According to \eqref{phi3}, on the interval $J_2^\ell$, $$ \Phi(x_1,h)=\left(1-\frac{h^2}{b_L^2\bigl({1-{{a_L^{-2}}(x_1-x_j^M)^2}}\bigr)}\right)\tilde f(x_1)+\frac{h^2\bigl(\tilde f(x_1)+b_L^2\bigl(1-{a_L^{-2}}{(x_1-x_j^M)^2}\bigr)\bigr)}{b_L^2\bigl({1-{a_L^{-2}}{(x_1-x_j^M)^2}}\bigr)}, $$ hence $\Phi(x_1,h)=\tilde f(x_1)+h^2$ and $\frac{\partial \Phi}{\partial x_1}(x_1,h)=\tilde f'(x_1)<0$ on $J_2^\ell$. On the interval $J_2^r$, $$ \Phi(x_1,h)=\left(1-c(x_1)\right)\tilde f(x_j^M-R_L)+ c(x_1) \bigl(\tilde f(x_1)+ b_L^2({1-{a_L^{-2}}{(x_1-x_j^M)^2}})\bigr) $$ with $$ c(x_1)=\frac{h^2-(R_L^2-(x_1-x_j^M)^2)}{b_L^2(1-{a_L^{-2}}{(x_1-x_j^M)^2})-(R_L^2-(x_1-x_j^M)^2)}, $$ hence $$ \begin{array}{rcl} \frac{\partial \Phi}{\partial x_1}&=&c'(x_1)\bigl(\tilde f(x_1)+ b_L^2({1-{a_L^{-2}}{(x_1-x_j^M)^2}})-\tilde f(x_j^M-R_L)\bigr)\\ &+&c(x_1)\bigl(\tilde f'(x_1)-2(x_1-x_j^M)b_L^2 a_L^{-2}\bigr). \end{array} $$ Here $$ \begin{array}{rcl} {\rm sgn}\, c'(x_1)&=&{\rm sgn}\, \left[(b_L^2(1-{a_L^{-2}}{(x_1-x_j^M)^2})-(R_L^2-(x_1-x_j^M)^2))(x_1-x_j^M)\right.\\ &+&\left.(a_L^{-2} b_L^2-1)(x_1-x_j^M)(h^2-(R_L^2-(x_1-x_j^M)^2))\right]\ =\ -1 \end{array} $$ because $b_L\gg 1$; and, $c(x_1)=O(h^2)$. Therefore, $\frac{\partial \Phi}{\partial x_1}<0$ for small $h$. 
The segment $J_4$ can be divided into two parts, $$ J_4^\ell=\bigl\{(x_1,h): x_j^M-\sqrt{R_S^2-h^2} \le x_1 \le r_-^0\bigr\}, $$ $$ J_4^r=\bigl\{(x_1,h): r_-^0\le x_1 \le x_j^M-a_S\sqrt{1-h^2 b_S^{-2}}\bigr\}, $$ where $(x_1-x_j^M)^2 a_S^{-2}+x_2^2 b_S^{-2}=1$ is the equation of the ellipse $\partial E_S$ and $(x_1-x_j^M)^2+x_2^2=R_S^2$ is the equation of the circle $\partial D_S$. On the interval $J_4^\ell$, $$ \Phi(x_1,h)=\left(1-\frac{h^2}{R_S^2-(x_1-x_j^M)^2}\right)\tilde f(x_1)+\frac{h^2}{R_S^2-(x_1-x_j^M)^2}\tilde f(x_j^M-R_S), $$ and therefore, $$ \frac{\partial \Phi}{\partial x_1}(x_1,h)= \left(1-\frac{h^2}{R_S^2-(x_1-x_j^M)^2}\right)\tilde f'(x_1)+\frac{2h^2(x_1-x_j^M)}{(R_S^2-(x_1-x_j^M)^2)^2}(\tilde f(x_j^M-R_S)-\tilde f(x_1)), $$ hence the relationships $\tilde f(x_j^M-R_S)>\tilde f(x_1)$ and $\tilde f'(x_1)<0$ imply $\frac{\partial \Phi}{\partial x_1}<0$. Finally, on the segment $J_4^r$, $$ \Phi(x_1,h)=\tilde c(x_1)\bigl(\tilde f(x_1)+b_S^2(1-{a_S^{-2}}{(x_1-x_j^M)^2})\bigr)+ (1-\tilde c(x_1))\tilde f(x_j^M-R_S) $$ with $$ \tilde c(x_1)=\frac{R^2_S-(x_1-x_j^M)^2-h^2}{R^2_S-(x_1-x_j^M)^2-b_S^2(1-{a_S^{-2}}{(x_1-x_j^M)^2})}, $$ hence $$ \begin{array}{rcl} \frac{\partial \Phi}{\partial x_1}(x_1,h)&=& \tilde c'(x_1)\bigl(\tilde f(x_1)+ b_S^2({1-{a_S^{-2}}{(x_1-x_j^M)^2}})-\tilde f(x_j^M-R_S)\bigr)\\ &+& \tilde c(x_1)(\tilde f'(x_1)-2(x_1-x_j^M)b_S^2 a_S^{-2}). \end{array} $$ Since $b_S\ll1$, $\tilde f'(x_1)<0$ and $\tilde f(x_j^M-R_S)>\tilde f(x_1)$, the term $\tilde c(x_1)(\tilde f'(x_1)-2(x_1-x_j^M)b_S^2 a_S^{-2})$ in this expression is negative and the term $\bigl(\tilde f(x_1)+ b_S^2({1-{a_S^{-2}}{(x_1-x_j^M)^2}})-\tilde f(x_j^M-R_S)\bigr)$ is negative. Furthermore, $$ \begin{array}{rcl} {\rm sgn}\, \tilde c'(x_1)&=&{\rm sgn}\, \bigl[-\bigl(R_S^2-(x_1-x_j^M)^2- b_S^2(1-{a_S^{-2}}(x_1-x_j^M)^2)\bigr)(x_1-x_j^M) \\ &-&(R_S^2-(x_1-x_j^M)^2-h^2)(b_S^2 a_S^{-2}-1)(x_1-x_j^M)\bigr]\ =\ 1, \end{array} $$ where we again use $b_S\ll 1$. 
Hence, $\frac{\partial \Phi}{\partial x_1}<0$ on this interval too. We conclude that $\Phi$ decreases along each segment $J_h$, $|h|\le\rho$, and hence $\frac{\partial \tilde\Phi}{\partial x_1}(x_1,0)< 0$ for $x_1\in [r_-,r^0_-]$. Similarly, one can show that $\Phi$ strictly increases along each segment $\{(x_1,x_2): r_+^0-\rho\le x_1\le r_++\rho,\ x_2=h\}$ and therefore $\frac{\partial \tilde\Phi}{\partial x_1}(x_1,0)>0$ for $x_1\in [r^0_+,r_+]$. Thus, the critical points of $\tilde \Phi$ coincide with the critical points of $\Phi$. Now, we fix $v^2\in (v^1,u_+)$ and define the linear deformation $$ V(\bm{x},u)=V(x_1,x_2,u)=(1-\tilde a(u)) V(x_1,x_2,v^1)+\tilde a(u)\tilde \Phi(x_1,x_2),\quad u\in[v^1,v^2], $$ where $\tilde a(v^1)=0$, $\tilde a(v^2)=1$ and $\frac{d \tilde a}{du}(v^1)=\frac{d \tilde a}{du}(v^2)=0$. Since both functionals $V(x_1,x_2,v^1)=\tilde f(x_1)+x_2^2$ and $\tilde \Phi(x_1,x_2)$ are even in $x_2$, strictly increase in $x_2$ for positive $x_2$, have the same critical points and the same monotonicity on the intervals of the line $x_2=0$ separated by the critical points, this deformation also has the same critical points for every $u\in[v^1,v^2]$. By construction (see formula \eqref{new1}), each circle $(x_1-x_j^M)^2+x_2^2=R^2$ within the annulus $D_L\setminus D_S$ is a level line of the functional $\Phi$ and since the smoothening parameter $\rho$ has been chosen small enough, there is an annulus ${\mathcal R}=\{(x_1,x_2): R_1^2< (x_1-x_j^M)^2+x_2^2< R_2^2\}\subset D_L\setminus D_S$ such that each circle $(x_1-x_j^M)^2+x_2^2=R^2$ is a level line of the functional $\tilde \Phi$. Now, we can define a deformation that interchanges the local minimum points $x_j^m=(x_j^m,0)$ and $x_{j+1}^m=(x_{j+1}^m,0)$ simply using the rotation about the center $(x_j^M,0)$: \begin{equation}\label{rotate} V(x_1,x_2,u)=\left\{ \begin{array}{ll} V\bigl(x_j^M+(x_1-x_j^M)\cos \alpha -x_2\sin\alpha,\ (x_1-x_j^M)\sin\alpha+x_2\cos\alpha,v^2\bigr),& (x_1-x_j^M)^2+x_2^2\le R^2,\\ V(x_1,x_2,v^2),& (x_1-x_j^M)^2+x_2^2> R^2 \end{array} \right. 
\end{equation} with a smooth function $\alpha=\alpha(u)$ defined for $u\in[v^2,v^3]\subset [v^2,u_+)$, where $R\in (R_1,R_2)$ and $\alpha(v^2)=0$, $\alpha(v^3)=\pi$, $\frac{d\alpha}{du}(u)>0$ for $u\in(v^2,v^3)$ and $\frac{d\alpha}{du}(v^2)=\frac{d\alpha}{du}(v^3)=0$. By construction, the functional $V(\cdot,v^3)$ increases in $x_2$ in the upper half-plane and is even in $x_2$. Finally, we use the linear deformation $$ V(x_1,x_2,u)=(1-b(u)) V(x_1,x_2,v^3)+ b(u)(f(x_1)+x_2^2),\quad u\in[v^3,u_+], $$ with $b(v^3)=0$, $b(u_+)=1$ to connect the functional $V(\cdot,v^3)$ to the functional $V(\cdot, u_+)=V(\cdot, u_-)$. This completes the proof of the lemma and the theorem. \section*{Appendix} Here we briefly discuss multi-scale systems that may lead to equation \eqref{3} in the adiabatic limit. Consider a two-dimensional system $$ \ddot{\bm{x}}+ \gamma \dot{\bm{x}} +\nabla_x V(\bm{x},u)=0 $$ with $\bm{x}=(x_1,x_2)\in\mathbb{R}^2$ where the potential $V$ depends on a scalar parameter $u$. We assume large friction, $\gamma\gg 1$. Introducing the slow time $\tau =\gamma^{-1} t$, this equation can be rewritten as a slow-fast system $$ \begin{array}{rcl} \bm{x}' &=& \bm{y} \\ \varepsilon \bm{y}' &=& - \nabla_x V(\bm{x},u)-\bm{y} \end{array} $$ with a small parameter $\varepsilon = \gamma^{-2}\ll 1$ (dot stands for $d/dt$, prime denotes the derivative $d/d\tau$ with respect to the slow time). This system has an attracting slow manifold which approaches the critical surface $\bm{y} = - \nabla_x V(\bm{x},u)$ in the limit $\varepsilon\to 0$ (equivalently, $\gamma\to \infty)$. Hence, for large $\gamma$, dynamics on this manifold can be approximated by the gradient equation \begin{equation}\label{3'} \bm{ x}' = -\nabla_x V(\bm{x},u) \end{equation} or, $\gamma \dot{\bm{x}} = -\nabla_x V(\bm{x},u)$ using the original time $t$. Now, we assume that the parameter $u$ can vary in time slowly, that is $u=u(\nu t)$. 
Hence, we have a system with three time scales, \begin{equation}\label{4} \ddot{\bm{x}}+ \gamma \dot{\bm{x}} +\nabla_x V(\bm{x},u(\nu t))=0, \end{equation} and the approximating system with two time scales: \begin{equation}\label{5} \bm{x}' = -\nabla_x V(\bm{x},u (\mu \tau)) \end{equation} where $\mu=\gamma \nu$. The variable $u$ can be interpreted as input of these systems. Finally, assume that the characteristic time scale associated with the input variations, $\theta=\nu t$, is the slowest time scale and, furthermore, $\mu=\gamma \nu\ll 1$. In this case, dynamics of system \eqref{5} (as well as dynamics of system \eqref{4} in the limit $\gamma\to \infty$, $\mu=\gamma \nu\to 0$) on the slowest time scale of the input variations, $\theta=\nu t$, can be described as a sequence of transitions between local minimum points of the potential energy $V(\cdot,u)$ along the gradient flow of system \eqref{3'}. We remark that hysteresis is a cause of energy dissipation. Namely, each transition from a critical point $x_*(u)$ to a critical point $x^*(u)$ of the potential $V(\cdot,u)$ (induced by a saddle-node bifurcation that eliminates the critical point $x_*(u)$) is associated with irreversible dissipation of $V(x^*(u),u)-V(x_*(u),u)$ units of energy. Continuous evolution of the system with a minimum point $x_*(u)$ of the potential is reversible and dissipationless. \end{document}
\begin{document} \title{Finding similarity of orbits between two discrete dynamical systems via optimal principle} \author[Y. Chen and Y. Li]{Yuting Chen\affil{1} and Yong Li\affil{1,2}\comma\corrauth} \address{\affilnum{1}\ College of Mathematics, Jilin University, Changchun 130012, P.R. China. \\ \affilnum{2}\ School of Mathematics and Statistics, and Center for Mathematics and Interdisciplinary Sciences, Northeast Normal University, Changchun 130024, P.R. China.} \emails{{\tt [email protected]} (Y.~Chen), {\tt [email protected]} (Y.~Li)} \begin{abstract} Whether there is similarity between two physical processes in the movement of objects and the complexity of behavior is an essential problem in science. How to seek similarity through the adoption of quantitative and qualitative research techniques still remains an urgent challenge we face. To this end, the concepts of similarity transformation matrix and similarity degree are innovatively introduced to describe similarity of orbits between two complicated discrete dynamical systems that seem to be irrelevant. Furthermore, we present a general optimal principle, giving a strict characterization from the perspective of dynamical systems combined with optimization theory. For well-known examples of chaotic dynamical systems, such as Lorenz attractor, Chua's circuit, R$\rm\ddot{o}$ssler attractor, Chen attractor, L$\rm\ddot{u}$ attractor and hybrid system, with using of the homotopy idea, some numerical simulation results demonstrate that similarity can be found in rich characteristics and complex behaviors of chaotic dynamics via the optimal principle we presented. 
\end{abstract} \ams{37N40, 49K15, 65K10} \keywords{similarity, optimal principle, homotopy , discrete dynamical system, chaotic attractor.} \maketitle \section{Introduction} \label{sec1} Discrete dynamical systems described by iteration of mappings appear everywhere, showing directive laws from physical science or result from simulations to better understand differential equations numerically. Generally, it is much more difficult but interesting to investigate how complex behavior happens to discrete dynamical systems than continuous dynamical systems after some iterations, since there are probably greater covered ranges and more ghost phenomena. Along with the development of computer technology, modeling problems by means of discrete dynamical systems mathematically has already been gained in different fields such as biology, economics, demography, engineering, and so on. It is universally acknowledge that no matter how different the various technologies develop as well as the objects appear in the research process, there are certain underlying similarities. Similarity, in addition to being frequently encountered, is viewed as a fundamental concept in scientific research. The idea of similarity has gained widespread popularity in the era of big data and machine learning by various means. For instance, scale similarity is found in many natural phenomena in the universe \cite{Wang2017(1)}. An embedding-based vehicle method with deep representation learning drastically accelerates trajectory similarity computation \cite{Chen2022}. A novel brain electroencephalography (EEG) clustering algorithm not only handles the problem of unlabeled EEG, but also avoids the time-consuming task of manually marking the EEG \cite{Dai2022}. Based on the cosine similarity, a transductive long short-term memory model is developed for temperature forecasting \cite{Karevan2020}. 
Self-similar coordinates are investigated in Lattice Boltzmann equation, showing that the time averaged statistics for velocity and vorticity express self-similarity at low Reynolds \cite{Zarghami2012}. Many other applications include gene expression \cite{Arbela2018}, image registration \cite{Mang2017}, web pages and scientific literature \cite{Wang2017}, fuzzy linguistic term sets \cite{Liao2014}, collaborative filtering \cite{Liu2014}, pattern analysis \cite{Zhao2013} and preferential attachment \cite{Papadopoulos2012}. Indeed, the ubiquitous similarity is attributed to facilitate prediction of indeterminate events by analyzing known data, being an essential task in many natural systems and phenomena of real life. A core part of similarity search is the so-called similarity measure whose famous characteristic is able to assess how similar two sequences are, in other words, the degree to which a given sequence resembles another. Many researchers have paid great attention to devise a proper similarity measure and have achieved several valuable results, which can be roughly categorized into two sorts. One sort is based on the traditional measures, such as Euclidean distance, dynamic time warping, cosine and cotangent similarity measures and Pearson correlation coefficient \cite{Liao2005}. The other sort is some transform-based methods, such as singular value decomposition, principal component analysis, Fourier coefficients, auto-regression and moving average model \cite{Bartolini2005,Fu2011}. The cautious selection of similarity measure scheme has long been a research hotspot, affecting the accuracy of further data mining tasks directly, such as classification, clustering and indexing \cite{Shen2018,Torrente2021}. Up to now, whether there is similarity between two physical processes and how to seek similarity through a mathematical principle are still remain a significant challenge. 
Several theoretical approaches are available to deal with this problem by taking into account asymptotic equivalence, synchronization and stability just as some kind of similarity. Furthermore, almost all similarity measure criteria are exploited according to application background and actual data, which can only be regarded as quantitative representations to estimate pairwise similarity of a given series resembles another under certain conditions. Determining similarity between orbits derived from chaotic systems generally characterized by highly complex behavior is particularly difficult when the general similarity measure is employed. Having in view that, for any two chaotic dynamical systems, what are their similarities and how do we find them are both fundamental but challenging subjects in science and engineering. For this purpose, we will try to touch these problems. To the authors best knowledge, this is the first work to develop a mathematical framework, studying the connection of orbits between two discrete dynamical systems from the perspective of dynamical systems combined with optimization theory. Novel contributions and results of this paper include 1) proposing the concepts of similarity transformation matrix and similarity degree to describe what extent the orbits derived from two discrete dynamical systems are similar; 2) presenting a general optimality principle by employing variational method when orbits of two discrete dynamical systems are similar at some step; 3) constructing hybrid dynamical system with richer and more complex dynamical behavior via the idea of homotopy, applying to numerical simulation. The remainder of this paper begins with review of several typical chaotic attractors related to this study. 
We formalize the similarity via optimization techniques and give the definitions of similarity transformation matrix and similarity degree mathematically to assess what extent the orbits of two dynamical systems are similar, followed by establishing the main results of this paper in Section \ref{sec3}. Section \ref{sec4} reports numerical simulation results of chaotic systems to support the theoretical findings. Conclusions are drawn in Section \ref{sec5}. \section{Chaotic systems} \label{sec2} Among a broad variety of dynamical systems in the universe, we consider some typical chaotic systems such as Lorenz attractor, Chua' circuit, R$\rm\ddot{o}$ssler attractor, Chen attractor, L$\rm\ddot{u}$ attractor and their hybrid systems. Lorenz attractor, the first chaotic dynamical system, was obtained by Lorenz in 1963 from simplified mathematical model developed for atmospheric convection while modelling meteorological phenomena \cite{Lorenz1963}. The chaotic system is a typical nonlinear system with three differential equations known as the Lorenz equations \begin{equation}\label{1} \dot{x}=-\sigma x+\sigma y,~~ \dot{y}=-xz+rx-y,~~ \dot{z}=xy-bz, \end{equation} where $x$, $y$, $z$ represent the system states and the system parameters are selected as $\sigma=10$, $b=8/3$, $r=28$. The initial conditions are $x(0)=0.1$, $y(0)=0.1$, $z(0)=0.1$, then the behavior of Lorenz attractor resembling a butterfly or figure eight is illustrated in Fig. \ref{Fig.1}. \begin{figure} \caption{Lorenz attractor.} \label{Fig.1} \caption{Chua's circuit.} \label{Fig.2} \end{figure} Chua's circuit is the simplest electronic circuit known as nonperiodic oscillator \cite{Chua1992}. It has been confirmed by numerous experimental simulations and rigorous mathematical analysis that this circuit is able to produce an oscillating waveform exhibiting classic chaos behavior and many well-known bifurcation phenomena. 
Three ordinary differential equations are found as below in the analysis of Chua's circuit \begin{equation}\label{2} \dot{x}=\alpha[y-x-f(x)],~~ \dot{y}=x-y+z,~~ \dot{z}=-\beta y, \end{equation} where $x$, $y$ denote the voltage of capacities, $z$ represents inductance current and the parameters $\alpha$, $\beta$ are determined by the particular values of the circuit components. The function $f(x)$ is defined as a piecewise-linear function, describing the electrical response of nonlinear resistor \begin{equation}\label{3} f(x)=m_1x+\dfrac{1}{2}(m_0-m_1)(|x+1|-|x-1|). \end{equation} Fig. \ref{Fig.2} shows the double scroll attractor from Chua's circuit, in which the initial states are $x(0)=0.1$, $y(0)=0.1$, $z(0)=0.1$, and the parameters are determined as $\alpha=10$, $\beta=15$, $m_0=-1.2$, $m_1=-0.6$. R\"ossler attractor behaves similarly to Lorenz attractor, and it is the simplest chaotic attractor from the topological point of view. This attractor is applied to modelling equilibrium in chemical reactions which is a chaotic solution to the system of three differential equations \begin{equation}\label{4} \dot{x}=-y-z,~~ \dot{y}=x+ay,~~ \dot{z}=b+z(x-c), \end{equation} where $x$, $y$, $z$ denote the system states and three parameters $a$, $b$ and $c$ are assumed to be positive \cite{Rossler1976}. We select numerical values of parameters as $a=0.2$, $b=0.2$, $c=5.7$ and give a typical orbit of R\"ossler attractor, which admits chaotic behavior, as shown in Fig. \ref{Fig.3}. \begin{figure} \caption{R\"ossler attractor.} \label{Fig.3} \caption{Chen attractor.} \label{Fig.4} \end{figure} Chen attractor is found in the pursuit of chaotification, being similar but topologically not equivalent to Lorenz attractor \cite{Chen1999}. Although Chen attractor has a simple structure and is the dual to Lorenz system, it is considered to display even more sophisticated dynamical behaviors \cite{Ueta2000}. 
The three-dimensional autonomous system of ordinary differential equations with quadratic nonlinearities that describes Chen system is \begin{equation}\label{5} \dot{x}=a(y-x),~~ \dot{y}=(c-a)x-xz+cy, ~~ \dot{z}=xy-bz, \end{equation} where $x$, $y$, $z$ are the system states and $a$, $b$, $c$ are real parameters. For parameter values $a=40$, $b=3$, $c=28$, we obtain a Lorenz-based wing attractor as shown in Fig. \ref{Fig.4}. L\"u attractor is another example that captures the paradigms of chaotic system, which connects Lorenz attractor and Chen attractor and represents the transition from one to the other \cite{Lu2002(1),Lu2002(2)}. In order to reveal the topological structure of this chaotic attractor, consider its controlled system which is obtained by adding a constant $u$ to the second equation of L\"u system \begin{equation}\label{6} \dot{x}=a(y-x),~~ \dot{y}=-xz+cy+u,~~ \dot{z}=xy-bz, \end{equation} where $x$, $y$, $z$ denote the system states, $a$, $b$, $c$ are the system parameters. By varying the parameter $u$ considered as ``controller'' of the controlled system, one can observe different dynamical behaviors, contributing to a better understanding of all similar and closely related chaotic systems \cite{Lu2002(3)}. When $a=36$, $b=3$, $c=20$, all the simulation figures are summarized in Fig. \ref{Fig.5}. \begin{figure} \caption{L\"u attractor.} \label{Fig.5a} \label{Fig.5b} \label{Fig.5c} \label{Fig.5} \end{figure} The high sensitivity of chaotic systems to small perturbations of the initial states, together with the complex dynamical behavior characterized by rapidly changing solutions, make the research on chaotic dynamical systems challenging. The purpose of this paper focuses on finding similarity between the orbits of chaotic attractors via the general optimality principle, which will be discussed in next section. 
\section{Main results} \label{sec3} We are now in a position to demonstrate that similarity of orbits derived from discrete dynamical systems can be found through a strict mathematical principle. This allows us to better understand the motion trajectory and predict the process trend when looking at the rich behavior of complex physical processes. \subsection{Simple Dynamical Systems} Consider the following two discrete dynamical systems \begin{equation}\label{7} x_{k+1}=f(k,x_k), \end{equation} \begin{equation} \label{8} y_{k+1}=g(k,y_k), \end{equation} where the mappings $f,g:\mathbb{N_+}\times\mathbb{R}^n\rightarrow\mathbb{R}^n$ are of $\mathbb{C}^1$. Starting from initial states $x_0$ and $y_0$, the solutions of (\ref{7}) and (\ref{8}) are denoted as \begin{equation}\label{9} x_k=x(k,x_0), \end{equation} \begin{equation}\label{10} y_k=y(k,y_0), \end{equation} respectively. We introduce a new concept of similarity transformation matrix to deal with the problem of drawing a relation of similarity between (\ref{7}) and (\ref{8}). \begin{definition}\label{def1} Let $[n]$ denote the set $\{1,2,\ldots,n\}$, and let $A=(a_{ij})\in\Omega$ be an $n$-order matrix satisfying: $\bullet$ $a_{ij}$ denotes some matrix element, where $i\in [n]$ and $j\in[n]$ are the $i$th row and $j$th column of $A$, respectively; $\bullet$ $\Omega$ is a bounded closed convex set of $\mathbb{M}^{n\times n}$ whose interior $\Omega^\circ\neq\emptyset$; $\bullet$ $y_0=Ax_0$. We say that $A$ is a similarity transformation matrix if the solutions of the two discrete dynamical systems (\ref{7}) and (\ref{8}) satisfy $y_k=Ax_k$ at $k$th step. Otherwise, we say that they are not similar if such $A$ does not exist. \end{definition} From the definition given above, it follows that the way to estimate the similarity transformation matrix $A$ is to minimize the cost functional \begin{align}\label{11} J(A)=\min\limits_A\sum\limits_{k=0}^N\|Ax_k-y_k\|_2^2. 
\end{align} Motivated by first-order optimality condition which is the foundation for many optimization algorithms, we know that the optimal solution of (\ref{11}) is equivalent to \begin{align}\label{12} \frac{\partial J(A)}{\partial a_{ij}} =\frac{\partial}{\partial a_{ij}}(x_k^{\rm T}A^{\rm T}Ax_k) -\frac{\partial}{\partial a_{ij}}(2x_k^{\rm T}A^{\rm T}y_k) +\frac{\partial}{\partial a_{ij}}(y_k^{\rm T}y_k) =0. \end{align} For the first term of the right-hand side in (\ref{12}), we have \begin{align}\label{13} \dfrac{\partial}{\partial a_{ij}}(x_k^{\rm T}A^{\rm T}Ax_k) &=\dfrac{\partial}{\partial a_{ij}}(x_k^{\rm T}A^{\rm T})Ax_k +x_k^{\rm T}A^{\rm T}\dfrac{\partial}{\partial a_{ij}}(Ax_k) \nonumber\\ &=x_k^{\rm T}\left(\dfrac{\partial}{\partial a_{ij}}A^{\rm T}\right)Ax_k +x_k^{\rm T}A^{\rm T}\left(\dfrac{\partial}{\partial a_{ij}}A\right)x_k \nonumber\\ &=(0,\ldots,0,x_{kj},0,\ldots,0)Ax_k+x_k^{\rm T}A^{\rm T}\left( \begin{array}{lllllll} ~0 \\ ~\vdots\\ ~0 \\ x_{kj}\\ ~0 \\ ~\vdots \\ ~0 \\ \end{array} \right) \nonumber\\ &=(a_{i1}x_{kj},\ldots,a_{in}x_{kj}) \left( \begin{array}{lll} x_{k1} \\ ~~\vdots \\ x_{kn} \\ \end{array} \right) +(x_{k1},\ldots,x_{kn}) \left( \begin{array}{lll} a_{i1}x_{kj} \\ ~~~\vdots\\ a_{in}x_{kj} \\ \end{array} \right) \nonumber\\ &=\sum\limits_{r=1}^n a_{ir}x_{kj}x_{kr}+x_{kj}\sum\limits_{r=1}^n a_{ir}x_{kr} \nonumber\\ &=2x_{kj}\sum\limits_{r=1}^n a_{ir}x_{kr}, \end{align} where $i,j\in[n]$, $k=0,1,2,\ldots,N$. In the following, we give the estimate of variational equation (\ref{10}), which plays an important role in analysis of the optimal principle. The result can be shown by induction. 
For the case $k=0$, from the fact that $y_1=g(0,y_0)=g(0,Ax_0)$, we have \begin{align}\label{14} \dfrac{\partial y_1}{\partial a_{ij}} &=\dfrac{\partial}{\partial {a_{ij}}}g(0,Ax_0)\ =\dfrac{\partial y_1}{\partial y_0^{\rm T}}\dfrac{\partial y_0}{\partial a_{ij}}\nonumber\\ &=\left(\begin{array}{lll} \dfrac{\partial y_{11}}{\partial y_{01}}\cdots\dfrac{\partial y_{11}}{\partial y_{0n}}\\ ~~\vdots~~~~\ddots ~~\vdots\\ \dfrac{\partial y_{1n}}{\partial y_{01}}\cdots\dfrac{\partial y_{1n}}{\partial y_{0n}} \end{array} \right) \left(\begin{array}{lllllll} ~0\\ ~\vdots\\ ~0\\ x_{0j}\\ ~0\\ ~\vdots\\ ~0 \end{array} \right) =\left(\begin{array}{lll} \dfrac{\partial y_{11}}{\partial y_{0i}}x_{0j}\\ ~~~~~\vdots\\ \dfrac{\partial y_{1n}}{\partial y_{0i}}x_{0j} \end{array} \right) =\left(\begin{array}{lll} \dfrac{\partial y_{11}}{\partial y_{0i}}\\ ~~~\vdots\\ \dfrac{\partial y_{1n}}{\partial y_{0i}} \end{array} \right) x_{0j}. \end{align} When $k=1$, it follows from the iterative formula $y_2=g(1,y_1)=g(1,g(0,Ax_0))$ and (\ref{14}) that \begin{align*} \dfrac{\partial y_2}{\partial a_{ij}} &=\dfrac{\partial}{\partial {a_{ij}}}g(1,g(0,Ax_0)) =\dfrac{\partial y_2}{\partial y_1^{\rm T}} \dfrac{\partial y_1}{\partial a_{ij}} \\ &=\left(\begin{array}{lll} \dfrac{\partial y_{21}}{\partial y_{11}}\cdots\dfrac{\partial y_{21}}{\partial y_{1n}}\\ ~~\vdots~~~~\ddots ~~\vdots\\ \dfrac{\partial y_{2n}}{\partial y_{11}}\cdots\dfrac{\partial y_{2n}}{\partial y_{1n}} \end{array} \right) \left(\begin{array}{lll} \dfrac{\partial y_{11}}{\partial y_{0i}}\\ ~~~\vdots\\ \dfrac{\partial y_{1n}}{\partial y_{0i}} \end{array} \right) x_{0j} \\ &=\left(\begin{array}{lll} \dfrac{\partial y_{21}}{\partial y_{11}}\dfrac{\partial y_{11}}{\partial y_{0i}}+ \cdots+ \dfrac{\partial y_{21}}{\partial y_{1n}}\dfrac{\partial y_{1n}}{\partial y_{0i}}\\ ~~~~~~~~~~~~~~~~~\vdots\\ \dfrac{\partial y_{2n}}{\partial y_{11}}\dfrac{\partial y_{11}}{\partial y_{0i}}+ \cdots+ \dfrac{\partial y_{2n}}{\partial 
y_{1n}}\dfrac{\partial y_{1n}}{\partial y_{0i}} \end{array} \right)x_{0j} =\left(\begin{array}{lll} \sum\limits_{t=1}^n \dfrac{\partial y_{21}}{\partial y_{1t}}\dfrac{\partial y_{1t}}{\partial y_{0i}}\\ ~~~~~~~\vdots\\ \sum\limits_{t=1}^n \dfrac{\partial y_{2n}}{\partial y_{1t}}\dfrac{\partial y_{1t}}{\partial y_{0i}} \end{array} \right)x_{0j}. \end{align*} Let us make the induction hypothesis, assuming that the following expression is true for $k-1$, that is \begin{align}\label{15} \dfrac{\partial y_k}{\partial a_{ij}} =\dfrac{\partial}{\partial {a_{ij}}}g(k-1,y_{k-1}) =\left(\begin{array}{lll} \sum\limits_{t=1}^n \dfrac{\partial y_{k1}}{\partial y_{(k-1)t}}\dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\\ ~~~~~~~~~~~~~~~~\vdots\\ \sum\limits_{t=1}^n \dfrac{\partial y_{kn}}{\partial y_{(k-1)t}}\dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\\ \end{array} \right)x_{0j}. \end{align} We now show that it continues to hold for $k$. 
By combining (\ref{10}) with (\ref{15}), we obtain \begin{align*} \dfrac{\partial y_{k+1}}{\partial a_{ij}} &=\dfrac{\partial}{\partial {a_{ij}}}g(k,y_k) =\dfrac{\partial y_{k+1}}{\partial y_k^{\rm T}} \dfrac{\partial}{\partial a_{ij}}g(k-1,y_{k-1})\\ &=\left(\begin{array}{lll} \dfrac{\partial y_{(k+1)1}}{\partial y_{k1}}\cdots\dfrac{\partial y_{(k+1)1}}{\partial y_{kn}}\\ ~~~~~\vdots~~~~~\ddots ~~~~~~\vdots\\ \dfrac{\partial y_{(k+1)n}}{\partial y_{k1}}\cdots\dfrac{\partial y_{(k+1)n}}{\partial y_{kn}} \end{array} \right) \left(\begin{array}{lll} \sum\limits_{t=1}^n \dfrac{\partial y_{k1}}{\partial y_{(k-1)t}}\dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\\ ~~~~~~~~~~~~~~~~\vdots\\ \sum\limits_{t=1}^n \dfrac{\partial y_{kn}}{\partial y_{(k-1)t}}\dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\\ \end{array} \right)x_{0j} \\ &=\left(\begin{array}{lll} \sum\limits_{t=1}^n \dfrac{\partial y_{(k+1)1}}{\partial y_{kt}}\dfrac{\partial y_{kt}}{\partial y_{(k-1)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\\ ~~~~~~~~~~~~~~~~\vdots\\ \sum\limits_{t=1}^n \dfrac{\partial y_{(k+1)n}}{\partial y_{kt}}\dfrac{\partial y_{kt}}{\partial y_{(k-1)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\\ \end{array} \right)x_{0j}. \end{align*} Then, if $\{y_k\}$ is generated by (\ref{10}), we deduce that \begin{equation}\label{16} \dfrac{\partial y_{k+1}}{\partial a_{ij}} =\dfrac{\partial}{\partial a_{ij}}g(k,y_k) =\left(\begin{array}{lll} \sum\limits_{t=1}^n \dfrac{\partial y_{(k+1)1}}{\partial y_{kt}}\dfrac{\partial y_{kt}}{\partial y_{(k-1)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\\ ~~~~~~~~~~~~~~~~\vdots\\ \sum\limits_{t=1}^n \dfrac{\partial y_{(k+1)n}}{\partial y_{kt}}\dfrac{\partial y_{kt}}{\partial y_{(k-1)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\\ \end{array} \right)x_{0j}. 
\end{equation} For the second term of the right-hand side in (\ref{12}), we obtain the following equation by means of derivative rule of compound function, \begin{equation}\label{17} \dfrac{\partial}{\partial a_{ij}}(x_k^{\rm T}A^{\rm T}y_k) =x_k^{\rm T}\left(\dfrac{\partial}{\partial {a_{ij}}}A^{\rm T}\right)y_k +x_k^{\rm T}A^{\rm T}\left(\dfrac{\partial}{\partial {a_{ij}}}y_k\right). \end{equation} It is obvious that \begin{equation}\label{18} x_k^{\rm T}\left(\dfrac{\partial}{\partial a_{ij}}A^{\rm T}\right)y_k =(0,\ldots,0,x_{kj},0,\ldots,0)y_k =x_{kj}y_{ki}. \end{equation} Together with (\ref{16}), it yields that \begin{align}\label{19} x_k^{\rm T}A^{\rm T}\left(\dfrac{\partial}{\partial {a_{ij}}}y_k\right) =&x_k^{\rm T}A^{\rm T} \left(\begin{array}{lll} \sum\limits_{t=1}^n \dfrac{\partial y_{k1}}{\partial y_{(k-1)t}}\dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\\ ~~~~~~~~~~~~~~~~~\vdots\\ \sum\limits_{t=1}^n \dfrac{\partial y_{kn}}{\partial y_{(k-1)t}}\dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}} \end{array} \right)x_{0j} \nonumber\\ =& x_k^{\rm T} \left(\begin{array}{lll} a_{11}\sum\limits_{t=1}^n \dfrac{\partial y_{k1}}{\partial y_{(k-1)t}}\cdots \dfrac{\partial y_{1t}}{\partial y_{0i}} +\cdots +a_{n1}\sum\limits_{t=1}^n\dfrac{\partial y_{kn}}{\partial y_{(k-1)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\\ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\vdots\\ a_{1n}\sum\limits_{t=1}^n \dfrac{\partial y_{k1}}{\partial y_{(k-1)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}} +\cdots+a_{nn}\sum\limits_{t=1}^n\dfrac{\partial y_{kn}}{\partial y_{(k-1)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}} \end{array} \right) x_{0j} \nonumber\\ =& x_{k1}\left(a_{11}\sum\limits_{t=1}^n \dfrac{\partial y_{k1}}{\partial y_{(k-1)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}} +\cdots+a_{n1}\sum\limits_{t=1}^n\dfrac{\partial y_{kn}}{\partial y_{(k-1)t}} 
\cdots \dfrac{\partial y_{1t}}{\partial y_{0i}} \right)x_{0j} \nonumber\\ & +\cdots \nonumber\\ & +x_{kn}\left(a_{1n}\sum\limits_{t=1}^n \dfrac{\partial y_{k1}}{\partial y_{(k-1)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}} +\cdots+a_{nn}\sum\limits_{t=1}^n\dfrac{\partial y_{kn}}{\partial y_{(k-1)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}} \right)x_{0j} \nonumber\\ =&x_{0j}\sum\limits_{r=1}^n \sum\limits_{s=1}^n \sum\limits_{t=1}^n x_{kr}a_{sr} \left(\dfrac{\partial y_{ks}}{\partial y_{(k-1)t}} \dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots\dfrac{\partial y_{1t}}{\partial y_{0i}}\right). \end{align} Using (\ref{18}) and (\ref{19}) in (\ref{17}), we can easily get \begin{align}\label{20} \dfrac{\partial}{\partial a_{ij}}\left(x_k^{\rm T}A^{\rm T}y_k\right) =x_{kj}y_{ki}+x_{0j}\sum\limits_{r=1}^n \sum\limits_{s=1}^n \sum\limits_{t=1}^n x_{kr}a_{sr} \left(\dfrac{\partial y_{ks}}{\partial y_{(k-1)t}} \dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots\dfrac{\partial y_{1t}}{\partial y_{0i}}\right), \end{align} where $i,j\in[n]$, $k=0,1,2,\ldots,n$. 
For the last term of the right-hand side in (\ref{12}), it is enough to compute that \begin{align}\label{21} \dfrac{\partial}{\partial a_{ij}}(y_k^{\rm T}y_k) =&\left(\dfrac{\partial}{\partial a_{ij}}y_k^{\rm T}\right)y_k +y_k^{\rm T}\left(\dfrac{\partial}{\partial a_{ij}}y_k\right) \nonumber\\ =&\left( \sum\limits_{t=1}^n \dfrac{\partial y_{k1}}{\partial y_{(k-1)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}, \cdots, \sum\limits_{t=1}^n \dfrac{\partial y_{kn}}{\partial y_{(k-1)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}} \right)x_{0j}y_k \nonumber\\ &+y_k^{\rm T}\left(\begin{array}{lll} \sum\limits_{t=1}^n \dfrac{\partial y_{k1}}{\partial y_{(k-1)t}}\dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\\ ~~~~~~~~~~~~~~~~\vdots\\ \sum\limits_{t=1}^n \dfrac{\partial y_{kn}}{\partial y_{(k-1)t}}\dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\\ \end{array} \right)x_{0j} \nonumber\\ =& 2x_{0j}\left( y_{k1}\sum\limits_{t=1}^n \dfrac{\partial y_{k1}}{\partial y_{(k-1)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}+ \cdots+ y_{kn}\sum\limits_{t=1}^n \dfrac{\partial y_{kn}}{\partial y_{(k-1)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}} \right) \nonumber\\ =& 2x_{0j}\sum\limits_{s=1}^n\sum\limits_{t=1}^n y_{ks} \left(\dfrac{\partial y_{ks}}{\partial y_{(k-1)t}}\dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\right), \end{align} where $i,j\in[n]$, $k=0,1,2,\ldots,n$. In this way, one of the main results in this paper is built up by substituting (\ref{13}), (\ref{20}) and (\ref{21}) into (\ref{12}). \begin{proposition} \label{pro:1} Let $\{x_k\}$ and $\{y_k\}$ be generated by (\ref{7}) and (\ref{8}), respectively. 
If matrix $A$ is the optimal solutions of (\ref{11}), then there exists the optimal principle via first-order optimality condition: \begin{align}\label{22} x_{kj}\sum\limits_{r=1}^n a_{ir}x_{kr} -x_{kj}y_{ki} -x_{0j}\sum\limits_{r=1}^n\sum\limits_{s=1}^n \sum\limits_{t=1}^n x_{kr}a_{sr} \left( \dfrac{\partial y_{ks}}{\partial y_{(k-1)t}} \dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\right) \nonumber\\ +x_{0j}\sum\limits_{s=1}^n \sum\limits_{t=1}^n y_{ks} \left(\dfrac{\partial y_{ks}}{\partial y_{(k-1)t}} \dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}} \right) =0,~~~~~~~~~~~~~~~~~~ \end{align} where $x_{kj}$ denotes the $j$th component of column vector $x_k$ at step $k$, and other similar representations have the same meanings. \end{proposition} \subsection{Homotopy Dynamical Systems} In light of above analysis, a natural extension of that is a more complex similarity study for general dynamical systems. Consider hybrid systems formed by two chaotic attractors via homotopy method, which will show richer and more interesting dynamical behavior. Two simple systems as shown in (\ref{7}) and (\ref{8}) can be connected by constructing such a homotopy \begin{equation}\label{23} H(k,y_k,\lambda)=(1-\lambda)f(k,y_k)+\lambda g(k,y_k), \end{equation} where $\lambda\in[0,1]$ is an embedding parameter. Note that the homotopy $H$ is exactly a path connecting $f$ and $g$ such that $H(k,y_k,0)=f(k,y_k)$ and $H(k,y_k,1)=g(k,y_k)$. As the parameter $\lambda$ increases from 0 to 1, the homotopy $H$ varies continuously from one system to another. 
Consider two dynamical systems with the following forms \begin{equation}\label{24} x_{k+1}=H_1(k,x_k,\lambda_1), \end{equation} \begin{equation}\label{25} y_{k+1}=H_2(k,y_k,\lambda_2), \end{equation} where homotopies (\ref{24}) and (\ref{25}) stand for general hybrid dynamical systems with the embedding parameters $\lambda_1,\lambda_2\in[0,1]$ changing per iteration. The solutions of (\ref{24}) and (\ref{25}) are denoted as \begin{equation}\label{26} x_k=H_1(k,x_0,\lambda_1), \end{equation} \begin{equation}\label{27} y_k=H_2(k,y_0,\lambda_2), \end{equation} respectively. At this point, the similarity transformation matrix, becoming related to the parameter $\lambda=(\lambda_1,\lambda_2)^{\rm T}$, is re-expressed as $A=A(\lambda)$. Let the cost functional be written in the form \begin{equation}\label{28} J(A,\lambda)=\min\limits_{A,\lambda}\sum\limits_{k=0}^N\|Ax_k-y_k\|_2^2. \end{equation} According to the Karush-Kuhn-Tucker (KKT for short) optimality conditions that are often checked for investigating whether a solution of a nonlinear programming problem is optimal, $(A,\lambda)$ is a stationary point of (\ref{28}) if and only if \begin{equation}\label{29} \frac{\partial J(A,\lambda)}{\partial a_{ij}} =\frac{\partial}{\partial a_{ij}}(x_k^{\rm T}A^{\rm T}Ax_k) -\frac{\partial}{\partial a_{ij}}(2x_k^{\rm T}A^{\rm T}y_k) +\frac{\partial}{\partial a_{ij}}(y_k^{\rm T}y_k) =0, \end{equation} and \begin{equation}\label{30} \frac{\partial J(A,\lambda)}{\partial \lambda} =\frac{\partial}{\partial \lambda}(x_k^{\rm T}A^{\rm T}Ax_k) -\frac{\partial}{\partial \lambda}(2x_k^{\rm T}A^{\rm T}y_k) +\frac{\partial}{\partial \lambda}(y_k^{\rm T}y_k) =0, \end{equation} simultaneously. The derivation of (\ref{29}) is the same as (\ref{12}), and hence the details are omitted here.
For the first term of the right-hand side in (\ref{30}), by direct calculation, we get \begin{align}\label{31} \dfrac{\partial}{\partial \lambda}(x_k^{\rm T}A^{\rm T}Ax_k) =&\left(\dfrac{\partial}{\partial \lambda}x_k^{\rm T}\right)A^{\rm T}Ax_k +x_k^{\rm T}A^{\rm T}A\left(\dfrac{\partial}{\partial \lambda}x_k\right) \nonumber\\ =&\left(\dfrac{\partial x_{k1}^{\rm T}}{\partial \lambda},\cdots, \dfrac{\partial x_{kn}^{\rm T}}{\partial \lambda}\right)A^{\rm T}Ax_k +x_k^{\rm T}A^{\rm T}A \left( \begin{array}{lll} \dfrac{\partial{x_{k1}}}{\partial{\lambda}} \\ ~~~\vdots\\ \dfrac{\partial{x_{kn}}}{\partial{\lambda}} \\ \end{array} \right) \nonumber\\ =&\left(a_{11}\dfrac{\partial{x_{k1}}}{\partial{\lambda}}+\cdots +a_{1n}\dfrac{\partial{x_{kn}}}{\partial{\lambda}}, \cdots, a_{n1}\dfrac{\partial{x_{k1}}}{\partial{\lambda}}+\cdots +a_{nn}\dfrac{\partial{x_{kn}}}{\partial{\lambda}} \right) \left( \begin{array}{lll} \sum\limits_{s=1}^na_{1s}x_{ks}\\ ~~~~~~~\vdots\\ \sum\limits_{s=1}^na_{ns}x_{ks} \\ \end{array} \right) \nonumber\\ & +\left(\sum\limits_{s=1}^na_{1s}x_{ks},\cdots,\sum\limits_{s=1}^na_{ns}x_{ks}\right) \left( \begin{array}{lll} a_{11}\dfrac{\partial{x_{k1}}}{\partial{\lambda}}+\cdots +a_{1n}\dfrac{\partial{x_{kn}}}{\partial{\lambda}}\\ ~~~~~~~~~~~~~~~~~~~\vdots\\ a_{n1}\dfrac{\partial{x_{k1}}}{\partial{\lambda}}+\cdots +a_{nn}\dfrac{\partial{x_{kn}}}{\partial{\lambda}}\\ \end{array} \right) \nonumber\\ =& 2\sum\limits_{s=1}^na_{1s}x_{ks} \left(a_{11}\dfrac{\partial{x_{k1}}}{\partial{\lambda}}+\cdots +a_{1n}\dfrac{\partial{x_{kn}}}{\partial{\lambda}}\right)+\cdots \nonumber\\ &+2\sum\limits_{s=1}^na_{ns}x_{ks} \left(a_{n1}\dfrac{\partial{x_{k1}}}{\partial{\lambda}}+\cdots +a_{nn}\dfrac{\partial{x_{kn}}}{\partial{\lambda}}\right). 
\end{align} For the second term of the right-hand side in (\ref{30}), it follows from derivative rule of compound function that \begin{align}\label{32} \dfrac{\partial}{\partial \lambda}(x_k^{\rm T}A^{\rm T}y_k) =\left(\dfrac{\partial}{\partial \lambda}x_k^{\rm T}\right)A^{\rm T}y_k +x_k^{\rm T}A^{\rm T}\left(\dfrac{\partial}{\partial \lambda}y_k\right). \end{align} On the one hand, \begin{align}\label{33} \left(\dfrac{\partial}{\partial \lambda}x_k^{\rm T}\right)A^{\rm T}y_k =&\left(\dfrac{\partial{x_{k1}}}{\partial{\lambda}},\cdots, \dfrac{\partial{x_{kn}}}{\partial{\lambda}}\right)A^{\rm T}y_k \nonumber\\ =&\left(a_{11}\dfrac{\partial{x_{k1}}}{\partial{\lambda}}+\cdots +a_{1n}\dfrac{\partial{x_{kn}}}{\partial{\lambda}}, \cdots, a_{n1}\dfrac{\partial{x_{k1}}}{\partial{\lambda}}+\cdots +a_{nn}\dfrac{\partial{x_{kn}}}{\partial{\lambda}}\right) \left(\begin{array}{lll} y_{k1}\\ ~~\vdots\\ y_{kn} \\ \end{array} \right) \nonumber\\ =&\left(a_{11}\dfrac{\partial{x_{k1}}}{\partial{\lambda}} +\cdots +a_{1n}\dfrac{\partial{x_{kn}}}{\partial{\lambda}}\right)y_{k1} +\cdots +\left(a_{n1}\dfrac{\partial{x_{k1}}}{\partial{\lambda}} +\cdots +a_{nn}\dfrac{\partial{x_{kn}}}{\partial{\lambda}}\right)y_{kn} \nonumber\\ =&\sum\limits_{t=1}^na_{t1}y_{kt}\dfrac{\partial{x_{k1}}}{\partial{\lambda}} +\cdots +\sum\limits_{t=1}^na_{tn}y_{kt}\dfrac{\partial{x_{kn}}}{\partial{\lambda}}. 
\end{align} On the other hand, \begin{align}\label{34} x_k^{\rm T}A^{\rm T}\left(\dfrac{\partial}{\partial \lambda}y_k\right) =&x_k^{\rm T}A^{\rm T} \left(\begin{array}{lll} \dfrac{\partial{y_{k1}}}{\partial{\lambda}}\\ ~~~\vdots\\ \dfrac{\partial{y_{kn}}}{\partial{\lambda}} \\ \end{array} \right) \nonumber\\ =&\left(\sum\limits_{s=1}^na_{1s}x_{ks},\cdots,\sum\limits_{s=1}^na_{ns}x_{ks}\right) \left(\begin{array}{lll} \dfrac{\partial{y_{k1}}}{\partial{\lambda}}\\ ~~~\vdots\\ \dfrac{\partial{y_{kn}}}{\partial{\lambda}} \\ \end{array} \right) \nonumber\\ =&\sum\limits_{s=1}^na_{1s}x_{ks}\dfrac{\partial{y_{k1}}}{\partial{\lambda}} +\cdots +\sum\limits_{s=1}^na_{ns}x_{ks}\dfrac{\partial{y_{kn}}}{\partial{\lambda}}. \end{align} Combining (\ref{33}) and (\ref{34}) with (\ref{32}), we obtain \begin{align}\label{35} \dfrac{\partial}{\partial \lambda}(x_k^{\rm T}A^{\rm T}y_k) =\sum\limits_{t=1}^n a_{t1}y_{kt}\dfrac{\partial{x_{k1}}}{\partial \lambda} +\cdots +\sum\limits_{t=1}^n a_{tn}y_{kt}\dfrac{\partial{x_{kn}}}{\partial \lambda} +\sum\limits_{s=1}^n a_{1s}x_{ks}\dfrac{\partial{y_{k1}}}{\partial \lambda} +\cdots +\sum\limits_{s=1}^n a_{ns}x_{ks}\dfrac{\partial{y_{kn}}}{\partial \lambda}. \end{align} For the last term of the right-hand side in (\ref{30}), it is obvious that \begin{align}\label{36} \dfrac{\partial}{\partial \lambda}(y_k^{\rm T}y_k) =&\left(\dfrac{\partial}{\partial \lambda}y_k^{\rm T}\right)y_k +y_k^{\rm T}\left(\dfrac{\partial}{\partial \lambda}y_k\right) \nonumber\\ =&\left(\dfrac{\partial{y_{k1}}}{\partial \lambda}, \cdots, \dfrac{\partial{y_{kn}}}{\partial \lambda}\right) \left(\begin{array}{lll} y_{k1}\\ ~~\vdots\\ y_{kn} \\ \end{array} \right) +(y_{k1},\cdots,y_{kn}) \left(\begin{array}{lll} \dfrac{\partial{y_{k1}}}{\partial{\lambda}}\\ ~~~\vdots\\ \dfrac{\partial{y_{kn}}}{\partial{\lambda}} \\ \end{array} \right) \nonumber\\ =&2y_{k1}\dfrac{\partial{y_{k1}}}{\partial \lambda} +\cdots +2y_{kn}\dfrac{\partial{y_{kn}}}{\partial \lambda}. 
\end{align} According to all derivations above, we deduce the other main result of this paper, which deals with how similar between orbits of general dynamical systems. By substituting (\ref{31}), (\ref{35}) and (\ref{36}) into (\ref{30}), we give the existence of the solution of (\ref{28}). \begin{proposition} \label{pro:2} Let $\{x_k\}$ and $\{y_k\}$ be generated by (\ref{24}) and (\ref{25}), respectively. If matrix $A=A(\lambda)$ and parameter $\lambda$ are the optimal solutions of (\ref{28}), then there exist the following general optimal principle: \begin{align}\label{37} x_{kj}\sum\limits_{r=1}^n \bar{a}_{ir}x_{kr} -x_{kj}y_{ki} -x_{0j}\sum\limits_{r=1}^n\sum\limits_{s=1}^n \sum\limits_{t=1}^n x_{kr}\bar{a}_{sr} \left(\dfrac{\partial y_{ks}}{\partial y_{(k-1)t}} \dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}}\right)\nonumber\\ ~~~~~~~~~~~~~~~~+x_{0j}\sum\limits_{s=1}^n \sum\limits_{t=1}^n y_{ks} \left(\dfrac{\partial y_{ks}}{\partial y_{(k-1)t}} \dfrac{\partial y_{(k-1)t}}{\partial y_{(k-2)t}} \cdots \dfrac{\partial y_{1t}}{\partial y_{0i}} \right) =0, ~~~~~~~~~~~~~~~~~ \end{align} and \begin{align}\label{38} \left(\sum\limits_{r=1}^n\sum\limits_{s=1}^n\bar{a}_{r1}\bar{a}_{rs}x_{ks} -\sum\limits_{t=1}^n\bar{a}_{t1}y_{kt}\right)\frac{\partial x_{k1}}{\partial\lambda} +\cdots +\left(\sum\limits_{r=1}^n\sum\limits_{s=1}^n\bar{a}_{rn}\bar{a}_{rs}x_{ks} -\sum\limits_{t=1}^n\bar{a}_{tn}y_{kt}\right)\frac{\partial x_{kn}}{\partial\lambda} \nonumber\\ -\left(\sum\limits_{s=1}^n\bar{a}_{1s}x_{ks}-y_{k1}\right)\frac{\partial y_{k1}}{\partial\lambda} -\cdots -\left(\sum\limits_{s=1}^n\bar{a}_{ns}x_{ks}-y_{kn}\right)\frac{\partial y_{kn}}{\partial\lambda}=0, ~~~~~~~~~~~~ \end{align} where the similarity transformation matrix becomes related to the parameter $\lambda=(\lambda_1,\lambda_2)^{\rm T}$, and $\bar{a}_{ij}$ stands for some matrix element of $A=A(\lambda)$, other representations of components have the same meanings 
as before. \end{proposition} \begin{remark} Although (\ref{37}) is formally consistent with (\ref{22}), in fact, each component of the similarity transformation matrix $A$ is related to the parameter $\lambda$. \end{remark} \begin{remark} The existing literature on the variation of a functional with respect to a matrix, rather than just a vector or even a scalar, is rare, as considered in (\ref{11}) and (\ref{28}). Taking the variation with respect to a matrix can be converted into partial derivatives with respect to each matrix element, which leads to tedious calculations, especially when the number of iterations is large. \end{remark} For most cases, it is almost impossible to find a similarity transformation matrix that makes solutions between two discrete dynamical systems exactly similar. By virtue of the characteristics of the function \begin{equation*} h(\omega)=\frac{\log(1+\omega)}{\omega},~\omega>0, \end{equation*} we define a similarity function by setting $\omega=\dfrac{1}{N}\sum\limits_{k=1}^N\|Ax_k-y_k\|_2^2$. When no similarity transformation matrix can be found to make two orbits completely similar, we also ask to what extent they are similar. \begin{definition}\label{def2} The similarity degree $\rho(A)$ of solutions between two discrete dynamical systems is defined as \begin{equation}\label{39} \rho(A)=\left\{\begin{array}{ll} ~~~~~ 1 ,& ~~{\rm{if}} \,\omega=0,\\ \dfrac{\log(1+\omega)}{\omega}, & ~~{\rm{otherwise.}}\,\end{array}\right. \end{equation} It is easy to see that $\rho(A)$$\in$$(0,1]$, according to the above definition. \end{definition} We close this section by summing up that the solutions of two systems are said to be completely similar if $\rho(A)=1$; otherwise, they are partially similar. \section{Experimental results} \label{sec4} In this section, some examples are given to show the utility of the general optimal principle proposed in this paper. All codes are written in MATLAB R2021a and run on a PC with a 1.80 GHz CPU processor and 8.00 GB RAM memory.
Unless otherwise specified, the numerical results are accurate to four decimal places throughout this paper. To cope with over-fitting, L2-norm regularization is introduced naturally. In reinforcement learning and neural networks, this happens frequently when samples are limited and computation is expensive. The cost functional in (\ref{11}) or (\ref{28}) is augmented to include an L2-norm penalty on matrix $A$ with the following form \begin{equation}\label{40} \tilde{J}(A)=\min\limits_A\sum\limits_{k=0}^N\|Ax_k-y_k\|_2^2+\tau\|A\|_2^2, \end{equation} where $\tau$ is a positive constant called the regularization parameter that balances the two objective terms. In optimal control theory, the two most fundamental but crucial optimization methods are Pontryagin's maximum principle and Bellman's dynamic programming. Taking into account the proposed optimal principles, we conduct the numerical simulations by following Pontryagin's maximum principle and Bellman's dynamic programming respectively for similarity of orbits between various chaotic systems. The chaotic systems chosen in this section are all solved numerically by means of the widely used fourth-order Runge-Kutta method with time step size equal to 0.01. The following numerical experiments mainly include two parts. \subsection{Pontryagin's maximum principle} As a necessary condition to solve optimal control problems, Pontryagin's maximum principle was proposed by Pontryagin and his group in the 1960s \cite{Pontryagin1962}. An outstanding feature of Pontryagin's maximum principle lies in that the optimal control signal transferring a dynamical system from one state to another can be found under the condition that the state or input is fixed. The following examples aim to verify Pontryagin's maximum principle formulated in terms of the proposed optimal principle (\ref{22}) when studying the similarity of orbits between two chaotic attractors.
{\bf Example~4.1.} Similarity of orbits between Lorenz attractor and Chua's circuit. Let $\{x_k\}$ and $\{y_k\}$ be the numerical solutions derived from Lorenz and Chua systems for 2000 time steps (namely, $N$$=$$2000$) from the same initial states \begin{equation*} x_0=y_0=(0.1,0.1,0.1)^{\rm T}. \end{equation*} We divide the sequences into multi-stage decisions consisting of 10 steps for each (denoted by $N_1$$-$$N_{200}$), with the final state of previous stage as the initial condition of current stage. The optimal similarity transformation matrix of each stage is found by optimal principle (\ref{22}) whose accuracy performance is assessed by similarity degree (\ref{39}). Figs. \ref{Fig.6}-\ref{Fig.7} describe three dimensional stereograms and two dimensional plans of Lorenz attractor, Chua's circuit and the trajectory acted by the optimal similarity transformation matrix for each stage. Enlarge the trajectories marked by dotted box so as to find similarity of orbits between $\{Ax_k\}$ and $\{y_k\}$ more clearly. We can observe that Lorenz attractor and Chua's circuit with different orbits become mainly similar after the optimal similarity transformation matrix is employed according to Figs. \ref{Fig.6a} and \ref{Fig.7a}. As can be seen in Figs. \ref{Fig.6b} and \ref{Fig.7b}, if the regularization parameter $\tau$ is selected as $10^{-4}$, the orbit $\{x_k\}$ under action of optimal similarity transformation matrix is surprisingly close to the orbit $\{y_k\}$, supporting the availability of tuning parameter and the stability of L2-norm regularization. \begin{figure} \caption{Three dimensional stereograms of Example~4.1.} \label{Fig.6a} \label{Fig.6b} \label{Fig.6} \end{figure} \begin{figure} \caption{Two dimensional plans of Example~4.1.} \label{Fig.7a} \label{Fig.7b} \label{Fig.7} \end{figure} Results of similarity degree are shown in Fig. \ref{Fig.8}. From Fig. 
\ref{Fig.8a}, we can see that the values of similarity degree can exceed 0.9 in the majority of results. When the L2-norm penalty is introduced, only 4 results are less than 0.9. This exactly implies that the stability of the solution is improved by introducing L2-norm regularization with a suitable regularization parameter. \begin{figure} \caption{Similarity degree of Example~4.1.} \label{Fig.8a} \label{Fig.8b} \label{Fig.8} \end{figure} {\bf Example~4.2.} Similarity of orbits between Lorenz attractor and R$\rm\ddot{o}ssler$ attractor. Let $\{x_k\}$ and $\{y_k\}$ be the numerical solutions of Lorenz and R$\rm\ddot{o}ssler$ systems, sharing the same initial states $x_0$$=$$y_0$$=$$(0.1,0.1,0.1)^{\rm T}$. We perform the same experimental procedure as in Example 4.1. Figs. \ref{Fig.9} and \ref{Fig.10} show the stereograms and plans of Lorenz attractor, R$\rm\ddot{o}$ssler attractor and the trajectory acted by optimal similarity transformation matrix for each stage when $N$$=$$2000$. Even if overlapped trajectories marked by dotted box are enlarged, we could still observe that the orbits of the R$\rm\ddot{o}$ssler system almost coincide with those of the Lorenz system acted by the optimal similarity transformation matrix. \begin{figure} \caption{Three dimensional stereograms of Example~4.2.} \label{Fig.9a} \label{Fig.9b} \label{Fig.9} \end{figure} \begin{figure} \caption{Two dimensional plans of Example~4.2.} \label{Fig.10a} \label{Fig.10b} \label{Fig.10} \end{figure} Lorenz attractor with butterfly shape and R$\rm \ddot{o}ssler$ attractor with spiral shape that appear to be different geometrically, become remarkably similar within the proper precision under the action of Pontryagin's maximum principle using optimal principle (\ref{22}), which is truly an amazing finding. Surprisingly, the numerical results indicated in Fig.~\ref{Fig.11} show that only six results are less than 0.95 without the regularization term.
It should be pointed out that when we carry out tests to find the optimal similarity transformation matrix based on the L2-norm penalty (\ref{40}), all values of similarity degree are greater than 0.98. \begin{figure} \caption{Similarity degree of Example~4.2.} \label{Fig.11a} \label{Fig.11b} \label{Fig.11} \end{figure} To summarize, even without the use of L2-norm regularization, we can still get very satisfactory results to some extent. \subsection{Bellman's dynamic programming} Dynamic programming was studied by Bellman to deal with situations where the best decisions are made in stages \cite{Bellman1957}. Whatever the initial state and initial decision are, the decisions that will follow must also constitute an optimal policy for the remaining problems, when the stage and state formed by the first step decision are considered as initial conditions. Applying Bellman's dynamic programming in terms of the proposed optimal principle, we analyze the similarity of orbits between two chaotic attractors, see the following three examples. {\bf Example~4.3.} Similarity of orbits between Lorenz attractor and Chen attractor. Let $\{x_k\}$ and $\{y_k\}$ be the numerical solutions obtained from Lorenz and Chen systems for 2000 time steps. Instead of solving 2000 steps one at a time, we consider a multi-stage decision making process by breaking the complex problem into 200 simple subproblems denoted as $N_1$$-$$N_{200}$ with 10 steps for each. More precisely, for stage $N_1$, the initial conditions of two systems are determined as \begin{equation*} x_0=(0.1,0.1,0.1)^{\rm T}~\mbox{and}~y_0=Ax_0 \end{equation*} with $A$ unknown. Making use of the Runge-Kutta method, we calculate the states of this stage including the values of $x_1$$-$$x_{10}$ and $y_1$$-$$y_{10}$ whose components are represented by expressions containing elements of $A$. By solving a system of nonlinear equations formulated by (\ref{22}), we obtain an approximate solution with a high precision.
For the following stages, we do the same actions and compare the current similarity transformation matrix $\bar{A}$ with the one obtained in the previous stage, denoted as $\bar{\bar{A}}$, subject to the similarity degree defined in (\ref{39}); then let \begin{equation*} A=\mathop{\arg\max}\limits_{\bar{A},\bar{\bar{A}}} \{\rho(\bar{A}), \rho(\bar{\bar{A}})\} \end{equation*} be the optimal similarity transformation matrix of this stage. We obtain the approximate solution which makes similarity degree reach 1.0000 for each stage. As shown in Figs. \ref{Fig.12}-\ref{Fig.13}, Lorenz and Chen systems with different orbits can become distinctly similar through the adjustment of the similarity transformation matrix derived by the proposed optimal principle. \begin{figure} \caption{Two dimensional plans of Example~4.3.} \label{Fig.12a} \label{Fig.12b} \label{Fig.12c} \label{Fig.12} \end{figure} Now we elaborate the advantage of multi-stage dynamic programming with the help of numerical results. For $N$$=$$2000$, the optimal similarity transformation matrix $A_0$ can be found according to optimal principle (\ref{22}), which makes similarity degree reach 0.988837. When only the optimal similarity transformation matrix at stage $N_1$ is taken and $A_0$ is still employed in other stages, similarity degree increases to 0.988839. If we adopt the corresponding optimal similarity transformation matrix in both $N_1$ and $N_2$, the similarity degree rises to 0.9888435, and so on. The final similarity degree of solutions between Lorenz attractor and Chen attractor in this example can reach 0.999995 and each stage is optimal at this point, which meets Bellman's principle of optimality. To get a better view of the change in similarity degree, the numerical results of each stage are depicted in Fig. \ref{Fig.14}. We observe that as the number of optimal similarity transformation matrices in the corresponding stages increases, similarity degree increases progressively.
\begin{figure} \caption{Three dimensional stereograms of Example~4.3.} \label{Fig.13} \caption{Change in similarity degree of Example~4.3.} \label{Fig.14} \end{figure} {\bf Example~4.4.} Similarity of orbits between Lorenz attractor and L$\rm\ddot{u}$ attractor. Let $\{x_k\}$ and $\{y_k\}$ be the numerical solutions obtained from the Lorenz system and the L$\rm\ddot{u}$ system with $u=0$ for 2000 time steps. Similar to the multi-stage decision in Example 4.3, we also divide the steps into 200 stages. For each stage, only the initial state of sequence $\{x_k\}$ is known. The values of similarity degree can reach 1.0000 for all stages, implying the effectiveness of the optimal similarity transformation matrix. We are surprised by the effectiveness of the similarity transformation matrix formulated by the proposed optimal principle, as shown in Figs. \ref{Fig.15}-\ref{Fig.16}. Even if we enlarge the trajectories represented in the dotted box to the coordinate diagram with small horizontal and vertical coordinates, the orbits of L$\rm\ddot{u}$ attractor and Lorenz attractor acted by the optimal similarity transformation matrix can still coincide almost exactly. The similarity degree gradually increases from $0.999609$ to $0.999994$ with the increase of the number of optimal similarity transformation matrices, satisfying Bellman's principle of optimality, see Fig. \ref{Fig.17}. \begin{figure} \caption{Two dimensional plans of Example~4.4.} \label{Fig.15a} \label{Fig.15b} \label{Fig.15c} \label{Fig.15} \end{figure} \begin{figure} \caption{Three dimensional stereograms of Example~4.4.} \label{Fig.16} \caption{Change in similarity degree of Example~4.4.} \label{Fig.17} \end{figure} {\bf Example~4.5.} Similarity of orbits between Hybrid attractor and L$\rm\ddot{u}$ attractor.
The last example concerns the hybrid Lorenz-Chua chaotic system formed by Lorenz attractor and Chua's circuit using the homotopy approach (\ref{23}), modeled as follows \begin{equation}\label{41} \begin{array}{lll} \dot{x}=\lambda(-\sigma x+\sigma y)+(1-\lambda)\cdot\alpha[y-x-f(x)],\\ \dot{y}=\lambda(-xz+rx-y)+(1-\lambda)(x-y+z), \\ \dot{z}=\lambda(xy-bz)+(1-\lambda)(-\beta y), \end{array} \end{equation} where the piecewise-linear function $f(x)$ is defined in (\ref{3}), with the same parameters as in (\ref{1})-(\ref{3}). The study of the hybrid attractor is more challenging due to its more complex topologies and dynamics. Different dynamical behaviors in L$\rm\ddot{u}$ attractor's controlled system (\ref{6}) can be generated by varying the parameter $u$. For the parameters $u$$=$$-1$, $u$$=$$8$, $u$$=$$-12$ and $u$$=$$12$ that produce the complete attractor, partial attractor, left-attractor and right-attractor, we simulate the similarity between orbits of the hybrid Lorenz-Chua system and L$\rm\ddot{u}$ attractor respectively. \begin{figure} \caption{Two dimensional plans of Example~4.5.} \label{Fig.18a} \label{Fig.18b} \label{Fig.18c} \label{Fig.18d} \label{Fig.18} \end{figure} Let $\{x_k\}$ and $\{y_k\}$ be the numerical solutions obtained from hybrid system (\ref{41}) and L$\rm\ddot{u}$ attractor for 1000 time steps. Breaking the problems into 200 simple subproblems with 5 steps for each, the approximate solutions of parameter $\lambda$ and the optimal similarity transformation matrix are found by (\ref{37}) and (\ref{38}). We show the numerical performance in Fig.~\ref{Fig.18} for the four different values of $u$, respectively. For the purpose of demonstrating the simulated effect of the optimal principle more intuitively, Fig. \ref{Fig.19} illustrates the orbit of L$\rm\ddot{u}$ system (actual) and that of hybrid Lorenz-Chua attractor acted by the optimal similarity transformation matrix (simulated).
Although the L$\rm\ddot{u}$ system exhibits various dynamical behaviors due to the varying parameter $u$, the two sequences almost completely coincide, showing the availability and universality of the proposed optimal principle. For the sake of completeness, the changes in similarity degree of the four cases of Example 4.5 are shown in Fig. \ref{Fig.20}, which also fulfill Bellman's principle of optimality. \begin{figure} \caption{Comparison of actual and simulated sequences of Example~4.5.} \label{Fig.19a} \label{Fig.19b} \label{Fig.19c} \label{Fig.19d} \label{Fig.19} \end{figure} \begin{figure} \caption{Change in similarity degree of Example~4.5.} \label{Fig.20} \end{figure} Chaotic systems are generally characterized by complex behavior and rapidly changing solutions, whose orbits become quite similar by taking advantage of the general optimal principle presented in this paper. \section{Conclusions} \label{sec5} In scientific research, capturing certain underlying similarity between two complex physical processes is one of the most essential problems. The critical challenge for finding similarity of orbits between dynamical systems arises when facing the high sensitivity to small perturbations with respect to initial states of chaotic systems. The main contribution, in addition to proposing some concepts describing to what extent the orbits between two markedly different systems are similar, is the general optimal principle built up from the viewpoint of dynamical systems together with optimization theory. This optimal principle is applied to various well-known chaotic attractors and a kind of hybrid chaotic system that is constructed on the basis of the homotopy idea, yielding surprisingly encouraging numerical simulation results. Specifically, attention is paid to finding similarity of orbits between dynamical systems with complex behavior, mathematically.
As necessary foundations for this paper, the definitions of similarity transformation matrix and similarity degree are introduced. We present a general optimal principle based on the optimality condition and the variational method, finding some underlying similarity between orbits of two dynamical systems. The numerical simulations concern both Pontryagin's maximum principle and Bellman's dynamic programming formulated in terms of the optimal principle for similarity of orbits between various chaotic systems. Orbits that differed markedly become remarkably similar under the action of the optimal similarity transformation matrix, and the value of similarity degree also supports this, implying the significance of the optimal principle proposed in this paper. \section*{Acknowledgments} This work was supported by National Basic Research Program of China (2013CB834100), National Natural Science Foundation of China (11571065, 11171132, 12071175), Project of Science and Technology Development of Jilin Province (2017C028-1, 20190201302JC), and Natural Science Foundation of Jilin Province (20200201253JC). \end{document}
\begin{document} \title{Auxiliary Variables for Multi-Dirichlet Priors} \begin{abstract} Bayesian models that mix multiple Dirichlet prior parameters, called \textit{Multi-Dirichlet priors} (MD) in this paper, are gaining popularity~\cite{lin2012coupling,kling2014detecting}. Inferring mixing weights and parameters of mixed prior distributions seems tricky, as sums over Dirichlet parameters complicate the joint distribution of model parameters. This paper shows a novel auxiliary variable scheme which helps to simplify the inference for models involving hierarchical MDs and MDPs. Using this scheme, it is easy to derive fully collapsed inference schemes which allow for efficient inference. \end{abstract} \section{Introduction} In order to develop a collapsed variational inference for hierarchical Dirichlet processes, Teh et al.~\cite{teh08b} introduced an auxiliary variable scheme based on truncated Dirichlet processes. The same auxiliary variables can be used for efficiently inferring hierarchical models of multinomial distributions with Dirichlet-distributed parameters. Dirichlet priors can mix multiple parameters to realise predictions of multinomial parameters based on multiple influence factors~\cite{lin2012coupling,kling2014detecting}. In this paper, we call these priors \textit{Multi-Dirichlet priors} (MD). The sampled parameters can serve as parts of Dirichlet priors in a lower level of hierarchical Dirichlet distributions or Dirichlet processes, paving the way for arbitrarily complex hierarchical MD models. So far, no collapsed variational inference scheme has been proposed for hierarchical MD distributions. In this paper, an auxiliary variable scheme is proposed which allows one to directly employ the collapsed variational inference scheme for hierarchical Dirichlet processes given in~\cite{teh08b}.
\section{Multinomial distributions and Dirichlet priors} If we sample $n$ single values from a multinomial distribution with parameters $\theta_1, \dots, \theta_K$, the likelihood of observations is given by \begin{align} &\operatorname{Mult}(n_1, \dots, n_K \mid \boldsymbol{\theta}) = \prod_{k=1}^K \theta_k^{n_{k}} \end{align} where $n_k$ are counts which give the frequency of observing the $k$th category in the observations $\boldsymbol{x}$. The Dirichlet distribution is the conjugate prior distribution of the multinomial: \begin{align} &\operatorname{Dir}\left(\boldsymbol{\theta} \mid \boldsymbol{\alpha}\right) = \frac{\Gamma\left(\sum_{k=1}^K \alpha_{k}\right)}{\prod_{k=1}^K \Gamma\left(\alpha_{k}\right)} \prod_{k=1}^K \theta_k^{ \alpha_{k}-1} \end{align} The storyline behind a multinomially distributed variable $x$ with a Dirichlet prior is \begin{align} \boldsymbol{\theta} \mid \boldsymbol{\alpha} \sim & \operatorname{Dir}\left(\alpha_{1},\dots,\alpha_{K}\right)\nonumber\\ x_1, \dots, x_n \mid \boldsymbol{\theta} \sim & \operatorname{Mult}\left(\theta_1, \dots, \theta_K\right). \end{align} The graphical model is shown in Fig.~\ref{fig:Dirichlet-prior}. The joint distribution over observations and multinomial parameters of the model is: \begin{align} \operatorname{Mult}(n_{1},\dots,n_{K} \mid \boldsymbol{\theta}) \cdot \operatorname{Dir}(\boldsymbol{\theta} \mid \boldsymbol{\alpha}) = & \frac{\Gamma(\sum_{k=1}^K \alpha_{k})}{\prod_{k=1}^K \Gamma( \alpha_{k})} \prod_{k=1}^K \theta_k^{ n_{k} + \alpha_{k} -1}.
\label{eq:dir_mult} \end{align} The posterior of a Dirichlet-multinomial distribution is a Dirichlet distribution with parameters $\alpha_1 + n_1, \dots, \alpha_K + n_K$ where the Dirichlet parameters of the prior act as pseudo-counts: \begin{align} \boldsymbol{\theta} \sim \operatorname{Dir}(\alpha_1 + n_1, \dots, \alpha_K + n_K) \end{align} Integrating out $\boldsymbol{\theta}$ yields \begin{align} & p(\boldsymbol{n} \mid \boldsymbol{\alpha}) = \frac{\Gamma(\sum_{k=1}^K \alpha_{k})}{\Gamma(\sum_{k=1}^K \alpha_{k} + n_{k})} \prod_{k=1}^K \frac{\Gamma(\alpha_{k} + n_{k})}{\Gamma(\alpha_{k})} \label{eq:margin_dir_mult} \end{align} For efficient inference schemes, auxiliary variables $m_1, \dots, m_K$ can be introduced~\cite{antoniak74,teh08b}: \begin{align} & p(\boldsymbol{n},\boldsymbol{m} \mid \boldsymbol{\alpha}) = \frac{\Gamma(\sum_{k=1}^K \alpha_{k})}{\Gamma(\sum_{k=1}^K \alpha_{k} + n_{k})} \prod_{k=1}^K \operatorname{s}(m_{k},n_{k}) \cdot \left( \alpha_{k}\right)^{m_k} \label{eq:aux-sterling} \end{align} where the following equality is used: \begin{align} & \sum_{m_k = 0}^{n_k} \operatorname{s}(m_{k},n_{k}) \cdot \left( \alpha_{k}\right)^{m_k} = \frac{\Gamma(\alpha_{k} + n_{k})}{\Gamma(\alpha_{k})}. \label{eq:aux_tables} \end{align} The auxiliary variables $\boldsymbol{m}$ behave like balls in the Polya urn scheme (or like tables in the Chinese restaurant process for truncated Dirichlet processes)~\cite{teh08b}. The expected values of $\boldsymbol{m}$ are \begin{align} \operatorname{E}\left[ m_k \right] = \alpha_k \cdot \left( \Psi(\alpha_k+n_k) - \Psi(\alpha_k) \right). \end{align} If $\alpha_{k}$ contains multinomial parameters, their inference is simple, as $m_1, \dots, m_K$ behave like observed counts of a multinomial. For non-multinomial parameters, further auxiliary variables allow for an elegant inference using gamma distributions~\cite{teh08b}. 
\begin{figure}
% NOTE(review): the figure body (subfigure environments / \includegraphics) appears to have been
% lost in extraction; the caption below is reconstructed and the original subfigure structure
% (with sub-labels for \subref) should be restored.
\caption{Plate notation for a multinomial distribution with \subref{fig:Dirichlet-prior} a Dirichlet prior and \subref{fig:Multi-Dirichlet-prior} a multi-Dirichlet prior.}
\label{fig:Dirichlet-prior}
\label{fig:Multi-Dirichlet-prior}
\end{figure} \section{The multi-Dirichlet prior} Assume that we have a multinomial distribution with parameters $\theta_1, \dots, \theta_K$ from which we sample $n$ single values, stored in $x_1, \dots, x_n$. We place a Dirichlet prior over the multinomial parameters $\boldsymbol{\theta}$ which has $K$ parameters. The Dirichlet parameters are calculated by summing over $J$ different $K$-dimensional vectors called \textit{parent prior parameters} $\boldsymbol{\alpha_1}, \dots, \boldsymbol{\alpha_J}$. Then the $k$th parameter of our Dirichlet prior distribution is a sum over parent parameters $\alpha_{1k}, \dots, \alpha_{Jk}$. In the following, this prior distribution will be called a \textit{Multi-Dirichlet} (MD) prior. Formally: \begin{align} \boldsymbol{\theta} \mid \boldsymbol{\alpha_1}, \dots, \boldsymbol{\alpha_J} \sim & \operatorname{Dir}\left(\sum_{j=1}^J \alpha_{j1},\dots,\sum_{j=1}^J \alpha_{jK}\right) ;\quad \alpha_{jk} \in \mathbb{R}_{>0} \, \forall j, k \nonumber\\ x_1, \dots, x_n \mid \boldsymbol{\theta} \sim & \operatorname{Mult}\left(\theta_1, \dots, \theta_K\right) \end{align} where we show vectors in bold. The graphical model for a multi-Dirichlet distribution is shown in Fig.~\ref{fig:Multi-Dirichlet-prior}. If we observe counts $n_1, \dots, n_K$ for the $K$ categories from $n$ draws, the joint distribution of observations and parameters becomes: \begin{align} & \frac{\Gamma(\sum_{k=1}^K \sum_{j=1}^J \alpha_{jk})}{\prod_{k=1}^K \Gamma(\sum_{j=1}^J \alpha_{jk})} \prod_{k=1}^K \theta_k^{ n_{k} + \left(\sum_{j=1}^J \alpha_{jk}\right) -1} \label{eq:md_dir_mult} \end{align} The same holds for a Multi-Dirichlet Process (MDP)~\cite{kling2014detecting} with truncation level $K$.
After integrating over the multinomial parameters $\theta$, the joint distribution is: \begin{align} p(\boldsymbol{n}\mid \boldsymbol{\alpha_1}, \dots, \boldsymbol{\alpha_J}) = & \frac{\Gamma(\sum_{k=1}^K \sum_{j=1}^J \alpha_{jk})}{\Gamma(\sum_{k=1}^K \sum_{j=1}^J \alpha_{jk} + n_{k})} \prod_{k=1}^K \frac{\Gamma(\sum_{j=1}^J \alpha_{jk} + n_{k})}{\Gamma(\sum_{j=1}^J \alpha_{jk})} \label{eq:margin_dir_mult} \end{align} \subsection{Aggregation property} An important feature of the Dirichlet distribution is the \textit{aggregation property}: Every sum of Dirichlet-distributed parameters follows a Dirichlet distribution where the corresponding parameters were summed up. For our Dirichlet-mul\-ti\-no\-mial distribution we have a Dirichlet-distributed posterior where the parameters are counts plus pseudo counts (the Dirichlet parameters). \begin{align} (\theta_{1}, \dots, \theta_{j-1}, \theta_j, \dots, \theta_J) \sim& \operatorname{Dir}(\alpha_1, \dots, \alpha_{j-1}, \alpha_j, \dots, \alpha_J)\nonumber\\ \Rightarrow (\theta_{1}, \dots, \theta_{j-1} + \theta_j, \dots, \theta_J) \sim& \operatorname{Dir}(\alpha_1, \dots, \alpha_{j-1} + \alpha_j, \dots, \alpha_J) \end{align} Since we have to learn about the contribution of each parent to the Dirichlet prior for learning their parameters, we would like to have individual counts: Instead of storing counts $n_k$ telling us how often we saw category $k$, we would like to store counts $n'_{jk}$ telling how often we saw category $k$ caused by parent $j$. Now if those counts are part of the parameters of a Dirichlet-multinomial posterior, we know that summing over those counts with $\sum_{j=1}^J n'_{jk} = n_k$ yields the original Dirichlet posterior of our model. To do so, we introduce probabilities $\theta'_{jk}$ corresponding to the probability of seeing category $k$ explained by the $j$th parent parameter. 
Then \begin{align} \sum_{j=1}^J n'_{jk} = n_{k}; \qquad \sum_{j=1}^J \theta'_{jk} = \theta_k \label{eq:aux_n_theta} \end{align} and the posterior of a multinomial distribution over $\boldsymbol{\theta'}$ with a Dirichlet prior distribution is \begin{align} \boldsymbol{\theta'} \sim& \operatorname{Dir}(\alpha_{11} + n'_{11}, \dots,\alpha_{1K} + n'_{1K},\dots, \alpha_{J1} + n'_{J1}, \dots, \alpha_{JK} + n'_{JK} )\nonumber\\ \Rightarrow \boldsymbol{\theta} \sim& \operatorname{Dir}(\sum_{j=1}^J \alpha_{j1} + n_{1}, \dots, \sum_{j=1}^J \alpha_{jK} + n_{K}) \end{align} which directly follows from the aggregation property. \subsection{Auxiliary variables for category counts} Based on the aggregation property of the Dirichlet distribution we introduce the auxiliary variables $n'_{jk}$ from Eq.~\ref{eq:aux_n_theta} corresponding to the share of counts assigned to category $k$ explained by the $j$th parent parameter. We have to account for the possible orderings of parent counts $\boldsymbol{n'}$ and get: \begin{align} &p(\boldsymbol{n},\boldsymbol{n'}\mid \boldsymbol{\alpha_1}, \dots, \boldsymbol{\alpha_J}) =\nonumber \\ & \frac{\Gamma(\sum_{k=1}^K \sum_{j=1}^J \alpha_{jk})}{\Gamma(\sum_{k=1}^K \sum_{j=1}^J \alpha_{jk} + n_{k})} \prod_{k=1}^K {n_{k} \choose n'_{1k}, \dots, n'_{Jk}} \prod_{j=1}^J \frac{\Gamma(\alpha_{jk} + n'_{jk})}{\Gamma(\alpha_{jk})} \label{eq:disgregation} \end{align} which is a product of $K$ Dirichlet-multinomial distributions with prior parameters $\alpha_{1k}, \dots, \alpha_{Jk}$ and therefore \begin{equation} \operatorname{E}[n'_{jk}] = \frac{\alpha_{jk}}{\sum_{j'=1}^J \alpha_{j'k}} \cdot n_{k}. \label{eq:exp_aux_counts} \end{equation} Summing over all possible values of $n'_{jk}$ yields the original Eq.~\ref{eq:margin_dir_mult}.
In order to get rid of the gamma functions in Eq.~\ref{eq:margin_dir_mult}, Teh et al.~\cite{teh08b} introduced auxiliary variables $m_k$ and $m'_{jk}$, corresponding to tables per category and tables per category and parent prior, respectively (see Eq.~\ref{eq:aux_tables}): \begin{align} & p(\boldsymbol{n},\boldsymbol{m} \mid \boldsymbol{\alpha_1}, \dots, \boldsymbol{\alpha_J}) = \frac{\Gamma(\sum_{k=1}^K \sum_{j=1}^J \alpha_{jk})}{\Gamma(\sum_{k=1}^K \sum_{j=1}^J \alpha_{jk} + n_{k})} \prod_{k=1}^K \operatorname{s}(m_{k},n_{k}) \cdot \left(\sum_{j=1}^J \alpha_{jk}\right)^{m_k} \label{eq:exp_sum} \end{align} where $s(m,n)$ are the unsigned Stirling numbers of the first kind. \subsection{Auxiliary parent-level counts} For MD prior distributions, Eq.~\ref{eq:exp_sum} complicates the inference for $\alpha_{jk}$, as the single parts of the parent prior parameters are hidden in exponentiated sums. However, introducing the parent counts $\boldsymbol{n'}$ from Eq.~\ref{eq:disgregation} and using the auxiliary variables from Eq.~\ref{eq:aux-sterling} we can readily see that \begin{align} & p(\boldsymbol{n},\boldsymbol{n'},\boldsymbol{m'}\mid \boldsymbol{\alpha_1}, \dots, \boldsymbol{\alpha_J}) =\nonumber\\ & \frac{\Gamma(\sum_{k=1}^K \sum_{j=1}^J \alpha_{jk})}{\Gamma(\sum_{k=1}^K \sum_{j=1}^J \alpha_{jk} + n_{k})} \prod_{k=1}^K {n_k \choose n'_{1k}, \dots, n'_{Jk}} \prod_{j=1}^J \operatorname{s}(m'_{jk},n'_{jk}) \cdot \alpha_{jk}^{m'_{jk}} \end{align} after which the exponentiated sums disappear. The auxiliary variables $\boldsymbol{m'}$ behave like balls in a Polya urn scheme (or like tables in a Chinese restaurant process for truncated Dirichlet processes) where we distinguish not only between $K$ categories, but also distinguish between the $J$ parent prior parameters.
As the choice of parent prior parameters in the Polya urn scheme directly depends on their relative sizes, the expectations for $\boldsymbol{m'}$ are: \begin{equation} \operatorname{E}[m'_{jk}] = \frac{\alpha_{jk}}{\sum_{j'=1}^J \alpha_{j'k}} \cdot \operatorname{E}[m_{k}] = \alpha_{jk} \cdot \left( \Psi\Bigl(\sum_{j'=1}^J \alpha_{j'k}+n_k\Bigr) - \Psi\Bigl(\sum_{j'=1}^J \alpha_{j'k}\Bigr) \right) . \label{eq:exp_aux_tables} \end{equation} If $\alpha_{jk}$ contains multinomial parameters, it is now simple to infer them, as $\boldsymbol{m'}$ behave like observed counts of a multinomial. For non-multinomial parameters, further auxiliary variables allow for an elegant inference using gamma distributions~\cite{teh08b}. \section{Inference} For inference on categories, one can directly work with the summed parameters and counts from Eq.~\ref{eq:exp_sum}. These sums can be calculated in advance and can be updated after a fixed number of inference steps. The resulting inference scheme is identical to that of standard Dirichlet-multinomial models~\cite{teh08b}. For learning about the auxiliary variables for the parent prior parameters, we use the expectations from Eq.~\ref{eq:exp_aux_counts} and Eq.~\ref{eq:exp_aux_tables} which can now be directly calculated given category counts. \textbf{We do not have to explicitly calculate variational distributions over the new auxiliary variables $\boldsymbol{n'}$ and $\boldsymbol{m'}$, which allows for an efficient inference.} If we separate our parent prior parameters into a mean and a precision part as in~\cite{minka00}, the multinomial mean part of the parent parameters follows a Dirichlet distribution, while the precision part follows a gamma distribution, as shown in~\cite{teh08b}. \textbf{Using the auxiliary variable scheme, it is also possible to integrate over Dirichlet distributed means of parent priors, which allows for collapsed inference schemes for hierarchical multi-Dirichlet and multi-Dirichlet process models.} \end{document}
\begin{document} \begin{titlepage} \title{Incremental Exact Min-Cut in Poly-logarithmic Amortized Update Time} \thispagestyle{empty} \begin{abstract} We present a deterministic incremental algorithm for \textit{exactly} maintaining the size of a minimum cut with $\widetilde{O}(1)$ amortized time per edge insertion and $O(1)$ query time. This result partially answers an open question posed by Thorup [Combinatorica 2007]. It also stays in sharp contrast to a polynomial conditional lower-bound for the fully-dynamic weighted minimum cut problem. Our algorithm is obtained by combining a recent sparsification technique of Kawarabayashi and Thorup [STOC 2015] and an exact incremental algorithm of Henzinger [J. of Algorithms 1997]. We also study space-efficient incremental algorithms for the minimum cut problem. Concretely, we show that there exists an ${O}(n\log n/\varepsilon^2)$ space Monte-Carlo algorithm that can process a stream of edge insertions starting from an empty graph, and with high probability, the algorithm maintains a $(1+\varepsilon)$-approximation to the minimum cut. The algorithm has $\widetilde{O}(1)$ amortized update-time and constant query-time. \end{abstract} \end{titlepage} \section{Introduction} Computing a minimum cut of a graph is a fundamental algorithmic graph problem. While most of the focus has been on designing efficient static algorithms for finding a minimum cut, the dynamic maintenance of a minimum cut has also attracted increasing attention over the last two decades. The motivation for studying the dynamic setting is apparent, as real-life networks such as social or road networks undergo constant and rapid changes. Given an initial graph $G$, the goal of a dynamic graph algorithm is to build a data-structure that maintains $G$ and supports update and query operations.
Depending on the types of update operations we allow, dynamic algorithms are classified into three main categories: (i) \emph{fully dynamic}, if update operations consist of both edge insertions and deletions, (ii) \emph{incremental}, if update operations consist of edge insertions only and (iii) \emph{decremental}, if update operations consist of edge deletions only. In this paper, we study incremental algorithms for maintaining the size of a minimum cut of an unweighted, undirected graph (denoted by $\lambda(G) = \lambda$) supporting the following operations: \begin{itemize} \item \textsc{Insert}$(u,v)$: Insert the edge $(u,v)$ in $G$. \item \textsc{QuerySize}: Return the exact (approximate) size of a minimum cut of the current $G$. \end{itemize} For any $\alpha \geq 1$, we say that an algorithm is an $\alpha$-approximation of $\lambda$ if \textsc{QuerySize} returns a positive number $k$ such that $\lambda \leq k \leq \alpha \cdot \lambda$. Our problem is characterized by two time measures: \emph{query time}, which denotes the time needed to answer each query and \emph{total update time}, which denotes the time needed to process \emph{all} edge insertions. We say that an algorithm has an $O(t(n))$ amortized update time if it takes $O(m \cdot t(n))$ total update time for $m$ edge insertions starting from an empty graph. We use $\widetilde{O}(\cdot)$ to hide poly-logarithmic factors. \paragraph*{Related Work.} For over a decade, the best known static and deterministic algorithm for computing a minimum cut was due to Gabow~\cite{Gabow95}, which runs in $O(m + \lambda^{2} \log n)$ time. Recently, Kawarabayashi and Thorup~\cite{thorup} devised an $\widetilde{O}(m)$ time algorithm which applies only to simple, unweighted and undirected graphs. Randomized Monte Carlo algorithms in the context of static minimum cut were initiated by Karger~\cite{Karger99}.
The best known randomized algorithm is due to Karger~\cite{kargermin} and runs in $O(m \log^{3} n)$ time. Karger~\cite{Karger94} was the first to study the dynamic maintenance of a minimum cut in its full generality. He devised a fully dynamic, albeit randomized, algorithm for maintaining a $\sqrt{1+2/\varepsilon}$-approximation of the minimum cut in $\widetilde{O}(n^{1/2 + \varepsilon})$ expected amortized time per edge operation. In the incremental setting, he showed that the update time for the same approximation ratio can be further improved to $\widetilde{O}(n^{\varepsilon})$. Thorup and Karger~\cite{thorupkarger} improved upon the above guarantees by achieving an approximation factor of $\sqrt{2+o(1)}$ and an $\widetilde{O}(1)$ expected amortized time per edge operation. Henzinger~\cite{henzinger97} obtained the following guarantees for the incremental minimum cut: for any $\varepsilon \in (0,1]$, (i) an $O(1/\varepsilon^{2})$ amortized update-time for a $(2+\varepsilon)$-approximation, (ii) an $O(\log^{3} n / \varepsilon^{2})$ expected amortized update-time for a $(1+\varepsilon)$-approximation and (iii) an $O(\lambda \log n)$ amortized update-time for the exact minimum cut. For minimum cut up to some poly-logarithmic size, Thorup~\cite{fullythorup} gave a fully dynamic Monte-Carlo algorithm for maintaining exact minimum cut in $\widetilde{O}(\sqrt{n})$ time per edge operation. He also showed how to obtain a $(1+o(1))$-approximation of an arbitrary sized minimum cut with the same time bounds. In comparison to previous results, it is worth pointing out that his work achieves \textit{worst-case} update times. Lacki and Sankowski~\cite{LackiS11} studied the dynamic maintenance of the exact size of the minimum cut in planar graphs with arbitrary edge weights. They obtained a fully dynamic algorithm with $\widetilde{O}(n^{5/6})$ worst-case query and update time.
There has been a growing interest in proving conditional lower bounds for dynamic problems in the last few years~\cite{abboud, henzinger15}. A recent result of Nanongkai and Saranurak~\cite{DS16} shows the following conditional lower-bound for the \emph{exact weighted} minimum cut assuming the Online Matrix-Vector Multiplication conjecture: for any $\varepsilon > 0$, there are no fully-dynamic algorithms with polynomial-time preprocessing that can simultaneously achieve $O(n^{1-\varepsilon})$ update-time and $O(n^{2-\varepsilon})$ query-time. \paragraph*{Our Results and Technical Overview.} We present two new incremental algorithms concerning the maintenance of the size of a minimum cut. Both algorithms apply to undirected, unweighted graphs. Our first and main result, presented in Section~\ref{sec: exactMinCut}, shows that there is a deterministic incremental algorithm for \textit{exactly} maintaining the size of a minimum cut with $\widetilde{O}(1)$ amortized time per operation and $O(1)$ query time. This result allows us to partially answer in the affirmative a question regarding efficient dynamic algorithms for exact minimum cut posed by Thorup~\cite{fullythorup}. Additionally, it also stays in sharp contrast to the polynomial conditional lower-bound for the fully-dynamic weighted minimum cut problem of~\cite{DS16}. We obtain our result by heavily relying on a recent sparsification technique developed in the context of static minimum cut algorithms. Specifically, for a given simple graph $G$, Kawarabayashi and Thorup~\cite{thorup} designed an $\widetilde{O}(m)$ procedure that contracts vertex sets of $G$ and produces a multigraph $H$ with considerably fewer vertices and edges while preserving some family of cuts of size up to $(3/2)\lambda(G)$.
Motivated by the properties of $H$, we crucially observe that it is ``safe'' to work entirely with graph $H$ as long as the sequence of newly inserted edges does not increase the size of the minimum cut in $H$ by more than $(3/2) \lambda(G)$. If the latter occurs, we recompute a new multigraph $H$ for the current graph $G$. Since $\lambda(G) \leq n$, the number of such re-computations is $O(\log n)$. For maintaining the minimum-cut of $H$, we appeal to the exact incremental algorithm due to Henzinger~\cite{henzinger97}. Though the combination of these two algorithms might seem immediate at first sight, it is not alone sufficient for achieving the claimed bounds. Our main contribution is to overcome some technical obstacles and formally argue that such a combination indeed leads to our desirable guarantees. Motivated by the recent work on \textit{space-efficient} dynamic algorithms~\cite{sayan, GibbKKT15}, we also study the efficient maintenance of the size of a minimum cut using only $\widetilde{O}(n)$ space. Concretely, we present an ${O}(n\log n / \varepsilon^2)$ space Monte-Carlo algorithm that can process a stream of edge insertions starting from an empty graph, and with high probability, it maintains a $(1+\varepsilon)$-approximation to the minimum cut in ${O}(\alpha(n) \log^3 n /\varepsilon^2)$ amortized update-time and constant query-time. Note that none of the existing streaming algorithms for $(1+\varepsilon)$-approximate minimum cut~\cite{AhnG09,KelnerL13,AhnGM12} achieves these update and query times. \section{Preliminaries} Let $G = (V,E)$ be an undirected, unweighted multi-graph with no self-loops. Two vertices $x$ and $y$ are $k$-\textit{edge connected} if there exist $k$ edge-disjoint paths connecting $x$ and $y$. A graph $G$ is $k$-\textit{edge connected} if every pair of vertices is $k$-edge connected.
The \textit{local edge connectivity} $\lambda(G,x,y)$ of vertices $x$ and $y$ is the largest $k$ such that $x$ and $y$ are $k$-edge connected in $G$. The \textit{edge connectivity} $\lambda(G)$ of $G$ is the largest $k$ such that $G$ is $k$-edge connected. For a subset $S \subseteq V$ in $G$, the \textit{edge cut} $E_G(S, V \setminus S)$ is a set of edges that have one endpoint in $S$ and the other in $ V \setminus S$. We may omit the subscript when clear from the context. Let $\lambda(S,G) = |E_G(S, V \setminus S)|$ be the size of the edge cut. If $S$ is a singleton, we refer to such a cut as a \textit{trivial} cut. Two vertices $x$ and $y$ are \textit{separated} by $E(S, V \setminus S)$ if they do not belong to the same connected component induced by the edge cut. A \textit{minimum edge cut} of $x$ and $y$ is a cut of minimum size among all cuts separating $x$ and $y$. A \textit{global minimum cut} $\lambda(G)$ for $G$ is the minimum edge cut over all pairs of vertices. By Menger's Theorem~\cite{menger}, (a) the size of the minimum edge cut separating $x$ and $y$ is $\lambda(G,x,y)$, and (b) the size of the global minimum cut is equal to $\lambda(G)$. Let $n$, $m_0$ and $m_1$ be the number of vertices, initial edges and inserted edges, respectively. The total number of edges $m$ is the sum of the initial and inserted edges. Moreover, let $\lambda$ and $\delta$ denote the size of the global minimum cut and the minimum degree in the final graph, respectively. Note that the minimum degree is always an upper bound on the edge connectivity, i.e., $\lambda \leq \delta$ and $m = m_0 + m_1 = \Omega(\delta n)$. A subset $U \subseteq V$ is \textit{contracted} if all vertices in $U$ are identified with some element of $U$ and all edges between them are discarded. For $G=(V,E)$ and a collection of vertex sets, let $H=(V_H,E_H)$ denote the graph obtained by contracting such vertex sets.
Such contractions are associated with a mapping $h : V \rightarrow V_H$. For an edge subset $N \subseteq E$, let $N_h= \{(h(a),h(b)) : (a,b) \in N\} \subseteq E_H$ be its corresponding edge subset induced by $h$. \section{Sparse certificates} \label{sec: sparseCertificates} In this section we review a useful sparsification tool, introduced by Nagamochi and Ibaraki~\cite{NagamochiI92}. \begin{definition}[\cite{BenczurK15}] A \emph{sparse $k$-connectivity certificate}, or simply a \emph{$k$-certificate}, for an unweighted graph $G$ with $n$ vertices is a subgraph $G'$ of $G$ such that \begin{enumerate} \item $G'$ consists of at most $k(n-1)$ edges, and \item $G'$ contains all edges crossing cuts of size at most $k$. \end{enumerate} \label{sparsedef} \end{definition} Given an undirected graph $G = (V,E)$, a \textit{maximal spanning forest decomposition (msfd)} $\mathcal{F}$ of order $k$ is a decomposition of $G$ into $k$ edge-disjoint spanning forests $F_i$, $1\leq i \leq k$, such that $F_i$ is a maximal spanning forest of $G \setminus (F_1 \cup F_2 \cup \ldots \cup F_{i-1})$. Note that $G_k = (V, \bigcup_{i \leq k} F_i)$ is a $k$-certificate. An msfd fulfills the following property whose proof we defer to the appendix. \begin{lemma}[\cite{NagamochiI}] \label{lemm: Nagamochi} Let $\mathcal{F}=(F_1,\ldots,F_m)$ be an \emph{msfd} of order $m$ of a graph $G=(V,E)$, and let $k$ be an integer with $1 \leq k \leq m$. Then for any nonempty and proper subset $S \subset V$, \[ \lambda(S,G_k) \begin{cases} \geq k,& \text{if } \lambda(S,G) \geq k\\ = \lambda(S,G), & \text{if } \lambda(S,G) \leq k-1. \end{cases} \] \end{lemma} As $G_k$ is a subgraph of $G$, $\lambda(G_k) \leq \lambda(G)$. This implies that $\lambda(G_k) = \min(k,\lambda(G))$. Nagamochi and Ibaraki~\cite{NagamochiI92} presented an $O(m+n)$ time algorithm to construct a special msfd, which we refer to as DA-msfd.
\section{Incremental Exact Minimum Cut} \label{sec: exactMinCut} In this section we present a deterministic incremental algorithm that exactly maintains $\lambda(G)$. The algorithm has an $\widetilde{O}(1)$ update-time, an $O(1)$ query time and it applies to any undirected, unweighted graph $G = (V,E)$. The result is obtained by carefully combining a recent result of Kawarabayashi and Thorup~\cite{thorup} on static min-cut and the incremental exact min-cut algorithm of Henzinger~\cite{henzinger97}. We start by describing the maintenance of non-trivial cuts, that is, cuts with at least two vertices on both sides. \paragraph*{Maintaining non-trivial cuts.} Kawarabayashi and Thorup~\cite{thorup} devised a near-linear time algorithm that contracts vertex sets of a simple input graph $G$ and produces a sparse multi-graph preserving all non-trivial minimum cuts of $G$. In the following theorem, we state a slightly generalized version of this algorithm. \begin{theorem}[\textsc{KT-Sparsifier}~\cite{thorup}] Given an undirected, unweighted graph $G$ with $n$ vertices, $m$ edges, and min-cut $\lambda$, in $\widetilde{O}(m)$ time, we can contract vertex sets and produce a multigraph $H$ which consists of only $m_H = \widetilde{O}(m/\lambda)$ edges and $n_H = \widetilde{O}(n/\lambda)$ vertices, and which preserves all non-trivial minimum cuts along with the non-trivial cuts of size up to $(3/2) \lambda$ in $G$. \label{SparsificationThm} \end{theorem} As far as non-trivial cuts are concerned, the above theorem implies that it is safe to work on $H$ instead of $G$ as long as the sequence of newly inserted edges satisfies $\lambda_H \leq (3/2) \lambda$. To incrementally maintain the correct $\lambda_H$, we apply Henzinger's algorithm~\cite{henzinger97} on top of $H$. The basic idea to verify the correctness of the solution is to compute and store all min-cuts of $H$.
Clearly, a solution is correct as long as an edge insertion does not increase the size of all min-cuts. If all min-cuts have increased, a new solution is computed using information about the previous solution. We next show how to do this efficiently. To store all minimum edge cuts we use the \textit{cactus tree} representation by Dinitz, Karzanov and Lomonosov~\cite{dinitz}. A cactus tree of a graph $G=(V,E)$ is a weighted graph $G_c = (V_c, E_c)$ defined as follows: There is a mapping $\phi \colon V \rightarrow V_c$ such that: \begin{enumerate} \item Every node in $V$ maps to exactly one node in $V_c$ and every node in $V_c$ corresponds to a (possibly empty) subset of $V$. \item $\phi(x) = \phi(y)$ iff $x$ and $y$ are $(\lambda(G)+1)$-edge connected. \item Every minimum cut in $G_c$ corresponds to a min-cut in $G$, and every min-cut in $G$ corresponds to \emph{at least} one min-cut in $G_c$. \item If $\lambda$ is odd, every edge of $E_c$ has weight $\lambda$ and $G_c$ is a tree. If $\lambda$ is even, $G_c$ consists of paths and simple cycles sharing at most one vertex, where edges that belong to a cycle have weight $\lambda / 2$ while those not belonging to a cycle have weight $\lambda$. \end{enumerate} Dinitz and Westbrook~\cite{DinitzW98} showed that given a cactus tree, we can use the data structures from~\cite{GalilI93, Poutre00} to maintain the cactus tree for minimum cut size $\lambda$ under $u$ insertions, reporting when the minimum cut size increases to $\lambda+1$ in $O(u+n)$ total time. To quickly compute and update the cactus tree representation of a given multigraph $G$, we use an algorithm due to Gabow~\cite{Gabow91}. The algorithm first computes a subgraph of $G$, called a \textit{complete $\lambda$-intersection} or $I(G,\lambda)$, with at most $\lambda n$ edges, and uses $I(G,\lambda)$ to compute the cactus tree.
Given some initial graph with $m_0$ edges, the algorithm computes $I(G,\lambda)$ and the cactus tree in $\widetilde{O}(m_0 + \lambda^{2}n)$ time. Moreover, given $I(G,\lambda)$ and a sequence of edge insertions that increase the minimum cut by 1, the new $I(G,\lambda)$ and the new cactus tree can be computed in $\widetilde{O}(m')$ time, where $m'$ is the number of edges in the current graph (this corresponds to one execution of the Round Robin subroutine~\cite{Gabow95}). \paragraph*{Maintaining trivial cuts.} We remark that the multigraph $H$ from Theorem~\ref{SparsificationThm} preserves only non-trivial cuts of $G$. If $\lambda = \delta$, then we also need a way to keep track of a trivial minimum cut. We achieve this by maintaining a minimum heap $\mathcal{H}_G$ on the vertices, where each vertex is stored with its degree. If an edge insertion is performed, the values of the edge endpoints are updated accordingly in the heap. It is well known that constructing $\mathcal{H}_G$ takes $O(n)$ time. The supported operations \textsc{Min($\mathcal{H}_G$)} and \textsc{UpdateEndpoints($\mathcal{H}_G$,$e$)} can be implemented in $O(1)$ and $O(\log n)$ time, respectively (see~\cite{Cormen}). This leads to Algorithm~\ref{algo: ExactMinCut}.
% NOTE(review): the \def lines that followed here were extraction artifacts --- the original
% macro names (e.g. \IF, \THEN) were expanded in place, leaving definitions that would
% destructively redefine \textbf, ~ and \quad. They are disabled below; the algorithm body
% already uses the expanded forms (\textbf{if}~, \quad~~, ...) directly.
% \def\textbf{if}~{\textbf{if}~} \def~\textbf{then}{~\textbf{then}} \def\textbf{endif}~{\textbf{endif}~}
% \def\textbf{while}~{\textbf{while}~} \def\textbf{endwhile}~{\textbf{endwhile}~} \def\textbf{else}~{\textbf{else}~}
% \def\textbf{Goto}~{\textbf{Goto}~} \def\quad~~{\quad~~}
\begin{algorithm} \caption{\textsc{Incremental Exact Minimum Cut}} \begin{algorithmic}[1] \State Compute the size $\lambda_0$ of the min-cut of $G$ and set $\lambda^* = \lambda_0$. \Statex Build a heap $\mathcal{H}_G$ on the vertices, where each vertex stores its degree as a key. \Statex Compute a multigraph $H$ by running \textsc{KT-sparsifier} on $G$ and a mapping $h : V \rightarrow V_H$.
\Statex Compute the size $\lambda_H$ of the min-cut of $H$, a DA-msfd $F_1, \ldots, F_m$ of order $m$ of $H$, \Statex $I(H,\lambda_H)$, and a cactus-tree of $\bigcup_{i \leq \lambda_H+1} F_i$. \State Set $N_h = \emptyset$. \Statex \textbf{while}~ there is at least one minimum cut of size \textsc{$\lambda_H$}~\textbf{do} \Statex \quad~~ \textbf{Receive the next operation}. \Statex \quad~~ \textbf{if}~ it is a query~\textbf{then} \Return $\min$\{$\lambda_H$, \textsc{Min($\mathcal{H}_G$)}\} \Statex \quad~~ \textbf{else}~ it is the insertion of an edge $(u,v)$, \textbf{then} \Statex \quad~~ update the cactus tree according to the insertion of the new edge $(h(u),h(v))$, \Statex \quad~~ add the edge $(h(u),h(v))$ to $N_h$ and update the degrees of $u$ and $v$ in $\mathcal{H}_G$. \Statex \quad~~ \textbf{endif}~ \Statex \textbf{endwhile}~ \Statex Set $\lambda_H = \lambda_H + 1$. \State \textbf{if}~ $\min$\{$\lambda_H$, \textsc{Min($\mathcal{H}_G$)}\}$> (3/2) \lambda^{*}$~\textbf{then} \Statex \quad~~ \texttt{// Full Rebuild Step} \Statex \quad~~ Compute $\lambda(G)$ and set $\lambda^{*} = {\lambda(G)}$. \Statex \quad~~ Compute a multigraph $H$ by running \textsc{KT-sparsifier} on the current graph $G$. \Statex \quad~~ Update $\lambda_H$ to be the min-cut of $H$, compute a DA-msfd $F_1, \ldots, F_m$ of order $m$ of $H$, \Statex \quad~~ and then $I(H, \lambda_H)$ and a cactus tree of $\bigcup_{i \leq \lambda_H+1} F_i$. \Statex \textbf{else}~ \textbf{if}~ $\lambda_H \leq (3/2) \lambda^{*}$~\textbf{then} \Statex \quad~~ \quad~~ // \texttt{Partial Rebuild Step} \Statex \quad~~ \quad~~ Compute a DA-msfd $F_1, \ldots, F_m$ of order $m$ of $\bigcup_{i \leq \lambda_H + 1} F_i \cup N_h$ and \Statex \quad~~ \quad~~ call the resulting forests $F_1,\ldots,F_m$. \Statex \quad~~ \quad~~ Let $H' = (V_H,E')$ be a graph with $E' = I(H,\lambda_H - 1) \cup \bigcup_{i \leq \lambda_H + 1} F_i$.
\Statex \quad~~ \quad~~ Compute $I(H', \lambda_H)$ and a cactus tree of $H'$. \Statex \quad~~ \textbf{else}~ // \texttt{Special Step} \Statex \quad~~ \quad~~ \textbf{while}~ \textsc{Min($\mathcal{H}_G$)} $\leq (3/2) \lambda^*$ ~\textbf{do} \Statex \quad~~ \quad~~ \quad~~ \textbf{if}~ the next operation is a query~\textbf{then} \Return \textsc{Min($\mathcal{H}_G$)} \Statex \quad~~ \quad~~ \quad~~ \textbf{else}~ update the degrees of the edge endpoints in $\mathcal{H}_G$. \Statex \quad~~ \quad~~ \quad~~ \textbf{endif}~ \Statex \quad~~ \quad~~ \textbf{endwhile}~ \Statex \quad~~\quad~~ \textbf{Goto}~ 3. \Statex \quad~~ \textbf{endif}~ \Statex \textbf{endif}~ \Statex \textbf{Goto}~ 2. \end{algorithmic} \label{algo: ExactMinCut} \end{algorithm} \paragraph*{Correctness.} Let $G$ be the current graph throughout the execution of the algorithm and let $H$ be the corresponding multigraph maintained by the algorithm. Recall that $H$ preserves some family of cuts from $G$. We say that $H$ is \textit{correct} if and only if there exists a minimum cut from $G$ that is contained in the union of (a) all trivial cuts of $G$ and (b) all cuts in $H$. Note that we consider $H$ to be correct even in the \texttt{Special Step} (i.e., when $\lambda_H > (3/2) \lambda^*$), where $H$ is not updated anymore since we are certain that the smallest trivial cut is smaller than any cut in $H$. To prove the correctness of the algorithm we will show that (1) it correctly maintains a trivial min-cut at any time, (2) $H$ is correct as long as $\min\{\textsc{Min}(\mathcal{H}_G),\lambda_H\} \leq (3/2) \lambda^{*}$ (and when this condition fails we rebuild $H$), and (3) as long as $\lambda_H \leq (3/2) \lambda^*$, the algorithm correctly maintains all cuts of size up to $\lambda_H + 1$ of $H$. Let $N_h$ be the set of recently inserted edges in $H$ that the algorithm maintains during the execution of the \textbf{while} loop in Step 2.
\begin{lemma} \lambdabel{correctness1} Let $H=(V_H,E_H)$ be a multigraph with minimum cut $\lambdambda_H$ and let $N_h$ be a set with $N_h \subseteq E_H$. Further, let $F_1,\ldots,F_m$ be a DA-msfd of order $m \ge \lambdambda_H + 1$ of $H \setminus N_h$, and let $H'=(V_H, E')$ be a graph with $E' = N_h \cup \bigcup_{i \leq \lambdambda_H+1} F_i$. Then, a cut is a min-cut in $H'$ iff it is a min-cut in $H$. \end{lemma} \begin{proof} We first show that every non-min cut in $H$ is a non-min cut in $H'$. By contrapositive, we get that a min-cut in $H'$ is a min-cut in $H$. To this end, let $(S,V_H \setminus S)$ be a cut with $|E_H(S,V_H \setminus S)| \geq \lambdambda_H +1$ in $H$. Define $E_H(S, V_H \setminus S) \cap N_h = S_{N_h}$ and $E_H(S, V_H \setminus S) \cap (E_H \setminus N_h) = S_{H \setminus N_h}$ such that $E_H(S,V_H \setminus S) = S_{N_h} \uplus S_{H \setminus N_h}$ and $|E_H(S,V_H \setminus S)| = |S_{N_h}| + |S_{H \setminus N_h}|$. Letting $F' = \bigcup_{i \leq \lambdambda_H+1} F_i$, we similarly define edge sets $S'_{N_h}$ and $S'_{F'}$ partitioning the edges $E'(S, V_H \setminus S)$ that cross the cut $(S, V_H \setminus S)$ in $H'$. First, observe that $S_{N_h} = S'_{N_h}$ since edges of $N_h$ are always included in $H'$. In addition, by Lemma \ref{lemm: Nagamochi}, we know that $F'$ preserves all cuts of $H \setminus N_h$ up to size $\lambdambda_H+1$. Thus, if $|S_{H \setminus N_h}| \leq \lambdambda_H + 1$ (Case 1), we get that $|S_{H \setminus N_h}| = |S'_{F'}|$. It follows that $|E'(S,V_H \setminus S)| = |S'_{N_h}| + |S'_{F'}| = |S_{N_h}| + |S_{H \setminus N_h}| = |E_H(S,V_H \setminus S)| \geq \lambdambda_H +1$. If $|S_{H \setminus N_h}| > \lambdambda_H + 1$ (Case 2), then $F'$ must contain at least $\lambdambda_H + 1$ edges crossing such cut and thus $|S'_{F'}| \geq \lambdambda_H + 1$. The latter implies that $|E'(S,V_H \setminus S)| = |S'_{N_h}| + |S'_{F'}| \geq \lambdambda_H + 1$. 
In both cases, $H'$ being a subgraph of $H$ implies that $\lambdambda(H') \leq \lambdambda_H$. Thus $(S, V_H \setminus S)$ cannot be a min-cut in $H'$. For the other direction, consider a min-cut $(D,V_H \setminus D)$ of size $|E'(D, V_H \setminus D)|$ in $H'$. Let $D_{N_{h}}, D_{H \setminus N_h}, D'_{F'}, D'_{N_h}$ be defined as above. Considering the cut $(D, V_H \setminus D)$ in $H$, we know that $|E_H(D, V_H \setminus D)|= |D_{N_h}| + |D_{H \setminus N_h}| \geq \lambdambda_H$. We first note that $D_{N_h} = D'_{N_h}$ since edges of $N_h$ are always included in $H'$. Then, similarly as above, by Lemma \ref{lemm: Nagamochi} we know that if $|D_{H \setminus N_h}| \leq \lambdambda_H + 1$, then $|E'(D, V_H \setminus D)| = |D'_{N_h}| + |D'_{F'}| = |D_{N_h}| + |D_{H \setminus N_h}| = |E_H(D, V_H \setminus D)| \geq \lambdambda_H$. If $|D_{H \setminus N_H}| > \lambdambda_H + 1$, then $F'$ must contain at least $\lambdambda_H + 1$ edges crossing such cut and thus $|E'(D, V_H \setminus D)| \geq \lambdambda_H + 1$. Combining both bounds we obtain that $|E'(D, V_H \setminus D)| \geq \lambdambda_H$. Since $(D,V_H \setminus D)$ was chosen arbitrarily, we get that $\lambdambda(H') \geq \lambdambda_H$ must hold. The latter along with $\lambdambda(H') \leq \lambdambda_H$ imply that $\lambdambda(H') = \lambdambda_H$. \end{proof} \begin{lemma} \lambdabel{correctness3} The algorithm correctly maintains a trivial min-cut in $G$. \end{lemma} \begin{proof} This follows directly from the min-heap property of $\mathcal{H}_G$. \end{proof} To simplify our notation, in the following we will refer to Step 1 as a \texttt{Full Rebuild Step} (namely the initial \texttt{Full Rebuild Step}). \begin{lemma} \lambdabel{correctness2} For some current graph G, let $H$ be the maintained multi-graph of $G$ under the vertex mapping $h$ and assume that $\lambdambda_H \leq (3/2) \lambdambda^{*}$, where $\lambdambda^{*}$ denotes the min-cut of $G$ at the last \texttt{\em{Full Rebuild Step}}. 
Then the algorithm correctly maintains $\lambda_H = \lambda(H)$. \end{lemma} \begin{proof} At the time of the last \texttt{Full Rebuild Step}, the algorithm calls \textsc{KT-sparsifier} on $G$, which yields a multigraph $H$ that preserves all non-trivial min-cuts of $G$. The value of $\lambda_H$ is updated to $\lambda(H)$ and a DA-msfd and a cactus tree are constructed for $H$. The latter preserve all cuts of $H$ of size up to $\lambda_H +1$. Thus, the value of $\lambda_H$ is correct at this step. Now suppose that the graph after the last \texttt{Full Rebuild Step} has undergone a sequence of edge insertions, which resulted in the current graph $G$ and its corresponding multigraph $H$ under the vertex mapping $h$. During these insertions, as long as $\lambda_H \leq (3/2)\lambda^*$, a sequence of $k$ \texttt{Partial Rebuild Steps} is executed, for some $k\geq 1$. Let $\lambda_H^{(i)}$ be the value of $\lambda_H$ after the $i$-th execution of \texttt{Partial Rebuild Step}, where $1\leq i \leq k$. Since $\lambda_H^{(k)} = \lambda(H)$, it suffices to show that $\lambda_H^{(k)}$ is correct. We proceed by induction. For the base case, we show that $\lambda_H^{(1)}$ is correct. First, using the fact that $\lambda_H$ and the cactus tree are correct at the last \texttt{Full Rebuild Step} and that the incremental cactus tree algorithm correctly tells us when to increment $\lambda_H$, we conclude that incrementing the value of $\lambda_H$ in Step 2 is valid. Thus, $\lambda_H^{(1)}$ is correct. Next, in a \texttt{Partial Rebuild Step}, the algorithm sparsifies the graph while preserving all cuts of size up to $\lambda_H^{(1)} + 1$ and producing a new cactus tree for the next insertions. The correctness of the sparsification follows from Lemma~\ref{correctness1}. For the induction step, let us assume that $\lambda_H^{(k-1)}$ is correct.
Then, similarly to the base case, the correctness of $\lambda_H^{(k-1)}$, the cactus tree from the $(k-1)$-st \texttt{Partial Rebuild Step} and the correctness of the incremental cactus tree algorithm give that incrementing the value of $\lambda_H^{(k-1)}$ in Step 2 is valid and yields a correct $\lambda_H^{(k)}$. \end{proof} Note that when $\lambda_H > (3/2) \lambda^*$, the above lemma is not guaranteed to hold as the algorithm does not execute a \texttt{Partial Rebuild Step} in this case. However, we will show below that this is not necessary for the correctness of the algorithm. The fact that we do not need to execute a \texttt{Partial Rebuild Step} in this setting is crucial for achieving our time bound. \begin{lemma} \label{correctness4} If $\min\{\textsc{Min}(\mathcal{H}_G),\lambda_H\} \leq (3/2) \lambda^{*}$, then $H$ is correct. \end{lemma} \begin{proof} Let $(S',V \setminus S')$ be any non-trivial cut in $G$ that is not in $H$. Such a cut must have cardinality strictly greater than $(3/2) \lambda^{*}$ since otherwise it would be contained in $H$. We show that $(S',V \setminus S')$ cannot be a minimum cut as long as $\min\{\textsc{Min}(\mathcal{H}_G),\lambda_H\} \leq (3/2) \lambda^{*}$ holds. We distinguish two cases. \begin{enumerate} \item If $\lambda_H \leq (3/2) \lambda^*$, then by Lemma~\ref{correctness2} the algorithm maintains $\lambda_H$ correctly. Since $H$ is obtained from $G$ by contracting vertex sets, there is a cut $(S,V_H \setminus S)$ in $H$, and thus in $G$, of value $\lambda_H$. It follows that $(S',V \setminus S')$ cannot be a minimum cut of $G$ since $|E(S', V \setminus S')| > (3/2) \lambda^* \geq \lambda_H = \lambda(H) \geq \lambda(G)$, where the last inequality follows from the fact that $H$ is a contraction of $G$.
\item If $\textsc{Min}(\mathcal{H}_G) \leq (3/2) \lambda^{*}$, then by Lemma~\ref{correctness3} there is a cut of size $\textsc{Min}(\mathcal{H}_G) = \delta$ in $G$. Similarly, $(S', V \setminus S')$ cannot be a minimum cut of $G$ since $|E(S', V \setminus S')| > (3/2) \lambda^{*} \geq \delta \geq \lambda(G)$. \end{enumerate} Appealing to the above cases, we conclude that $H$ is correct since a min-cut of $G$ is either contained in $H$ or it is a trivial cut of $G$. \end{proof} \begin{lemma} Let $G$ be some current graph. Then the algorithm correctly maintains $\lambda(G)$. \end{lemma} \begin{proof} Let $G$ be some current graph and $H$ be the maintained multi-graph of $G$ under the vertex mapping $h$. We will argue that $\lambda(G) = \min\{\textsc{Min}(\mathcal{H}_G),\lambda_H\}$. If $\min\{\textsc{Min}(\mathcal{H}_G),\lambda_H\} \leq (3/2) \lambda^{*}$, then by Lemma~\ref{correctness4}, $H$ is correct, i.e., there exists a minimum cut of $G$ that is contained in the union of all trivial cuts of $G$ and all cuts in $H$. Lemma~\ref{correctness3} guarantees that the algorithm correctly maintains $\textsc{Min}(\mathcal{H}_G)$, i.e., the trivial minimum cut of $G$. If $\lambda_H \leq (3/2) \lambda^*$, then Lemma~\ref{correctness2} ensures that $\lambda_H = \lambda(H)$, and thus $\min\{\textsc{Min}(\mathcal{H}_G), \lambda_H\} = \lambda(G)$. If, however, $\lambda_H > (3/2) \lambda^*$ but $\min\{\textsc{Min}(\mathcal{H}_G),\lambda_H\} \leq (3/2) \lambda^{*}$, then $\lambda_H > \min\{\textsc{Min}(\mathcal{H}_G),\lambda_H\}$ which implies that $\min\{\textsc{Min}(\mathcal{H}_G), \lambda_H\} = \textsc{Min} (\mathcal{H}_G) = \lambda(G)$. As we argued above, the algorithm correctly maintains $\textsc{Min}(\mathcal{H}_G)$ at any time. Thus it follows that the algorithm correctly maintains $\lambda(G)$ in this case as well.
The only case that remains to consider is $\textsc{Min}(\mathcal{H}_G) > (3/2)\lambda^{*}$ and $\lambda_H > (3/2)\lambda^{*}$. But this implies that $\min\{\textsc{Min}(\mathcal{H}_G),\lambda_H\} > (3/2) \lambda^{*}$, and the algorithm computes $H$ and $\lambda(G)$ from scratch and sets $\lambda_H$ correctly. After this full rebuild $\lambda(G) = \min\{\textsc{Min}(\mathcal{H}_G),\lambda_H\}$ trivially holds. \end{proof} \paragraph*{Running Time Analysis.} \begin{theorem} Let $G$ be a simple graph with $n$ nodes and $m_0$ edges. Then the total time for inserting $m_1$ edges and maintaining a minimum edge cut of $G$ is $ \widetilde{O}(m_0 + m_1)$. If we start with an empty graph, the amortized time per edge insertion is $\widetilde{O}(1)$. The size of a minimum cut can be answered in constant time. \end{theorem} \begin{proof} We first analyse Step 1. Building the heap $\mathcal{H}_G$ and computing $\lambda_0$ take $O(n)$ and $\widetilde{O}(m_0)$ time, respectively. The total running time for constructing $H$, $I(H,\lambda_H)$ and the cactus tree is dominated by $\widetilde{O}(m_0 + \lambda^{2}_0 \cdot ( n / \lambda_0)) = \widetilde{O}(m_0)$. Thus, the total time for Step 1 is $\widetilde{O}(m_0)$. Let $\lambda_H^0, \ldots, \lambda_H^f$ be the values that $\lambda_H$ assumes in Step 2 during the execution of the algorithm in increasing order. We define \textit{Phase} $i$ to be all steps executed after Step $1$ while $\lambda_H = \lambda_H^{i}$, excluding Full Rebuild Steps and Special Steps. Additionally, let $\lambda^{*}_0, \ldots, \lambda^{*}_{O(\log n)}$ be the values that $\lambda^{*}$ assumes during the algorithm.
We define \textit{Superphase} $j$ to consist of the $j$-th \texttt{Full Rebuild Step} along with all steps executed while $\min\{\textsc{Min}(\mathcal{H}_G), \lambdambda_H\} \leq (3/2) \lambdambda^{*}_j$, where $\lambdambda^{*}_j$ is the value of $\lambdambda(G)$ at the \texttt{Full Rebuild Step}. Note that a superphase consists of a sequence of phases and potentially a final \texttt{Special Step}. Moreover, the algorithm runs a phase if $\lambdambda_H \leq (3/2) \lambdambda^{*}$. We say that $\lambdambda_H^i$ \textit{belongs} to superphase $j$, if the $i$-th phase is executed during superphase $j$ and $\lambdambda_H^i\leq (3/2) \lambdambda_j^{*}$. We remark that the number of vertices in $H$ changes only at the beginning of a superphase, and remains unchanged during its lifespan. Let $n_j$ denote the number of vertices in some superphase $j$. We bound this quantity as follows: \begin{fact} \lambdabel{fact} Let $j$ be a superphase during the execution of the algorithm. Then, we have \[ n_j = \widetilde{O}(n / \lambdambda_H^i), \text{ for all } \lambdambda_H^i \text{ belonging to superphase } j. \] \end{fact} \begin{proof} From Step 3 we know that $n_j = \widetilde{O}(n / \lambdambda^{*}_j)$. Moreover, observe that $\lambdambda_j^{*} \leq \lambdambda_H^i$ and a phase is executed whenever $\lambdambda_H^i \leq (3/2) \lambdambda_j^{*}$. Thus, for all $\lambdambda_H^i$'s belonging to superphase $j$, we get the following relation \begin{equation} \lambdabel{MinCutRelation} \lambdambda^{*}_j \leq \lambdambda_H^i \leq (3/2) \lambdambda^{*}_j, \end{equation} which in turn implies that $n_j = \widetilde{O}(n / \lambdambda^{*}_j) = \widetilde{O}(n / \lambdambda_H^i)$. \end{proof} For the remaining steps, we divide the running time analysis into two parts (one part corresponding to phases, and the other to superphases). 
\paragraph*{Part $1$.}For some superphase $j$, the $i$-th phase consists of the $i$-th execution of a \texttt{Partial Rebuild Step} followed by the execution of Step 2. Let $u_i$ be the number of edge insertions in Phase $i$. The total time for Step 2 is $O(n_j+u_i \log n) = \widetilde{O}(n + u_i)$. Using Fact~\ref{fact}, we observe that $\bigcup_{i \leq \lambda_H +1}F_i \cup N_h$ has size $O(u_{i-1} + \lambda^{i}_H n_j) = \widetilde{O}(u_{i-1} + n)$. Thus, the total time for computing DA-msfd in a \texttt{Partial Rebuild Step} is $\widetilde{O}(u_{i-1} + n)$. Similarly, since $H'$ has $O(\lambda_H^{i} n_j) = \widetilde{O}(n)$ edges, it takes $\widetilde{O}(n)$ time to compute $I(H',\lambda_{H}^i)$ and the new cactus tree. The total time spent in Phase $i$ is $\widetilde{O}(u_{i-1} + u_{i} + n)$. Let $\lambda$ and $\lambda_H$ denote the size of the minimum cut in the final graph and its corresponding multigraph, respectively. Note that $\sum_{i=1}^{\lambda} u_i \leq m_1$, $\lambda n \leq m_0 + m_1$ and recall Eqn.~\eqref{MinCutRelation}. This gives that the total work over all phases is \[ \sum_{i = 1}^{\lambda_H} \widetilde{O}\left(u_{i-1} + u_{i} + n\right) = \sum_{i = 1}^{\lambda} \widetilde{O}\left(u_{i-1} + u_{i} + n\right) = \widetilde{O}(m_0 + m_1). \] \paragraph*{Part $2$.}The $j$-th superphase consists of the $j$-th execution of a \texttt{Full Rebuild Step} along with a possible execution of a \texttt{Special Step}, depending on whether the condition is met. In a \texttt{Full Rebuild Step}, the total running time for constructing $H$, $I(H,\lambda^{*}_j)$ and the cactus tree is dominated by $\widetilde{O}(m_0 + m_1 + (\lambda^{*}_j)^{2} \cdot (n / \lambda^{*}_j)) = \widetilde{O}(m_0 + m_1)$. The running time of a Special Step is $\widetilde{O}(m_1)$.
Throughout its execution, the algorithm begins a new superphase whenever $\lambda(G) = \min\{\textsc{Min}(\mathcal{H}_G), \lambda_H\} > (3/2)\lambda^{*}$. This implies that $\lambda(G)$ must be at least $(3/2)\lambda^{*}$, where $\lambda^{*}$ is the value of $\lambda(G)$ at the last \texttt{Full Rebuild Step}. Thus, a new superphase begins whenever $\lambda(G)$ has increased by a factor of $3/2$, i.e., only $O(\log n)$ times over all insertions. This gives that the total time over all superphases is $\widetilde{O}(m_0 + m_1)$. \end{proof} \section{Incremental \texorpdfstring{$(1+\varepsilon)$}{1+eps} Minimum Cut with \texorpdfstring{$\widetilde{O}(n)$}{O(n poly log n)} space} \label{sec: ApproxMinCut} In this section we present two $\widetilde{O}(n)$ space incremental Monte-Carlo algorithms that w.h.p.\ maintain the size of a min-cut up to a $(1+\varepsilon)$-factor. Both algorithms have $\widetilde{O}(1)$ update-time and $\widetilde{O}(1)$, resp.~$O(1)$ query-time. The first algorithm uses $O(n \log^{2}n / \varepsilon^2)$ space, while the second one improves the space complexity to $O(n \log n / \varepsilon^2)$. \subsection{An \texorpdfstring{$O(n \log^2 n / \varepsilon^2)$}{O (n log2 n)} space algorithm} Our first algorithm follows an approach that was used in several previous works~\cite{henzinger97, thorupkarger, fullythorup}. The basic idea is to maintain the min-cut up to some size $k$ using small space. We achieve this by maintaining a sparse $(k+1)$-certificate and incorporating it into the incremental exact min-cut algorithm due to Henzinger~\cite{henzinger97}, as described in Section~\ref{sec: exactMinCut}. Finally we apply the well-known randomized sparsification result due to Karger~\cite{Karger99} to obtain our result.
\paragraph*{Maintaining min-cut up to size $k$ using $O(kn)$ space.} We incrementally maintain an msfd for an unweighted graph $G$ using $k+1$ union-find data structures $\mathcal{F}_1, \ldots, \mathcal{F}_{k+1}$ (see~\cite{Cormen}). Each $\mathcal{F}_i$ maintains a spanning forest $F_i$ of $G$. Recall that $F_1,\ldots,F_{k+1}$ are edge-disjoint. When a new edge $e=(u,v)$ is inserted into $G$, we define $i$ to be the first index such that $\mathcal{F}_i.$\textsc{Find}$(u)$ $\neq$ $\mathcal{F}_i.$\textsc{Find}$(v)$. If we found such an $i$, we append the edge $e$ to the forest $F_i$ by setting $\mathcal{F}_i.$\textsc{Union}$(u,v)$ and return $i$. If such an $i$ cannot be found after $k+1$ steps, we simply discard edge $e$ and return NULL. We refer to such procedure as $(k+1)$-\textsc{Connectivity}$(e)$. It is easy to see that the forests maintained by $(k+1)$-\textsc{Connectivity}$(e)$ for every newly inserted edge $e$ are indeed edge-disjoint. Combining this procedure with techniques from Henzinger~\cite{henzinger97} leads to the following Algorithm \ref{algo: ExactMinCutUpToK}. \begin{algorithm} \caption{\textsc{Incremental Exact Min-Cut up to size $k$}} \begin{algorithmic}[1] \State Set $\lambdambda = 0$, initialize $k$ union-find data structures $\mathcal{F}_1, \ldots, \mathcal{F}_{k+1}$, \Statex $k$ empty forests $F_1,\ldots,F_{k+1}$, $I(G,\lambdambda)$, and an empty cactus tree. \State \textbf{while}~ there is at least one minimum cut of size \textsc{$\lambdambda$}~\textbf{do} \Statex \quad~~ \textbf{Receive the next operation}. \Statex \quad~~ \textbf{if}~ it is a query~\textbf{then} \Return $\lambdambda$ \Statex \quad~~ \textbf{else}~ it is the insertion of an edge $e$,~\textbf{then} \Statex \quad~~ Set $i =$ $(k+1)$-\textsc{Connectivity}$(e)$. \Statex \quad~~ \quad~~~ \textbf{if}~ $i \neq $ NULL ~\textbf{then} \Statex \quad~~ \quad~~ \quad~~ Set $F_i = F_i \cup \{e\}$. 
\Statex \quad~~ \quad~~ \quad~~ Update the cactus tree according to the insertion of the edge $e$. \Statex \quad~~ \quad~~ ~~\textbf{endif}~ \Statex \quad~~ \textbf{endif}~ \Statex \textbf{endwhile}~ \State Set $\lambda = \lambda + 1$. \Statex Let $G' = (V,E')$ be a graph with $E' = I(G,\lambda - 1) \cup \bigcup_{i \leq \lambda+1} F_i$. \Statex Compute $I(G',\lambda)$ and a cactus tree of $G'$. \Statex \textbf{Goto}~ 2. \end{algorithmic} \label{algo: ExactMinCutUpToK} \end{algorithm} The correctness of the above algorithm is immediate from Lemmas~\ref{correctness1} and \ref{correctness2}. The running time and query bounds follow from Theorem 8 of Henzinger~\cite{henzinger97}. For the sake of completeness, we provide here a full proof. \begin{corollary} \label{cor: ExactPolyLog} For $k > 0$, there is an $O(kn)$ space algorithm that processes a stream of edge insertions starting from an empty graph $G$ and maintains an exact value of $\min\{\lambda(G),k\}$. Starting from an empty graph, the total time for inserting $m$ edges is $O(km\alpha(n) \log n )$ and queries can be answered in constant time, where $\alpha(n)$ stands for the inverse of the Ackermann function. \end{corollary} \begin{proof} We first analyse Step $1$. Initializing $k+1$ union-find data structures takes $O(kn)$ time. The running time for constructing $I(G,\lambda)$ and building an empty cactus tree is also dominated by $O(kn)$. Thus, the total time for Step $1$ is $O(kn)$. Let $\lambda_0, \ldots, \lambda_f$, where $\lambda_f \leq k$, be the values that $\lambda$ assumes in Step $2$ during the execution of the algorithm in increasing order. We define Phase $i$ to be all steps executed while $\lambda = \lambda_i$. For $i\geq 1$, we can view Phase $i$ as the $i$-th execution of Step $3$ followed by the execution of Step $2$. Let $u_i$ denote the number of edge insertions in Phase $i$.
The total time for testing the $(k+1)$-connectivity of the endpoints of the newly inserted edges, and updating the cactus tree in Step $2$ is dominated by $O(n + k \alpha(n) u_i)$. Since the graph $G'$ in Step $3$ always has at most $O(kn)$ edges, the running time to compute $I(G',\lambda)$ and the cactus tree of $G'$ is $O(kn \log n)$. Combining the above bounds, the total time spent in Phase $i$ is $O(k(\alpha(n)u_i + n \log n))$. Thus, the total work over all phases is $O(km\alpha(n) \log n)$. The space complexity of the algorithm is only $O(kn)$, since we always maintain at most $k+1$ spanning forests during its execution. \end{proof} \paragraph*{Dealing with min-cuts of arbitrary size.} We observe that Corollary~\ref{cor: ExactPolyLog} gives polylogarithmic amortized update time only for min-cuts up to some polylogarithmic size. For dealing with min-cuts of arbitrary size, we use the well-known sampling technique due to Karger~\cite{Karger99}. This allows us to get a $(1+\varepsilon)$-approximation to the value of a min-cut with high probability. \begin{lemma}[\cite{Karger99}] \label{lemm: Karger} Let $G$ be any graph with minimum cut $\lambda$ and let $p \geq 12(\log n)/(\varepsilon^{2}\lambda)$. Let $G(p)$ be a subgraph of $G$ obtained by including each edge of $G$ to $G(p)$ with probability $p$ independently. Then the probability that the value of any cut of $G(p)$ has value more than $(1+\varepsilon)$ or less than $(1-\varepsilon)$ times its expected value is $O(1/n^{4})$. \end{lemma} For some integer $i \geq 1$, let $G_i$ denote a subgraph of $G$ obtained by including each edge of $G$ to $G_i$ with probability $1/2^{i}$ independently. We now have all necessary tools to present our incremental algorithm.
\begin{algorithm} \caption{\textsc{$(1+\varepsilon)$-Min-Cut with $O(n \log^2 n / \varepsilon^{2})$ Space}} \begin{algorithmic}[1] \State \textbf{For} $i=0,\ldots, \lfloor \log n \rfloor$, let $G_i$ be an initially empty sampled subgraph. \State \textbf{Receive the next operation}. \Statex \textbf{if}~ it is a query~\textbf{then} \Statex \quad~~ Find the minimum $j$ such that $\lambdambda(G_j) \leq k$ and \Return $2^{j}\lambdambda(G_j)/(1-\varepsilon)$. \Statex \textbf{else}~ it is the insertion of an edge $e$,~\textbf{then} \Statex \quad~~ Include edge $e$ to each $G_i$ with probability $1/2^{i}$. \Statex \quad~~ Maintain the exact min cut of each $G_i$ up to size $k=48 \log n / \varepsilon^2$ using Algorithm \ref{algo: ExactMinCutUpToK}. \Statex \textbf{endif}~ \State \textbf{Goto}~ 2. \end{algorithmic} \lambdabel{algo: SmallSpaceMinCut1} \end{algorithm} \begin{theorem} \lambdabel{thm: space1} There is an $O(n \log^{2} n/\varepsilon^{2})$ space randomized algorithm that processes a stream of edge insertions starting from an empty graph $G$ and maintains a $(1+\varepsilon)$-approximation to a min-cut of $G$ with high probability. The amortized update time per operation is $O(\alpha(n)\log^{3} n / \varepsilon^{2})$ and queries can be answered in $O(\log n)$ time. \end{theorem} \begin{proof} We first prove the correctness of the algorithm. For an integer $t \geq 0$, let $G^{(t)} = (V,E^{(t)})$ be the graph after the first $t$ edge insertions. Further, let $\lambdambda(G^{(t)})$ denote the min-cut of $G^{(t)}$, $p^{(t)}=12(\log n)/(\varepsilon^{2}\lambdambda^{(t)})$ and $\lambdambda(G,S) = |E_G(S, V \setminus S)|$, for some cut $(S, V \setminus S)$. For any integer $i \leq \lfloor \log_2 1 / p^{(t)} \rfloor$, Lemma \ref{lemm: Karger} implies that for any cut $(S,V \setminus S)$, $(1-\varepsilon)/2^{i} \lambdambda(G^{(t)},S) \leq \lambdambda(G_{i}^{(t)},S) \leq (1+\varepsilon)/2^{i} \lambdambda(G^{(t)},S)$, with probability $1-O(1/n^{4})$. 
Let $(S^*, V \setminus S^*)$ be a min-cut of $G_{i}^{(t)}$, i.e., $\lambdambda(G_{i}^{(t)}, S^*) = \lambdambda(G_{i}^{(t)})$. Setting $i= \lfloor \log_2 1/p^{(t)} \rfloor$, we get that: \[ \mathbb{E}[\lambdambda(G^{(t)}_i)] \leq \lambdambda(G^{(t)})/2^{i} \leq 2p^{(t)} \lambdambda(G^{(t)}) \leq 24 \log n/\varepsilon^{2}. \] The latter along with the implication of Lemma \ref{lemm: Karger} give that for any $\varepsilon \in (0,1)$, the size of the minimum cut in $G^{(t)}_{i}$ is at most $(1+\varepsilon) 24 \log n / \varepsilon^{2} \leq 48 \log n / \varepsilon^{2}$ with probability $1-O(1/n^{4})$. Thus, $j \leq \lfloor \log_2 1 / p^{(t)} \rfloor$ with probability $1-O(1/n^{4})$. Additionally, we observe that the algorithm returns a $(1+O(\varepsilon)) $-approximation to a min-cut of $G^{(t)}$ w.h.p. since by Lemma \ref{lemm: Karger}, $2^{i} \lambdambda(G_i^{(t)})/(1-\varepsilon) \leq (1+\varepsilon)/(1-\varepsilon)\lambdambda(G^{(t)}) = (1+O(\varepsilon))\lambdambda(G^{(t)})$ w.h.p. Note that for any $t$, $\lfloor \log_2 1 / p^{(t)} \rfloor \leq \lfloor \log n \rfloor$, and thus it is sufficient to maintain only $O(\log n)$ sampled subgraphs. Since our algorithm applies to unweighted simple graphs, we know that $t \leq O(n^{2})$. Now applying union bound over all $t \in \{1,\ldots O(n^{2})\}$ gives that the probability that the algorithm does not maintain a $1 + O(\varepsilon)$-approximation is at most $O(1/n^2)$. The total expected time for maintaining a sampled subgraph is $O(m\alpha(n) \log^{2} n / \varepsilon^{2})$ and the required space is $O(n \log n / \varepsilon^{2})$ (Corollary \ref{cor: ExactPolyLog}). Maintaining $O(\log n)$ such subgraphs gives an $O(\alpha(n)\log^{3} n / \varepsilon^{2})$ amortized time per edge insertion and an $O(n \log^2 n / \varepsilon^{2})$ space requirement. The $O(\log n)$ query time follows as in the worst case we scan at most $O(\log n)$ subgraphs, each answering a min-cut query in constant time. 
\end{proof} \subsection{Improving the space to \texorpdfstring{$O(n \log n / \varepsilon^2)$}{O (n log n)}} We next show how to bring down the space requirement of the previous algorithm to $O(n \log n / \varepsilon^{2})$ without degrading its running time. The main idea is to keep a single sampled subgraph instead of $O(\log n)$ of them. Let $G=(V,E)$ be an unweighted undirected graph and assume each edge is given some random weight $p_e$ chosen uniformly from $[0,1]$. Let $G^{w}$ be the resulting weighted graph. For any $p > 0$, we denote by $G(p)$ the unweighted subgraph of $G$ that consists of all edges that have weight at most $p$. We state the following lemma due to Karger~\cite{kargerPHD}: \begin{lemma} \lambdabel{lemm: Karger2} Let $k = 48 \log n / \varepsilon^{2}$. Given a connected graph $G$, let $p$ be a value such that $p \geq k/ (4 \lambdambda(G))$. Then with high probability, $\lambdambda(G(p)) \leq k$ and $\lambdambda(G(p))/p$ is an $(1+\varepsilon)$-approximation to a min-cut of $G$. \end{lemma} \begin{proof} Since the weight of every edge is uniformly distributed, the probability that an edge has weight at most $p$ is exactly $p$. Thus, $G(p)$ is a graph that contains every edge of $G$ with probability $p$. The claim follows from Lemma~\ref{lemm: Karger}. \end{proof} For any graph $G$ and some appropriate weight $p \geq k/(4\lambdambda(G))$, the above lemma tells us that the min-cut of $G(p)$ is bounded by $k$ with high probability. Thus, instead of considering the graph $G$ along with its random edge weights, we build a collection of $k+1$ minimum edge-disjoint spanning forests (using those edge weights). We note that such a collection is an msfd of order $k+1$ for $G$ with $O(kn)$ edges and by Lemma \ref{correctness1}, it preserves all minimum cuts of $G$ up to size $k$. 
Our algorithm uses the following two data structures: (1) {\textsc{NI-Sparsifier}$(k)$ data-structure}: Given a graph $G$, where each edge $e$ is assigned some weight $p_e$ and some parameter $k$, we devise an insertion-only data-structure that maintains a collection of $k+1$ minimum edge-disjoint spanning forests $F_1,\ldots,F_{k+1}$ with respect to the edge weights. Let $F = \bigcup_{i\leq k+1} F_i$. Since we are in the incremental setting, it is known that the problem of maintaining a single minimum spanning forest can be solved in time $O(\log n)$ per insertion using the dynamic tree structure of Sleator and Tarjan~\cite{SleatorT83}. Specifically, we use this data-structure to determine for each pair of nodes $(u,v)$ the maximum weight of an edge in the cycle that the edge $(u,v)$ induces in the minimum spanning forest $F_i$. Let \text{max-weight}$(F_i(u,v))$ denote such a maximum weight. The update operation works as follows: when a new edge $e = (u,v)$ is inserted into $G$, we first use the dynamic tree data structure to test whether $u$ and $v$ belong to the same tree. If no, we link their two trees with the edge $(u,v)$ and return the pair (TRUE, NULL) to indicate that $e$ was added to $F_i$ and no edge was evicted from $F_i$. Otherwise, we check whether $p_e > \text{max-weight}(F_i(e))$. If the latter holds, we make no changes in the forest and return (FALSE, $e$). Otherwise, we replace one of the maximum edges, say $e'$, on the path between $u$ and $v$ in the tree by $e$ and return (TRUE, $e'$). The boolean value that is returned indicates whether $e$ belongs to $F_i$ or not, the second value that is returned gives an edge that does not (or no longer) belong to $F_i$. Note that each edge insertion requires $O(\log n)$ time. We refer to this insert operation as \textsc{Insert-MSF}$(F_i, e, p_e)$. 
Now, the algorithm that maintains the weighted minimum spanning forests implements the following operations: \begin{itemize} \item \textsc{Initialize-NI}$(k)$: Initializes the data structure for $k+1$ empty minimum spanning forests. \item \textsc{Insert-NI}$(e,p_e)$: Set $i = 1$, $e' = e$, taken = FALSE. \\ \hspace*{2.8cm} \textbf{while} ($i \le k+1$ and $e' \neq \text{NULL}$) \textbf{do} \\ \hspace*{3.5cm} Set ($t'$, $e''$) = $\textsc{Insert-MSF}(F_i, e', p_{e'})$.\\ \hspace*{3.5cm} \textbf{if} ($e' = e$) \textbf{then} set taken = $t'$ \textbf{endif} \\ \hspace*{3.5cm} Set $e' = e''$ and $i = i + 1$.\\ \hspace*{2.8cm} \textbf{endwhile}\\ \hspace*{2.8cm} \textbf{if} ($e' \ne e$) \textbf{then} \textbf{return} (taken, $e'$) \textbf{else} \textbf{return} (taken, NULL). \end{itemize} The boolean value that is returned indicates whether $e$ belongs to $F$ or not; the second value returns an edge that is removed from $F$, if any. Recall that $F = \bigcup_{i \le k+1} F_i$. We use the abbreviation $\textsc{NI-Sparsifier}(k)$ to refer to this data-structure. Throughout the algorithm we will associate a weight with each edge in $F$ and use $F^w$ to refer to this weighted version of $F$. \begin{lemma} \label{lemma: NI} For $k > 0$ and any graph $G$, \textsc{NI-Sparsifier$(k)$} maintains a weighted msfd of order $k+1$ of $G$ under edge insertions. The algorithm uses $O(kn)$ space and the total time for inserting $m$ edges is $O(k m\log n)$. \end{lemma} \begin{proof} We first show that \textsc{NI-Sparsifier$(k)$} maintains a forest decomposition such that (1) the forests are edge-disjoint and (2) each forest is maximal. We proceed by induction on the number $m$ of edge insertions. For $m=0$, the forest decomposition is empty. Thus the edge-disjointness and maximality of forests trivially hold. For $m>0$, consider the $m$-th edge insertion, which inserts an edge $e$. Let $F'$, resp. $F$, denote the union of forests before, resp. after, the insertion of edge $e$.
By the inductive assumption, $F'$ satisfies (1) and (2). If $F = F'$, i.e., the edge $e$ was not added to any of the forests when \textsc{Insert-NI}$(e,p_e)$ was called, then $F$ also satisfies (1) and (2). Otherwise $F \neq F'$ and note that by construction, $e$ is appended to exactly one forest. Let $F'_j$, resp. $F_j$, denote this maximal forest before, resp. after, the insertion of $e$. We distinguish two cases. If $e$ links two trees of $F'_j$, then $F_j$ is also a maximal forest and forests of $F$ are edge-disjoint. Thus $F$ satisfies (1) and (2). Otherwise, the addition of $e$ results in the deletion of another edge $e' \in F'_j$. It follows that $F_j$ is maximal and the current forests are edge-disjoint. Applying a similar argument to the addition of edge $e'$ in the remaining forests, we conclude that $F$ satisfies (1) and (2). We next argue about time and space complexity. The dynamic tree data structure can be implemented in $O(n)$ space, where each query regarding $\text{max-weight}(F_i(u,v))$ can be answered in $O(\log n)$ time. Since the algorithm maintains $k+1$ such forests, the space requirement is $O(kn)$. The total running time follows since insertion of an edge can result in at most $k+1$ executions of the \textsc{Insert-MSF}$(F_i,e,p_e)$ procedures, each running in $O(\log n)$ time. \end{proof} (2) {\textsc{Limited Exact Min-Cut}$(k)$ data-structure}: We use Algorithm \ref{algo: ExactMinCutUpToK} to implement the following operations for any unweighted graph $G$ and parameter $k$, \begin{itemize} \item \textsc{Insert-Limited}$(e)$: Executes the insertion of edge $e$ using Algorithm \ref{algo: ExactMinCutUpToK}. \item \textsc{Query-Limited}$()$: Returns $\lambda$. \item \textsc{Initialize-Limited}$(G,k)$: Builds a data structure for $G$ with parameter $k$ by executing Step 1 of Algorithm \ref{algo: ExactMinCutUpToK} and then \textsc{Insert-Limited}$(e)$ for each edge $e$ in $G$. 
\end{itemize} We use the abbreviation \textsc{Lim}$(k)$ to refer to such data-structure. Combining the above data-structures leads to Algorithm \ref{algo: SmallSpaceMinCut}. \begin{algorithm} \caption{\textsc{$(1+\varepsilon)$-Min-Cut with $O(n \log n / \varepsilon^{2})$ Space}} \begin{algorithmic}[1] \State Set $k = 48 \log n / \varepsilon^{2}$. \Statex Set $p = 12 \log n / \varepsilon^{2}$. \Statex Let $H$ and $F^w$ be empty graphs. \State \textsc{Initialize-Limited}$(H,k)$. \Statex \textbf{while}~ $\textsc{Query-Limited()} < k$ ~\textbf{do} \Statex \quad~~ \textbf{Receive the next operation}. \Statex \quad~~ \textbf{if}~ it is a query~\textbf{then} \Return $\textsc{Query-Limited()}/\min\{1,p\}$. \Statex \quad~~ \textbf{else}~ it is the insertion of an edge $e$,~\textbf{then} \Statex \quad~~ Sample a random weight from $[0,1]$ for the edge $e$ and denote it by $p_e$. \Statex \quad~~ \textbf{if}~ $p_e \leq p$ ~\textbf{then} \textsc{Insert-Limited}$(e)$~\textbf{endif}~ \Statex \quad~~ Set (taken, $e'$) = \textsc{Insert-NI}$(e, p_e)$. \Statex \quad~~ \quad~~~ \textbf{if}~ taken~\textbf{then} \Statex \quad~~ \quad~~ \quad~~ Insert $e$ into $F^w$ with weight $p_e$. \Statex \quad~~ \quad~~ \quad~~ \textbf{if}~ ($e' \ne$ NULL) \textbf{then} remove $e'$ from $F^w$. \Statex \quad~~ \quad~~ ~~\textbf{endif}~ \Statex \quad~~ \textbf{endif}~ \Statex \textbf{endwhile}~ \State // \texttt{Rebuild Step} \Statex Set $p = p/2$. \Statex Let $H$ be the unweighted subgraph of $F^w$ consisting of all edges of weight at most $p$. \Statex \textbf{Goto}~ 2. \end{algorithmic} \label{algo: SmallSpaceMinCut} \end{algorithm} \paragraph*{Correctness and Running Time Analysis. } Throughout the execution of Algorithm \ref{algo: SmallSpaceMinCut}, $F$ corresponds exactly to the msfd of order $k+1$ of $G$ maintained by \textsc{NI-Sparsifier}($k$). In the following, let $H$ be the graph that is given as input to \textsc{Lim}($k$). 
Thus, by Corollary \ref{cor: ExactPolyLog}, \textsc{Query-Limited}$()$ returns $\min\{k,\lambda(H)\}$, i.e., it returns $\lambda(H)$ as long as $\lambda(H) \leq k$. We now formally prove the correctness. \begin{lemma}\label{lem:1} Let $\varepsilon \le 1$, $k = 48 \log n / \varepsilon^2$ and assume that the algorithm is started on an empty graph. As long as $\lambda(G) < k$, we have $H=G$, $p = k/4$, and \textsc{Query-Limited}$()$ returns $\lambda(G)$. The first rebuild step is triggered after the first insertion that increases $\lambda(G)$ to $k$ and at that time, it holds that $\lambda(G) = \lambda(H) = k$. \end{lemma} \begin{proof} The algorithm starts with an empty graph $G$, i.e., initially $\lambda(G)= 0$. Throughout the sequence of edge insertions $\lambda(G)$ never decreases. We show by induction on the number $m$ of edge insertions that $H=G$ and $p = k/4$ as long as $\lambda(G) < k$. Note that $k/4 \ge 1$ by our choice of $\varepsilon$. For $m = 0$, the graphs $G$ and $H$ are both empty graphs and $p$ is set to $k/4$. For $m > 0$, consider the $m$-th edge insertion, which inserts an edge $e$. Let $G$ and $H$ denote the corresponding graphs after the insertion of $e$. By the inductive assumption, $p = k/4$ and $G \setminus \{e\} = H \setminus \{e \}$. As $p \ge 1$, $e$ is added to $H$ and, thus, it follows that $G = H$. Hence, $\lambda(H) = \lambda(G)$. If $\lambda(G) < k$ (and hence also $\lambda(G \setminus \{e\}) < k$), no rebuild is performed and $p$ is not changed. If $\lambda(G) = k$, then the last insertion was exactly the insertion that increased $\lambda(G)$ from $k-1$ to $k$. As $H = G$ before the rebuild, \textsc{Query-Limited}$()$ returns $k$, triggering the first execution of the rebuild step. \end{proof} We next analyze the case that $\lambda(G) \ge k$. In this case, both $H$ and $p$ are random variables, as they depend on the randomly chosen weights for the edges. 
Let $F(p)$ be the unweighted subgraph of $F^w$ that contains all edges of weight at most $p$. \begin{lemma}\label{lem:H} Let $N_h(p)$ be the graph consisting of all edges that were inserted after the last rebuild and have weight at most $p$ and let $F^{\text{old}}(p)$ be $F(p)$ right after the last rebuild. Then it holds that $H = F^{\text{old}}(p) \cup N_h(p)$. \end{lemma} \begin{proof} Up to the first rebuild, $N_h = G$ and $p \ge 1$. Thus $N_h(p) = N_h = G$. Lemma \ref{lem:1} shows that until the first rebuild $H=G$. As $F^{\text{old}}(p) = \emptyset$, it follows that $H = G = N_h(p) \cup F^{\text{old}}(p)$ up to the first rebuild. Immediately after each rebuild step, $N_h = \emptyset$ and $H$ is set to be $F(p)$, thus the claim holds. After each subsequent edge insertion that does not trigger a rebuild, the newly inserted edge is added to $N_h(p)$ and to $H$ iff its weight is at most $p$. Thus, both $N_h(p)$ and $H$ change in the same way, which implies that $H = F^{\text{old}}(p) \cup N_h(p)$. \end{proof} \begin{lemma}\label{DA-msfd} At the time of a rebuild $F(p)$ is an msfd of order $k+1$ of $G(p)$. \end{lemma} \begin{proof} \textsc{NI-sparsifier} maintains a maximal spanning forest decomposition based on minimum-weight spanning forests $F_1, \dots, F_{k+1}$ of $G$ using the weights $p_e$. Now consider the hierarchical decomposition $F_1(p), \dots, F_{k+1}(p)$ of $G(p)$ induced by taking only the edges of weight at most $p$ of each forest $F_i$. Note that \textsc{NI-sparsifier} would return exactly the same hierarchy $F_1(p), \dots, F_{k+1}(p)$ if only the edges of $G(p)$ were inserted into \textsc{NI-sparsifier}. Thus $F_1(p), \dots, F_{k+1}(p)$ is an msfd of order $k+1$ of $G(p)$. 
\end{proof} In order to show that $\lambda(H)/\min\{1,p\}$ is a $(1+\varepsilon)$-approximation of $\lambda(G)$ with high probability, we need to show that if $\lambda(G) \geq k$ then (a) the random variable $p$ is at least $k / (4 \lambda(G))$ w.h.p., which implies that $\lambda(G(p))$ is a $(1 + \varepsilon)$-approximation of $\lambda(G)$ w.h.p. and (b) that $\lambda(H) = \lambda(G(p))$ (by Lemma \ref{lemm: Karger2}). \begin{lemma} Let $\varepsilon \le 1$. If $\lambda(G) \geq k$, then (1) $p \geq k / (4\lambda(G))$ with probability $1- O(\log n/n^4)$ and (2) $\lambda(H) = \lambda(G(p))$. \end{lemma} \begin{proof} For any $i \geq 0$, after the $i$-th rebuild we have $p = p^{(i)} := 12 \log n / (2^{i}\varepsilon^{2})$. Let $\ell = \lfloor \log (12 \log n / \varepsilon^2)\rfloor$ denote the index of the last rebuild at which $p^{(i)} \geq 1$. For any $i \geq \ell + 1$, we will show by induction on $i$ that (1) $p^{(i)} = 12 \log n / (2^{i}\varepsilon^{2}) \geq 12 \log n / (\varepsilon^{2}\lambda(G))$ with probability $1-O((i-1-\ell)/n^{4})$, which is equivalent to showing that $\lambda(G) \geq 2^i$ and that (2) at any point between the $i-1$-st and the $i$-th rebuild, $\lambda(H) = \lambda(G(p^{(i-1)}))$. Once we have shown this, we can argue that the number of rebuild steps is small, thus giving the claimed probability in the lemma. Indeed, note that $\lambda(G) \leq n$ since $G$ is unweighted. Additionally, from above we get that after the $i$-th rebuild, $\lambda(G) \geq 2^{i}$ with high probability. Combining these two bounds yields $i \leq O(\log n)$ w.h.p., i.e., the number of rebuild steps is at most $O(\log n)$. We first analyse $i=\ell+1$. Note that $\ell+1$ is the index of the first rebuild at which $p^{(i)} < 1$. Assume that the insertion of some edge $e$ caused the first rebuild. 
Lemma~\ref{lem:1} showed that (1) at the first rebuild $\lambda(G) = k$ and (2) that up to the first rebuild $G(p) = G = H$. We observe that (1) and (2) remain true up to the $(\ell+1)$-st rebuild. In addition, $\lambda(G) = k \geq 24 \log n / \varepsilon^2 \geq 2^{i}$, which implies that $p^{(i)} \geq 1/2$. This shows the base case. For the induction step ($i > \ell+1$), we inductively assume that (1) at the $(i-1)$-st rebuild, $p^{(i-1)} \geq 12 \log n / (\varepsilon^{2} \lambda(G^{\text{old}}))$ with probability $1- O((i-2-\ell)/n^{4})$, where $G^{\text{old}}$ is the graph $G$ right before the insertion that triggered the $i$-th rebuild (i.e., at the last point in time when \textsc{Query-Limited}$()$ returned a value less than $k$), and (2) that $\lambda(H) = \lambda(G(p^{(i-2)}))$ at any time between the $(i-2)$-nd and the $(i-1)$-st rebuild. Let $e$ be the edge whose insertion caused the $i$-th rebuild. Define $G^{\text{new}} = G^{\text{old}} \cup \{e\}$. By induction hypothesis, with probability $1-O((i-2-\ell)/n^4)$, $p^{(i-1)} \geq 12 \log n / (\varepsilon^{2}\lambda(G^{\text{old}})) \geq 12 \log n / (\varepsilon^{2}\lambda(G^{\text{new}}))$ as $\lambda(G^{\text{old}}) \leq \lambda(G^{\text{new}})$. Thus, by Lemma \ref{lemm: Karger2}, we get that $\lambda(G^{\text{new}}(p^{(i-1)}))/p^{(i-1)} \leq (1+\varepsilon) \lambda(G^{\text{new}})$ with probability $1-O(1/n^4)$. Applying a union bound, we get that the two previous statements hold simultaneously with probability $1-O((i-1-\ell)/n^4)$. We show below that $\lambda(G^{\text{new}}(p^{(i-1)})) = \lambda(H^{\text{new}})$, where $H^{\text{new}}$ is the graph stored in \textsc{Lim}($k$) right before the $i$-th rebuild. 
Thus, $\lambda(H^{\text{new}}) = k$, which implies that \begin{align*} \lambda(G^{\text{new}}(p^{(i-1)})) = k =48 \log n / \varepsilon^{2} & \leq (1+\varepsilon) \lambda(G^{\text{new}}) \cdot p^{(i-1)} \\ & = (1+\varepsilon) \lambda(G^{\text{new}}) \cdot 12 \log n /(2^{i-1}\varepsilon^{2}), \end{align*} with probability $1-O((i-1-\ell)/n^4)$. This in turn implies that with probability $1-O((i-1-\ell)/n^4)$, $\lambda(G^{\text{new}}) \geq 2^{i+1}/(1+\varepsilon) \geq 2^{i}$ by our choice of $\varepsilon$. It remains to show that $\lambda(G^{\text{new}}(p^{(i-1)})) = \lambda(H^{\text{new}})$. Note that this is a special case of (2), which claims that at any point between the $(i-1)$-st and the $i$-th rebuild $\lambda(H) = \lambda(G(p^{(i-1)}))$, where $H$ and $G$ are the current graphs. Thus, to complete the proof of the lemma it suffices to show (2). As $H$ is a subgraph of $G(p^{(i-1)})$, we know that $\lambda(G(p^{(i-1)})) \ge \lambda(H)$. Thus, we only need to show that $\lambda(G(p^{(i-1)})) \le \lambda(H)$. Let $G^{i-1}$, resp.~$F^{i-1}$, resp.~$H^{i-1}$, be the graph $G$, resp.~$F$, resp.~$H$, right after rebuild $i-1$ and let $N_h$ be the set of edges inserted since, i.e., $G = G^{(i-1)} \cup N_h$. As we showed in Lemma~\ref{lem:H}, $H = F^{i-1}(p^{(i-1)}) \cup N_h(p^{(i-1)})$. Thus, $H^{i-1} = F^{i-1}(p^{(i-1)})$. Additionally, by Lemma~\ref{DA-msfd}, $F^{i-1}(p^{(i-1)})$ is an msfd of order $k+1$ of $G^{i-1}(p^{(i-1)})$. Thus by Lemma \ref{lemm: Nagamochi}, for every cut $(A, V\setminus A)$ of value at most $k$ in $H^{i-1}$, $\lambda(H^{i-1},A) = \lambda(F^{i-1}(p^{(i-1)}),A) =\lambda(G^{i-1}(p^{(i-1)}),A)$, where $\lambda(G, A) = |E_G(A,V \setminus A)|$. Now assume towards contradiction that $\lambda(G(p^{(i-1)})) > \lambda(H)$ and consider a minimum cut $(A, V\setminus A)$ in $H$, i.e., $\lambda(H) = \lambda(H,A)$. 
We know that at any time $k \ge \lambda(H).$ Thus $k \ge \lambda(H) = \lambda(H,A)$, which implies $k \ge \lambda(H^{i-1},A)$. By Lemma \ref{lemm: Nagamochi} it follows that $\lambda(H^{i-1},A) = \lambda(G^{i-1}(p^{(i-1)}),A)$. Note that $H = H^{i-1} \cup N_h(p^{(i-1)})$ and $G(p^{(i-1)}) = G^{i-1}(p^{(i-1)}) \cup N_h(p^{(i-1)})$. Let $x$ be the number of edges of $N_h(p^{(i-1)})$ that cross the cut $(A, V\setminus A)$. Then $\lambda(H) = \lambda(H,A) = \lambda(H^{i-1}, A) + x = \lambda(G^{i-1}(p^{(i-1)}),A) + x = \lambda(G(p^{(i-1)}),A)$, which contradicts the assumption that $\lambda(G(p^{(i-1)})) > \lambda(H)$. \end{proof} Since our algorithm is incremental and applies only to unweighted graphs, we know that there can be at most $O(n^{2})$ edge insertions. The above lemma implies that for any current graph $G$, Algorithm \ref{algo: SmallSpaceMinCut} returns a $(1+\varepsilon)$-approximation to a min-cut of $G$ with probability $1-O(\log n/n^{4})$. Applying a union bound over $O(n^{2})$ possible different graphs, gives that the probability that the algorithm does not maintain a $(1+\varepsilon)$-approximation is at most $O(\log n/n^2) = O(1/n)$. Thus, at any time we return a $(1+\varepsilon)$-approximation with probability $1-O(1/n)$. \begin{theorem} \label{thm: space2} There is an $O(n \log n/\varepsilon^{2})$ space randomized algorithm that processes a stream of edge insertions starting from an empty graph $G$ and maintains a $(1+\varepsilon)$-approximation to a min-cut of $G$ with high probability. The total time for inserting $m$ edges is $O(m \alpha(n)\log^{3} n / \varepsilon^{2})$ and queries can be answered in constant time. 
\end{theorem} \begin{proof} The space requirement is $O(n \log n/ \varepsilon^{2})$ since at any point of time, the algorithm keeps $H$, $F^{w}$, \textsc{Lim}($k$), and \textsc{NI-Sparsifier} ($k)$, each of size at most $O(n \log n/ \varepsilon^{2})$ (Corollary \ref{cor: ExactPolyLog} and Lemma \ref{lemma: NI}). When Algorithm \ref{algo: SmallSpaceMinCut} executes a \texttt{Rebuild Step}, only the \textsc{Lim}($k)$ data-structure is rebuilt, but not \textsc{NI-Sparsifier}($k$). During the whole algorithm $m$ \textsc{Insert-NI} operations are performed. Thus, by Lemma \ref{lemma: NI}, the total time for all operations involving $\textsc{NI-Sparsifier}(k)$ is $O(m\log^2 n / \varepsilon^{2})$. It remains to analyze Steps $2$ and $3$. In Step 2, \textsc{Initialize-Limited}$(H,k)$ takes at most $O(m \alpha(n)\log^{2} n / \varepsilon^{2})$ total time (Corollary \ref{cor: ExactPolyLog}). The running time of Step $3$ is $O(m)$ as well. Since the number of \texttt{Rebuild Steps} is at most $O(\log n)$, it follows that the total time for all \textsc{Initialize-Limited}$(H,k)$ calls in Steps $2$ and the total time of Step $3$ throughout the execution of the algorithm is $O(m \alpha(n)\log^{3} n / \varepsilon^{2})$. We are left with analyzing the remaining part of Step 2. Each query operation executes one \textsc{Query-Limited}() operation, which takes constant time. Each insertion executes one \textsc{Insert-NI}($e,p_e$) operation, which takes amortized time $O(\log^{2} n / \varepsilon^{2})$. We maintain the edges of $F^w$ in a balanced binary tree so that each insertion and deletion takes $O(\log n)$ time. As there are $m$ edge insertions the remaining part of Step 2 takes total time $O(m \log^{2} n / \varepsilon^{2})$. Combining the above bounds gives the theorem. 
\end{proof} \paragraph*{Acknowledgements} The research leading to these results has received funding from the European Research Council under the European Union's 7th Framework Programme (FP/2007-2013) / ERC Grant Agreement no.~340506 for M. Henzinger. M. Thorup's research is partly supported by Advanced Grant DFF-0602-02499B from the Danish Council for Independent Research under the Sapere Aude research career programme. This work was done in part while M. Henzinger and M. Thorup were visiting the Simons Institute for the Theory of Computing. \appendix \section{Missing proofs in Section \ref{sec: sparseCertificates}} Next we show a proof for Lemma \ref{lemm: Nagamochi}. The arguments closely follow the work of Nagamochi and Ibaraki~\cite{NagamochiI}. We first present the following helpful lemma. \begin{lemma} \label{lemm: sparseHelpful} Let $\mathcal{F} = (F_1,\ldots,F_m)$ be an \emph{msfd} of order $m$ of a graph $G=(V,E)$. Then for any edge $(u,v) \in F_j$ and any $i \le j$, it holds that $\lambda(u,v,\bigcup_{l \leq i}F_l) \geq i$. \end{lemma} \begin{proof} Fix some edge $e=(u,v) \in F_j$. We first argue that for each $i=1,\ldots,j-1$, the forest $(V,F_i)$ contains some $(u,v)$-path. Indeed, by the maximality of the forest $(V,F_i)$, the graph $(V,F_i \cup \{e\})$ must have some cycle $C$ that contains $e$. Thus, $P=C \setminus e$ is the $(u,v)$-path in the forest $(V,F_i)$. It follows that $(V,\bigcup_{l \leq i}F_l)$ has $i$ edge-disjoint $(u,v)$-paths. Next, observe that $G_j=(V,\bigcup_{l \leq j}F_l)$ has $j$ edge-disjoint $(u,v)$-paths, namely the $j-1$ edge-disjoint paths in $G_{j-1}$ (which does not contain the edge $(u,v)$) and the 1-edge path consisting of the edge $(u,v)$. Hence, $\lambda(u,v,\bigcup_{l \leq i}F_l) \geq i$. \end{proof} \begin{pfof}{Lemma \ref{lemm: Nagamochi}} Assume that $\lambda(S,G) \leq k-1$. Then by definition of $G_k$, we know that $G_k$ preserves any cut $S$ of size up to $k$. Thus $\lambda(S,G_k) = \lambda(S,G)$. 
For the other case, $\lambda(S,G) \geq k$ and assume that $\lambda(S,G_k) < \lambda(S,G)$ (otherwise the lemma follows). Then there is an edge $e=(u,v) \in E_G(S,V \setminus S) \setminus E_{G_k}(S, V \setminus S)$. Since $e \not \in \bigcup_{i \leq k} F_i$, this means that $e$ belongs to some forest $F_j$ with $j > k$. By Lemma \ref{lemm: sparseHelpful}, we have that $\lambda(u,v,G_k) \geq k$. Since $(S,V \setminus S)$ separates $u$ and $v$ in $G_k$, it follows that $\lambda(S,G_k) = |E_{G_k}(S, V \setminus S)| \geq \lambda (u,v,G_k) \geq k$. \end{pfof} \end{document}
\begin{document} \title[Extinction of CSBP in critical environment]{Extinction rate of continuous state branching processes in critical L\'evy environments} \author{Vincent Bansaye} \address{CMAP, \'Ecole Polytechnique, Route de Saclay, F-91128 Palaiseau Cedex, France} \email{[email protected]} \author{Juan Carlos Pardo} \address{CIMAT A.C. Calle Jalisco s/n. C.P. 36240, Guanajuato, Mexico} \email{[email protected]} \author{Charline Smadi} \address{Universit\'e Clermont Auvergne, Irstea, UR LISC, Centre de Clermont-Ferrand, 9 avenue Blaise Pascal CS 20085, F-63178 Aubi\`ere, France and Complex Systems Institute of Paris Ile-de-France, 113 rue Nationale, Paris, France} \email{[email protected]} \maketitle \noindent {\sc Key words and phrases}: Continuous State Branching Processes; L\'evy processes conditioned to stay positive; Random Environment; Spitzer's condition; Extinction; Long time behaviour \noindent MSC 2000 subject classifications: 60J80; 60G51; 60H10; 60K37. \begin{abstract} We study the speed of extinction of continuous state branching processes in a L\'evy environment, where the associated L\'evy process oscillates. Assuming that the L\'evy process satisfies Spitzer's condition and the existence of some exponential moments, we extend recent results where the associated branching mechanism is stable. The study relies on the path analysis of the branching process together with its L\'evy environment, when the latter is conditioned to have a non-negative running infimum. For that purpose, we combine the approach developed in Afanasyev et al. \cite{Afanasyev2005}, for the discrete setting and i.i.d. environments, with fluctuation theory of L\'evy processes and a result on exponential functionals of L\'evy processes due to Patie and Savov \cite{patie2016bernstein}. 
\end{abstract} \tableofcontents \section{Introduction and main results} In this manuscript, we are interested in continuous state branching processes (CSBPs) which can be considered as the continuous analogues of Galton-Watson (GW) processes in time and state space. Formally speaking, a process in this class is a strong Markov process taking values in $[0,\infty]$, where $0$ and $\infty$ are absorbing states, and satisfying the branching property, that is to say the law of the process started from $x+y$ is the same as the law of the sum of two independent copies of the same process issued respectively from $x$ and $y$. CSBPs have been introduced by Jirina \cite{MR0101554} in the late fifties of the last century, and since then they have been deeply studied by many authors including Bingham \cite{MR0410961}, Grey \cite{MR0408016}, Grimvall \cite{MR0362529}, Lamperti \cite{MR0208685,MR0217893}, to name but a few. An interesting feature of CSBPs is that they can be obtained as scaling limits of GW processes, see for instance Grimvall \cite{MR0362529} and Lamperti \cite{MR0217893}. Galton-Watson processes in random environment (GWREs) were introduced by Smith and Wilkinson \cite{smith1969branching} in the late sixties of the last century. This type of processes has attracted considerable interest in the last decade, see for instance \cite{afanasyev2012limit,Afanasyev2005,babe,bo10} and the references therein. Indeed, such a family of processes provides a richer class of population models, taking into account the effect of the environment on demographic parameters and letting new phenomena appear. In particular, the classification of the asymptotic behaviour of rare events, such as survival probability and large deviations, is much more complex, since it may combine environmental and demographical stochasticities. 
Scaling limits of GWREs have been studied by Kurtz \cite{Kurtz} in the continuous path setting and, more recently, by Bansaye and Simatos \cite{bansima} and Bansaye et al. \cite{bansaye2018scaling} for a larger class of processes in random environment that includes CSBPs. The limiting processes satisfy the Markov property and the \emph{quenched} branching property, i.e. conditionally on the environment the process started from $x+y$ is distributed as the sum of two independent copies of the same process issued respectively from $x$ and $y$. Such processes may be thought of, and therefore called, {\it CSBPs in random environment}. An interesting subclass of the aforementioned family of processes arises from several scalings of discrete models in i.i.d. environments (see for instance \cite{BPS, bansima, BH}) and can be characterized by a stochastic differential equation (SDE) where the linear term is driven by a L\'evy process. Such a L\'evy process captures the effect of the environment on the mean offspring distribution of individuals. A process in this subclass is known as \emph{CSBP in L\'evy environment} and its construction has been given by He et al. \cite{he2016continuous} and by Palau and Pardo \cite{PP1}, independently, as the unique strong solution of a SDE which will be specified below. The study of the long term behaviour of CSBPs in L\'evy environment has attracted considerable attention recently, see for instance \cite{BPS,BH,li2016asymptotic,palau2017continuous,palau2016asymptotic}. In all these manuscripts, the speed of extinction has been computed for the case where the associated branching mechanism is stable since the survival probability can be expressed explicitly in terms of exponential functionals of L\'evy processes. In \cite{BH}, B\"oinghoff and Hutzenthaler have studied the Feller diffusion case in a Brownian environment and exploited the explicit density of the exponential functional of a Brownian motion with drift. 
Then Bansaye et al. \cite{BPS} studied the long term behaviour for stable branching mechanisms where the random environment is driven by a L\'evy process with bounded variation paths. Palau et al. \cite{palau2016asymptotic} and Li and Xu \cite{li2016asymptotic} extended these results and obtained the extinction probability for stable CSBPs in a general L\'evy environment. Our aim is to relax the assumption that the branching mechanism is stable, that is to say, we are interested in studying the survival probability for a larger class of branching mechanisms associated to CSBPs in L\'evy environments. Here we focus on the critical case, more precisely in oscillating L\'evy environments satisfying the so-called Spitzer's condition which is a well-known assumption in fluctuation theory of L\'evy processes (see assumption {\bf (H1)} below). In order to do so, we use two main tools in our arguments: fluctuation theory and the asymptotic behaviour of exponential functionals of L\'evy processes satisfying Spitzer's condition. We follow the point of view of Afanasyev et al. \cite{Afanasyev2005} in the discrete time setting, to deduce pathwise relationships between the dynamics of the CSBP in random environment and the L\'evy process driving the random environment on the survival event. More precisely, we prove that the survival of the process is strongly related to its survival up to the time when the random environment reaches its running infimum. Then, we decompose its paths into two parts, the pre-infimum and post-infimum processes. If the process survives until the time when the random environment reaches its running infimum, then it has a positive probability to survive after this time and, consequently, it evolves in a ``favorable'' environment. As we will see below, the global picture stays unchanged compared to \cite{Afanasyev2005} but new difficulties arise in the continuous space setting. 
In particular, the state $0$ can be polar and the process might become very close to $0$ but never reach this point. To focus on the absorption event, we use Grey's condition which guarantees that $0$ is accessible. Another difficulty arises at the upper bound for the probability of survival. Indeed, in the discrete setting, to bound the probability of survival we can use the fact that the probability that the process survives at times when the environment reaches a local minimum is equal to the probability that the current population size is greater than or equal to 1 at times when the environment reaches a local minimum. Then Chebyshev or Markov inequality will help to obtain a suitable upper bound. In the continuous setting, this strategy is not helpful. In fact, it is suitable to obtain good estimates for the probability that the process survives at times when the environment reaches a local minimum. In order to do so, explicit knowledge of the probability of extinction is required but the latter can only be derived in few cases, even in the case when the environment is fixed. When the environment is fixed, good estimates of such probability can be derived when the branching mechanism is regularly varying at $\infty$ with index $\vartheta\in(1,2)$ or possesses a Blumenthal-Getoor index bigger than one. In our case, the latter type of estimates cannot be obtained due to the environment as we will explain below. So in order to overcome these difficulties, we impose some assumptions on the branching mechanism and on the environment which are not so restrictive. 
\subsection{CSBPs in a L\'evy environment} Let $(\Omega^{(b)}, \mathcal{F}^{(b)}, (\mathcal{F}^{(b)}_t)_{t\ge 0}, \mathbb{P}^{(b)})$ be a filtered probability space satisfying the usual hypothesis and introduce a $(\mathcal{F}^{(b)}_t)_{t\ge 0}$-adapted standard Brownian motion $B^{(b)}=(B^{(b)}_t,t\geq 0)$ and an independent $(\mathcal{F}^{(b)}_t)_{t\ge 0}$-adapted Poisson random measure $N^{(b)}(\mathrm{d} s,\mathrm{d} z,\mathrm{d} u)$ defined on $\mathbb{R}^3_+$, with intensity $\mathrm{d} s\mu(\mathrm{d} z)\mathrm{d} u$. The measure $\mu$ is concentrated on $(0,\infty)$ and in the whole paper we assume that \begin{equation}\label{finitemom} \int_{(0,\infty)} (z\wedge z^2)\mu(\mathrm{d} z)<\infty, \end{equation} which guarantees non-explosivity (see Lemma \ref{conservative} in the Appendix for the proof of this fact). We denote by $\widetilde{N}^{(b)}$ the compensated measure of $N^{(b)}$. According to Dawson and Li \cite{dawsonLi06}, we can define $Y=(Y_t, t\ge 0)$, a CSBP, as the unique strong solution of the following SDE \begin{equation*}\label{csbp} Y_t=Y_0-\psi^\prime(0+)\int_0^t Y_s \mathrm{d} s+\int_0^t \sqrt{2\gamma^2 Y_s}\mathrm{d} B^{(b)}_s +\int_0^t\int_{(0,\infty)}\int_0^{Y_{s-}}z\widetilde{N}^{(b)}(\mathrm{d} s,\mathrm{d} z,\mathrm{d} u), \end{equation*} where $\gamma\ge 0$ and $\psi^\prime(0+)\in \mathbb{R}$, denotes the right derivative at $0$ of the so-called branching mechanism associated to $Y$ which satisfies the celebrated L\'evy-Khintchine formula, i.e. \begin{equation}\label{defpsi} \psi(\lambda)= \lambda \psi'(0+) + \gamma^2 \lambda^2 +\int_{(0,\infty)}\left( e^{-\lambda x}-1 + \lambda x \right)\mu (\mathrm{d} x), \qquad \textrm{for}\quad \lambda\ge 0. 
\end{equation} For the random environment, we consider $(\Omega^{(e)},\mathcal{F}^{(e)}, (\mathcal{F}^{(e)}_t)_{t\ge 0}, \mathbb{P}^{(e)})$ a filtered probability space satisfying the usual hypothesis and a $(\mathcal{F}^{(e)}_t)$-L\'evy process $K=(K_t,t\ge 0)$ which is defined as follows \begin{equation*}\label{env} K_t=\alpha t+\sigma B^{(e)}_t+\int_0^t\int_{\mathbb{R}\setminus (-1,1)} (e^z-1) {N}^{(e)}(\mathrm{d} s,\mathrm{d} z ) +\int_0^t\int_{(-1,1)} (e^z-1) \widetilde{N}^{(e)}(\mathrm{d} s,\mathrm{d} z ), \end{equation*} where $\alpha\in \mathbb{R}$, $\sigma\geq 0$, $B^{(e)}=(B^{(e)}_t,t\geq 0)$ denotes a $(\mathcal{F}^{(e)}_t)_{t\ge 0}$-adapted standard Brownian motion and $N^{(e)}(\mathrm{d} s, \mathrm{d} z)$ is a $(\mathcal{F}^{(e)}_t)_{t\ge 0}$-adapted Poisson random measure on $\mathbb{R}_+\times \mathbb{R} $ with intensity $\mathrm{d} s\pi(\mathrm{d} z)$, which is independent of $B^{(e)}$. The measure $\pi$ is concentrated on $\mathbb{R}\setminus\{0\}$ and fulfills the following integral condition $$\int_{\mathbb{R}} (1\wedge z^2)\pi(\mathrm{d} z)<\infty.$$ In our model, the population size has no impact on the evolution of the environment and we are considering independent processes for demography and environment. More precisely, we work now on the product space $(\Omega, \mathcal{F}, (\mathcal{F})_{t\ge 0}, \mathbb{P})$, where $\Omega=\Omega^{(e)}\times \Omega^{(b)}$, $\mathcal{F}=\mathcal{F}^{(e)} \otimes \mathcal{F}^{(b)}$, and $\mathcal{F}_t=\mathcal{F}^{(e)}_t \otimes \mathcal{F}^{(b)}_t$ for $t\ge 0$, $ \mathbb{P}= \mathbb{P}^{(e)} \otimes \mathbb{P}^{(b)}$ and we make the direct extension of $B^{(b)}$, $N^{(b)}$, $B^{(e)}$, $N^{(e)}$ and $K$ to $\Omega$ by projection respectively on $\Omega^{(b)}$ and $\Omega^{(e)}$. In particular, $(B^{(e)}, N^{(e)})$ is independent of $(B^{(b)}, N^{(b)})$. 
Letting $Z_0\in [0,\infty)$ a.s., a CSBP in a L\'evy environment $Z$ can be defined as the unique non-negative strong solution of the following SDE, \begin{equation}\label{csbplre} \begin{split} Z_t=&Z_0-\psi^\prime(0+)\int_0^t Z_s \mathrm{d} s+\int_0^t \sqrt{2\gamma^2 Z_s}\mathrm{d} B^{(b)}_s \\ &\hspace{3cm}+\int_0^t\int_{(0,\infty)}\int_0^{Z_{s-}}z\widetilde{N}^{(b)}(\mathrm{d} s,\mathrm{d} z,\mathrm{d} u)+\int_0^t Z_{s-}\mathrm{d} K_s. \end{split} \end{equation} According to He et al. \cite{he2016continuous} and Palau and Pardo \cite{PP1}, pathwise uniqueness and strong existence hold for this SDE. Actually, Palau and Pardo also considered the case when $\psi^\prime(0+)=-\infty$, and obtained that the corresponding SDE has a unique strong solution up to explosion and by convention here it is identically equal to $+\infty$ after the explosion time. It turns out that \eqref{finitemom} is a sufficient condition to conclude that the process $Z$ is conservative or in other words that it does not explode in finite time. The conservativeness was first observed by Palau and Pardo in \cite{palau2017continuous} (see Proposition 1) in the case when the random environment is driven by a Brownian motion. A similar result also holds in our context: if \eqref{finitemom} holds then \[ \mathbb{P}_z(Z_t<\infty)=1 , \qquad \textrm{for any} \qquad t\ge 0, \] and any $z\ge 0$ where $\mathbb{P}_z$ denotes the law of the process $Z$ starting from $z\ge 0$. The proof follows from similar arguments as those used in \cite{palau2017continuous} and is deferred to the Appendix (see Lemma \ref{conservative}). The analysis of the process $Z$ is deeply related to the behaviour and fluctuations of the L\'evy process $\overline{K}$, defined on the same filtration as $K$, which provides a quenched martingale.
We set \begin{equation}\label{envir2} \overline{K}_t=\overline{\alpha}t+\sigma B^{(e)}_t+\int_0^t\int_{(-1,1)}z \widetilde{N}^{(e)}(\mathrm{d} s,\mathrm{d} z)+ \int_0^t\int_{\mathbb{R}\setminus (-1,1)}z {N}^{(e)}(\mathrm{d} s,\mathrm{d} z), \end{equation} where \begin{equation}\label{defbeta} \overline{\alpha} :=\alpha -\psi^{\prime}(0+)- \frac{\sigma^2}{2} - \int_{(-1,1)}(e^z-1-z) \pi(\mathrm{d} z), \end{equation} and we obtain the following statement. \begin{proposition} \label{martingquenched} For $\mathbb{P}^{(e)}$ almost every $w^{(e)}\in\Omega^{(e)}$, $\left(\exp\{-\overline{K}_t(w^{(e)},.)\}Z_t(w^{(e)},.): t\geq 0\right)$ is a $(\Omega^{(b)}, {\mathcal F}^{(b)}, \mathbb{P}^{(b)})$-martingale and for any $t\geq 0$ and $z\geq 0$, $$\mathbb{E}_z[Z_t \ \vert \ K]=ze^{\overline{K}_t}, \qquad \mathbb{P}\text{-a.s.}$$ \end{proposition} The proof is deferred to the Appendix. In other words, the process $\overline{K}$ plays an analogous role as the random walk associated to the logarithm of the offspring means in the discrete time framework and leads to the usual classification for the long time behaviour of branching processes. We say that the process $Z$ is subcritical, critical or supercritical according as $\overline{K}$ drifts to $-\infty$, oscillates or drifts to $+\infty$. We refer to \cite{BPS,BH,palau2016asymptotic,li2016asymptotic} for asymptotic results under different regimes. We observe that in the critical case and contrary to the discrete framework, the process may oscillate between $0$ and $\infty$ a.s., see for instance \cite{BPS}. \subsection{Properties of the L\'evy environment} Recall that $\overline{K}=(\overline{K}_t, t\ge0)$ denotes the real valued L\'evy process defined in \eqref{envir2}. That is to say $\overline{K}$ has stationary and independent increments with c\`adl\`ag paths. For simplicity, we denote by $\mathbb{P}^{(e)}_x$ (resp. $\mathbb{E}^{(e)}_x$) the probability (resp.
expectation) associated to the process $\overline{K}$ starting from $x\in \mathbb{R}$, and when $x=0$, we use the notation $\mathbb{P}^{(e)}$ for $\mathbb{P}^{(e)}_0$ (resp. $\mathbb{E}^{(e)}$ for $\mathbb{E}_0^{(e)}$), i.e. \[ \mathbb{P}^{(e)}_x(\overline{K}_t\in B)=\mathbb{P}^{(e)}(\overline{K}_t+x\in B),\qquad \textrm{for }\quad B\in\mathcal{B}(\mathbb{R}). \] We assume in the sequel that $\overline{K}$ is not a compound Poisson process.\\ In what follows, we assume a general condition which is known as \emph{Spitzer's condition} in fluctuation theory of L\'evy processes, i.e. $$ {\bf (H1)} \hspace{2cm} \frac{1}{t} \int_0^t \mathbb{P}^{(e)}(\overline{K}_s \geq 0) \mathrm{d} s\longrightarrow \rho \in (0,1),\qquad \textrm{as }\quad t\to\infty. \hspace{3cm} $$ Spitzer's condition implies that $\overline{K}$ oscillates and implicitly, from Proposition \ref{martingquenched}, that the process $Z$ is in the critical regime. According to Bertoin and Doney \cite{bertoin1997spitzer} condition {\bf (H1)} is equivalent to \[ \mathbb{P}^{(e)}(\overline{K}_t \geq 0) \longrightarrow \rho \in (0,1),\qquad \textrm{as }\quad t\to\infty. \] Spitzer's condition is a key condition to understand the tail distribution of first passage times (see \eqref{limitv} and \eqref{infimumspitzer} for instance). Notice that if Spitzer's condition holds and $\overline{K}$ has a finite variance, then necessarily $\rho=1/2$. Examples of L\'evy processes satisfying Spitzer's condition are the standard Brownian motion, and stable processes where $\rho\in(0,1)$ plays the role of the positivity parameter. Furthermore, any symmetric L\'evy process satisfies Spitzer's condition with $\rho=1/2$ and any L\'evy process in the domain of attraction of a stable process with positivity parameter $\rho\in (0,1)$ as $t\to \infty$, satisfies Spitzer's condition.
Our arguments on the survival event rely on the running infimum of $\overline{K}$, here denoted by $I=(I_t,t \geq 0)$ where \begin{equation} \label{running_inf} I_t=\inf_{0\le s\le t} \overline{K}_s, \qquad t\ge 0. \end{equation} To be more precise, we use fluctuation theory of L\'evy processes reflected at their running infimum. Let us recall that the reflected process $\overline{K}-I$ is a Markov process with respect to the filtration $(\mathcal{F}^{(e)}_t)_{t \geq 0}$ and whose semigroup satisfies the Feller property (see for instance Proposition VI.1 in the monograph of Bertoin \cite{bertoin1998levy}). We denote by $\widehat{L}$ the local time of $\overline{K}-I$ at $0$ in the sense of Chapter IV in \cite{bertoin1998levy}. Similarly to the discrete framework \cite{Afanasyev2005}, the asymptotic analysis and the role of the initial condition involve the renewal function $\widehat{V}$ which is defined, for all $x\ge 0$, as follows \begin{equation}\label{renewalfct} \widehat{V}(x):=\mathbb{E}^{(e)}\left[\int_{[0,\infty)}\mathbf{1}_{\{I_t\ge -x\}}\mathrm{d} \widehat{L}_t\right]. \end{equation} The renewal function $ \widehat{V}$ is subadditive, continuous and increasing and satisfies $ \widehat{V}(x)\ge 0$ for $x\ge 0$ and $\widehat{V}(x)>0$ for $x>0$. See for instance the monograph of Doney \cite{doney2007fluctuation} or Section~\ref{sectionLevyCondi} for further details about the previous facts. Under Spitzer's condition (see Theorem VI.18 in Bertoin \cite{bertoin1998levy}) the probability that the L\'evy process $\overline{K}$ remains positive, i.e. $t\mapsto\mathbb{P}^{(e)}_x(I_t>0)$ for $x>0$, is regularly varying at $\infty$ with index $\rho-1$ and moreover, for any $x,y>0$, we have \begin{equation}\label{limitv} \lim_{t\to \infty}\frac{\mathbb{P}^{(e)}_x(I_t>0)}{\mathbb{P}^{(e)}_y(I_t>0)}=\frac{\widehat{V}(x)}{ \widehat{V}(y)}.
\end{equation} In other words, we obtain that for any $x >0$, \begin{equation}\label{infimumspitzer} \mathbb{P}^{(e)}_x(I_t>0)\sim \widehat{V}(x)t^{\rho-1}\ell (t), \quad \text{as} \quad t \to \infty, \end{equation} where $\ell$ is a slowly varying function at $\infty$, that is to say, for $c>0$, \[ \lim_{t\to\infty}\frac{\ell(c t)}{\ell(t)}=1. \] \subsection{Main result} We now state our main result which is devoted to the speed of extinction of CSBPs in a L\'evy environment under the assumption that the environment oscillates. It is important to note that the survival of the process is associated to ``favorable'' environments which are characterized by the running infimum of the environment, which is not too small from our assumptions. We need some assumptions to control the effect of the environment on the event of survival. The following moment assumption is needed to guarantee the non-extinction of the process in favorable environments (see Proposition \ref{posi_CSBP} for further details), $${\bf (H2)} \qquad \qquad \qquad \qquad \qquad \quad \int^\infty \theta \ln^2(\theta) \mu(\mathrm{d} \theta) <\infty. \qquad \qquad \qquad \qquad \qquad \qquad \quad $$ The above condition is similar to the $x\log x$ moment condition on the measure $\mu$, used in Proposition 2 in \cite{palau2017continuous} to determine that the probability of survival of CSBP processes in L\'evy environments that drift to $+\infty$, is positive. As we will see below, Spitzer's condition ({\bf H1}) and assumption ({\bf H2}) are sufficient conditions to provide a lower bound for the survival probability. In order to get the upper bound, further assumptions on the branching mechanism and the environment are required.
Let \begin{equation}\label{psi0} \psi_0(\lambda):=\psi(\lambda)-\lambda\psi'(0+),\qquad \textrm{for}\quad \lambda\ge 0, \end{equation} and assume that there exist $\beta\in(0,1]$, $\theta^+>1$ and $C>0$ such that $$ \hspace{-2cm}{\bf (H3)} \hspace{1.5cm} \psi_0(\lambda)\geq C \lambda^{1+\beta}, \quad \text{for } \lambda \geq 0.$$ Assumption {\bf (H3)} allows us to control the absorption of the process for bad environments (see Lemma \ref{L2preuve}) and in particular, it guarantees that $\psi_0(\lambda)$ satisfies the so-called Grey's condition, i.e. \begin{equation}\label{GreysCond} \int^\infty \frac{\mathrm{d} z}{\psi_0(z)}<\infty, \end{equation} which is a necessary and sufficient condition for absorption of CSBPs, see for instance \cite{MR0408016}. Recently, He et al. \cite{he2016continuous} have shown that this condition is also necessary and sufficient for CSBPs in a L\'evy environment to get absorbed with positive probability (see Theorem 4.1 in \cite{he2016continuous}). In our case, since the process $\overline{K}$ oscillates and Grey's condition \eqref{GreysCond} is satisfied, absorption occurs a.s. according to Corollary 4.4 in \cite{he2016continuous}. \begin{theorem}\label{maintheo} Assume that assumptions ${\bf (H1)-(H3)}$ hold. Then there exists a positive function $c$ such that for any $z>0$, $$ \mathbb{P}_z(Z_t>0) \sim c(z) \mathbb{P}^{(e)}_1(I_t>0)\sim c(z) \widehat{V}(1) t^{\rho-1}\ell (t), \quad \text{as} \quad t \to \infty, $$ where $\ell$ is the slowly varying function introduced in \eqref{infimumspitzer}. \end{theorem} We point out that we only need assumptions ${\bf (H1)}$ and ${\bf (H2)}$ to ensure that the probability of survival of the process $Z$ satisfies \begin{equation}\label{lowbound} \mathbb{P}_z(Z_t>0) \geq c(z) \mathbb{P}^{(e)}_1(I_t>0)\sim \widehat{V}(1) c(z)t^{\rho-1}\ell (t), \quad \text{as} \quad t \to \infty.
\end{equation} It seems quite difficult to deduce the asymptotic behaviour of the probability of survival just under assumptions {\bf (H1)} and {\bf (H2)}, as we explain below. Let us briefly explain why stronger assumptions such as ${\bf (H3)}$ are required for the upper bound. Recall from Proposition 2 in \cite{PP1}, that there exists a functional $v_t(s,\lambda, \overline{K})$ which is the $\mathbb{P}^{(e)}$-a.s. unique solution of the backward differential equation \begin{equation}\label{backward} \frac{\partial}{\partial s} v_t(s, \lambda, \overline{K})=e^{\overline{K}_s}\psi_0(v_t(s, \lambda, \overline{K})e^{-\overline{K}_s}), \qquad v_t(t, \lambda, \overline{K})=\lambda, \end{equation} where $\psi_0$ is defined as in \eqref{psi0}. For simplicity of exposition, we denote by $\mathbb{P}_{(z,x)}$ (resp. its expectation $\mathbb{E}_{(z,x)}$) the law of the couple $(Z,\overline{K})$ started from $(z,x)$ where $z,x>0$, under $\mathbb{P}$. Thus, the functional $v_t(s,\lambda, \overline{K})$ determines the law of the reweighted process $(Z_t e^{-\overline{K}_t}, t\ge 0)$ as follows, \[ \begin{split} \mathbb{E}_{(z, 1)}\Big[\exp\Big\{-\lambda Z_t e^{-\overline{K}_t}\Big\}\Big]&=\mathbb{E}_{(z,0)}\Big[\exp\Big\{-\lambda e^{-1} Z_t e^{-\overline{K}_t}\Big\}\Big]\\ &=\mathbb{E}^{(e)}\Big[\exp\Big\{-zv_t(0,\lambda e^{-1},\overline{K})\Big\}\Big]. \end{split} \] Under Grey's condition \eqref{GreysCond} and the previous identity, we can deduce \begin{equation}\label{cotasup} \mathbb{P}_{(z, 1)}(Z_t>0, I_t\le -y)=\mathbb{E}^{(e)}\left[\left(1-e^{-z \overline{v}_t(0,\infty, \overline{K})}\right)\mathbf{1}_{\{I_t\le -y-1\}}\right], \qquad \textrm{ for } \quad y\ge 0, \end{equation} where $\overline{v}_t(0,\infty, \overline{K})$ is $\mathbb{P}^{(e)}$-a.s. finite for all $t\ge 0$ (according to Theorem 4.1 in \cite{he2016continuous}), but perhaps equals $0$.
Actually, assumption {\bf (H2)} guarantees that $\overline{v}_t(0,\infty, \overline{K})>0$, $\mathbb{P}^{(e)}$-a.s., for all $t>0$, and even in ``favorable'' environments (see Proposition \ref{posi_CSBP}). The right-hand side of \eqref{cotasup} seems difficult to estimate due to the nature of the functional $\overline{v}_t(0,\infty, \overline{K})$. Even under the assumption that $\psi_0$ is regularly varying at $\infty$, it is not so clear how to handle $\overline{v}_t(0,\infty, \overline{K})$ due to the dependence on the environment. In the discrete setting such a probability can be estimated in terms of the infimum of the environment (which is a random walk) since the event of survival is equal to the event of the current population being bigger or equal to one. Assumption {\bf (H3)} allows us to upper bound \eqref{cotasup} by the exponential functional of $\overline{K}$, and to study its asymptotic behaviour. We end our exposition with some examples where the renewal function can be computed explicitly and the previous results can be applied. \subsection{Examples} \textit{a) Brownian case.} In the particular case when $\overline{K}$ is a standard Brownian motion starting from $x>0$, it is known that the renewal measure is proportional to the identity, i.e. $ \widehat{V}(y)\propto y$, for $y\ge 0$. Moreover, Brownian motion oscillates and satisfies Spitzer's condition {\bf (H1)} with $\rho=1/2$. Then, assuming that conditions {\bf (H2)} and {\bf (H3)} are fulfilled, we obtain that the CSBP in a Brownian environment satisfies \[ \mathbb{P}_z(Z_t>0) \sim c(z) t^{-1/2}\ell (t), \quad \text{as} \quad t \to \infty. \] In this particular case, we can compute the function $\ell$, i.e.
\[ \ell(t)=\int_1^\infty e^{-\frac{1}{2tu}}\frac{\mathrm{d} u}{\sqrt{2\pi u^3}},\qquad t> 0, \] which follows from the fact that the law of the infimum of a Brownian motion is given by \[ \mathbb{P}^{(e)}_1(I_t>0)=\int_t^\infty e^{-\frac{1}{2 w}}\frac{\mathrm{d} w}{\sqrt{2\pi w^3}}, \qquad t>0. \] \textit{b) Spectrally negative case.} If $\overline{K}$ is a spectrally negative L\'evy process, i.e. it has no positive jumps, then the renewal measure is given by the so-called scale function $W:[0,\infty)\to[0,\infty)$, which is defined as the unique continuous increasing function satisfying \[ \int_0^\infty e^{-\lambda x}W(x)\mathrm{d} x=\frac{1}{\phi(\lambda)} \qquad \textrm{for} \qquad \lambda \ge 0, \] where $\phi$ denotes the Laplace exponent of $\overline{K}$ which is given by $\phi(\lambda):=\log\mathbb{E}^{(e)}[e^{\lambda \overline{K}_1}]$ and satisfies the so-called L\'evy-Khintchine formula. In other words, we identify the renewal function $\widehat{V}$ with the scale function $W$ (i.e. $\widehat{V}\equiv W$). In this case, there is an interpretation of Spitzer's condition in terms of the Laplace exponent $\phi$. More precisely, from Proposition VII.6 in Bertoin \cite{bertoin1998levy}, the spectrally negative L\'evy process $\overline{K}$ satisfies Spitzer's condition with $\rho\in(0,1)$ if and only if its Laplace exponent $\phi$ is regularly varying at $0$ with index $1/\rho$. This proposition also mentions that in this case, $\rho$ is necessarily larger than $1/2$. Hence assuming that the Laplace exponent $\phi$ is regularly varying at $0$ with index $1/\rho$, Theorem~\ref{maintheo} holds under the assumption that the branching mechanism satisfies $\psi_0(\lambda)\ge C \lambda^{1+\beta}$, for some $\beta>0$, together with condition {\bf (H2)}. In the particular case where $\overline{K}$ is a spectrally negative stable process with index $\alpha\in (1,2)$, we have $W(x)=x^{\alpha-1}/\Gamma(\alpha)$, for $x\ge 0$, where $\Gamma$ denotes the so-called Gamma function.
Moreover, it satisfies Spitzer's condition with $\rho=1/\alpha$. \\ \textit{c) Stable case.} Assume that $\alpha\in(1,2)$ and that $\overline{K}$ is a stable L\'evy process with positivity parameter $\rho\in(0,1)$. It is known that the descending ladder height is a stable subordinator with parameter $\alpha(1-\rho)$ (see for instance Lemma VIII.1 in \cite{bertoin1998levy} and Section~\ref{sectionLevyCondi} for a proper definition of the descending ladder height) which implies that the renewal function $ \widehat{V}(x)$ is proportional to $x^{\alpha(1-\rho)}$, for $x>0$. Indeed, its Laplace transform satisfies \[ \int_0^\infty e^{-\lambda x}\widehat{V}(\mathrm{d} x)=\frac{1}{\lambda^{\alpha(1-\rho)}}, \quad \lambda>0. \] Hence, Theorem~\ref{maintheo} holds under the assumption that the branching mechanism satisfies $\psi_0(\lambda)\ge C \lambda^{1+\beta}$, for some $\beta>0$, together with condition {\bf (H2)}.\\ The remainder of the manuscript is organized as follows. In Section~\ref{sectionLevyCondi}, some preliminaries on fluctuation theory of L\'evy processes are introduced, as well as the definition of their conditioned version to stay positive. Moreover some useful properties of the latter are also studied. Section~\ref{SectionCSBPCondi} is devoted to the study of CSBPs in a conditioned random environment whose properties are needed for our purposes. The proof of the main result is provided in Section 4 and, finally, in Appendix \ref{appendix} we provide the proofs of Proposition \ref{martingquenched} as well as the non-explosivity of CSBPs in a L\'evy random environment. \section{Preliminaries} \label{sectionLevyCondi} In order to provide a precise description of the relationship between the survival probability of the process $Z$ and the behaviour of the running infimum of $\overline{K}$, the description of the L\'evy process $\overline{K}$ conditioned to stay positive is needed as well as the description of the process $Z$ under this conditioned random environment.
In this section, we introduce L\'evy processes conditioned to stay positive as well as some of their properties that we will use in the sequel. \subsection{L\'evy processes and fluctuation theory} Recall that $I_t=\inf_{0\le s\le t}\overline{K}_s$, for $t\ge 0$, and that the reflected process $\overline{K}-I$ is a Markov process with respect to the filtration $ (\mathcal{F}^{(e)}_t)_{t\ge 0}$ and whose semigroup satisfies the Feller property. It is important to note that the same properties are satisfied by the reflected process at its running supremum $S-\overline{K}$, where $S_t=\sup_{0\le s\le t}\overline{K}_s$, since the dual process $-\overline{K}$ is also a L\'evy process satisfying that for any fixed time $t>0$, the processes \[ (\overline{K}_{(t-s)}-\overline{K}_{t}, 0\le s\le t)\qquad \textrm{and}\qquad (-\overline{K}_s, 0\le s\le t), \] have the same law. We also recall that $\widehat{L}$ denotes the local time of the reflected process $\overline{K}-I$ at $0$ in the sense of Chapter IV in \cite{bertoin1998levy}. Similarly, we denote by $L$ the local time at $0$ of $S-\overline{K}$. If $0$ is regular for $(-\infty,0)$ or regular downwards, i.e. \[ \mathbb{P}^{(e)}(\tau^-_0=0)=1, \] where $\tau^{-}_0=\inf\{s> 0: \overline{K}_s\le 0\}$, then $0$ is regular for the reflected process $\overline{K}-I$ and then, up to a multiplicative constant, $\widehat{L}$ is the unique additive functional of the reflected process whose set of increasing points is $\{t:\overline{K}_t=I_t\}$. If $0$ is not regular downwards then the set $\{t:\overline{K}_t=I_t\}$ is discrete and we define the local time $\widehat{L}$ as the counting process of this set. Let us denote by $\widehat{L}^{-1}$ the inverse local time and introduce the so-called descending ladder height process $\widehat{H}$ which is defined by \begin{equation}\label{defwidehatH} \widehat{H}_t=-I_{\widehat{L}_t^{-1}}, \qquad t\ge 0.
\end{equation} The pair $(\widehat{L}^{-1}, \widehat{H})$ is a bivariate subordinator, as is $({L}^{-1}, {H})$ where \[ {H}_t=S_{{L}_t^{-1}}, \qquad t\ge 0. \] Both pairs are known as descending and ascending ladder processes, respectively. The Laplace transform of the descending ladder process $(\widehat{L}^{-1}, \widehat{H})$ is such that for $ \theta,\lambda \geq 0$, \begin{equation} \label{sub} \mathbb{E}^{(e)}\left[\exp\left\{-\theta \widehat{L}^{-1}_t-\lambda\widehat{H}_t \right\}\right]=\exp\left\{-t \widehat{\kappa}(\theta, \lambda)\right\},\qquad t\ge 0, \end{equation} writing $\widehat{\kappa}(\cdot,\cdot)$ for its bivariate Laplace exponent ($\kappa(\cdot, \cdot)$ for that of the ascending ladder process) which has the form \[ \widehat{\kappa}(\theta,\lambda)=\widehat{\delta}\theta+\widehat{\mathbf{d}}\lambda+\int_{(0,\infty)^2}\Big(1-e^{-(\theta x+\lambda y)}\Big)\widehat{\mu}(\mathrm{d} x, \mathrm{d} y), \] with $\widehat{\delta}, \widehat{\mathbf{d}}\ge 0$ and \[ \int_{(0,\infty)^2}(x\land 1)(y\land 1)\widehat{\mu}(\mathrm{d} x, \mathrm{d} y)<\infty. \] Notice that both $(\widehat{L}^{-1}, \widehat{H})$ and $({L}^{-1}, {H})$ have no killing terms, since we are assuming that the process $\overline{K}$ oscillates. Implicitly, the Laplace exponent of $\widehat{H}$ satisfies \begin{equation}\label{defwidehatkappa} \widehat{\kappa}(0,\lambda)=\widehat{\mathbf{d}}\lambda +\int_{(0,\infty)} \Big(1-e^{-\lambda y}\Big)\widehat{\eta}(\mathrm{d} y), \end{equation} where $\widehat{\eta}(B)=\widehat{\mu}((0,\infty), B)$ for $B\in\mathcal{B}((0,\infty))$.
An interesting connection between the distribution of the ladder processes and that of $\overline{K}$ is given by the Wiener-Hopf factorisation \begin{equation}\label{WHfactors} \mathbb{E}^{(e)}\Big[e^{i\theta \overline{K}_{\mathbf{e}_q}}\Big]=\mathbb{E}^{(e)}\Big[e^{i\theta S_{\mathbf{e}_q}}\Big]\mathbb{E}^{(e)}\Big[e^{i\theta I_{\mathbf{e}_q}}\Big], \end{equation} where $\mathbf{e}_q$ denotes an exponential random variable with parameter $q\ge 0$ which is independent of $\overline{K}$, \[ \mathbb{E}^{(e)}\Big[e^{i\theta S_{\mathbf{e}_q}}\Big]=\frac{\kappa(q, 0)}{\kappa(q, -i\theta)} \qquad \textrm{and} \qquad\mathbb{E}^{(e)}\Big[e^{i\theta I_{\mathbf{e}_q}}\Big]=\frac{\widehat{\kappa}(q, 0)}{\widehat{\kappa}(q, i\theta)}. \] We refer to Chapter VI in Bertoin \cite{bertoin1998levy} or Chapter 4 of Doney \cite{doney2007fluctuation} for further details on the descending ladder processes $(\widehat{H}, \widehat{L})$ as well as for the Wiener-Hopf factorisation. Next, we consider the renewal function $\widehat{V}$ which was defined in \eqref{renewalfct}. It is known that $\widehat{V}$ is a finite, continuous, increasing and subadditive function on $[0,\infty)$ satisfying \begin{equation} \label{grandO} \widehat{V}(x)\leq C_1 x,\quad \text{ for any } \quad x\geq 0, \end{equation} where $C_1$ is a finite constant (see for instance Lemma 6.4 and Section 8.2 in the monograph of Doney \cite{doney2007fluctuation}). Moreover $\widehat{V}(0)=0$ if $0$ is regular downwards and $ \widehat{V}(0)=1$ otherwise. By a simple change of variables we can relate the definition of the renewal function $\widehat{V}$ and the descending ladder height $\widehat{H}$. Indeed, the measure induced by $\widehat{V}$ can be rewritten as follows, \[ \widehat{V}(\mathrm{d} x)=\mathbb{E}^{(e)}\left[\int_{0}^\infty\mathbf{1}_{\{\widehat{H}_t \in \mathrm{d} x\}}\mathrm{d} t\right].
\] Roughly speaking, the renewal function $ \widehat{V}(x)$ ``measures'' the amount of time that the descending ladder height process spends on the interval $[0,x]$ and in particular induces a measure on $[0,\infty)$ which is known as the renewal measure. The latter implies \begin{equation}\label{LaplaceV} \int_{[0,\infty)}e^{-\lambda x}{\widehat{V}}(\mathrm{d} x)=\int_{0}^\infty \mathbb{E}^{(e)}\left[e^{-\lambda \widehat{H}_t } \right]\mathrm{d} t=\frac{1}{\widehat{\kappa}(0,\lambda)}, \qquad \textrm{for}\quad \lambda \ge 0. \end{equation} Similarly, we introduce the renewal function $ V$ associated with the ascending ladder height induced by \begin{equation}\label{ascren} V(\mathrm{d} x)=\mathbb{E}^{(e)}\left[\int_{0}^\infty\mathbf{1}_{\{{H}_t \in \mathrm{d} x\}}\mathrm{d} t\right], \end{equation} which is also a finite, continuous, increasing and subadditive function on $[0,\infty)$ such that $V(0)=0$ if $0$ is regular upwards and $ V(0)=1$ otherwise. \subsection{L\'evy processes conditioned to stay positive} Let us define the probability $\mathbb{Q}_{x}$ associated to the L\'evy process $\overline{K}$ started at $x>0$ and killed at time $\zeta$ when it first enters $(-\infty, 0)$, that is to say $$ \mathbb{Q}_{x}\Big[f(\overline{K}_t)\mathbf{1}_{\{\zeta>t\}}\Big]:= \mathbb{E}^{(e)}_{x}\Big[f(\overline{K}_t)\mathbf{1}_{\{I_t> 0\}}\Big], $$ where $f:\mathbb{R}_+\to \mathbb{R}$ is measurable. According to Lemma 1 in Chaumont and Doney \cite{chaumont2005levy}, under the assumption that $\overline{K}$ does not drift towards $-\infty$, we have that the renewal function $\widehat{V}$ is invariant for the killed process. In other words, for all $x> 0$ and $t\ge 0$, \begin{equation} \label{fctharm} \mathbb{Q}_x\left[\widehat{V}(\overline{K}_t)\mathbf{1}_{\{\zeta>t\}}\right]=\mathbb{E}^{(e)}_x\left[ \widehat{V}(\overline{K}_t)\mathbf{1}_{\{I_t> 0\}}\right]=\widehat{V}(x).
\end{equation} We now recall the definition of L\'evy processes conditioned to stay positive as a Doob-$h$ transform. Before doing so, let us recall that $\overline{K}$ is adapted to the filtration $(\mathcal{F}^{(e)}_t)_{t\ge 0}$. Under the assumption that $\overline{K}$ does not drift towards $-\infty$, the law of the process $\overline{K}$ conditioned to stay positive is defined as follows, for $\Lambda \in \mathcal{F}^{(e)}_t$ and $x>0$, \begin{equation} \label{defPuparrowx} \mathbb{P}^{(e),\uparrow}_{x} (\Lambda) :=\frac{1}{ \widehat{V}(x)}\mathbb{E}^{(e)}_{x}\left[ \widehat{V}(\overline{K}_t) \mathbf{1}_{\{I_t > 0\}} \mathbf{1}_{\Lambda}\right]. \end{equation} The term \textit{conditioned to stay positive} in definition \eqref{defPuparrowx} is justified from the following convergence result due to Chaumont \cite{chaumont1996conditionings} (see also Remark 1 in the aforementioned paper as well as Chaumont and Doney \cite{chaumont2005levy}) that we recall here in the particular case when the process $\overline{K}$ fulfills Spitzer's condition {\bf (H1)}. \begin{lemma}\label{thmcondpos} Assume that Spitzer's condition {\bf (H1)} is fulfilled. Then, for all $x>0$, $t \geq 0$ and $\Lambda \in \mathcal{F}^{(e)}_t$, $$ \lim_{s \to \infty} \mathbb{P}^{(e)}_x(\Lambda | \overline{K}_u>0, 0 \leq u \leq s)=\mathbb{P}_x^{(e),\uparrow}(\Lambda). $$ \end{lemma} The following inequality is also important for our purposes. Recall that $\widehat{\kappa}$ denotes the Laplace exponent of the descending ladder process (see identity \eqref{sub}) and that $\tau^{-}_0 = \inf \{ s > 0 : \overline{K}_s \leq 0 \}$. \begin{lemma} For $x>0$, we have \begin{equation}\label{majP} \mathbb{P}^{(e)}_x(\tau^{-}_0 >t)\le 2e\widehat{\kappa}(1/t,0) \widehat{V}(x), \qquad \textrm{for}\quad t>0.
\end{equation} \end{lemma} \begin{proof} We first observe that the following series of inequalities holds for $ q, t>0$, \[ \frac{t}{2}e^{-q t}\mathbb{P}^{(e)}_x(\tau^{-}_0 >t)\le \int_{t/2}^t e^{-qs}\mathbb{P}^{(e)}_x(\tau^{-}_0>s) \mathrm{d} s\le \int_{0}^\infty e^{-qs}\mathbb{P}^{(e)}_x(\tau^{-}_0>s)\mathrm{d} s. \] From the Wiener-Hopf factorization \eqref{WHfactors}, we have \[ \mathbb{E}^{(e)}\Big[e^{\theta I_{\mathbf{e}_q}}\Big]=\frac{\widehat{\kappa}(q, 0)}{\widehat{\kappa}(q, \theta)}, \] where $\mathbf{e}_q$ is an exponential random variable with parameter $q>0$, which is independent of $\overline{K}$. Hence, by a classical identity on tail distribution using Fubini's theorem, we deduce \[ \begin{split} \frac{\widehat{\kappa}(q, 0)}{\widehat{\kappa}(q, \theta)}&=\int_0^\infty e^{-\theta x}\mathbb{P}^{(e)}(-I_{\mathbf{e}_q}\in \mathrm{d} x)\\ & =\theta\int_0^\infty e^{-\theta x}\mathbb{P}^{(e)}(I_{\mathbf{e}_q}> -x)\mathrm{d} x=\theta\int_0^\infty e^{-\theta x}\mathbb{P}^{(e)}_x(\tau^{-}_0> \mathbf{e}_q)\mathrm{d} x. \end{split} \] Next, for every $q>0$, we consider the function given by \[ \widehat{V}^{(q)}(x):=\mathbb{E}^{(e)}\left[\int_0^\infty e^{-q \widehat{L}^{-1}_t} \mathbf{1}_{\{\widehat{H}_t\le x\}}\mathrm{d} t\right]. \] Performing a straightforward computation and using identity \eqref{sub}, we deduce \[ \theta \int_0^\infty e^{-\theta x}\widehat{V}^{(q)}(x)\mathrm{d} x=\mathbb{E}^{(e)}\left[\int_0^\infty \exp\left\{-q \widehat{L}^{-1}_t-\theta \widehat{H}_t \right\}\mathrm{d} t \right]=\frac{1}{\widehat{\kappa}(q, \theta)}. \] The latter implies \[ q\int_0^\infty e^{-q s}\mathbb{P}^{(e)}_x(\tau^{-}_0>s)\mathrm{d} s=\widehat{\kappa}(q,0)\widehat{V}^{(q)}(x). \] We thus deduce for $t, q> 0$, that \[ \frac{t}{2}e^{-q t}\mathbb{P}^{(e)}_x(\tau^{-}_0>t)\le \frac{\widehat{\kappa}(q,0)}{q}\widehat{V}^{(q)}(x)\le \frac{\widehat{\kappa}(q,0)}{q}\widehat{V}(x).
\] Taking $q=1/t$ yields \eqref{majP}, and completes the proof. \end{proof} \section{CSBP in a conditioned random environment} \label{SectionCSBPCondi} \subsection{Definition and first properties} Similarly to the definition of L\'evy processes conditioned to stay positive \cite{chaumont2005levy} and following a similar strategy as in the discrete framework in Afanasyev et al. \cite{Afanasyev2005}, we would like to introduce a CSBP in a L\'evy environment conditioned to stay positive as a Doob-$h$ transform. In order to do so, we first observe that $( \widehat{V}(\overline{K}_t) \mathbf{1}_{\{I_t > 0\}}, t\ge0)$ is also a martingale with respect to $(\mathcal{F}_t)_{t\ge 0}$, under $\mathbb{P}$. This result is more or less clear since it is a martingale under $\mathbb{P}^{(e)}$. Nonetheless we provide its proof for the sake of completeness. Recall that $\mathbb{P}_{(z,x)}$ (resp. $\mathbb{E}_{(z,x)}$ its expectation) denotes the law of the couple $(Z,\overline{K})$ started from $(z,x)$ where $z,x>0$, under $\mathbb{P}$. \begin{lemma} Let us assume that $z,x>0$. The process $( \widehat{V}(\overline{K}_t) \mathbf{1}_{\{I_t > 0\}}, t\ge0)$ is a martingale with respect to $(\mathcal{F}_t)_{t\ge 0}$, under $\mathbb{P}_{(z,x)}$. \end{lemma} \begin{proof} Let $s\ge 0$ and $A \in \mathcal{F}_s$. We first claim that $\mathbb{P}(A| K)$ is a $\mathcal{F}^{(e)}_s$-measurable r.v.
Indeed, since the family of sets \[ \mathcal{C}_s=\{F_b\times F_e: F_b\in \mathcal{F}^{(b)}_s, F_e\in \mathcal{F}^{(e)}_s\}, \] is a $\pi$-system that generates $\mathcal{F}_s$, we deduce that for any $D\in \mathcal{C}_s$ such that $D=B\times C$ with $B\in \mathcal{F}^{(b)}_s$ and $C\in\mathcal{F}^{(e)}_s$, the following identity holds \[ \mathbb{P}(D|K)=\mathbf{1}_{C}\mathbb{P}(B|K)=\mathbf{1}_C\mathbb{P}^{(b)}(B), \] where in the last identity we have used that $B$ is independent of the environment and that $\mathbb{P}^{(b)}$ is the projection of $\mathbb{P}$ on $\Omega^{(b)}$. A monotone class argument allows us to conclude our claim. Next, we assume $s\le t$ and take $A\in \mathcal{F}_s$. By conditioning on the environment and recalling that $\mathbb{P}^{(e)}$ is the projection of $\mathbb{P}$ on $\Omega^{(e)}$, we observe \[ \begin{split} \mathbb{E}_{(z,x)}\left[ \widehat{V}(\overline{K}_t) \mathbf{1}_{\{I_t > 0\}}\mathbf{1}_{A}\right]&=\mathbb{E}_{(z,x)}\left[\widehat{V}(\overline{K}_t) \mathbf{1}_{\{I_t > 0\}}\mathbb{P}_{(z, x)}(A|K)\right]\\ &=\mathbb{E}^{(e)}_{x}\left[ \widehat{V}(\overline{K}_t) \mathbf{1}_{\{I_t > 0\}}\mathbb{P}_{(z, x)}(A|K)\right]. \end{split} \] Let us now introduce the process $\widetilde{K}$ via $\widetilde{K}_u:=\overline{K}_{u+s}-\overline{K}_s$, for $u\ge 0$, which is independent of $\mathcal{F}^{(e)}_s$ and has the same law as $\overline{K}$. We also define its running infimum up to time $t$ by $\widetilde{I}_t$, i.e. \[ \widetilde{I}_t=\inf_{0\le u\le t}\widetilde{K}_u.
\] By taking $\mathfrak{P}$ an $\mathcal{F}^{(e)}_s$-measurable random variable, we deduce by conditioning on $\mathcal{F}^{(e)}_s$ and from identity \eqref{fctharm}, that \[ \begin{split} \mathbb{E}^{(e)}_{x}\left[ \widehat{V}(\overline{K}_t) \mathbf{1}_{\{I_t > 0\}}\mathfrak{P}\right]&=\mathbb{E}^{(e)}_{x}\left[ \widehat{V}(\overline{K}_s+\widetilde{K}_{t-s}) \mathbf{1}_{\{\widetilde{I}_{t-s}+ \overline{K}_s > 0\}}\mathbf{1}_{\{I_s > 0\}}\mathfrak{P}\right]\\ &=\mathbb{E}^{(e)}_{x}\left[\mathfrak{P}\mathbf{1}_{\{I_s> 0\}}\mathbb{E}^{(e)}_{\overline{K}_s}\left[\widehat{V}(\widetilde{K}_{t-s}) \mathbf{1}_{\{\widetilde{I}_{t-s}> 0\}}\right]\right]\\ &=\mathbb{E}^{(e)}_{x}\left[\mathfrak{P}\mathbf{1}_{\{I_s > 0\}} \widehat{V}(\overline{K}_s)\right]. \end{split} \] Putting all pieces together, we obtain \[ \begin{split} \mathbb{E}_{(z,x)}\left[\widehat{V}(\overline{K}_t) \mathbf{1}_{\{I_t > 0\}}\mathbf{1}_{A}\right]&=\mathbb{E}^{(e)}_{x}\left[\widehat{V}(\overline{K}_s) \mathbf{1}_{\{I_s > 0\}}\mathbb{P}_{(z, x)}(A|K)\right]\\ &=\mathbb{E}_{(z,x)}\left[ \widehat{V}(\overline{K}_s) \mathbf{1}_{\{I_s > 0\}}\mathbb{P}_{(z, x)}(A|K)\right]\\ &=\mathbb{E}_{(z,x)}\left[ \widehat{V}(\overline{K}_s) \mathbf{1}_{\{I_s > 0\}}\mathbf{1}_A\right], \end{split} \] which allows us to conclude that the process $( \widehat{V}(\overline{K}_t) \mathbf{1}_{\{I_t > 0\}}, t\ge0)$ is a martingale with respect to $(\mathcal{F}_t)_{t\ge 0}$, under $\mathbb{P}_{(z,x)}$. \end{proof} From the previous result, we construct the law of a CSBP in a L\'evy environment conditioned to stay positive as a Doob-$h$ transform. To be more precise, for $\Lambda \in \mathcal{F}_t$ and $x,z>0$, we define \begin{equation} \label{defPuparrowzx} \mathbb{P}^{\uparrow}_{(z,x)} (\Lambda):=\frac{1}{ \widehat{V}(x)}\mathbb{E}_{(z,x)}\left[ \widehat{V}(\overline{K}_t) \mathbf{1}_{\{I_t > 0\}} \mathbf{1}_{\Lambda}\right].
\end{equation} Similarly as in Lemma \ref{thmcondpos}, the term \textit{L\'evy environment conditioned to stay positive} in definition \eqref{defPuparrowzx} is justified from the following convergence result, which is crucial to prove Theorem~\ref{maintheo}. \begin{lemma} \label{lemme25AGKV} Assume that Spitzer's condition {\bf (H1)} holds and let $z,x>0$. For $t \geq 0$ and $\Lambda \in \mathcal{F}_t$, we have $$ \lim_{s \to \infty} \mathbb{P}_{(z,x)}(\Lambda | \overline{K}_u>0, 0 \leq u \leq s)=\mathbb{P}_{(z,x)}^{\uparrow}(\Lambda). $$ Moreover if $(G_t, t \geq 0)$ is a uniformly bounded process which is adapted to $(\mathcal{F}_t)_{t\ge 0}$ and such that it converges to $G_\infty,$ as $t \to \infty$, $\mathbb{P}^{\uparrow}_{(z,x)} $-almost surely, then $$ \lim_{t \to \infty}\mathbb{P}_{(z,x)} \big[ G_t \big| \overline{K}_u>0, 0 \leq u \leq t\big]=\mathbb{P}^{\uparrow}_{(z,x)} \big[G_\infty\big]. $$ \end{lemma} \begin{proof} We proceed similarly as in Proposition 1 in \cite{chaumont2005levy}. Let $ h, t\ge 0$ and take $\Lambda \in \mathcal{F}_t$. Then from the Markov property at time $t$, we obtain \begin{equation}\label{exparrow} \mathbb{P}_{(z,x)}( \Lambda |I_{t+h}> 0 ) = \mathbb{E}_{(z,x)} \left[ \mathbf{1}_\Lambda \frac{\mathbb{P}^{(e)}_{\overline{K}_t}(I_h> 0)}{\mathbb{P}^{(e)}_{x}(I_{t+h}> 0)}\mathbf{1}_{\{I_t > 0\}}\right]. \end{equation} From inequality \eqref{majP}, we see \[ \frac{\mathbb{P}^{(e)}_{\overline{K}_t}(I_h> 0)}{\mathbb{P}^{(e)}_{x}(I_{t+h}> 0)}\mathbf{1}_{\{I_t > 0\}}\leq 2e \frac{\widehat{\kappa}\Big(h^{-1}, 0\Big)}{\mathbb{P}^{(e)}_x(\tau^-_0> t+h)} \widehat{V}(\overline{K}_t)\mathbf{1}_{\{I_t > 0\}}. \] On the other hand from Spitzer's condition, we know that $\widehat{\kappa}(\cdot, 0)$ is regularly varying at $0+$ with index $1-\rho$ and $ \mathbb{P}^{(e)}_x(\tau^-_0>\cdot)$ is also regularly varying with index $\rho-1$ at $\infty$.
Moreover, there is a slowly varying function $\ell(\cdot)$ at $\infty$ such that \begin{equation} \label{asymp_kappa_hat} \widehat{\kappa}(q,0) \sim\frac{\Gamma(1+\rho)}{\rho} \ell(1/q)q^{1-\rho}, \quad \text{as} \quad q \to 0, \end{equation} and \[ \mathbb{P}^{(e)}_x(\tau^-_0> t)\sim \widehat{V}(x)t^{\rho-1}\ell (t),\quad \text{as} \quad t \to \infty, \] see for instance the proof of Theorem VI.18 in \cite{bertoin1998levy}. Therefore from Potter's Theorem (see Theorem 1.5.6 in Bingham et al. \cite{bingham1989regular}) for any $C_2>1$ and $\delta>0$ there exists $M$ such that for $h\ge M$, \[ \frac{\mathbb{P}^{(e)}_{\overline{K}_t}(I_h> 0)}{\mathbb{P}^{(e)}_{x}(I_{t+h}> 0)}\mathbf{1}_{\{I_t > 0\}}\leq C(\rho) \left(1+\frac{t}{M}\right)^{1-\rho+\delta} \frac{\widehat{V}(\overline{K}_t)}{\widehat{V}(x)}\mathbf{1}_{\{I_t > 0\}}, \] with \begin{equation}\label{csterho} C(\rho)=\frac{2e \Gamma(1+\rho)}{\rho} C_2. \end{equation} Since $\mathbb{E}_{(z,x)}[\widehat{V}(\overline{K}_t)\mathbf{1}_{\{I_t > 0\}}]=\widehat{V}(x)$ and $\widehat{V}$ is finite, we may apply Lebesgue's dominated convergence theorem on the right-hand side of \eqref{exparrow} when $h$ goes to $\infty$. We conclude from the asymptotic \eqref{limitv} and the definition \eqref{defPuparrowzx}. For the second part of our statement, we use similar arguments as those used in Lemma 2.5 in \cite{Afanasyev2005}.
We let $s \leq t$ and $\gamma \in (1,2]$ and apply the Markov property at time $t$ and inequality \eqref{majP}, to deduce that \begin{eqnarray*} \left|\mathbb{E}_{(z,x)}\Big[G_t - G_s \Big|I_{\gamma t}> 0 \Big]\right| & \leq & \mathbb{E}_{(z,x)} \left[ \Big|G_t - G_s\Big| \frac{\mathbb{P}^{(e)}_{\overline{K}_t}(I_{(\gamma-1)t}> 0)}{\mathbb{P}^{(e)}_{x}(I_{\gamma t}> 0)}\mathbf{1}_{\{I_t > 0\}}\right]\\ & \leq & 2e \frac{\widehat{\kappa}\Big(\frac{1}{(\gamma-1)t}, 0\Big)}{\mathbb{P}^{(e)}_x(\tau^-_0>\gamma t)} \mathbb{E}_{(z,x)} \Big[ |G_t - G_s| \widehat{V}(\overline{K}_t)\mathbf{1}_{\{I_t > 0\}}\Big]\\ & = & 2e \frac{\widehat{\kappa}\Big(\frac{1}{(\gamma-1)t}, 0\Big) \widehat{V}(x)}{\mathbb{P}^{(e)}_x(\tau^-_0>\gamma t)} \mathbb{E}_{(z,x)}^\uparrow \Big[|G_t - G_s|\Big]. \end{eqnarray*} Again Potter's Theorem (see Theorem 1.5.6 in Bingham et al. \cite{bingham1989regular}) guarantees that for any $C_2>1$ and $\delta>0$ there exists $M$ such that for $t\ge M$, \[ \begin{split} & \left|\mathbb{E}_{(z,x)}\Big[G_t - G_s \Big|I_{\gamma t}> 0 \Big]\right|\\ &\hspace{3cm}\le C(\rho)\max\left\{\left( \frac{\gamma}{\gamma-1} \right)^{\delta+{1-\rho}}, \left( \frac{\gamma}{\gamma-1} \right) ^{-\delta+1-\rho}\right\}\mathbb{E}_{(z,x)}^\uparrow \Big[|G_t - G_s|\Big], \end{split} \] where $C(\rho)$ is defined in \eqref{csterho}. Let $\eps>0$. As $(G_t, t \geq 0)$ is a uniformly bounded process which converges to $G_\infty,$ as $t \to \infty$, $\mathbb{P}^{\uparrow}_{(z,x)} $-almost surely, there exists $A_\eps >0$ such that for any $A_\eps \leq s \leq t$, $$ C(\rho)\max\left\{\left( \frac{\gamma}{\gamma-1} \right)^{\delta+{1-\rho}}, \left( \frac{\gamma}{\gamma-1} \right) ^{-\delta+1-\rho}\right\}\mathbb{E}_{(z,x)}^\uparrow \Big[|G_t - G_s|\Big] \leq \eps$$ and \begin{equation} \label{uti_conv} \left| \mathbb{E}_{(z,x)}^\uparrow [G_s]-\mathbb{E}_{(z,x)}^\uparrow [G_\infty] \right|\leq \eps.
\end{equation} Hence for any $s \geq A_\eps$, letting $t$ go to infinity and applying the first statement of this lemma, we get $$\limsup_{t \to \infty}\left| \frac{\mathbb{E}_{(z,x)}\left[G_t \mathbf{1}_{\{I_{\gamma t}>0 \}}\right]}{\mathbb{P}^{(e)}_{x}(I_{\gamma t}> 0)}-\mathbb{E}_{(z,x)}^\uparrow [G_s] \right|\leq \eps.$$ Adding \eqref{uti_conv}, we get $$\mathbb{E}_{(z,x)}\left[G_t \mathbf{1}_{\{I_{\gamma t}>0 \}} \right]=\Big(\mathbb{E}_{(z,x)}^\uparrow[G_\infty]+o(1)\Big)\mathbb{P}^{(e)}_{x}(I_{\gamma t}> 0). $$ Thus, there exists a constant $C_3>0$ such that \[ \begin{split} \Big|\mathbb{E}_{(z,x)}[G_t\mathbf{1}_{\{I_t > 0\}}]- \mathbb{E}_{(z,x)}^\uparrow[G_\infty]\mathbb{P}^{(e)}_{x}(I_{ t}> 0)\Big|&\le C_3 \mathbb{P}^{(e)}_{x}(I_t > 0, I_{\gamma t} \le 0)\\ &+ \Big|\mathbb{E}_{(z,x)}[G_t\mathbf{1}_{\{I_{\gamma t} >0\}}]- \mathbb{E}_{(z,x)}^\uparrow[G_\infty]\mathbb{P}^{(e)}_{x}(I_{\gamma t}> 0)\Big|\\ &\leq \Big(o(1)+c(1-\gamma^{\rho -1})\Big)\mathbb{P}^{(e)}_{x}(I_t > 0), \end{split} \] where we applied the asymptotic in \eqref{infimumspitzer} for the second inequality. Note that since $\ell$ in \eqref{infimumspitzer} is slowly varying and $\gamma \in (1,2]$, we can choose $c$ independent from $\gamma$ (see again Theorem 1.5.6 in Bingham et al. \cite{bingham1989regular}). Since the choice of $\gamma$ on $(1,2]$ was arbitrary, we finally obtain $$\mathbb{E}_{(z,x)}[G_t\mathbf{1}_{\{I_t > 0\}}]- \mathbb{E}_{(z,x)}^\uparrow[G_\infty]\mathbb{P}^{(e)}_{x}(I_t> 0)= o(\mathbb{P}^{(e)}_{x}(I_t > 0)) ,$$ which completes the proof. \end{proof} \subsection{Non-absorption} In this section, we are interested in the event of survival of the process $Z$ under the conditioned environment.
To estimate the latter, we first compute the probability of the event of extinction at a given time, under the conditioned environment, and then we will observe that such a probability is strictly positive if and only if Grey's condition \eqref{GreysCond} is fulfilled. It is important to note that the latter statement can be deduced directly from Theorem 4.1 in \cite{he2016continuous} but actually, in this case its proof is rather simple and for completeness we decide to include it. Recall from Proposition 2 in \cite{PP1} (or the comments after Theorem 1) that there exists a functional $v_t(s,\lambda, \overline{K})$ which is the $\mathbb{P}^{(e)}$-a.s. unique solution of the backward differential equation given in \eqref{backward} and satisfies \begin{equation}\label{defvtslambdaK} \begin{split} \mathbb{E}_{(z, 0)}\Big[\exp\Big\{-\lambda e^{-x} Z_t e^{-\overline{K}_t}\Big\}\Big|\mathcal{F}^{(e)}_t\Big]&=\exp\Big\{-zv_t(0,\lambda e^{-x},\overline{K})\Big\}. \end{split} \end{equation} A similar identity holds for CSBPs in a L\'evy environment conditioned to stay positive as we see below. \begin{proposition}\label{posi_CSBP1} For $x,z> 0$ and $\lambda\ge 0$, we have \[ \mathbb{E}_{(z,x)}^\uparrow \left[ e^{-\lambda Z_t e^{-\overline{K}_t}} \right] =\mathbb{E}^{(e), \uparrow}_x \left[ e^{- zv_t(0,\lambda e^{-K_0},\overline{K}-K_0) } \right]. \] In particular, $$ \mathbb{P}^\uparrow_{(z,x)} (Z_t=0) = \mathbb{E}^{(e), \uparrow}_x \left[ e^{-z v_t(0,\infty,\overline{K}-K_0) } \right], \qquad \textrm{for}\quad t> 0,$$ which is strictly positive if and only if Grey's condition \eqref{GreysCond} is satisfied. \end{proposition} \begin{proof} Let $x,z> 0$.
From the definition of CSBPs in a L\'evy environment conditioned to stay positive \eqref{defPuparrowzx}, we deduce that for every non-negative $\lambda$, \[ \begin{split} \mathbb{E}_{(z,x)}^\uparrow \left[ e^{-\lambda Z_t e^{-\overline{K}_t}} \right] &=\frac{1}{ \widehat{V}(x)}\mathbb{E}_{(z,x)} \left[ \widehat{V}(\overline{K}_t)e^{-\lambda Z_t e^{-\overline{K}_t}} \mathbf{1}_{\{I_t > 0\}}\right] \\ &=\frac{1}{ \widehat{V}(x)}\mathbb{E}_{(z,0)} \left[ \widehat{V}(\overline{K}_t+x)e^{-\lambda e^{-x} Z_t e^{-\overline{K}_t}} \mathbf{1}_{\{I_t > -x\}}\right] \\ &=\frac{1}{ \widehat{V}(x)}\mathbb{E}_{(z,0)} \left[ \widehat{V}(\overline{K}_t+x)\mathbf{1}_{\{I_t > -x\}}\mathbb{E}_{(z,0)}\Big[e^{-\lambda e^{-x} Z_t e^{-\overline{K}_t}} \Big| \mathcal{F}^{(e)}_t\Big]\right] \\ &=\frac{1}{ \widehat{V}(x)} \mathbb{E}_{(z,0)} \left[ \widehat{V}(\overline{K}_t+x)\mathbf{1}_{\{I_t > -x\}}e^{-z v_t(0,\lambda e^{-x},\overline{K})} \right]\\ &=\frac{1}{ \widehat{V}(x)} \mathbb{E}_{(z,x)} \left[ \widehat{V}(\overline{K}_t)\mathbf{1}_{\{I_t > 0\}}e^{-z v_t(0,\lambda e^{-K_0},\overline{K}-K_0)} \right]\\ &= \mathbb{E}^{(e), \uparrow}_x \left[ e^{- zv_t(0,\lambda e^{-K_0},\overline{K}-K_0) } \right]. \end{split} \] By letting $\lambda$ go to infinity, we get $$ \mathbb{P}^\uparrow_{(z,x)} (Z_t=0) = \mathbb{E}^{(e), \uparrow}_x\left[ e^{-z v_t(0,\infty,\overline{K}-K_0) } \right]. $$ From the previous identity, it is clear that \[ 0<\mathbb{P}^\uparrow_{(z,x)} (Z_t=0) \qquad \textrm{if and only if} \qquad \mathbb{P}^{(e), \uparrow}_x\left( v_t(0,\infty,\overline{K}-K_0)<\infty\right)>0. \] Therefore, in order to deduce the last statement it is enough to show that Grey's condition \eqref{GreysCond} is necessary and sufficient for $\mathbb{P}^{(e), \uparrow}_x(v_t(0,\infty,\overline{K}-K_0)<\infty)>0$.
We first observe from the Wiener-Hopf factorisation \eqref{WHfactors} applied to the spectrally positive L\'evy process associated to the branching mechanism $\psi_0$, that there exists a non decreasing function $\Phi$ (which is associated to its ascending ladder height) satisfying, \begin{equation}\label{WHbranching} \psi_0(\lambda)=\lambda \Phi(\lambda)\qquad \textrm{for} \quad \lambda\ge 0. \end{equation} More precisely, from \eqref{defpsi}, \eqref{psi0} and integration by parts, we have \begin{equation}\label{def_Phi} \begin{split} \Phi(\lambda)= \gamma^2 \lambda +\int_{(0,\infty)}\frac{ e^{-\lambda x}-1 + \lambda x }{\lambda} \mu(\mathrm{d} x)\\ =\gamma^2 \lambda +\int_{(0,\infty)} \Big(1-e^{-\lambda x}\Big) \overline{\mu}( x)\mathrm{d} x, \end{split} \end{equation} where $\overline{\mu}( x):=\mu((x, \infty))$. Since $\Phi$ is the Laplace exponent of a subordinator, it is well-known that for any $\lambda>0$ and $k>1$, we have $\Phi(\lambda)\le k\Phi(\lambda/k)$ (see for instance the proof of Proposition III.1 in \cite{bertoin1998levy}). In particular, from \eqref{backward} and under the event that $\{t< \tau_{-x}^-\}$, we have \begin{align*} \frac{\partial}{\partial s} v_t(s,\lambda e^{-x},\overline{K}-x)&=e^{\overline{K}_s-x}\psi_0(e^{-\overline{K}_s+x}v_t(s,\lambda e^{-x},\overline{K}-x)) \\ &= v_t(s,\lambda e^{-x},\overline{K}-x)\Phi(e^{-\overline{K}_s+x}v_t(s,\lambda e^{-x},\overline{K}-x))\\ &\geq e^{-\overline{K}_s+x}\psi_0(v_t(s,\lambda e^{-x},\overline{K}-x)). \end{align*} This entails $$ \int_{v_t(0,\lambda e^{-x},\overline{K}-x)}^{\lambda e^{-x}}\frac{\mathrm{d} u}{\psi_0(u)} \geq \int_0^t e^{-\overline{K}_s +x} \mathrm{d} s.
$$ Assuming that \eqref{GreysCond} holds, we deduce \begin{equation}\label{tomate} \int_{v_t(0,\infty,\overline{K}-x)}^{\infty}\frac{\mathrm{d} u}{\psi_0(u)} \geq \int_0^t e^{-\overline{K}_s+x} \mathrm{d} s, \end{equation} which clearly implies that $v_t(0,\infty,\overline{K}-K_0)<\infty$ with positive probability under $\mathbb{P}^{(e), \uparrow}_x$. Next, we assume that $\mathbb{P}^{(e), \uparrow}_x(v_t(0,\infty,\overline{K}-K_0)<\infty)>0$. Since $\psi_0$ is non-decreasing, we deduce, under the event that $\{t< \tau_{-x}^-\}$, that \[ \begin{split} \frac{\partial}{\partial s} v_t(s,\lambda e^{-x},\overline{K}-x)&=e^{\overline{K}_s-x}\psi_0(e^{-\overline{K}_s+x}v_t(s,\lambda e^{-x},\overline{K}-x))\\ & \leq e^{\overline{K}_s-x}\psi_0(v_t(s,\lambda e^{-x},\overline{K}-x)), \end{split} \] which implies \[ \int_{v_t(0,\lambda e^{-x},\overline{K}-x)}^{\lambda e^{-x}}\frac{\mathrm{d} u}{\psi_0(u)} \leq \int_0^t e^{\overline{K}_s-x} \mathrm{d} s.\] Therefore, by letting $\lambda$ go to $\infty$, we have \[ \int_{v_t(0,\infty,\overline{K}-x)}^{\infty}\frac{\mathrm{d} u}{\psi_0(u)} \leq \int_0^t e^{\overline{K}_s-x} \mathrm{d} s, \] with positive probability under $\mathbb{P}^{(e), \uparrow}_x$. It implies that Grey's condition \eqref{GreysCond} holds and completes the proof. \end{proof} Actually, from Grey's condition \eqref{GreysCond} and inequality \eqref{tomate}, we can deduce a nice lower bound for the probability of extinction. Indeed, let us introduce \[ f(t):=\int_t^\infty \frac{\mathrm{d} u}{\psi_0(u)}, \qquad \textrm{for}\quad t>0, \] and note that the function $f:(0,\infty)\to(0,\infty)$ is a decreasing bijection and thus its inverse exists. We denote this inverse by $\varphi$. Therefore from \eqref{tomate}, we get \[ v_t(0,\infty, \overline{K}-x)\le \varphi\left(\int_0^t e^{-\overline{K}_s+x} \mathrm{d} s\right).
\] In other words, \[ \mathbb{E}^{(e), \uparrow}_x \left[ \exp\left\{-z \varphi\left(\int_0^t e^{-\overline{K}_s}\mathrm{d} s\right) \right\} \right]\le\mathbb{E}^{(e), \uparrow}_x\left[ e^{-z v_t(0,\infty,\overline{K}-K_0) } \right], \] implying \[ 0<\mathbb{E}^{(e), \uparrow}_x \left[ \exp\left\{-z \varphi\left(\int_0^\infty e^{-\overline{K}_s}\mathrm{d} s\right) \right\} \right]\le\lim_{t\to\infty}\mathbb{P}^\uparrow_{(z,x)} (Z_t=0), \] since $\varphi$ is non-increasing and \begin{equation}\label{finitexp} \int_0^\infty e^{-\overline{K}_s}\mathrm{d} s <\infty, \qquad \mathbb{P}^{(e), \uparrow}_x\mathrm{-a.s.} \end{equation} The claim in \eqref{finitexp} follows from the following argument. From Theorem VI.20 in Bertoin \cite{bertoin1998levy}, we observe \[ \begin{split} \mathbb{E}^{(e), \uparrow}_x \left[ \int_0^\infty e^{-\overline{K}_s} \mathrm{d} s\right]&= \int_0^\infty \mathbb{E}^{(e), \uparrow}_x \left[ e^{-\overline{K}_s} \right]\mathrm{d} s\\ &= \int_0^\infty \frac{1}{ \widehat{V}(x)}\mathbb{E}^{(e)}_x\left[ e^{-\overline{K}_s} \widehat{V}(\overline{K}_s)\mathbf{1}_{\{I_s > 0\}}\right]\mathrm{d} s\\ &= \frac{1}{ \widehat{V}(x)}\mathbb{E}^{(e)}_x\left[ \int_0^{\tau^-_0} e^{-\overline{K}_s} \widehat{V}(\overline{K}_s)\mathrm{d} s\right]\\ &= \frac{\mathbf{c}}{ \widehat{V}(x)}\int_{[0,\infty)} V(\mathrm{d} y) \int_{[0,x]} \widehat{V}(\mathrm{d} z) e^{-x-y+z} \widehat{V}(x+y-z), \end{split} \] where we recall that $ V$ denotes the renewal measure associated to the ascending ladder height and $\mathbf{c}$ is a constant that only depends on the normalisation of the local times $L$ and $\widehat{L}$. For the sake of simplicity we take $\mathbf{c}=1$. Thus, since $\widehat{V}$ is increasing we have \[ \mathbb{E}^{(e), \uparrow}_x \left[ \int_0^\infty e^{-\overline{K}_s} \mathrm{d} s\right]\le \int_{[0,\infty)}V(\mathrm{d} y) e^{-y} \widehat{V}(x+y).
\] The latter integral is finite since $\widehat{V}$ satisfies \eqref{grandO} and \begin{equation}\label{Lapsharp} \int_{[0, \infty)}e^{-\theta x} V(\mathrm{d} x)=\frac{1}{\kappa(0,\theta)}, \qquad \textrm{for} \quad \theta>0, \end{equation} which follows from the definition of $ V$ (see \eqref{ascren}) and similar arguments as in \eqref{LaplaceV}. In other words the claim in \eqref{finitexp} holds. On the other hand, for our purposes, we are interested in conditions which guarantee that \[ \lim_{t\to\infty}\mathbb{P}^\uparrow_{(z,x)} (Z_t=0)<1. \] This problem is similar to determining when the probability of survival of a CSBP process in L\'evy environments that drifts to $+\infty$, is positive. According to Proposition 2 in \cite{palau2017continuous}, the latter holds under a $x\log (x)$ moment condition on the measure $\mu$. Assumption {\bf (H2)} is very similar to the previous condition and implies that $Z$ has a positive probability to survive when living in a ``favorable'' environment, or in other words when the running infimum of the L\'evy environment is positive. \begin{proposition}\label{posi_CSBP} If condition {\bf (H2)} holds then $$ \lim_{t\to\infty}\mathbb{P}^\uparrow_{(z,x)} (Z_t>0) >0. $$ \end{proposition} \begin{proof} Let us assume that condition {\bf (H2)} holds. We follow similar ideas as in the proof of Proposition 2 in \cite{palau2017continuous}. First recall that the function $s \mapsto v_t(s,\lambda e^{-x},\overline{K}-x)$ is non-decreasing on $[0,t]$ since $\psi_0$ is positive. Hence for any $s\in[0,t]$, we have $$ v_t(s,\lambda e^{-x},\overline{K} -x) \leq \lambda e^{-x}.
$$ In particular, from \eqref{backward} we have \begin{align*} \frac{\partial}{\partial s} v_t(s,\lambda e^{-x},\overline{K} -x)&=e^{\overline{K}_s-x}\psi_0(e^{-\overline{K}_s+x}v_t(s,\lambda e^{-x},\overline{K} -x)) \\ &= v_t(s,\lambda e^{-x},\overline{K}-x)\Phi(e^{-\overline{K}_s+x}v_t(s,\lambda e^{-x},\overline{K} -x)) \\ & \leq v_t(s,\lambda e^{-x},\overline{K} -x) \Phi(e^{-\overline{K}_s}\lambda), \end{align*} as $\Phi$ is non-decreasing and $v_t(s,\lambda e^{-x},\overline{K} -x)$ is non-decreasing with $s$ and equals $\lambda e^{-x}$ when $s=t$. This entails $$ v_t(0,\lambda e^{-x},\overline{K} -x) \geq \lambda e^{-x} \exp \left\{ - \int_0^t \Phi\left( \lambda e^{-\overline{K}_s} \right)\mathrm{d} s \right\}. $$ Thus, for any $\lambda \ge 0$ \[ \begin{split} \mathbb{P}^\uparrow_{(z,x)} (Z_t=0)&=\mathbb{E}^{(e), \uparrow}_x \left[ e^{-z v_t(0,\infty,\overline{K} -x) } \right]\\ &=\frac{1}{ \widehat{V}(x)}\mathbb{E}^{(e)}_x\left[ \widehat{V}(\overline{K}_t)e^{-z v_t(0,\infty,\overline{K} -x) }\mathbf{1}_{\{t< \tau^-_0\}} \right]\\ &\leq \frac{1}{\widehat{V}(x)}\mathbb{E}^{(e)}_x\left[ \widehat{V}(\overline{K}_t)e^{-z v_t(0,\lambda e^{-x},\overline{K} -x) }\mathbf{1}_{\{t< \tau^-_0\}} \right]\\ & \leq \frac{1}{ \widehat{V}(x)}\mathbb{E}^{(e)}_x \left[ \widehat{V}(\overline{K}_t) e^{-z \lambda e^{-x} \exp \left\{ - \int_0^t \Phi\left( \lambda e^{-\overline{K}_s} \right)\mathrm{d} s \right\} }\mathbf{1}_{\{t< \tau^-_0\}} \right]\\ &=\mathbb{E}^{(e), \uparrow}_x \left[ e^{-z \lambda e^{-x}\exp \left\{ - \int_0^t \Phi\left( \lambda e^{-\overline{K}_s} \right)\mathrm{d} s \right\} } \right], \end{split} \] where we have used that $\lambda \mapsto v_t(0,\lambda ,\overline{K}-x)$ is non-decreasing, see Proposition 2.2 in He et al. \cite{he2016continuous}.
Hence, we have \[ \lim_{t\to\infty}\mathbb{P}^\uparrow_{(z,x)} (Z_t=0)\le \mathbb{E}^{(e), \uparrow}_x \left[ e^{-z \lambda e^{-x}\exp \left\{ - \int_0^\infty \Phi\left( \lambda e^{-\overline{K}_s} \right)\mathrm{d} s \right\} } \right]. \] If \begin{equation}\label{finitude} \mathbb{E}^{(e), \uparrow}_x \left[ \int_0^\infty \Phi\left( \lambda e^{-\overline{K}_s} \right)\mathrm{d} s\right]<\infty, \end{equation} we get $$ \exp \left\{ - \int_0^{\infty} \Phi\left( \lambda e^{-\overline{K}_s} \right)\mathrm{d} s \right\} >0 , \quad \mathbb{P}^{(e), \uparrow}_x -\textrm{a.s.,} $$ and $\lim_{t\rightarrow \infty} \mathbb{P}^\uparrow_{(z,x)} (Z_t=0)<1. $ In other words, in order to deduce our result it is enough to show that \eqref{finitude} holds. We proceed similarly as in the proof of \eqref{finitexp}. From the definition of $\mathbb{P}^{(e), \uparrow}_x$ and Theorem VI.20 in Bertoin \cite{bertoin1998levy}, we observe \[ \begin{split} \mathbb{E}^{(e), \uparrow}_x \left[ \int_0^\infty \Phi\left( \lambda e^{-\overline{K}_s} \right)\mathrm{d} s\right]&= \frac{1}{ \widehat{V}(x)}\mathbb{E}^{(e)}_x\left[ \int_0^{\tau^-_0} \Phi\left( \lambda e^{-\overline{K}_s} \right) \widehat{V}(\overline{K}_s)\mathrm{d} s\right]\\ &= \frac{1}{ \widehat{V}(x)}\int_{[0,\infty)} V(\mathrm{d} y) \int_{[0,x]} \widehat{V}(\mathrm{d} z)\Phi\left(\lambda e^{-x-y+z}\right) \widehat{V}(x+y-z). \end{split} \] Recalling the definition of $\Phi$ in \eqref{def_Phi} and observing that it is increasing, as well as the renewal function $\widehat{V}$, we obtain that \[ \begin{split} \frac{1}{\widehat{V}(x)}\int_{[0,\infty)} V(\mathrm{d} y) \int_{[0,x]} & \widehat{V}(\mathrm{d} z)\Phi\left(\lambda e^{-x-y+z}\right) \widehat{V}(x+y-z)\\ &\le \int_{[0,\infty)}V(\mathrm{d} y) \Phi\left(\lambda e^{-y}\right) \widehat{V}(x+y) \\ &= \gamma^2\lambda \int_{[0,\infty)} V(\mathrm{d} y)
e^{-y} \widehat{V}(x+y)\\ &\hspace{.5cm}+\int_{[0,\infty)} V(\mathrm{d} y) \widehat{V}(x+y)\int_{(0,\infty)} \Big(1-e^{-\lambda e^{-y}z}\Big) \overline{ \mu} (z) \mathrm{d} z. \end{split} \] The first integral of the right-hand side is finite from identity \eqref{Lapsharp} and since $\widehat{V}$ satisfies \eqref{grandO}. For the second integral, we first rewrite \[ \begin{split} \int_{[0,\infty)}V(\mathrm{d} y) & \widehat{V}(x+y)\int_{(0,\infty)} \Big(1-e^{-\lambda e^{-y}z}\Big)\overline{ \mu} (z) \mathrm{d} z\\ &=\int_{(0,\infty)}\mathrm{d} z \overline{\mu}( z)\int_{[0,\infty)} V(\mathrm{d} y) \left(1-e^{-\lambda e^{-y}z} \right) \widehat{V}(x+y)\\ &=\int_{(0,\infty)}\mathrm{d} z \overline{\mu}( z)g(z), \end{split} \] with \begin{equation}\label{defg} g(z):=\int_{[0,\infty)} V(\mathrm{d} y) \left(1-e^{-\lambda e^{-y}z} \right) \widehat{V}(x+y). \end{equation} In order to conclude our proof, we need to show that under condition {\bf (H2)}, the integral of $z\mapsto \overline{\mu}(z)g(z)$ is finite. In other words, we need to study the behaviour of $g(z)$ when $z$ is close to $0$ and to $\infty$. With this aim in mind, we use that $\widehat{V}$ is subadditive and identity \eqref{Lapsharp}, as well as the following inequality, $$ 1 -e^{-z} \leq 1 \wedge z, $$ which holds for every $z>0$. For $z$ small enough, and using inequality \eqref{grandO}, we get \[ \begin{split} g(z)&\leq \lambda z\int_{[0,1)} V(\mathrm{d} y) \widehat{V}(x+y) e^{-y}+\lambda z\int_{[1,\infty)} V(\mathrm{d} y) \widehat{V}(x+y) e^{-y}\\ &\leq \lambda z \left( \widehat{V}(x+1) V(1)+ C(x)\int_{[1,\infty)} V(\mathrm{d} y) y e^{-y} \right)\\ &\leq C_1(x)\lambda z, \end{split} \] where $C(x)$ and $C_1(x)$ are two finite constants that only depend on $x$. For $z$ large enough, we split the integral in \eqref{defg} into three terms.
To be more precise, \[ \begin{split} g(z) &\leq \int_{[0,\infty)} V(\mathrm{d} y) \widehat{V}(x+y) (\lambda z e^{-y} \wedge 1)\\ & \leq \int_{[0,1)} V(\mathrm{d} y)\widehat{V}(x+y)+\int_{[1,2\ln(\lambda z))} V(\mathrm{d} y) \widehat{V}(x+y)\\ &\hspace{5cm}+\lambda z\int_{[2\ln(\lambda z), \infty)} V(\mathrm{d} y) \widehat{V}(x+y) e^{-y}. \end{split} \] We study the three terms from above separately. First, it is clear that the first term satisfies $$ \int_{[0,1)} V(\mathrm{d} y) \widehat{V}(x+y) \leq V(1) \widehat{V}(x+1). $$ For the third term, we use \eqref{grandO} and deduce \[ \begin{split} \lambda z \int_{2\ln(\lambda z)}^\infty V(\mathrm{d} y) \widehat{V}(x+y) e^{-y} &\leq C_1 \lambda z e^{-2\ln(\lambda z)/2}\int_{2\ln(\lambda z)}^\infty V(\mathrm{d} y) (x+y) e^{-y/2} \\ &\leq C_1 (x+1)\int_{0}^\infty V(\mathrm{d} y) (1+y) e^{-y/2}, \end{split} \] where $$\int_{0}^\infty V(\mathrm{d} y) (1+y) e^{-y/2}\leq \sum_{i\geq 0} V ([i,i+1)) (1+i+1)e^{-i/2}\leq C_4\sum_{i\geq 0} (i+2)^2e^{-i/2}<\infty,$$ with $C_4>0$ such that \[ V(x)\le C_4 x, \qquad\textrm{for}\quad x\ge 0. \] Finally, \[ \int_{[1,2\ln(\lambda z))} V(\mathrm{d} y) \widehat{V}(x+y) \leq C(x) \int_{[1,2\ln(\lambda z))}yV(\mathrm{d} y) \leq C_2(x) \ln^2(\lambda z), \] where $C(x)$ and $C_2(x)$ are two finite constants that only depend on $x$. Since condition ${\bf (H2)}$ holds, the proof of our result is now complete. \end{proof} \section{Proof of Theorem \ref{maintheo}} We have now collected all the necessary results to study the asymptotic behaviour of the extinction probability of $Z$. The proof of Theorem \ref{maintheo} follows from studying the event of survival at time $t$, $\{Z_t>0\}$ in three different situations that depend on the behaviour of the infimum of the environment.
To be more precise, we split the survival event as follows: for $z,x>0$, \begin{multline}\label{decomp} \mathbb{P}_{(z,x)}(Z_t>0) = \mathbb{P}_{(z,x)}(Z_t>0,I_t>0) \\ + \mathbb{P}_{(z,x)}(Z_t>0,-y<I_t\leq 0)+ \mathbb{P}_{(z,x)}(Z_t>0,I_t\leq -y), \end{multline} where $y>0$ will be chosen later on. In other words, to deduce our result, we study such events separately for $t$ sufficiently large. Our first result in this section concerns the first term in the right-hand side of \eqref{decomp}. It says that this is the leading term in \eqref{decomp}. \begin{lemma}\label{L1preuve} Assume that assumptions {\bf (H1)} and {\bf (H2)} hold. For $z,x>0$, there exists a positive constant $c(z,x)$ such that $$ \mathbb{P}_{(z,x)}(Z_t>0,I_t>0) \sim c(z,x) \mathbb{P}^{(e)}_x(I_t>0) \sim c(z,x) \widehat{V}(x)t^{-(1-\rho)}\ell(t), \quad \text{as} \quad t \to \infty, $$ where $\ell$ is a slowly varying function at $\infty$, introduced in \eqref{infimumspitzer}. \end{lemma} \begin{proof} Since $\mathbf{1}_{\{Z_t>0\}}$ converges to $\mathbf{1}_{\{\forall s\ge 0,\, Z_s>0\}}$, $\mathbb{P}^\uparrow_{(z,x)}$-almost surely, as $t$ goes to $\infty$, we can apply Lemma \ref{lemme25AGKV} and \[ \begin{split} \mathbb{P}_{(z,x)}(Z_t>0,I_t>0)&= \mathbb{P}_{(z,x)}(Z_t>0|I_t>0)\mathbb{P}^{(e)}_x(I_t>0) \\ &\sim \mathbb{P}^\uparrow_{(z,x)}(\forall s\ge 0,\, Z_s>0)\mathbb{P}^{(e)}_x(I_t>0),\qquad \textrm{as}\quad t \to \infty. \end{split} \] From Proposition \ref{posi_CSBP}, we know that $$ \mathbb{P}^\uparrow_{(z,x)}(\forall s\ge 0,\, Z_s>0)>0. $$ We conclude the proof by recalling the asymptotic behaviour in \eqref{infimumspitzer} to deduce the second equivalence. \end{proof} We now prove that the last term in the right-hand side of \eqref{decomp} is negligible for $y$ large enough, under assumptions {\bf (H1)} and {\bf (H3)}.
\begin{lemma}\label{L2preuve} Let $\eps, z,x>0$, $\delta\in(0,1)$ and assume that assumptions {\bf (H1)} and {\bf (H3)} hold. Then for $t$ and $y$ large enough, we have $$ \mathbb{P}_{(z,x)}(Z_t>0,I_{t-\delta}<-y) \leq \eps \mathbb{P}_{(z,x+y)}(Z_t>0,I_{ t-\delta}>0). $$ \end{lemma} \begin{proof} Recall that for $s\in[0,t]$, the functional $v_t(s,\lambda, \overline{K})$ is the $\mathbb{P}^{(e)}$-a.s. unique solution of the backward differential equation \eqref{backward}. We also recall that the quenched survival probability satisfies \begin{eqnarray}\label{majquenched} \mathbb{P}_{(z,x)}(Z_t>0|\overline{K})= 1- e^{-zv_t(0,\infty,\overline{K}-x)}. \end{eqnarray} From assumption {\bf (H3)} and definition \eqref{backward}, we obtain that for $s \leq t$ and $\lambda \geq 0$, \[ \begin{split} \frac{ \partial}{\partial s} v_t(s,\lambda e^{-x},\overline{K} -x)& \geq C e^{\overline{K}_s-x} \left( v_t(s,\lambda e^{-x},\overline{K}-x) e^{-\overline{K}_s+x} \right)^{\beta+1} \\ &= Cv^{\beta+1}_t(s,\lambda e^{-x},\overline{K}-x) e^{-\beta (\overline{K}_s-x)}. \end{split} \] This yields $$ \frac{1}{v^\beta_t(0,\lambda e^{-x},\overline{K}-x)}-\frac{1}{\lambda^\beta}\geq \beta C \mathcal{I}_t(\beta(\overline{K}-x)) ,$$ where \[ \mathcal{I}_t(\beta (\overline{K}-x)):= \int_0^te^{-\beta (\overline{K}_s-x)}\mathrm{d} s. \] Letting $\lambda$ go to $\infty$, we obtain \begin{equation}\label{majv} v_t(0,\infty,\overline{K}-x)\leq \left(\beta C \mathcal{I}_t(\beta (\overline{K}-x))\right)^{-1/\beta} .
\end{equation} Using \eqref{majquenched} and \eqref{majv}, we get the following upper bound \begin{multline}\label{term_to_bound} \mathbb{P}_{(z,x)}(Z_t>0,I_{t-\delta}<-y)\leq \mathbb{E}^{(e)}_x \left[ \left(1 -e^{-z \left(\beta C \mathcal{I}_t(\beta (\overline{K}-x)) \right)^{-1/\beta}} \right)\mathbf{1}_{\{I_{t-\delta}<-y\}} \right]\\ =\mathbb{E}^{(e)} \left[ \left(1 -e^{-z \left(\beta C \mathcal{I}_t(\beta (\overline{K})) \right)^{-1/\beta}} \right)\mathbf{1}_{\{I_{t-\delta}<-y-x\}} \right]. \end{multline} On the other hand, under assumption {\bf (H1)}, Theorems 2.18 and 2.20 in \cite{patie2016bernstein} guarantee that for $q\in(0,1)$, \[ \mathbb{E}^{(e)} \left[ \mathcal{I}_t(\beta \overline{K})^{-q} \right]<\infty, \qquad t>0, \] and for $F\in C_b(\mathbb{R}_+)$ \[ \lim_{t\to\infty} \frac{\mathbb{E}^{(e)} \left[ \mathcal{I}_t(\beta \overline{K})^{-q} F(\mathcal{I}_t(\beta \overline{K}))\right]}{\widehat{\kappa}(1/t,0)}= \int_0^\infty F(x)\nu_{q, \rho}(\mathrm{d} x), \] where $\nu_{q, \rho}$ is a finite measure on $(0,\infty)$, see equation (2.46) in \cite{patie2016bernstein} for further details about $\nu_{q, \rho}$. Thus, by taking $F_z(x)=x^q(1-e^{-z C_\beta x^{-1/\beta}})$ with $C_\beta=(\beta C)^{-1/\beta}$, we deduce \begin{equation} \label{equiv_mathcal_I} \lim_{t\to\infty} \frac{\mathbb{E}^{(e)} \left[ 1 -e^{-z C_\beta\left(\mathcal{I}_t(\beta (\overline{K})) \right)^{-1/\beta}} \right]}{\widehat{\kappa}(1/t,0)}= \int_0^\infty x^q(1-e^{-zC_\beta x^{-1/\beta}})\nu_{q, \rho}(\mathrm{d} x) =: C_{\beta, q, \rho}(z), \end{equation} where the last notation has been introduced for the sake of readability.
Hence, in particular from \eqref{asymp_kappa_hat} we have \begin{equation*} \mathbb{E}^{(e)} \left[ 1 -e^{-z C_\beta\left(\mathcal{I}_t(\beta (\overline{K})) \right)^{-1/\beta}} \right] \sim \frac{ \Gamma(1+\rho)C_{\beta, q, \rho}(z)}{\rho} t^{\rho-1}\ell(t), \qquad \textrm{as} \quad t \to \infty, \end{equation*} where $\ell$ is the slowly varying function at $\infty$ introduced in \eqref{asymp_kappa_hat}. The latter implies that there exists $t_0$ such that if $t \geq t_0$, \begin{equation} \label{asymp_mathcalI} \mathbb{E}^{(e)} \left[ 1 -e^{-z C_\beta\left(\mathcal{I}_t(\beta (\overline{K})) \right)^{-1/\beta}} \right] \leq 2 \frac{\Gamma(1+\rho)C_{\beta, q, \rho}(z)}{\rho} t^{\rho-1}\ell(t). \end{equation} Next, we recall from \eqref{infimumspitzer} that \begin{equation*} \mathbb{P}^{(e)}(I_t >-y) = \mathbb{P}^{(e)}_{y}(I_t >0) \sim \widehat{V}(y)t^{\rho-1}\ell(t), \qquad \textrm{as} \quad t \to \infty. \end{equation*} On the other hand from Potter's Theorem (see Theorem 1.5.6 in Bingham et al. \cite{bingham1989regular}), we deduce that for any $A>1$ and $\delta_1>0$ there exists $t_1:=t_1(A, \delta_1)$ such that for $s\ge h\ge t_1$, \[ \frac{\mathbb{P}^{(e)}(I_h >-y)}{\mathbb{P}^{(e)}(I_{s} >-y)}\leq A\left( \frac{s}{h} \right)^{1-\rho+\delta_1}. \] Let us fix $A>1$ and $\delta_1>0$ and introduce $\tau_{-y}=\inf\{t: \overline{K}_t\le -y\},$ the first hitting time of $-y$ by $\overline{K}$. The previous inequality implies that for $s\ge h\geq t_2:=t_0\lor t_1$, \begin{align} \label{asymp_tauy} \mathbb{P}^{(e)}(h < \tau_{-y} \leq s) & = \mathbb{P}^{(e)}(I_h >-y)-\mathbb{P}^{(e)}(I_{s} >-y) \nonumber \\& = \mathbb{P}^{(e)}(I_{s} >-y)\left(\frac{\mathbb{P}^{(e)}(I_h >-y)}{\mathbb{P}^{(e)}(I_{s} >-y)}-1\right) \nonumber \\& \leq \left(A\left( \frac{s}{h} \right)^{1-\rho+\delta_1}-1\right)\mathbb{P}^{(e)}(I_{s} >-y).
\end{align} For simplicity, we introduce the notation $\tilde{y}=y+x$. Hence from the property of independent increments of $\overline{K}$, we get the following sequence of inequalities, for $t\ge 3t_2$, \[ \begin{split} \mathbb{E}^{(e)} \bigg[ &\left(1 -e^{-z C_\beta \left( \mathcal{I}_t(\beta (\overline{K})) \right)^{-1/\beta}} \right),\tau_{-\tilde{y}} \leq t -\delta\bigg] \\ & \hspace{.5cm} \leq \mathbb{E}^{(e)} \left[ \left(1 -e^{-z C_\beta\left( \int_{\tau_{-\tilde{y}}}^{t} e^{- \beta \overline{K}_s}\mathrm{d} s \right)^{-1/\beta}} \right)\mathbf{1}_{\{\tau_{-\tilde{y}} \leq t -\delta\}}\right]\\ & \hspace{.5cm} \leq \mathbb{E}^{(e)} \left[ \left(1 -\exp\left\{-z C_\beta e^{-\tilde{y}}\left( \int_{0}^{t-\tau_{-\tilde{y}}} e^{- \beta \big(\overline{K}_{\tau_{-\tilde{y}}+u}-\overline{K}_{\tau_{-\tilde{y}}}\big)}\mathrm{d} u\right)^{-1/\beta}\right\} \right)\mathbf{1}_{\{\tau_{-\tilde{y}} \leq t -\delta\}}\right]\\ &\hspace{.5cm}\leq \mathbb{E}^{(e)} \left[ \left(1 -e^{-z C_\beta e^{-\tilde{y}} \left(\mathcal{I}_{\frac{t+t_2}{2}}(\beta \overline{K})\right)^{-1/\beta}} \right)\right] \\ &\hspace{2cm}+\mathbb{E}^{(e)} \left[ \left(1 -e^{-z C_\beta e^{-\tilde{y}} \left(\mathcal{I}_{\delta}(\beta \overline{K})\right)^{-1/\beta}} \right)\right] \mathbb{P}^{(e)} \left( \frac{t-t_2}{2}<\tau_{-\tilde{y}} \leq t-\delta\right).
\end{split} \] Thus from \eqref{asymp_mathcalI}, \eqref{asymp_tauy} and \eqref{infimumspitzer}, we have \[ \begin{split} \mathbb{E}^{(e)} \bigg[ &\left(1 -e^{-z C_\beta \left( \mathcal{I}_t(\beta (\overline{K})) \right)^{-1/\beta}} \right),\tau_{-\tilde{y}} \leq t -\delta\bigg]\\ & \le 2^{2-\rho} \frac{\Gamma(1+\rho)C_{\beta, q, \rho}(ze^{-\tilde{y}})}{\rho} (t+t_2)^{\rho-1}\ell\left(\frac{t+t_2}{2}\right)\\ &+\left(A2^{1-\rho+\delta_1}\left( 1+\frac{t_2-\delta}{2t_2} \right)^{1-\rho+\delta_1}-1\right)\widehat{V}(\tilde{y})(t-\delta)^{\rho-1}\ell(t-\delta)\\ &\hspace{5cm}\times \mathbb{E}^{(e)} \left[ \left(1 -e^{-z C_\beta e^{-\tilde{y}} \left(\mathcal{I}_{\delta}(\beta \overline{K})\right)^{-1/\beta}} \right)\right] . \end{split} \] Next, we introduce \[ \begin{split} c_1(z,\tilde{y})= &\left(2^{2-\rho} \frac{\Gamma(1+\rho)C_{\beta, q, \rho}(ze^{-\tilde{y}})}{\rho} \right .\\ &\left. \vee \left(A2^{1-\rho+\delta_1}\left( 1+\frac{t_2-\delta}{2t_2} \right)^{1-\rho+\delta_1}-1\right)\mathbb{E}^{(e)} \left[ 1 -e^{-z C_\beta e^{-\tilde{y}} \left(\mathcal{I}_{\delta}(\beta \overline{K})\right)^{-1/\beta}} \right]\right). \end{split} \] Therefore from \eqref{term_to_bound} and again from Potter's Theorem, we get for $t\ge 3t_2$ \[ \begin{split} \frac{ \mathbb{P}_{(z,x)}(Z_t>0,I_{t-\delta}<-y)}{t^{\rho-1}\ell(t)}&\leq c_1(z,\tilde{y})\left( \frac{\ell\left(\frac{t+t_2}{2}\right)}{\ell(t)} + \left(1-\frac{\delta}{3t_2}\right)^{\rho-1} \widehat{V}(y+x)\frac{\ell\left(t-\delta\right)}{\ell(t)} \right)\\ &\le c_1(z,\tilde{y})A \left( 2^{\delta_1} + \left(1-\frac{\delta}{3t_2}\right)^{\rho-1-\delta_1} \widehat{V}(y+x) \right). \end{split} \] Finally, we observe that the map $x\mapsto x^q(1-e^{-z C_\beta e^{-\tilde{y}} x^{-1/\beta}})$ is bounded and goes to $0$ as $y$ goes to $\infty$. Similarly the r.v.
$1 -e^{-z C_\beta e^{-\tilde{y}} \left(\mathcal{I}_{\delta}(\beta \overline{K})\right)^{-1/\beta}}$ is bounded by one and goes to $0$, $\mathbb{P}^{(e)}$-a.s., as $y$ goes to $\infty$. Thus by the Dominated Convergence Theorem, we have \[ C_{\beta, q, \rho}(ze^{-\tilde{y}})=\int_0^\infty x^q(1-e^{-zC_\beta e^{-\tilde{y}}x^{-1/\beta}})\nu_{q, \rho}(\mathrm{d} x)\xrightarrow[y\to \infty]{} 0, \] and \[ \mathbb{E}^{(e)} \left[ 1 -e^{-z C_\beta e^{-\tilde{y}} \left(\mathcal{I}_{\delta}(\beta \overline{K})\right)^{-1/\beta}} \right]\xrightarrow[y\to\infty]{} 0. \] In other words $c_1(z,\tilde{y})\to 0$, as $y$ increases. This implies that \[ \lim_{y\to\infty} \limsup_{t\to\infty} \frac{ \mathbb{P}_{(z,x)}(Z_t>0,I_{t-\delta}<-y)}{t^{\rho-1}\ell(t)} =0, \] since $\widehat{V}(y)=\mathcal{O}(y)$ as $y\rightarrow \infty$. \end{proof} Using Lemmas \ref{L1preuve} and \ref{L2preuve}, we are now able to conclude the proof of our main result. \begin{proof}[Proof of Theorem \ref{maintheo}] Let $z,x,\varepsilon>0$ and $\delta\in(0, 1)$. From Lemma \ref{L2preuve}, we can choose $y$ such that for $t$ large enough, \[ \mathbb{P}_{(z,x)}(Z_t>0,I_{t-\delta}<-y) \le \varepsilon \mathbb{P}_{(z,x+y)}(Z_t>0,I_t > 0). \] Hence we deduce \[ \begin{split} \mathbb{P}_{z}(Z_t>0) &=\mathbb{P}_{(z,x)}(Z_t>0,I_{t-\delta}>0)\\ &\hspace{1cm}+ \mathbb{P}_{(z,x)}(Z_t>0,-y<I_{t-\delta}\leq 0)+ \mathbb{P}_{(z,x)}(Z_t>0,I_{t-\delta}\leq -y)\\ &\le \mathbb{P}_{(z,x+y)}(Z_t>0,I_{t-\delta}>0) + \varepsilon\mathbb{P}_{(z,x+y)}(Z_t>0,I_t> 0) \\ &\le \mathbb{P}_{(z,x+y)}(Z_{t-\delta}>0,I_{t-\delta}>0) + \varepsilon\mathbb{P}_{(z,x+y)}(Z_t>0,I_t> 0) \\ &\le \left(\frac{\mathbb{P}_{(z,x+y)}(Z_{t-\delta}>0,I_{t-\delta}>0)}{\mathbb{P}_{(z,x+y)}(Z_t>0,I_t> 0)} + \varepsilon\right)\mathbb{P}_{(z,x+y)}(Z_t>0,I_t> 0).
\end{split} \] From Lemma \ref{L1preuve}, we know \begin{equation} \label{asymp_tgrand} \begin{split} \mathbb{P}_{(z,x+y)}(Z_t>0,I_t>0) &\sim c(z,x+y) \mathbb{P}^{(e)}_{x+y}(I_t>0)\\ & \sim c(z,x+y) \widehat{V}(x+y)t^{-(1-\rho)}\ell(t), \quad \text{as } t \to \infty. \end{split} \end{equation} From Potter's Theorem (see Theorem 1.5.6 in Bingham et al. \cite{bingham1989regular}), we deduce that for any $A>1$ and $\delta_1>0$ there exists $t_1:=t_1(A,\delta_1)$ such that \[ \mathbb{P}_{z}(Z_t>0) \le \left(A\left(1+\frac{\delta}{t_1-\delta}\right)^{1-\rho+\delta_1} + \varepsilon\right)\mathbb{P}_{(z,x+y)}(Z_t>0,I_t> 0). \] In other words, for every $\varepsilon>0$, there exists $y^\prime>0$ such that $$ \mathbb{P}_{(z,y^\prime)}(Z_t>0,I_t>0) \leq \mathbb{P}_{z}(Z_t>0) \leq \left(A\left(1+\frac{\delta}{t_1-\delta}\right)^{1-\rho+\delta_1} + \varepsilon\right)\mathbb{P}_{(z,y^\prime)}(Z_t>0,I_t> 0), $$ for some $A>1$ and $\delta_1>0$. Recall that $y'$ is a sequence which may depend on $z$ and $\varepsilon$ and goes to infinity as $\varepsilon$ goes to $0$. Thus, let us take any sequence $y(z,\varepsilon)$ satisfying for any $z,\varepsilon>0$ \begin{equation} \label{prelim} \begin{split} \mathbb{P}_{(z,y(z,\varepsilon))}(Z_t>0,I_t>0) &\leq \mathbb{P}_{z}(Z_t>0)\\ &\leq \left(A\left(1+\frac{\delta}{t_1-\delta}\right)^{1-\rho+\delta_1} + \varepsilon\right)\mathbb{P}_{(z,y(z,\varepsilon))}(Z_t>0,I_t> 0), \end{split} \end{equation} and prove that $$C(z):= \lim_{\varepsilon \to 0}c(z,y(z,\varepsilon)) \widehat{V}(y(z,\varepsilon))$$ exists and is positive and finite.
Dividing equation \eqref{prelim} by $t^{\rho-1}\ell(t)$ and using \eqref{asymp_tgrand}, we deduce \[ \begin{split} 0<c(z,y(z,\varepsilon)) \widehat{V}(y(z,\varepsilon))&\leq \liminf_{t \to \infty} \frac{ \mathbb{P}_{z}(Z_t>0)}{t^{\rho-1}\ell(t)}\\ &\leq \left(A\left(1+\frac{\delta}{t_1-\delta}\right)^{1-\rho+\delta_1} + \varepsilon\right) c(z,y(z,\varepsilon)) \widehat{V}(y(z,\varepsilon))<\infty. \end{split} \] Now, letting $\delta$ go to $0$ and then $\varepsilon$ tend to $0$, we get \begin{align*} 0<\limsup_{\varepsilon\to 0}c(z,y(z,\varepsilon)) \widehat{V}(y(z,\varepsilon))&\leq \liminf_{t \to \infty} \frac{ \mathbb{P}_{z}(Z_t>0)}{t^{-(1-\rho)}\ell(t)}\\ &\leq \liminf_{\varepsilon \to 0}\Big( A+\varepsilon\Big)c(z,y(z,\varepsilon)) \widehat{V}(y(z,\varepsilon))\\ &= A\liminf_{\varepsilon \to 0}c(z,y(z,\varepsilon)) \widehat{V}(y(z,\varepsilon))<\infty. \end{align*} Since $A$ can be taken arbitrarily close to 1, the inferior and superior limits (when $\varepsilon$ goes to $0$) of the sequence $c(z,y(z,\varepsilon)) \widehat{V}(y(z,\varepsilon))$ are thus equal, positive and finite. We thus deduce that this sequence has a limit $C(z)$ when $\varepsilon$ goes to $0$, which is also the limit of $$ \frac{ \mathbb{P}_{z}(Z_t>0)}{t^{-(1-\rho)}\ell(t)} \qquad \textrm{when $t$ goes to $\infty$,}$$ and obtain $$ \mathbb{P}_{z}(Z_t>0) \sim C(z)t^{-(1-\rho)}\ell(t).$$ This completes the proof. \end{proof} \appendix \section{Appendix} \label{appendix} We provide in this Appendix the proofs of some technical results for the sake of completeness. \begin{lemma}\label{conservative} If \eqref{finitemom} holds then the process $Z$ is conservative, i.e. \[ \mathbb{P}_z(Z_t<\infty)=1 , \qquad \textrm{for any} \qquad t\ge 0, \] and any starting point $z\ge 0$.
\end{lemma} \begin{proof} Recall that there exists a functional $v_t(s,\lambda, \overline{K})$ which is the unique solution of the backward differential equation \eqref{backward}, which determines the law of the reweighted process $(Z_t e^{-\overline{K}_t}, t\ge 0)$ as follows, \begin{equation}\label{defvtslambdaK} \mathbb{E}_{(z,x)}\Big[\exp\Big\{-\lambda Z_t e^{-\overline{K}_t}\Big\}\Big]=\mathbb{E}^{(e)}\Big[\exp\Big\{-zv_t(0,\lambda e^{-x},\overline{K})\Big\}\Big]. \end{equation} If we let $\lambda$ go to $0$ in the previous identity, we deduce \[ \mathbb{P}_z\big(Z_t<\infty\big)=\underset{\lambda\downarrow 0}{\lim}\,\mathbb{E}_{(z,x)}\Big[\exp\Big\{-\lambda Z_t e^{-\overline{K}_t}\Big\}\Big] =\mathbb{E}^{(e)}\left[\exp\left\{-z\lim_{\lambda \downarrow 0}v_t(0, \lambda e^{-x}, \overline{K})\right\}\right], \] where the limits are justified by monotonicity and dominated convergence. This implies that the process $Z$ is conservative if and only if \[ \lim_{\lambda \downarrow 0}v_t(0,\lambda e^{-x},\overline{K})=0, \] for every positive $t$. Let us recall that the function $\Phi(\lambda)$ equals $\lambda^{-1}\psi_0(\lambda)$ and observe that $\Phi(0)=\psi'_0(0+)=0$ (see \eqref{WHbranching}). Since $\psi_0$ is convex and non-negative, we deduce that $\Phi$ is increasing.
Finally, if we solve equation \eqref{backward} with $\psi_0(\lambda)=\lambda \Phi(\lambda)$, we get $$v_t(s,\lambda e^{-x},\overline{K})=\lambda e^{-x} \exp\left\{-\int_s^t \Phi(e^{-\overline{K}_r}v_t(r,\lambda e^{-x},\overline{K}))\mathrm{d} r\right\}.$$ Therefore, since $\Phi$ is increasing and $\Phi(0)=0$, we have $$0\leq\underset{\lambda\rightarrow 0}{\lim}v_t(0,\lambda e^{-x},\overline{K})=\underset{\lambda\rightarrow 0}{\lim}\lambda e^{-x}\exp\left\{-\int_0^t \Phi(e^{-\overline{K}_r}v_t(r,\lambda e^{-x},\overline{K}))\mathrm{d} r\right\}\leq \underset{\lambda\rightarrow 0}{\lim}\lambda e^{-x}=0,$$ implying that $Z$ is conservative. \end{proof} \begin{proof}[Proof of Proposition \ref{martingquenched}] By It\^o's formula, we have \[ Z_te^{-\overline{K}_{t}}= Z_0+\int_0^t e^{-\overline{K}_s}\sqrt{2\gamma^2Z_s} \mathrm{d} B^{(b)}_s+\int_0^t\int_{(0,\infty)} \int_0^{Z_{s-}} ze^{-\overline{K}_{s-}}\widetilde{N}^{(b)}(\mathrm{d} s, \mathrm{d} z, \mathrm{d} u), \] $\mathbb{P}$-a.s. Then, for $\mathbb{P}^{(e)}$ almost every $w^{(e)}$, we consider $$Y_t^{w^{(e)}}=Y_0^{w^{(e)}}+M_t^{w^{(e)}}+N_t^{w^{(e)}}+W_t^{w^{(e)}}\qquad \mathbb{P}^{(b)}\textrm{-a.s.,}$$ for any $t\geq 0$, where $Y_t^{w^{(e)}}=Z_t(w^{(e)},.)\exp(-\overline{K}_t(w^{(e)}))$ and \begin{eqnarray*} M_t^{w^{(e)}}&=&\int_0^t e^{-\overline{K}_s(w^{(e)})}\sqrt{2\gamma^2Z_s} \mathrm{d} B^{(b)}_s, \\ N_t^{w^{(e)}}&=&\int_0^t\int_{(0,1]} \int_0^{Z_{s-}} ze^{-\overline{K}_{s-}(w^{(e)})}\widetilde{N}^{(b)}(\mathrm{d} s, \mathrm{d} z, \mathrm{d} u),\\ W_t^{w^{(e)}}&=&\int_0^t\int_{[1,\infty)} \int_0^{Z_{s-}} ze^{-\overline{K}_{s-}(w^{(e)})}\widetilde{N}^{(b)}(\mathrm{d} s, \mathrm{d} z, \mathrm{d} u), \end{eqnarray*} are $(\Omega^{(b)}, {\mathcal F}^{(b)}, \mathbb{P}^{(b)})$ local martingales.
Let us now check that $Y^{w^{(e)}}$ is a $(\Omega^{(b)}, {\mathcal F}^{(b)}, \mathbb{P}^{(b)})$ martingale by proving that the first moment of its supremum on $[0,T]$ is finite, for any $T>0$. We consider the first time $\tau_N$ when $Y^{w^{(e)}}$ goes beyond $N$. Using $\vert x \vert \leq 1+x^2$ and that $Y^{w^{(e)}}$ is bounded before the stopping time $\tau_N$, we get $$ \mathbb{E}\left[\sup_{s < t\wedge \tau_N} Y_s^{w^{(e)}}\right] \leq 2+ \mathbb{E}\left[\sup_{s< t\wedge \tau_N} \Big(M_s^{w^{(e)}}\Big)^2\right] + \mathbb{E}\left[\sup_{s < t\wedge \tau_N} \Big(N_s^{w^{(e)}}\Big)^2\right] + \mathbb{E}\left[\sup_{s < t\wedge \tau_N} \Big\vert W_s^{w^{(e)}}\Big\vert \right] .$$ Using that $\sup_{[0,T]} \vert \overline{K}(w^{(e)})\vert<\infty$, we obtain that $e^{-\overline{K}(w^{(e)})}$ is bounded before time $T$ (and the bound does not depend on $N$). Thanks to Doob's inequality applied to the stopped martingales, there exists $C_7$ (which does not depend on $N$) such that for any $t\leq T$, \begin{eqnarray*} \mathbb{E}\left[\sup_{s< t\wedge \tau_N } \Big(M_s^{w^{(e)}}\Big)^2 \right] &\leq & C_7 \int_0^t\mathbb{E}\left[\sup_{s< t\wedge \tau_N} Y_s^{w^{(e)}}\right]\mathrm{d} s, \\ \mathbb{E}\left[\sup_{s < t\wedge \tau_N} \Big(N_s^{w^{(e)}}\Big)^2 \right] &\leq & C_7\int_{[0,1]} z^2\mu(\mathrm{d} z) \int_0^t \mathbb{E}\left[\sup_{s< t\wedge \tau_N} Y_s^{w^{(e)}}\right]\mathrm{d} s, \\ \mathbb{E}\left[\sup_{s < t\wedge \tau_N} \Big\vert W_s^{w^{(e)}} \Big\vert \right] &\leq & C_7\int_{[1,\infty)} z\mu(\mathrm{d} z) \int_0^t \mathbb{E}\left[\sup_{s < t\wedge \tau_N} Y_s^{w^{(e)}}\right]\mathrm{d} s. \end{eqnarray*} Then Gronwall's Lemma ensures that there exists $C(T)$ such that for any $t\leq T$ and $N\geq 1$, $\mathbb{E}\left[\sup_{s < t\wedge \tau_N} Y_s^{w^{(e)}}\right]\leq C(T)$. Letting $N$ go to infinity completes the proof. \end{proof} {\bf Acknowledgements:} {\sl The authors are very grateful to the anonymous referees for their thorough review.
This work was partially funded by the Chair ``Mod\'elisation Math\'ematique et Biodiversit\'e'' of VEOLIA-Ecole Polytechnique-MNHN-F.X and ANR ABIM 16-CE40-0001. JCP acknowledges support from the Royal Society and CONACyT (CB-250590). This work was concluded whilst JCP was on sabbatical leave holding a David Parkin Visiting Professorship at the University of Bath. He gratefully acknowledges the kind hospitality of the Department and University. } \end{document}
\begin{document} \title{Branching Random Walks in Time Inhomogeneous Environments} \author{Ming Fang\thanks{School of Mathematics, University of Minnesota, 206 Church St. SE, Minneapolis, MN 55455, USA. The work of this author was partially supported by NSF grant DMS-0804133} \and Ofer Zeitouni\thanks{School of Mathematics, University of Minnesota, 206 Church St. SE, Minneapolis, MN 55455, USA and Faculty of Mathematics, Weizmann Institute, POB 26, Rehovot 76100, Israel. The work of this author was partially supported by NSF grant DMS-0804133, a grant from the Israel science foundation, and the Taubman professorial chair at the Weizmann Institute. }} \date{November 18, 2011} \maketitle {\abstract We study the maximal displacement of branching random walks in a class of time inhomogeneous environments. Specifically, binary branching random walks with Gaussian increments will be considered, where the variances of the increments change over time macroscopically. We find the asymptotics of the maximum up to an $O_P(1)$ (stochastically bounded) error, and focus on the following phenomena: the profile of the variance matters, both to the leading (velocity) term and to the logarithmic correction term, and the latter exhibits a phase transition.} \section{Introduction} Branching random walks and their maxima have been studied mostly in space-time homogeneous environments (deterministic or random). For work on the deterministic homogeneous case of relevance to our study we refer to \cite{Bramson78_BBM} and the recent \cite{Addario-BerryReed09} and \cite{Aidekon10}. For the random environment case, a sample of relevant papers is \cite{GantertMullerPopovVachkovskaia10, GrevenHollander92, HeilNakashimaYoshida11, HuYoshida09, Liu07, MachadoPopov00, Nakashima11}. As is well documented in these references, under reasonable hypotheses, in the homogeneous case the maximum grows linearly, with a logarithmic correction, and is tight around its median.
Branching random walks are also studied under some space inhomogeneous environments. A sample of those papers are \cite{BerestyckiBrunetHarrisHarris10, DoeringRoberts11, EnglanderHarrisKyprianou10, GitHarrisHarris07, HarrisHarris09, HarrisWilliams96, Koralov11}. Recently, Bramson and Zeitouni \cite{BramsonZeitouni09} and Fang \cite{Fang11} showed that the maxima of branching random walks, recentered around their median, are still tight in time inhomogeneous environments satisfying certain uniform regularity assumptions, in particular, the laws of the increments can vary with respect to time and the walks may have some local dependence. A natural question is to ask, in that situation, what is the asymptotic behavior of the maxima. Similar questions were discussed in the context of branching Brownian motion using PDE techniques, see e.g. Nolen and Ryzhik \cite{NolenRyzhik09}, using the fact that the distributions of the maxima satisfy the KPP equation whose solution exhibits a traveling wave phenomenon. In all these models, while the linear traveling speed of the maxima is a relatively easy consequence of the large deviation principle, the evaluation of the second order correction term, like the ones in Bramson \cite{Bramson78_BBM} and Addario-Berry and Reed \cite{Addario-BerryReed09}, is more involved and requires a detailed analysis of the walks; to our knowledge, it has so far only been performed in the time homogeneous case. Our goal is to start exploring the time inhomogeneous setup. As we will detail below, the situation, even in the simplest setting, is complex and, for example, the order in which inhomogeneity presents itself matters, both in the leading term and in the correction term. In this paper, in order to best describe the phenomenon discussed above, we focus on the simplest case of binary branching random walks where the diffusivity of the particles takes two distinct values as a function of time. We now describe the setup in detail. 
For $\sigma>0$, let $N(0,\sigma^2)$ denote the normal distributions with mean zero and variance $\sigma^2$. Let $n$ be an integer, and let $\sigma_1^2,\sigma_2^2>0$ be given. \ We start the system with one particle at location 0 at time 0. Suppose that $v$ is a particle at location $S_v$ at time $k$. Then $v$ dies at time $k+1$ and gives birth to two particles $v1$ and $v2$, and each of the two offspring ($\{vi,i=1,2\}$) moves independently to a new location $S_{vi}$ with the increment $S_{vi}-S_{v}$ independent of $S_v$ and distributed as $N(0,\sigma_1^2)$ if $k<n/2$ and as $N(0,\sigma_2^2)$ if $n/2\leq k<n$. Let $\mathds{D}_n$ denote the collection of all particles at time $n$. For a particle $v\in \mathds{D}_n$ and $i<n$, we let $v^i$ denote the $i$th level ancestor of $v$, that is the unique element of $\mathds{D}_i$ on the geodesic connecting $v$ and the root. We study the maximal displacement $M_n=\max_{v\in\mathds{D}_n}S_v$ at time $n$, for $n$ large. \footnote[1]{Since one can understand a branching random walk as a `competition' between branching and random walk, one may get similar results by fixing the variance and changing the branching rate with respect to time.} It will be clear that the analysis extends to a wide class of inhomogeneities with finitely many values and `macroscopic' change (similar to the description in the previous paragraph), and to the Galton-Watson setup. A universal result that will allow for continuous change of the variances is more complicated, is expected to present different correction terms, and is the subject of further study. In order to describe the results in a concise way, we recall the notation $O_P(1)$ for stochastically boundedness. That is, if a sequence of random variables $R_n$ satisfies $R_n=O_P(1)$, then, for any $\epsilon>0$, there exists an $M$ such that $P(|R_n|> M)<\epsilon$ for all $n$. 
An interesting feature of $M_n$ is that the asymptotic behavior depends on the order relation between $\sigma_1^2$ and $\sigma_2^2$. That is, while \begin{equation}\label{uni_max} M_n=\left(\sqrt{2\log 2}\; {\sigma}_{\mbox{\rm eff}}\right)n-\beta \frac{ {\sigma}_{\mbox{\rm eff}}}{\sqrt{2\log 2}}\log n +O_P(1) \end{equation} is true for some choice of $ {\sigma}_{\mbox{\rm eff}}$ and $\beta$, $ {\sigma}_{\mbox{\rm eff}}$ and $\beta$ take different expressions for different ordering of $\sigma_1$ and $\sigma_2$. Note that \eqref{uni_max} is equivalent to say that the sequence $\{M_n-Med(M_n)\}_n$ is tight and $$Med(M_n)=\left(\sqrt{2\log 2}\; {\sigma}_{\mbox{\rm eff}}\right)n-\beta \frac{ {\sigma}_{\mbox{\rm eff}}}{\sqrt{2\log 2}}\log n +O(1),$$ where $Med(X)=\sup\{x:P(X\leq x)\leq \frac{1}{2}\}$ is the median of the random variable $X$. In the following, we will use superscripts to distinguish different cases, see \eqref{eqvar}, \eqref{inc} and \eqref{dec} below. A special and well-known case is when $\sigma_1=\sigma_2=\sigma$, i.e., all the increments are i.i.d.. In that case, the maximal displacement is described as follows: \begin{equation}\label{eqvar} M_n^==\left(\sqrt{2\log 2}\;\sigma\right) n-\frac{3}{2}\frac{\sigma}{\sqrt{2\log 2}}\log n+O_P(1); \end{equation} the proof can be found in \cite{Addario-BerryReed09}, and its analog for branching Brownian motion can be found in \cite{Bramson78_BBM} using probabilistic techniques and \cite{Lau85} using PDE techniques. This homogeneous case corresponds to \eqref{uni_max} with $ {\sigma}_{\mbox{\rm eff}}=\sigma$ and $\beta=\frac{3}{2}$. In this paper, we deal with the extension to the inhomogeneous case. The main results are the following two theorems. 
\begin{theorem}\label{th_inc} When $\sigma_1^2<\sigma_2^2$ (increasing variances), the maximal displacement is \begin{equation}\label{inc} M_n^{\uparrow}=\left(\sqrt{(\sigma_1^2+\sigma_2^2)\log 2}\right)n-\frac{\sqrt{\sigma_1^2+\sigma_2^2}}{4\sqrt{\log 2}}\log n+O_P(1), \end{equation} which is of the form \eqref{uni_max} with $ {\sigma}_{\mbox{\rm eff}}=\sqrt{\frac{\sigma_1^2+\sigma_2^2}{2}}$ and $\beta=\frac{1}{2}$. \end{theorem} \begin{theorem}\label{th_dec} When $\sigma_1^2>\sigma_2^2$ (decreasing variances), the maximal displacement is \begin{equation}\label{dec} M_n^{\downarrow}=\frac{\sqrt{2\log 2}(\sigma_1+\sigma_2)}{2}n-\frac{3(\sigma_1+\sigma_2)}{2\sqrt{2\log 2}}\log n+O_P(1), \end{equation} which is of the form \eqref{uni_max} with $ {\sigma}_{\mbox{\rm eff}}=\frac{\sigma_1+\sigma_2}{2}$ and $\beta=3$. \end{theorem} For comparison purpose, it is useful to introduce the model of $2^n$ independent (inhomogeneous) random walks with centered independent Gaussian variables, with variance profile as above. Denote by $M_n^{\text{ind}}$ the maximal displacement at time $n$ in this model. Because of the complete independence, it can be easily shown that \begin{equation}\label{ind} M_n^{\text{ind}}=\left(\sqrt{(\sigma_1^2+\sigma_2^2)\log 2}\right)n-\frac{\sqrt{\sigma_1^2+\sigma_2^2}}{4\sqrt{\log 2}}\log n+O_P(1) \end{equation} for all choices of $\sigma_1^2$ and $\sigma_2^2$. Thus, in this case, $ {\sigma}_{\mbox{\rm eff}}=\sqrt{(\sigma_1^2+\sigma_2^2)/2}$ and $\beta=1/2$. Thus, the difference between $M_n^=$ and $M_n^{\text{ind}}$ when $\sigma_1^2=\sigma_2^2$ lies in the logarithmic correction. As commented (for branching Brownian motion) in \cite{Bramson78_BBM}, the different correction is due to the intrinsic dependence between particles coming from the branching structure in branching random walks. 
Another related quantity is the sub-maximum obtained by a greedy algorithm, which only considers the maximum over all decendents of the maximal particle at time $n/2$. Applying \eqref{eqvar}, we find that the output of such algorithm is \begin{eqnarray}\label{subMax} &&\left(\sqrt{2\log 2}\sigma_1\frac{n}{2}-\frac{3}{2}\frac{\sigma_1}{\sqrt{2\log 2}}\log \frac{n}{2}\right)+\left(\sqrt{2\log 2}\sigma_2\frac{n}{2}-\frac{3}{2}\frac{\sigma_2}{\sqrt{2\log 2}}\log \frac{n}{2}\right)+O_P(1)\nonumber\\ &&=\frac{\sqrt{2\log 2}(\sigma_1+\sigma_2)}{2}n-\frac{3(\sigma_1+\sigma_2)}{2\sqrt{2\log 2}}\log n+O_P(1). \end{eqnarray} Comparing \eqref{subMax} with the theorems, we see that this algorithm yields the maximum up to an $O_P(1)$ error in the case of decreasing variances (compare with \eqref{dec}) but not in the case of increasing variances (compare with \eqref{inc}) or of homogeneous increments (compare with \eqref{eqvar}). A few comparisons are now in order. \begin{itemize} \item[1.] When the variances are increasing, $M_n^{\uparrow}$ is asymptotically (up to $O_P(1)$ error) the same as $M_n^{\text{ind}}$, which is exactly the same as the maximum of independent homogeneous random walks with effective variance $\frac{\sigma_1^2+\sigma_2^2}{2}$. \item[2.] When the variances are decreasing, $M_n^{\downarrow}$ shares the same asymptotic behavior with the sub-maximum \eqref{subMax}. In this case, a greedy strategy yields the approximate maximum. \item[3.] With the same set of diffusivity constants $\{\sigma_1^2,\sigma_2^2\}$ but different order, $M_n^{\uparrow}$ is greater than $M_n^{\downarrow}$. \item[4.] While the leading order terms in \eqref{eqvar}, \eqref{inc} and \eqref{dec} are continuous in $\sigma_1$ and $\sigma_2$ (they coincide upon setting $\sigma_1=\sigma_2$), the logarithmic corrections exhibit a phase transition phenomenon (they are not the same when we let $\sigma_1=\sigma_2$). 
\end{itemize} We will prove Theorem \ref{th_inc} in Section \ref{sec_inc} and Theorem \ref{th_dec} in Section \ref{sec_dec}. Before proving the theorems, we state a tightness result. \begin{lemma}\label{lem_tight} The sequences $\{M_n^{\uparrow}-\text{Med}(M_n^{\uparrow})\}_n$ and $\{M_n^{\downarrow}-\text{Med}(M_n^{\downarrow})\}_n$ are tight. \end{lemma} This lemma follows from Bramson and Zeitouni \cite{BramsonZeitouni09} or Fang \cite{Fang11}. One can write down a similar recursion for the distribution of $M_n$ to the one in those two papers, except for different subscripts and superscripts. Since the argument there depends only on one step of the recursion, it applies here directly without any change and leads to the tightness result in the lemma. A note on notation: throughout, we use $C$ to denote a generic positive constant, possibly depending on $\sigma_1$ and $\sigma_2$, that may change from line to line. \section{Increasing Variances: $\sigma_1^2<\sigma_2^2$} \label{sec_inc} In this section, we prove Theorem \ref{th_inc}. We begin in Subsection \ref{sec_fluc} with a result on the fluctuation of an inhomogeneous random walk. In the short Subsection \ref{sec_ldp} we provide large-deviations based heuristics for our results. While it is not used in the actual proof, it explains the leading term of the maximal displacement and gives hints about the derivation of the logarithmic correction term. The actual proof of Theorem \ref{th_inc} is provided in subsection \ref{sec_inc_proof}. \subsection{Fluctuation of an Inhomogeneous Random Walk}\label{sec_fluc} Let \begin{equation}\label{inRW} S_n=\sum_{i=1}^{n/2}X_i+\sum_{i=n/2+1}^{n}Y_i \end{equation} be an inhomogeneous random walk, where $X_i\sim N(0,\sigma_1^2)$, $Y_i\sim N(0,\sigma_2^2)$, and $X_i$ and $Y_i$ are independent.
Define \begin{equation}\label{intPos} s_{k,n}(x)=\left\{ \begin{aligned} &\frac{\sigma_1^2k}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}}x, &\;\;0\leq k\leq \frac{n}{2},\\ &\frac{\sigma_1^2\frac{n}{2}+\sigma_2^2(k-\frac{n}{2})} {(\sigma_1^2+\sigma_2^2)\frac{n}{2}} x,& \;\; \frac{n}{2}\leq k\leq n, \end{aligned} \right. \end{equation} and \begin{equation}\label{bigFluc} f_{k,n}=\left\{\begin{aligned} &c_fk^{2/3},\;&\;k\leq n/2,\\ &c_f(n-k)^{2/3},\;& \; n/2<k\leq n.\end{aligned}\right. \end{equation} As the following lemma says, conditioned on $\{S_n=x\}$, the path of the walk $S_n$ follows $s_{k,n}(x)$ with fluctuation less than or equal to $f_{k,n}$ at level $k\leq n$. \begin{lemma}\label{lem_bigfluc} There exists a constant $C>0$ (independent of $n$) such that $$P(S_n(k)\in [s_{k,n}(S_n)-f_{k,n},s_{k,n}(S_n)+f_{k,n}]\;\text{ for all }\;0\leq k\leq n|S_n) \geq C,$$ where $S_n(k)$ is the sum of the first $k$ summands of $S_n$, i.e., $$S_n(k)=\left\{\begin{aligned} &\sum_{i=1}^{k}X_i,\;&\;k\leq n/2,\\ &\sum_{i=1}^{n/2}X_i+\sum_{i=n/2+1}^{k}Y_i,\;& \; n/2<k\leq n.\end{aligned}\right.$$ \end{lemma} \begin{proof} Let $\tilde{S}_{k,n}=S_n(k)-s_{k,n}(S_n)$. Then, similar to Brownian bridge, one can check that $\tilde{S}_{k,n}$ are independent of $S_n$. To see this, first note that the covariance between $\tilde{S}_{k,n}$ and $S_n$ is $$Cov(\tilde{S}_{k,n},S_n)=E\tilde{S}_{k,n}S_n-E\tilde{S}_{k,n}ES_n =E\tilde{S}_{k,n}S_n,$$ since $ES_n=0$ and $E\tilde{S}_{k,n}=0$. For $k\leq n/2$, $$\tilde{S}_{k,n}=\left(1-\frac{\sigma_1^2k}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}}\right) \sum_{i=1}^{k}X_i-\frac{\sigma_1^2k}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}} \sum_{i=k+1}^{n/2}X_i-\frac{\sigma_1^2k}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}} \sum_{i=n/2+1}^{n}Y_i.$$ Expand $\tilde{S}_{k,n}S_n$, take expectation, and then all terms vanish except for those containing $X_i^2$ and $Y_i^2$.
Taking into account that $EX_i^2=\sigma_1^2$ and $EY_i^2=\sigma_2^2$, one has \begin{eqnarray}\label{cov} &&Cov(\tilde{S}_{k,n},S_n)=E\tilde{S}_{k,n}S_n\nonumber\\ &=&\left(1-\frac{\sigma_1^2k}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}}\right) \sum_{i=1}^{k}EX_i^2 -\frac{\sigma_1^2k}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}} \sum_{i=k+1}^{n/2}EX_i^2 -\frac{\sigma_1^2k}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}} \sum_{i=n/2+1}^{n}EY_i^2 \nonumber\\ &=&\left(1-\frac{\sigma_1^2k}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}}\right)k\sigma_1^2 -\frac{\sigma_1^2k}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}} (n/2-k)\sigma_1^2 -\frac{\sigma_1^2k}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}} (n/2)\sigma_2^2\nonumber\\ &=&0. \end{eqnarray} For $n/2< k\leq n$, one can calculate $Cov(\tilde{S}_{k,n},S_n)=0$ similarly as follows. First, $$\tilde{S}_{k,n}=\frac{\sigma_2^2(n-k)}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}} \sum_{i=1}^{n/2}X_i+\frac{\sigma_2^2(n-k)}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}} \sum_{i=n/2+1}^{k}Y_i- \left(1-\frac{\sigma_2^2(n-k)}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}}\right) \sum_{i=k+1}^{n}Y_i.$$ Then, expanding $\tilde{S}_{k,n}S_n$ and taking expectation, one has \begin{eqnarray*} &&Cov(\tilde{S}_{k,n},S_n)=E\tilde{S}_{k,n}S_n\nonumber\\ &=& \frac{\sigma_2^2(n-k)}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}} \sum_{i=1}^{n/2}EX_i^2+\frac{\sigma_2^2(n-k)}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}} \sum_{i=n/2+1}^{k}EY_i^2- \left(1-\frac{\sigma_2^2(n-k)}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}}\right) \sum_{i=k+1}^{n}EY_i^2\\ &=& \frac{\sigma_2^2(n-k)}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}} (n/2)\sigma_1^2+\frac{\sigma_2^2(n-k)}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}} (k-n/2)\sigma_2^2- \left(1-\frac{\sigma_2^2(n-k)}{(\sigma_1^2+\sigma_2^2)\frac{n}{2}}\right) (n-k)\sigma_2^2\\ &=&0 \end{eqnarray*} Therefore, $\tilde{S}_{k,n}$ are independent of $S_n$ since they are Gaussian. 
Using this independence, \begin{eqnarray*} &&P(S_n(k)\in [s_{k,n}(S_n)-f_{k,n},s_{k,n}(S_n)+f_{k,n}]\;\text{ for all }\;0\leq k\leq n|S_n)\\ &=&P(\tilde{S}_{k,n}\in [-f_{k,n},f_{k,n}]\;\text{ for all }\;0\leq k\leq n|S_n)\\ &=&P(\tilde{S}_{k,n}\in [-f_{k,n},f_{k,n}]\;\text{ for all }\;0\leq k\leq n). \end{eqnarray*} By calculation similar to \eqref{cov}, $\tilde{S}_{k,n}$ is a Gaussian sequence with mean zero and variance $k\sigma_1^2\frac{\left((\sigma_1^2+\sigma_2^2)n-2\sigma_1^2k\right)} {(\sigma_1^2+\sigma_2^2)n}$ for $k\leq n/2$ and $(n-k)\sigma_2^2 \frac{\left((\sigma_1^2+\sigma_2^2)n-2\sigma_2^2(n-k)\right)}{(\sigma_1^2+\sigma_2^2)n}$ for $n/2<k\leq n$. The above quantity is $$ 1-P(|\tilde{S}_{k,n}|>f_{k,n},\;\text{for some}\;0\leq k\leq n)\geq 1-\sum_{k=1}^{n}P(|\tilde{S}_{k,n}|>f_{k,n}).$$ Using a standard Gaussian estimate, e.g.\ \cite[Theorem 1.4]{Durrett05}, the above quantity is at least $$1-\sum_{k=1}^{n} \frac{c_0}{\sqrt{k}}e^{-\frac{f_{k,n}^2}{k}c_1} \geq 1-2\sum_{k=1}^{\infty}\frac{c_0}{\sqrt{k}}e^{-c_f^2c_1k^{1/3}}:=C>0$$ where $c_0,c_1$ are constants depending on $\sigma_1$ and $\sigma_2$, and $C>0$ can be realized by choosing the constant $c_f$ large. This proves the lemma. \end{proof} \subsection{Sample Path Large Deviation Heuristics}\label{sec_ldp} We explain (without giving a proof) what we expect for the order $n$ term of $M_n^{\uparrow}$, by giving a large deviation argument. The exact proof will be postponed to the next subsection. Consider the same $S_n$ as defined in \eqref{inRW} and a function $\phi(t)$ defined on $[0,1]$ with $\phi(0)=0$.
A sample path large deviation result, see \cite[Theorem 5.1.2]{DemboZeitouni98_LDP}, tells us that the probability for $S_{\lfloor rn\rfloor}$ to be roughly $\phi(r)n$ for $0\leq r\leq s\leq 1$ is roughly $e^{-nI_s(\phi)}$, where \begin{equation}\label{rateFun} I_s(\phi)=\int_0^s\Lambda^*_r(\dot{\phi}(r))dr, \end{equation} $\dot{\phi}(r)=\frac{d}{dr}\phi(r)$, and $\Lambda^*_r(x)=\frac{x^2}{2\sigma_1^2}$, for $0\leq r\leq 1/2$, and $\frac{x^2}{2\sigma_2^2}$, for $1/2<r\leq 1$. A first moment argument would yield a necessary condition for a walk that roughly follows the path $\phi(r)n$ to exist among the branching random walks, \begin{equation}\label{LDPshort} I_s(\phi)\leq s\log 2,\;\;\mbox{ for all }\; 0\leq s\leq 1. \end{equation} This is equivalent to \begin{equation}\label{LDP} \left\{ \begin{aligned} &\int_0^s \frac{\dot{\phi}^2(r)}{2\sigma_1^2}dr\leq s\log 2, &\;\;0\leq s\leq \frac{1}{2},\\ &\int_0^{\frac{1}{2}}\frac{\dot{\phi}^2(r)}{2\sigma_1^2}dr+\int_{\frac{1}{2}}^s \frac{\dot{\phi}^2(r)}{2\sigma_2^2}dr\leq s\log 2,& \;\; \frac{1}{2}\leq s\leq 1. \end{aligned} \right. \end{equation} Otherwise, if \eqref{LDPshort} is violated for some $s_0$, i.e., $I_{s_0}(\phi)> s_0\log 2$, there will be no path following $\phi(r)n$ to $\phi(s_0)n$, since the expected number of such paths is $2^{sn}e^{-nI_s(\phi)}=e^{-(I_s(\phi)-s\log 2)n}$, which decreases exponentially. Our goal is then to maximize $\phi(1)$ under the constraints \eqref{LDP}. By Jensen's inequality and convexity, one can prove that it is equivalent to maximizing $\phi(1)$ subject to \begin{equation}\label{optimization} \frac{\phi^2(1/2)}{\sigma_1^2}\leq \frac{1}{2} \log 2,\; \frac{\phi^2(1/2)}{\sigma_1^2}+\frac{(\phi(1)-\phi(1/2))^2}{\sigma_2^2}\leq \log 2. \end{equation} Note that the above argument does not necessarily require $\sigma_1^2<\sigma_2^2$.
Under the assumption that $\sigma_1^2<\sigma_2^2$, we can solve the optimization problem with the optimal curve \begin{equation}\label{inc_LDP_curve} \phi(s)=\left\{ \begin{aligned} &\frac{2\sigma_1^2\sqrt{\log 2}}{\sqrt{(\sigma_1^2+\sigma_2^2)}}s, &\;\;0\leq s\leq \frac{1}{2},\\ &\frac{2\sigma_1^2\sqrt{\log 2}}{\sqrt{(\sigma_1^2+\sigma_2^2)}}\frac{1}{2}+\frac{2\sigma_2^2\sqrt{\log 2}}{\sqrt{(\sigma_1^2+\sigma_2^2)}}(s-\frac{1}{2}),& \;\; \frac{1}{2}\leq s\leq 1. \end{aligned} \right. \end{equation} If we plot this optimal curve and the suboptimal curve leading to \eqref{subMax} as in Figure \ref{incFig}, it is easy to see that the ancestor at time $n/2$ of the actual maximum at time $n$ is not a maximum at time $n/2$, since $\frac{2\sigma_1^2\sqrt{\log 2}}{\sqrt{(\sigma_1^2+\sigma_2^2)}}<\sqrt{2\sigma_1^2\log 2}$. A further rigorous calculation as in the next subsection shows that, along the optimal curve \eqref{inc_LDP_curve}, the branching random walks have an exponential decay of correlation. Thus a fluctuation between $n^{1/2}$ and $n$ that is larger than the typical fluctuation of a random walk is admissible. This is consistent with the naive observation from Figure \ref{incFig}. This kind of behavior also occurs in the independent random walks model, explaining why $M_n^{\uparrow}$ and $M_n^{\mbox{ind}}$ have the same asymptotical expansion up to an $O(1)$ error, see \eqref{inc} and \eqref{ind}. \begin{figure} \caption{Dashed: maximum at time $n$ of BRW starting from maximum at time $n/2$.\newline \quad Solid: maximum at time $n$ of BRW starting from time $0$.} \label{incFig} \end{figure} \subsection{Proof of Theorem \ref{th_inc}}\label{sec_inc_proof} With Lemma \ref{lem_bigfluc} and the observation from Section \ref{sec_ldp}, we can now provide a proof of Theorem \ref{th_inc}, applying the first and second moments method to the appropriate sets. 
In the proof, we use $S_n$ to denote the walk defined by \eqref{inRW} and $S_k$ to denote the sum of the first $k$ summands in $S_n$. \begin{proof}[Proof of Theorem \ref{th_inc}] \quad {\it Upper bound.} Let $a_n=\sqrt{(\sigma_1^2+\sigma_2^2)\log 2}n-\frac{\sqrt{\sigma_1^2+\sigma_2^2}}{4\sqrt{\log 2}}\log n$. Let $N_{1,n}=\sum_{v\in\mathds{D}_n}1_{\{S_v>a_n+y\}}$ be the number of particles at time $n$ whose displacements are greater than $a_n+y$. Then $$EN_{1,n}=2^nP(S_n\geq a_n+y)\leq c_2e^{-c_3y}$$ where $c_2$ and $c_3$ are constants independent of $n$ and the last inequality is due to the fact that $S_n\sim N(0,\frac{\sigma_1^2+\sigma_2^2}{2}n)$. So we have, by Chebyshev's inequality, \begin{equation}\label{incUB} P(M_n^{\uparrow}>a_n+y)=P(N_{1,n}\geq 1)\leq EN_{1,n}\leq c_2e^{-c_3y}. \end{equation} Therefore, this probability can be made as small as we wish by choosing a large $y$. {\it Lower bound.} Consider the walks which are at $s_n\in I_n=[a_n,a_n+1]$ at time $n$ and follow $s_{k,n}(s_n)$, defined by \eqref{intPos}, at intermediate times with fluctuation bounded by $f_{k,n}$, defined by \eqref{bigFluc}. Let $I_{k,n}(x)=[s_{k,n}(x)-f_{k,n},s_{k,n}(x)+f_{k,n}]$ be the `admissible' interval at time $k$ given $S_n=x$, and let $$N_{2,n}=\sum_{v\in\mathds{D}_n}1_{\{S_v\in I_n,S_{v^k}\in I_{k,n}(S_v) \mbox{ for all } 0\leq k\leq n\}}$$ be the number of such walks. By Lemma \ref{lem_bigfluc}, \begin{eqnarray}\label{incLBfirst} EN_{2,n}&=&2^nP(S_n\in I_n,S_n(k)\in I_{k,n}(S_n) \mbox{ for all } 0\leq k\leq n ) \nonumber\\ &=&2^nE(1_{\{S_n\in I_n\}}P(S_n(k)\in I_{k,n}(S_n) \mbox{ for all } 0\leq k\leq n |S_n))\nonumber \\ &\geq &2^nCP(S_n\in I_n)\geq c_4. \end{eqnarray} Next, we bound the second moment $EN_{2,n}^2$.
By considering the location of any pair $v_1,v_2\in\mathds{D}_n$ of particles at time $n$ and at their common ancestor $v_1\wedge v_2$, we have \begin{eqnarray*} &&EN_{2,n}^2=E\sum_{v_1,v_2\in\mathds{D}_n}1_{\{S_{v_i}\in I_n,\;S_{(v_i)^j}\in I_{j,n}(S_{v_i}) \mbox{ for all } 0\leq j\leq n,i=1,2\}}\\ &=& \sum_{k=0}^n\sum_{\substack{v_1,v_2\in\mathds{D}_n\\ v_1\wedge v_2\in \mathds{D}_k}}E1_{\{S_{v_i}\in I_n,\;S_{(v_i)^j}\in I_{j,n}(S_{v_i}) \mbox{ for all } 0\leq j\leq n,i=1,2\}}\\ &\leq & \sum_{k=0}^n\sum_{\substack{v_1,v_2\in\mathds{D}_n\\ v_1\wedge v_2\in \mathds{D}_k}}P(S_{v_1}\in I_n,\;S_{(v_1)^j}\in I_{j,n}(S_{v_1}) \mbox{ for all } 0\leq j\leq n)\\ &&\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\cdot P(S_{v_2}-S_{v_1\wedge v_2}\in [x-s_{k,n}(x)-f_{k,n},x-s_{k,n}(x)+f_{k,n}],x\in I_n), \end{eqnarray*} where we use the independence between $S_{v_2}-S_{v_1\wedge v_2}$ and $S_{(v_1)^j}$ in the last inequality. The last expression (double sum) in the above display is \begin{eqnarray*} && \sum_{k=0}^n2^{2n-k}P(S_n\in I_n,S_n(j)\in I_{j,n}(S_n) \mbox{ for all } 0\leq j\leq n )\\ &&\;\;\;\;\;\;\;\cdot P(S_n-S_n(k) \in[x-s_{k,n}(x)-f_{k,n},x-s_{k,n}(x)+f_{k,n}],x\in I_n)\\ &\leq& EN_{2,n}\sum_{k=0}^n2^{n-k}P(S_n-S_n(k) \in[x-s_{k,n}(x)-f_{k,n},x-s_{k,n}(x)+f_{k,n}],x\in I_n). \end{eqnarray*} The above probabilities can be estimated separately when $k\leq n/2$ and $n/2<k\leq n$. For $k\leq n/2$, $S_n-S_n(k)\sim N(0,\frac{n}{2}(\sigma_1^2+\sigma_2^2)-k\sigma_1^2)$. Thus, \begin{eqnarray*} && P(S_n-S_n(k) \in[x-s_{k,n}(x)-f_{k,n},x-s_{k,n}(x)+f_{k,n}],x\in I_n)\\ &\leq & 2f_{k,n}\frac{1}{\sqrt{\pi ((\sigma_1^2+\sigma_2^2)n-2k\sigma_1^2)}} \exp\left(-\frac{\left((1-\frac{2\sigma_1^2k}{(\sigma_1^2+\sigma_2^2)n})a_n-f_{k,n} \right)^2} {(\sigma_1^2+\sigma_2^2)n-2k\sigma_1^2}\right)\\ &\leq & 2^{-n+\frac{2\sigma_1^2}{\sigma_1^2+\sigma_2^2}k+o(k)}. \end{eqnarray*} For $n/2<k\leq n$, $S_n-S_n(k)\sim N(0,(n-k)\sigma_2^2)$.
Thus, \begin{eqnarray*} && P(S_n-S_n(k) \in[x-s_{k,n}(x)-f_{k,n},x-s_{k,n}(x)+f_{k,n}],x\in I_n)\\ &\leq & 2f_{k,n}\frac{1}{\sqrt{2\pi(n-k)\sigma_2^2}} \exp\left(-\frac{\left(\frac{2\sigma_2^2(n-k)} {(\sigma_1^2+\sigma_2^2)n}a_n-f_{k,n}\right)^2} {2(n-k)\sigma_2^2}\right)\\ &\leq & 2^{-\frac{2\sigma_2^2}{\sigma_1^2+\sigma_2^2}(n-k)+o(n-k)}. \end{eqnarray*} Therefore, \begin{equation}\label{incLBsec} EN_{2,n}^2\leq EN_{2,n}\left(\sum_{k=0}^{n/2}2^{\frac{\sigma_1^2-\sigma_2^2}{\sigma_1^2+\sigma_2^2} k+o(k)}+\sum_{k=n/2+1}^n2^{\frac{\sigma_1^2-\sigma_2^2}{\sigma_1^2+\sigma_2^2}(n-k) +o(n-k)}\right)\leq c_5EN_{2,n}, \end{equation} where $c_5=2\sum_{k=0}^{\infty}2^{\frac{\sigma_1^2-\sigma_2^2}{\sigma_1^2+\sigma_2^2} k+o(k)}$. By the Cauchy--Schwarz inequality, \begin{equation}\label{incLB} P(M_n^{\uparrow}\geq a_n)\geq P(N_{2,n}>0)\geq \frac{(EN_{2,n})^2}{EN_{2,n}^2}\geq c_4/c_5>0. \end{equation} The upper bound \eqref{incUB} and lower bound \eqref{incLB} imply that there exists a large enough constant $y_0$ such that $$P(M_n^{\uparrow}\in [a_n,a_n+y_0])\geq \frac{c_4}{2c_5}>0.$$ Lemma \ref{lem_tight} tells us that the sequence $\{M_n^{\uparrow}-\text{Med}(M_n^{\uparrow})\}_n$ is tight, so $M_n^{\uparrow}=a_n+O(1)$ a.s. That completes the proof. \end{proof} \section{Decreasing Variances: $\sigma_1^2>\sigma_2^2$}\label{sec_dec} We will again separate the proof of Theorem \ref{th_dec} into two parts, the lower bound and the upper bound. Fortunately, we can apply \eqref{eqvar} directly to get a lower bound so that we can avoid repeating the second moment argument. However, we do need to reproduce (the first moment argument) part of the proof of \eqref{eqvar} in order to get an upper bound. \subsection{An Estimate for Brownian Bridge} Toward this end, we need the following analog of Bramson \cite[Proposition 1']{Bramson78_BBM}.
Bramson's original proof used the Gaussian density and reflection principle of continuous time Brownian motion, which also hold for the discrete time version. The proof extends without much effort to yield the following estimate for the Brownian bridge $B_k-\frac{k}{n}B_n$, where $B_n$ is a random walk with standard normal increments. \begin{lemma}\label{lem_bb} Let $$L(k)=\left\{\begin{array}{ll} 0 & \mbox{ if } k=0,n, \\ 100\log k & \mbox{ if } k=1,\dots,n/2,\\ 100\log (n-k) & \mbox{ if } k=n/2,\dots,n-1. \end{array}\right.$$ Then, there exists a constant $C$ such that, for all $y>0$, $$P(B_k-\frac{k}{n}B_n\leq L(k)+y \mbox{ for } 0\leq k \leq n)\leq \frac{C(1+y)^2}{n}.$$ \end{lemma} The coefficient $100$ before $\log$ is chosen large enough to be suitable for later use, and is not crucial in Lemma \ref{lem_bb}. \subsection{Proof of Theorem \ref{th_dec}} Before proving the theorem, we discuss the equivalent optimization problems \eqref{LDP} and \eqref{optimization} under our current setting $\sigma_1^2>\sigma_2^2$. It can be solved by employing the optimal curve \begin{equation}\label{dec_LDP_curve} \phi(s)=\left\{ \begin{aligned} &\sqrt{2\log 2}\sigma_1s, &\;\;0\leq s\leq \frac{1}{2},\\ &\sqrt{2\log 2}\sigma_1\frac{1}{2}+\sqrt{2\log 2}\sigma_2(s-\frac{1}{2}),& \;\; \frac{1}{2}\leq s\leq 1. \end{aligned} \right. \end{equation} If we plot the curve $\phi(s)$ and the suboptimal curve leading to \eqref{subMax} as in Figure \ref{decFig}, these two curves coincide with each other up to order $n$. Figure \ref{decFig} seems to indicate that the maximum at time $n$ for the branching random walk starting from time $0$ comes from the maximum at time $n/2$. As will be shown rigorously, if a particle at time $n/2$ is left significantly behind the maximum, its descendants will not be able to catch up by time $n$.
The difference between Figure \ref{incFig} and Figure \ref{decFig} explains the difference in the logarithmic correction between $M_n^{\uparrow}$ and $M_n^{\downarrow}$. \begin{figure} \caption{Dash: both the optimal path to the maximum at time $n$ and the path leading to the maximum of BRW starting from the maximum at time $n/2$. Solid: the path to the maximal (rightmost) descendent of a particle at time $n/2$ that is significantly behind the maximum then.} \label{decFig} \end{figure} \begin{proof}[Proof of Theorem \ref{th_dec}] {\it Lower Bound.} For each $i=1,2$, the formula \eqref{eqvar} implies that there exist $y_i$ (possibly negative) such that, for branching random walk at time $n/2$ with variance $\sigma_i^2$, $$P\left(M_{n/2}>\sqrt{2\log 2}\sigma_i\frac{n}{2}-\frac{3\sigma_i}{2\sqrt{2\log 2}}\log \frac{n}{2}+y_i\right)\geq \frac{1}{2}.$$ By considering a branching random walk starting from a particle at time $n/2$, whose location is greater than $\sqrt{2\log 2}\sigma_1\frac{n}{2}-\frac{3\sigma_1}{2\sqrt{2\log 2}}\log \frac{n}{2}+y_1$, and applying the above display with $i=1$ and $2$,we know that \begin{equation}\label{decLB} P\left(M_n^{\downarrow}>\frac{\sqrt{2\log 2}(\sigma_1+\sigma_2)}{2}n-\frac{3(\sigma_1+\sigma_2)}{2\sqrt{2\log 2}}\log \frac{n}{2}+y_1+y_2\right)\geq \frac{1}{4}. \end{equation} {\it Upper Bound.} We will use a first moment argument to prove that there exists a constant $y$ (large enough) such that \begin{equation}\label{decUB} P\left(M_n^{\downarrow}>\frac{\sqrt{2\log 2}(\sigma_1+\sigma_2)}{2}n-\frac{3(\sigma_1+\sigma_2)}{2\sqrt{2\log 2}}\log \frac{n}{2}+y\right)<\frac{1}{10}. \end{equation} Similarly to the last argument in the proof of Theorem \ref{th_inc}, the upper bound \eqref{decUB} and the lower bound \eqref{decLB}, together with the tightness result from Lemma \ref{lem_tight}, prove Theorem \ref{th_dec}. So it remains to show \eqref{decUB}. 
Toward this end, we define a polygonal line (piecewise linear curve) leading to $\frac{\sqrt{2\log 2}(\sigma_1+\sigma_2)}{2}n-\frac{3(\sigma_1+\sigma_2)}{2\sqrt{2\log 2}}\log \frac{n}{2}$ as follows: for $1\leq k\leq n/2$, $$M(k)= \frac{k}{n/2}(\sqrt{2\log 2}\sigma_1\frac{n}{2} -\frac{3\sigma_1}{2\sqrt{2\log 2}}\log \frac{n}{2});$$ and for $n/2+1\leq k\leq n$, $$M(k)=M(n/2)+ \frac{k-n/2}{n/2}(\sqrt{2\log 2}\sigma_2\frac{n}{2} -\frac{3\sigma_2}{2\sqrt{2\log 2}}\log \frac{n}{2}).$$ Note that $\frac{k}{n}\log n\leq \log k$ for $k\leq n$. Also define $$f(k)=\left\{\begin{array}{ll} y & k=0,\frac{n}{2},n,\\ y+\frac{5\sigma_1}{2\sqrt{2\log 2}}\log k & 1\leq k\leq n/4, \\ y+\frac{5\sigma_1}{2\sqrt{2\log 2}}\log (\frac{n}{2}-k) & \frac{n}{4}\leq k\leq \frac{n}{2}-1, \\ y+\frac{5\sigma_2}{2\sqrt{2\log 2}}\log (k-\frac{n}{2}) & \frac{n}{2}+1\leq k\leq \frac{3n}{4},\\ y+\frac{5\sigma_2}{2\sqrt{2\log 2}}\log (n-k) & \frac{3n}{4}\leq k\leq n-1. \end{array}\right.$$ We will use $f(k)$ to denote the allowed offset (deviation) from $M(k)$ in the following argument. The probability on the left side of \eqref{decUB} is equal to $$P(\exists v\in\mathds{D}_n \mbox{ such that } S_v>M(n)+y).$$ For each $v\in\mathds{D}_n$, we define $\tau_v=\inf\{k:S_{v^k}>M(k)+f(k)\}$; then \eqref{decUB} is implied by \begin{equation}\label{decUBsum} \sum_{k=1}^nP(\exists v\in\mathds{D}_n \mbox{ such that } S_v>M(n)+y,\tau_v=k)<1/10. \end{equation} We will split the sum into four regimes: $[1,n/4]$, $[n/4,n/2]$, $[n/2,3n/4]$ and $[3n/4,n]$, corresponding to the four parts of the definition of $f(k)$. The sum over each regime, corresponding to the events in the four pictures in Figure \ref{figFour}, can be made small. The first two are the discrete analog of the upper bound argument in Bramson \cite{Bramson78_BBM}. We will present a complete proof for the first two cases, since the argument is not too long and the argument (not only the result) is used in the latter two cases. 
\begin{figure} \caption{Four small probability events. Dash line: $M(k)$. Solid curve: $M(k)+f(k)$.\newline Polygonal line: a random walk.} \label{figFour} \end{figure} (\romannumeral1). When $1\leq k \leq n/4$, we have, by the Chebyshev's inequality, \begin{eqnarray*} &&P(\exists v\in\mathds{D}_n \mbox{ such that } S_v>M(n)+y,\tau_v=k)\\ &\leq & P(\exists v\in\mathds{D}_k,\mbox{ such that } S_v>M(k)+f(k))\leq E\sum_{v\in\mathds{D}_k}1_{\{S_v>M(k)+f(k)\}}. \end{eqnarray*} The above expectation is less than or equal to \begin{eqnarray}\label{endptEst} \frac{C2^k} {\sqrt{k}}e^{-\frac{(M(k)+f(k))^2}{2\sigma_1^2}} &\leq & \frac{C2^k}{\sqrt{k}}\exp\left(-\frac{\left(\sqrt{2\log 2}\sigma_1k +\frac{\sigma_1}{\sqrt{2\log 2}}\log k+y\right)^2}{2k\sigma_1^2}\right)\nonumber\\ &\leq & Ck^{-3/2} e^{-\frac{\sqrt{2\log 2}}{\sigma_1}y}. \end{eqnarray} Summing these upper bounds over $k\in[1,n/4]$, we obtain that \begin{equation}\label{quad1} \sum_{k=1}^{n/4}P(\exists v\in\mathds{D}_n \mbox{ such that } S_v>M(n)+y,\tau_v=k) \leq Ce^{-\frac{\sqrt{2\log 2}}{\sigma_1}y}\sum_{k=1}^{\infty} k^{-3/2}. \end{equation} The right side of the above inequality can be made as small as we wish, say at most $\frac{1}{100}$, by choosing $y$ large enough. (\romannumeral2). When $n/4\leq k\leq n/2$, we again have, by Chebyshev's inequality, \begin{eqnarray*} &&P(\exists v\in\mathds{D}_n \mbox{ such that } S_v>M(n)+y,\tau_v=k)\\ &\leq & P(\exists v\in\mathds{D}_k,\mbox{ such that } S_v>M(k)+f(k), \mbox{ and } S_{v^i}\leq M(i)+f(i)\mbox{ for }1\leq i\leq k)\\ &\leq & E\sum_{v\in\mathds{D}_k}1_{\{S_v>M(k)+f(k), \mbox{ and } S_{v^i}\leq M(i)+f(i)\mbox{ for }1\leq i< k\}}. 
\end{eqnarray*} Letting $S_k$ be a copy of the random walks before time $n/2$, then the above expectation is equal to \begin{eqnarray}\label{quad2Prob} &&2^kP(S_k>M(k)+f(k), \mbox{ and } S_i\leq M(i)+f(i)\mbox{ for }1\leq i< k)\nonumber\\ &\leq & 2^kP(S_k>M(k)+f(k), \mbox{ and } \frac{1}{\sigma_1} (S_i-\frac{i}{k}S_k)\leq \frac{1}{\sigma_1}(f(i)-\frac{i}{k}f(k))\mbox{ for }1\leq i\leq k).\nonumber\\ && \end{eqnarray} $\frac{1}{\sigma_1}(S_i-\frac{i}{k}S_k)$ is a discrete Brownian bridge and is independent of $S_k$. Because of this independence, the above quantity is less than or equal to $$2^kP(S_k>M(k)+f(k))\cdot P( \frac{1}{\sigma_1}(S_i-\frac{i}{k}S_k)\leq \frac{1}{\sigma_1}(f(i)-\frac{i}{k}f(k))\mbox{ for }1\leq i< k).$$ The first probability can be estimated similarly to \eqref{endptEst}, \begin{eqnarray}\label{quad2est1} &&P(S_k>M(k)+f(k))\nonumber\\ &\leq & \frac{C}{\sqrt{k}}\exp\left(-\frac{\left(\sqrt{2\log 2}\sigma_1 k -\frac{3\sigma_1}{2\sqrt{2\log 2}}\log k+ \frac{5\sigma_1}{2\sqrt{2\log 2}}\log (\frac{n}{2}-k) +y\right)^2}{2k\sigma_1^2}\right)\nonumber\\ &\leq & C2^{-k}k(\frac{n}{2}-k)^{-5/2}e^{-\frac{\sqrt{2\log 2}}{\sigma_1}y}. \end{eqnarray} To estimate the second probability, we first estimate $\frac{1}{\sigma_1}(f(i)-\frac{i}{k}f(k))$. It is less than or equal to $\frac{1}{\sigma_1}f(i)=\frac{y}{\sigma_1}+\frac{5}{2\sqrt{2\log 2}}\log i$ for $i\leq k/2<n/4$, and, for $k/2\leq i< k$, it is less than or equal to \begin{eqnarray*} &&\frac{5}{2\sqrt{2\log 2}}\log(n/2-i)-\frac{i}{k}\frac{5}{2\sqrt{2\log 2}}\log(n/2-k)+\frac{y}{\sigma_1}(1-\frac{i}{k})\\ &=&\frac{5}{2\sqrt{2\log 2}}\left(\log(n/2-i)-\log(n/2-k)+\frac{k-i}{k}\log(n/2-k)\right)+\frac{y}{\sigma_1} (1-\frac{i}{k})\\ &\leq &\frac{5}{2\sqrt{2\log 2}}\left(\log(k-i)+\frac{k-i}{k}\log k\right)+\frac{y}{\sigma_1}\leq 100\log(k-i)+\frac{y}{\sigma_1}. 
\end{eqnarray*} Therefore, applying Lemma \ref{lem_bb}, we have \begin{eqnarray}\label{bbEst} &&P\left(\frac{1}{\sigma_1} (S_i-\frac{i}{k}S_k)\leq \frac{1}{\sigma_1}(f(i)-\frac{i}{k}f(k))\mbox{ for }1\leq i\leq k\right)\nonumber\\ &\leq & P\left(\frac{1}{\sigma_1} (S_i-\frac{i}{k}S_k)\leq 100\log i+\frac{y}{\sigma_1}\mbox{ for }1\leq i\leq k/2,\mbox{ and }\frac{1}{\sigma_1} (S_i-\frac{i}{k}S_k)\leq\right. \nonumber\\ &&\mbox{ } \left.100\log(k-i)+\frac{y}{\sigma_1} \mbox{ for } k/2\leq i\leq k\right)\leq C(1+y)^2/k, \end{eqnarray} where $C$ is independent of $n$, $k$ and $y$. By all the above estimates \eqref{quad2Prob}, \eqref{quad2est1} and \eqref{bbEst}, \begin{equation}\label{quad2} \sum_{k=n/4}^{n/2}P(\exists v\in\mathds{D}_n \mbox{ such that } S_v>M(n)+y,\tau_v=k) \leq C(1+y)^2e^{-\frac{\sqrt{2\log 2}}{\sigma_1}y}\sum_{k=1}^{\infty} k^{-5/2}. \end{equation} This can again be made as small as we wish, say at most $\frac{1}{100}$, by choosing $y$ large enough. (\romannumeral3). When $n/2\leq k\leq 3n/4$, we have \begin{eqnarray*} &&P(\exists v\in\mathds{D}_n \mbox{ such that } S_v>M(n)+y,\tau_v=k)\\ &\leq & P(\exists v\in\mathds{D}_k\mbox{ such that } S_v>M(k)+f(k) \mbox{ and } S_{v^i}\leq M(i)+f(i)\mbox{ for }1\leq i\leq n/2)\\ &\leq & E\sum_{v\in\mathds{D}_k}1_{\{S_v>M(k)+f(k), \mbox{ and } S_{v^i}\leq M(i)+f(i)\mbox{ for }1\leq i< n/2\}}. \end{eqnarray*} The above expectation is, by conditioning on $\{S_{v^{n/2}}=M(n/2)+x\}$, \begin{eqnarray}\label{quad3Int} &&2^k\int_{-\infty}^yP(S_{k-n/2}'>M(k)-M(n/2)+f(k)-x)\cdot\nonumber\\ &&\;\;\;\;\;\;\cdot P(S_i-\frac{i}{n/2}S_{n/2}\leq f(i)-\frac{i}{k}x\mbox{ for }1\leq i< n/2)\cdot\nonumber\\ && \;\;\;\;\;\;\cdot p_{S_{n/2}}(M(n/2)+x) dx, \end{eqnarray} where $S$ and $S'$ are two copies of the random walks before and after time $n/2$, respectively, and $p_{S_{n/2}}(x)$ is the density of $S_{n/2}\sim N(0,\frac{\sigma_1^2n}{2})$. We then estimate the three factors of the integrand separately.
The first one, which is similar to \eqref{endptEst}, is bounded above by \begin{eqnarray*} &&P(S_{k-n/2}'>M(k)-M(n/2)+f(k)-x)\leq \frac{C}{\sqrt{k-n/2}} e^{-\frac{\left(M(k)-M(n/2)+f(k)-x\right)^2}{2(k-n/2)\sigma_2^2}}\\ &\leq & C 2^{-(k-n/2)}(k-\frac{n}{2})^{-3/2}e^{-\frac{\sqrt{2\log 2}}{\sigma_2}(y-x)}. \end{eqnarray*} The second one, which is similar to \eqref{bbEst}, is estimated using Lemma \ref{lem_bb}, \begin{equation}\label{bbEstHalfway} P(S_i-\frac{i}{n/2}S_{n/2}\leq f(i)-\frac{i}{k}x\mbox{ for }1\leq i< n/2)\leq C(1+2y-x)^2/n. \end{equation} The third one is simply the normal density \begin{equation}\label{endptEstHalfway} p_{S_{n/2}}(M(n/2)+x)=\frac{C}{\sqrt{n}}e^{-\frac{(M(n/2)+x)^2}{n\sigma_1^2}}\leq C 2^{-n/2}n e^{-\frac{\sqrt{2\log 2}}{\sigma_1}x}. \end{equation} Therefore, the integral term \eqref{quad3Int} is no more than $$C(k-n/2)^{-3/2}e^{-\frac{\sqrt{2\log 2}}{\sigma_2}y}\int_{-\infty}^{y}(1+2y-x)^2e^{(\frac{\sqrt{2\log 2}}{\sigma_2}-\frac{\sqrt{2\log 2}}{\sigma_1})x}dx,$$ which is less than or equal to $C(1+y)^2e^{-\frac{\sqrt{2\log 2}}{\sigma_1}y}(k-n/2)^{-3/2}$ since $\sigma_2<\sigma_1$. Summing these upper bounds together, we obtain that \begin{equation}\label{quad3} \sum_{k=n/2}^{3n/4}P(\exists v\in\mathds{D}_n \mbox{ such that } S_v>M(n)+y,\tau_v=k)\leq C(1+y)^2e^{-\frac{\sqrt{2\log 2}}{\sigma_1}y}\sum_{k=1}^{\infty}k^{-3/2}. \end{equation} This can again be made as small as we wish, say at most $\frac{1}{100}$, by choosing $y$ large enough. (\romannumeral4). When $3n/4< k\leq n$, we have \begin{eqnarray*} &&P(\exists v\in\mathds{D}_n \mbox{ such that } S_v>M(n)+y,\tau_v=k)\\ &\leq & P(\exists v\in\mathds{D}_k\mbox{ such that } S_v>M(k)+f(k), \mbox{ and } S_{v^i}\leq M(i)+f(i)\mbox{ for }1\leq i<k)\\ &\leq & E\sum_{v\in\mathds{D}_k}1_{\{S_v>M(k)+f(k), \mbox{ and } S_{v^i}\leq M(i)+f(i),\mbox{ for }1\leq i< k\}}. 
\end{eqnarray*} The above expectation is, by conditioning on $\{S_{v^{n/2}}=M(n/2)+x\}$, \begin{eqnarray*} &&2^k\int_{-\infty}^yP(S_{k-n/2}'>M(k)-M(n/2)+f(k)-x,\\ &&\;\;\;\;\;\;\;\;\;\;\;\;\;\;\;\; S_i'<M(i)-M(n/2)+f(i)-x, \mbox{ for }n/2<i\leq k)\\ &&\;\;\;\;\;\;\cdot P(S_i-\frac{i}{n/2}S_{n/2}\leq f(i)-\frac{i}{k}x\mbox{ for }1\leq i< n/2)\cdot p_{S_{n/2}}(M(n/2)+x) dx \end{eqnarray*} where $S$ and $S'$ are copies of the random walks before and after time $n/2$, respectively. The second and third probabilities in the integral are already estimated in \eqref{bbEstHalfway} and \eqref{endptEstHalfway}. It remains to bound the first probability. Similar to \eqref{quad2Prob}, it is bounded above by \begin{eqnarray*} &&P\left(S_{k-n/2}'>M(k)-M(n/2)+f(k)-x, S_i'<M(i)-M(n/2)+f(i)-x,\right.\\ &&\;\;\;\;\; \mbox{ for }n/2<i\leq k\Big)\leq C(1+2y-x)^2e^{-\frac{\sqrt{2\log 2}}{\sigma_2}(2y-x)}(n-k)^{-5/2}. \end{eqnarray*} With these estimates, we obtain in this case, in the same way as in (\romannumeral3), that \begin{equation}\label{quad4} \sum_{k=3n/4}^{n}P(\exists v\in\mathds{D}_n \mbox{ such that } S_v>M(n)+y,\tau_v=k)\leq C(1+y)^2e^{-\frac{\sqrt{2\log 2}}{\sigma_1}y}\sum_{k=1}^{\infty}k^{-5/2}. \end{equation} This can again be made as small as we wish, say at most $\frac{1}{100}$, by choosing $y$ large enough. Summing \eqref{quad1}, \eqref{quad2}, \eqref{quad3} and \eqref{quad4}, then \eqref{decUBsum} and thus \eqref{decUB} follow. This concludes the proof of Theorem \ref{th_dec}. \end{proof} \section{Further Remarks} We state several immediate generalizations and open questions related to binary branching random walks in time-inhomogeneous environments where the diffusivity of the particles takes more than two distinct values as a function of time and changes macroscopically. Results involving finitely many monotone variances can be obtained similarly to the results on two variances in the previous sections.
Specifically, let $k\geq 2$ (constant) be the number of inhomogeneities, $\{\sigma_i^2>0:i=1,\dots,k\}$ be the set of variances and $\{t_i>0:i=1,\dots,k\}$, satisfying $\sum_{i=1}^{k}t_i=1$, denote the portions of time when $\sigma_i^2$ governs the diffusivity. Consider binary branching random walk up to time $n$, where the increments over the time interval $[\sum_{i=1}^{j-1}t_in,\sum_{i=1}^jt_in)$ are $N(0,\sigma_j^2)$ for $1\leq j\leq k$. When $\sigma_1^2<\sigma_2^2<\dots<\sigma_k^2$ are strictly increasing, by an argument similar to that in Section \ref{sec_inc}, the maximal displacement at time $n$, which behaves asymptotically like the maximum for independent random walks with effective variance $\sum_{i=1}^kt_i\sigma_i^2$, is $$\sqrt{2(\log 2)\sum_{i=1}^kt_i\sigma_i^2}n-\frac{1}{2}\frac{\sqrt{\sum_{i=1}^kt_i\sigma_i^2}} {\sqrt{2\log 2}}\log n+O_P(1).$$ When $\sigma_1^2>\sigma_2^2>\dots>\sigma_k^2$ are strictly decreasing, by an argument similar to that in Section \ref{sec_dec}, the maximal displacement at time $n$, which behaves like the sub-maximum chosen by the previous greedy strategy (see \eqref{subMax}), is $$\sqrt{2\log 2}(\sum_{i=1}^kt_i\sigma_i)n-\frac{3}{2}(\sum_{i=1}^k\frac{\sigma_i}{\sqrt{2\log 2}})\log n+O_P(1).$$ Results on other inhomogeneous environments are open and are subjects of further study. We only discuss some of the non rigorous intuition in the rest of this section. In the finitely many variances case, when $\{\sigma_i^2:i=1,\dots,k\}$ are not monotone in $i$, the analysis of maximal displacement could be case-by-case and a mixture of the previous monotone cases. The leading order term is surely a result of the optimization problem \eqref{LDPshort} from the large deviation. But, the second order term may depend on the fluctuation constraints of the path leading to the maximum, as in the monotone case. One could probably find hints on the fluctuation from the optimal curve solving \eqref{LDPshort}. 
In some segments, the path may behave like Brownian bridge (as in the decreasing variances case), and in some segments, the path may behave like a random walk (as in the increasing variances case). In the case where the number of different variances increases as the time $n$ increases, analysis seems more challenging. A special case is when the variances are decreasing, for example, at time $0\leq i\leq n$ the increment of the walk is $N(0,\sigma_{i,n}^2)$ with $\sigma_{i,n}^2=2-i/n$. The heuristics (from the finitely many decreasing variances case) seem to indicate that the path leading to the maximum at time $n$ cannot be left `significantly' behind the maxima at all intermediate levels. This path is a `rightmost' path. From the intuition of \cite{FangZeitouni10}, if the allowed fluctuation is of order $n^{\alpha}$ ($\alpha<1/2$), then the correction term is of order $n^{1-2\alpha}$, instead of $\log n$ in \eqref{uni_max}. However, the allowed fluctuation from the intermediate maxima, implicitly imposed by the variances, becomes complicated as the difference between the consecutive variances decreases to zero. A good understanding of this fluctuation may be a key to finding the correction term. \end{document}
\begin{document} \title{Singular perturbation analysis of a regularized MEMS model\thanks{\funding{This work was funded by the Fonds zur F\"orderung der wissenschaftlichen Forschung (FWF) via the doctoral school ``Dissipation and Dispersion in Nonlinear PDEs'' (project number W1245).}}} \begin{abstract} Micro-Electro Mechanical Systems (MEMS) are defined as very small structures that combine electrical and mechanical components on a common substrate. Here, the electrostatic-elastic case is considered, where an elastic membrane is allowed to deflect above a ground plate under the action of an electric potential, whose strength is proportional to a parameter $\lambda$. Such devices are commonly described by a parabolic partial differential equation that contains a singular nonlinear source term. The singularity in that term corresponds to the so-called ``touchdown'' phenomenon, where the membrane establishes contact with the ground plate. Touchdown is known to imply the non-existence of steady-state solutions and blow-up of solutions in finite time. We study a recently proposed extension of that canonical model, where such singularities are avoided due to the introduction of a regularizing term involving a small ``regularization'' parameter $\varepsilon$. Methods from dynamical systems and geometric singular perturbation theory, in particular the desingularization technique known as ``blow-up'', allow for a precise description of steady-state solutions of the regularized model, as well as for a detailed resolution of the resulting bifurcation diagram. The interplay between the two principal model parameters $\eps$ and $\lambda$ is emphasized; in particular, the focus is on the singular limit as both parameters tend to zero.
\end{abstract} \begin{keywords} Micro-Electro Mechanical Systems, touchdown, boundary value problem, regularization, bifurcation diagram, saddle-node bifurcation, geometric singular perturbation theory, blow-up method \end{keywords} \begin{AMS} 34B16, 34C23, 34E05, 34E15, 34L30, 35K67, 74G10 \end{AMS} \section{Introduction}\label{sec:intro} Micro-Electro Mechanical Systems (MEMS) are very small structures that combine electrical and mechanical components on a common substrate to perform various tasks. In particular, electrostatic-elastic devices have found important applications in drug delivery~\cite{Ts07}, micro pumps~\cite{Iv08}, optics~\cite{Do04}, and micro-scale actuators~\cite{Wa09}. In these devices, an elastic membrane is allowed to deflect above a ground plate under the action of an electric potential $V$, where the distance between plate and membrane is typically much smaller than their diameter; see \cref{fig:eem}. When a critical voltage threshold $V^\ast$ (``\emph{pull-in voltage}'') is reached, a phenomenon called \emph{touchdown} or \emph{snap-through} can occur, {\it i.e.}, the membrane touches the ground plate, which may cause a short circuit. \begin{figure} \caption{Schematic representation of an electrostatic-elastic MEMS device. The elastic membrane deflects towards the ground plate when an electric potential $V$ is applied (dashed curve). If $V$ exceeds a critical value $V^*$ (the so-called ``pull-in voltage''), the membrane touches the ground plate, causing touchdown (dotted line).} \label{fig:eem} \end{figure} The physical forces acting between the elastic components of the device -- which can, {\it e.g.}, be of Casimir or Van der Waals type -- may lead to \emph{stiction}, which causes complications in reverting the process in order to return to the original state. 
In the canonical mathematical models proposed in the literature~\cite{GW05, LY07, Pe02, PB02}, such systems are described by partial differential equations involving the Laplacian or the bi-Laplacian and a singular source term. The touchdown phenomenon leads to non-existence of steady states, or blow-up of solutions in finite time, or both. Hence, no information on post-touchdown configurations can be captured by these models. Recently, an extension of the canonical model has been proposed, where the introduction of a potential mimicking the effect of a thin insulating layer above the ground plate prevents physical contact between the elastic membrane and the substrate \cite{Li14}. Mathematically, a nonlinear source term that depends on a small ``regularization'' parameter $\eps$ is added to the partial differential equation. The resulting regularized models have been studied in relevant work by Lindsay {\it et al.}; see {\it e.g.} \cite{Li14,Li15} for the membrane case, while the case where the elastic structure is modelled as a beam is discussed in \cite{LL12,Li14,Li15}. In one spatial dimension, the governing equations are given by \begin{align} \begin{split} u_t &=u_{xx}-\frac\lambda{(1+u)^2} + \frac{\lambda \eps^{m-2}}{(1+u)^{m}} \\ & \quad \text{for }x\in [-1,1],\text{ with }u=0\text{ when }x=\mp 1\quad (\text{membrane}) \label{LiLap} \end{split} \end{align} and \begin{align} \begin{split} u_t &=-u_{xxxx}-\frac\lambda{(1+u)^2} + \frac{\lambda \eps^{m-2}}{(1+u)^{m}} \\ & \quad \text{for }x\in [-1,1], \text{ with }u = \partial_n u = 0\text{ when }x=\mp 1\quad (\text{beam}), \label{LiBiLap} \end{split} \end{align} respectively. Physically speaking, the variable $u$ denotes the (dimensionless) deflection of the surface, while the parameter $\lambda$ is proportional to the square of the applied voltage $V$. 
The regularizing term $\lambda \eps^{m-2}(1+u)^{-m}$ with $\eps>0$ and $m>2$, as introduced in \cite{Li14}, accounts for various physical effects that are of particular relevance in the vicinity of the ground plate, {\it i.e.}, at $u=-1$; that term induces a potential which simulates the effect of an insulating layer whose non-dimensional width is proportional to $\eps$. In the following, we will consider $m=4$, which corresponds to a Casimir effect; alternative choices describe other physical phenomena and can be studied in a similar fashion. Here, we focus on steady-state solutions of the Laplacian case corresponding to a membrane; see \Cref{LiLap}: \begin{align}\label{eq-2} u_{xx}=\frac\lambda{(1+u)^2}\bigg[1-\frac{\eps^2}{(1+u)^2}\bigg]\qquad\text{ for }x\in[-1,1], \text{ with }u=0\text{ when }x=\mp1. \end{align} For literature on the bi-Laplacian case, \Cref{LiBiLap}, we refer to~\cite{Li14,Li16,Li15}. \begin{remark}\label{rem:even} Due to the symmetry of the boundary value problem \cref{eq-2} under the transformation $x\mapsto -x$, all solutions thereof must be even; the proof is straightforward, and is omitted here. \end{remark} Before addressing the novel features of the regularized model which are the focus of the present article, we briefly summarize the main properties of the non-regularized case corresponding to $\eps=0$ in \cref{eq-2}, which are well understood~\cite{Pe02,PB02}. The numerically computed bifurcation diagram associated to~\cref{eq-2} for $\eps=0$ is shown in \cref{fig:Lin0:a}; it contains two branches of steady-state solutions, where the lower branch is stable and the upper one is unstable. The upper branch limits on the $\Vert u \Vert_2^2$-axis in the point $B=\left( 0, \frac{2}{3} \right)$, which plays a crucial role in the bifurcation diagram of the regularized problem. The two branches are separated by a fold point that is located at $\lambda=\lambda^\ast$. 
For $\lambda > \lambda^\ast$, steady-state solutions of~\cref{LiLap} cease to exist, with the transient dynamics leading to a blow-up in finite time. Sample solutions along the two branches are plotted in \cref{fig:Lin0:b}; in addition, the piecewise linear singular solution corresponding to the point $B$ is shown. That singular solution undergoes touchdown at $x=0$. \begin{figure} \caption{(a) Bifurcation diagram of the membrane model, \Cref{eq-2} \label{fig:Lin0:a} \label{fig:Lin0:b} \label{fig:Lin0} \end{figure} The inclusion of the $\eps$-dependent regularizing term, where $0<\eps\ll 1$, considerably alters the structure of the bifurcation diagram in \cref{fig:Lin0:a}. The principal new feature is the emergence of a third branch of stable steady-state solutions, resulting in the $S$-shaped curve shown in \cref{fig:Lin:a}; that diagram was established numerically and via matched asymptotics in~\cite{Li14}. In addition to the fact that the fold point at $\lambda^\ast$ now depends on $\eps$, there exists another fold point at $\lambda_\ast$ -- which is also $\eps$-dependent -- such that, for $\lambda_\ast < \lambda < \lambda^\ast$, there are three branches of steady states, the middle one of which is unstable. Solutions on that newly emergent branch are in fact bounded below by $u=-1+\eps$. With increasing $\lambda$, solutions exhibit a growing ``flat'' portion close to $u=-1+\eps$; cf.~the solution labeled $d$ in~\cref{fig:Lin:b}. For $\lambda < \lambda_\ast$ and $\lambda > \lambda^\ast$, there exists a unique stable steady state; in particular, and in contrast to the non-regularized case, numerical simulations indicate that a stable steady state exists for every value of $\lambda>0$. 
\begin{figure} \caption{(a) Numerically computed bifurcation diagram of the one-dimensional membrane model, \Cref{eq-2} \label{fig:Lin:a} \label{fig:Lin:b} \label{fig:Lin} \end{figure} For very small values of $\eps$, the bifurcation diagram in \cref{fig:Lin:a} is difficult to resolve, even numerically. These difficulties are particularly prominent in the vicinity of the upper branch and the fold point at $\lambda_\ast(\eps)$; see, {\it e.g.},~\Cref{eq-normu} and \cref{rem-slope} for details. The highly singular nature of the bifurcation diagram in \cref{fig:Lin:a}, as well as the influence of the regularization parameter $\eps$ on the structure thereof, are the principal features of interest to us here. \\ In the present work, we will give a detailed geometric analysis of \Cref{eq-2} for small values of $\varepsilon$; in particular, we will prove that the (numerically computed) bifurcation diagram, as shown in \cref{fig:Lin:a}, is correct. Moreover, we will explain the underlying structure of that diagram. In summary, our main result can be expressed as follows: \begin{theorem}\label{thm-1} For $\eps\in(0,\eps_0)$, with $\eps_0 > 0$ sufficiently small, and \mbox{$\lambda\in[0,\Lambda]$}, with $\Lambda=\OO(1)$ positive and fixed, the bifurcation diagram for the boundary value problem \cref{eq-2} has the following properties: \begin{itemize} \item[(i)] In the $(\lambda, \Vert u \Vert_2^2)$-plane, the set of solutions to \cref{eq-2} corresponds to an $S$-shaped curve emanating from the origin. The curve consists of three branches -- lower, middle, and upper -- that are separated by two fold points which are located at $\lambda=\lambda_\ast(\varepsilon)$ and $\lambda=\lambda^\ast(\varepsilon)$. Specifically, there exists one steady-state solution to \cref{eq-2} for $\lambda<\lambda_\ast(\varepsilon)$ and $\lambda>\lambda^\ast(\varepsilon)$, while for $\lambda_\ast(\varepsilon)<\lambda< \lambda^\ast(\varepsilon)$, there exist three steady-state solutions. 
\item[(ii)] Along the lower and upper branches in \cref{fig:Lin:a}, $\Vert u \Vert_2^2$ is a strictly increasing function of $\lambda$, whereas $\Vert u \Vert_2^2$ is a decreasing function of $\lambda$ along the middle branch. \item[(iii)] The function $\lambda_\ast(\varepsilon)$ is $C^1$ in $\varepsilon$ and smooth as a function of $(\varepsilon, \ln \varepsilon)$, and admits the expansion \begin{align*} \lambda_\ast(\varepsilon) = \frac34\eps-\bigg(\sqrt{\frac32}+\frac98\bigg)\eps^2\ln\eps+ \OO(\eps^2). \end{align*} Moreover, $\lambda^\ast(\varepsilon)$ is smooth in $\varepsilon$ and admits the expansion \begin{align*} \lambda^\ast (\varepsilon) = \lambda_0^\ast + \lambda_1^\ast \varepsilon^2 + \mathcal{O}(\varepsilon^4), \end{align*} with appropriately chosen coefficients $\lambda_0^\ast$ and $\lambda_1^\ast$. \item[(iv)] Outside of a fixed neighborhood of the point $B$, the lower and middle branches in \cref{fig:Lin:a} are smooth perturbations of the non-regularized bifurcation curve illustrated in \cref{fig:Lin0:a}, while the upper branch has the following expansion: \begin{equation} \label{eq-normu} \Vert u \Vert_2^2 = 2 \bigg(1-\frac{\sqrt3}{3}\sqrt{\frac{\eps}{\lambda}} - 2 \eps + \OO(\eps^{\frac32} \ln\eps)\bigg). \end{equation} \end{itemize} \end{theorem} The detailed asymptotic resolution of the bifurcation diagram associated to the boundary value problem~\cref{eq-2}, carried out in the proof of~\Cref{thm-1}, is accomplished through separate investigation of three distinct, yet overlapping, regions in the diagram, both in the singular limit of $\eps=0$ and for $\eps$ positive and sufficiently small. To that end, we first reformulate \cref{eq-2} in a dynamical systems framework; then, identification of two principal parameters in the resulting equations yields a two-parameter singular perturbation problem. 
Careful asymptotic analysis of that problem will allow us to identify the corresponding limiting solutions, and to show how the third branch in the diagram found for non-zero $\eps$ emerges from the singular limit of $\eps=0$. On that basis, we will prove the existence and uniqueness of solutions close to these limiting solutions. While the three regions in the diagram share some common features, they need to be investigated separately for the structure of the diagram to be fully resolved. Our analysis is based on a variety of dynamical systems techniques and, principally, on geometric singular perturbation theory \cite{Fe79,GSPT,Kue} and the blow-up method, or ``geometric desingularization''~\cite{Du93,DR96,KS01}. In particular, a combination of these techniques will allow us to perform a detailed study of the saddle-node bifurcation at the fold point at $\lambda_\ast$, and to obtain an asymptotic expansion (in $\eps$) for $\lambda_\ast(\eps)$. While such an expansion has been derived by Lindsay via the method of matched asymptotic expansions~\cite{Li14}, cf.~Figure~12 therein, as well as our \cref{fig:Lin:a}, the leading-order coefficients in that expansion are calculated explicitly here. In the process, it is shown that the occurrence of logarithmic switchback terms in the steady-state asymptotics for \Cref{eq-2}, which has also been observed via asymptotic matching in~\cite{Li14}, is due to a resonance phenomenon in one of the coordinate charts after blow-up~\cite{Po05, PS041, PS042, SS04}; cf.~\cref{sec:logsw}. Without loss of generality, we fix $\Lambda=1$ in~\Cref{thm-1}. The proof of \cref{thm-1} follows from a combination of \cref{prop-1,prop-2,prop-3} below; each of these pertains to one of the three above-mentioned regions in the bifurcation diagram. The article is structured as follows: in \Cref{sec:dynfor}, we reformulate the boundary value problem \cref{eq-2} as a dynamical system. 
In \Cref{sec:blup}, we introduce the principal blow-up transformation on which our analysis of the dynamics of \cref{eq-2} close to touchdown is based. In \Cref{sec:bifdiag}, we describe in detail the structure of the bifurcation diagram in \cref{fig:Lin:a} by investigating separately three main regions therein, as illustrated in \cref{fig:bdsegm} below. Finally, in \Cref{sec:diou}, we discuss our findings, and we present an outlook to future research. \section{Dynamical Systems Formulation}\label{sec:dynfor} For our analysis, we reformulate \Cref{eq-2} as a boundary value problem for a corresponding first-order system by introducing the new variable $w=u'$; here, it is useful to keep in mind that $w$ represents the slope of the solution $u$ to \Cref{eq-2}. Moreover, we append the trivial dynamics of both the spatial variable $x$, which we relabel as $\xi$, and the regularizing parameter $\eps$, to the resulting system: \begin{subequations}\label{eq-3} \begin{align} u' &=w, \\ w' &=\frac\lambda{(1+u)^2}\bigg[1-\frac{\eps^2}{(1+u)^2}\bigg], \\ \xi' &=1, \\ \eps' &=0; \end{align} \end{subequations} here, the prime denotes differentiation with respect to $x$. Next, we multiply the right-hand sides in \Cref{eq-3} with a factor of $(1+u)^4$, which allows us to desingularize the flow near the touchdown singularity at $u=-1$\footnote{That desingularization corresponds to a transformation of the independent variable which leaves the phase portrait of \cref{eq-3} unchanged for $u>-1$, since the factor $(1+u)^4$ is positive throughout then.}. Finally, we define a shift in $u$ via \begin{equation} \label{eq-shiftu} \tilde u=1+u, \end{equation} which translates that singularity to $\tilde u=0$. 
Omitting the tilde and denoting differentiation with respect to the new independent variable by a prime, as before, we obtain the system \begin{subequations}\label{eq-4} \begin{align} u' &=u^4w, \label{eq-4a} \\ w' &=\lambda(u^2-\eps^2), \label{eq-4b} \\ \xi' &=u^4, \label{eq-4c} \\ \eps' &=0 \end{align} \end{subequations} in $(u,w,\xi,\eps)$-space, with parameter $\lambda$ and subject to the boundary conditions \begin{align}\label{eq-5} u=1\qquad\text{for }\xi=\mp1. \end{align} Since $\eps$ is small, it seems natural to attempt a perturbative construction of solutions to the boundary value problem \{\cref{eq-4},\cref{eq-5}\}, which turns out to be non-trivial in spite of the apparent simplicity of the governing equations. For $\eps=0$, \Cref{eq-4} can be solved explicitly and admits degenerate equilibria at $u=0$, which corresponds to the touchdown singularity at $u=-1$ in the original model, \Cref{eq-2}. We denote the resulting manifold of equilibria for \cref{eq-4} as \begin{align}\label{S0} \SS^0=\set{(0,w,\xi,0)}{w\in\mathbb{R},\ \xi\in\mathbb{R}}. \end{align} \begin{figure} \caption{Projection of the singular flow of \Cref{eq-4} \label{fig:SysNTT:a} \end{figure} One complication is introduced by the fact that, for $\lambda \neq 0$, the singular flow of \cref{eq-4} in $(u,w)$-space that is obtained for $\eps=0$ is not transverse to $\SS^0$; cf.~\cref{fig:SysNTT:a}. As transversality is a necessary requirement of geometric singular perturbation theory~\cite{Fe79, Kue}, we need to find a way to remedy the lack thereof. For $\lambda=0$ in~\cref{eq-4}, the singular flow becomes even more degenerate; see \cref{fig:SysNTT:b}. Furthermore, the set \begin{align}\label{M0} \MM^0:=\set{(u,0,\xi,0)}{u\in\mathbb{R}^+,\ \xi\in\mathbb{R}} \end{align} now also represents a manifold of equilibria for \Cref{eq-4a,eq-4b}. 
\begin{figure} \caption{Singular flow of \Cref{eq-4} \label{fig:SysNTT:b}} \end{figure} As it turns out, it is beneficial to introduce the following rescaling of $w$ first: \begin{align}\label{eq-6} w=\frac{\tilde w}{\delta}, \end{align} where \begin{align}\label{delta} \delta=\sqrt{\frac{\eps}{\lambda}} \end{align} is a new, non-negative parameter. \begin{remark} The scaling of $w$ by $\sqrt{\lambda}$ in~\cref{eq-6} shifts $\lambda$ from~\cref{eq-4b} to \cref{eq-4c}, the $\xi$-equation, after a rescaling of time. The scaling with $\eps^{-\frac12}$ in~\cref{eq-6} reflects the fact that, for $\lambda=\OO(1)$, $w=\OO(\eps^{-\frac12})$, in agreement with numerical simulations and asymptotic analysis performed in \cite{Li14}. \end{remark} \begin{remark} Some parts of our analysis are conveniently carried out in the parameters $\eps$ and $\lambda$, while others are naturally described in terms of $\eps$ and $\delta$. Hence, we will alternate between these two descriptions, as needed. \end{remark} Substituting \cref{eq-6} into \cref{eq-4}, multiplying the right-hand sides in the resulting equations with a factor of $\delta$, omitting the tilde and retaining the prime for differentiation with respect to the new independent variable, as before, we find \begin{subequations}\label{eq-7} \begin{align} u' &=u^4w, \label{eq-7a} \\ w' &=\eps(u^2-\eps^2), \label{eq-7b} \\ \xi' &=\delta u^4, \label{eq-7c} \\ \eps' &= 0, \end{align} \end{subequations} still subject to the boundary conditions \begin{align}\label{eq-5b} u=1\qquad\text{for }\xi=\mp1. \end{align} We remark that the fast-slow structure of \Cref{eq-7} is very simple, since \Cref{eq-7a,eq-7b} decouple from \Cref{eq-7c}; the latter induces a slow drift in $\xi$. \Cref{eq-7,eq-5b} will form the basis for the subsequent analysis. Two strategies suggest themselves for constructing solutions to the boundary value problem \{\cref{eq-7},\cref{eq-5b}\}. 
The first such strategy involves two sets of boundary conditions, corresponding to suitable intervals of $w$-values that are defined at $\xi=-1$ and $\xi=1$, respectively. Flowing these two sets of boundary conditions forward and backward, respectively, we verify the transversality of the intersection of the two resulting manifolds at $\xi=0$. Each initial $w$-value $w_0$ for which these two manifolds intersect gives a solution to the boundary value problem \{\cref{eq-7},\cref{eq-5b}\}. In particular, that strategy will be used to prove \cref{prop-1}. Since all solutions to \{\cref{eq-7},\cref{eq-5b}\} are even, by \cref{rem:even}, another possible strategy consists of considering \Cref{eq-7} on the $\xi$-interval $[-1,0]$, with boundary conditions $u(-1)=1$ and $w(0)=0$. The set of initial conditions at $\xi=-1$ and $u=1$, but with arbitrary initial $w$-value $w_0$, is then tracked forward to the hyperplane $\{w=0\}$. The resulting manifold is naturally parametrized by $u(w,\eps,\delta,w_0)$ and $\xi(w,\eps,\delta,w_0)$; the unique ``correct'' value $w_0(\eps,\delta)$ corresponding to a solution to the boundary value problem \{\cref{eq-7},\cref{eq-5b}\} is obtained by solving $\xi(w_0,\eps,\delta)=0$ under the constraint that $w(w_0,\eps,\delta)=0$. Details will be presented in the individual proofs below, in particular in those of \cref{prop-2} and \cref{prop-3}. Given \cref{rem:even}, any solution can be obtained via that second strategy; in fact, the intrinsic symmetry of the problem is also clearly visible in \cref{fig:Lin:b}. \Cref{eq-7} constitutes a two-parameter fast-slow system in its fast formulation. The small parameter $\eps$ represents the principal singular perturbation parameter here, while the limit of $\delta\to 0$ is also singular. For $\delta=\OO(1)$, the variables $u$ and $\xi$ are fast, while $w$ is slow; however, for $\delta$ small, the variable $\xi$ is slow, as well. 
The manifold $\SS^0$ defined in \cref{S0} is still invariant under the flow of \cref{eq-7}. Furthermore, for $\delta=0$, the manifold $\MM^0$ defined in~\cref{M0} also represents a set of equilibria for~\cref{eq-7}. (We remark that the same scenario occurs for $\lambda=0$ in~\cref{eq-4}.) Setting $\eps=0$ in \Cref{eq-7}, we obtain the so-called {\it layer problem} \begin{subequations}\label{eq-113} \begin{align} u' &=u^4w, \\ w' &=0, \\ \xi' &=\delta u^4, \\ \eps' &= 0; \end{align} \end{subequations} see \cref{fig:SysNTT:b} for an illustration of the corresponding phase portrait in~$(u,w)$-space and, in particular, of the transversality of orbits of the layer problem to $\SS^0$. Rescaling the independent variable in \cref{eq-7} by multiplying it with $\eps$ yields the slow formulation \begin{subequations}\label{eq-110} \begin{align} \eps\dot u &=u^4w, \\ \dot w &=u^2-\eps^2, \\ \eps\dot\xi &=\delta u^4, \\ \dot\eps &= 0. \end{align} \end{subequations} The {\it reduced} problem, which is found by taking $\eps\to 0$ in \cref{eq-110}, reads \begin{subequations}\label{eq-111} \begin{align} 0 &=u^4w, \\ \dot w &=u^2, \\ 0 &=\delta u^4, \\ \dot\eps &= 0. \end{align} \end{subequations} For $\delta=0$, the manifolds $\SS^0$ and $\MM^0$, as defined in~\cref{S0} and \cref{M0}, respectively, now represent two branches of the {\it critical manifold} for \Cref{eq-7}; however, neither branch is normally hyperbolic, as the Jacobian of the linearization of the layer flow about both $\SS^0$ and $\MM^0$ is nilpotent. Moreover, as is obvious from \cref{eq-111}, the reduced flow on $\SS^0$ vanishes, and is hence highly degenerate. Therefore, standard geometric theory does not apply directly. The underlying non-hyperbolicity can be remedied by means of the blow-up method~\cite{Du93,DR96,KS01,KS01b}. A blow-up with respect to $\eps$ will allow us to describe the dynamics of \cref{eq-4} in a neighborhood of the manifold $\SS^0$; cf.~\Cref{sec:blup}. 
Our analysis relies on a number of dynamical systems techniques, such as classical geometric singular perturbation theory \cite{Fe79}, normal form transformations \cite{Wi03}, and the Exchange Lemma \cite{JKK96, JK94, Kue}, the combination of which will result in precise and rigorous asymptotics for \Cref{eq-7}. To determine the appropriate blow-up transformation, we focus on the $(u,w)$-subsystem \{\cref{eq-7a},\cref{eq-7b}\}, which for $\eps>0$ admits two saddle equilibria at $(\pm\eps,0)$. As we restrict to $u\ge 0$, we consider the positive equilibrium only. The scaling $u=\eps\hat u$ transforms \{\cref{eq-7a},\cref{eq-7b}\} into \begin{align*} \hat u' &= \eps^3 \hat u^4 w, \\ w' &= \eps^3(\hat u^2-1), \end{align*} which yields the integrable system \begin{subequations} \label{xy} \begin{align} \hat u' &= \hat u^4 w, \\ w' &= \hat u^2-1 \end{align} \end{subequations} after division through the common factor $\eps^3$. The saddle equilibrium at $(1,0)$, together with its stable and unstable manifolds, will play a crucial role in the following; the line $\hat u=0$ is invariant, with $w$ decreasing thereon. The corresponding phase portrait is shown in \cref{fig:saddle10}. \begin{figure} \caption{The saddle point $(1,0)$ of \Cref{xy} \label{fig:saddle10}} \end{figure} \section{Geometric Desingularization (``Blow-Up'')}\label{sec:blup} In this section, we introduce the blow-up transformation that will allow us to desingularize the flow of \Cref{eq-7} near the non-hyperbolic manifold $\SS^0$. The discussion at the end of \Cref{sec:dynfor} suggests the following blow-up: \begin{align}\label{eq-8} u=\bar r\bar u,\quad w=\bar w,\quad\xi=\bar \xi,\quad\text{and}\quad\eps=\bar r\bar\eps, \end{align} where $(\bar w, \bar \xi) \in \mathbb{R}^2$ and $(\bar u, \bar \eps) \in S^1$, {\it i.e.}, $\bar u^2+\bar\eps^2=1$. Moreover, $\bar{r} \in [0,r_0)$, with $r_0>0$. 
We note that the equilibrium at $(u,\eps)=(0,0)$ is blown up to the circle $\{\bar{r} = 0\}$; here, we emphasize that we do not blow up the variables $w$ and $\xi$. The vector field that is induced by \cref{eq-7} on the cylindrical manifold in $(\bar u,\bar w,\bar\xi,\bar\eps,\bar r)$-space is best described in coordinate charts. We require two charts here, $K_1$ and $K_2$, which are defined by $\bar u=1$ and $\bar\eps=1$, respectively: \begin{subequations}\label{eq-9} \begin{align} K_1:\ & (u,w,\xi,\eps)=(r_1,w_1,\xi_1,r_1\eps_1), \label{eq-9a} \\ K_2:\ & (u,w,\xi,\eps)=(r_2u_2,w_2,\xi_2,r_2). \label{eq-9b} \end{align} \end{subequations} \begin{remark} \label{rem:K1K2} The phase-directional chart $K_1$ describes the ``outer'' regime, which corresponds to the transient dynamics from $u=1$ to $u=0$, while the rescaling chart $K_2$ -- also known as the \emph{scaling chart} -- covers the ``inner'' regime where $u \approx 0$, in the context of \Cref{eq-7}; in particular, in chart $K_2$, we recover \Cref{xy}. \end{remark} \begin{figure} \caption{Flow of \Cref{eq-7} \label{fig:blowup}} \end{figure} The change of coordinates between charts $K_1$ and $K_2$, which we denote by $\kappa_{12}$, can be written as \begin{align}\label{eq-33} \kappa_{12}:\ (u_2,w_2,\xi_2,r_2)=\big(\eps_1^{-1},w_1,\xi_1,r_1\eps_1\big), \end{align} while its inverse $\kappa_{21}$ is given by \begin{align}\label{eq-34} \kappa_{21}:\ (r_1,w_1,\xi_1,\eps_1)=\big(r_2u_2,w_2,\xi_2,u_2^{-1}\big). \end{align} To obtain the governing equations in $K_1$, we substitute the transformation from \cref{eq-9a} into \Cref{eq-7}; a straightforward calculation yields \begin{subequations}\label{eq-10} \begin{align} r_1' &=r_1^4w_1, \\ w_1' &=r_1^3\eps_1(1-\eps_1^2), \\ \xi_1' &=\delta r_1^4, \\ \eps_1' &=-r_1^3\eps_1w_1. 
\end{align} \end{subequations} Since $\eps=r_1 \eps_1$, the singular limit of $\eps=0$ corresponds to the restriction of the flow of \cref{eq-10} to one of the invariant planes $\{r_1=0\}$ or $\{\eps_1=0\}$. In order to obtain a non-vanishing vector field for $r_1=0$, we desingularize \Cref{eq-10} by dividing out a factor of $r_1^3$ from the right-hand sides, which again represents a rescaling of the corresponding independent variable: \begin{subequations}\label{eq-11} \begin{align} r_1' &=r_1w_1, \label{eq-11a} \\ w_1' &=\eps_1(1-\eps_1^2), \label{eq-11b} \\ \xi_1' &=\delta r_1, \label{eq-11c} \\ \eps_1' &=-\eps_1w_1. \label{eq-11d} \end{align} \end{subequations} The governing equations in $K_2$ are obtained by substituting the transformation in \cref{eq-9b} into \cref{eq-7}, which gives \begin{subequations}\label{eq-12} \begin{align} u_2' &=r_2^3u_2^4w_2, \\ w_2' &=r_2^3(u_2^2-1), \\ \xi_2' &=\delta r_2^4u_2^4, \\ r_2' &=0. \end{align} \end{subequations} Desingularizing as before, by dividing out a factor of $r_2^3$ from the right-hand sides in \cref{eq-12}, we find \begin{subequations}\label{eq-13} \begin{align} u_2' &=u_2^4w_2, \label{eq-13a} \\ w_2' &=u_2^2-1, \label{eq-13b} \\ \xi_2' &=\delta r_2u_2^4, \label{eq-13c} \\ r_2' &=0. \end{align} \end{subequations} Here, we remark that, by construction, the $(u_2,w_2)$-subsystem \{\cref{eq-13a},\cref{eq-13b}\} corresponds to \Cref{xy}. 
Finally, we define various sections for the blown-up vector field, which will be used throughout the following analysis: in $K_1$, we will require the entry and exit sections \begin{subequations}\label{eq-20} \begin{align} \Sigma_1^{\rm in} &:=\set{(\rho,w_1,\xi_1,\eps_1)}{w_1\in[w_-,w_+],\ \xi_1\in[\xi_-,\xi_+],\text{ and }\eps_1\in[0,\sigma]}\quad\text{and} \label{eq-20a} \\ \Sigma_1^{\rm out} &:=\set{(r_1,w_1,\xi_1,\sigma)}{r_1\in[0,\rho],\ w_1\in[w_-,w_+],\text{ and }\xi_1\in[\xi_-,\xi_+]}, \label{eq-20b} \end{align} \end{subequations} respectively, where $0<\rho<1$ and $0<\sigma<1$ are appropriately defined constants, while $w_\mp$ and $\xi_\mp$ are real constants, with $w_-<-\frac2{\sqrt3}$ and $w_+>\frac2{\sqrt3}$. Similarly, in chart $K_2$, we will employ the section \begin{align}\label{eq-21} \Sigma_2^{\rm in}:=\set{(\sigma^{-1},w_2,\xi_2,r_2)}{w_2\in[w_-,w_+],\ \xi_2\in[\xi_-,\xi_+],\text{ and }r_2\in[0,\rho\sigma]}; \end{align} here, we note that $\Sigma_2^{\rm in} = \kappa_{12}\big(\Sigma_1^{\rm out}\big)$. \Cref{eq-11,eq-13} will allow us to construct solutions of~\{\cref{eq-7},\cref{eq-5b}\}. Following the strategy outlined in~\Cref{sec:dynfor}, we will focus our attention on the $\xi$-interval $[-1,0]$ with boundary conditions $u(-1)=1$ and $w(0)=0$; in particular, and as indicated in \Cref{rem:K1K2}, the ``outer'' regime will be realized in terms of the flow between the sections $\Sigma_1^{\rm in}$ and $\Sigma_1^{\rm out}$ in chart $K_1$. Translating the resulting asymptotics into chart $K_2$ via the transformation in \Cref{eq-33}, we will then construct solutions in the ``inner'' regime between the section $\Sigma_2^{\rm in}$ and the hyperplane corresponding to $\{w=0\}$. \begin{remark} \label{not} In the following, we will denote a given general variable $z$ in blown-up space with $\bar{z}$. In charts $K_i$, $i=1,2$, that variable will instead be labeled with the corresponding subscript, as $z_i$. 
\end{remark} \section{Analysis of Bifurcation Diagram -- Proof of \cref{thm-1}} \label{sec:bifdiag} In this section, we establish the bifurcation diagram in \cref{fig:Lin:a} for $\eps$ positive and sufficiently small, proving \cref{thm-1}. To that end, we investigate the existence and uniqueness of solutions to \Cref{eq-7}, subject to the boundary conditions in \cref{eq-5b}. All such solutions arise as perturbations of certain limiting solutions that are obtained in the limit of $\eps=0$. We denote these limiting solutions as singular solutions, as is usual in geometric singular perturbation theory. The approach adopted thereby is the following: first, singular solutions are constructed by analyzing the dynamics in charts $K_1$ and $K_2$ separately in the limit as $\eps\to0$. Then, the persistence of singular solutions for non-zero $\eps$ is shown via the shooting argument outlined in \Cref{sec:dynfor}, which relies on the transversality of the geometric objects involved. That transversality translates into the existence of solutions to the boundary value problem \{\cref{eq-7},\cref{eq-5b}\} along the branches depicted in the bifurcation diagram in \cref{fig:Lin:a}. \begin{definition}\label{def:soltyp} \hspace{-.2cm}We distinguish three types of singular solutions to the boundary value problem \{\cref{eq-7},\cref{eq-5b}\}; see \cref{fig:soltyp}: \begin{description} \item[\emph{Type I.}] Solutions of type I satisfy $u=0$ for $x \in I$, where $I$ is an interval centered at $x=0$. Consequently, the slope of such solutions must initially satisfy $|w|>1$, in terms of the original $w$-variable. Type I-solutions, which will henceforth be illustrated in blue, occur in two subtypes: the ones corresponding to $\lambda=\OO(\eps)$ have constant finite slope $w$ outside of $I$, while the ones corresponding to $\lambda=\OO(1)$ vanish on $I=(-1,1)$. \item[\emph{Type II.}] Solutions of type II are those of slope $w\equiv\mp1$, in terms of the original $w$-variable. 
These solutions exhibit ``touchdown'', reaching $\{u=0\}$ at one point only, namely at $\xi=0$. Type II-solutions will be indicated in green in all subsequent figures. \item[\emph{Type III.}] Solutions of type III never reach $\{u=0\}$; hence, no touchdown phenomena occur. These solutions correspond to solutions of the non-regularized model, with $\eps=0$ in \Cref{eq-2} \cite{Pe02, PB02}. \end{description} \end{definition} \begin{remark} \label{rem:soltyp} The usage of the plural in the definition of type II-solutions requires additional clarification. For \Cref{eq-4}, there exists just one singular solution of type II for $\lambda=0$ with slope $w=\mp 1$; see the solution labeled~$d$ in \cref{fig:Lin0}. However, in our blow-up analysis, that singular solution corresponds to a one-parameter family of type II-solutions. \end{remark} \begin{figure} \caption{Singular solutions to \Cref{eq-7} \label{fig:soltypI} \label{fig:soltypII} \label{fig:soltyp}} \end{figure} For $\eps>0$, we divide the bifurcation diagram in \cref{fig:Lin:a} into three overlapping regions, as shown in \cref{fig:bdsegm}. \begin{remark}\label{normu} Henceforth, we will refer to the norm $\Vert u\Vert_2^2$ in terms of the original variable $u$ in order to be able to compare our analysis with that in \cite{Li14}; see \cref{fig:Lin0,fig:Lin}. \end{remark} Region $\mathcal{R}_1$ is defined as \begin{align}\label{eq:R1} \mathcal{R}_1:=[0,1]\times\bigg[\frac23+\nu_1,2\bigg],\qquad\text{with }\nu_1 >0; \end{align} that region covers the upper part of the bifurcation diagram, where we find the newly emergent branch of solutions for $\eps>0$ in \cref{eq-2} by perturbing from singular solutions of type I. 
Region $\mathcal{R}_2$, which is defined as \begin{align}\label{eq:R2} \mathcal{R}_2:=[0,\eps\lambda_2]\times\bigg[\frac23-\nu_2,\frac23+\nu_2\bigg],\qquad \text{with }\lambda_2>0\text{ and }\nu_2>0 \end{align} for $\nu_2>\nu_1$ and $\lambda_2$ large, but fixed, represents a small neighborhood of the point $B$ that is depicted as a rectangle in \cref{fig:bdsegm}. That region shrinks with decreasing $\eps$, collapsing to the segment $\{0\}\times\big[\frac23-\nu_2,\frac23+\nu_2\big]$ as $\eps \to 0$. The branch of solutions contained in this ``transition'' region is constructed by perturbation from singular solutions of types I and II. Finally, region $\mathcal{R}_3$ is defined as \begin{align}\label{eq:R3} \mathcal{R}_3:=[0,1]\times\bigg[0,\frac23+\nu_2\bigg]\setminus[0,\eps\lambda_3] \times\bigg[\frac23-\nu_3,\frac23+\nu_2\bigg],\qquad\text{with }\lambda_3>0\text{ and }\nu_3 >0, \end{align} where $\nu_3<\nu_2$ and $\lambda_3$ is again large, but fixed, with $\lambda_3 < \lambda_2$. Region $\mathcal{R}_3$ covers the lower part of the bifurcation diagram in \cref{fig:Lin:a}, and contains the branch of solutions which is obtained by perturbing from solutions of types II and III. \begin{figure} \caption{Covering of the bifurcation diagram for the boundary value problem \{\cref{eq-7} \label{fig:bdsegm} \end{figure} The true meaning of these regions becomes clearer when we consider a blow-up of the bifurcation diagram in parameter space, {\it i.e.}, with respect to $\lambda$ and $\eps$, as illustrated in \cref{fig:bdblup}. (That same point of view will also prove useful in parts of the following analysis.) We first embed the diagram, which depends on $(\lambda, \Vert u \Vert_2^2)$, into $\mathbb{R}^3$ by including the third variable $\eps$. 
Then, we blow up the line $\{(0,0)\} \times \mathbb{R}$ by introducing $\bar{r}, \bar{\lambda}$, and $\bar{\eps}$ such that \begin{align*} \lambda=\bar{r}\bar{\lambda}\qquad\text{and}\qquad\eps=\bar{r}\bar{\eps} \end{align*} with $\bar{\lambda}^2+\bar{\eps}^2=1$, {\it i.e.}, for $(\bar{\lambda},\bar{\eps})\in S^1$, and $\bar{r} \in [0, r_0)$, where $r_0>0$. In the blown-up space $S^1\times\mathbb{R}^2$, the line $\{(0,0)\}\times\mathbb{R}$ is hence blown up to a cylinder $S^1\times\{0\}\times\mathbb{R}$. After blow-up, the curve of singular solutions obtained for $\eps=0$ consists of three portions which correspond to singular solutions of types I, II, and III, cf. \cref{fig:soltyp}, and which are shown in blue, green, and black, respectively. The black curve (type III) is located in $\bar{\eps}=0$, while the green curve (type II) lies on the cylinder, {\it i.e.}, in $\{\bar r=0\}$, with $\Vert u \Vert_2^2=\frac23$ constant. Finally, the blue curve (type I) consists of a branch on the cylinder, corresponding to $\lambda = \OO(\eps)$, and of another branch in the plane $\{\bar{\eps}=0\}$ that corresponds to $\lambda=\OO(1)$. In the former case, type I-solutions resemble the one shown in the left panel of \cref{fig:soltypI}; in the second case, type I-solutions are as in the right panel of \cref{fig:soltypI}. These two branches correspond to $\mathcal{B}_1$ and $\mathcal{B}_2$, respectively, as defined in \cref{fig:bdsegm}. Loosely speaking, in blown-up space, a neighborhood of the green curve is hence covered by region $\mathcal{R}_2$ and part of $\mathcal{R}_3$. The blue curve is mostly covered by region $\mathcal{R}_1$, with a small portion close to $\delta=\frac2{\sqrt3}$ covered by $\mathcal{R}_2$. Finally, region $\mathcal{R}_3$ covers the remainder of the green curve close to $\delta=0$, and the black curve. 
The curve obtained for $0<\eps\ll 1$, which is depicted in red in \cref{fig:bdblup}, lifts off from the singular curve corresponding to the limit of $\eps = 0$. \begin{remark} When referring to regions $\mathcal{R}_i$, $i=1,2,3$, in blown-up space, we need to consider the preimages of $\mathcal{R}_i\times [0,\eps_0]$ under the blow-up transformation defined above, strictly speaking. However, for the sake of simplicity, we will use the two notations interchangeably. \end{remark} \begin{figure} \caption{Bifurcation diagram for the boundary value problem \{\cref{eq-7},\cref{eq-5b}\}.} \label{fig:bdblup} \end{figure} As stated in \cref{thm-1}, we consider $\lambda \in [0,\Lambda]$, where we take $\Lambda=1$ for the sake of simplicity. In region $\mathcal{R}_3$, away from the point $B$, the perturbation with $\eps$ is regular. As will be shown below, singular solutions in regions $\mathcal{R}_1$ and $\mathcal{R}_2$ exist only for $\lambda\geq\frac34 \eps$ or, equivalently, for $\delta\leq\frac2{\sqrt3}$; cf.~\cref{ssec:reg1,ssec:reg2}. Hence, in these regions, we need to take $\lambda\in\big[\frac34\eps,1\big]$, {\it i.e.}, \begin{align} \label{delrange} \delta\in\bigg[\sqrt{\eps},\frac2{\sqrt3}\bigg], \end{align} which corresponds to the region shaded in gray in \cref{fig:deleps}. \begin{figure} \caption{Region in $(\eps,\delta)$-space, as considered in our analysis. The region, which is shaded in gray, is bounded from below by $\{\delta=\sqrt{\eps}\}$.} \label{fig:deleps} \end{figure} As evidenced in \cref{fig:deleps}, $\delta=0$ occurs only when $\eps=0$, which is the point represented by the blue dot therein. The corresponding, highly degenerate limit gives a singular orbit of type I with very singular structure, as shown in the right panel in \cref{fig:soltypI}. Hence, the whole line $\mathcal{B}_2$ in the bifurcation diagram for $\eps=0$ shown in \cref{fig:bdsegm} corresponds to that one singular solution. 
\subsection{Region $\mathcal{R}_1$}\label[subsection]{ssec:reg1} Region $\mathcal{R}_1$ in the bifurcation diagram in \cref{fig:bdsegm} corresponds to solutions that reduce to those of type I in the singular limit; cf.~\cref{def:soltyp}. For $\eps$ positive and sufficiently small, solutions on that branch come very close to $\{u=\eps\}$; moreover, the length of the interval $I$ where $u\approx\eps$ grows with $\lambda$. In the singular limit of $\eps = 0$, the slope of the respective solutions is moderate for $\lambda=\OO(\eps)$, corresponding to $0<\delta<\frac{2}{\sqrt3}$, while it tends to infinity for $\lambda=\OO(1)$ -- {\it i.e.}, as $\delta\to 0$ -- along the two segments where $u$ changes from $u = 0$ to $u=1$. These observations are confirmed by the rescaling of $w$ in \cref{eq-6}: for $\lambda=\OO(\eps)$, that rescaling translates into $w=\OO(1)$, while it gives $w\to\infty$ for $\lambda=\OO(1)$; cf.~\cref{fig:soltypI}. Interestingly, the proof of our main result in this section, which is stated below, is very similar for these two $\lambda$-regimes: \begin{proposition}\label{prop-1} Given $\delta_1$ fixed, with $0 <\delta_1<\frac2{\sqrt3}$ and $\delta_1\approx\frac2{\sqrt3}$, there exists $\eps_0>0$ sufficiently small such that in region $\mathcal{R}_1$, the boundary value problem \{\cref{eq-7},\cref{eq-5b}\} has a unique branch of solutions for $\eps\in(0,\eps_0)$ and $\lambda\in\big[\frac\eps{\delta_1^2},1\big]$. As $\eps\to 0$, these solutions limit on a singular solution $\Gamma$ of type I. \end{proposition} \begin{remark} The singular solution $\Gamma$ depends on $\lambda$ or, equivalently, on $\delta$. Interpreted in terms of $\delta$, the range for which singular solutions exist corresponds to $\delta\in\big[\sqrt\eps,\delta_1\big]$; recall~\cref{delta}. \end{remark} To prove \cref{prop-1}, we construct solutions corresponding to the branch that is contained in region $\mathcal{R}_1$ for fixed $\lambda$ in the regime considered here. 
For $\delta$ fixed, a unique singular orbit $\Gamma$ is determined in blown-up phase space by investigating the dynamics of the boundary value problem \{\cref{eq-7},\cref{eq-5b}\} separately in charts $K_1$ and $K_2$, and by then combining the results obtained in these charts. Finally, the singular orbit $\Gamma$, which is essentially determined by the dynamics in chart $K_2$, is shown to persist for $\eps$ positive and sufficiently small. \subsubsection{Dynamics in chart $K_2$} The flow of \Cref{eq-7} from the section $\Sigma_2^{\rm in}$ back to itself, whereby the sign of $w$ changes from negative to positive, is naturally described in chart $K_2$; cf.~\cref{fig:singorb}. Recalling that $r_2=\eps$, we observe that \Cref{eq-13} constitutes a fast-slow system in the standard form of geometric singular perturbation theory \cite{Fe79, GSPT, Kue}, with $(u_2,w_2)$ the fast variables and $\xi_2$ the slow variable. The fast system is given by \cref{eq-13}, whence the corresponding slow system is obtained by a rescaling of the independent variable with $r_2$: \begin{subequations}\label{eq-47} \begin{align} r_2\dot{u}_2 &=u_2^4w_2, \\ r_2\dot{w}_2 &=u_2^2-1, \\ \dot{\xi}_2 &=\delta u_2^4, \\ \dot{r}_2 &=0. \end{align} \end{subequations} The associated layer and reduced problems, which are obtained by setting $r_2=0$ in~\cref{eq-13} and~\cref{eq-47}, respectively, read \begin{subequations}\label{eq-35} \begin{align} u_2' &=u_2^4w_2, \label{eq-35a}\\ w_2' &=u_2^2-1, \label{eq-35b}\\ \xi_2' &=0, \\ r_2' &=0 \end{align} \end{subequations} and \begin{subequations}\label{eq-36} \begin{align} 0 &=u_2^4w_2, \\ 0 &=u_2^2-1, \\ \dot{\xi}_2 &=\delta u_2^4, \label{eq-36c} \\ \dot{r}_2 &=0, \end{align} \end{subequations} respectively. (We note that the $(u_2,w_2)$-subsystem \{\cref{eq-35a},\cref{eq-35b}\} is precisely equal to \Cref{xy}.) 
The critical manifold for \Cref{eq-36} is given by the line \begin{align}\label{eq-24} \SS_2^0:=\set{(1,0,\xi_2,0)}{\xi_2\in[\xi_-,\xi_+]}, \end{align} where the constants $\xi_\mp$ are defined as before. \begin{remark} While steady states are also found for $u_2=-1$ in \cref{eq-13}, these states are irrelevant, since $u_2$ and $r_2$ are both non-negative and since $\{u_2=0\}$ is an invariant hyperplane for \cref{eq-13} which the flow cannot cross. \end{remark} Linearization of \cref{eq-35} about the critical manifold $\SS_2^0$ shows that any point $Q_2=(1,0,\xi_2,0)\in\SS_2^0$ is a saddle, with Jacobian \begin{align*} \Bigg[\begin{array}{cc} 4u_2^3w_2 & u_2^4 \\ 2u_2 & 0 \end{array}\Bigg]\Bigg|_{(u_2,w_2)=(1,0)}=\Bigg[\begin{array}{cc} 0 & 1 \\ 2 & 0 \end{array}\Bigg] \end{align*} and eigenvalues $\pm\sqrt{2}$. Hence, the manifold $\SS_2^0$ is normally hyperbolic. The reduced flow thereon is described by $\dot{\xi}_2=\delta$, which corresponds to a constant drift in the positive \mbox{$\xi_2$-direction} with speed $\delta$. To describe the integrable layer flow away from $\SS_2^0$, we introduce $u_2$ as the independent variable, dividing \cref{eq-35b} formally by \cref{eq-35a}: \begin{align*} \frac{{\rm d} w_2}{{\rm d} u_2}=\frac{u_2^2-1}{u_2^4w_2(u_2)}. \end{align*} Solving the above equation with $w_2(1)=0$, we find \begin{align}\label{eq-27} w_2^\mp(u_2)=\mp\sqrt{\frac43-\frac2{u_2}+\frac2{3u_2^3}}. \end{align} In particular, it follows from \cref{eq-27} that, for any fixed choice of $\xi_2$, the stable and unstable manifolds of $Q_2$ can be written as graphs over $u_2$: \begin{subequations}\label{eq-28} \begin{align} \WW_2^{\rm s}(Q_2) &=\set{(u_2,w_2^-(u_2),\xi_2,0)}{u_2\in[1,\infty)}, \\ \WW_2^{\rm u}(Q_2) &=\set{(u_2,w_2^+(u_2),\xi_2,0)}{u_2\in[1,\infty)}. \end{align} \end{subequations} We have the following result. \begin{lemma}\label{lem-1} Let $r_2\in(0,r_0)$, with $r_0$ positive and sufficiently small. 
Then, the following statements hold for \Cref{eq-47}: \begin{enumerate} \item The normally hyperbolic critical manifold $\SS_2^0$ perturbs to a slow manifold \begin{align*} \SS_2^{r_2}=\set{(1,0,\xi_2,r_2)}{\xi_2\in[\xi_-,\xi_+]}, \end{align*} where $\xi_\mp$ are appropriately chosen constants. In particular, we emphasize that $(u_2,w_2)=(1,0)\in\SS_2^{r_2}$. \item The corresponding stable and unstable foliations $\FF_2^{\rm s}(\SS_2^{r_2})$ and $\FF_2^{\rm u}(\SS_2^{r_2})$ are identical to $\FF_2^{\rm s}(\SS_2^0)$ and $\FF_2^{\rm u}(\SS_2^0)$, except for their constant $r_2$-component. For $r_2\in[0,r_0)$ fixed, these foliations may be written as \begin{subequations}\label{eq-29} \begin{align} \FF_2^{\rm s}(\SS_2^{r_2}) &=\set{(u_2,w_2^-(u_2),\xi_2,r_2)}{u_2\in[1,\infty),\ \xi_2\in[\xi_-,\xi_+]}\quad\text{and} \label{eq-29a} \\ \FF_2^{\rm u}(\SS_2^{r_2}) &=\set{(u_2,w_2^+(u_2),\xi_2,r_2)}{u_2\in[1,\infty),\ \xi_2\in[\xi_-,\xi_+]}. \label{eq-29b} \end{align} \end{subequations} \end{enumerate} \end{lemma} \begin{proof} Both statements follow immediately from standard geometric singular perturbation theory~\cite{Fe79}, in combination with the preceding analysis; in particular, the fact that the plane $\{(u_2,w_2)=(1,0)\}$ is invariant for \Cref{eq-13} irrespective of the choice of $r_2$ implies that the restrictions of $\SS_2^{r_2}$ and $\SS_2^0$ to $(u_2,w_2,\xi_2)$-space do not depend on $r_2$. \end{proof} \begin{remark} The fast-slow structure of \Cref{eq-13} is very simple, since the $(u_2,w_2)$-subsystem \{\cref{eq-13a},\cref{eq-13b}\} decouples from \Cref{eq-13c}. Even for $\eps>0$, the fast dynamics is determined by that integrable planar system, and organized by the saddle point at $(1,0)$ and the stable and unstable manifolds thereof. The slow flow on the slow manifold $\SS_2^{r_2}$ is just the drift given by $\dot{\xi}=\delta$. 
\end{remark} In the limit as $u_2 \to \infty$, $w_2^\mp(u_2)$ converges to $w_2^\mp(\infty)=\mp \frac2{\sqrt3}$; recall~\cref{eq-27}. Transforming the stable manifold $\WW_2^{\rm s}(Q_2)$ and the unstable manifold $\WW_2^{\rm u}(Q_2)$ to chart $K_1$, via the coordinate change $\kappa_{21}$ defined in~\cref{eq-34}, we see that these manifolds limit on the points $\big(0,\mp\frac2{\sqrt3},\xi_1,0\big)$, respectively, for $\xi_1$ fixed; see \cref{fig:singorb}. \subsubsection{Dynamics in chart $K_1$} The portions of the singular orbit $\Gamma$ corresponding to the flow between two sets of boundary conditions that are located at $\xi=\mp1$ and the section $\Sigma_1^{\rm out}$ are studied in chart $K_1$. A simple calculation reveals that \Cref{eq-11} admits a line of steady states at \begin{align}\label{eq-14} \SS_1^0:=\set{(0,0,\xi_1,1)}{\xi_1\in[\xi_-,\xi_+]}, \end{align} as well as the plane of steady states \begin{align}\label{eq-15} \pi_1:=\set{(0,w_1,\xi_1,0)}{w_1\in[w_-,w_+]\text{ and }\xi_1\in[\xi_-,\xi_+]}; \end{align} here, $w_\mp$ and $\xi_\mp$ are defined as in \cref{eq-20}. (Another set of equilibria, with $\eps_1=-1$, is irrelevant to us due to our assumption that $r_1$ and $\eps_1$ are both non-negative.) The line $\SS_1^0$ corresponds to the saddle equilibrium at $(\hat u,w)=(1,0)$ of \Cref{xy}, and coincides with the critical manifold $\SS_2^0$ introduced in chart $K_2$; cf.~\Cref{eq-24}. 
In chart $K_1$, the singular limit of $\eps=0$ corresponds to either $r_1=0$ or $\eps_1=0$ in \Cref{eq-11}, which yields the following two limiting systems in the corresponding invariant hyperplanes: \begin{subequations}\label{eq-16} \begin{align} r_1' &=0, \\ w_1' &=\eps_1(1-\eps_1^2), \label{eq-16b} \\ \xi_1' &= 0, \\ \eps_1' &=-\eps_1w_1 \label{eq-16d} \end{align} \end{subequations} and \begin{subequations}\label{eq-17} \begin{align} r_1' &=r_1w_1, \label{eq-17a} \\ w_1' &=0, \\ \xi_1' &=\delta r_1, \label{eq-17c} \\ \eps_1' &=0, \end{align} \end{subequations} respectively. \Cref{eq-16} is equivalent to \Cref{eq-35} in chart $K_2$ under the coordinate change $\kappa_{21}$ defined in~\cref{eq-34}; these equations describe the portion of the singular orbit $\Gamma$ in chart $K_1$ that is located between $\Sigma_1^{\rm out}$ and the hyperplane $\{\eps_1=0\}$. \Cref{eq-17}, on the other hand, determines the portion of the singular orbit which connects the hyperplane $\{r_1=0\}$ with the boundary conditions imposed at $r_1=1$. Hence, we first focus our attention on that limiting system. The value of $w_1$ in \Cref{eq-17} is constant: $w_1\equiv w_0$, for some constant $w_0$. Since $w_0$ must match the $w_2$-value obtained in the limit $u_2\to\infty$ in~\cref{eq-27} in chart $K_2$, see \cref{fig:singorb}, $w_1\equiv\mp\frac{2}{\sqrt3}$ must hold in the hyperplane $\{\eps_1=0\}$. The corresponding orbits of~\cref{eq-17} are then easily found by dividing \cref{eq-17c} formally by \cref{eq-17a}: $\frac{{\rm d}\xi_1}{{\rm d}r_1}=\frac{\delta}{w_0}$. For any initial condition $\xi_1(1)=\xi_0$, the solution to that equation reads \begin{align}\label{eq-44} \xi_1(r_1)=\frac{\delta}{w_0}(r_1-1)+\xi_0. \end{align} The boundary conditions in~\cref{eq-5b} imply $\xi_0=\mp1$; hence, and since $w_0=\mp\frac{2}{\sqrt3}$, we obtain \begin{align}\label{eq-44b} \xi_1^\mp(r_1)=\mp\frac{\sqrt3}2\delta(r_1-1)\mp1. 
\end{align} Any orbit of \cref{eq-17} can then be written as \begin{align}\label{eq-45} \bigg\{\bigg(r_1,\mp \frac2{\sqrt3},\xi_1^\mp(r_1),0\bigg)\, \bigg|\, r_1\in[0,1]\bigg\}. \end{align} Orbits of the integrable \Cref{eq-16} can be found by introducing $\eps_1$ as the independent variable: dividing \cref{eq-16b} formally by \cref{eq-16d}, we obtain \mbox{$\frac{{\rm d}w_1}{{\rm d}\eps_1}=-\frac{1-\eps_1^2}{w_1(\eps_1)}$}, which can be solved explicitly with $w_1(0)=\mp \frac2{\sqrt3}$ to yield \begin{align}\label{eq-19} w_1^\mp(\eps_1)=\mp\sqrt{\frac43-2\eps_1+\frac23\eps_1^3}, \end{align} where the sign in \cref{eq-19} equals that of the initial $w_1$-value. (We remark that \cref{eq-19} corresponds to \Cref{eq-27}, after transformation to $K_1$-coordinates.) The corresponding values of $\xi_1$ are constant, and must equal the respective values of $\xi_1^\mp(r_1)$ in~\cref{eq-44b} at $r_1=0$, {\it i.e.}, \begin{equation} \xi_1^\mp(0)= \pm \frac{\sqrt3}2 \delta \mp 1. \end{equation} \begin{remark} \label{rem:delta} For $\delta=\frac2{\sqrt3}$, it follows that $\xi_1^\mp(0)=0$, {\it i.e.}, we obtain a singular orbit of type II; see \cref{fig:soltypII,fig:3lam}. Hence, we must assume $\delta<\frac2{\sqrt3}$ in the statement of \cref{prop-1}. \end{remark} Any orbit of \cref{eq-16} can thus be represented as \begin{align}\label{eq-32} \set{(0,w_1^\mp(\eps_1),\xi_1^\mp(0),\eps_1)}{\eps_1\in[0,\sigma]}, \end{align} where $\sigma$ is as in the definition of the section $\Sigma_1^{\rm out}$; recall~\cref{eq-20}. Concatenation of the two orbit segments defined in \Cref{eq-45,eq-32} with the respective signs will yield the singular orbits $\Gamma_1^-$ and $\Gamma_1^+$, which are located between the sections $\mathcal{V}_{1_0}^-$ and $\Sigma_1^{\rm out}$ and $\Sigma_1^{\rm out}$ and $\mathcal{V}_{1_0}^+$, respectively. 
Here, \begin{align}\label{eq-60bis} \VV_{1_0}^\mp:=\set{(1,w,\mp1,0)}{w\in I^\mp}, \end{align} with $I^\mp$ being appropriately defined neighborhoods of the points $w_0^-=-\frac2{\sqrt3}$ and \mbox{$w_0^+=\frac2{\sqrt3}$}, respectively; see~\cref{fig:singorb}. \subsubsection{Singular orbit $\Gamma$}\label{sec:soO1} A singular orbit $\Gamma$ for \Cref{eq-7} can now be constructed on the basis of the dynamics in charts $K_1$ and $K_2$, by taking into account the corresponding boundary conditions in \Cref{eq-5b}. After transformation to $K_1$, the manifolds $\WW_2^{\rm s}(Q_2)$ and $\WW_2^{\rm u}(Q_2)$ meet the portions of the orbits $\Gamma_1^-$ and $\Gamma_1^+$, respectively, as given by~\cref{eq-32}, in the points \begin{align}\label{eq:p1mp} P_1^\mp=\bigg(0,\mp\frac2{\sqrt3},\pm\frac{\sqrt3}{2}\delta\mp1,0\bigg). \end{align} These points are contained in the two lines \begin{subequations} \begin{align} \ell_1^-&=\set{(0,-\tfrac2{\sqrt3},\xi_1,0)}{\xi_1\in[\xi_-,\xi_+]}\quad\text{and} \\ \ell_1^+&=\set{(0,\tfrac2{\sqrt3},\xi_1,0)}{\xi_1\in[\xi_-,\xi_+]}, \end{align} \end{subequations} respectively, in the hyperplane $\{\eps_1=0\}$, which are both located in the plane of steady states $\pi_1$; cf.~\cref{eq-15}. The portions of the singular orbit $\Gamma$ that lie in chart $K_1$ can hence finally be written as \begin{subequations}\label{eq-31} \begin{align} \begin{split} \Gamma_1^- &=\set{(r_1,-\tfrac2{\sqrt3},-\tfrac{\sqrt3}{2} \delta (r_1-1)-1,0)}{r_1\in(0,1]}\cup P_1^- \\ & \cup\set{(0,-\sqrt{\tfrac43-2\eps_1+\tfrac23\eps_1^3},\tfrac{\sqrt3}{2} \delta-1,\eps_1)}{\eps_1\in(0,\sigma]}\quad\text{and} \end{split} \label{eq-31a} \\ \begin{split} \Gamma_1^+ &=\set{(r_1,\tfrac2{\sqrt3},\tfrac{\sqrt3}{2} \delta (r_1-1)+1,0)}{r_1\in(0,1]}\cup P_1^+ \\ & \cup\set{(0,\sqrt{\tfrac43-2\eps_1+\tfrac23\eps_1^3},-\tfrac{\sqrt3}{2} \delta +1,\eps_1)}{\eps_1\in(0,\sigma]}. 
\end{split} \label{eq-31b} \end{align} \end{subequations} It remains to identify the portion of $\Gamma$ that is located in chart $K_2$; we denote the corresponding singular orbit by $\Gamma_2$. We note that, for $r_2=0$, \Cref{eq-35} implies $\xi_2\equiv {\rm constant}$ on $\Gamma_2$. Given the definition of $\Gamma_1^\mp$ and the fact that $\xi_2=\xi_1$, we define the points \begin{align}\label{Q2} Q_2^\mp=\bigg(1,0,\pm \frac{\sqrt3}{2} \delta \mp1,0\bigg)\in\SS_2^0; \end{align} therefore, we may write \begin{align}\label{eq-37} \Gamma_2=\WW_2^{\rm s}(Q_2^-)\cup Q_2^-\cup\set{(1,0,\xi_2,0)}{\xi_2\in(\tfrac{\sqrt3}{2} \delta-1, -\tfrac{\sqrt3}{2} \delta+1)}\cup Q_2^+\cup\WW_2^{\rm u}(Q_2^+), \end{align} recall \Cref{eq-28}, where $u_2$ now varies in the range $[1,\sigma^{-1}]$. The orbit $\Gamma_2$ is hence defined as the union of three segments, with the first being the stable manifold of $Q_2^-$, the second corresponding to the slow drift in $\xi_2$ from $Q_2^-$ to $Q_2^+$, as shown in the inset of \cref{fig:singorb}, and the third being the unstable manifold of $Q_2^+$. The sought-after singular orbit $\Gamma$, which represents the singular solution to the boundary value problem \{\cref{eq-7},\cref{eq-5b}\}, can then be written as the union of $\Gamma_1^-$, $\Gamma_2$, and $\Gamma_1^+$ in blown-up space: \begin{align*} \Gamma := \Gamma_1^- \cup \Gamma_2 \cup \Gamma_1^+. \end{align*} A visualization of the orbit $\Gamma$ is given in \cref{fig:singorb}. \begin{figure} \caption{Geometry of the singular orbit $\Gamma=\Gamma_1^-\cup\Gamma_2\cup\Gamma_1^+$ for \Cref{eq-7} \label{fig:singorb} \end{figure} \subsubsection{Persistence of $\Gamma$ -- Proof of \cref{prop-1}} The proof of \cref{prop-1} is based on the shooting argument outlined in \Cref{sec:dynfor}, which is implemented by approximating the dynamics of \Cref{eq-7} for $\eps$ small in the two coordinate charts $K_1$ and $K_2$. 
We begin by defining the two manifolds \begin{align}\label{eq-60} \VV_{1_\eps}^\mp:=\set{(1,w,\mp1,\eps)}{w\in I^\mp}\qquad\text{for }\eps\in[0,\eps_0), \end{align} which represent the boundary conditions in \cref{eq-5b} in chart $K_1$, with $r_1=1$ for $\xi_1=\mp1$; hence, it also follows that $\eps_1=\frac\eps{r_1}=\eps$ there. (We note that, for $\eps=0$, the manifolds $\VV_{1_\eps}^\mp$ in \cref{eq-60} reduce to $\VV_{1_0}^\mp$, respectively, as defined in \cref{eq-60bis}.) The intervals $I^-$ and $I^+$ are defined as neighborhoods of the points $w_0^-=-\frac2{\sqrt3}$ and $w_0^+=\frac2{\sqrt3}$, respectively, as before. We note that the manifolds $\VV_{1_\eps}^\mp$ are mapped onto each other by the transformation $(r_1,w_1,\xi_1,\eps_1)\mapsto(r_1,-w_1,-\xi_1,\eps_1)$, in accordance with the symmetry properties of the boundary value problem \{\cref{eq-7},\cref{eq-5b}\}, as discussed in \Cref{sec:intro}. It is hence sufficient to consider the transition from $\VV_{1_\eps}^-$ to $\Sigma_1^{\rm out}$ under the flow of \cref{eq-11}, as its counterpart, the transition between $\Sigma_1^{\rm out}$ and $\VV_{1_\eps}^+$, can be obtained in a symmetric fashion. We now introduce $\eps_1$ as the independent variable in \Cref{eq-11}, whence \begin{subequations}\label{eq-25} \begin{align} \frac{{\rm d}r_1}{{\rm d}\eps_1} &=-\frac{r_1}{\eps_1}, \label{eq-25a} \\ \frac{{\rm d}w_1}{{\rm d}\eps_1} &=-\frac{1-\eps_1^2}{w_1(\eps_1)}, \label{eq-25b} \\ \frac{{\rm d}\xi_1}{{\rm d}\eps_1} &=-\delta \frac{r_1(\eps_1)}{\eps_1w_1(\eps_1)}. \label{eq-25c} \end{align} \end{subequations} Here, we remark that $w_1(\eps_1)$ remains non-zero for $\eps$ sufficiently small, as we know that $w_1=\mp\frac2{\sqrt3}+\OO(\eps_1)\ne 0$ in the singular limit, {\it i.e.}, for $\eps=0$. 
Solving \Cref{eq-25a,eq-25b}, with initial condition $(1,w,-1,\eps)\in\VV_{1_\eps}^-$, we find \begin{align}\label{eq-26} r_1(\eps_1)=\frac\eps{\eps_1}\quad\text{and}\quad w_1^-(\eps_1)=-\sqrt{w^2+2(\eps-\eps_1)-\frac23(\eps^3-\eps_1^3)}. \end{align} Substituting the expressions in \cref{eq-26} into \cref{eq-25c} and expanding the result for $\eps_1$ small, we obtain \begin{align*} \frac{{\rm d}\xi_1}{{\rm d}\eps_1} &=\delta \frac\eps{\eps_1^2} \frac1{\sqrt{w^2+2(\eps-\eps_1)-\frac23(\eps^3-\eps_1^3)}} \\ &=\delta \frac\eps{\eps_1}\frac1{\sqrt{w^2+2\eps-\frac23\eps^3}} \bigg[\frac1{\eps_1}+\frac1{w^2+2\eps-\frac23\eps^3}\bigg]+\OO(1), \end{align*} which can be solved to the order considered here and evaluated in $\Sigma_1^{\rm out}$ -- {\it i.e.}, for $\eps_1=\sigma$ -- to yield \begin{equation} \label{eq-112} \xi_1^{{\rm out}-}=-1-\frac{\delta}{w} + \frac{\delta}{w^3}\eps\ln\eps+\OO(\eps). \end{equation} Similarly, evaluating \cref{eq-26} in $\Sigma_1^{\rm out}$, we find \begin{multline*} \big(r_1^{{\rm out}-},w_1^{{\rm out}-},\xi_1^{{\rm out}-},\eps_1^{{\rm out}-}\big) \\ =\bigg(\frac\eps\sigma,-\sqrt{w^2+2(\eps-\sigma)-\frac23(\eps^3-\sigma^3)}, -1-\frac{\delta}{w}+\OO(\eps\ln\eps),\sigma\bigg), \end{multline*} which defines a curve $(w_1^{{\rm out}-},\xi_1^{{\rm out}-})(w)$ that is parametrized by the initial $w_1$-value $w$ in $\VV_{1_\eps}^-$. That curve, which we denote by $\VV_{1_\eps}^{{\rm out}-}$, is located in a two-dimensional subset of $\Sigma_1^{\rm out}$ and, specifically, in the $(w_1,\xi_1)$-plane, with $(r_1,\eps_1)$ fixed: \begin{equation}\label{eq-40} \VV_{1_\eps}^{{\rm out}-}:=\bigg\{\bigg(-\sqrt{w^2+2(\eps-\sigma)-\frac23(\eps^3-\sigma^3)}, -1-\frac{\delta}{w}+\OO(\eps\ln\eps)\bigg)\, \bigg| \, w\in I^-\bigg\}. \end{equation} It remains to study the stable foliation $\FF_2^{\rm s}(\SS_2^{r_2})$ in coordinate chart $K_2$, and to show that the intersection thereof with $\VV_{1_\eps}^{{\rm out}-}$ is transverse for $\eps$ sufficiently small. 
To that end, we recall the definition of $\FF_2^{\rm s}(\SS_2^{r_2})$ in \cref{eq-29a}, which we restrict to the section $\Sigma_2^{\rm in}=\kappa_{12}(\Sigma_1^{\rm out})$: taking $r_2(=\eps)$ fixed, as before, and evaluating $\FF_2^{\rm s}(\SS_2^{r_2})$ at $u_2=\sigma^{-1}$ defines a curve $\FF_2^{{\rm in}-}$ in $\Sigma_2^{\rm in}$ which is parametrized by $\xi_2\in[\xi_-,\xi_+]$ via \begin{align*} (u_2^{\rm in},w_2^{\rm in},\xi_2^{\rm in},r_2^{\rm in})=\Big(\sigma^{-1},-\sqrt{\tfrac43-2\sigma +\tfrac23\sigma^3},\xi_2,r_2\Big), \end{align*} for any $r_2\in[0,\rho\sigma]$; cf.~\cref{eq-21}. Transforming $\FF_2^{{\rm in}-}$ to chart $K_1$, we obtain the corresponding curve $\FF_1^{{\rm out}-}$: \begin{equation}\label{eq-41} \FF_1^{{\rm out}-}:=(w_1^{\rm out},\xi_1^{\rm out})=\bigg\{\bigg(-\sqrt{\frac43-2\sigma+\frac23\sigma^3},\xi_1\bigg)\, \bigg| \, \xi_1\in[\xi_-,\xi_+]\bigg\}. \end{equation} Comparing \Cref{eq-40,eq-41} and expanding \begin{align*} -\sqrt{w^2+2(\eps-\sigma)-\tfrac23(\eps^3-\sigma^3)}=w+\frac{\eps-\sigma}{w}+\OO[(\eps-\sigma)^2] \end{align*} and \begin{align*} -\sqrt{\tfrac43-2\sigma+\tfrac23\sigma^3}=-\frac2{\sqrt3}+\frac{\sqrt3}2\sigma+\OO(\sigma^2), \end{align*} we conclude that $\VV_{1_\eps}^{{\rm out}-}$ and $\FF_1^{{\rm out}-}$ intersect in some point \begin{align*} P_1^{{\rm out}-}=\bigg(-\frac2{\sqrt3}+\OO(\eps),-1-\frac{\delta}{w}+\OO(\eps\ln\eps)\bigg). \end{align*} As the corresponding tangent vectors in the $(w_1,\xi_1)$-plane are given by $(1,\frac{\delta}{w^2})$ and $(0,1)$ to leading order, that intersection is transverse for any $\eps$ small. More precisely, transversality between $\VV_{1_\eps}^{{\rm out}-}$ and $\FF_1^{{\rm out}-}$ occurs already for $\eps=0$, {\it i.e.}, in $\{r_1=0\}$, which is sufficient for the Exchange Lemma to apply in chart $K_2$; cf.~\cref{fig:trint}. As these two curves perturb smoothly, the transversality of their intersection persists for $\eps \neq 0$, as well. 
\begin{figure} \caption{Transverse intersection of the sets $\VV_{1_0}^{{\rm out}-}$ and $\FF_1^{{\rm out}-}$ in the singular limit $\eps=0$.} \label{fig:trint} \end{figure} Next, and as stated above, the symmetry of \Cref{eq-11} implies the existence of a point $P_1^{{\rm out}+}=\big(\frac2{\sqrt3}+\OO(\eps),1-\frac{\delta}{w}+\OO(\eps\ln\eps)\big)$ in $\Sigma_1^{\rm out}$ in which the curves \begin{align*} \VV_{1_\eps}^{{\rm out}+}=\bigg\{\bigg(\sqrt{w^2+2(\eps-\sigma)-\frac23(\eps^3-\sigma^3)}, 1-\frac{\delta}{w}+\OO(\eps\ln\eps)\bigg)\, \bigg| \, w \in I^+\bigg\} \end{align*} and \begin{align*} \FF_1^{{\rm out}+}=\bigg\{\bigg(\sqrt{\frac43-2\sigma+\frac23\sigma^3},\xi_1\bigg)\, \bigg| \, \xi_1\in[\xi_-,\xi_+]\bigg\} \end{align*} intersect transversely. In summary, we have hence constructed a connection between the two manifolds of boundary conditions $\VV_{1_\eps}^-$ and $\VV_{1_\eps}^+$, as follows: in the singular limit of $\eps=0$, the image $\VV_{1_0}^{{\rm out}-}$ in $\Sigma_1^{\rm out}$ of $\VV_{1_0}^-$ under the forward flow intersects transversely the equivalent of the stable manifold $\WW_2^{\rm s}(Q_2)$ under the change of coordinates to chart $K_1$, namely, $\FF_1^{{\rm out}-}$. Then, a slow drift occurs along the critical manifold $\SS_2^0$ until the flow leaves along the unstable manifold $\WW_2^{\rm u}(Q_2)$. In $\Sigma_1^{\rm out}$, that manifold -- which corresponds to $\FF_1^{{\rm out}+}$ after transformation to $K_1$-coordinates -- again intersects transversely the image $\VV_{1_0}^{{\rm out}+}$ of the boundary manifold $\VV_{1_0}^+$ under the backward flow. The construction persists for $\eps \neq 0$ sufficiently small; in fact, it guarantees a transverse intersection between $\VV_{1_\eps}^{{\rm out}\mp}$ and $\FF_1^{{\rm out}\mp}$ when $0<\eps\ll1$. Finally, the fact that the perturbed orbit approaching the stable foliation of the slow manifold $\SS_2^{r_2}$ will leave along the unstable foliation thereof is guaranteed by the Exchange Lemma. 
The above argument allows us to obtain the portion of the branch of solutions in the bifurcation diagram which perturbs from $\mathcal{B}_1$ for $\delta \geq \hat{\delta}$, with $\hat{\delta}>0$ small; see \cref{fig:bdblup,fig:deleps}. The portion of the branch perturbing from the part of $\mathcal{B}_1$ for $0 \leq \delta < \hat{\delta}$, as well as from $\mathcal{B}_2$, can be obtained in a similar spirit. However, as that regime involves the limit as $\delta\to 0$, it requires further consideration. Setting $\delta=0$ does not affect our construction in chart $K_1$; however, it destroys the slow drift on $\SS_2^0$ in chart $K_2$ for $\eps=0$, cf.~\Cref{eq-36}. The segment $\mathcal{B}_2$ is associated to the regime where $\lambda=\OO(1)$. Singular solutions in that regime are of type I; see the right panel of \cref{fig:soltypI}. We recall that $\delta=0$ occurs only for $\eps=0$, cf.~\cref{fig:deleps}, and that $\delta$ is bounded below by $\sqrt\eps$. Hence, it is convenient to introduce the rescaling \begin{equation}\label{delres} \delta=\sqrt\eps \tilde{\delta}, \end{equation} with $\tilde\delta \geq 1$, which we substitute into the governing \Cref{eq-11,eq-13} in charts $K_1$ and $K_2$, respectively. In chart $K_1$, the rescaling in \cref{delres} yields the same dynamics as is obtained by setting $\delta=0$ in~\cref{eq-11}: the singular limit of $\eps=0$ implies $\xi_1\equiv\mp1$ in the invariant hyperplane~$\{\eps_1=0\}$; cf.~\Cref{eq-17}. It follows that the value of $\xi$ in the transition from $u=1$ to $u=0$ does not change, as can also be seen in the corresponding type I-solution; see again the right panel of \cref{fig:soltypI}. In chart $K_2$, introduction of the rescaling in~\cref{delres} again yields a fast-slow system, \begin{subequations}\label{eq-13tilde} \begin{align} u_2' &=u_2^4w_2, \\ w_2' &=u_2^2-1, \\ \xi_2' &=\tilde{\delta} r_2^{\frac32}u_2^4, \\ r_2' &=0. 
\end{align} \end{subequations} The only difference to the previous case of $\delta\neq 0$ is that the slow dynamics is now even slower, as the small perturbation parameter in~\cref{eq-13} is given by $r_2^{3/2}$, instead of by $r_2$. The global construction illustrated in this section is unaffected by that difference, though, as the techniques we have relied on -- such as, {\it e.g.}, the Exchange Lemma -- still apply. As $\tilde{\delta}$ grows to $\OO(r_2^{-1/2})$, the transition between the two regimes occurs. \begin{remark}\label{rem:lam34} We emphasize that the restriction on $\delta$ in the statement of \cref{prop-1} is due to the fact that we require $Q_2^-\ne Q_2^+$; cf.~also \cref{rem:delta}. Specifically, for the Exchange Lemma to apply, $\frac{\sqrt3}{2}\delta-1<-\frac{\sqrt3}{2}\delta+1$ must hold, which is equivalent to $\delta<\frac{2}{\sqrt{3}}=:\delta_\ast$. The case where that condition is violated is studied in \cref{ssec:reg2} below, which covers region $\mathcal{R}_2$. In particular, it is shown there that \Cref{eq-7} then locally admits a pair of solutions which limit on a solution of type I and one of type II, respectively; these two singular solutions meet in a saddle-node bifurcation at $\delta=\delta_\ast$. \end{remark} \subsubsection{Logarithmic switchback}\label{sec:logsw} In Lindsay's work~\cite{Li14}, logarithmic terms in $\eps$, as well as fractional powers of $\eps$, arise in the asymptotic expansions of solutions to \Cref{eq-2} as ``switchback'' terms that need to be included during matching in order to ensure the consistency of these expansions~\cite{Li11}. In this subsection, we show that these terms are due to a resonance phenomenon in the blown-up vector field, see \cite{Po05}, hence establishing a connection between our dynamical systems approach and the method of matched asymptotic expansions. 
That connection has already been observed in various classical singular perturbation problems; examples include Lagerstrom's model equation for low Reynolds number flow~\cite{La84,La88,PS041}, front propagation in the Fisher-Kolmogorov-Petrowskii-Piscounov equation with cut-off \cite{Dumortier_2014,Dumortier_2007}, and the generalized Evans function for degenerate shock waves derived in~\cite{SS04}. \\ The occurrence of logarithmic switchback is necessarily studied in chart $K_1$, as the small parameter $\eps$ has to appear as a dynamic variable for resonances to be possible between eigenvalues of the linearization about an appropriately chosen steady state, namely $P_1^\mp$; recall \Cref{eq:p1mp}. Due to the symmetry properties of the corresponding vector field, it again suffices to restrict to the transition under the flow of \cref{eq-11} past $P_1^-=\big(0,-\frac2{\sqrt3},\frac{\sqrt3}{2}\delta-1,0\big)$ only. \begin{proposition} Let $\eps\in(0,\eps_0)$, with $\eps_0$ positive and sufficiently small. Then, \Cref{eq-11} admits the normal form \begin{subequations} \label{eq:K1sysf} \begin{align} r_1' &=-r_1, \label{eq:K1sysfa} \\ W_1' &=\frac{3\sqrt3}8W_1^2\eps_1+\frac{27}{16}W_1\eps_1^2-\frac{5\sqrt3}{64}\eps_1^3+\OO(4), \label{eq:K1sysfb} \\ \Xi_1' &=\frac{3\sqrt3}8\delta \eps+\frac{27}{16}\delta \eps W_1 +\frac{3\sqrt3}8\delta r_1W_1^2+\frac{27\sqrt3}{64}\delta \eps\eps_1 +\OO(4), \label{eq:K1sysfc} \\ \eps_1' &=\eps_1 \label{eq:K1sysfd} \end{align} \end{subequations} in an appropriately chosen neighborhood of $P_1^-$. (Here, $\OO(4)$ denotes terms of order $4$ and upwards in $(r_1,W_1,\Xi_1,\eps_1)$.) \end{proposition} \begin{proof} The proof is based on a sequence of near-identity transformations in a neighborhood of $P_1^-$ which reduces \Cref{eq-11} to the system of equations in \cref{eq:K1sysf}. 
In a first step, we shift $P_1^-$ to the origin, introducing the new variables $\tilde w_1$ and $\tilde\xi_1$ via $w_1=-\frac2{\sqrt3}+\tilde w_1$ and $\xi_1=\xi_1^-+\tilde\xi_1$. (Here and in the following, we write $\xi_1^-=\frac{\sqrt3}{2}\delta-1$.) Then, we divide out a positive factor of $\frac2{\sqrt3}-\tilde w_1(=-w_1)$ from the right-hand sides in the resulting equations, which corresponds to a transformation of the independent variable that leaves the phase portrait unchanged: \begin{subequations}\label{eq-11n} \begin{align} r_1' &=-r_1, \\ \tilde w_1' &=\frac{\eps_1(1-\eps_1^2)}{\frac2{\sqrt3}-\tilde w_1}, \label{eq-11nb} \\ \tilde\xi_1' &=\delta \frac{r_1}{\frac2{\sqrt3}-\tilde w_1}, \label{eq-11nc} \\ \eps_1' &=\eps_1. \end{align} \end{subequations} Next, we expand $\big(\tfrac2{\sqrt3}-\tilde w_1\big)^{-1}=\tfrac{\sqrt3}2\big(1-\tfrac{\sqrt3}2\tilde w_1\big)^{-1}=\tfrac{\sqrt3}2\big(1+\tfrac{\sqrt3}2\tilde w_1+\tfrac34\tilde w_1^2+\OO(\tilde w_1^3)\big)$ in~\Cref{eq-11nb,eq-11nc}, whence \begin{align*} \tilde w_1' &=\frac{\sqrt3}2\eps_1\bigg(1+\frac{\sqrt3}2\tilde w_1+\frac34\tilde w_1^2-\eps_1^2\bigg)+\OO(4), \\ \tilde\xi_1' &=\frac{\sqrt3}2\delta r_1\bigg(1+\frac{\sqrt3}2\tilde w_1+\frac34\tilde w_1^2\bigg)+\OO(4). \end{align*} Since none of the terms in the $\tilde w_1$-equation above are resonant, they can be removed by a sequence of near-identity transformations. For instance, setting $\tilde w_1=\hat w_1+\frac{\sqrt3}2\eps_1$, we may eliminate the linear $\eps_1$-term from that equation, whence \begin{align*} \hat w_1'=\frac34\eps_1\hat w_1+\frac{3\sqrt3}8\eps_1^2+\frac{3\sqrt3}8\hat w_1^2\eps_1 +\frac98\hat w_1\eps_1^2-\frac{7\sqrt3}{32}\eps_1^3+\OO(4). 
\end{align*} Similarly, we can eliminate the linear $r_1$-terms in the $\tilde\xi_1$-equation by introducing $\tilde\xi_1=\hat\xi_1-\frac{\sqrt3}2\delta r_1$; the equation for $\hat\xi_1$ then reads \begin{align*} \hat\xi_1'=\frac34\delta r_1\hat w_1+\frac{3\sqrt3}8 \delta \eps+ \frac{3\sqrt3}8\delta r_1\hat w_1^2+\frac98\delta \eps\hat w_1 +\frac{9\sqrt3}{32}\delta \eps\eps_1+\OO(4). \end{align*} The term $\frac{3\sqrt3}8\delta\eps=\frac{3\sqrt3}8\delta r_1\eps_1$ in the above equation is now resonant of order $2$, as $(-1)+0+0+1=0$ for the eigenvalues corresponding to the monomial $r_1\eps_1$ therein; hence, that term cannot be eliminated in general. (Here, we note that any factor of $\eps$ contributes a quadratic term to the asymptotics when considered in $(r_1,\hat w_1,\hat\xi_1,\eps_1)$-coordinates.)\\ A final sequence of near-identity transformations allows us to eliminate any non-resonant second-order terms from \cref{eq-11n}. Specifically, introducing $W_1$ and $\Xi_1$ such that \begin{align*} \hat w_1&=W_1+\frac34W_1\eps_1+\frac{3\sqrt3}{16}\eps_1^2, \\ \hat\xi_1&=\Xi_1-\frac34\delta r_1W_1, \end{align*} we obtain \Cref{eq:K1sysf}, as required. \end{proof} Next, we outline how the normal form in \Cref{eq:K1sysf} gives rise to logarithmic (``switchback'') terms in the expansion for $\xi_1$ -- or, rather, for the value $\xi_1^{\rm out}$ thereof in the section $\Sigma_1^{\rm out}$, as defined in~\cref{eq-20b}; see also Section~\ref{ssec:reg1}. In the process, we refine the approximation for $\xi_1^{\rm out}$ that was derived in the proof of Proposition~\ref{prop-1}; recall \Cref{eq-40}. \begin{lemma}\label{lem:xi1out} Let $\VV_{1_\eps}^-$ be defined as in \Cref{eq-60}, and consider the point $(1,w,-1,\eps)\in\VV_{1_\eps}^-$, with $w$ in a small neighborhood of $w_0^-=-\frac2{\sqrt3}$. 
Then, the orbit of \Cref{eq-11} that is initiated in that point intersects the section $\Sigma_1^{\rm out}$ in a point $\big(\frac\eps\sigma,w_1^{\rm out},\xi_1^{\rm out},\delta\big)$, with \begin{align}\label{eq-70} \xi_1^{\rm out}=-1+\frac{\sqrt3}2\delta -\frac{3\sqrt3}8\delta \eps \ln\eps+\OO(\delta \eps). \end{align} \end{lemma} \begin{proof} \Cref{eq:K1sysfa,eq:K1sysfd} can be solved explicitly for $r_1$ and $\eps_1$, which gives \begin{align}\label{eq:r1e1} r_1(\tilde x)=\rho\mathrm{e}^{-\tilde x}\quad\text{and}\quad \eps_1(\tilde x)=\frac\eps\rho\mathrm{e}^{\tilde x}; \end{align} here, $\tilde x$ denotes the rescaled independent variable that was introduced in the derivation of \cref{eq:K1sysf}. Hence, the transition ``time'' $\widetilde X$ between the sections $\Sigma_1^{\rm in}$ and $\Sigma_1^{\rm out}$ under the flow of \Cref{eq:K1sysf} is given by \begin{align}\label{eq:xel} \widetilde X=\ln{\frac{\rho\delta}{\eps}}. \end{align} For the sake of simplicity, we will henceforth only consider terms of up to order $2$ in \Cref{eq:K1sysfb,eq:K1sysfc}, which gives \begin{align}\label{eq-80a} W_1'=0\quad\text{and}\quad\Xi_1'=\frac{3\sqrt3}8\delta \eps \end{align} to that order. Hence, solving \Cref{eq-80a} for $W_1$ and $\Xi_1$ in forward time gives \begin{align}\label{eq-81a} W_1\equiv W_0\quad\text{and}\quad\Xi_1=\Xi_0+\frac{3\sqrt{3}}8\delta \eps\tilde x, \end{align} where $W_0=W_1(0)$ and $\Xi_0=\Xi_1(0)$ are constants that remain to be determined. \\ Undoing the above sequence of near-identity transformations -- i.e., reverting to the shifted variable $\tilde\xi_1$ -- we obtain \begin{align}\label{eq:xi1} \tilde\xi_1=\Xi_1-\frac{\sqrt3}2\delta r_1-\frac34\delta r_1W_1 =\Xi_0+\frac{3\sqrt{3}}8\delta \eps\tilde x -\frac{\sqrt3}2\delta r_1-\frac34\delta r_1W_1. 
\end{align} Hence, we also need to undo the transformation for $W_1\equiv W_0$; inverting the successive transformations for the variable $w_1$, we have \begin{align}\label{eq-82a} w_1=-\frac2{\sqrt3}+\Big(1+\frac34\eps_1\Big)W_0+\frac{\sqrt3}2\eps_1+\frac{3\sqrt3}{16} \eps_1^2. \end{align} Since $w_1\to-\frac2{\sqrt3}$ in the singular limit as $\eps_1 \to 0$, it follows that $W_0=0$ to the order considered here. In fact, expanding the expression for $w_1(\eps_1)$ in \Cref{eq-26} and retracing the above sequence of normal form transformations $w_1\mapsto\tilde w_1\mapsto\hat w_1\mapsto W_1$, we may infer from \cref{eq-81a} that $W_0=\tilde w_0+\OO(\eps)$, where we have written $w_0=-\frac2{\sqrt3}+\tilde w_0$ in \cref{eq-26}. As $\tilde w_0=\OO(\eps)$, by the proof of Proposition~\ref{prop-1}, we may conclude that $W_0=\OO(\eps)$.\\ Next, substituting into~\cref{eq:xi1} and noting that $\Xi_0=\tilde\xi_0+\frac{\sqrt3}2\delta\rho+\OO\big(\delta \eps\big)$ due to $r_1=\rho$ in $\Sigma_1^{\rm in}$, we obtain \begin{align}\label{eq:xi12} \tilde\xi_1=\tilde\xi_0+\frac{\sqrt3}2\delta (\rho-r_1)+\frac{3\sqrt3}8 \delta\eps\tilde x+\OO(\delta \eps). \end{align} Reverting to the original variable $\xi_1=\xi_1^-+\tilde\xi_1$, we then conclude that in $\Sigma_1^{\rm out}$, \begin{align}\label{eq:xi1out} \xi_1^{\rm out}=\xi_1(\widetilde X)=\xi_0+\frac{\sqrt3}2\delta \rho -\frac{3\sqrt3}8\delta\eps\ln\eps+\OO(\delta \eps). \end{align} We emphasize that the resonant term $\frac{3\sqrt3}8\delta\eps$ in \cref{eq-80a} gives rise to $\frac{3\sqrt3}8\delta\eps\tilde x$ in \cref{eq:xi12} after integration, which, for $\tilde x=\widetilde X$, yields an $\eps\ln\eps$-term in the expansion for $\xi_1^{\rm out}$. (Here, the error estimate in \cref{eq:xi1out} is again due to the fact that $W_1=\OO(\eps)$ throughout.)\\ It remains to approximate $\xi_0$. 
To that end, we consider \Cref{eq-11nc}, rewritten with $r_1$ as the independent variable: solving \begin{align*} \frac{{\rm d}\tilde\xi_1}{{\rm d}r_1}=\frac{{\rm d}\xi_1}{{\rm d}r_1}=-\frac{\delta }{\frac2{\sqrt3}-\tilde w_1} =-\frac{\sqrt3}2\delta \big(1+\OO(\tilde w_1)\big) \end{align*} with $\xi_1(1)=-1$ and noting that $\tilde w_1=\OO(\eps)$, by \cref{eq-81a}, we find \begin{align*} \xi_1(r_1)=-1-\frac{\sqrt3}2\delta (r_1-1)+\OO\big(\delta \eps\big) \end{align*} and, hence, $\xi_0=\xi_1(\rho)=-1-\frac{\sqrt3}2\delta (\rho-1)+\OO\big(\delta \eps\big)$ which, in combination with \cref{eq:xi1out}, yields \Cref{eq-70}, as claimed. \end{proof} \begin{remark} The fact that \Cref{eq-11nc} is decoupled, in combination with the structure of the above sequence of normal form transformations $\tilde w_1\mapsto\hat w_1\mapsto W_1$ -- which depends on $\eps_1$ only -- implies that no resonances will occur in the corresponding expansion for $w_1^{\rm out}$. In fact, such an expansion can immediately be derived from \cref{eq-82a}. \end{remark} \begin{remark}\label{rem:xc} One can show that \Cref{lem:xi1out} is consistent with Lindsay's results \cite[Section~3]{Li14}; in fact, up to a transformation of variables, the quantity $\xi_1^{\rm out}$ corresponds to the point $-x_c$ introduced there, with $\lambda_{0c}=\frac{m-1}{2(m-2)}=\frac34$ due to $m=4$ in our case: \begin{equation} \label{eq:xi1outli} -x_c = -1+ \eps^{\frac12}\bar{x_c} = -1+\frac{\sqrt3}2\delta -\frac{3\sqrt3}8\sqrt{\frac\eps\lambda}\eps \ln\eps+\OO(\delta \eps). \end{equation} \end{remark} \subsection{Region $\mathcal{R}_2$}\label[subsection]{ssec:reg2} For $\eps >0$, region $\mathcal{R}_2$ covers a small neighborhood of the point $B$ in $(\lambda,\Vert u\Vert_2^2)$-space; recall \cref{fig:bdsegm}. 
That region contains the portion of the branch of solutions in the bifurcation diagram which limit on solutions of types I and II as $\eps\to 0$; moreover, $\mathcal{R}_2$ establishes the connection with the branches of solutions that are contained in regions $\mathcal{R}_1$ and $\mathcal{R}_3$. According to the definition in~\cref{eq:R2}, the size of $\mathcal{R}_2$ is $\eps$-dependent; in particular, that region collapses onto a line as $\eps \to 0$. We will show that, for $0<\eps\ll 1$, a saddle-node bifurcation occurs in $\mathcal{R}_2$ at $\lambda=\lambda_\ast$, as defined in \cite{Li14}; see \cref{fig:bdfolds}. \begin{figure} \caption{Numerical bifurcation diagram showing solutions of~\cref{eq-2}.} \label{fig:bdfolds} \end{figure} Due to the singular dependence of $\lambda_\ast$ on the regularization parameter $\eps$, an accurate numerical approximation is difficult to obtain for small values of $\eps$. Using matched asymptotics, it was shown in~\cite{Li14} that $\lambda_{\ast}=\OO(\eps)$, with an expansion of the form \begin{align*} \lambda_{\ast}(\eps)=\lambda_{\ast 0}\eps+\lambda_{\ast 1}\eps^2\ln\eps+\lambda_{\ast 2}\eps^2 +\OO(\eps^3). \end{align*} However, the coefficients $\lambda_{\ast i}$ remained undetermined there. Here, we confirm rigorously the structure of the above expansion, and we determine explicitly the values of the coefficients $\lambda_{\ast i}$ therein for $i=0,1$. Moreover, we indicate how higher-order coefficients may be found systematically, and we identify the source of the logarithmic (``switchback'') terms (in $\eps$) in the expansion for $\lambda_{\ast}$; cf.~\cref{prop-2} below. \begin{remark} While a saddle-node bifurcation is equally observed in the \linebreak{bi-Laplacian} case, recall~\Cref{LiBiLap}, Lindsay's work \cite{Li16} shows that the asymptotics of the associated $\lambda$-value $\lambda_\ast$ is far less singular in that case, allowing for a straightforward and explicit calculation of the corresponding coefficients. 
\end{remark} To leading order, $\lambda_\ast$ equals the abovementioned critical value $\frac34\eps$, which corresponds to $\delta_\ast=\frac2{\sqrt3}$ in terms of $\delta$. That critical $\delta$-value was not covered in our discussion of region $\mathcal{R}_1$ in the previous section, as the argument applied in that region failed there; cf.~\cref{rem:lam34}. Hence, a different argument is required for analysing the local dynamics in a neighborhood of the saddle-node bifurcation point at $\delta_\ast$. In a first step, we consider the existence of singular solutions for varying $\delta$; in particular, the existence of type II-solutions in region $\mathcal{R}_2$ is guaranteed by the following \begin{lemma}\label{lem-II} Let $\frac1{\sqrt{\lambda_2}}\leq\delta\leq \delta_1$, with $\delta_1<\frac2{\sqrt3}$. Then, a singular solution of type II exists if and only if $w_1=\mp \delta$ at $\xi_1=\mp 1$. \end{lemma} \begin{proof} In the original model, \Cref{eq-3}, the ``touchdown'' solution of type II satisfies $w=\mp1$ at $x=\mp1$; cf.~\cref{def:soltyp}. After the $w$-rescaling in~\cref{eq-6}, these boundary conditions are equivalent to $\tilde w=\mp\delta$ at $\xi=\mp1$. The dynamics close to the boundary is naturally studied in chart $K_1$, which implies that $w_1=\mp\delta$ must hold at $\xi_1=\mp1$; cf.~\cref{eq-9a}. For $\frac1{\sqrt{\lambda_2}} \leq\delta\leq\delta_1$, and in contrast to the solutions of type I considered in \cref{ssec:reg1}, the corresponding orbits can be fully studied in chart $K_1$, as they stay away from the critical manifold $\SS_2^0$ in $K_2$; recall~\Cref{eq-24}. The existence of a connecting orbit on the blow-up cylinder between $w_1=-\delta$ and $w_1=\delta$ then follows automatically; see the upper panel of \cref{fig:3lam}. \end{proof} \begin{remark} For $\delta=0$, the type II-solution constructed in \cref{lem-II} collapses onto the line $\{w_1=0\}$. 
That case, which requires further consideration, is studied in region $\mathcal{R}_3$; cf.~\cref{ssec:reg3}. In fact, and as mentioned previously, both $\mathcal{R}_2$ and $\mathcal{R}_3$ are required to cover the green curve in \cref{fig:bdblup}. \end{remark} \Cref{lem-II} guarantees the existence of a type II-solution for every $\delta\in\big[ \frac1{\sqrt{\lambda_2}},\delta_1\big]$, with $\delta_1<\delta_\ast$. For the same range of $\delta$, {\it i.e.}, in the overlap between regions $\mathcal{R}_1$ and $\mathcal{R}_2$, \cref{prop-1} implies the local existence of type I-solutions. Hence, we can conclude that the boundary value problem \{\cref{eq-7},\cref{eq-5b}\} admits a pair of singular solutions for $\delta<\delta_\ast$; one of these is of type I, while the other is of type II. At $\delta=\delta_\ast$, the two singular solutions coalesce in a type II-solution. Finally, for $\delta>\delta_\ast$, no singular solution exists. The resulting three scenarios are illustrated in \cref{fig:3lam}. In particular, we note that solutions of \mbox{type I} satisfy $w_1=\mp\frac{2}{\sqrt{3}}$ -- or, equivalently, $w=\mp\frac{2}{\sqrt{3}\delta}$ in the original formulation -- for $\xi_1=\mp 1$, while those of type II are characterized by $w_1=\mp\delta$ at $\xi_1=\mp 1$, as proven in \cref{lem-II}; see again \cref{fig:3lam}. \begin{figure} \caption{Saddle-node bifurcation in the singular limit of $\eps=0$ in \Cref{eq-7}.} \label{fig:3lam} \end{figure} The main result of this section is the following \begin{proposition}\label{prop-2} There exists $\eps_0>0$ sufficiently small such that in region $\mathcal{R}_2$, the boundary value problem \{\cref{eq-7},\cref{eq-5b}\} admits a unique branch of solutions for $\eps\in(0,\eps_0)$. That branch consists of two sub-branches which limit on singular solutions of types I and II, respectively, as $\eps\to0$. 
The two sub-branches meet in a saddle-node bifurcation point at $\lambda_\ast(\eps)$, where two solutions exist for $\lambda>\lambda_\ast$ and $|\lambda-\lambda_\ast|$ small, whereas no solution exists for $\lambda < \lambda_\ast$. Moreover, for $\eps\in(0,\eps_0)$, $\lambda_\ast$ has the asymptotic expansion \begin{align}\label{eq-42} \lambda_{\ast}(\eps)=\frac34\eps-\bigg(\sqrt{\frac32}+\frac98\bigg)\eps^2\ln\eps+ \OO(\eps^2). \end{align} The transition between regions $\mathcal{R}_2$ and $\mathcal{R}_1$ occurs as the branch of solutions limiting on solutions of type I connects to the branch already constructed in \cref{prop-1}. \end{proposition} \begin{proof} The proof consists of two parts: we first consider a small neighborhood of $\delta_\ast=\frac{2}{\sqrt{3}}$ -- {\it i.e.}, of $\lambda=\frac34 \eps$ -- where the saddle-node bifurcation occurs. We define a suitable bifurcation equation, which describes the transition from solutions which limit on type I-solutions to those which limit on solutions of type II. Based on that equation, we infer the presence of the saddle-node bifurcation, and we calculate the expansion for the corresponding $\lambda$-value $\lambda_\ast$. In a second step, we consider the branch of solutions that limit on type II-solutions for the remaining values of $\lambda$ in $\mathcal{R}_2$. Later, that branch will be shown to connect to solutions that are covered by region $\mathcal{R}_3$. We begin by constructing the requisite bifurcation equation for the first step in our proof. Since $w\approx-\frac2{\sqrt{3}}$ and $\delta\approx\frac{2}{\sqrt{3}}$, we write \begin{align} \label{w0ll} w_0=-\frac{2}{\sqrt{3}}+\Delta w\quad\text{and}\quad\delta=\frac{2}{\sqrt{3}}+\Delta\delta \end{align} in chart $K_1$. 
Applying the shooting argument outlined in \Cref{sec:dynfor}, we track the corresponding orbit from the initial manifold $\VV_{1_\eps}^-$ defined in \cref{eq-60} through $K_1$ and into the section $\Sigma_1^{\rm out}$; we denote that orbit by $\gamma_1^-$. In chart $K_2$, the point of intersection of the equivalent orbit $\gamma_2^-$ with the section $\Sigma_2^{\rm in}$ is then given by $(\sigma^{-1},w_2^{\rm in}, \xi_2^{\rm in},\eps)$, for appropriately defined $w_2^{\rm in}$ and $\xi_2^{\rm in}$. \begin{figure} \caption{Sketch of the shooting argument underlying the proof of \cref{prop-2} \label{fig:prop3} \end{figure} Next, we consider the evolution of the orbit $\gamma_2^-$ through $K_2$. Let $X^{\rm out}$ denote the ``time'' at which $\gamma_2^-$ reaches the hyperplane $\Delta_2=\{w_2=0\}$, viz. $w_2(X^{\rm out})=0$. (By symmetry, it then follows that the reflection $\gamma_2^+$ of $\gamma_2^-$ under the map $(u_2,w_2,\xi_2,r_2)\mapsto (u_2,-w_2,-\xi_2,r_2)$ will satisfy the boundary condition at $\VV_{1_\eps}^+$, with $w_0=\frac{2}{\sqrt{3}}-\Delta w$, after transformation to $K_1$.) Clearly, $X^{\rm out}$ depends on $w_2^{\rm in}$ and, in particular, on $\Delta w$, {\it i.e.}, on the initial deviation of the orbit from its singular limit $\Gamma_1^-$ in chart $K_1$. As per our shooting argument, we need to impose the constraint that $\xi_2(X^{\rm out})=0$. Dividing \Cref{eq-13c} by \Cref{eq-13a} and recalling that $r_2=\eps$ in chart $K_2$, we find $\frac{\mathrm{d}\xi_2}{\mathrm{d} u_2}=\frac{\delta\eps}{w_2}$ and, therefore, \begin{align}\label{eq:csi2int} \xi_2(u_2)=\xi_2^{\rm in}+\delta\eps\int_{u_2^{\rm in}}^{u_2^{\rm out}}\frac{1}{w_2(u_2)}\, \mathrm{d} u_2. \end{align} Here, $u_2^{\rm in}=\sigma^{-1}$ as in the definition of $\Sigma_2^{\rm in}$ in \cref{eq-21}, while $u_2^{\rm out}$ denotes the value of $u_2$ such that $w_2(u_2^{\rm out})=0$; cf.~again \cref{fig:prop3}. 
The sought-after bifurcation equation now corresponds to a relation between $\Delta w$, $\eps$, and $\delta$ that is satisfied for any solution to the boundary value problem \{\cref{eq-7},\cref{eq-5b}\} close to the saddle-node bifurcation in \Cref{eq-7}. To derive such a relation, we must first approximate $u_2^{\rm out}$: recalling the explicit expression for $w_1(\eps_1)$ on $\gamma_1^-$, as given in \cref{eq-26}, substituting the Ansatz made in \Cref{w0ll}, and rewriting the result in the coordinates of chart $K_2$, we find \begin{align}\label{w2u2} w_2(u_2) = -\sqrt{\Big(-\frac{2}{\sqrt{3}}+\Delta w\Big)^2+2\Big(\varepsilon-\frac{1}{u_2}\Big)-\frac23\Big(\varepsilon^3-\frac{1}{u_2^3}\Big)} \end{align} on $\gamma_2^-$. Next, we write $u_2^{\rm out}=1+\Delta u$ in \cref{w2u2}, where $\Delta u$ is assumed to be sufficiently small due to the fact that we stay close to the equilibrium at $(u_2,w_2)=(1,0)$ in $K_2$. Then, we solve the resulting expression for $\Delta u$ to find three roots; two of these are complex conjugates, and are hence irrelevant due to the real nature of our problem. Expanding the third root, which is real irrespective of the value of $\Delta w$, in a series with respect to $\Delta w$ and $\varepsilon$, we find \begin{align}\label{u2out} u_2^{\rm out}=1+\frac{13\sqrt{3}}{9}\Delta w-\frac{13}{6}\varepsilon+\OO(2), \end{align} to first order in $\Delta w$ and $\varepsilon$. It remains to determine the leading-order asymptotics of the integral in~\cref{eq:csi2int}. To that end, we expand the integrand therein as \begin{align}\label{expintd} \frac{1}{w_2(u_2)}=-\sqrt{\frac{3 u_2^3}{2 (u_2-1)^2 (2u_2+1)}}+\OO(\Delta w,\eps), \end{align} which can be shown to be sufficient to the order of accuracy considered here. (The inclusion of higher-order terms in \cref{eq:csi2int} would yield a refined bifurcation equation, and would hence allow us to take the expansion for $\lambda_\ast$ in \cref{eq-42} to higher order in $\eps$.) 
Combining~\cref{expintd} and~\cref{u2out} and noting that $u_2^{\rm in}$ only enters through higher-order terms in $\Delta w$, which are neglected here, we finally obtain the expansion \begin{align}\label{expint} \int_{u_2^{\rm in}}^{u_2^{\rm out}} \frac{1}{w_2(u_2)}\,\mathrm{d}u_2=-\frac{\sqrt{2}}{2}\ln\Delta w+C +\OO(\Delta w), \end{align} where $C$ is a computable constant. (The above expansion reflects the fact that, as $\Delta w\to 0$, {\it i.e.}, as the point $(\sigma^{-1},w_2^{\rm in},\xi_2^{\rm in},\eps)$ tends to the stable manifold $\WW_2^{\rm s}(Q_2)$, the ``time'' required for reaching $\Delta_2$ tends to infinity. Moreover, it is consistent with the observation that expansions of solutions passing close to equilibria or slow manifolds of saddle type frequently involve logarithmic terms.) Next, we substitute $\xi_2^{\rm in}(=\xi_1^{\rm out})=-1-\frac{\delta}{w_0}+ \frac{\delta}{w_0^3}\eps\ln\eps+\OO(\eps)$ from~\cref{eq-112} into \cref{eq:csi2int}, making use of \cref{expint}, to obtain \begin{align} \label{csi2} \xi_2(u_2^{\rm out})=-1-\frac{\delta}{w_0}-\frac{\sqrt{2}}{2}\delta\eps\ln\Delta w+\frac{\delta}{w_0^3}\eps\ln\eps+\OO(\eps)\stackrel{!}{=}0. \end{align} Shifting $w_0$ and $\delta$ by $\Delta w$ and $\Delta\delta$, cf.~\cref{w0ll}, and solving \cref{csi2} for $\Delta \delta$, we obtain the following bifurcation equation in $(\Delta w, \Delta\delta,\eps)$: \begin{align} \label{bifeq} \Delta\delta=-\Delta w+\frac{2\sqrt{2}}{3}\eps\ln\Delta w+\frac{\sqrt{3}}{2}\eps\ln\eps+\OO(\eps). \end{align} The last step consists in finding the $\Delta w$-value $\Delta w_\ast$ at which the bifurcation equation in~\cref{bifeq} attains its minimum, corresponding to the approximate location of the saddle-node bifurcation in \Cref{eq-7}, and in reverting to the original scalings. To that aim, we differentiate \Cref{bifeq} and solve $\frac{\mathrm{d}\Delta\delta}{\mathrm{d}\Delta w}=0$ to leading order, which yields $\Delta w_\ast=\frac{2\sqrt{2}}{3}\eps$; see~\cref{fig:bdsn}. 
Substituting into~\cref{bifeq}, we obtain the corresponding value of $\Delta\delta_\ast$, which implies $\lambda_\ast=\frac{\eps}{\delta_\ast^2}=\eps\big(\frac{2}{\sqrt{3}}+\Delta\delta_\ast\big)^{-2}$ by \Cref{w0ll}. Hence, we find the desired asymptotic expansion for $\lambda_\ast$, viz. \begin{align} \label{snexp} \lambda_{\ast}(\eps)=\frac34\eps-\bigg(\sqrt{\frac32}+\frac98\bigg)\eps^2\ln\eps+\OO(\eps^2), \end{align} as claimed. Finally, since \begin{align*} \frac{\mathrm{d}^2\Delta\delta}{\mathrm{d}(\Delta w)^2}\bigg|_{\Delta w=\Delta w_\ast}= -\frac{2 \sqrt{2}}{3} \frac{\eps}{(\Delta w_\ast)^2} \end{align*} is negative, the function $\Delta\delta(\Delta w)$ is locally concave, which implies that the unfolding of solutions to \Cref{eq-7} for $|\lambda-\lambda_\ast|$ small is as given in the statement of the proposition; see~\cref{fig:bdsna}. In particular, the branch of solutions which limits on solutions of type I overlaps with the one contained in region $\mathcal{R}_1$, as $\delta_1$ can be chosen arbitrarily close to $\frac{2}{\sqrt{3}}$ in the statement of \cref{prop-1}. The last part of the proof concerns the existence of solutions to the boundary value problem \{\cref{eq-7},\cref{eq-5b}\} which limit on type II-solutions as $\eps\to 0$ for the remaining values of $\lambda$ in $\mathcal{R}_2$, {\it i.e.}, for $\delta\in\big(\frac1{\sqrt{\lambda_2}},\delta_1\big)$. The existence of singular solutions of type II in that range is ensured by \cref{lem-II}. In the singular limit, {\it i.e.}, for $\eps=0$, we have transversality at $\xi_1=0$ with respect to variation of $w_1$ at $\xi_1=\mp 1$ around $\mp\delta$. Hence, the corresponding singular solutions perturb to solutions of the boundary value problem \{\cref{eq-7},\cref{eq-5b}\} for $0<\eps \ll 1$, which completes the proof. 
\end{proof} \begin{remark} The branch of solutions derived in the last part of the proof is still described by the bifurcation equation in \cref{bifeq}, the difference being that the $\eps \ln\Delta w$-term is now regular, \emph{i.e.}, $\OO(\eps)$, due to $\Delta w=\OO(1)$. The above proof also implies that $\Delta\delta$ must be larger than $\OO(\eps)$; in fact, Lindsay's work \cite{Li14} shows that $\Delta\delta=\OO(\eps\ln\eps)$. \end{remark} \begin{remark} The presence of an $\eps \ln \eps$-term in the bifurcation equation \cref{bifeq} implies that the convergence to the singular limit of $\eps=0$ cannot be smooth in $\eps$; rather, it will be regular in $(\eps, \ln \eps)$. A similar situation was encountered in \cref{prop-1} above, where the presence of logarithmic switchback terms in $\eps$ was observed; recall \cref{sec:logsw}. Here, we emphasize that the source of these terms in \cref{bifeq} is two-fold: in addition to switchback due to a resonance in chart $K_1$, logarithmic terms are also introduced through the passage of the flow past the saddle point at $(1,0)$ in $K_2$, as is evident from the $\eps\ln\Delta w$-term in \Cref{expint}. In particular, both contributions manifest in the expansion for $\lambda_\ast(\eps)$ in \Cref{snexp}. \end{remark} \begin{figure} \caption{Illustration of the saddle-node bifurcation at $\lambda_\ast$ in \Cref{eq-7} \label{fig:bdsna} \label{fig:bdsnb} \label{fig:bdsn} \end{figure} The asymptotic expansion for $\lambda_\ast$ in~\cref{eq-42} shows excellent agreement with numerical values that were obtained using the continuation software package~\texttt{AUTO}~\cite{AU}; see \cref{fig:errcom}. In particular, the distance between the two curves is $\OO(\eps^2)$, {\it i.e.}, of higher order in $\eps$, as postulated. 
\begin{figure} \caption{Comparison between the asymptotic expansion for $\lambda_\ast(\eps)$ in~\cref{eq-42} \label{fig:errcom} \end{figure} \subsection{Region $\mathcal{R}_3$} \label[subsection]{ssec:reg3} It remains to analyse region $\mathcal{R}_3$, which contains the branch of solutions in the bifurcation diagram that perturb from type III-solutions, corresponding to the non-regularized problem \begin{align}\label{eq:nrm} u''=\frac{\lambda}{u^2},\qquad\text{ for }x\in[-1,1],\text{ with }u=1\text{ when }x=\mp1. \end{align} By \cref{def:soltyp}, solutions of type III differ from those of types I and II, in that they do not exhibit touchdown phenomena. Regularization affects them only weakly, {\it i.e.}, in a regular fashion, with the effect becoming slightly more pronounced as $\lambda \to 0$; cf.~\cref{fig:segm3}. Thus, most of the solutions contained in region $\mathcal{R}_3$ perturb from $\mathcal{B}_3$ in a regular way, and are hence easy to obtain. The limit of $\lambda \to 0$, {\it i.e.}, the transition from $\mathcal{R}_3$ to $\mathcal{R}_2$, needs to be treated more carefully. \begin{remark} It is easy to see that \Cref{eq:nrm} -- or, rather, the corresponding first-order system -- is Hamiltonian; the level curves of the associated Hamiltonian are given precisely by the singular solutions in panel (b) of \cref{fig:segm3}. \end{remark} \begin{figure} \caption{(a) Covering of the curve $\mathcal{B} \label{fig:segm3} \end{figure} Type III-solutions are contained in the curve $\mathcal{B}_3$ in the limit of $\delta=0$; see \cref{fig:deleps}. That limit was not covered in region $\mathcal{R}_2$, as the approach used there required the assumption that $\delta\geq \frac1{\sqrt{\lambda_2}}$. 
The limit as $\delta\to 0$, however, results in singular dynamics in chart $K_1$, as the type II-solution (green) -- corresponding to $w_1=\mp \delta$ at $\xi_1=\mp1$ -- collapses onto the line \begin{align}\label{eq:M10} \MM_1^0:=\set{(r_1,0,\xi_1,0)}{r_1\in\mathbb{R}^+,\ \xi_1\in\mathbb{R}}; \end{align} see \cref{fig:SysNTT:b} and the upper panel of \cref{fig:3lam}. Clearly, $\MM_1^0$ constitutes a line of non-hyperbolic equilibria for \Cref{eq-11} which corresponds to the manifold $\MM^0$ in~\cref{M0}, after blow-down. The singular nature of $\MM_1^0$ is related to the rescaling of $w$ introduced in~\cref{eq-6}. That rescaling, which corresponded to a ``zooming out'', turned out to be particularly useful for our analysis in regions $\mathcal{R}_1$ and $\mathcal{R}_2$. However, it cannot provide a good description of region $\mathcal{R}_3$. To study the dynamics in $\mathcal{R}_3$, we would have to perform another blow-up involving $\delta$, $w_1$, and $\eps_1$ in chart $K_1$ in order to basically undo the $w$-rescaling in~\cref{eq-6}. It is much simpler to consider the $\delta$-range covered by $\mathcal{R}_3$ by returning to the original system without any rescaling of $w$; cf.~\Cref{eq-4}. The main result of this section is the following \begin{proposition}\label{prop-3} There exists $\eps_0>0$ sufficiently small such that in region $\mathcal{R}_3$, the boundary value problem \{\cref{eq-4},\cref{eq-5}\} admits a unique branch of solutions for $\eps\in(0,\eps_0)$. Outside of a fixed neighborhood of the point $B$, that branch converges smoothly as $\eps\to 0$ to the curve $\mathcal{B}_3$ along which solutions of the non-regularized boundary value problem, \Cref{eq:nrm}, exist. In the $\eps$-dependent region overlapping with $\mathcal{R}_2$, the branch of solutions limiting on solutions of type II described in \cref{prop-2} is recovered. 
There, the transition from solutions that limit on type-III solutions to those limiting on singular solutions of type II occurs. \end{proposition} \begin{proof} We recall the original first-order system, \Cref{eq-4}: \begin{align*} u' &=u^4w, \\ w' &=\lambda(u^2-\eps^2), \\ \xi' &=u^4, \\ \eps' &=0; \end{align*} given \Cref{delta}, we write $\eps=\delta^2 \lambda$ and obtain the equivalent system \begin{subequations}\label{sysdel} \begin{align} u' &=u^4w, \\ w' &=\lambda(u^2-\delta^4 \lambda^2), \\ \xi' &=u^4, \\ \delta' &=0. \end{align} \end{subequations} Here, the parameter $\delta$ plays the role of the small perturbation parameter, with the $\delta$-range corresponding to region $\mathcal{R}_3$ given by \begin{align*} \delta\in\Big[0,\frac1{\sqrt{\lambda_3}}\Big]; \end{align*} cf.~\cref{eq:R3}. In summary, it is hence more convenient to consider $\lambda$ and $\delta$, rather than $\lambda$ and $\eps$, as the relevant parameters in this regime. For $\delta=0$ and $\lambda>0$, the projection of the flow of \Cref{sysdel} is as illustrated in \cref{fig:SysNTT:a}. In region $\mathcal{R}_3$, however, we are also interested in covering a small neighborhood of $\lambda=0$, which again gives the singular dynamics shown in \cref{fig:SysNTT:b}. In $(u,w)$-space, the singular solution found for $\lambda=0$ consists of $[0,1]\times\{-1\}$ and $[0,1]\times\{1\}$, {\it i.e.}, it approaches the degenerate line of equilibria for \cref{sysdel} at $\{(0,w)\}$ under the forward and backward flow in $x$, respectively; see \cref{fig:flg}. \begin{figure} \caption{Singular solution of \Cref{sysdel} \label{fig:flg} \end{figure} To analyze the dynamics close to that line, we have to introduce a blow-up of $(u,\lambda)=(0,0)$. As the blow-up involves $\lambda$, we append the trivial equation $\lambda'=0$ to \cref{sysdel}: \begin{subequations} \label{sysdele} \begin{align} u' &=u^4w, \\ w' &=\lambda(u^2-\delta^4 \lambda^2), \\ \xi' &=u^4, \\ \lambda' &=0, \\ \delta'&=0. 
\end{align} \end{subequations} The requisite blow-up transformation is then given by \begin{align}\label{buR3} u=\bar{r}\bar{u}\qquad\text{and}\qquad\lambda=\bar{r} \bar{\lambda}, \end{align} where $(\bar{u},\bar{\lambda}) \in S^1$, {\it i.e.}, $\bar{u}^2+\bar{\lambda}^2=1$, and $\bar{r} \in [0, r_0)$, with $r_0>0$. We denote the chart corresponding to $\bar{u}=1$ by $\kappa_1$. The analysis in that chart turns out to be sufficient for proving \cref{prop-3}. In chart $\kappa_1$, the blow-up transformation in~\cref{buR3} reads \begin{align}\label{kappa1c} u=r_1\qquad\text{and}\qquad\lambda=r_1 \lambda_1, \end{align} which gives \begin{subequations} \label{sysdel1} \begin{align} r_1' &=r_1 w, \\ w' &=\lambda_1(1-\delta^4 \lambda_1^2),\\ \xi' &=r_1, \\ \lambda_1' &= -\lambda_1 w, \\ \delta' &=0 \end{align} \end{subequations} for \Cref{sysdele}; here, $\delta$ is the small (regular) perturbation parameter. For any $\lambda \in [0,1]$, the existence of solutions to \cref{sysdel1} can be studied via the symmetric shooting argument outlined in \Cref{sec:dynfor}. To that end, we define a set of initial conditions at $(r_1,\xi)=(1,-1)$, as follows: \begin{align}\label{Vl} \mathcal{V}_\lambda=\set{(1,w_0,-1,\lambda,\delta)}{w_0 \in I}, \end{align} where $I$ is a neighborhood of $w=-1$. We remark that the initial value $\lambda$ for $\lambda_1$ follows from $\lambda=r_1 \lambda_1$, cf.~\cref{kappa1c}, as $r_1=1$ initially. 
Next, we introduce $w$ as the independent variable in \cref{sysdel1}, whence \begin{subequations}\label{sysdel1w} \begin{align} \frac{{\rm d} r_1}{{\rm d} w} &=\frac{r_1 w}{\lambda_1(1-\delta^4 \lambda_1^2)}, \\ \frac{{\rm d} \xi}{{\rm d} w} &=\frac{r_1}{\lambda_1(1-\delta^4 \lambda_1^2)}, \\ \frac{{\rm d} \lambda_1}{{\rm d} w} &= -\frac{w}{1-\delta^4 \lambda_1^2}, \\ \frac{{\rm d} \delta}{{\rm d} w} &=0, \end{align} \end{subequations} with initial conditions \begin{align}\label{sysdelbc} r_1(w_0)=1,\quad\xi(w_0)=-1,\quad\lambda_1(w_0)=\lambda,\quad\text{and}\quad\delta(w_0)=0. \end{align} We track $\mathcal{V}_\lambda$ under the flow of \cref{sysdel1w} up to the hyperplane $\{w=0\}$; see \cref{fig:Vl}. There, we obtain a point $(r_1^{\rm out},0,\xi^{\rm out},\lambda_1^{\rm out},\delta)$ in $(r_1,w,\xi,\lambda_1,\delta)$-space. Our shooting argument implies that we have to solve the equation \begin{align}\label{xiout11} \xi^{\rm out}(w_0,\lambda,\delta)=0. \end{align} \begin{figure} \caption{Dynamics of \Cref{sysdel1} \label{fig:Vl} \end{figure} At this point, we split $\mathcal{R}_3$ into two subregions in which we apply separate arguments to prove the existence of a unique branch of solutions, as claimed in the statement of the proposition. For $\lambda\geq\tilde{\lambda}$, with $\tilde{\lambda}$ fixed and positive, and $\delta=0$, \Cref{sysdel1w} can be solved explicitly subject to \cref{sysdelbc}; moreover, a solution $w_0=w_0(\lambda)$ of \Cref{xiout11} can be proven to exist for $\lambda\leq\lambda^\ast$. At $\lambda=\lambda^\ast$, transversality breaks down, as \Cref{xiout11} does not admit a solution for $\lambda>\lambda^\ast$. The corresponding singular solutions are of type III; cf.~\cref{def:soltyp}. 
Due to the regularity of \cref{xiout11} with respect to $\delta$, these solutions perturb in a regular fashion to solutions of \{\cref{sysdel1w},\cref{sysdelbc}\} for $\delta$ positive and small; in particular, we consider $\delta\leq\frac1{\sqrt{\lambda_3}}$ with $\lambda_3$ large, in accordance with \cref{eq:R3}. For $\lambda$ close to $\lambda^\ast$, individual solutions do not perturb regularly; however, the structurally stable saddle-node bifurcation at $\lambda^\ast$ as a whole will persist as a regular perturbation, giving rise to a slightly perturbed value $\lambda^\ast(\delta)$ for the perturbed saddle-node point. Since the resulting asymptotics of $\lambda^\ast(\delta)$ is not our main concern, we do not consider it further here. The second subregion of $\mathcal{R}_3$, which includes the overlap with region $\mathcal{R}_2$, corresponds to a small neighborhood of $(\lambda,\delta)=(0,0)$ that is given by \begin{align} \label{p3p2} (\lambda,\delta)\in\big[0,\tilde{\lambda}\big]\times\Big[0,\frac1{\sqrt{\lambda_3}}\Big]. \end{align} To study the branch of solutions in this subregion, we solve \Cref{sysdel1w} with initial conditions as in~\cref{sysdelbc} by expanding around $(w_0,\lambda,\delta)=(-1,0,0)$, and by making use of the fact that the equations can be solved explicitly for $\delta=0$. Linearizing \Cref{xiout11} around $\delta=0$, we obtain a regular perturbation problem in $\delta$ for $\xi^{\rm out}$, which gives the following expanded form of \Cref{xiout11}, up to higher-order terms in $(w_0,\lambda,\delta)$: \begin{align}\label{w0A} w_0+1-(4+3 w_0)\lambda\ln\lambda+\frac1{288}(1+w_0)\delta^8\ln\lambda=0. \end{align} \Cref{w0A} again contains logarithmic terms due to resonance between the eigenvalues $-1$, $0$ (double), and $1$ of the linearization of \Cref{sysdel1} about the steady state at $(0,-1,-1,0)$ in chart $\kappa_1$. 
These terms arise in the passage of orbits through a neighborhood of $\{r_1=0\}$, as was observed in chart $K_1$; see \cref{sec:logsw}. Solving \Cref{w0A} for $w_0$ gives \begin{align}\label{w0A2} w_0=-1+\lambda\ln\lambda+C(\delta)\lambda+\OO\big[\lambda^2(\ln\lambda)^2\big] \end{align} with $C(\delta)=\OO(\delta^8)$, which is regular in $\delta$, as expected. We note that, for $\lambda=0$, \cref{w0A} reduces to the trivial equation $w_0+1=0$, which is solved by $w_0=-1$, irrespective of $\delta$. The resulting singular solutions are type II-solutions, which are shown as the part of the green curve in the blown-up bifurcation diagram in \cref{fig:bdblup} that corresponds to $\bar{\eps}$ small. In line with these observations, \Cref{w0A} is identical to \Cref{csi2} up to terms of order $\OO(\delta^2\lambda)$ after the rescaling of $w$ in~\cref{eq-6}. For $\delta=0$ and $\lambda>0$, on the other hand, we match with the branch obtained in the part of region $\mathcal{R}_3$ that corresponds to $\lambda\geq\tilde{\lambda}$. The results obtained in the above two subregions prove the existence and uniqueness of a curve of solutions to the boundary value problem \{\cref{eq-7},\cref{eq-5b}\} in $\mathcal{R}_3$, as stated in \cref{prop-3}. It remains to consider the overlap between regions $\mathcal{R}_3$ and $\mathcal{R}_2$: in $(\lambda,\delta)$-space, $\mathcal{R}_3$ corresponds to \begin{align}\label{R3ps} [0,1]\times\bigg[0,\frac1{\sqrt{\lambda_3}}\bigg]\setminus [0,\eps\lambda_3]\times \bigg[\frac1{\sqrt{\lambda_2}},\frac1{\sqrt{\lambda_3}}\bigg], \end{align} while $\mathcal{R}_2$ covers the area \begin{align}\label{R2ps} [0,\eps\lambda_2]\times\bigg[\frac1{\sqrt{\lambda_2}},\delta_1\bigg], \end{align} where $\delta_1<\frac2{\sqrt3}$ is defined as in \cref{prop-1}. 
Hence, in $(\lambda,\delta)$-space, regions $\mathcal{R}_3$ and $\mathcal{R}_2$ overlap in the rectangle \begin{align}\label{R2R3} [\eps\lambda_3,\eps\lambda_2]\times\bigg[\frac1{\sqrt{\lambda_2}},\frac1{\sqrt{\lambda_3}}\bigg], \end{align} see \cref{fig:ld}, which is the area where the transition between the two regions occurs. This concludes the proof of \cref{prop-3}. \end{proof} \begin{figure} \caption{Regions $\mathcal{R} \label{fig:ld} \end{figure} The last step in the proof of \cref{thm-1} consists in proving \Cref{eq-normu}. \begin{proposition} For $\varepsilon\in(0,\varepsilon_0)$, with $\varepsilon_0>0$ sufficiently small, the upper branch of solutions in \cref{fig:Lin:a} has the expansion stated in \Cref{eq-normu}. \end{proposition} \begin{proof} We first express $\Vert u\Vert_2^2$, with $u$ being the original variable considered in \Cref{eq-2}, in terms of our shifted variable $\tilde u$, as defined in \Cref{eq-shiftu}: \begin{equation}\label{eq-normutil} \Vert u \Vert_2^2 = 2-2 \Vert \tilde u \Vert_1 + \Vert \tilde u \Vert_2^2. \end{equation} (While we had omitted the tilde in our notation following \cref{eq-shiftu}, we now include it again for the sake of clarity.) Due to the symmetry of the boundary value problem \{\cref{eq-7},\cref{eq-5b}\}, we can focus our attention on the interval $[-1,0]$; correspondingly, we split the integrals occurring in \Cref{eq-normutil} into two parts, which are divided by the section $\Sigma_1^{\rm out}$ defined in \cref{eq-20b}. 
Since $\xi_1^{\rm out}=\xi_2^{\rm in}$, that split implies $[-1,0]=[-1,\xi_1^{\rm out}]\cup[\xi_2^{\rm in},0]$ and, hence, that these integrals can be investigated separately in charts $K_1$ and $K_2$: \begin{subequations}\label{eq-norm} \begin{align} \begin{split} \Vert \tilde u \Vert_1 &= \int_{-1}^1 \tilde u(\xi)\,\mathrm{d}\xi = 2 \int_{-1}^0 \tilde u(\xi)\,\mathrm{d}\xi \\ &= 2 \Bigg( \int_{-1}^{\xi_1^{\mathrm{out}}} r_1(\xi_1)\,\mathrm{d}\xi_1+\int_{\xi_2^{\mathrm{in}}}^0 r_2(\xi_2) u_2(\xi_2)\,\mathrm{d}\xi_2 \Bigg) \quad\text{and} \end{split} \label{eq-norm1} \\ \begin{split} \Vert \tilde u \Vert_2^2 &= \int_{-1}^1 \tilde u^2(\xi)\,\mathrm{d}\xi = 2 \int_{-1}^0 \tilde u^2(\xi)\,\mathrm{d}\xi \\ &= 2 \Bigg( \int_{-1}^{\xi_1^{\mathrm{out}}} r_1^2(\xi_1)\,\mathrm{d}\xi_1+\int_{\xi_2^{\mathrm{in}}}^0 (r_2(\xi_2) u_2(\xi_2))^2\,\mathrm{d}\xi_2 \Bigg), \end{split} \label{eq-norm2} \end{align} \end{subequations} where $\xi_1^{\mathrm{out}}(=\xi_1^{{\rm out}-})$ is approximated as in \Cref{eq-112}. Dividing \Cref{eq-11c} by \Cref{eq-11a} and using \cref{eq-26}, where we recall that $\eps_1=\frac{\eps}{r_1}$, we can rewrite the $\xi_1$-integrals in \Cref{eq-norm} as integrals in $r_1$, with $r_1\in[1,\frac\eps\sigma]$. Expanding the resulting integrands for small $\eps$ and evaluating the integrals to the corresponding order, we obtain \begin{align*} \int_{-1}^{\xi_1^{\mathrm{out}}} r_1(\xi_1)\,\mathrm{d}\xi_1=\frac{\sqrt3}2 \sqrt{\frac{\eps}{\lambda}} + \OO(\eps^{\frac32})\quad\text{and}\quad\int_{-1}^{\xi_1^{\mathrm{out}}} r_1^2(\xi_1)\,\mathrm{d}\xi_1=\frac{\sqrt3}3 \sqrt{\frac{\eps}{\lambda}} + \OO(\eps^2). \end{align*} As for the integrals in $\xi_2$, we recall from \cref{eq-9b} that $r_2=\eps$ in chart $K_2$. 
Moreover, given the fast-slow structure of \Cref{eq-13}, $u_2$ can be expressed as the sum of a slow and a fast component, \begin{align*} u_2(\xi_2)=1+\check u_2(\tfrac{\xi_2}\eps); \end{align*} by the definition of the slow manifold $\SS_2^\eps$ in \cref{lem-1}, the slow contribution is given by $u_2(\xi_2)\sim1$, while the fast contribution $\check u_2$ is obtained from the corresponding stable foliation $\FF_2^{\rm s}(\SS_2^\eps)$. In particular, the latter yields higher-order terms in the $\xi_2$-integrals in \cref{eq-norm}, which implies \begin{align*} \int_{\xi_2^{\mathrm{in}}}^0 r_2(\xi_2) u_2(\xi_2)\,\mathrm{d}\xi_2=2 \eps + \OO(\eps^{\frac32}\ln \eps)\quad\text{and}\quad\int_{\xi_2^{\mathrm{in}}}^0 \big(r_2(\xi_2) u_2(\xi_2)\big)^2\,\mathrm{d}\xi_2=\OO(\eps^2). \end{align*} Combining these estimates into \Cref{eq-normutil}, we obtain \begin{align*} \Vert u\Vert_2^2 &=2-2 \bigg(\frac{\sqrt3}2 \sqrt{\frac{\eps}{\lambda}}+2 \eps+\OO(\eps^{\frac32} \ln\eps)\bigg)+\frac{\sqrt3}3 \sqrt{\frac{\eps}{\lambda}}+\OO(\eps^2) \\ &= 2\bigg(1-\frac{\sqrt3}{3}\sqrt{\frac{\eps}{\lambda}}-2\eps+\OO(\eps^{\frac32}\ln\eps)\bigg), \end{align*} which is precisely \Cref{eq-normu}. \end{proof} \cref{thm-1} is hence proven. \begin{remark}\label{rem-slope} Our analysis suggests that the expansion for the upper solution branch in \Cref{eq-normu} is still valid up to an $\OO(\eps)$-neighborhood of the fold point at $\lambda_\ast(\eps)$; that expansion hence provides a good approximation close to the point where the middle and upper branches in \cref{fig:Lin:a} meet. 
Differentiating \Cref{eq-normu} with respect to $\lambda$, evaluating the derivative at \mbox{$\lambda=\lambda_\ast(\eps)$}, as given in \Cref{eq-42}, and expanding for $\eps$ small, we obtain \begin{equation} \frac{\mathrm{d}\Vert u \Vert_2^2}{\mathrm{d}\lambda}\bigg|_{\lambda=\lambda_\ast(\eps)} = \frac{8}{9 \eps}+ \frac29 \big(9+4 \sqrt6\big)\ln\eps + \frac{5}{36}\big(59+24 \sqrt6\big)\eps(\ln \eps)^2 + \OO(\eps^2), \end{equation} which tends to infinity for $\eps\to 0^+$. \end{remark} \section{Discussion and Outlook}\label{sec:diou} In this article, we have investigated stationary solutions of a regularized model for Micro-Electro Mechanical Systems (MEMS). In particular, we have unveiled the asymptotics of the bifurcation diagram for solutions of the boundary value problem \{\cref{eq-7},\cref{eq-5b}\}, as the regularization parameter $\eps$ tends to zero. In the process, we have proven that the new branch of solutions which emerges in the bifurcation diagram of the regularized model derives from an underlying, very degenerate singular structure. Applying tools from dynamical systems theory and, specifically, geometric singular perturbation theory and the blow-up method, we have considered separately three principal regions in the bifurcation diagram; cf.~\cref{fig:bdsegm}. We emphasize that our findings are consistent with formal asymptotics and numerical simulations of Lindsay {\it et al.}; see, in particular, Section~3 of \cite{Li14} and Section~4 of \cite{Li16}. One of the most interesting features of the regularized model considered here is the presence of a highly singular saddle-node bifurcation point. While Lindsay {\it et al.}~\cite{Li14} were able to derive a formal leading-order asymptotic expansion in the regularization parameter at that point, the coefficients therein had remained undetermined thus far.
Our approach, on the other hand, allows us to obtain the fold point as the minimum of an appropriately defined bifurcation equation and, hence, to calculate explicitly the coefficients in that expansion. (For completeness, we remark that the coefficient of the leading-order term therein appeared in \cite[Section~3]{Li14} in a different context: $\lambda_{0c}=\frac{m-1}{2(m-2)}$, which evaluates to $\frac34$ for $m=4$; see also \Cref{rem:xc}. However, that correspondence does not seem to have been noted there.) For verification, a comparison with numerical data obtained with the continuation package~\texttt{AUTO} has been performed, showing very good agreement with our asymptotic expansion. Finally, we have shown that the somewhat unexpected asymptotics of solutions to \Cref{eq-2}, as derived in~\cite{Li14}, arises naturally due to a resonance phenomenon in the blown-up vector field. In particular, we have justified the occurrence of logarithmic ``switchback'' in that asymptotics via a careful description of the flow through one of the coordinate charts, viz.~$K_1$, after blow-up; see also \cite{Po05}. Our analysis hence establishes a further connection between the geometric approach proposed here and the method of matched asymptotic expansions. Our geometric approach to the boundary value problem \{\cref{eq-7},\cref{eq-5b}\} can be extended to the analysis of steady states of the corresponding regularized fourth-order model, which has been studied in~\cite{Li14,Li16,Li15} both asymptotically and numerically. A future aim is to establish analogous results for that case. Another possible topic for future research is the geometric analysis of \Cref{eq-2} in higher dimensions, possibly under the simplifying assumption of radial symmetry. \section*{Acknowledgments} AI and PS would like to thank Alan Lindsay for helpful discussions.
They would also like to acknowledge the Fonds zur F\"orderung der wissenschaftlichen Forschung (FWF) for support via the doctoral school ``Dissipation and Dispersion in Nonlinear PDEs'' (project number W1245). Moreover, AI is grateful to the School of Mathematics at the University of Edinburgh for its hospitality during an extensive research visit. Finally, the authors thank two anonymous referees for insightful comments that greatly improved the original manuscript. \end{document}
\begin{document} \title{Heegaard surfaces and measured laminations, I: the Waldhausen conjecture} \author{Tao Li} \thanks{Partially supported by NSF grants DMS-0102316 and DMS-0406038} \address{Department of Mathematics \\ Oklahoma State University \\ Stillwater, OK 74078\\ USA} \curraddr{Department of Mathematics \\ Boston College \\ Chestnut Hill, MA 02467\\ USA} \email{[email protected]} \begin{abstract} We give a proof of the so-called generalized Waldhausen conjecture, which says that an orientable irreducible atoroidal 3--manifold has only finitely many Heegaard splittings in each genus, up to isotopy. Jaco and Rubinstein have announced a proof of this conjecture using different methods. \end{abstract} \maketitle \begin{psfrags} \setcounter{tocdepth}{1} \tableofcontents \section{Introduction} A Heegaard splitting of a closed orientable 3--manifold $M$ is a decomposition of $M$ into two handlebodies along an embedded surface called a Heegaard surface. Heegaard splittings were introduced to construct and classify 3--manifolds. Every 3--manifold has a Heegaard splitting, and one can construct a Heegaard splitting of arbitrarily large genus for any 3--manifold by adding trivial handles to a Heegaard splitting. An important problem in 3--manifold topology is the classification of Heegaard splittings of a 3--manifold. The main questions are whether there are different Heegaard splittings in a 3--manifold and how the different Heegaard splittings are related. A conjecture of Waldhausen asserts that a closed orientable 3--manifold has only a finite number of Heegaard splittings of any given genus, up to homeomorphism. Johannson \cite{Jo1,Jo2} proved this conjecture for Haken manifolds. If $M$ contains an incompressible torus, one may construct an infinite family of homeomorphic but non-isotopic Heegaard splittings using Dehn twists along the torus. 
The so-called generalized Waldhausen conjecture says that a closed, orientable and atoroidal 3--manifold has only finitely many Heegaard splittings of any fixed genus, up to isotopy. This is also proved to be true for Haken manifolds by Johannson \cite{Jo1,Jo2}. The main purpose of this paper is to prove the generalized Waldhausen conjecture. \begin{theorem}\label{main} A closed, orientable, irreducible and atoroidal 3--manifold has only finitely many Heegaard splittings in each genus, up to isotopy. \end{theorem} Jaco and Rubinstein have announced a proof using normal surface theory and 1--efficient triangulations. The main tools used in this paper are measured laminations and branched surfaces. In a sequel to this paper \cite{L4}, we use measured laminations and Theorem~\ref{T2} of this paper to prove a much stronger result for non-Haken 3--manifolds, which says that, for non-Haken manifolds, adding trivial handles is virtually the only way of creating new Heegaard splittings. Methods of laminations and branched surfaces have been very useful in solving some seemingly unrelated problems, such as \cite{L5, L2}. This is the first time that they are used on Heegaard splittings. Both \cite{L4} and this paper use branched surfaces to analyze Heegaard surfaces. The main technical issues in this paper are on measured laminations, whereas the arguments in \cite{L4} rely more on the properties of strongly irreducible Heegaard splittings. A theorem of Schleimer \cite{Sch} says that every Heegaard splitting of sufficiently large genus has the disjoint curve property. So, an immediate corollary is that $M$ contains only finitely many full Heegaard splittings, see \cite{Sch}. \begin{corollary} In any closed orientable 3--manifold, there are only finitely many full Heegaard splittings, up to isotopy. \end{corollary} Theorem~\ref{main} provides a well-known approach to understand the structure of the mapping class group of 3--manifolds. 
Conjecturally, the mapping class group for such a 3--manifold is finite, but it is not clear how to obtain a geometric description of the mapping class group. For instance, there is no example of a non-trivial element in the mapping class group of such a 3--manifold that is invariant on a strongly irreducible Heegaard splitting. Very recently, Namazi \cite{Na} used the result of this paper and showed that if the distance of a Heegaard splitting is large then the mapping class group is finite. We give a very brief outline of the proof. By a theorem of Rubinstein and Stocking \cite{St}, every strongly irreducible Heegaard splitting is isotopic to an almost normal surface. So, similar to \cite{FO}, one can construct a finite collection of branched surfaces using normal disks and almost normal pieces of a triangulation, such that every almost normal strongly irreducible Heegaard surface is fully carried by a branched surface in this collection. If no branched surface in this collection carries any surface with non-negative Euler characteristic, then Theorem~\ref{main} follows immediately from a simple argument of Haken in normal surface theory. The key to the proof is to show that one can split a branched surface into a finite collection of branched surfaces so that no branched surface in this collection carries any normal torus and, up to isotopy, each almost normal Heegaard surface is still carried by a branched surface in this collection; see sections \ref{Storus} and \ref{Slam}. Most of the paper is dedicated to proving Theorem~\ref{T2}, and Theorem~\ref{main} follows easily from this theorem; see section~\ref{Sproof}. \begin{theorem}\label{T2} Let $M$ be a closed orientable, irreducible and atoroidal 3--manifold, and suppose $M$ is not a small Seifert fiber space.
Then, $M$ has a finite collection of branched surfaces, such that \begin{enumerate} \item each branched surface in this collection is obtained by gluing together normal disks and at most one almost normal piece, similar to \cite{FO}, \item up to isotopy, each strongly irreducible Heegaard surface is fully carried by a branched surface in this collection, \item no branched surface in this collection carries any normal 2--sphere or normal torus. \end{enumerate} \end{theorem} In the proof, we also use some properties of 0--efficient triangulations \cite{JR}. The use of 0--efficient triangulations does not seem to be absolutely necessary, but it makes many arguments much simpler. Jaco and Rubinstein also have a theory of 1--efficient triangulations, which can simplify our proof further, but due to the status of their paper, we decide not to use it. Some arguments in this paper are also similar in spirit to those in \cite{AL, L1}. One can also easily adapt the arguments in this paper into \cite{AL} so that the algorithm in \cite{AL} works without the use of 1--efficient triangulations. \begin{acknowledgments} I would like to thank Bus Jaco for many conversations and email communications on their theory of efficient triangulations. I also thank Saul Schleimer and Ian Agol for helpful conversations. I would also like to thank the referee for many corrections and suggestions. \end{acknowledgments} \section{Heegaard splittings, almost normal surfaces and branched surfaces}\label{Spre} \begin{notation} Throughout this paper, we will denote the interior of $X$ by $int(X)$, the closure (under path metric) of $X$ by $\overline{X}$, and the number of components of $X$ by $|X|$. We will use $\eta(X)$ to denote the closure of a regular neighborhood of $X$. We will use $M$ to denote a closed, orientable, irreducible and atoroidal 3--manifold, and we always assume $M$ is not a Seifert fiber space. 
\end{notation} In this section, we will explain some basic relations between Heegaard splittings, normal surface theory and branched surfaces. We will also explain some terminology and operations that are used throughout this paper. \subsection{Heegaard splittings} A \emph{handlebody} is a compact 3--manifold homeomorphic to a regular neighborhood of a connected graph embedded in $\mathbb{R}^3$. A \emph{Heegaard} splitting of a closed 3--manifold $M$ is a decomposition $M=H_1\cup_SH_2$, where $S=\partial H_1=\partial H_2=H_1\cap H_2$ is a closed embedded separating surface and each $H_i$ ($i=1,2$) is a handlebody. The surface $S$ is called a \emph{Heegaard surface}, and the genus of $S$ is the genus of this Heegaard splitting. The boundary of a regular neighborhood of the 1--skeleton of any triangulation of $M$ is a Heegaard surface. Hence, any closed orientable 3--manifold has a Heegaard splitting. The notion of Heegaard splitting can be generalized to manifolds with boundary, but in this paper, we only consider Heegaard splittings of closed 3--manifolds. Heegaard splittings became extremely useful when Casson and Gordon introduced strongly irreducible Heegaard splittings. \begin{definition} A \emph{compressing disk} of a handlebody $H$ is a properly embedded disk in $H$ with boundary an essential curve in $\partial H$. A Heegaard splitting is \emph{reducible} if there is an essential curve in the Heegaard surface that bounds compressing disks in both handlebodies. A Heegaard splitting $M=H_1\cup_SH_2$ is \emph{weakly reducible} \cite{CG} if there exists a pair of compressing disks $D_1\subset H_1$ and $D_2\subset H_2$ such that $\partial D_1\cap\partial D_2=\emptyset$. If a Heegaard splitting is not reducible (resp. weakly reducible), then it is \emph{irreducible} (resp. \emph{strongly irreducible}). \end{definition} A closed 3--manifold $M$ is \emph{reducible} if $M$ contains an embedded 2--sphere that does not bound a 3--ball.
A lemma of Haken \cite{H} says that if $M$ is reducible, then every Heegaard splitting is reducible. Casson and Gordon \cite{CG} showed that if a Heegaard splitting of a non-Haken 3--manifold is irreducible, then it is strongly irreducible. The following theorem of Scharlemann \cite{S} is useful in proving Theorem~\ref{main}. \begin{theorem}[Theorem 3.3 of \cite{S}]\label{Tsch} Suppose $H_1\cup_S H_2$ is a strongly irreducible Heegaard splitting of a 3--manifold and $V\subset M$ is a solid torus such that $\partial V$ intersects $S$ in parallel essential non-meridian curves. Then $S$ intersects $V$ in a collection of $\partial$--parallel annuli and possibly one other component, obtained from one or two $\partial$-parallel annuli by attaching a tube along an arc parallel to a subarc of $\partial V$. \end{theorem} \subsection{Almost normal surfaces} A normal disk in a tetrahedron is either a triangle cutting off a vertex or a quadrilateral separating two opposite edges, see Figure 3 of \cite{JR} for a picture. An almost normal piece in a tetrahedron is either an octagon, or an annulus obtained by connecting two normal disks using an unknotted tube, see Figures 1 and 2 in \cite{St} for pictures. \begin{definition} Suppose a 3--manifold $M$ has a triangulation $\mathcal{T}$. We use $\mathcal{T}^{(i)}$ to denote the $i$--skeleton of $\mathcal{T}$. Let $S$ be a surface in $M$ that does not meet the 0--skeleton $\mathcal{T}^{(0)}$ and is transverse to $\mathcal{T}^{(1)}$ and $\mathcal{T}^{(2)}$. $S$ is called a \emph{normal surface} (or we say $S$ is normal) with respect to $\mathcal{T}$ if the 2--skeleton $\mathcal{T}^{(2)}$ cuts $S$ into a union of normal disks. $S$ is called an \emph{almost normal surface} if $S$ is normal except in one tetrahedron $T$, where $T\cap S$ consists of normal disks and at most one almost normal piece. 
\end{definition} Rubinstein and Stocking \cite{R,St} (see also \cite{K}) showed that any strongly irreducible Heegaard surface is isotopic to an almost normal surface with respect to any triangulation of the 3--manifold. Normal surfaces, introduced by Kneser \cite{Kn}, have been very useful in the study of incompressible surfaces. The results and techniques in normal surface theory are similarly applicable to almost normal surfaces. Let $S$ be a surface in $M$ transverse to the 1--skeleton of $\mathcal{T}$ and with $S\cap\mathcal{T}^{(0)}=\emptyset$. We define the \emph{weight} (or the combinatorial area) of $S$, denoted by $weight(S)$, to be $|S\cap\mathcal{T}^{(1)}|$. Let $\alpha$ be an arc such that $\alpha\cap\mathcal{T}^{(1)}=\emptyset$ and $\alpha$ is transverse to $\mathcal{T}^{(2)}$. We define the combinatorial length of $\alpha$, denoted by $length(\alpha)$, to be $|\alpha\cap\mathcal{T}^{(2)}|$. After a small perturbation, we may assume any arc to be disjoint from $\mathcal{T}^{(1)}$ and transverse to $\mathcal{T}^{(2)}$. In this paper, when we mention the length of an arc, we always use such combinatorial length. Let $S$ be a closed embedded normal surface in $M$. If we cut $M$ open along $S$, the manifold with boundary $\overline{M-S}$ has an induced cell decomposition. One can also naturally define normal disks and normal surfaces in $\overline{M-S}$ with respect to this cell decomposition. An embedded disk in a 3--cell is a \emph{normal disk} if its boundary curve does not meet the 0--cells, meets at least one edge, and meets no edge more than once. An isotopy of $M$ is called a \emph{normal isotopy} if it is invariant on the cells, faces, edges and vertices of the triangulation. In this paper, we will consider two normal surfaces (or laminations) the same if they are isotopic via a normal isotopy. Up to normal isotopy there are only finitely many equivalence classes of normal disks, and these are called \emph{normal disk types}. 
There are 7 types of normal disks in a tetrahedron. \subsection{Branched surfaces} A \emph{branched surface} in $M$ is a union of finitely many compact smooth surfaces glued together to form a compact subspace (of $M$) locally modeled on Figure~\ref{branch}(a). Given a branched surface $B$ embedded in a 3--manifold $M$, we denote by $N(B)$ a regular neighborhood of $B$, as shown in Figure~\ref{branch}(b). One can regard $N(B)$ as an $I$--bundle over $B$, where $I$ denotes the interval $[0,1]$. We denote by $\pi : N(B)\to B$ the projection that collapses every $I$--fiber to a point. The \emph{branch locus} of $B$ is $L=\{b\in B:$ $b$ does not have a neighborhood homeomorphic to $\mathbb{R}^2 \}$. So, $L$ can be considered as a union of smoothly immersed curves in $B$, and we call a point in $L$ a \emph{double point} of $L$ if any small neighborhood of this point is modeled on Figure~\ref{branch}(a). We call the closure (under the path metric) of each component of $B-L$ a \emph{branch sector} of $B$. We say that a surface (or lamination) $S$ is carried by a branched surface $B$ (or carried by $N(B)$) if $S$ lies in $N(B)$ and is transverse to the $I$--fibers of $N(B)$. We say $S$ is \emph{fully carried} by $B$ if $S\subset N(B)$ transversely intersects every $I$--fiber of $N(B)$. The boundary of $N(B)$ consists of two parts, the horizontal boundary, denoted by $\partial_hN(B)$, and the vertical boundary, denoted by $\partial_vN(B)$. The vertical boundary is a union of subarcs of $I$--fibers of $N(B)$ and the horizontal boundary is transverse to the $I$--fibers of $N(B)$, as shown in Figure~\ref{branch}(b). \begin{figure}\label{branch} \end{figure} Let $\mu\subset N(B)$ be a lamination carried by $N(B)$ (or $B$), and let $b$ be a branch sector of $B$. We say that $\mu$ \emph{passes through} the branch sector $b$ if $\mu\cap\pi^{-1}(int(b))\ne\emptyset$, where $\pi : N(B)\to B$ is the collapsing map.
So, $\mu$ is fully carried by $B$ if and only if $\mu$ passes through every branch sector. Let $x\in int(b)$ be a point and $I_x=\pi^{-1}(x)$ the corresponding $I$--fiber. If $\mu$ is a closed surface, then $m=|I_x\cap\mu|$ is a non-negative integer and $m$ does not depend on the choice of $x\in int(b)$. We call $m$ the weight of $\mu$ at the branch sector $b$. \begin{definition} A \emph{disk of contact} is an embedded disk in $N(B)$ transverse to the $I$--fibers of $N(B)$ and with $\partial D\subset\partial_vN(B)$, see \cite{FO} for a picture. A \emph{monogon} is a disk $E$ properly embedded in $M-int(N(B))$ with $\partial E=\alpha\cup\beta$, where $\alpha\subset\partial_vN(B)$ is a subarc of an $I$--fiber of $N(B)$ and $\beta\subset\partial_hN(B)$. If a component of $M-int(N(B))$ is a 3--ball whose boundary consists of two disk components of $\partial_hN(B)$ and a component of $\partial_vN(B)$, then we call this 3--ball a $D^2\times I$ region. If a component of $M-int(N(B))$ is a solid torus, whose boundary consists of an annulus component of $\partial_hN(B)$ and a component of $\partial_vN(B)$, and a meridian disk of the solid torus is a monogon, then we call this solid torus a $monogon\times S^1$ region. Let $A$ be an annulus in $N(B)$. We call $A$ a \emph{vertical annulus} if $A$ is a union of subarcs of the $I$--fibers of $N(B)$. \end{definition} For any embedded (almost) normal surface $S$, by identifying all the normal disks of the same disk type as in \cite{FO}, we obtain a branched surface fully carrying $S$. Since $M$ is compact and there are only finitely many different types of normal disks, there are only finitely many such branched surfaces. This construction is first used by Floyd and Oertel \cite{FO} to study incompressible surfaces, then used in \cite{G8, L1, L2, L3} to study essential laminations and immersed surfaces. 
Since strongly irreducible Heegaard surfaces are isotopic to almost normal surfaces, by the argument above, we have the following. \begin{proposition}\label{Pfinite} There is a finite collection of branched surfaces in $M$ with the following properties. \begin{enumerate} \item each branched surface is obtained by gluing normal disks and at most one almost normal piece, similar to \cite{FO}, \item after isotopy, every strongly irreducible Heegaard surface is fully carried by a branched surface in this collection. \end{enumerate} \end{proposition}\qed Let $B$ be a branched surface, and $\mathcal{B}$ the set of branch sectors of $B$. If a subset of $\mathcal{B}$ also forms a branched surface $B'$, then we call $B'$ a \emph{sub-branched surface} of $B$. If a lamination $\mu$ is carried but not fully carried by $B$, then the branch sectors that $\mu$ passes through form a sub-branched surface of $B$ that fully carries $\mu$. Let $B$ be a branched surface as in Proposition~\ref{Pfinite}. If $B$ does not contain any almost normal piece, then every surface carried by $B$ is a normal surface. Suppose $B$ contains an almost normal branch sector, which we denote by $b_A$. Let $S$ be an almost normal surface fully carried by $B$. By the definition of almost normal surface, the weight of $S$ at the branch sector $b_A$ is one. Therefore, it is easy to see that $B_N=B-int(b_A)$ is a sub-branched surface of $B$. We call $B_N$ the \emph{normal part} of $B$. Every surface carried by $B_N$ is a normal surface. In this paper, we assume all the 3--manifolds are orientable. So, if $S$ is a non-orientable surface carried by $N(B)$, then a small neighborhood of $S$ in $N(B)$ is a twisted $I$--bundle over $S$ and the boundary of this twisted $I$--bundle is an orientable surface carried by $B$. Thus, we have the following trivial proposition. \begin{proposition}\label{Pnonori} If a branched surface $B$ in an orientable 3--manifold does not carry any 2--sphere (resp.
torus), then $B$ does not carry any projective plane (resp. Klein bottle). \end{proposition} \subsection{Splitting branched surfaces}\label{SSsplit} \begin{definition} An isotopy of $N(B)$ is called a $B$--\emph{isotopy} if it is invariant on each $I$--fiber of $N(B)$. We say two surfaces carried by $N(B)$ are $B$--isotopic if they are isotopic via a $B$--isotopy of $N(B)$. \end{definition} Let $S$ be a compact surface embedded in $N(B)$ transverse to the $I$--fibers of $N(B)$, and let $N_S$ be a closed neighborhood of $S$ in $N(B)$. We call $N_S$ a \emph{fibered neighborhood} of $S$ in $N(B)$ if $N_S$ is an $I$--bundle over $S$ with each $I$--fiber of $N_S$ a subarc of an $I$--fiber of $N(B)$. After some small perturbation, $N(B)-int(N_S)$ can be considered as a fibered neighborhood $N(B')$ of another branched surface $B'$. We say that $B'$ is obtained by \emph{splitting $B$ along $S$}. For most splittings considered in this paper, we have $\partial S\cap\partial_vN(B)\ne\emptyset$ and $S$ is orientable. If $\mu$ is a surface or lamination carried by $N(B)$ and $S\subset N(B)-\mu$ is $B$--isotopic to a sub-surface of (a leaf of) $\mu$, then we also say that $B'$ is obtained by splitting $B$ along $\mu$. The inverse operation of splitting is called \emph{pinching}, and we say that $B$ is obtained by pinching $B'$. If $B'$ is a branched surface obtained by splitting $B$, then we may naturally consider $N(B')$ as a subset of $N(B)$ with the induced fiber structure. For any lamination $\mu$ carried by $B$, we say that $\mu$ is also carried by $B'$ if after some $B$--isotopies, $\mu$ is carried by $N(B')$ with $\mu\subset N(B')\subset N(B)$. Suppose $B'$ is obtained by splitting $B$. Since we can regard $N(B')\subset N(B)$, we have the following obvious proposition. \begin{proposition}\label{Pob} Suppose $B'$ is obtained by splitting $B$. Then, any lamination carried by $B'$ is also carried by $B$. 
\end{proposition}\qed The converse of Proposition~\ref{Pob} is not true. It is possible that some lamination is carried by $B$ but not carried by $B'$. For example, in Figure~\ref{sp1}, the train track $\tau_2$ is obtained by splitting the train track $\tau$ on the top. However, any lamination fully carried by $\tau_1$ or $\tau_3$ is carried by $\tau$ but not carried by $\tau_2$. Nevertheless, every lamination carried by $\tau$ is carried by some $\tau_i$ ($i=1,2,3$). Moreover, $\tau_2$ is a sub-train track of each $\tau_i$. One can apply such different splittings (as in Figure~\ref{sp1}) to branched surfaces. The next proposition is also obvious, see section~\ref{Storus} for a more general discussion of such splittings. \begin{proposition}\label{Psplit} Let $B$ be a branched surface and $\{S_n\}$ a sequence of distinct closed surfaces fully carried by $B$. Suppose $B'$ is a branched surface obtained by splitting $B$ and $B'$ fully carries some $S_m$. Then, there is a finite collection of branched surfaces, such that \begin{enumerate} \item each branched surface in this collection is obtained by splitting $B$, and $B'$ is in this collection, \item each $S_n$ is fully carried by a branched surface in this collection, \item if another branched surface $B''$ in this collection carries $S_m$, then $B'$ is a sub-branched surface of $B''$. \end{enumerate} \end{proposition} \begin{proof} First note that, in the one-dimension lower example of Figure~\ref{sp1}, if $B$ is $\tau$ and $B'$ is $\tau_i$ ($i=1,2,3$), then $\tau_1$, $\tau_2$ and $\tau_3$ form a collection of train tracks satisfying the 3 conditions of the proposition. The 2-dimensional case is similar. Any splitting can be viewed as a sequence of successive local splittings similar to Figure~\ref{sp1}. During each local splitting, one can enumerate all possible splittings as in Figure~\ref{sp1} and get a collection of branched surfaces satisfying the conditions of this proposition. 
\end{proof} \begin{remark} If $B$ is obtained by gluing normal disks and at most one almost normal piece as in Proposition~\ref{Pfinite}, then the branched surface after splitting is also obtained by gluing normal disks and almost normal pieces. Moreover, if $\{S_n\}$ in Proposition~\ref{Psplit} are almost normal surfaces, since each $S_n$ has at most one almost normal piece, we may assume that each branched surface in this collection has at most one branch sector containing an almost normal piece. \end{remark} \begin{figure}\label{sp1} \end{figure} \section{Measured laminations and projective lamination spaces}\label{SPL} Let $B$ be a branched surface in $M$, and $F\subset N(B)$ be a surface carried by $B$. Let $L$ be the branch locus of $B$, and suppose $b_1,\dots, b_N$ are the components of $B-L$. For each $b_i$, let $x_i=|F\cap\pi^{-1}(b_i)|$. One can describe $F$ using a non-negative integer point $(x_1,\dots,x_N)\in\mathbb{R}^N$, and $(x_1,\dots,x_N)$ is a solution to the system of branch equations of $B$, see \cite{FO,O} for more details. $F$ is fully carried by $B$ if and only if each $x_i$ is positive. Each branch equation is of the form $x_k=x_i+x_j$. We use $\mathcal{S}(B)$ to denote the set of non-negative solutions to the system of branch equations of $B$. This gives a one-to-one correspondence between closed surfaces carried by $B$ and integer points in $\mathcal{S}(B)$. Throughout this paper, we do not distinguish a surface carried by $B$ from its corresponding non-negative integer point $(x_1,\dots,x_N)\in\mathcal{S}(B)$. We will call $x_n$ the \emph{weight} (or the \emph{coordinate}) of the surface at the branch sector corresponding to $b_n$. Let $F_1$ and $F_2$ be embedded closed orientable surfaces carried by $N(B)$ and suppose $F_1\cap F_2\ne\emptyset$. 
In general, there are two directions to perform cutting and pasting along an intersection curve of $F_1\cap F_2$, but only one of them results in surfaces still transverse to the $I$--fibers of $N(B)$. We call such cutting and pasting the \emph{canonical} cutting and pasting. This is similar to the Haken sum in normal surface theory. We use $F_1+F_2$ to denote the surface after the canonical cutting and pasting. This is a very natural operation, because if $F_1=(x_1,\dots,x_N)$ and $F_2=(y_1,\dots,y_N)$ in $\mathcal{S}(B)$ then $F_1+F_2=(x_1+y_1,\dots,x_N+y_N)$. Moreover, this sum preserves the Euler characteristic, $\chi(F_1)+\chi(F_2)=\chi(F_1+F_2)$. A theorem of Haken \cite{Ha} says that there is a finite set of fundamental integer solutions $F_1,\dots,F_k$ in $\mathcal{S}(B)$, such that any integer solution in $\mathcal{S}(B)$ can be written as $\sum_{i=1}^kn_iF_i$, where each $n_i$ is a non-negative integer. In other words, every surface carried by $B$ can be obtained by the canonical cutting and pasting on multiple copies of $F_1,\dots,F_k$. So, if $B$ does not carry any 2--sphere or torus, by Proposition~\ref{Pnonori}, $B$ does not carry any surface with non-negative Euler characteristic and hence there are only finitely many surfaces (carried by $B$) with any given genus. The positive non-integer points of $\mathcal{S}(B)$ correspond to measured laminations fully carried by $B$. We refer to \cite{H, O, MS} for details. Roughly speaking, one can construct the measured lamination as follows, see \cite{O} and section 2 of \cite{H}. We can first pinch each component of $\partial_vN(B)$ to a circle and change $N(B)$ to $N_w(B)$, see Figure 1.2 of \cite{O} or Figure 2.2 of \cite{L1}. $N(B)$ is basically the same as $N_w(B)$ except the vertical boundary of $N(B)$ becomes the cusp of $N_w(B)$. For each branch sector of $B$, we can take an $I$--bundle over this sector with a standard horizontal foliation. 
For any positive point in $\mathcal{S}(B)$, when we glue the branch sectors together, we glue the foliations according to the weights at these sectors, see Figure 1.2 of \cite{O}. This produces a singular foliation of $N_w(B)$ where the cusps are the singularities. So, there are a finite number of singular leaves. Now, one can split $B$ along these singular leaves. This is usually an infinite process, and the inverse limit is a measured lamination fully carried by $B$. It is not hard to show that if the singular foliation does not contain any compact leaf, then the singular leaves are dense in the lamination (see \cite{H, MS}). Throughout this paper, we always assume our measured laminations are constructed in this fashion. So, we may assume that there is a one-to-one correspondence between a point in $\mathcal{S}(B)$ and a measured lamination carried by $B$. Measured laminations in 3--manifolds have many remarkable properties. We say a lamination is \emph{minimal} if it has no sub-lamination except itself and the empty set. It is very easy to see that a lamination is minimal if and only if every leaf is dense in the lamination. We say that a lamination $\mu$ is an exceptional minimal lamination if $\mu$ is minimal and does not have interior in $M$. Thus, the intersection of a transversal with an exceptional minimal lamination is a Cantor set. The following theorem is one of the fundamental results on measured laminations/foliations, see \cite{CC1} for measured foliations. \begin{theorem}[Theorem 3.2 in Chapter I of \cite{MS}, pp 410]\label{TMS} Let $\mu$ be a co-dimension one measured lamination in a closed connected 3--manifold $M$, and suppose $\mu\ne M$. Then, $\mu$ is the disjoint union of a finite number of sub-laminations. Each of these sub-laminations is of one of the following types: \begin{enumerate} \item A family of parallel compact leaves, \item A twisted family of compact leaves, \item An exceptional minimal measured lamination. 
\end{enumerate} \end{theorem} One can also naturally define the Euler characteristic for measured laminations, see \cite{MS}. For example, if a measured lamination consists of a family of parallel compact leaves, then its Euler characteristic is equal to the product of the Euler characteristic of a leaf and the total weight. Using branched surfaces, Morgan and Shalen gave a combinatorial formula for Euler characteristic of measured laminations. Let $B$ be a branched surface fully carrying a measured lamination $\mu$. For each branch sector $b$ of the branched surface $B$, one can define a special Euler characteristic $\chi(b)=\chi_{top}(b)-o(b)/4$, where $\chi_{top}(b)$ is the usual Euler characteristic for surfaces and $o(b)$ is the number of corners of $b$, see Definition 3.1 in Chapter II of \cite{MS} pp 424. Let $w(b)$ be the coordinate (or weight) of $\mu$ at the branched sector $b$. Then, $\chi(\mu)=\sum w(b)\cdot\chi(b)$ (see Theorem 3.2 in Chapter II of \cite{MS}, pp 424). The following proposition is easy to prove. \begin{proposition}\label{PMS} Let $\mu\subset M$ be a measured lamination with $\chi(\mu)=0$, and let $B$ be a branched surface fully carrying $\mu$. Suppose $B$ does not carry any 2--sphere. Then, $B$ fully carries a collection of tori. \end{proposition} \begin{proof} First note that, by Proposition~\ref{Pnonori}, $B$ does not carry any projective plane. We can add the equation $\sum\chi(b)\cdot w(b)=0$ to the branch equations, and get a new system of linear equations. By the formula above, every solution to this linear system corresponds to a measured lamination with Euler characteristic 0. Since all the coefficients are rational numbers and this linear system has a positive solution $\mu$, this linear system must have a positive integer solution. Hence, $B$ fully carries a collection of closed surfaces with total Euler characteristic 0. 
Since $B$ does not carry any closed surface with positive Euler characteristic, each surface in this collection has Euler characteristic 0. For any Klein bottle $K$ carried by $B$, the boundary of a twisted $I$--bundle over $K$ is a torus carried by $B$. So, we can get a collection of tori fully carried by $B$. \end{proof} The following theorem of Morgan and Shalen is also useful. \begin{theorem}[Theorem II 5.1 of \cite{MS}, pp 427]\label{TMS2} Let $B$ be a branched surface that does not carry any surface of positive Euler characteristic. Let $\mu$ be a measured lamination fully carried by $B$, and suppose every leaf $l$ of $\mu$ has virtually abelian fundamental group. Then, any measured lamination $\mu'$ carried by $B$ has $\chi(\mu')=0$. \end{theorem} An immediate corollary of Theorem~\ref{TMS2} is the following. \begin{corollary}\label{CMS} Let $B\subset M$ be a branched surface that does not carry any 2--sphere. If $B$ fully carries a measured lamination with Euler characteristic 0, then every measured lamination fully carried by $B$ has Euler characteristic 0. \end{corollary} \begin{proof} If $B$ fully carries a measured lamination with Euler characteristic 0, by Proposition~\ref{PMS}, $B$ fully carries a measured lamination consisting of tori. Hence, Theorem~\ref{TMS2} implies any measured lamination $\mu$ carried by $B$ has $\chi(\mu)=0$. \end{proof} Instead of considering the solution space of the system of branch equations, it is more common to consider the projective space, which is usually called the \emph{projective lamination space} (sometimes we also call it the projective solution space). This is first used by Thurston to study curves and 1-dimensional measured laminations on a surface through the use of train tracks, and it can be trivially generalized to 2-dimensional measured laminations and branched surfaces. 
Throughout this paper, we identify the projective lamination space with the set of points $(x_1,\dots,x_N)\in\mathcal{S}(B)$ satisfying $\sum_{i=1}^Nx_i=1$. We denote the projective lamination space (for the branched surface $B$) by $\mathcal{PL}(B)$. Thus, each rational point of $\mathcal{PL}(B)$ corresponds to a compact surface carried by $B$, and each irrational point corresponds to a measured lamination that contains an exceptional minimal sub-lamination. By an irrational point, we mean a point in $\mathcal{PL}(B)$ in which at least two coordinates are not rationally related. We may also consider the set of points in $\mathcal{PL}(B)$ corresponding to measured laminations with Euler characteristic 0. The following proposition is obvious after adding the combinatorial formula of Euler characteristic into the linear system of branch equations. \begin{proposition}\label{Pcompact} Let $\mathcal{T}(B)\subset\mathcal{PL}(B)$ be the subset of points corresponding to measured laminations with Euler characteristic 0. Then $\mathcal{T}(B)$ is a closed and hence compact subset of $\mathcal{PL}(B)$. \end{proposition}\qed \section{Measured laminations with Euler characteristic 0}\label{Smin} The goal of this section is to prove Lemma~\ref{Llocus}, which is a certain characterization of measured laminations with Euler characteristic 0. Lemmas~\ref{Lnodoc} and \ref{Lvan} are also used in \cite{L4}. The proof involves some basic properties of foliations and laminations, such as the Reeb stability theorem and local stability theorem. We refer to \cite{CN,CC1,Ta} for more details, see also \cite{GO} for lamination versions of these results. The Reeb stability theorem basically says that the holonomy along a trivial curve in a leaf must be trivial. 
The simplest version of the local stability theorem (for our purpose) basically says that, for any disk $\Delta$ in a leaf, there is a 3--ball neighborhood of $\Delta$ in $M$ whose intersection with the lamination consists of disks parallel to $\Delta$. The proof of next lemma is similar to some arguments in section 2 of \cite{L1}. \begin{lemma}\label{Lnodoc} Let $B$ be a branched surface fully carrying a measured lamination $\mu$. Suppose $\partial_hN(B)$ has no disk component and $N(B)$ does not carry any disk of contact that is disjoint from $\mu$. Then, $N(B)$ does not carry any disk of contact. \end{lemma} \begin{proof} After some isotopy, we may assume $\partial_hN(B)\subset\mu$ (note that if $\mu$ is a compact surface, we may need to take multiple copies of $\mu$ to achieve this). For any component $E$ of $\partial_hN(B)$, let $l_E$ be the leaf of $\mu$ containing $E$. Suppose $l_E-int(E)$ has a disk component $D$. Note that $D$ is a disk of contact by definition. Since $\partial_hN(B)$ has no disk component, we may choose $E$ so that $D$ does not contain any component of $\partial_hN(B)$. Then, after a small isotopy, we can get a disk of contact parallel to $D$ and disjoint from $\mu$. So, by our hypotheses, $E$ must be an essential sub-surface of $l_E$ and $E$ is not a disk. After replacing non-orientable leaves by $I$--bundles over these leaves and then deleting the interior of these $I$--bundles (operations 2.1.1--2.1.3 in \cite{G1}), we may assume every leaf of $\mu$ is orientable. After applying these operations to each leaf, we may also assume $\mu$ is nowhere dense \cite{G1}. Suppose there is a disk of contact $D\subset N(B)$. We may assume $\partial D\subset int(\partial_vN(B))$, $D\cap\mu\subset int(D)$, and $D$ is transverse to each leaf of $\mu$. Since $\mu$ is a measured lamination, there is no holonomy and every component of $D\cap\mu$ is a circle. 
For any circle $\alpha\subset D\cap\mu$, we denote by $\Delta_\alpha$ the disk in $D$ bounded by $\alpha$ and denote the leaf of $\mu$ containing $\alpha$ by $l_\alpha$. The circle $\alpha$ has two annular collars $A_\alpha^+$ and $A_\alpha^-$ in $l_\alpha$ on the two sides of $\alpha$, where $A_\alpha^+\cap A_\alpha^-=\alpha$ and $A_\alpha^+\cup A_\alpha^-$ is a regular neighborhood of $\alpha$ in $l_\alpha$. We may assume $A_\alpha^+$, the plus side of $\alpha$, is the one with the property that (after smoothing out the corners) the surface $A_\alpha^+\cup (D-int(\Delta_\alpha))$ is transverse to the $I$--fibers of $N(B)$, hence (after smoothing out the corners) $A_\alpha^-\cup\Delta_\alpha$ is transverse to the $I$--fibers of $N(B)$. We say that $\alpha$ is of type $I$ if $\alpha$ bounds a disk, denoted by $\Delta_\alpha'$, in $l_\alpha$ and $A_\alpha^+\subset\Delta_\alpha'$, see Figure~\ref{type}(a) for a one-dimension lower schematic picture. Otherwise, we say $\alpha$ is of type $II$. Notice that if $\alpha$ is of type $I$, the canonical cutting and pasting of $D$ and $l_\alpha$ at $\alpha$ produce another disk of contact $(D-\Delta_\alpha)\cup\Delta_\alpha'$. If every circle of $D\cap\mu$ is of type $I$, we can take the circles of $D\cap\mu$ which are outermost in $D$ and perform the canonical cutting and pasting along these curves. Then, after some isotopy, we get a disk of contact disjoint from $\mu$. So, there is at least one type $II$ circle in $D\cap\mu$. \begin{figure}\label{type} \end{figure} By the local stability theorem of foliations and laminations, the limit of type $II$ circles of $D\cap\mu$ cannot be a circle of type $I$. So, we can find a circle $\alpha$ in $D\cap\mu$ such that $\alpha$ is of type $II$ and is innermost in the sense that every circle in $int(\Delta_\alpha)\cap\mu$ is of type $I$. Since $\alpha$ is of type $II$, $\alpha$ does not bound a disk in $l_\alpha$ that contains $A_\alpha^+$. 
So, one cannot push $\Delta_\alpha$ into $l_\alpha$ along the $I$--fibers of $N(B)$, fixing $\alpha$. In other words, $\Delta_\alpha$ is not homotopic to a disk in $l_\alpha$ via a homotopy that fixes $\alpha$ and is invariant on each $I$--fiber of $N(B)$. Therefore, we can find an arc $\beta$ properly embedded in $\Delta_\alpha$, such that one cannot push $\beta$ (fixing $\partial\beta$) into $l_\alpha$ along the $I$--fibers. Notice that, for any point $x$ near $\partial\beta$, there is a subarc of an $I$--fiber connecting $x$ to a point in $A_\alpha^+\subset l_\alpha$. We can view $\beta$ as an embedding $\beta:[0,1]\to\Delta_\alpha$. So, there is a maximal interval $[0,t]$ ($t<1$) such that the arc $\beta([0,t])$ is homotopic to an arc in $l_\alpha$ via a homotopy that fixes $\partial\beta$ and is invariant on each $I$--fiber of $N(B)$. Thus, for each $\beta(s)$ ($0<s\le t$), there is a subarc $J_s$ of an $I$--fiber such that $\partial J_s$ consists of $\beta(s)$ and a point in $l_\alpha$. Note that $J_s$ may be degenerate, i.e., $J_s$ may be a single point, in which case $\beta(s)\in l_\alpha\cap\beta$. We may also regard $J_0$ as the point $\beta(0)$. Since $[0,t]$ is maximal, the arc $J_t$ must contain a vertical arc of $\partial_vN(B)$ (otherwise, one can trivially extend $\beta([0,t])$ along $\beta$ to a longer arc). This implies that there is an interior point $X$ of $J_t$ such that $X\in\partial_hN(B)\cap\partial_vN(B)$. We denote the component of $\partial_hN(B)$ containing $X$ by $E_X$ and denote the leaf containing $X$ by $l_X$. Since $\partial_hN(B)\subset\mu$, $E_X\subset l_X$. Now, we consider the intersection of $l_X$ and the (singular) triangle $\cup_{s\in[0,t]}J_s$ (the three edges of the triangle are $\beta([0,t])$, $J_t$ and an arc in $l_\alpha$). As shown in Figure~\ref{type}(c), there must be an arc in $l_X\cap(\cup_{s\in[0,t]}J_s)$ connecting $X$ to a point $\beta(s)$ with $0<s<t$. 
Since every circle in $\mu\cap int(\Delta_\alpha)$ is of type $I$, this implies that $X$ lies in a disk of $l_X$ bounded by a type $I$ circle of $\mu\cap int(\Delta_\alpha)$. Since $X\in\partial E_X$, $E_X$ lies in this disk of $l_X$ bounded by a type $I$ circle of $\mu\cap int(\Delta_\alpha)$, which contradicts our previous conclusion that each component of $\partial_hN(B)$ is a non-disk essential sub-surface of the corresponding leaf. \end{proof} \noindent\emph{Remark}. Lemma~\ref{Lnodoc} is true without the hypothesis that $\mu$ is measured. Suppose $\mu$ is an arbitrary lamination fully carried by $B$. Then by the Reeb stability theorem, any limiting circle of a spiral in $D\cap\mu$ cannot be of type $I$. So one can proceed as in the proof of Lemma~\ref{Lnodoc} except that a slightly more delicate argument on $D\cap\mu$ is needed in the end. \begin{definition}\label{Dvan} Recall that a vanishing cycle (see \cite{CN,GO}) in a foliation $\mathcal{F}$ is a curve $f_0: S^1\to l_0$, where $l_0$ is a leaf in $\mathcal{F}$, and $f_0$ extends to a map $F:[0, 1]\times S^1\to M$ satisfying the following properties. \begin{enumerate} \item for any $t\in [0, 1]$, the curve $f_t(S^1)$, defined by $f_t(x)=F(t,x)$, is contained in a leaf $l_t$, \item for any $x\in S^1$, the curve $t\to F(t,x)$ is transverse to $\mathcal{F}$, \item $f_0$ is an essential curve in $l_0$, but $f_t$ is null-homotopic in $l_t$ for $t>0$. \end{enumerate} We define a slightly different version of vanishing cycle for laminations. Let $\mu$ be a lamination in $M$ and $l_0$ be a leaf. We call a simple closed curve $f_0:S^1\to l_0$ an \emph{embedded vanishing cycle} in $\mu$ if $f_0$ extends to an embedding $F:[0, 1]\times S^1\to M$ satisfying the following properties. 
\begin{enumerate} \item $F^{-1}(\mu)=C\times S^1$, where $C$ is a closed set of $[0, 1]$, and for any $t\in C$, the curve $f_t(S^1)$, defined by $f_t(x)=F(t,x)$, is contained in a leaf $l_t$, \item for any $x\in S^1$, the curve $t\to F(t,x)$ is transverse to $\mu$, \item $f_0$ is an essential curve in $l_0$, but there is a sequence of points $\{t_n\}$ in $C$ such that $\lim_{n\to\infty}t_n=0$ and $f_{t_n}(S^1)$ bounds a disk in $l_{t_n}$ for all $t_n$. \end{enumerate} \end{definition} \begin{lemma}\label{Lvan} Let $M$ be a closed orientable and irreducible 3--manifold, and $\mu\subset M$ an exceptional minimal measured lamination. Suppose $\mu$ is fully carried by a branched surface $B$ and $B$ does not carry any $2$--sphere. Then, $\mu$ has no embedded vanishing cycle. \end{lemma} \begin{proof} The proof of this lemma is essentially an argument of Novikov. Novikov showed \cite{N, CN} that if a transversely orientable foliation has a vanishing cycle, then the foliation contains a Reeb component. Note that the $C^2$ assumption in Novikov's original proof is not necessary, see \cite{So} or section 9.3 of \cite{CC2}. We will use an adaptation of Novikov's argument as in the proof of Lemma 2.8 of \cite{GO}. Our proof is based on the proof of Lemma 2.8 of \cite{GO} (pp 54). So, before we proceed, we briefly describe the argument in \cite{GO}, which shows that a lamination fully carried by an essential branched surface has no vanishing cycle. In that proof \cite{GO}, the lamination $\lambda$ is fully carried by $N(B)$. Although the hypothesis of Lemma 2.8 of \cite{GO} is that $B$ is an essential branched surface, the only requirement is that each disk component of $\partial_hN(B)$ is a horizontal boundary component of a $D^2\times I$ region in $M-int(N(B))$. The first step of the proof in \cite{GO} is to consider $\hat{N}(B)$, which is the union of $N(B)$ and all the $D^2\times I$ regions of $M-int(N(B))$. So, $\partial_h\hat{N}(B)$ does not contain any disk component. 
Let $\hat{\mathcal{F}}$ be the associated (singular) foliation of $\hat{N}(B)$ ($\hat{\mathcal{F}}$ is obtained by filling the $I$--bundle regions of $\hat{N}(B)-\lambda$). The only singularities of $\hat{\mathcal{F}}$ are at $\partial_h\hat{N}(B)\cap\partial_v\hat{N}(B)$. Then, one simply applies Novikov's argument to the (singular) foliation $\hat{\mathcal{F}}$. The key of the proof in \cite{GO} is that when one extends the vanishing cycle to a map $F:(0,1]\times D^2\to M$ as in Novikov's argument \cite{N, CN}, the disk $F(\{t\}\times D^2)$ (lying in a leaf of $\hat{\mathcal{F}}$) does not contain any component of $\partial_h\hat{N}(B)$ (since $\partial_h\hat{N}(B)$ has no disk component and $N(B)$ does not carry any disk of contact). So, the singularities of $\hat{\mathcal{F}}$ never affect Novikov's argument. Hence, $\hat{\mathcal{F}}$ has a Reeb component. Note that, by taking a 2-fold cover of $\hat{N}(B)$ if necessary, one can always assume $\hat{\mathcal{F}}$ is transversely orientable. Now, we prove Lemma~\ref{Lvan} using the arguments above. However, since our lamination $\mu$ may be compressible, a disk component of $\partial_hN(B)$ may not correspond to a $D^2\times I$ region of $M-int(N(B))$. Let $\mathcal{C}$ be the number of components of $M-int(N(B))$ that are not $D^2\times I$ regions. We assume $\mathcal{C}$ is minimal among all such measured laminations and branched surfaces that satisfy the hypotheses of Lemma~\ref{Lvan} and contain embedded vanishing cycles. Suppose $\gamma$ is an embedded vanishing cycle in $\mu$. So, $\gamma$ is an essential simple closed curve in a leaf. There is an embedded vertical annulus $A$ in $N(B)$ containing $\gamma$. Since $\mu$ is a measured lamination, $\mu$ has no holonomy and we may assume $A\cap\mu$ is a union of parallel circles. 
Moreover, by Definition~\ref{Dvan} there is a sequence of circles $\{\gamma_n\}$ in $A\cap\mu$ such that $\lim_{n\to\infty}\gamma_n=\gamma$ and each $\gamma_n$ bounds a disk in $l_n$, where $l_n$ is the leaf of $\mu$ containing $\gamma_n$. Let $D_n$ be the disk bounded by $\gamma_n$ in $l_n$ and suppose $n$ is sufficiently large. First note that these $D_n$'s are all on the same side of $A$. More precisely, for any $D_m$ and $D_n$ (with both $m$ and $n$ sufficiently large), there is a map $\phi: D^2\times I\to M$ such that $\phi(\partial D^2\times I)\subset A$ and $\phi(D^2\times\partial I)=D_m\cup D_n$. This is because if $D_m$ and $D_n$ are on different sides of $A$, then the union of $D_m\cup D_n$ and the sub-annulus of $A$ bounded by $\gamma_m\cup\gamma_n$ is a 2--sphere $S$, and after a small perturbation, $S$ becomes an immersed 2--sphere carried by $N(B)$. The canonical cutting and pasting on $S$ can produce an embedded 2--sphere carried by $N(B)$, which contradicts our hypothesis. Let $A'$ be the sub-annulus of $A$ between $\gamma_m$ and $\gamma_n$. By assuming $m$ and $n$ to be sufficiently large, we may assume $\gamma_m$ and $\gamma_n$ are close to $\gamma$ and hence we may regard $A'$ as a vertical annulus in $N(B)$. We will show next that every circle in $A'\cap\mu$ bounds a disk in the leaf that contains this circle. We first consider the generic case: $D_m\cap A'=\partial D_m=\gamma_m$ and $D_n\cap A'=\partial D_n=\gamma_n$. Since $M$ is irreducible and since $m$ and $n$ are sufficiently large, $D_m\cup A'\cup D_n$ must be an embedded 2--sphere bounding a 3--ball $E=D^2\times I$, where $D^2\times\partial I=D_m\cup D_n$ and $\partial D^2\times I=A'$. If $E-int(N(B))$ consists of $D^2\times I$ regions, then $\mu\cap E$ is a union of parallel disks with boundary in $A'$. Conversely, if $\mu\cap E$ consists of parallel disks with boundary in $A'$, then after some splitting, $E-int(N(B))$ becomes a union of $D^2\times I$ regions. 
Recall that we have assumed that $\mathcal{C}$, the number of non-$D^2\times I$ regions of $M-int(N(B))$, is minimal among all such measured laminations. We claim that $E-int(N(B))$ consists of $D^2\times I$ regions. Otherwise, $\mu\cap E$ must contain non-disk leaves. By the local stability theorem, the union of all non-disk leaves of $\mu\cap E$ forms a sub-lamination of $\mu\cap E$, and we denote this sub-lamination of $\mu\cap E$ by $\lambda$. So, we can obtain a new measured lamination $\mu'$ by cutting off $\lambda$ from $\mu$ and then gluing back disks along the boundary circles of $\lambda$. The disks that we glue back are parallel to the disk components of $\mu\cap E$, so we may assume the new lamination $\mu'$ is carried (not fully carried) by $N(B)$. Moreover, $\mu'$ has a transverse measure induced from that of $\mu$. Next, we show that $\gamma$ is still an embedded vanishing cycle for $\mu'$. Let $l_\gamma'$ be the leaf of $\mu'$ containing $\gamma$. By the construction, we only need to show that $\gamma$ is an essential curve in $l_\gamma'$. Suppose $\gamma$ bounds a disk $\Delta_\gamma$ in $l_\gamma'$. As $\gamma$ is essential in $\mu$ but trivial in $\mu'$, $\Delta_\gamma\cap E\ne\emptyset$. Since $\lim_{k\to\infty}\gamma_k=\gamma$ and $\lambda$ is a sub-lamination of $\mu\cap E$, if $k$ is sufficiently large, $D_k\cap E\ne\emptyset$ and $D_k\cap\lambda\ne\emptyset$, where $D_k$ is the disk bounded by $\gamma_k$ in $\mu$ as above. This implies that $\overline{D_k-\lambda}$ has a disk component $\Delta$ with $\partial\Delta\subset A'$. Moreover, after a slight perturbation, the union of $\Delta\cup D_n$ and the sub-annulus of $A'$ bounded by $\partial\Delta\cup\partial D_n$ forms an immersed 2--sphere transverse to the $I$--fibers. After some cutting and pasting, one can obtain an embedded 2--sphere carried by $B$, contradicting our hypothesis. Therefore, $\gamma$ is still an embedded vanishing cycle for the new measured lamination $\mu'$. 
After splitting $N(B)$ along $\mu'\cap E$ and taking sub-branched surfaces, each component of $E-int(N(B))$ becomes a $D^2\times I$ region. This contradicts our assumption that $\mathcal{C}$ is minimal for $\mu$. So, in this generic case, every circle of $A'\cap\mu$ bounds a disk in the leaf. The non-generic case is very similar. If $D_m\cap int(A')=\emptyset$ but $D_n\subset int(D_m)$, then we have a map $\phi: D^2\times I\to M$ such that $\phi(\partial D^2\times I)=A'$, $\phi(D^2\times\{0\})=D_m$, $\phi(D^2\times\{1\})=D_n$, and $\phi$ restricted to $D^2\times(0,1)$ is an embedding (this is a standard picture in Novikov's argument on Reeb components, see pp 133 of \cite{CN} for a picture). So, we can apply the argument above to the (half open) 3--ball $\phi(D^2\times(0,1))$ and the proof is the same. If $D_m\cap int(A')\ne\emptyset$, then we can replace $D_n$ and $A'$ by a sub-disk of $D_m$ and a sub-annulus of $A'$ respectively and return to the case that $D_m\cap int(A')=\emptyset$. Thus, after choosing a sub-annulus of $A$, we may assume that $\gamma\subset\partial A$ and every circle in $\mu\cap(A-\gamma)$ bounds a disk in the corresponding leaf. Let $D_m$, $D_n$, $A'$ and $E=D^2\times I$ be as above. By the arguments above, $\mu\cap E$ consists of parallel disks. As $A'\subset int(N(B))$, if the 3--ball $E$ contains some components of $M-N(B)$, then we can split $N(B)$ in $E$ so that $E-int(N(B))$ consists of $D^2\times I$ regions. We can perform this splitting in all possible such 3--balls $E$, and this is a finite process since $|\partial_hN(B)|$ is bounded. Let $\hat{N}(B)$ be the union of $N(B)$ (after the splitting in the 3--balls $E$ above) and all the $D^2\times I$ regions of $M-int(N(B))$. The remaining proof is the same as the proof of Lemma 2.8 of \cite{GO}. We can extend $\mu$ to a (singular) foliation $\hat{\mathcal{F}}$ in $\hat{N}(B)$. 
By our construction above, for any disk $\Delta$ (in a leaf) bounded by a circle of $\mu\cap A$, $\Delta$ does not contain any component of $\partial_h\hat{N}(B)$, and hence $\Delta$ does not meet the singularity of $\hat{\mathcal{F}}$. So, we can apply Novikov's argument to $\hat{\mathcal{F}}$ as in the proof of Lemma 2.8 of \cite{GO}, and conclude that $\hat{\mathcal{F}}$ contains a Reeb component and hence has non-trivial holonomy. Since $\hat{\mathcal{F}}$ is obtained by filling the $I$--bundle regions of $\hat{N}(B)-\mu$, this implies that $\mu$ has non-trivial holonomy and is not a measured lamination. \end{proof} \begin{remark} One can apply Novikov's argument directly to laminations without using the (singular) foliation $\hat{\mathcal{F}}$. Moreover, the assumption that $B$ does not carry any 2--sphere seems unnecessary. One can prove Lemma~\ref{Lvan} (without the 2--sphere assumption) using the argument of Imanishi, which says that any 2-dimensional phenomenon like the Reeb foliation implies that the foliation/lamination has non-trivial holonomy, see \cite{L3} for an interpretation of Imanishi's argument using branched surfaces. \end{remark} \begin{lemma}\label{Llocus} Let $M$ be a closed orientable and irreducible 3--manifold and suppose $M$ is not $T^3=S^1\times S^1\times S^1$. Let $\mu\subset M$ be an exceptional minimal measured lamination with Euler characteristic 0. Suppose $\mu$ is fully carried by a branched surface $B$, and $B$ does not carry any 2--sphere. Then, there is a branched surface $B'$, obtained by splitting $B$ and taking sub-branched surfaces, such that $B'$ fully carries $\mu$, the branch locus $L'$ of $B'$ has no double point, and $B'-L'$ consists of annuli and M\"{o}bius bands. \end{lemma} \begin{proof} Suppose every leaf of $\mu$ is a plane. After eliminating all the disks of contact of $N(B)$ that are disjoint from $\mu$, we have that $\partial_hN(B)$ consists of disks. So there is no monogon and $\mu$ is an essential lamination. 
By a Theorem in \cite{G7} (see also Proposition 4.2 of \cite{L1}), $M\cong T^3$. So, at least one leaf of $\mu$ is not a plane. Let $\gamma$ be an essential simple closed curve in a non-plane leaf of $\mu$. Then, there is a vertical annulus $A$ in $N(B)$ containing $\gamma$. Since $\mu$ is a measured lamination and so has no holonomy, we may assume $A\cap\mu$ consists of circles parallel to $\gamma$. If $\mu$ contains a plane leaf $L$, since every leaf of $\mu$ is dense, $L\cap A$ contains an infinite sequence of circles whose limit is $\gamma$. Each circle in $L\cap A$ bounds a disk in the plane $L$, so $\gamma$ is an embedded vanishing cycle and we get a contradiction to Lemma~\ref{Lvan}. Thus, $\mu$ contains no plane leaf at all. After some isotopy, we may assume $\partial_hN(B)\subset\mu$. Since $\mu$ contains no plane leaf, for every component $S$ of $\partial_hN(B)$, we can split $N(B)$ along $\mu$ so that $S$ contains an essential curve in the leaf that contains $S$. If there is a disk of contact in $N(B)$ disjoint from $\mu$, then we can trivially eliminate the disk of contact by splitting $B$. After these splittings, each component $S$ of $\partial_hN(B)$ becomes an essential non-disk sub-surface of the leaf that contains $S$. By Lemma~\ref{Lnodoc}, $N(B)$ does not contain any disk of contact. Next, we show that each component of $\partial_hN(B)$ must be an annulus. By Proposition~\ref{PMS}, $B$ fully carries a collection of tori $T$. After some isotopy and taking multiple copies of $T$, we may assume $\partial_hN(B)\subset T$. If a component $S$ of $\partial_hN(B)$ is not an annulus, since no component of $\partial_hN(B)$ is a disk, there must be a boundary component of $S$ bounding a disk $D$ in $T-int(S)$, and $D$ is a disk of contact by definition. 
Since we have assumed that $\partial_hN(B)\subset\mu$ after isotopy, if a component of $\partial_hN(B)$ is a closed surface, $\mu$ must contain a closed surface, a contradiction to the hypothesis that $\mu$ is exceptional minimal. Thus, $\partial_hN(B)$ does not contain a torus and $\partial_hN(B)$ must consist of annuli. If a leaf of $\mu$ has non-zero Euler characteristic, then we can split $N(B)$ by ``blowing air'' into $N(B)-\mu$ so that a component of $\partial_hN(B)$ is an essential sub-surface of a leaf and has negative Euler characteristic. So, the argument above implies that each leaf of $\mu$ is either an infinite annulus or an infinite M\"{o}bius band. Let $\eta$ be an essential simple closed curve in $int(\partial_hN(B))$ and let $A_\eta$ be a vertical annulus in $N(B)$ containing $\eta$. So, we may assume $A_\eta\cap\mu$ is a union of parallel circles. By Lemma~\ref{Lvan}, $\eta$ is not an embedded vanishing cycle, hence we can choose the vertical annulus $A_\eta$ so thin that every circle of $A_\eta\cap\mu$ is an essential curve in a leaf of $\mu$. Since every leaf is dense in $\mu$, each leaf must intersect $A_\eta$. Moreover, the limit of each end of any leaf is a sub-lamination of $\mu$ and hence is the whole of $\mu$. So, each end of any leaf of $\mu$ must intersect $A_\eta$. After some splittings, we may also assume $|M-N(B)|$ is minimal among all such branched surfaces. Let $S$ be a component of $\partial_hN(B)$ and $l_S$ be the leaf of $\mu$ containing $S$. We first point out that $l_S$ must be an orientable surface. To see this, for any point $x$ in any leaf $l$ and for any transversal $\delta_x$ containing $x$, since every leaf is dense, $x$ is always an accumulation point in $\delta_x\cap\mu$. Since $M$ is orientable, if $l$ is a non-orientable surface, $x$ must be a limit point (of $\delta_x\cap\mu$) in both components of $\overline{\delta_x-x}$. However, if $x\in\partial_hN(B)$, $x$ can only be a limit point on one side of $\delta_x$.
So, $l_S$ must be orientable and hence $l_S$ is an infinite annulus. Since both ends of $l_S$ intersect $A_\eta$ and no circle in $A_\eta\cap l_S$ bounds a disk in $l_S$, there is an annulus in each component of $l_S-int(S)$ connecting $\partial S$ to $A_\eta$. Therefore, we can find an annulus $A_S$ in $N(B)-\mu$, transverse to the $I$--fibers and with one boundary circle in $A_\eta$ and the other boundary circle in a component of $\partial_vN(B)$. Moreover, $A_S$ is parallel to a sub-annulus of $l_S-int(S)$ above. We can split $N(B)$ by deleting a fibered neighborhood of $A_S$ from $N(B)$. Note that since we have assumed $|M-N(B)|$ is minimal, the branched surface after this splitting still fully carries $\mu$ and satisfies all the previous properties. Since both components of $l_S-int(S)$ contain such annuli, we can find such an annulus in $N(B)$ connecting $A_\eta$ to each component of $\partial_vN(B)$. By deleting a small neighborhood of these annuli from $N(B)$, we can split $N(B)$ into $N(B')$ which is a fibered neighborhood of another branched surface $B'$ and $N(B')$ satisfies all the previous properties. Since the splittings are along the annuli connecting $\partial_vN(B)$ to $A_\eta$, each component of $\partial_vN(B')$ lies in a small neighborhood of $A_\eta$ and is parallel to a sub-annulus of $A_\eta$. Thus, after a small perturbation in a neighborhood of $A_\eta$, we may assume $\pi(\partial_vN(B'))=L'$ is a collection of disjoint circles in $B'$, where $\pi:N(B')\to B'$ is the map collapsing each $I$--fiber to a point. So, the branched surface $B'$ satisfies all the requirements in Lemma~\ref{Llocus}. \end{proof} \section{Normal tori and 0--efficient triangulations}\label{S0eff} Let $F$ be an embedded surface in $M$ and suppose $M$ has a triangulation $\mathcal{T}$. We use $\mathcal{T}^{(i)}$ to denote the $i$--skeleton of $\mathcal{T}$. 
After some isotopy, we may assume $F$ does not contain any vertices of the triangulation and $F$ is transverse to $\mathcal{T}^{(1)}$ and $\mathcal{T}^{(2)}$. If $F$ is not a normal surface, we can try to normalize $F$ using the following two types of normal moves. After these normal moves, $F$ consists of normal surfaces and possibly some trivial 2--spheres in 3--simplices. Note that if $F$ is incompressible, then the two normal moves are isotopies and there are no such 2--spheres. We refer to section 3.1 of \cite{JR} for more detailed descriptions. \begin{operation}\label{o1} Suppose $F$ is compressible in a 3--simplex, then there are two cases. The first case is that, for a 2--simplex $\Delta$, $F\cap\Delta$ contains circles. Let $c$ be a circle of $F\cap\Delta$ innermost in $\Delta$. If $c$ is a trivial circle in $F$, then the two disks bounded by $c$ in $F$ and $\Delta$ form a 2--sphere bounding a 3--ball. So, we can perform an isotopy on $F$ pushing the disk across this 3--ball and reduce the number of circles in $F\cap\Delta$. If $c$ is non-trivial in $F$, the disk bounded by $c$ in $\Delta$ is a compressing disk for $F$ and we can compress $F$ along this compressing disk. The latter operation increases the Euler characteristic of $F$ by $2$. The second case is that $F\cap\Delta$ contains no circle but $F$ is compressible in the interior of a 3--simplex. Similar to the first case, we can either compress $F$ in the interior of the 3--simplex increasing the Euler characteristic, or perform some isotopy reducing the intersection of $F$ with the 2--skeleton. \end{operation} \begin{operation}\label{o2} This operation is an isotopy on $F$. For any 3--simplex $X$, if $F\cap X$ is incompressible in $X$ and a component of $F\cap X$ intersects an edge of $X$ in more than one point, then one can find a $\partial$--compressing disk $D\subset X$ with $\partial D$ consisting of an arc in $F$ and an arc in an edge (technically $D$ is a $\partial$-parallel disk in $X$). 
We can perform an isotopy by pushing $F$ along $D$ across this edge. This operation reduces the weight of $F$ by two. \end{operation} In this section, we will assume the triangulation $\mathcal{T}$ is a $0$--efficient triangulation. A triangulation of $M$ is said to be $0$--efficient if the triangulation has only one vertex and the only normal 2--sphere in $M$ is the boundary sphere of a closed neighborhood of this vertex. In \cite{JR}, Jaco and Rubinstein showed that, if $M$ is irreducible and not a lens space, then $M$ admits a $0$--efficient triangulation. In fact, given any triangulation of $M$, there is an algorithm to collapse this triangulation into a 0--efficient one. One of the most useful techniques in \cite{JR} is the so-called \emph{barrier surfaces} or \emph{barriers}. We will briefly explain a special case of barriers used in our proof, see section 3.2 of \cite{JR} for more details. Let $F$ be a compact embedded normal surface in $M$. If we cut $M$ open along $F$, we get a manifold with boundary, denoted by $\overline{M-F}$, with an induced cell decomposition. Let $S$ be a properly embedded normal surface in $\overline{M-F}$ with respect to the induced cell decomposition. $F\cup S$ is a 2--complex in $M$. Now we consider the surface $\partial\eta(F\cup S)$ in $M$, where $\eta(F\cup S)$ is the closure of a small neighborhood of $F\cup S$. The surface $\partial\eta(F\cup S)$ may not be normal and we can use the operations~\ref{o1} and \ref{o2} to normalize $\partial\eta(F\cup S)$. Then, by \cite{JR}, $F\cup S$ forms a ``barrier" for these normalizing operations. More precisely, one can perform operations~\ref{o1} and \ref{o2} on $\partial\eta(F\cup S)$ totally in the 3--manifold $M-int(\eta(F\cup S))$ and get a normal surface (with respect to the triangulation of $M$) plus possible trivial 2--spheres in some tetrahedra. Note that it is possible that, after these operations, $\partial\eta(F\cup S)$ vanishes, i.e. 
becomes a collection of trivial 2--spheres in some tetrahedra. Since every normal 2--sphere in a 0--efficient triangulation is vertex-linking, it is easy to use the barrier technique to derive some nice properties of normal tori with respect to a 0--efficient triangulation. Lemmas~\ref{Ltorus}, \ref{Lannulus} and Corollary~\ref{Cklein} are well-known to people who are familiar with 0--efficient triangulations. \begin{lemma}\label{Ltorus} Suppose $M$ is irreducible and atoroidal and $M$ is not a lens space. Let $T$ be a normal torus with respect to a 0--efficient triangulation of $M$. Then, we have the following. \begin{enumerate} \item $T$ bounds a solid torus in $M$. \item Let $N$ be the solid torus bounded by $T$. Then, $M-int(N)$ is irreducible and $T$ is incompressible in $M-int(N)$. \end{enumerate} \end{lemma} \begin{proof} As $M$ is irreducible and atoroidal, $T$ is compressible and separating. Let $D$ be a compressing disk for $T$. Then, we can choose $D$ so that $D$ is normal with respect to the induced cell decomposition of $\overline{M-T}$. Hence, $T\cup D$ forms a barrier. Note that $\partial\eta(T\cup D)$ has a 2--sphere component $S$ and $S$ bounds a 3--ball $E_S$ in $M$. If this 3--ball $E_S$ lies in the complement of $T\cup D$, then $T$ bounds a solid torus, otherwise $E_S$ contains $T$ and $T$ bounds a ball with a knotted hole. Since $T\cup D$ forms a barrier, we can perform Operations \ref{o1} and \ref{o2} to normalize $S$ in the complement of $T\cup D$. Note that Operation~\ref{o2} is an isotopy. If Operation~\ref{o1} occurs, since $S$ is a 2--sphere, Operation~\ref{o1} on $S$ is also an isotopy. Therefore, we can isotope $S$ in $M-T\cup D$ either to a normal 2--sphere or to a 2--sphere in a 3--simplex. Since the only normal 2--sphere is the vertex-linking one and the normal torus $T$ cannot lie in a small neighborhood of the vertex, $T$ must lie outside the 3--ball bounded by $S$. Hence, $T$ must bound a solid torus. 
If $T$ is compressible in the complement of this solid torus $N$, then we have a compressing disk outside the solid torus. We can use the union of $T$ and this compressing disk as a barrier and the argument above implies that $T$ bounds a solid torus on the other side, which means $M$ is a lens space and contradicts our hypotheses. If $M-int(N)$ is reducible, then there is an essential normal 2--sphere in $M-int(N)$. Since the only normal 2--sphere is the vertex-linking one and bounds a 3--ball, we also get a contradiction as before. \end{proof} \begin{corollary}\label{Cklein} Suppose $M$ is a closed, orientable, irreducible and atoroidal 3--manifold and $M$ is not a small Seifert fiber space. Then, $M$ does not contain any normal projective plane or normal Klein bottle with respect to a 0--efficient triangulation. \end{corollary} \begin{proof} If $M$ contains a normal projective plane $P$, then a closed neighborhood of $P$ in $M$, $\eta(P)$, is a twisted $I$--bundle over $P$, and $\partial\eta(P)$ is a normal 2--sphere. Since the only normal 2--sphere in $M$ is the vertex-linking one, this implies $M$ is $\mathbb{R}P^3$. If $M$ contains a normal Klein bottle $K$, then $\eta(K)$ is a twisted $I$--bundle over $K$ and $\partial\eta(K)$ is a normal torus. Since every normal torus bounds a solid torus in $M$, $M$ is the union of a solid torus and a twisted $I$--bundle over a Klein bottle, which implies that $M$ is a Seifert fiber space. \end{proof} \begin{lemma}\label{Lannulus} Suppose $M$ is closed, orientable, irreducible and atoroidal and suppose $M$ is not a small Seifert fiber space. Let $T$ be a normal torus with respect to a 0--efficient triangulation of $M$, and let $N$ be the solid torus bounded by $T$. Suppose $A$ is an annulus properly embedded in $M-int(N)$ and $\partial A$ is a pair of essential curves in $T$. Suppose $A$ is normal with respect to the induced cell decomposition of $M-int(N)$. Then, the following are true.
\begin{enumerate} \item each component of $\partial\eta(N\cup A)$ bounds a solid torus in $M$, \item one component of $\partial\eta(N\cup A)$ bounds a solid torus in $M-int(\eta(N\cup A))$ and the other component of $\partial\eta(N\cup A)$ bounds a solid torus containing $N\cup A$. \item If $\partial A$ is a pair of meridian curves for the solid torus $N$, then $A$ is $\partial$--parallel in $M-int(N)$. \end{enumerate} \end{lemma} \begin{proof} Since $\partial A$ is essential in $T$, $\partial\eta(N\cup A)$ consists of two tori in $M-N$. Let $T_1$ be a component of $\partial\eta(N\cup A)$. The torus $T_1$ may not be normal, but $T\cup A$ forms a barrier and we can perform Operations \ref{o1} and \ref{o2} to normalize $T_1$ in $M-N\cup A$. During the normalization process, every step is an isotopy unless in Operation~\ref{o1}, there is a circle in $T_1\cap\Delta$ ($\Delta$ is a 3--simplex) bounding a compressing disk $D$ in $\Delta$. If this happens, we compress $T_1$ along $D$ as in Operation~\ref{o1} and change $T_1$ into a 2--sphere $T_1'$. After the compression, similar to the proof of Lemma~\ref{Ltorus}, we can isotope the 2--sphere $T_1'$ either to a normal 2--sphere or into a 3--simplex. As in the proof of Lemma~\ref{Ltorus}, $T_1'$ must bound a 3--ball in $M-N\cup A$. Since $N$ and the compressing disk $D$ are on different sides of $T_1$, similar to the proof of Lemma~\ref{Ltorus}, $T_1$ must bound a solid torus in $M-N\cup A$. If the compression operation never happens, then we can isotope the torus $T_1$ either to a normal torus, in which case $T_1$ bounds a solid torus by Lemma~\ref{Ltorus}, or into a 3--simplex. If $T_1$ can be isotoped into a 3--simplex, then we have a 3--ball containing $T_1$ and disjoint from $N\cup A$. This is impossible because the region between $T_1$ and $N\cup A$ is a product. Thus, each torus in $\partial\eta(N\cup A)$ must bound a solid torus in $M$.
Let $T_1$ and $T_2$ be the two tori in $\partial\eta(N\cup A)$, and let $E_1$ and $E_2$ be the two components of $M-int(\eta(N\cup A))$ bounded by $T_1$ and $T_2$ respectively. So, $\partial E_i=T_i$ and each $T_i$ bounds a solid torus in $M$. If both $E_1$ and $E_2$ are solid tori, then $M$ is a union of $T\cup A$ and 3 solid tori, which implies that either $M$ is a small Seifert fiber space or $M$ is reducible. Thus, at least one $E_i$ is not a solid torus. Suppose $E_1$ is not a solid torus. Since $T_1$ bounds a solid torus in $M$, $M-int(E_1)$ is a solid torus containing $N\cup A$. Moreover, by Lemma~\ref{Ltorus}, $M-N$ is irreducible and hence $E_1$ is irreducible. Since $E_1$ is not a solid torus and $E_1$ is irreducible, $T_1$ must be incompressible in $E_1$. We claim that $E_2$ must be a solid torus. Suppose $E_2$ is not a solid torus either. Then the argument above implies that $T_2$ is incompressible in $E_2$. Let $D_i$ be a meridian disk of the solid torus $M-int(E_i)$ ($i=1,2$). We first show that at least one of $D_1$ and $D_2$ is properly embedded in $M-int(E_1\cup E_2)$ after isotopy. Suppose $D_1\cap E_2\ne\emptyset$. Since $M$ and $E_2$ are irreducible, an isotopy can eliminate curves in $D_1\cap T_2$ that are trivial in $T_2$ and innermost in $D_1$. Since $T_2$ is incompressible in $E_2$, if $D_1\cap T_2\ne\emptyset$ after this isotopy, the subdisk $\Delta$ of $D_1$ bounded by an innermost circle of $D_1\cap T_2$ in $D_1$ is a meridian disk of the solid torus $M-int(E_2)$. By choosing $D_2$ to be $\Delta$, we have that $D_2$ is properly embedded in $M-int(E_1\cup E_2)$ and clearly $D_2\cap T_1=\emptyset$. Now suppose $D_2$ is properly embedded in $M-int(E_1\cup E_2)$. Since $M-int(E_2)$ is a solid torus, by compressing $T_2$ along $D_2$, we get a 2--sphere $S_2$ bounding a 3--ball and the 3--ball contains $E_1$.
As $E_1$ lies in this 3--ball, this means that the 2--sphere $S_2$ lies in the solid torus $M-int(E_1)$ and hence bounds a 3--ball in the solid torus $M-int(E_1)$. Hence $M$ must be $S^3$, a contradiction. So exactly one of $E_1$ and $E_2$ is a solid torus and part 2 of the lemma holds. Suppose $\partial A$ is a pair of meridian curves for $N$. By part 2 of the lemma, $\partial A$ bounds an annulus $A'\subset T$ such that $A\cup A'$ bounds a solid torus $N'$ in $M-int(N)$. Moreover, each circle in $\partial A$ bounds a meridian disk of $N$. Since $M$ is not a lens space and $M$ is irreducible, $\partial A$ must be longitudes for the solid torus $N'$. Thus, $A$ is isotopic to $A'$ (fixing $\partial A$) in $N'$, and part 3 holds. \end{proof} Let $B$ be a branched surface in $M$ constructed by gluing normal disks together near the 2--skeleton, as in \cite{FO} and section~\ref{Spre}. By this construction, every surface carried by $B$ is a normal surface. Let $T$ be a normal surface fully carried by $B$, and we suppose $T\subset N(B)$ and $\partial_hN(B)\subset T$. So, $\partial_vN(B)$ is a union of annuli properly embedded in $\overline{M-T}$. By the construction of $B$, after a small perturbation and eliminating disks of contact, we may assume $\partial_vN(B)$ is normal with respect to the induced cell decomposition of $\overline{M-T}$. \begin{lemma}\label{L0eff} Let $M$ be a closed orientable irreducible and atoroidal 3--manifold with a 0--efficient triangulation. Suppose $M$ is not a Seifert fiber space. Let $B$ be a branched surface as above, i.e., $B$ is obtained by gluing together normal disks, $B$ fully carries a normal surface $T$ with $\partial_hN(B)\subset T$, and $\partial_vN(B)$ is normal with respect to the induced cell decomposition of $\overline{M-T}$. Suppose the branch locus $L$ of $B$ does not have any double point, $B-L$ consists of annuli and M\"{o}bius bands, and every component of $\partial_hN(B)$ is an annulus. 
Then, \begin{enumerate} \item $\partial_hN(B)$ is incompressible in $M-int(N(B))$, \item some component of $\partial N(B)$ bounds a solid torus in $M$ that contains $N(B)$, \item $M-int(N(B))$ contains a $monogon\times S^1$ region. \end{enumerate} \end{lemma} \begin{proof} By the hypotheses, any closed surface carried by $B$ is a normal surface with Euler characteristic 0. By Corollary~\ref{Cklein}, $M$ does not contain any normal Klein bottle. So, every closed surface carried by $B$ consists of normal tori. Let $T=\cup_{i=1}^mT_i$ be a collection of disjoint normal tori fully carried by $N(B)$, where each $T_i$ is a component of $T$, and we may assume $\partial_hN(B)\subset T$. Hence $\partial_vN(B)$ is a collection of annuli properly embedded in $\overline{M-T}$, whose boundary consists of essential curves in $T$. Moreover, $\partial_vN(B)$ is normal with respect to the induced cell decomposition of $\overline{M-T}$. By the hypotheses, every component of $\partial N(B)$ is a torus. Similar to the proof of Lemma~\ref{Lannulus}, $T\cup\partial_vN(B)$ form a barrier, and each component of $\partial N(B)$ bounds a solid torus in $M$. Let $E_1,\dots, E_n$ be the components of $M-int(N(B))$. Each $\partial E_i$ bounds a solid torus in $M$. Suppose $E_i$ is not a solid torus, then $M-int(E_i)$ is a solid torus that contains $N(B)$ and $T$. Moreover, by the proof of Lemma~\ref{Lannulus}, $E_i$ is irreducible. Since $E_i$ is not a solid torus, this implies that $\partial E_i$ is incompressible in $E_i$. Thus, similar to the proof of Lemma~\ref{Lannulus}, for any two components $E_i$ and $E_j$, at least one must be a solid torus. This implies that at most one component of $M-int(N(B))$ is not a solid torus. Next, we show that $\partial_hN(B)$ is incompressible in $M-int(N(B))$. 
The basic idea of the proof is that, if $\partial_hN(B)$ is compressible in $M-int(N(B))$, one can construct a solid torus bounded by a new normal torus carried by $N(B)$, and one can use the compressing disk of $M-int(N(B))$ to obtain a compressing disk of this new normal torus outside this solid torus, which contradicts part 2 of Lemma~\ref{Ltorus}. This solid torus is constructed by joining two $monogon\times S^1$ regions of $M-int(N(B))$. Let $N_i$ be the solid torus bounded by $T_i$. Suppose $\partial_hN(B)$ is compressible in $M-int(N(B))$ and let $D$ be a compressing disk. We may suppose $\partial D\subset\partial_hN(B)$ lies in $T_1$ and by Lemma~\ref{Ltorus}, $D$ is a meridian disk of the solid torus $N_1$ bounded by $T_1$. Let $H=\overline{N(B)-T}$. Since $\partial_hN(B)\subset T$, $H$ is a collection of $annuli\times I$ and twisted $I$--bundles over M\"{o}bius bands. $\partial H$ consists of two parts, the horizontal boundary $\partial H\cap T$ and the vertical boundary $\partial H\cap\partial_vN(B)$. We denote the horizontal boundary of $H$ by $\partial_hH$ and the vertical boundary of $H$ by $\partial_vH$ ($\partial_vH=\partial_vN(B)$). By the hypotheses, $\partial_hH$ consists of essential annuli in $T$. Since no component of $\partial_hN(B)$ is a torus, both $N_1$ and $M-int(N_1)$ contain some components of $H$. So, there must be a component of $\partial_vN(B)$, say $V$, properly embedded in $N_1$. By our assumptions, if $V$ is not $\partial$--parallel in $N_1$, then $V$ can be obtained by attaching a knotted tube to a pair of compressing disks of $N_1$. This implies that a component of $\overline{N_1-V}$, say $\Sigma$, is a 3--ball with a knotted hole. So, $\partial\Sigma$ is a torus incompressible in $\Sigma$. Since $T_1\cup V$ forms a barrier, we can use Operations~\ref{o1} and \ref{o2} to isotope $\partial\Sigma$ into a normal torus in $\Sigma$. However, by Lemma~\ref{Ltorus}, $M-int(\Sigma)$ must be a solid torus. 
Since $\Sigma$ is a ball with a knotted hole, this implies that $M$ is $S^3$. Therefore, each component of $\partial_vN(B)$ in $N_1$ must be $\partial$--parallel in $N_1$. This implies that there must be a $monogon\times S^1$ region of $M-int(N(B))$ in $N_1$. We denote this $monogon\times S^1$ region by $J_1$. So, $\partial J_1$ consists of an annulus in $T_1$ and a component of $\partial_vN(B)$, and $J_1\cap D=\emptyset$ ($D$ is the compressing disk above). Now, we consider the components of $H$ that lie in $M-int(N_1)$. The simplest case is that there is a component of $H$, say $H_1$, in $M-int(N_1)$ with its horizontal boundary totally in $T_1$. By the construction, the vertical boundary of $H_1$ consists of annuli properly embedded in $M-int(N_1)$. Since the branch locus $L$ has no double point and $\partial_hN(B)$ is compressible in $N_1$, the boundary curves of $\partial_vH_1$ are meridian curves in $\partial N_1$. By part 3 of Lemma~\ref{Lannulus}, each annulus in $\partial_vH_1$ is $\partial$--parallel in $M-int(N_1)$. So, there is also a $monogon\times S^1$ region $J_2$ of $M-int(N(B))$ in $M-int(N_1)$ with $\partial J_2$ consisting of an annulus in $T_1$ and a component of $\partial_vN(B)$. We denote the component of $\partial_vN(B)$ in $\partial J_i$ by $V_i$ ($i=1,2$). Within a small neighborhood of $T_1$ in $N(B)$, we can find an annulus $A\subset N(B)$ connecting $V_1$ to $V_2$ and transverse to the $I$--fibers of $N(B)$. The union of $\partial J_1-V_1$, $\partial J_2-V_2$ and two parallel copies of $A$ form a torus $T_J$ carried by $N(B)$, and $T_J$ bounds a solid torus $N_J$ which is the union of $J_1$, $J_2$ and a product neighborhood of $A$. By the hypothesis on $B$, $T_J$ is a normal torus. However, since the boundary of $\partial_vH_1$ consists of meridian curves of $\partial N_1$, a meridian disk of $N_1$ gives rise to a compressing disk for the torus $T_J$ in $M-int(N_J)$. This contradicts part 2 of Lemma~\ref{Ltorus}. 
Suppose there is a component $H_2$ of $H$ with one horizontal boundary component in $T_1$ and the other horizontal boundary component in $T_2$. Then, by our assumption on the meridian curves, the union of a vertical annulus of $H_2$ and a meridian disk of $N_1$ form a compressing disk for $T_2$. By part 2 of Lemma~\ref{Ltorus}, we must have $N_1\subset N_2$. Suppose $N_1\subset\dots\subset N_k$ is a maximal collection of nested solid tori, such that there is a component of $H$ between each pair of tori $T_i\cup T_{i+1}$, same as the $H_2$ above. Since $k$ is maximal, there must be a component of $H$ in $M-int(N_k)$ with horizontal boundary totally in $T_k$. As before, there is a $monogon\times S^1$ region $J_2$ of $M-int(N(B))$ in $M-int(N_k)$ with $\partial J_2$ consisting of an annulus in $T_k$ and a component of $\partial_vN(B)$. By assembling annuli in the $T_i$'s ($i=1,\dots,k$) and annuli in those components of $H$ between the tori $T_i\cup T_{i+1}$, we can construct an annulus $A\subset N(B)$, such that $A$ connects $J_1$ to $J_2$ as before and $A$ is transverse to the $I$--fibers of $N(B)$. Similarly, we can form a torus $T_J$ bounding a solid torus $N_J$, and $N_J$ is the union of $J_1$, $J_2$ and a product neighborhood of $A$. Moreover, a meridian disk of $N_1$ gives rise to a compressing disk for $T_J$ in $M-int(N_J)$, and we get a contradiction to part 2 of Lemma~\ref{Ltorus}. This proves that $\partial_hN(B)$ is incompressible in $M-int(N(B))$. By the hypotheses, $N(B)$ is a Seifert fiber space, and the Seifert fibration restricted to each annulus $\partial_hN(B)$ or $\partial_vN(B)$ is the standard foliation by circles. If every component of $M-int(N(B))$ is a solid torus, then since $\partial_hN(B)$ is incompressible in $M-int(N(B))$, $M$ is a Seifert fiber space. Therefore, by the conclusion before, exactly one component $E_i$ of $M-int(N(B))$ is not a solid torus, and $M-int(E_i)$ is a solid torus containing $N(B)$.
Let $N_1$ be an innermost solid torus. By the argument before, each component of $\partial_vN(B)\cap N_1$ is $\partial$--parallel in $N_1$. This implies that there is a $monogon\times S^1$ region in $N_1$, and part 3 of the lemma holds. \end{proof} \section{Splitting branched surfaces, the torus case}\label{Storus} A main technical part of this paper is to show that, if a branched surface $B$ carries a sequence of Heegaard surfaces $\{S_n\}$ and a measured lamination $\mu$ with $\chi(\mu)=0$, then one can split $B$ into a collection of branched surfaces, such that each $S_n$ is carried by a branched surface in this collection and no branched surface in this collection carries $\mu$. In this section, we consider the case that $\mu$ is a torus, and we prove the case that $\mu$ is an exceptional minimal lamination in the next section. The goal of this section is to prove Lemma~\ref{Ltorus2}. Let $B$ be a branched surface carrying a sequence of closed orientable surfaces $\{S_n\}$. Suppose $\mu$ is a lamination carried (but may not be fully carried) by $B$. By section~\ref{Spre}, there is a sub-branched surface of $B$, denoted by $B_\mu$, fully carrying $\mu$. We may consider $N(B_\mu)\subset N(B)$ with compatible $I$--fiber structure. Let $D\subset N(B_\mu)\subset N(B)$ be a disk transverse to the $I$--fibers. We call $D$ a \emph{simple splitting disk} for $\mu$ if $D$ satisfies the following conditions. \begin{enumerate} \item Each $I$--fiber of $N(B)$ intersects $D$ in at most one point. \item $D\cap\mu=\emptyset$. \item For any $I$--fiber $K$ that intersects $D$, both components of $K-D$ intersect $\mu$. \end{enumerate} Suppose $D$ is a simple splitting disk. Let $N(B_\mu')$ and $N(B')$ be the manifold obtained by eliminating a small neighborhood of $D$ from $N(B_\mu)$ and $N(B)$ respectively. So, we may consider $N(B_\mu')$ and $N(B')$ as fibered neighborhoods of branched surfaces $B_\mu'$ and $B'$ respectively. 
$B_\mu'$ and $B'$ are called the branched surfaces obtained by splitting along $D$. By our assumptions on $D$, $B_\mu'$ is the sub-branched surface of $B'$ that fully carries $\mu$. It is possible that some surfaces in $\{S_n\}$ are not carried by $B'$ anymore. Nonetheless, we have the following lemma. Recall that if $\mu\subset N(B)$ is a lamination carried by $B$, and $B'$ is obtained by splitting $B$, then we may assume $N(B')\subset N(B)$ and we say that $\mu$ is carried by $B'$ if $\mu\subset N(B')$ after some $B$--isotopy (see section~\ref{Spre} for the definition of $B$--isotopy). \begin{lemma}\label{Lsdisk} Let $B$, $\mu$, $B'$, $D$ and $\{S_n\}$ be as above. There is a finite collection of branched surfaces, obtained by splitting $B$, such that \begin{enumerate} \item each $S_n$ is carried by a branched surface in this collection, \item $B'$ is in the collection, \item if another branched surface $B''$ in this collection carries $\mu$, then $B'$ is a sub-branched surface of $B''$. In particular, $B'$ and $B''$ have the same sub-branched surface that fully carries $\mu$. \end{enumerate} \end{lemma} \begin{proof} Let $E$ be the union of $I$--fibers of $N(B)$ that intersect $D$. So, $E=\pi^{-1}(\pi(D))$, where $\pi:N(B)\to B$ is the collapsing map. Since each $I$--fiber of $N(B)$ intersects $D$ in at most one point, $E$ is homeomorphic to a 3--ball $D^2\times I$. After some small perturbation, we may simply identify $E$ to $D^2\times I$ with each $I$--fiber of $E$ coming from an $I$--fiber of $N(B)$. If $B'$ carries every surface in $\{S_n\}$, then there is nothing to prove. Suppose $S_n$ is not carried by $B'$. Then $S_n\cap D\ne\emptyset$ under any $B$--isotopy.
If $S_n\cap\mu=\emptyset$ in $N(B)$, then by adding some branch sectors to $B'$, we can construct a branched surface $B''$ that carries $S_n$, and $B''$ satisfies part 3 of the lemma (this construction is similar to Figure~\ref{sp1}, where one can obtain $\tau_1$ by adding a branch sector to $\tau_2$). Moreover, $B''$ can also be obtained by splitting $B$. Since $D$ is compact, there are only finitely many ways to add such branch sectors. Hence, there are only finitely many such $B''$. Next, we will assume $S_n\cap\mu\ne\emptyset$ under any $B$--isotopy. $S_n\cap E$ is a union of compact surfaces transverse to the $I$--fibers and each component of $S_n\cap E$ is $B$--isotopic to a sub-surface of $D$. Let $P$ be a component of $S_n\cap E$ such that $P\cap\mu\ne\emptyset$ under any $B$--isotopy. We may assume $P$ intersects both components of $D^2\times\partial I$, where $D^2\times I=E$ as above. Since $S_n\cap\mu\ne\emptyset$ under any $B$--isotopy, there is a relatively short arc $\alpha\subset P$ with endpoints in different components of $D^2\times\partial I$, and after slightly extending $\alpha$ in $S_n$, we may assume $\alpha\cap\mu\ne\emptyset$ under any $B$--isotopy. So, by deleting a small neighborhood of $\alpha$ from $N(B)$, we can split $B$ into a new branched surface $B_1$. This splitting is similar to the splitting from $\tau$ to $\tau_1$ in Figure~\ref{sp1}. By the construction, $B_1$ carries $S_n$, but since $\alpha\cap\mu\ne\emptyset$ under any $B$--isotopy, $B_1$ does not carry $\mu$. Since $D$ is fixed, up to $B$--isotopy, there are only finitely many such compact surfaces $S_n\cap E$, and there are only finitely many different splittings like this. Hence, we can perform such splittings on $B$ in a neighborhood of $E$ and obtain finitely many branched surfaces $B_1,\dots, B_k$, such that no $B_i$ carries $\mu$. These $B_i$'s plus the branched surfaces $B''$ above are the collection of branched surfaces satisfying the conditions in the lemma. 
\end{proof} Note that any splitting along $\mu$ can be decomposed as a sequence of successive splittings along simple splitting disks. Hence, we can apply Lemma~\ref{Lsdisk} at each step and obtain a collection of branched surfaces with similar properties. \begin{lemma}\label{Ltorus0} Let $B$ be a branched surface in $M$, and $T$ a compact orientable surface carried by $N(B)$. Suppose $T$ is either a closed surface or a surface whose boundary lies in $\partial_vN(B)$. Then, there is a finite collection of branched surfaces $B_1,\dots, B_k$ obtained by splitting $B$, such that \begin{enumerate} \item if $B_i$ still carries $T$, then each $I$--fiber of $N(B_i)$ intersects $T$ in at most one point, \item any closed surface carried by $B$ is carried by some $B_i$. \end{enumerate} \end{lemma} \begin{proof} If every $I$--fiber of $N(B)$ intersects $T$ in at most one point, then there is nothing to prove. Let $m$ ($m>1$) be the maximal number of points that an $I$--fiber of $N(B)$ intersects $T$, and let $I_m$ be the union of those $I$--fibers of $N(B)$ that intersect $T$ in $m$ points. Since $m$ is maximal, $I_m$ is an $I$--bundle over a compact surface $F_m\subset N(B)$ and each $I$--fiber of $N(B)$ intersects $F_m$ in at most one point. After ``blowing air" into $N(B)$ if necessary, we may assume $F_m$ is not a closed surface. Moreover, since $m$ is maximal, $\partial F_m\subset \pi^{-1}(L)$, where $L$ is the branch locus of $B$ and $\pi:N(B)\to B$ is the collapsing map, and the induced branch direction at $\partial F_m$ points into $F_m$. Note that if $F_m$ is non-orientable, $I_m$ is a twisted $I$--bundle over $F_m$. Since both $T$ and $M$ are orientable, no matter whether $F_m$ is orientable or not, we may assume that $F_m\cap T=\emptyset$ and for any $I$--fiber $K$ that intersects $F_m$, both components of $K-F_m$ intersect $T$. Suppose a component of $F_m$ is not a disk, then let $\alpha$ be a properly embedded essential arc in $F_m$. 
We can split $B$ in a small neighborhood of $\alpha$, as described in section~\ref{Spre} and shown in Figure~\ref{sp1}, and obtain a finite collection of branched surfaces with the following properties. \begin{enumerate} \item Any closed surface carried by $B$ is still carried by a branched surface in this collection. \item Suppose $T$ is carried by a branched surface $B'$ in this collection. Let $I_m'$ be the union of $I$--fibers of $N(B')$ that intersect $T$ in $m$ points, hence $I_m'$ is an $I$--bundle over a compact surface $F_m'$. Then, $F_m'$ is homeomorphic to the surface obtained by cutting $F_m$ open along $\alpha$. \end{enumerate} Thus, after a finite number of splittings, we may assume $F_m$ is a collection of disks. Now, similar to the splittings in the proof of Lemma~\ref{Lsdisk}, we can split the branched surface in a neighborhood of each disk component of $F_m$. Since each $I$--fiber intersects $F_m$ in at most one point, such splittings take place in disjoint 3--balls. So, after these splittings, we get a collection of branched surfaces that satisfy part 2 of this lemma, and if $T$ is still carried by a branched surface $B_i$ in this collection, then the maximal number of points that an $I$--fiber of $N(B_i)$ intersects $T$ is smaller than $m$. Therefore, we can apply these splittings to each branched surface in this collection, and eventually get $m=1$ for each branched surface that carries $T$. \end{proof} Now, we consider a torus $T$ carried by a branched surface $B$ in $M$. \begin{lemma}\label{Ltorus1} Let $B$ be a branched surface in $M$ and $T\subset N(B)$ an embedded torus carried by $N(B)$. Suppose each $I$--fiber intersects $T$ in at most one point and $T$ bounds a solid torus in $M$. Let $S\subset N(B)$ be a closed orientable surface fully carried by $B$ and $S\cap T\ne\emptyset$ under any $B$--isotopy. 
Then, there are a surface $S'$, a number $\sigma$, and an arc $\alpha\subset S'$ such that \begin{enumerate} \item $S'$ is carried by $B$ and is isotopic to $S$ in $M$, \item $length(\alpha)<\sigma$ and $\sigma$ depends only on $B$ and $T$, not on $S$, \item $\alpha\cap T\ne\emptyset$ under any $B$--isotopy. \end{enumerate} \end{lemma} \begin{proof} Let $E$ be the union of the $I$--fibers of $N(B)$ that intersect $T$. Since each $I$--fiber intersects $T$ in at most one point, $E$ is homeomorphic to an $I$--bundle $T^2\times I$. After some perturbation at $T^2\times\partial I$, we may assume the $I$--fibers of $E=T^2\times I$ are from the $I$--fibers of $N(B)$ and $T=T^2\times\{1/2\}\subset E$. $S\cap E$ is a union of compact orientable surfaces properly embedded in $E$ and transverse to the $I$--fibers. Let $T_0$ and $T_1$ be the two components of $T^2\times\partial I$. If the boundary of every component of $S\cap E$ lies in the same component of $T^2\times\partial I$, then after some $B$--isotopy, $T$ is disjoint from $S$, which contradicts our hypothesis. So, there must be a component of $S\cap E$, say $P$, intersecting both $T_0$ and $T_1$. Suppose a component $c$ of $\partial P$ is a trivial circle in $T_i$ and let $\Delta_c$ be the disk in $T_i$ bounded by $c$. We say the circle $c$ is of type $I$, if (after smoothing out the corner) $P\cup\Delta_c$ is a surface transverse to the $I$--fibers of $E$, otherwise, $c$ is of type $II$. For each innermost trivial circle $c$ in $\partial P\cap T_i$ of type $I$, we can glue the disk $\Delta_c$ to $P$ and then push (a neighborhood of) the disk into the interior of $T^2\times I$. This operation yields a new surface transverse to the $I$--fibers of $T^2\times I$. We can keep performing such operations on the resulting surface and eventually get a surface $\hat{P}$ such that $\partial\hat{P}$ contains no trivial circle of type $I$. 
$\hat{P}$ is a connected compact surface properly embedded in $T^2\times I$ and transverse to the $I$--fibers. We have the following 4 cases to consider. Case 1. $\partial\hat{P}$ contains a trivial circle in $T_i$. Let $c$ be an innermost trivial circle of $\partial\hat{P}\cap T_i$, and $c$ bounds a disk $\Delta_c$ in $T_i$. By the assumptions on $\hat{P}$, $c$ is of type $II$. Now, we cut $E=T^2\times I$ open along $\hat{P}$ and obtain a manifold $N$ which is the closure (under path metric) of $E-\hat{P}$. Since $\hat{P}$ is transverse to the $I$--fibers, we may consider $N$ as an induced $I$--bundle with its vertical boundary pinched into circles/cusps. Let $N_1$ be the component of $N$ containing $\Delta_c$. Since $c$ is of type $II$, $\Delta_c$ must be a component of the horizontal boundary of the pinched $I$--bundle $N_1$. Thus, $N_1$ is a product $D^2\times I$ with vertical boundary $\partial D^2\times I$ pinched to a circle. As $\hat{P}$ is connected, $\hat{P}$ must be a disk $B$--isotopic to $\Delta_c\subset T_i$. Since $\hat{P}$ is obtained by gluing disks to $P$, $P$ must be a planar surface $B$--isotopic to a sub-surface of $T$. Moreover, by our assumptions on $P$, $\partial P$ has components in both $T_0$ and $T_1$. Thus, there is an arc $\alpha$ properly embedded in $P$ connecting a component of $\partial P\cap T_0$ to a component of $\partial P\cap T_1$. Since $P$ is $B$--isotopic to a sub-surface of $T$, we can choose $\alpha$ so that $length(\alpha)$ is bounded from above by a number $\sigma$ that depends only on $T$ and $B$, not on $S$. Case 2. $\partial\hat{P}=\emptyset$. Since $\hat{P}$ is transverse to $I$--fibers, this implies that $\hat{P}$ is a torus $B$--isotopic to $T$. Since $\hat{P}$ is obtained by gluing disks to $P$, $P$ is $B$--isotopic to a sub-surface of $T$. 
Since $\partial P$ has components in both $T_0$ and $T_1$, as in case 1, we can find an arc $\alpha\subset P$ connecting a component of $\partial P\cap T_0$ to a component of $\partial P\cap T_1$, and the length of $\alpha$ is bounded by a number $\sigma$ that does not depend on $S$. Case 3. $\partial\hat{P}$ contains no trivial circle and $\partial\hat{P}\subset T_0$. In this case, $\partial\hat{P}$ consists of parallel essential simple closed curves in the torus $T_0$. As in case 1, we cut $E=T^2\times I$ open along $\hat{P}$ and obtain a pinched $I$--bundle $N$. Since $\partial\hat{P}\subset T_0$, $N$ has a component $N_1$ containing $T_1$. As $\partial\hat{P}\subset T_0$, $T_1$ is a component of the horizontal boundary of $N_1$. Hence, $\hat{P}$ is $B$--isotopic to a sub-surface of $T_1$. Moreover, since $\hat{P}$ is connected and $\partial\hat{P}$ contains no trivial circle, $\hat{P}$ is an annulus with $\partial\hat{P}\subset T_0$. Since $\hat{P}$ is obtained by gluing disks to $P$, $P$ is a planar surface $B$--isotopic to a sub-surface of $T$. As in case 1, we can find an arc $\alpha\subset P$ connecting a component of $\partial P\cap T_0$ to a component of $\partial P\cap T_1$, and the length of $\alpha$ is bounded by a number $\sigma$ that does not depend on $S$. Case 4. $\partial\hat{P}$ contains no trivial circle and $\partial\hat{P}$ has components in both $T_0$ and $T_1$. As before, let $N$ be the manifold obtained by cutting $E=T^2\times I$ open along $\hat{P}$, and $N$ is a pinched $I$--bundle with the bundle structure induced from that of $T^2\times I$. The two sides of $\hat{P}$ correspond to two sub-surfaces $\hat{P}^+$ and $\hat{P}^-$ in the horizontal boundary of $N$. As $\partial\hat{P}$ contains no trivial circle, $\partial\hat{P}^+$ and $\partial\hat{P}^-$ do not bound disks in the horizontal boundary of $N$. 
So, $\hat{P}^+$ and $\hat{P}^-$ are $\pi_1$--injective in $N$, which implies that $\hat{P}$ is incompressible and $\pi_1$--injective in $E=T^2\times I$. So, $\pi_1(\hat{P})$ is a subgroup of $\mathbb{Z}\oplus\mathbb{Z}$. By the assumption on $\hat{P}$ in this case, $\hat{P}$ must be an annulus with one boundary circle in $T_0$ and the other boundary circle in $T_1$. If the distance (in $\hat{P}$) between the two components of $\partial\hat{P}$ is large, then the annulus $\hat{P}$ wraps around $T$ many times. Since $\hat{P}$ is obtained by gluing disks to $P$, either there is a relatively short arc $\alpha$ properly embedded in $P$ connecting $\partial P\cap T_0$ to $\partial P\cap T_1$, or $P$ contains a sub-surface which is a long annulus wrapping around $T$ many times. In the latter case, we can perform a Dehn twist in $T^2\times I$ to unwrap $\hat{P}$ and $P$. Since $T$ bounds a solid torus in $M$, a Dehn twist around $T$ is an isotopy in $M$. Therefore, after a Dehn twist in $T^2\times I$, we get a surface $S'$, which is isotopic to $S$ in $M$ and also fully carried by $B$, such that there is an arc $\alpha$ connecting $S'\cap T_0$ to $S'\cap T_1$ and $length(\alpha)$ is less than a fixed number $\sigma$ that does not depend on $S$ or $S'$. After slightly extending such arcs $\alpha$ in $S$ or $S'$, we have $\alpha\cap T\ne\emptyset$ under any $B$--isotopy. \end{proof} \begin{lemma}\label{Ltorus2} Let $B$ be a branched surface in $M$, $T\subset N(B)$ an embedded torus carried by $N(B)$, and suppose $T$ bounds a solid torus in $M$. Let $\{S_n\}$ be a sequence of closed orientable surfaces carried by $B$ and with genus at least 2. Then, there is a finite collection of branched surfaces, obtained by splitting $B$ and then taking sub-branched surfaces, with the following properties. \begin{enumerate} \item No branched surface in this collection carries $T$. 
\item For each $S_n$, there is a surface $S_n'$ isotopic to $S_n$ in $M$ and fully carried by a branched surface in this collection. \end{enumerate} \end{lemma} \begin{proof} This lemma is an easy corollary of Lemmas~\ref{Ltorus0} and \ref{Ltorus1}. If there is an $I$--fiber of $N(B)$ that intersects $T$ in more than one point, by Lemma~\ref{Ltorus0}, we can split $B$ into a finite collection of branched surfaces $B_1,\dots, B_m$, such that any surface carried by $B$ is carried by some $B_i$, and if $B_i$ carries $T$, each $I$--fiber of $N(B_i)$ intersects $T$ in at most one point. Moreover, after taking sub-branched surfaces of each $B_i$, we may also assume that each $S_n$ is fully carried by some $B_i$. First note that if a branched surface $B$ fully carries $S_n$ then no component of $\partial_hN(B)$ can be a torus. This is because if $B$ fully carries $S_n$, $S_n$ intersects every $I$--fiber of $N(B)$ and hence any component of $\partial_hN(B)$ is isotopic to a subsurface of $S_n$. Thus if $\partial_hN(B)$ has a torus component then $S_n$ must be a torus, contradicting that $S_n$ has genus at least two. Let $B_i$ be a branched surface in this collection that carries $T$ and fully carries $S_n$. If $S_n\cap T=\emptyset$ in $N(B_i)$, then we cut $N(B_i)$ open along $T$ and obtain $\overline{N(B_i)-T}$ which carries both $T$ and $S_n$. However, a horizontal boundary component of $\overline{N(B_i)-T}$ is a torus parallel to $T$ and the argument above implies that $\overline{N(B_i)-T}$ does not fully carry $S_n$. So, after taking a sub-branched surface of $\pi(\overline{N(B_i)-T})$ (where $\pi$ is the map collapsing every $I$--fiber of $\overline{N(B_i)-T}$ to a point), we get a branched surface $B_i'$ that fully carries $S_n$. Note that the operation of taking a sub-branched surface destroys the torus components of the horizontal boundary that come from cutting $N(B_i)$ along $T$. So the new branched surface $B_i'$ does not carry $T$. 
A branched surface has only finitely many sub-branched surfaces. Thus, after these operations, we may assume each $B_i$ has the property that $S_n\cap T\ne\emptyset$ under any $B_i$--isotopy, if $B_i$ carries $T$ and fully carries $S_n$. Now, by Lemma~\ref{Ltorus1}, for each surface $S_n$ fully carried by $B_i$, we can find a surface $S_n'$ and an arc $\alpha\subset S_n'$, such that $S_n'$ is isotopic to $S_n$ in $M$, $S_n'$ is also fully carried by $B$, $\alpha\cap T\ne\emptyset$ under any $B_i$--isotopy, and $length(\alpha)$ is bounded from above by a fixed number $\sigma$ depending only on $T$ and $B_i$. Then, similar to the proof of Lemma~\ref{Lsdisk}, we split $N(B_i)$ in a small neighborhood of $\alpha$, as the splitting from $\tau$ to $\tau_1$ in Figure~\ref{sp1}. Since $\alpha\subset S_n'$ and $\alpha\cap T\ne\emptyset$ under any $B_i$--isotopy, we may perform the splitting so that the branched surface after this splitting still carries $S_n'$ but does not carry $T$. We may assume $\pi(\alpha)$ is transverse to the branch locus. Since $T$ is fixed and the length of $\alpha$ is bounded by a number $\sigma$ which depends only on $B_i$ and $T$, there are only a finite number of different such splittings along arcs like $\alpha$. Thus, after performing a finite number of splittings on $B_i$, we get a finite collection of branched surfaces with the following properties. \begin{enumerate} \item No branched surface in this collection carries $T$. \item For any surface $S_n$ carried by $B_i$, there is a surface $S_n'$ that is isotopic to $S_n$ in $M$ and carried by $B_i$. \end{enumerate} After performing these splittings on each $B_i$ and taking sub-branched surfaces if necessary, we get a collection of branched surfaces satisfying the properties in the lemma. \end{proof} \section{Splitting branched surfaces, the lamination case}\label{Slam} Suppose $M$ is a closed, orientable, irreducible and atoroidal 3--manifold, and $M$ is not a Seifert fiber space. 
By \cite{JR}, we may assume $M$ has a 0--efficient triangulation. By \cite{R,St}, every strongly irreducible Heegaard surface is isotopic to an almost normal surface with respect to the 0--efficient triangulation. As in section~\ref{Spre} and Proposition~\ref{Pfinite}, we can construct a finite collection of branched surfaces by gluing together normal disks and almost normal pieces, and each strongly irreducible Heegaard surface is fully carried by a branched surface in this collection. Since the only normal 2--sphere is the vertex-linking one, after taking sub-branched surfaces, we may assume no branched surface in this collection carries any normal 2--sphere. Let $B$ be a branched surface in this collection. Since $B$ fully carries an almost normal surface, at most one branch sector of $B$ contains an almost normal piece. Let $b$ be the branch sector of $B$ that contains the almost normal piece. As in section~\ref{Spre}, $B_N=B-int(b)$ is a sub-branched surface of $B$, which is called the normal part of $B$. Clearly, every surface carried by $B_N$ is a normal surface, and $B_N$ does not carry any 2--sphere. \begin{lemma}\label{Lmu} Let $B$ and $B_N$ be the branched surfaces constructed above, and let $\{S_n\}$ be a sequence of strongly irreducible Heegaard surfaces fully carried by $B$. Let $\mu$ be an exceptional minimal measured lamination carried by $B_N$ with $\chi(\mu)=0$. Then, $B$ can be split into a finite collection of branched surfaces with the following properties. \begin{enumerate} \item Up to isotopy, each $S_n$ is carried by a branched surface in this collection. \item No branched surface in this collection carries $\mu$. \end{enumerate} \end{lemma} \begin{proof} Let $B_\mu$ be the sub-branched surface of $B$ that fully carries $\mu$. So, $B_\mu$ is also a sub-branched surface of $B_N$. Since $B_N$ does not carry any 2--sphere, $B_\mu$ does not carry any 2--sphere either. 
Moreover, every torus $T$ carried by $B_\mu$ is a normal torus, and by Lemma~\ref{Ltorus}, $T$ bounds a solid torus in $M$. Next, we perform some splittings on $B_\mu$. By Lemma~\ref{Llocus}, after some splittings, we have the following. \begin{description} \item[Property A] the branch locus $L_\mu$ of $B_\mu$ has no double point, \item[Property B] $B_\mu-L_\mu$ consists of annuli and M\"{o}bius bands. \end{description} Note that any splitting on a branched surface can be divided into a sequence of successive splittings along simple splitting disks (see section~\ref{Storus} for definition). By Lemma~\ref{Lsdisk}, we can perform splittings on $B$ and $B_\mu$ and obtain a finite collection of branched surfaces, such that \begin{enumerate} \item each surface in $\{S_n\}$ is carried by a branched surface in this collection, \item if a branched surface $B'$ in this collection carries $\mu$, then $B_\mu$, the sub-branched surface of $B'$ fully carrying $\mu$, satisfies properties A and B above. \end{enumerate} To simplify notation, we still use $B$ to denote a branched surface in this collection that carries $\mu$, use $B_\mu$ to denote the sub-branched surface of $B$ fully carrying $\mu$, and assume $B_\mu$ satisfies properties A and B above. Moreover, as in the proof of Lemma~\ref{Llocus}, we may assume the number of components of $M-N(B_\mu)$ is minimal among branched surfaces fully carrying $\mu$. After some isotopy, we can also assume $\partial_hN(B_\mu)\subset\mu$. Since $B_\mu$ is a sub-branched surface of $B$, we may also consider $B$ as a branched surface obtained by adding some branch sectors to $B_\mu$. Next, we will fix $B_\mu$ and split $B$ near $B_\mu$. We first analyze how the branch sectors of $\overline{B-B_\mu}$ are added to $B_\mu$ at the cusps of $B_\mu$. Let $c_x$ be a circle in $\partial_hN(B_\mu)$ parallel and close to a boundary circle of $\partial_hN(B_\mu)$. 
Since the branch locus of $B_\mu$ contains no double point, $\pi(c_x)$ is a circle parallel and close to a component of the branch locus of $B_\mu$. To simplify notation, we use $l_x$ to denote both the component of the branch locus and the corresponding cusp. The union of the $I$--fibers of $N(B_\mu)$ that intersect $c_x$ is a vertical annulus $A_x$, and $A_x\cap\mu$ is a union of parallel circles. By assuming $N(B_\mu)\subset N(B)$ as before, we may consider $A_x$ as a vertical annulus in $N(B)$. Let $\hat{A}_x$ be the union of $I$--fibers of $N(B)$ that intersect $A_x$. After enlarging $\hat{A}_x$ a little, we may consider $\hat{A}_x$ as a fibered neighborhood of a train track $\tau_x$ which consists of the circle $\pi(c_x)$ and some ``tails" along the circle, as the top train track in Figure~\ref{sp2}. Note that $\tau_x$ can be regarded as the ``spine" of a small neighborhood of $\pi(c_x)$ in $B$. If $S_n$ is fully carried by $B$, then $S_n\cap\hat{A}_x$ is a union of arcs and/or circles transverse to the $I$--fibers. We have 3 cases. Case 1 is that $S_n\cap\hat{A}_x$ contains a circle. Case 2 is that $S_n\cap\hat{A}_x$ contains a spiral wrapping around $\hat{A}_x$ more than twice. Case 3 is that $S_n\cap\hat{A}_x$ contains no circle and the length of every arc in $S_n\cap\hat{A}_x$ is relatively short (compared with the length of the circle $\pi(c_x)$ in the train track). Now, we split $\hat{A}_x$ along $S_n\cap\hat{A}_x$. In the first case, we can split $\hat{A}_x$ along some relatively short arcs, as shown in splitting 1 of Figure~\ref{sp2}, and get a vertical annulus whose intersection with $S_n$ consists of circles. In the second case, we can split $\hat{A}_x$ along relatively short arcs, as shown in splitting 2 of Figure~\ref{sp2}, and get a fibered neighborhood of a train track whose intersection with $S_n$ consists of only spirals. 
The train track in the second case consists of a circle and some ``tails", and on each side of the circle, the cusps of the ``tails" have the same direction. In the third case, as shown in splitting 3 of Figure~\ref{sp2}, the splitting along a short arc will destroy the annulus $A_x$ and the circle $\pi(c_x)=\pi(A_x)$. \begin{figure}\label{sp2} \end{figure} Now, we consider the bigger 3-dimensional pictures of the splittings above. The third case is simple. In the third case, similar to the splittings in the proof of Lemma~\ref{Lsdisk}, the branched surface after the splitting does not carry $\mu$ anymore. Next, we will focus on the first two cases. For the first two cases, since $c_x$ lies in a small neighborhood of a boundary circle of $\partial_hN(B_\mu)$, we may assume the (2-dimensional) splittings occur in a small neighborhood of the cusp, and we perform some splittings and pinchings on $B$ accordingly, as shown in Figure~\ref{cusp} (a). Although both local splittings in Figure~\ref{cusp} (a) may happen in the two cases, the basic picture for the splittings in case 1 is the splitting 1 in Figure~\ref{cusp} (a), and the basic picture for the splittings in case 2 is the splitting 2 in Figure~\ref{cusp} (a). To simplify notation, we still use $B$ to denote the branched surface after the splittings above. In the first two cases, $B_\mu$ is still a sub-branched surface of $B$ after the splittings. In case 1, no branch sector of $\overline{B-B_\mu}$ intersects the cusp $l_x$ after the splittings, in other words, the cusp $l_x$ for $B_\mu$ is a cusp for $B$ after the splitting. In case 2, as shown in Figure~\ref{cusp} (b), the branch sectors of $\overline{B-B_\mu}$ have the coherent direction along the cusp $l_x$ after the splittings. \begin{figure}\label{cusp} \end{figure} Next, we show that the second case cannot happen. To prove this, we first show that the second case cannot happen at the cusp of a $monogon\times S^1$ region of $M-int(N(B_\mu))$. 
By Lemma~\ref{L0eff}, there is always a $monogon\times S^1$ region in $M-int(N(B_\mu))$. Let $D$ be a monogon disk properly embedded in $M-int(N(B_\mu))$ with $\partial D=\alpha\cup\beta$, where $\beta\subset\partial_vN(B_\mu)$ is a vertical arc in $\partial_vN(B_\mu)$ and $\alpha\subset\partial_hN(B_\mu)$. We can use $D\times S^1$ to denote the $monogon\times S^1$ region of $M-int(N(B_\mu))$. So, $\beta\times S^1$ is a component of $\partial_vN(B_\mu)$. As $\partial_hN(B_\mu)\subset\mu$, $\alpha\times S^1$ lies in a leaf $l$ of $\mu$. As in the proof of Lemma~\ref{Llocus}, $l$ is an infinite annulus. Since the number of components of $M-N(B_\mu)$ is minimal, $l$ is the boundary (under path metric) of a component of $M-\mu$ which is the product of $S^1$ and an end-compressing disk (i.e. a monogon with an infinitely long tail, see page 45 of \cite{GO}). Suppose we are in case 2 at the cusp of the $monogon\times S^1$ region $D\times S^1$, and suppose the branch sectors of $\overline{B-B_\mu}$ are coherent along this cusp $\beta\times S^1$, as shown in Figure~\ref{cusp} (b). For any surface $S_n$ fully carried by $B$, we can regard $S_n\cap (D\times S^1)$ as a compact surface carried by those branch sectors of $\overline{B-B_\mu}$ in this $monogon\times S^1$ region. Let $C_n$ be a component of $S_n\cap (D\times S^1)$ whose boundary intersects the cusp $\beta\times S^1$. The union of $C_n$ and $B_\mu$ naturally forms a sub-branched surface of $B$. Let $c$ be a boundary circle of $C_n$ that intersects the cusp of this $monogon\times S^1$ region. So, $c$ is a circle lying in the branch locus of $B$ and has an induced cusp/branch direction. Let $\gamma_c$ be an arc in $c$ with both endpoints in the cusp $l_x=\pi(\beta\times S^1)$, see the two dashed arcs in Figure~\ref{cusp}~(b) for pictures of $\gamma_c$. 
We may regard $\gamma_c$ as an arc properly embedded in the annulus $\alpha\times S^1\subset\partial_hN(B_\mu)$ and $\gamma_c$ has a normal direction induced from the cusp direction at $\gamma_c$. Since we are in case 2 and the branch sectors of $\overline{B-B_\mu}$ are coherent along this cusp $l_x$, as shown in Figure~\ref{cusp}~(b), the cusp directions at $\partial\gamma_c$ cannot be extended to a compatible cusp direction along $\gamma_c$. Hence, the second case can never happen near the cusp of a $monogon\times S^1$ region. In other words, after some splittings as in Figure~\ref{cusp}~(a), either $\overline{B-B_\mu}$ has no branch sector intersecting the cusp of any $monogon\times S^1$ region of $M-B_\mu$, or the branched surface after the splitting does not carry $\mu$ anymore. Now, we consider the cusp of any component $l_x$ of the branch locus of $B_\mu$, and suppose we are in case 2 at this cusp. So, we may assume the branch sectors of $\overline{B-B_\mu}$ at the cusp have coherent directions as in Figure~\ref{cusp} (b). Let $A_x'=\pi^{-1}(l_x)$, where $\pi: N(B_\mu)\to B_\mu$ is the collapsing map. Since the branch locus of $B_\mu$ has no double points and $\mu$ is a measured lamination, $A_x'\cap\mu$ is a union of circles. Since every leaf of $\mu$ is dense, as in the proof of Lemma~\ref{Llocus}, there is an annulus in $N(B_\mu)-\mu$, transverse to the $I$--fibers and connecting $\beta\times S^1$ to $A_x'$, where $\beta\times S^1$ is the cusp of a $monogon\times S^1$ region above. By deleting a small neighborhood of this annulus, we can split $B_\mu$ so that the cusp of this $monogon\times S^1$ region passes $l_x$ and lies in a small neighborhood of $l_x$, as shown in Figure~\ref{sp3} (a). Since no branch sector of $\overline{B-B_\mu}$ intersects the cusp of a $monogon\times S^1$ region, this splitting does not really affect the branch sectors of $\overline{B-B_\mu}$. 
\begin{figure}\label{sp3} \end{figure} As before, let $c_x$ be a circle in $\partial_hN(B_\mu)$ parallel and close to the cusp, and let $A_x=\pi^{-1}(c_x)$ be the vertical annulus, where $\pi:N(B_\mu)\to B_\mu$ is the collapsing map. By our assumptions on the branch locus of $B_\mu$, $\partial A_x$ lies in $\partial_hN(B_\mu)$. Since we have split $B_\mu$ along the annulus above, as shown in Figure~\ref{sp3} (a), we may assume a component of $\partial A_x$ lies in the horizontal boundary of the $monogon\times S^1$ region in $M-int(N(B_\mu))$, close to the cusp $\beta\times S^1$. We may consider $N(B_\mu)\subset N(B)$ and consider $A_x$ as a vertical annulus in $N(B)$. Let $\hat{A}_x$ be the union of $I$--fibers of $N(B)$ that intersect $A_x$. As before, after enlarging $\hat{A}_x$ a little, we may consider $\hat{A}_x$ as a fibered neighborhood of a train track which consists of a circle and some ``tails" along the circle. Since we are in case 2, by our assumptions above, the branch sectors of $\overline{B-B_\mu}$ that intersect the cusp of $l_x$ have coherent direction, and there is no branch sector of $\overline{B-B_\mu}$ intersecting the cusp of the $monogon\times S^1$ region. So, a neighborhood of $\hat{A}_x$ in $N(B)$ must be like Figure~\ref{sp3} (b), where the smooth boundary circle is a circle in $\partial_hN(B_\mu)$ parallel and close to the cusp of the $monogon\times S^1$ region and the ``tails" correspond to the branch sectors of $\overline{B-B_\mu}$ that intersect the cusp of $l_x$. Since the branch sectors of $\overline{B-B_\mu}$ that intersect the cusp of $l_x$ have coherent direction, these ``tails" in $\hat{A}_x$ have the same cusp/branch direction along the annulus $A_x$, as shown in Figure~\ref{sp3}~(b). A standard Poincar\'{e}-Bendixson type argument implies that any curves fully carried by $\hat{A}_x$ must contain an infinite spiral and a limit cycle. So, $B$ cannot fully carry any compact surface with this configuration. 
Therefore, the second case above cannot happen at any cusp circle of $B_\mu$. Recall that in case 3, after the splittings above, the branched surface does not carry $\mu$ anymore. In case 1, after the splittings, $\overline{B-B_\mu}$ does not contain any branch sector that intersects the branch locus $L_\mu$ of $B_\mu$, which means each circle in $L_\mu$ is a component of the branch locus of $B$. Since the splittings performed above are along relatively short arcs, similar to the proof of Lemmas~\ref{Lsdisk} and \ref{Ltorus2}, by performing a finite number of splittings on $B$ (and taking sub-branched surfaces if necessary), we can obtain a finite collection of branched surfaces with the following properties. \begin{enumerate} \item Each $S_n$ is fully carried by a branched surface in this collection, \item If a branched surface $B'$ in this collection carries $\mu$, then $B_\mu$, the sub-branched surface of $B'$ that fully carries $\mu$, satisfies properties A and B before, and no branch sector of $\overline{B'-B_\mu}$ intersects the branch locus of $B_\mu$. \end{enumerate} To simplify notation, we still use $B$ to denote a branched surface in this collection that carries $\mu$, and use $B_\mu$ to denote the sub-branched surface of $B$ that fully carries $\mu$. Let $L_\mu$ be the branch locus of $B_\mu$. By our assumptions before, $B_\mu-L_\mu$ consists of annuli and M\"{o}bius bands. We only need to consider the case that $B_\mu-L_\mu$ is a union of annuli, and the proof for the case that $B_\mu-L_\mu$ contains a M\"{o}bius band is the same after ``blowing air" into $N(B_\mu)$. Let $l$ be an essential simple closed curve in a component of $B_\mu-L_\mu$. Let $A=\pi^{-1}(l)$, where $\pi:N(B_\mu)\to B$ is the collapsing map. So, $A$ is a vertical annulus with $\partial A\subset\partial_hN(B_\mu)$, and $A\cap\mu$ is a union of parallel circles. 
By assuming $N(B_\mu)\subset N(B)$, we may consider the annulus $A$ as a vertical annulus in $N(B)$, and we denote the union of the $I$--fibers of $N(B)$ that intersect $A$ by $\hat{A}$. As before, after enlarging $\hat{A}$ a little, we may consider $\hat{A}$ as a fibered neighborhood of a train track which consists of the circle $l$ and some ``tails'' along $l$. Suppose $S_n$ is fully carried by $B$. We now split this train track along $S_n$. As before, we have 3 cases as shown in Figure~\ref{sp2}. Similarly, in the third case, the branched surface after splitting along a short arc does not carry $\mu$ anymore. Since every leaf is dense in $\mu$, we can find an annulus in $N(B_\mu)-\mu$ connecting any component of $\partial_vN(B_\mu)$ to $A$. Thus, by the argument on $L_\mu$ above, case 2 cannot happen at the circle $l$. Therefore, we can split the branched surface into a finite collection of branched surfaces, such that each $S_n$ is carried by a branched surface in this collection, and if a branched surface $B$ in this collection carries $\mu$, then no branch sector of $\overline{B-B_\mu}$ intersects $l$. We can apply this argument to any set of essential simple closed curves $l_1,\dots,l_m$ in $B_\mu-L_\mu$. So after a finite number of splittings, we may assume that no branch sector of $\overline{B-B_\mu}$ intersects any $l_i$. Now, $\overline{B-B_\mu}$ is a branched surface with boundary and the boundary of $\overline{B-B_\mu}$ is a train track in $B_\mu-L_\mu-\cup_{i=1}^ml_i$. Since every surface carried by $B_\mu$ is normal, we can find enough such circles $l_i$ so that, after the splittings above and eliminating disks of contact, the train track $\overline{B-B_\mu}\cap(B_\mu-L_\mu)$ does not carry any trivial circle in $B_\mu-L_\mu$. 
Since we can assume case 2 never happens along any such circles, by choosing enough such circles $l_i$ and after some more splitting and pinching, the boundary of $\overline{B-B_\mu}$ becomes a union of disjoint essential simple closed curves in the annuli $B_\mu-L_\mu$. Note that all the splittings above are along relatively short arcs and small disks. Similar to Lemma~\ref{Lsdisk}, we can perform a finite number of different splittings on $B$ and obtain a finite collection of branched surfaces such that \begin{enumerate} \item each $S_n$ is fully carried by a branched surface in this collection, \item if a branched surface $B'$ in this collection carries $\mu$, then $B_\mu$, the sub-branched surface of $B'$ fully carrying $\mu$, satisfies properties A and B before, and the boundary train track of $\overline{B'-B_\mu}$ consists of essential simple closed curves in $B_\mu-L_\mu$. \end{enumerate} By Proposition~\ref{PMS}, we may assume $B_\mu$ and $N(B_\mu)$ satisfy the hypotheses of Lemma~\ref{L0eff}. So, by Lemma~\ref{L0eff}, there is a torus component $\Gamma$ of $\partial N(B_\mu)$ bounding a solid torus in $M$ and the solid torus contains $N(B_\mu)$. $\Gamma$ is a union of annulus components of $\partial_vN(B_\mu)$ and $\partial_hN(B_\mu)$. Let $N$ be the solid torus bounded by $\Gamma$ ($N(B_\mu)\subset N$). Let $l$ be an essential simple closed curve in an annulus component of $\Gamma\cap\partial_hN(B_\mu)$. Since $B_\mu$ satisfies properties A and B before, by applying part 1 of Lemma~\ref {L0eff} to the components of $N-int(N(B_\mu))$, it is easy to see that $l$ does not bound a meridian disk of $N$. Suppose $B$ carries $\mu$ and fully carries infinitely many $S_n$'s. By our assumptions above, the boundary of $\overline{B-B_\mu}$ consists of essential circles in $B_\mu-L_\mu$. 
So, for each $S_n$ fully carried by $B$, we may assume that $S_n\cap\Gamma$ consists of parallel essential non-meridian curves, and for any such $S_n$, the slope of $S_n\cap\Gamma$ is the same as the slope of $l$ above. By Theorem~\ref{Tsch} (a theorem of Scharlemann \cite{S}), $S_n\cap N$ consists of $\partial$--parallel annuli and possibly one other component, obtained from one or two $\partial$--parallel annuli by attaching an unknotted tube along an arc parallel to an arc in $\Gamma-S_n$. We call the latter kind of component in Scharlemann's theorem an \emph{exceptional component}. An exceptional component is either a twice punctured torus or a planar surface with 4 boundary circles. Note that, for an exceptional component, if one fixes the annuli, then there is only one way to attach the tube, up to isotopy. Each component of $S_n\cap N$ is carried by $B\cap N$. Since the boundary of $\overline{B-B_\mu}$ consists of simple closed curves, after some small perturbation, we may assume $B$ is transverse to the torus $\Gamma$, $B\cap\Gamma$ consists of parallel essential non-meridian simple closed curves, $N(B)\cap\Gamma$ consists of vertical annuli, and $\mu\subset N(B_\mu)\subset N$. For each $\partial$--parallel annulus $A$ in the solid torus $N$, $A$ is isotopic (fixing $\partial A$) to an annulus $A'$ in $\Gamma$ and we call $A'$ the \emph{image} of $A$ in $\Gamma$. Let $A_1$ and $A_2$ be two $\partial$--parallel annuli in $N$ with $\partial A_i\subset N(B)\cap\Gamma$ ($i=1,2$). We say that $A_1$ and $A_2$ are equivalent if $A_1$ is isotopic to $A_2$ via an isotopy of $N$ fixing $\Gamma-N(B)$. Thus, there are only finitely many equivalence classes for $\partial$--parallel annuli with boundary in $N(B)\cap\Gamma$. Now, we consider the exceptional components as in Scharlemann's theorem above, and we say that two exceptional components (from two Heegaard surfaces) are equivalent if they are isotopic via an isotopy of $N$ fixing $\Gamma-N(B)$. 
Since the isotopy class of an exceptional component only depends on the annuli where the tube is attached, there are only finitely many equivalence classes for the exceptional components. Let $A_1$ and $A_2$ be properly embedded and disjoint annuli in $N$ carried by $N(B)\cap N$, and $A_i'$ be the image of $A_i$ in $\Gamma$. The solid torus bounded by $A_i\cup A_i'$ must contain at least one component of $\partial_hN(B)\cap N$. Moreover, if $A_1'$ and $A_2'$ are nested, say $A_1'\subset A_2'$, and if $A_1$ and $A_2$ are not $B$--isotopic, then the solid torus between $A_1$ and $A_2$, i.e. the solid torus bounded by $A_1\cup A_2\cup(A_2'-A_1')$, must contain at least one component of $\partial_hN(B)\cap N$. Thus, the number of disjoint and not $B$--isotopic annuli carried by $N(B)\cap N$ is bounded by a number which depends only on the number of components of $\partial_hN(B)\cap N$. So, by Scharlemann's theorem and the arguments above, for any $S_n\cap N$, there is a finite collection of components of $S_n\cap N$, denoted by $A_1,\dots, A_k$, such that each component of $S_n\cap N$ is $B$--isotopic to some $A_i$ and $k$ is bounded from above by a fixed number depending only on $N(B)\cap N$. So, we can split $B$ in a neighborhood of $N$ so that, after the splittings, $B\cap N$ becomes a collection of disjoint compact surfaces $A_i$'s above. By the assumptions on the $A_i$'s, the branched surface after this splitting still carries $S_n$ and clearly does not carry $\mu$, since $N(B_\mu)\subset N$. Suppose $\{S_n\}$ is the sequence of strongly irreducible Heegaard surfaces fully carried by $B$. Then, for each $S_n$, we use $\Sigma_n$ to denote the union of those $A_i$'s above. Now, we consider the sequence $\{\Sigma_n\}$. Each $\Sigma_n$ is fully carried by $B\cap N$ and consists of $\partial$--parallel annuli plus at most one exceptional component. 
Since the number of components of $\Sigma_n$ is bounded by a fixed number and since there are only finitely many equivalence classes, $\{\Sigma_n\}$ belong to finitely many isotopy classes. So, if we split $B\cap N$ into the $A_i$'s for each $n$, we only get a finite number of different branched surfaces, up to isotopy. Therefore, we can split $B$ in a neighborhood of $N$ and obtain a finite collection of branched surfaces such that \begin{enumerate} \item up to isotopy in $N$, each $S_n$ is fully carried by a branched surface in this collection, \item the intersection of $N$ and each branched surface in this collection consists of annuli and at most one exceptional component as in Scharlemann's theorem. In particular, no branched surface in this collection carries $\mu$. \end{enumerate} By combining all the splittings before, we get a finite collection of branched surfaces satisfying the properties of Lemma~\ref{Lmu}. \end{proof} Using Lemma~\ref{Ltorus2} and Theorem~\ref{TMS}, we can drop the hypothesis that $\mu$ is an exceptional minimal lamination in Lemma~\ref{Lmu}. \begin{corollary}\label{Cmu} Let $B$, $B_N$ and $\{S_n\}$ be as in Lemma~\ref{Lmu}. Let $\mu$ be a measured lamination carried by $B_N$ with $\chi(\mu)=0$. Then, $B$ can be split into a finite collection of branched surfaces with the following properties. \begin{enumerate} \item Up to isotopy, each $S_n$ is carried by a branched surface in this collection. \item No branched surface in this collection carries $\mu$. \end{enumerate} \end{corollary} \begin{proof} By Theorem~\ref{TMS}, $\mu$ is a disjoint union of a finite number of sub-laminations, $\mu_1,\dots,\mu_m$. It is a well-known fact that a measured lamination with positive Euler characteristic has a 2--sphere (or $P^2$) leaf (this is even true for ``abstract" laminations, see \cite{MO}). Since $B_N$ does not carry any 2--sphere, $B_N$ does not carry any measured lamination with positive Euler characteristic. 
So, $\chi(\mu_i)=0$ for each $i$. By Corollary~\ref{Cklein}, $B_N$ does not carry any Klein bottle. Hence, each $\mu_i$ either is an exceptional minimal lamination or consists of parallel normal tori. Now, the corollary follows immediately from Lemmas~\ref{Ltorus}, \ref{Ltorus2} and \ref{Lmu}. \end{proof} \section{Proof of the main theorem}\label{Sproof} Let $B'$ be a branched surface obtained by splitting $B$. By section~\ref{Spre}, we may naturally assume $N(B')\subset N(B)$. Recall that we say that a lamination $\mu$ carried by $B$ is also carried by $B'$ if after some $B$--isotopy, $\mu\subset N(B')\subset N(B)$, transverse to the $I$--fibers. \begin{proposition}\label{neighbor} Let $B'$ be a branched surface obtained by splitting $B$. Suppose $\mu$ is a measured lamination carried by $B$ but not carried by $B'$. Then, there is a neighborhood $N_\mu$ of $\mu$ in the projective lamination space of $B$, such that no measured lamination in $N_\mu$ is carried by $B'$. \end{proposition} \begin{proof} Let $\mathcal{PL}(B)$ and $\mathcal{PL}(B')$ be the projective lamination spaces for $B$ and $B'$ respectively. Suppose there is a measured lamination carried by $B'$ in every neighborhood of $\mu$ in $\mathcal{PL}(B)$. Then, there is an infinite sequence of measured laminations $\{\mu_n\}$ carried by $B'$ and the limit point of $\{\mu_n\}$ in $\mathcal{PL}(B)$ is $\mu$. Since $\mathcal{PL}(B')$ is compact, this sequence $\{\mu_n\}$ must have an accumulation point $\mu'$ in $\mathcal{PL}(B')$. Since every lamination carried by $B'$ is also carried by $B$, $\mu'$ is carried by $B$ and hence is an accumulation point of $\{\mu_n\}$ in $\mathcal{PL}(B)$. So, $\mu$ and $\mu'$ correspond to the same point in $\mathcal{PL}(B)$. Since points in $\mathcal{PL}(B)$ and the measured laminations described in section~\ref{SPL} are in one-to-one correspondence, $\mu=\mu'$ and we get a contradiction. \end{proof} Now, we are in position to prove Theorem~\ref{T2}. 
\begin{proof}[Proof of Theorem~\ref{T2}] By \cite{BCZ,BO,M,MSch}, we may assume $M$ is not a lens space or a small Seifert fiber space. So, by \cite{JR}, $M$ admits a 0--efficient triangulation. By \cite{R, St, K}, every strongly irreducible Heegaard splitting can be isotoped to an almost normal surface. By section~\ref{Spre}, we can find a finite collection of branched surfaces, $B_1,\dots,B_n$, such that each almost normal Heegaard surface is fully carried by some $B_i$. Since the triangulation is 0--efficient, we may assume that $B_i$ does not carry any normal 2--sphere for each $i$. Let $\mathcal{PL}(B_i)$ be the projective lamination space for $B_i$. We can identify each point in $\mathcal{PL}(B_i)$ with a measured lamination carried by $B_i$. Let $\mathcal{T}_i\subset\mathcal{PL}(B_i)$ be the collection of normal measured laminations with Euler characteristic 0. By Proposition~\ref{Pcompact}, $\mathcal{T}_i$ is a compact subset of $\mathcal{PL}(B_i)$. By Corollary~\ref{Cmu}, for each normal measured lamination $\mu\in\mathcal{T}_i$ carried by $B_i$, we can split $B_i$ into a finite collection of branched surfaces such that any strongly irreducible Heegaard surface fully carried by $B_i$ is fully carried by a branched surface in this collection, and no branched surface in this collection carries $\mu$. Since this is a finite collection, by Proposition~\ref{neighbor}, there is a neighborhood $N_\mu$ of $\mu$ in $\mathcal{PL}(B_i)$ such that none of the measured laminations in $N_\mu$ is carried by any branched surface in this collection. Since $\mathcal{PL}(B_i)$ and $\mathcal{T}_i$ are compact, there are a finite number of normal measured laminations $\mu_1,\dots,\mu_k$ in $\mathcal{T}_i$ such that $\cup_{j=1}^kN_{\mu_j}$ covers $\mathcal{T}_i$. 
By applying Corollary~\ref{Cmu} to each $\mu_j$ and combining all the splittings for the $\mu_j$'s, we can split $B_i$ into a finite collection of branched surfaces such that \begin{enumerate} \item each strongly irreducible Heegaard surface fully carried by $B_i$ is still fully carried by a branched surface in this collection, \item no branched surface in this collection carries any measured lamination in $N_{\mu_j}$ ($j=1,\dots, k$). Since $\cup_{j=1}^kN_{\mu_j}$ covers $\mathcal{T}_i$, no branched surface in this collection carries any normal measured lamination with Euler characteristic 0. \end{enumerate} After performing such splitting on each $B_i$, we get a finite collection of branched surfaces with the desired properties. \end{proof} Now, Theorem~\ref{main} follows easily from Theorem~\ref{T2}. \begin{proof}[Proof of Theorem~\ref{main}] By Theorem~\ref{T2}, there are finitely many branched surfaces, say $B_1,\dots,B_n$, such that any almost normal strongly irreducible Heegaard surface is fully carried by some $B_i$, and for any $i$, $B_i$ does not carry any normal 2--sphere or normal torus. Since each almost normal surface has at most one almost normal piece, at most one branch sector of $B_i$ contains an almost normal piece, and (if it exists) we call this branch sector the \emph{almost normal sector} of $B_i$. For each almost normal surface $S$ fully carried by $B_i$, the weight of $S$ at the almost normal sector is exactly one. It is possible that $B_i$ has no almost normal sector, in which case every surface carried by $B_i$ is normal. Let $\mathcal{S}_i$ be the set of almost normal Heegaard surfaces fully carried by $B_i$ and with a fixed genus $g$. Each $S\in\mathcal{S}_i$ corresponds to a positive integer solution to the branch equations. We can write $S=(x_1,\dots,x_m)$, where each $x_i$ is the weight of $S$ at a branch sector of $B_i$. We may assume the first coordinate $x_1$ corresponds to the almost normal sector. 
So, $x_1=1$ for every $S\in\mathcal{S}_i$. If $\mathcal{S}_i$ is an infinite set, then we can find two surfaces $S_1=(x_1,\dots,x_m)$ and $S_2=(y_1,\dots,y_m)$ in $\mathcal{S}_i$ such that $x_i\le y_i$ for each $i$. There are many ways to see this and the following was suggested by the referee. If for a fixed $i$ infinitely many surfaces in $\mathcal{S}_i$ have the same $i$-th coordinate, then we work with that collection and proceed by induction. Eventually we reach an infinite collection of surfaces where for some coordinates they all agree and for the rest only finitely many take any one value. Then for a fixed surface $S_1=(x_1,\dots,x_m)$ there are only finitely many surfaces in this collection with any coordinate less than $\max_i\{x_i\}$. Hence such a surface $S_2=(y_1,\dots,y_m)$ exists. Thus, $T=(y_1-x_1, y_2-x_2,\dots, y_m-x_m)$ is a non-negative integer solution to the branch equations. So, we may consider $T$ as a closed surface carried by $B_i$ ($T$ may not be connected). If $B_i$ has an almost normal sector, then $x_1=y_1=1$ and the weight of $T$ at the almost normal sector is $0$. Hence, $T$ is a normal surface. Moreover, since $genus(S_1)=genus(S_2)=g$, $\chi(T)=\chi(S_1)-\chi(S_2)=0$. This is impossible, since $B_i$ does not carry any normal 2--sphere or normal torus. Hence, each $\mathcal{S}_i$ is a finite set and there are only finitely many strongly irreducible Heegaard splittings of any genus $g$. Johannson proved Theorem~\ref{main} for Haken manifolds~\cite{Jo1,Jo2}. For non-Haken manifolds, by \cite{CG}, a weakly reducible Heegaard splitting is in fact reducible. So, any weakly reducible Heegaard splitting in an irreducible non-Haken manifold can be destabilized into a strongly irreducible Heegaard splitting. Therefore, Theorem~\ref{main} holds for all Heegaard splittings. \end{proof} \end{psfrags} \end{document}
\begin{document} \title{ Quantum-Selected Configuration Interaction:\\ classical diagonalization of Hamiltonians in subspaces selected by quantum computers} \newcommand{QunaSys Inc., Aqua Hakusan Building 9F, 1-13-7 Hakusan, Bunkyo, Tokyo 113-0001, Japan}{QunaSys Inc., Aqua Hakusan Building 9F, 1-13-7 Hakusan, Bunkyo, Tokyo 113-0001, Japan} \newcommand{Graduate School of Engineering Science, Osaka University, 1-3 Machikaneyama, Toyonaka, Osaka 560-8531, Japan}{Graduate School of Engineering Science, Osaka University, 1-3 Machikaneyama, Toyonaka, Osaka 560-8531, Japan} \newcommand{Center for Quantum Information and Quantum Biology, Osaka University, Japan}{Center for Quantum Information and Quantum Biology, Osaka University, Japan} \author{Keita Kanno} \email{[email protected]} \affiliation{QunaSys Inc., Aqua Hakusan Building 9F, 1-13-7 Hakusan, Bunkyo, Tokyo 113-0001, Japan} \author{Masaya Kohda} \email{[email protected]} \affiliation{QunaSys Inc., Aqua Hakusan Building 9F, 1-13-7 Hakusan, Bunkyo, Tokyo 113-0001, Japan} \author{Ryosuke Imai} \affiliation{QunaSys Inc., Aqua Hakusan Building 9F, 1-13-7 Hakusan, Bunkyo, Tokyo 113-0001, Japan} \author{Sho Koh} \affiliation{QunaSys Inc., Aqua Hakusan Building 9F, 1-13-7 Hakusan, Bunkyo, Tokyo 113-0001, Japan} \author{Kosuke Mitarai} \affiliation{Graduate School of Engineering Science, Osaka University, 1-3 Machikaneyama, Toyonaka, Osaka 560-8531, Japan} \affiliation{Center for Quantum Information and Quantum Biology, Osaka University, Japan} \author{Wataru Mizukami} \affiliation{Graduate School of Engineering Science, Osaka University, 1-3 Machikaneyama, Toyonaka, Osaka 560-8531, Japan} \affiliation{Center for Quantum Information and Quantum Biology, Osaka University, Japan} \author{Yuya O. 
Nakagawa} \affiliation{QunaSys Inc., Aqua Hakusan Building 9F, 1-13-7 Hakusan, Bunkyo, Tokyo 113-0001, Japan} \date{\today} \begin{abstract} We propose quantum-selected configuration interaction (QSCI), a class of hybrid quantum-classical algorithms for calculating the ground- and excited-state energies of many-electron Hamiltonians on noisy quantum devices. Suppose that an approximate ground state can be prepared on a quantum computer either by variational quantum eigensolver or by some other method. Then, by sampling the state in the computational basis, which is hard for classical computation in general, one can identify the electron configurations that are important for reproducing the ground state. The Hamiltonian in the subspace spanned by those important configurations is diagonalized on classical computers to output the ground-state energy and the corresponding eigenvector. The excited-state energies can be obtained similarly. The result is robust against statistical and physical errors because the noisy quantum devices are used only to define the subspace, and the resulting ground-state energy strictly satisfies the variational principle even in the presence of such errors. The expectation values of various other operators can also be estimated for obtained eigenstates with no additional quantum cost, since the explicit eigenvectors in the subspaces are known. We verified our proposal by numerical simulations, and demonstrated it on a quantum device for an 8-qubit molecular Hamiltonian. The proposed algorithms are potentially feasible to tackle some challenging molecules by exploiting quantum devices with several tens of qubits, assisted by high-performance classical computing resources for diagonalization. \end{abstract} \maketitle \section{Introduction} Recent years have seen a rapid development of quantum computers towards their practical use. 
Although current quantum devices are prone to errors due to physical noise, ways to achieve \textit{quantum advantage} over classical computations have been explored experimentally~\cite{arute2019quantum, zhong2020quantum, madsen2022quantum}, and such noisy intermediate-scale quantum (NISQ) devices are believed to become useful in the near future~\cite{preskill2018quantum}. Quantum chemistry is at the top of the list of such useful applications (see, e.g., Refs.~\cite{cao2019quantum, mcardle2020quantum, cerezo2021variational, bharti2022noisy, tilly2022variational}): for instance, energy eigenvalues of a molecular Hamiltonian can be calculated by quantum algorithms developed for NISQ devices, where the most notable is the variational quantum eigensolver (VQE)~\cite{peruzzo2014variational} to find the ground-state energy. However, VQE faces several challenges to be overcome for practical use. The major obstacle comes from errors caused by statistical fluctuation and physical noise inherent in the noisy devices. Suppressing the statistical error to a practically acceptable level needs a prohibitively large number of samples~\cite{gonthier2020identifying}, and error mitigation techniques~\cite{viola1999dynamical, temme2017error, li2017efficient,endo2018practical,koczor2021exponential,huggins2021virtual,mcardle2019error,bonet2018low,maciejewski2020mitigation,endo2021hybrid} for reducing physical noise require even more samples to compensate the additional statistical error they introduce~\cite{wang2021can,takagi2022fundamental, tsubouchi2022universal,takagi2022universal}. In particular, the effect of the errors can spoil the \textit{variational} nature of VQE: that is, the energy estimated by quantum devices is not guaranteed to give an upper bound on the exact ground-state energy. This is problematic because lowering the resulting energy of VQE does not necessarily mean approaching to the exact ground state. 
Besides, there are other challenges for VQE such as the barren plateau problem, which can interrupt the optimization~\cite{mcclean2018barren}. In this paper, we propose a class of hybrid quantum-classical methods, which we call quantum-selected configuration interaction (QSCI), to find low-lying eigenvalues and eigenstates of a many-electron Hamiltonian.\footnote{We focus on applications to quantum chemistry in this paper. However, the proposed methods can be applied to a variety of many-body Hamiltonians, including many-electron and spin problems in condensed matter physics.} QSCI is noise resilient and, in principle, free of costly optimization of parametrized quantum circuits. In particular, QSCI sets rigorous upper bounds on the ground-state energy\footnote{QSCI can also set rigorous upper bounds on the excited-state energies, depending on its algorithmic implementation.} even under the effect of physical and statistical errors. Here we outline a version of QSCI for finding a ground state: suppose that an approximate ground state, which we call an \textit{input state} in this paper, can be prepared on a quantum computer; one then repeats a measurement of the state to identify the computational basis states, or electron configurations, that are important to express the ground state~\cite{kohda2022quantum}; one then diagonalizes, on classical computers, the truncated Hamiltonian matrix in the subspace spanned by the identified configurations to obtain the smallest eigenvalue and eigenvector. The resulting eigenvalue approximates the ground-state energy. The diagonalization is classically tractable unless the number of selected configurations is exponentially large in the system size. The algorithm can be extended to find excited states by enlarging the subspace or by repeating the procedure for each energy eigenstate. 
Since the matrix elements of the Hamiltonian in the computational basis can be exactly calculated on classical computers, the diagonalization results in an energy that gives a definite upper bound on the exact ground-state energy regardless of the quality of the subspace spanned by the identified configurations; the quality only affects how tight the bound is. The states need to be measured only in the computational basis, and thus no additional gate operation is required for the measurement. In the presence of symmetries with conserved quantities such as the particle number, the post-selection of the computational basis states in the sampling outcome allows one to mitigate the bit-flip errors. We experimentally demonstrate the effectiveness of the post-selection in this paper. The algorithm may take any quantum states as the input states, if they roughly approximate the desired eigenstates and can be prepared on quantum devices. Such input states can be prepared, e.g., by parametrized quantum circuits moderately optimized via VQE and its variants~\cite{tilly2022variational}, and other preparation schemes are discussed in Sec.~\ref{subsec:state-preparation}. Sampling from such quantum states can be hard for classical computers~\cite{arute2019quantum}, thereby providing a potential quantum speed-up in QSCI. QSCI can also be advantageous as a technique for \textit{eigenstate} tomography in that it can (classically) estimate the expectation values of a variety of observables at no additional quantum cost: as we already have the classical representation of the state, one can efficiently compute the expectation values using that representation. Unlike QSCI, other efficient tomography techniques such as classical shadows~\cite{huang2020, zhao2021}, neural network tomography~\cite{Torlai_2018}, and tensor network tomography~\cite{Cramer_2010} do not exploit the fact that the states of our interest are eigenstates of some problem Hamiltonian. 
As the name suggests, QSCI can be viewed as a configuration interaction (CI), where the many-body basis set is determined by quantum computers via sampling of an input state. There are established techniques~\cite{helgaker2014molecular} that choose fixed basis sets. A common approach in electronic structure theory is to select only one- and two-particle excitations from a reference wavefunction. When the reference wavefunction is chosen to be Hartree-Fock, the resulting method is known as CI with singles and doubles (CISD). If the reference wavefunction is a correlated wavefunction beyond the mean-field approximation, the method is called multi-reference CISD (MR-CISD). In the context of quantum computing, MR-CISD has sometimes been called quantum subspace expansion (QSE)~\cite{mcclean2017hybrid,takeshita2020increasing,urbanek2020jctc}. Another approach is the adaptive selection of a suitable basis set for a target system. In quantum chemistry, a systematic selection of important bases has a long history~\cite{bender1969pr,whitten1969jcp,huron1973jcp,buenker1974tca,buenker1975tca,nakatsuji1983cluster,cimiraglia1987jcc,harrison1991jcp,greer1995jcp,greer1998jcpss}. There have recently been active studies on such systematic selected CIs~\cite{evangelista2014jcp,holmes2016jctc,schriber2016jcp,holmes2016jctc2,tubman2016deterministic,ohtsuka2017jcp,schriber2017jctc,sharma2017semistochastic,chakraborty2018ijqc,coe2018jctc,coe2019jctc,abraham202jctc,tubman2020modern,zhang2020jctc,zhang2021jctc,chilkuri2021jcc,chilkuri2021jctc,goings2021jctc,pineda2021jctc,jeong2021jctc,coe2023jctc,seth2023jctc}. Thanks to such developments, systematic selected CIs are now gradually being considered as a promising approach for large-scale quantum chemical simulations. The likely reason for this revival is that selected CI is an algorithm that can be adapted to current classical computer architectures with sufficient memory. 
QSCI may be seen as a new systematic selected CI that utilizes quantum computers. Our methods are capable of selecting electron configurations which are necessary to describe the eigenstates to some accuracy but are missed in the conventional methods with a fixed basis set. Note that our methods call the diagonalization procedure at most once for each eigenstate, while the adaptive methods iteratively repeat the diagonalization to search for a configuration to be added to the basis set; our methods require much less classical computational time compared to those adaptive methods. The classical diagonalization is already utilized in various hybrid quantum-classical algorithms to find energy eigenstates. Most notable is QSE, which spans the subspace by states built upon the reference VQE state, and is widely used for various applications, e.g., excited state calculations~\cite{mcclean2017hybrid}, band structure calculations~\cite{yoshioka2022variational}, and noise reduction~\cite{mcclean2017hybrid,bonet2018low, takeshita2020increasing,mcclean2020decoding,yoshioka2022generalized, epperly2022theory}. More generally, one can span the subspace by various methods~\cite{huggins2020non,motta2020determining, parrish2019quantumfilter, stair2020multireference,parrish2019quantum, seki2021quantum,baek2022say, kirby2022exact}, which are sometimes collectively called quantum subspace diagonalization. In those methods, however, the matrix elements of the subspace Hamiltonian are calculated on quantum computers, and thus are subject to physical and statistical errors. There is a proposal~\cite{radin2021classically} where some of the matrix elements are classically calculated, but the method still requires some matrix elements which are efficiently computable only by quantum computers for a possible quantum speed-up. 
In QSCI, on the other hand, all the matrix elements are classically computed, giving up the use of more complex and physically-motivated states as basis states that define the subspace. The rest of the paper is organized as follows. The proposed methods are introduced in Sec.~\ref{sec:methods}, and numerically tested in Sec.~\ref{sec:numerical}. A demonstration on a quantum device is presented in Sec.~\ref{sec:noisy-simulation-experiment}, along with a noisy simulation as a preparatory study. We discuss aspects of the proposed methods in Sec.~\ref{sec:discussion}, and finally conclude in Sec.~\ref{sec:conclusion}. Details of the algorithms, numerical simulations and experiment, as well as supplemental numerical results are given in the appendices. \section{Methods} \label{sec:methods} In this section, we present the methods of QSCI. Two ways of implementation are introduced: single diagonalization scheme in Sec.~\ref{sssec:method-single} and sequential diagonalization scheme in Sec.~\ref{sssec:method-sequantial}. They are designed for finding multiple energy eigenstates, and reduce to the same simplified method when used for finding the ground state alone. After introducing necessary ingredients, we begin with the algorithm specific to finding the ground state, which is simple and illustrative, and then proceed to the two methods which can also find excited states. \subsection{ Preliminary } We consider electronic structure problems of molecules in the second-quantization formalism with the Born-Oppenheimer approximation. A Hamiltonian and wave functions for electrons, in this setup, can be mapped onto $N_q$ qubits such that the Slater determinants\footnote{Instead, linear combinations of Slater determinants such as configuration state functions may be mapped to the computational basis states. 
QSCI can work with such a mapping, if the matrix elements of the Hamiltonian in the computational basis can be efficiently computed by classical computation.} for the Hartree-Fock state and its excitations are associated with the computational basis states $\ket{x}$, where $x\in{\{0, 1\}^{N_q}}$ is an $N_q$-bit string (see, e.g., Ref.~\cite{cao2019quantum,mcardle2020quantum}). In the Jordan-Wigner mapping, which we adopt in the numerical study, $N_q$ corresponds to the number of spin orbitals, and ``1'' or ``0'' represents whether each spin orbital is occupied or not. The methods can work with other mapping schemes such as the Bravyi-Kitaev mapping~\cite{bravyi2002fermionic}, although the fermion-qubit correspondence is less intuitive and the error mitigation (discussed later) is less effective. We denote the qubit Hamiltonian by $\hat{H}$. A linear combination of all the computational basis states, \begin{align} \ket{\psi} =\sum_{x\in{\{0, 1\}^{N_q}}} \alpha_x \ket{x}, \label{eq:general_state} \end{align} encompasses the full-CI wave function. Note that for a fixed number of electrons only a subset of the computational basis states is needed. In the full-CI method, sets of the CI coefficients \{$\alpha_x$\} that correspond to energy eigenstates are found by diagonalizing the Hamiltonian in the full Fock space. The method is costly due to the combinatorial growth of the Fock-space dimension as the number of spin-orbitals increases. For reducing the computational cost, there exist various classical approaches which truncate the Fock space and approximate the sum in Eq.~\eqref{eq:general_state} using a fixed or adaptively selected basis set, as mentioned in the previous section. In line with these efforts, but from a different viewpoint, we propose methods which harness quantum computers to identify important computational basis states, or electron configurations, for truncating the Fock space. 
\subsection{QSCI for ground state \label{subsec:ground-state} } We now describe the explicit algorithms. We begin with the algorithm for finding the lowest eigenvalue and the corresponding eigenstate (ground state) of an electronic Hamiltonian $\hat{H}$ on $N_q$ qubits. For simplicity, we assume the ground state is unique. When the degeneracy exists, the algorithms given in the next subsection, which is aimed at finding multiple eigenstates, can be straightforwardly applied. Indeed, the algorithm introduced in this subsection is a special case of each of the two algorithms in the next subsection. Let $\ket{{\psi_{\rm in}}}$ be an input state, which roughly approximates the ground state, and suppose $\ket{{\psi_{\rm in}}}$ can be prepared by a quantum circuit with $N_q$ qubits. Then, one prepares the input state on a quantum computer and measures the state in the computational basis, which results in an outcome bit string $x\in{\{0, 1\}^{N_q}}$. Repeating such a sampling procedure (or shot) for $N_{\rm shot}$ times, one counts how many times each $x$ appears. Based on the total sampling result, the most frequent $R$ computational basis states are selected to define the set \begin{align} \mc{S}_R = \{ \ket{x} | x\in{\{0, 1\}^{N_q}}, R~{\rm most~frequent} \}, \label{eq:set_GS} \end{align} where $R$ is a positive integer manually determined. This is to truncate the Fock space. One may in principle include all the computational basis states that appeared in the measurements, while choosing an appropriately small $R$ can reduce the computational cost for diagonalization. One then solves the eigenvalue problem in the subspace spanned by $\mathcal{S}_R$: \begin{align} \bm{H}_R\bm{c} = E_R\bm{c}, \end{align} where $\bm{H}_R$ is the $R\times R$ Hermitian matrix defined by \begin{align} (\bm{H}_R)_{xy}= \mel{x}{\hat{H}}{y}~{\rm for}~\ket{x}, \ket{y} \in \mathcal{S}_R, \end{align} and $\bm{c}$ is an eigenvector with eigenvalue $E_R$, satisfying $\bm{c}^\dagger \bm{c}=1$. 
This step of the algorithm proceeds via classical computations: calculations of the matrix elements $\mel{x}{\hat{H}}{y}$ and the diagonalization of $\bm{H}_R$. The former calculations can be efficiently done by some classical method, e.g., by the Slater-Condon rules in the fermionic basis. The latter diagonalization is performed to obtain the smallest eigenvalue $E_R$ and the eigenvector $\bm{c}$, which are the outputs of the algorithm. See Sec.~\ref{ssec:discussion-computational-cost} for further discussion on costs of these classical computations. Here, $E_R$ approximates the exact ground-state energy of $\hat{H}$, while $\bm{c}$ approximately gives the (normalized) CI coefficients, or the vector representation of the ground state. The corresponding quantum state, which we call the \textit{output state}, is constructed as \begin{align} \ket{{\psi_{\rm out}}}=\sum_{\ket{x}\in \mathcal{S}_R} c_x \ket{x}, \label{eq:output-state-gs} \end{align} where $c_x$ is an element of the eigenvector $\bm{c}$. The output state $\ket{{\psi_{\rm out}}}$ approximates the true ground state of $\hat{H}$. We remark that one does not need to realize the output state on quantum computers. Retaining the eigenvector $\bm{c}$ as classical data is enough for the application explained below. The output state can be used to estimate the expectation values of observables other than the Hamiltonian for the ground state, solely based on classical computations. Specifically, suppose that an observable in question is represented by a qubit operator $\hat{O}$. If the matrix elements $\mel{x}{\hat{O}}{y}$ can be efficiently computed on classical computers, so can the expectation value $\ev{\hat{O}}{{\psi_{\rm out}}}$, which is expected to give an approximation to the expectation value for the true ground state. 
In particular, if $\hat{O}$ can be expressed as a linear combination of $\text{poly}(N_q)$ Pauli strings, which is the case for many physical quantities, its expectation value can be efficiently computed on classical computers. Comments are in order for technical details. We identify the set $\mathcal{S}_R$ to span the subspace by sampling the input state. In this way, we expect that important computational basis states, or Slater determinants, to describe the ground state wave function can be selected. This is because in the sampling procedure a bit string $x$ occurs with the probability $\abs{\bra{x}\ket{{\psi_{\rm in}}}}^2$, while $\bra{x}\ket{{\psi_{\rm in}}}$ gives the CI coefficient of the corresponding Slater determinant in the input wave function $\ket{{\psi_{\rm in}}}$.\footnote{See, e.g., Eq.~\eqref{eq:general_state}. There, the CI coefficients can be expressed as $\alpha_x = \bra{x}\ket{\psi}$.} Indeed, $\mathcal{S}_R$ gives the $R$ Slater determinants with the largest coefficients $\abs{\bra{x}\ket{{\psi_{\rm in}}}}$ in $\ket{{\psi_{\rm in}}}$, under the ideal situation where physical noise can be ignored and the sampling is performed with an infinite number of shots. In passing, we sometimes adopt a method equivalent to this ideal situation to define $\mc{S}_R$ in the numerical study: that is, we just pick up the $R$ Slater determinants with the largest absolute values of the CI coefficients in the input state, instead of performing actual sampling procedures. We call this method the \textit{idealized sampling} in this paper. Note that we assume the input state roughly approximates the true ground state. This is just to ensure the two states share the important computational basis states, and there is no need for a precise agreement between the CI coefficients of the two states. Such an input state can be prepared, e.g., by a parametrized quantum circuit moderately optimized via VQE. 
In Sec.~\ref{subsec:state-preparation}, we discuss methods to prepare the input state, including non-VQE based ways. The set $\mathcal{S}_R$ is defined in Eq.~\eqref{eq:set_GS} by specifying $R$, the number of the computational basis states retained in the subspace. But this is not the unique choice. For instance, one may define the set by taking all the computational basis states in the measurement outcome, as already mentioned. Or, one may instead set a threshold on the rate of occurrence $f_x$ for an outcome $x$ in the total sampling result, and then define an alternative set $\mathcal{S}_\epsilon = \{ \ket{x} | f_x \geq \epsilon \}$ with a threshold parameter $\epsilon$. For a proof-of-principle demonstration, we adopt Eq.~\eqref{eq:set_GS} to define the subspace for diagonalizing the Hamiltonian in the rest of the paper. In reality, physical noise and statistical fluctuation, the latter due to a finite number of shots, cannot be ignored, causing some errors in the output. However, the effect is only indirect and the method is robust against those errors: that is, the errors can degrade the quality of the selected subspace by missing important configurations or by picking up irrelevant configurations in the sampling procedures, but the lowest eigenvalue and eigenvectors are exact within the subspace. The latter point, the exactness within the subspace, results from the use of diagonalization for the matrix $\bm{H}_R$, whose elements are exactly computed. Consequently, the obtained energy $E_R$ sets an upper bound on $E_{\rm exact}$, the true ground-state energy of $\hat{H}$: \begin{align} E_{\rm exact}\leq E_R. \label{eq:variational-inequality} \end{align} Note that this variational inequality holds even under statistical fluctuation and physical noise. 
The situation is in contrast with VQE, where such an inequality is not guaranteed as the energy is directly measured on quantum computers and hence is susceptible to the errors.\footnote{In VQE, physical noise in the state preparation can hardly lead to the violation of the variational inequality, but it may be possible that an error during the measurement procedure causes it. The use of error mitigation techniques can also lead to the breakdown of the inequality. } It is also worth mentioning that, for a given sampling outcome, increasing $R$, the subspace size, always leads to a better approximation of the ground-state energy: $E_{\rm exact} \leq E_{R_a} \leq E_{R_b}$ for $R_a > R_b$. This can be used to see if the calculation converges. On the other hand, smaller $R$ can reduce the classical computational cost. Such a trade-off between the accuracy and cost is discussed in Sec.~\ref{subsec:scaling}. The algorithm finds the lowest energy state in the subspace $\mc{S}_R$, which gives an approximation to the ground state in the {\it full} Fock space. When there exists symmetry in the Hamiltonian, there are associated conserved quantities, e.g., the total electron number $N_e$ (or the charge of molecule) and the $z$-component of total electron spin $S_z$. Given this, one may wish to find the lowest energy state in a specific symmetry sector. In such a case, the method can be similarly applied but by relying on the subspace with fixed conserved quantities. For $N_e$ and $S_z$, this can be easily achieved as follows since each computational basis state corresponds to a Slater determinant with definite $N_e$ and $S_z$: one prepares an input state with the desired values of $(N_e, S_z)$, for which the sampling results in configurations each with the desired $(N_e, S_z)$; or, if such an input state cannot be prepared, one may post-select the sampling outcome, where one discards an outcome $x\in{\{0, 1\}^{N_q}}$ if it conflicts with the desired $(N_e,S_z)$. 
It is worth noting that the variational inequality~\eqref{eq:variational-inequality} still holds in each sector of Fock space specified by $(N_e, S_z)$. \begin{figure*} \caption{Schematic description of the QSCI algorithm for finding the ground state. When selecting the configurations, one may post-select the configurations by using conserved quantities such as the electron number or spin $S_z$ to mitigate the errors.} \label{fig:algorithm-gs} \end{figure*} Physical noise can cause a contamination of symmetry sectors: for an input state with fixed $(N_e, S_z)$, sampling on a noisy device can result in electron configurations with unwanted values of $(N_e, S_z)$, due to the bit-flip noise\footnote{Note that an error that corresponds to a phase-flip error occurring at the end of a circuit does not affect the probability distribution $\abs{\bra{x}\ket{{\psi_{\rm in}}}}^2$ and hence the sampling outcome.} or readout error. Nevertheless, one can mitigate such errors by post-selecting the sampling outcome according to the conserved quantities, as described above. One then diagonalizes the Hamiltonian in the post-selected subspace. We find that the post-selection is particularly effective to mitigate the readout error in the Jordan-Wigner mapping, while it is also applicable to other fermion-qubit mapping schemes (see Appendix~\ref{subsec: post-selection} for discussions). The algorithm is schematically summarized in Fig.~\ref{fig:algorithm-gs}. \subsection{ QSCI for multiple energy eigenstates \label{subsec:excited-states} } \begin{figure*} \caption{Schematic descriptions of the QSCI algorithms for finding the ground state and the first excited state: (a) single diagonalization scheme, and (b) sequential diagonalization scheme. In both panels, $|\psi_{\rm in}^{(i)}\rangle$ denotes the $i$-th input state.} \label{fig:algorithm-es} \end{figure*} We now extend the algorithm to find multiple energy eigenstates, including low-lying excited states. 
For this, we note that the previous algorithm can output multiple eigenvectors, which can be taken to approximate excited states as well as the ground state. Yet, the quality of the approximation would not be satisfactory for the excited states, as the subspace is tailored for the ground state. Hence, we introduce extra input states to construct subspace(s) which can capture the excited states. In the following, we present two distinct algorithms to find multiple energy eigenstates and energies, schematically shown in Fig.~\ref{fig:algorithm-es}. The first algorithm, which we call the single diagonalization scheme, constructs a common subspace for both ground and excited states of interest, and performs the diagonalization in the subspace to simultaneously obtain all the desired eigenstates and energies. On the other hand, the second algorithm, dubbed as the sequential diagonalization scheme, constructs multiple subspaces, each tailored for each energy eigenstate, and sequentially diagonalizes the Hamiltonian in each subspace. Both of the algorithms contain the algorithm specific to the ground state, introduced in the preceding subsection, as a special case. \subsubsection{ Single diagonalization scheme \label{sssec:method-single} } Here we describe the single diagonalization scheme. Suppose one seeks for $N_s$ low-lying eigenstates of $\hat{H}$, which consist of the ground state(s) and subsequent excited states. In this case, one prepares multiple input states $|\psi_{\rm in}^{(i)}\rangle$ ($i=0,1,\cdots,N_{\rm in}-1$), which correspond to the low-lying energy eigenstates. Here, we allow $N_{\rm in}\leq N_s$, although the natural choice would be $N_{\rm in}=N_s$. For each of the input states, one repeats the sampling procedure as in the previous subsection. One then obtains the set of important configurations $\mc{S}_{R_i}^{(i)}$, formed by most frequent $R_i$ bit strings in the total sampling outcome for the $i$-th input state. 
Combining all the sets $\mc{S}_{R_i}^{(i)}$, one constructs the common subspace\footnote{Note that the set $\mc{S}_R$ defined here agrees with the definition~\eqref{eq:set_GS} in the preceding subsection when $N_{\rm in}=1$.}: \begin{align} \mc{S}_R = \mc{S}_{R_0}^{(0)} \cup \mc{S}_{R_1}^{(1)} \cup \cdots \cup \mc{S}_{R_{N_{\rm in}-1}}^{(N_{\rm in}-1)}. \label{eq:set_single-step} \end{align} In this case, the parameters $R_i$ may be eigenstate dependent, while $R$ is the number of the elements in the common subspace $\mc{S}_R$. $R\geq N_s$ is required to yield at least $N_s$ eigenvectors in the diagonalization procedure shortly explained. One may treat all $R_i$ as free parameters, which determine $R$ in turn. Or, one may first choose a value for $R$ and, then, decide each $R_i$ following some strategy. There are various ways for the latter strategy, depending on the purpose of using the algorithm. For example, if one prioritizes the ground state in terms of accuracy, a possible choice would be $R_0=R$ and $R_i=0$ for $i\neq 0$, albeit extreme. Or, if one wishes to treat all the input states on equal footing, each of $R_i$ can be chosen as equal as possible.\footnote{One can make each of $R_i$ as equal as possible by the following cycle of procedures, starting from an empty set $\mathcal{S}_R$, for a given $R$: in the first cycle, for each of the $N_\text{in}$ input states, the most frequent bit string is selected from the sampling outcome and then added to $\mathcal{S}_R$; this procedure is executed from the 0-th input state to $(N_{\rm in}-1)$-th input state, where one skips the state if the selected bit string already exists in $\mathcal{S}_R$; in the second cycle, the second frequent bit string is added to $\mathcal{S}_R$ for each input state according to the same rule; one repeats such a cycle until $\mathcal{S}_R$ is filled with $R$ distinct bit strings. Suppose such procedures finished after completing $R'$ cycles. 
Then, one can ensure that at least $R'$ most frequent bit strings for each input state are included in $\mathcal{S}_R$. This implies $R'$ or $R'+1$ most important configurations in each input state are included in the common subspace~\eqref{eq:set_single-step}, in the ideal situation where statistical fluctuation and physical noise can be ignored.} With the common subspace $\mc{S}_R$ constructed, one then diagonalizes the Hamiltonian in $\mc{S}_R$ as in the previous subsection: one constructs the $R\times R$ Hermitian matrix $\bm{H}_R$, solves the eigenvalue equation $\bm{H}_R\bm{c} = E_R\bm{c}$, and then picks up $N_s$ low-lying eigenvectors and eigenvalues, $(\bm{c}^{(0)}, E_R^{(0)}), (\bm{c}^{(1)}, E_R^{(1)}), \cdots, (\bm{c}^{(N_s-1)}, E_R^{(N_s-1)})$, where $\bm{c}^{(i)\dagger} \bm{c}^{(j)}=\delta_{ij}$. Here, $E_R^{(i)}$ ($E_R^{(0)}$) approximates the true energy of the $i$-th excited state (ground state), when the ground state is unique, for instance. The corresponding output states can be constructed as \begin{align} |\psi_{\rm out}^{(i)}\rangle =\sum_{\ket{x}\in \mathcal{S}_R} c_x^{(i)} \ket{x}, \label{eq:output-state_single-step} \end{align} for $i=0,1,\cdots, N_s-1$. Note that the algorithm in the previous subsection is a special case of the single diagonalization scheme with a single input state ($N_{\rm in}=1$). In this method, one can apply the same error mitigation technique by the post-selection as described in the previous subsection. The variational inequality now holds for each of energy eigenstates by Cauchy's interlace theorem~\cite{helgaker2014molecular} (see also Refs.~\cite{hylleraas1930numerical, macdonald1933successive}): \begin{align} E^{(i)}_{\rm exact} \leq E_R^{(i)}, \label{eq:variational-inequality-single} \end{align} for $i=0,1,\cdots, N_s-1$, where $E^{(i)}_{\rm exact}$ is the $i$-th eigenvalue (in ascending order) by the exact diagonalization. 
We remark that QSE~\cite{mcclean2017hybrid} and multistate-contracted VQE (MCVQE)~\cite{parrish2019quantum}, which also rely on the subspace diagonalization to obtain excited states, need to measure matrix elements, while the current method exactly calculates the matrix elements. Hence, we expect our method to be more noise-robust with the guarantee of the variational inequality. \subsubsection{ Sequential diagonalization scheme } \label{sssec:method-sequantial} We now give another scheme of QSCI to find excited states. The sequential diagonalization finds the ground state(s) and subsequent excited states by sequential diagonalization procedures of the Hamiltonian $\hat{H}$ in distinct subspaces. The algorithm is similar to the variational quantum deflation (VQD)~\cite{higgott2019variational}, a variant of VQE for excited states. Suppose one seeks for the $k$-th excited state\footnote{We implicitly assume the ground state is unique for ease of illustration. One can straightforwardly translate the description here to cases of degenerate ground (and possibly excited) states.} given that preceding $(k-1)$ excited states and ground state are already obtained by this method with the output states $|\psi_{\rm out}^{(i)}\rangle$ ($i=0,1,\cdots, k-1$). As in the previous methods, one repeats the preparation and measurement of the input state $|\psi_{\rm in}^{(k)}\rangle$ to obtain the set of important configurations: \begin{align} \mc{S}_{R_k}^{(k)} = \{ \ket{x} | x\in{\{0, 1\}^{N_q}}, R_k~{\rm most~frequent} \}. \label{eq:set_multi-step} \end{align} One then has to find the lowest energy state of $\hat{H}$ in this subspace, under the restriction that this state is orthogonal to the states already found, $|\psi_{\rm out}^{(i)}\rangle$ ($i=0,1,\cdots, k-1$). This can be achieved by diagonalizing the following effective Hamiltonian\footnote{This is not the unique choice of the effective Hamiltonian. 
For instance, the orthogonality can be imposed without introducing extra parameters though the implementation would be less suitable for NISQ devices~\cite{lee2018generalized}.} in the subspace spanned by $\mc{S}_{R_k}^{(k)}$: \begin{align} \hat{H}^{(k)} =\hat{H}+ \sum_{i=0}^{k-1}\beta_i |\psi_{\rm out}^{(i)}\rangle \langle \psi_{\rm out}^{(i)} |, \label{eq:sequential-Heff} \end{align} where $\beta_i$ are real parameters, which need to be sufficiently large for ensuring the orthogonality. The additional terms correspond to the overlap terms in VQD. This is equivalent to solving the eigenvalue equation \begin{align} \bm{H}_{R_k}^{(k)}\bm{c}^{(k)} = E_{R_k}^{(k)}\bm{c}^{(k)}, \label{eq:eigenvalue-eq} \end{align} and then picking up the smallest eigenvalue $E_{R_k}^{(k)}$ and eigenvector $\bm{c}^{(k)}$, normalized by $\bm{c}^{(k)\dagger} \bm{c}^{(k)}=1$. Here, $\bm{H}_{R_k}^{(k)}$ is the $R_k \times R_k$ Hermitian matrix defined by \begin{align} (\bm{H}_{R_k}^{(k)})_{xy}= \mel{x}{\hat{H}^{(k)}}{y}~{\rm for}~\ket{x}, \ket{y} \in \mathcal{S}^{(k)}_{R_k}, \label{eq:sequential-matrix} \end{align} whose matrix elements can be efficiently calculated by classical computations based on the expression \begin{align} (\bm{H}_{R_k}^{(k)})_{xy} = \mel{x}{\hat{H}}{y} +\sum_{i=0}^{k-1}\beta_i c_x^{(i)}c_y^{(i)*}. \end{align} One then constructs the output state \begin{align} |\psi_{\rm out}^{(k)}\rangle =\sum_{\ket{x}\in \mathcal{S}_{R_k}^{(k)}} c_x^{(k)} \ket{x}, \label{eq:output-state_multi-step} \end{align} which approximates the $k$-th excited state. Note that the expressions are specific to the $k$-th excited state. In order to find the entire (low-lying) spectrum, one has to repeat the above procedure sequentially, starting from $k=0$, the ground state, which can be found by the ground-state algorithm already explained. This is similar to VQD, but the QSCI method does not require extra circuits to calculate the overlap terms. 
The coefficients $\beta_i$ can be chosen in the same manner as VQD. We want the smallest eigenvalue of $\bm{H}_{R_k}^{(k)}$ to approximate $E^{(k)}_{\rm exact}$, the $k$-th eigenvalue of $\hat{H}$. Following the discussion in Ref.~\cite{higgott2019variational}, it suffices to choose $\beta_i > E^{(k)}_{\rm exact}-E^{(i)}_{\rm exact}$ for $i=0,\cdots, k-1$; or, one may apply the looser condition of $\beta_i > 2\sum_j \abs{c_j}$, where $c_j$ are coefficients in the qubit Hamiltonian $\hat{H}=\sum_j c_j P_j$, expressed by the Pauli strings $P_j$ (see Appendix~\ref{subsec:details-sequential} for details). In practice, the condition $\beta_i > E^{(k)}_{\rm exact} - E^{(i)}_{\rm exact}$ can be utilized if one has prior knowledge on the energy spectrum, e.g., based on variational quantum algorithms. Even if such information is not available, one may still rely on the looser condition $\beta_i > 2\sum_j \abs{c_j}$. Note that in the sequential diagonalization scheme, the variational inequality like Eq.~\eqref{eq:variational-inequality-single} is not guaranteed due to the inexactness of the effective Hamiltonian, i.e., as Eq.~\eqref{eq:sequential-Heff} would be constructed only by approximate eigenstates in practice (see Appendix~\ref{subsec:details-sequential} for further discussion). \section{Benchmark of QSCI with noiseless simulations \label{sec:numerical} } In this section, we test various aspects of QSCI for small molecules by noiseless numerical simulations, where the effects of physical noise are not included. In Secs.~\ref{subsec:ground-state-simulation-with-noiseless-vqe} and \ref{subsec:simulation-excited-h2o}, QSCI calculations are performed for ground states and excited states, using input states prepared by VQE and VQD~\cite{higgott2019variational}, a variant of VQE for excited states. Then the scalability of QSCI is examined in Sec.~\ref{subsec:scaling}, and finally the effect of the statistical error in QSCI is studied in Sec.~\ref{ssec:sampling-simulation}. 
For the numerical simulations, a quantum-circuit simulation library Qulacs~\cite{suzuki2021qulacs} is used with the help of QURI Parts~\cite{quri_parts}, a library for developing quantum algorithms. The simulations in Sec.~\ref{ssec:sampling-simulation} are carried out by the sampling simulator which takes into account the statistical error, while all the other simulations are performed by the state-vector simulator, where the expectation values are exactly calculated without errors. For each simulation and experiment in this paper, the molecular Hamiltonian is first prepared as the second-quantized electronic Hamiltonian using the Born-Oppenheimer approximation with Hartree-Fock orbitals using the STO-3G basis unless otherwise stated, and converted to the qubit one by the Jordan-Wigner mapping. Active spaces are explicitly specified when employed, otherwise the full-space Hamiltonians are used. The electronic Hamiltonians are generated by OpenFermion~\cite{mcclean2020openfermion} interfaced with PySCF~\cite{sun2018pyscf}. The molecular geometries and other details are shown in Appendix~\ref{sec:appendix-details-of-sim-and-exp}. Stable geometries are chosen for all the molecules except for the hydrogen chains, and a potential impact of unstable geometry is briefly analyzed in Appendix~\ref{ssec:appendix-bond-length}. \subsection{QSCI for ground state} \label{subsec:ground-state-simulation-with-noiseless-vqe} \begin{figure} \caption{ The result of QSCI, the proposed method, for the ground state of \ce{H2O} \label{fig:noiseless-vqe} \end{figure} We first show the result of numerical simulation for ground state with input states prepared by noiseless VQE. We choose \ce{H2O} molecule with five active spatial orbitals and six active electrons as our problem, which leads to a 10-qubit Hamiltonian after the Jordan-Wigner mapping. 
In the VQE calculation, the parametrized quantum circuit is constructed by the real-valued symmetry-preserving ansatz~\cite{gard2020efficient, ibe2022calculating} with the depth 10, and is optimized by the Broyden–Fletcher–Goldfarb–Shanno (BFGS) optimizer in the scientific library SciPy~\cite{virtanen2020scipy}. See Appendix~\ref{subsec:setup-noiseless-vqe} for details. The QSCI calculation is performed for each iteration of the VQE optimization: given the values of ansatz parameters obtained at the iteration, the input state is prepared by the parametrized quantum circuit with those values assigned; then, the QSCI calculation with the idealized sampling introduced in Sec.~\ref{subsec:ground-state} is performed to estimate the ground-state energy $E_R$ for a given $R$, the number of configurations in the subspace $\mc{S}_R$. This calculation is repeated for all the iterations of VQE with different values of $R$. The effect of the uncertainty due to the finiteness of the number of shots is addressed later in Secs.~\ref{ssec:sampling-simulation}, \ref{sec:noisy-simulation-experiment}, and Appendix~\ref{ssec:appendix-sampling}. In Fig.~\ref{fig:noiseless-vqe}, the result is shown along with the optimization history of VQE: $E_R - E_{\rm exact}$ is plotted (in Hartree) for each optimization step of VQE, where $E_{\rm exact}$ is the ground-state energy obtained by the exact diagonalization in the active space, called the complete active space configuration interaction (CASCI). The energies obtained by VQE are shown in the same way. Comparing the results at the last iteration in the plot, one can see that QSCI gives a lower energy than VQE for $R\gtrsim 16$. This shows that the method is able to improve the results of VQE even in the noiseless setting, where the effect of error mitigation is not present. 
We emphasize that, as discussed in Sec.~\ref{subsec:ground-state}, a lower energy by QSCI means that the energy is closer to the exact ground-state energy, which is manifested in the plot where $E_R - E_{\rm exact}$ is always positive. It is notable that we can already achieve the chemical accuracy\footnote{In this paper, we define the chemical accuracy by $\SI{1}{kcal/mol} \simeq \SI{1.6e-3}{Hartree}$ for the deviation of the calculated energy from the one obtained by the exact diagonalization of the Hamiltonian. } of $\SI{1.6e-3}{Hartree}$ with $R\sim 16$ while the CASCI in this case uses 100 determinants to express the ground state.\footnote{ For the active space restriction of five active orbitals and six active electrons with $S_z=0$, the number of the Slater determinants is $\binom{5}{3}\cdot \binom{5}{3}=100$. If one does not know the number of electrons and $S_z$ of the ground state before the calculation, then one would need to deal with the full Hamiltonian in the Fock space of $2^{10}=1024$ dimensions.} A similar tendency is observed for iterations of $\gtrsim 200$. Note, in this case, the VQE results already achieve the chemical accuracy. On the other hand, for intermediate iterations of 70--200, the VQE results do not reach the chemical accuracy, while QSCI can improve them to meet the chemical accuracy if $R \gtrsim 16$. This suggests that an intermediate result of VQE, which is not seeing convergence in the optimization yet, is already useful as an input state of QSCI, and that one can reduce the number of optimization steps for VQE by employing QSCI as a post-processing. We note that the QSCI results do not monotonically decrease, as a QSCI calculation for an input state with a lower energy does not necessarily result in a lower output energy. 
\subsection{QSCI for excited states} \label{subsec:simulation-excited-h2o} \begin{figure}\label{fig:noiseless-vqd} \end{figure} We next show the results of noiseless simulations for excited states of \ce{H2O} using the two distinct implementations of QSCI presented in Sec.~\ref{subsec:excited-states}, namely the single diagonalization and sequential diagonalization schemes, which take the input states for excited states as well as the ground state. The numerical setup is similar to the previous subsection, with some differences explained below. In the input-state preparation, we employ VQE for the ground state and VQD for excited states, each with the same ansatz and optimizer as in the previous subsection; we use the same 10-qubit Hamiltonian, but with the overlap terms and penalty terms~\cite{mcclean2016theory,ryabinkin2018constrained,kuroiwa2021penalty} in VQD, for orthogonality between the eigenstates and for symmetry restrictions (charge neutrality for the molecule and $S_z=0$ for the total electron spin) on the excited states. Under the same symmetry restrictions, the first excited state is a triplet state ($T_1$) and the second excited state is a singlet state ($S_1$), according to the exact diagonalization. The VQD calculation for $T_1$ requires information of the ground state ($S_0$) to generate the overlap term, for which the ansatz state is used with the converged parameters in VQE. A similar procedure is applied for $S_1$, but with the extra overlap term for $T_1$ added. With the input states for $S_0$, $T_1$ and $S_1$, we perform QSCI calculations to find $T_1$ and $S_1$, where the idealized sampling is used. See Appendix~\ref{subsec:setup-noiseless-vqd} for details. The results are shown in Fig.~\ref{fig:noiseless-vqd} along with the optimization history of VQD, in the similar way as Fig.~\ref{fig:noiseless-vqe}, but for $|E_R^{(i)} - E_{\rm exact}|$ ($i=1$ for $T_1$ and $i=2$ for $S_1$). 
At each iteration, three types of QSCI calculations are performed: sequential, single-ground-state, and single-mixed. Sequential diagonalization uses the $T_1$ ($S_1$) state at the iteration, and one (two) lower energy state(s) at their final iterations as input states. Two single diagonalization methods use different input states: ``single-ground-state'' uses the ground state prepared by the converged VQE calculation, and that is why they are constant in the plot; on the other hand, ``single-mixed'' uses the two (three) states as input states, and selects $R$ configurations so that each of the two (three) states contributes as equally as possible, as explained in Sec.~\ref{sssec:method-single}. Note that $R$ is the dimension of the common subspace $\mc{S}_R$ in Eq.~\eqref{eq:set_single-step}. The coefficient(s) $\beta_0$ (and $\beta_1$ for $S_1$ state) of the overlap term(s) for orthogonality is set to $\beta_0=\beta_1=\SI{1}{Hartree}$, which is sufficiently larger than the energy gaps between the states in question. For sequential diagonalization, the values of $R_i$ are fixed to $R_i=16$ for $i=0,1$ and 2, corresponding to $S_0$, $T_1$, and $S_1$ states, respectively; for single diagonalization, the value of $R$ is set to $R=16$, so that the sizes of the subspace Hamiltonian matrices to be diagonalized are the same among all the setups. Comparing the three QSCI results for excited states, the sequential diagonalization performs the best except for the initial steps of iterations where the quality of the input state is significantly low. Moreover, the sequential diagonalization outperforms the VQD calculation, even with a moderate value of $R_i=16$. For some larger $R$, the single diagonalization is also expected to improve and eventually outperform the VQD result at the same iteration as it can achieve the same representability as the sequential one\footnote{To show this explicitly, assume $N_s=2$ for simplicity. 
The single diagonalization with the subspace $\mc{S}_{R}=\mc{S}_{R_0}^{(0)}\cup\mc{S}_{R_1}^{(1)}$, where the subspaces on the right-hand side denote those of the sequential diagonalization, has at least the same representability as the sequential diagonalization calculation with $\mc{S}_{R_1}^{(1)}$.}. Although the sequential diagonalization seems to be better in terms of performance, it should be noted that there is no guarantee for the variational inequality in the sequential diagonalization. The inequality for excited states holds in the single diagonalization, as explained in Sec.~\ref{subsec:excited-states}. \subsection{Scaling of computational costs} \label{subsec:scaling} \begin{figure} \caption{Estimated $R$ required for a given energy error $\epsilon$. Results with (a) expanding active spaces (\ce{Cr2}) and (b) increasing system sizes (linear hydrogen chains) are shown.} \label{fig:scaling-a} \end{figure} We now investigate the scalability of the proposed method by estimating the classical and quantum computational costs to calculate the ground states for molecular Hamiltonians of various sizes. More concretely, we estimate the minimum value for $R$ and the required number of shots $N_{\rm shot}$ to obtain the ground-state energy within an error $\epsilon$ for those Hamiltonians. For this sake, we employ the chromium dimer \ce{Cr2} with various active spaces and the linear hydrogen chains with different numbers of atoms. Both \ce{Cr2} and hydrogen chains are known to be challenging molecules in quantum chemistry (see, e.g., Refs.~\cite{larsson2022chromium, motta2020ground} and references therein), while the hydrogen chains are also expected to show a clear scaling in the number of atoms. For \ce{Cr2}, the cc-pVQZ basis set is used with $n$ active orbitals and $n$ active electrons with $n=2,4,\dots,12$; the Jordan-Wigner mapping produces $4,8,\cdots,24$-qubit Hamiltonians, respectively.
For the linear hydrogen chains, we consider $4,6,\cdots,12$ hydrogen atoms equally separated by a distance \SI{1.0}{\AA}; we use the STO-3G basis set without specifying the active space, corresponding to full-space Hamiltonians of $8,12,\cdots, 24$-qubit after the Jordan-Wigner mapping, respectively. For each setup, the exact ground state of the Hamiltonian is prepared as the input state, and the QSCI calculation is performed by the idealized sampling introduced in Sec.~\ref{subsec:ground-state}, which picks up the $R$ Slater determinants with the largest absolute values of CI coefficients in the input-state wavefunction. Then, for a given accuracy $\epsilon$, the minimal $R$ that satisfies $\abs{E_R -E_{\rm exact}} \leq \epsilon$ is determined, where $E_R$ is the energy obtained by QSCI with the $R$ configurations and $E_{\rm exact}$ by the exact diagonalization. In Fig.~\ref{fig:scaling-a}, the results are plotted for each molecule by varying the number of qubits, for $\epsilon=0.1, 0.01$ and $0.001$~Hartree; they are extrapolated by fitting (shown by lines) to discuss the feasibility for larger system sizes. As detailed in Sec.~\ref{ssec:discussion-computational-cost}, we infer that the diagonalization with $R\simeq \SI{5e7}{}$ configurations is achievable by the current state-of-the-art classical computing according to the reports~\cite{stampfuss2003improved, garniron2019quantum}. The result for \ce{Cr2} suggests that $R$ is expected to be manageable even when we require $\epsilon=\SI{0.001}{Hartree}$ for a system larger than 50 qubits, where the exact diagonalization in the whole Fock space, i.e., CASCI, is challenging for classical computers. In the case of the hydrogen chains, on the other hand, the exponential growth of $R$ is more clearly observed, and it may become hard to achieve $\epsilon=\SI{0.001}{Hartree}$ for a system much larger than 50 qubits due to the limitation of classical computing. 
Note that the two scalings have slightly different meanings: the active space is enlarged for \ce{Cr2} while fixing the molecule, i.e., the system size, while the system size itself is enlarged for the hydrogen chains. The results may suggest that our method is more suited to a localized system with many electrons involved, rather than a spatially extended system. For similar studies and results for several diatomic and aromatic molecules, see Appendix~\ref{ssec:more-results-scaling}. \begin{figure} \caption{Estimated number of shots for a given energy error $\epsilon$. For QSCI, the number of shots are approximated by $1/\abs{c_R} \label{fig:scaling-b} \end{figure} We next estimate the number of shots for sampling required to achieve an error of $\epsilon$ by using the value of $1/\abs{c_R}^2$ for each setup (Fig.~\ref{fig:scaling-b}). Here, $c_R$ is the CI coefficient that has the $R$-th largest absolute value in the input state, where $R$ is taken to be the values shown in Fig.~\ref{fig:scaling-a}. When the state is sampled $1/\abs{c_R}^2$ times, the probability of obtaining the $R$-th most significant configuration is $O(1)$, and in that sense, $1/\abs{c_R}^2$ gives a rough estimator for the number of shots required to sample $R$ most significant configurations. We see in the next section, especially in Fig.~\ref{fig:conventional}, that this gives a ballpark estimate of the required number of shots for a given accuracy. For comparison, the total number of shots required in a conventional expectation-value estimation is also estimated. More precisely, we analytically estimate the number of shots for which the standard deviation of the expectation-value estimations equals $\epsilon$ for the exact ground state (see, e.g., Ref.~\cite{kohda2022quantum}). 
In the conventional methods, the expectation value of the Hamiltonian, which is expressed as a linear combination of Pauli strings, is estimated by directly measuring the quantum state in the basis of the Pauli strings multiple times and taking the average of the measurement outcome. To reduce the number of measurements, we employ the qubit-wise commuting (QWC) grouping~\cite{mcclean2016theory} with the sorted insertion algorithm~\cite{crawford2021efficient}. The total number of shots is distributed to each of the groups with the shot allocation optimized for the exact ground state\footnote{This shot allocation may not be possible in practice without prior knowledge of the exact ground state, but this estimation gives a lower bound on the required number of total shots among possible shot allocation strategies, for a given error tolerance with the given grouping method and the state. }~\cite{wecker2015progress, rubin2018application}. Note that, although there are methods that are capable of reducing the number of measurements better than QWC, they require more gate operations for measurements than QWC does; QWC requires a layer of single-qubit rotations after the state preparation, which is minimal for methods that measure the Pauli strings directly, while QSCI requires no gate operation. Most of the other grouping methods are thus expected to be more vulnerable to noise, and QWC is chosen for a fair comparison in this study. Figure~\ref{fig:scaling-b} shows the values of $1/\abs{c_R}^2$ in QSCI for various numbers of qubits, along with the estimated numbers of shots in QWC. For the hydrogen chains, the results of QWC are fitted by a function $a(N_q)^b$ with parameters $a$ and $b$ as they are expected to be polynomial in the number of qubits\footnote{More precisely, the fit was performed by a function \begin{equation} \log (N_{\text{shot}}(N_q))= B\log(N_q)+A, \end{equation} where $A$ and $B$ are the free parameters.
Similarly in Fig.~\ref{fig:scaling-a}, a linear function $c N_q+d$ was used to fit the data for $\log(R)$, rather than $D\times 2^{C N_q}$ for $R$. }, while the scaling of QSCI is unclear and fitting is not performed. In the case of \ce{Cr2}, the number of shots for the proposed method seems to be consistently smaller than that of QWC, while the advantage of QSCI, in terms of reducing the effect of statistical fluctuation, is more non-trivial in hydrogen chains with the numbers of qubits $N_q\gtrsim 30$. The more operators are evaluated with the same output state, the more advantageous QSCI becomes; as we already noted in the previous section, QSCI does not require any additional quantum computation to evaluate additional observables, because QSCI outputs the classical vector representation of the state, and the expectation values are evaluated classically. On the other hand, in the conventional methods, quantum computational cost becomes more expensive, e.g., to measure additional Pauli strings introduced by the extra operators. To exploit this feature, we explore a scenario where the nuclear gradient and Hessian are evaluated along with the energy in the case of the hydrogen chains. For the shot allocation in the QWC grouping, we developed a method that is optimized for measuring multiple operators at once and is used in the simulation; see Appendix~\ref{subsec:appendix-scaling-multiple-operators} for details. The result, shown also in Fig.~\ref{fig:scaling-b} (b), implies that such a scenario makes QSCI much more advantageous\footnote{It is numerically shown in Appendix~\ref{ssec:appendix-multiple-observable-accuracy} that the accuracy of the gradients and Hessians in QSCI are of the same order as $\epsilon$ when expressed in the units of Hartree$/\mathrm{\AA}$ and Hartree$/\mathrm{\AA}^2$, respectively.}, as the number of shots for QWC significantly increases. 
QSCI generally outperforms QWC in terms of the sampling cost within the range of the system size that we studied. Although the scaling of QSCI seems to be worse than that of QWC in hydrogen chains, we should emphasize here that, even if QWC outperforms QSCI at, say, 50 qubits, it does not mean that QSCI is not useful for Hamiltonians with more than 50 qubits: QSCI has various features, such as error mitigation and the explicit representation of the output state, over the conventional methods, in addition to the reduction of the number of shots. The result should be interpreted as an implication that QSCI can be advantageous in moderately smaller but still classically-challenging systems, even when we only consider the effect of reduction of the number of shots. \subsection{Sampling simulation} \label{ssec:sampling-simulation} For assessing the effect of the statistical error during the sampling in QSCI, sampling simulation with different numbers of shots is performed. The result for a linear \ce{H6} molecule (12 qubits) is shown in Fig.~\ref{fig:conventional}, and results for other molecules are in Appendix~\ref{ssec:appendix-sampling}. For this simulation, the exact ground state is used as the input state, and we include all the configurations obtained in the sampling into the basis set $\mathcal{S}_R$ and we do not specify $R$ beforehand. For comparison, we also performed a conventional sampling estimation for the exact ground state with QWC grouping and a shot allocation optimized for Haar random states. For both QSCI and QWC, we perform 10 trials of sampling simulation for each number of shots, and the average of the absolute differences to the exact ground-state energy is plotted along with the standard deviation of the 10 trials. The absolute differences to the exact value are much smaller in QSCI compared to the conventional sampling with QWC grouping. 
It is worth noting that the standard deviation of QSCI energy is smaller than its average difference, while those of QWC sampling are roughly equal. Energy values obtained by QSCI are biased estimators for the exact expectation values even when using the exact ground states as input states. Thus, the absolute difference can roughly be calculated as a sum of the intrinsic bias existing in QSCI and the standard deviation which comes from statistical fluctuation of the subspace $\mathcal{S}_R$. In QWC, on the other hand, the statistical error is the only source of error. One can say that the QSCI result is much less affected by the statistical error compared to the conventional method. Furthermore, as one can see in Fig.~\ref{fig:conventional}, $1/\abs{c_R}^2$ calculated in the previous simulation gives a relatively accurate estimation of the total shots that gives an average error close to $\epsilon$. Thus the plots in Fig.~\ref{fig:scaling-b} for both QWC and QSCI give reasonable estimations of the number of shots with expected average error $\epsilon$, and the comparison is fair in this sense. \begin{figure} \caption{Energy error results for both QSCI and conventional QWC in sampling simulations. For each set of 10 trials for each method, the standard deviation and the average of the absolute error to the exact value obtained by exact diagonalization are shown. QSCI energy error using $1/\abs{c_R} \label{fig:conventional} \end{figure} \section{Benchmark of QSCI with noisy simulation and experiment} \label{sec:noisy-simulation-experiment} \begin{figure*} \caption{ QSCI results for the ground state of the linear hydrogen chain $\ce{H4} \label{fig:vqe-experiment} \end{figure*} In this section, we describe the result of the experiment for the ground state of the hydrogen chain \ce{H4} (8 qubits), conducted on the IonQ 11-qubit device through Amazon Braket service, along with the result of noisy sampling simulation using Qulacs with the identical setup. 
We first run a VQE calculation of a linear \ce{H4} molecule with bond lengths \SI{1.0}{\AA} on a noiseless state-vector simulator. We use the STO-3G basis set without freezing any orbitals, and thus the problem Hamiltonian is 8-qubit. The so-called Ry ansatz with depth 8 is employed for the VQE calculation. See Appendix~\ref{ssec:setup-experiment} for details, including the circuit diagram of the ansatz. Then, we perform QSCI calculations on the quantum hardware and the noisy simulator using four sets of parameters at four distinct iterations of the VQE calculation. We use 10,000 shots for each sampling, and the most frequent $R$ configurations are selected to define the subspace, with and without the post-selection. The post-selection of the sampling result is performed using the number of electrons $N_e=4$ and the spin $S_z=0$. For noisy simulation, to simulate the physical noises on the device, single-qubit depolarizing noise is added after each gate and bit-flip noises are added at the end of the circuit to mimic the measurement error. The level of each type of the noise is determined by the single-qubit and two-qubit gate fidelities, and the measurement fidelity of the actual device: 99.61\%, 96.868\%, and 99.824\%, respectively\footnote{More precisely, the error rate of the single-qubit depolarizing noise for each single-qubit gate is set to $p_1$, where $p_1$ is the single-qubit gate infidelity. For the two-qubit gates, single-qubit depolarizing noise is applied to each of the two qubits with probability $1-\sqrt{1-p_2}$ for a two-qubit gate infidelity $p_2$. The bit-flip noise is applied to each qubit with probability $p_{\text{ro}}$, the measurement infidelity.}. For comparison, on the quantum device and the noisy simulator, the calculation of the expectation value of the energy using a conventional method is performed with 10,000 shots. The QWC grouping and the shot allocation optimized for Haar random states are employed. 
Error mitigation techniques, which may improve the result at the cost of additional quantum resources, are not employed in this study. The results are presented in Fig.~\ref{fig:vqe-experiment}. By comparing the results from the noisy simulator and the quantum device, one can see that they have a reasonable agreement, although the result from the quantum device seems to be more affected by the errors. Moreover, it is clear that the post-selection is powerful in both simulation and experiment. It is particularly worth noting that even on the physical device, some of the QSCI calculations with $R=27$ do outperform the result of CISD, which also diagonalizes the subspace Hamiltonian with 27 Slater determinants, and achieve the chemical accuracy on the 8-qubit system. Some minor comments are in order: firstly, at the earlier iterations, the number of sampled (and post-selected) configurations was sometimes less than the given $R$, because the state is concentrated in some computational basis states. In that case, we only used the sampled configurations for the QSCI calculation; secondly, CASCI result, i.e., the exact diagonalization result, corresponds to\footnote{The number of Slater determinants which have the required particle number and $S_z$ is $\binom{4}{2}\cdot \binom{4}{2}=36$.} $R=36$; this number may seem to be comparable to $R=27$, but it is still a non-trivial task to choose 27 configurations out of 36 possibilities. \section{Discussion} \label{sec:discussion} In this section, we discuss various aspects of QSCI. We start from its classical and quantum computational costs in Sec.~\ref{ssec:discussion-computational-cost}, and then discuss its benefits for refining VQE results in Sec.~\ref{ssec:discussion-qsci-refine-vqe}. In Sec.~\ref{subsec:state-preparation}, several ideas for preparing input states are introduced. 
The aspect of QSCI as a selected CI is discussed in Sec.~\ref{ssec:discussion-qsci-as-selected-ci}, and ideas for future directions are finally introduced. \subsection{Computational costs} \label{ssec:discussion-computational-cost} Here classical and quantum computational costs are examined. In QSCI, classical computing is used for generating the truncated Hamiltonian matrix $\bm{H}_R$ and diagonalizing it. Exploiting the Slater-Condon rules, one can generate the sparse matrix $\bm{H}_R$ efficiently in both $R$ and the number of orbitals (see, e.g., Ref.~\cite{tubman2020modern} for details). For diagonalizing $\bm{H}_R$, one can employ algorithms to diagonalize a sparse matrix, such as the Lanczos method or the Davidson method. The generation and diagonalization of the Hamiltonian matrix are common procedures in the selected CI methods, and it is reported~\cite{garniron2019quantum} that $R\simeq \SI{5e7}{}$ of Slater determinants are manageable when a state-of-the-art high-performance computing resource is available, even for the method that repeats the Hamiltonian generation and diagonalization. In our method, such a repetition is not needed, and thus the computational cost should be smaller. As already discussed in Sec.~\ref{subsec:scaling}, Fig.~\ref{fig:scaling-a} suggests that, for some challenging molecules of $\sim$50 qubits, the QSCI calculation is feasible in terms of the classical cost by the current state-of-the-art classical computing, while meeting the accuracy requirement of $\epsilon \lesssim \SI{0.001}{Hartree}$. Note such a system size would be beyond the reach of the exact diagonalization. The quantum computational time is $t_Q=N_{\text{shot}}\times t_{\text{prepare}}$, where $N_{\text{shot}}$ is the number of shots for the sampling, i.e., the repetitions of the input-state preparation and measurement, and $t_{\text{prepare}}$ is the time needed for a single shot. 
Note that the total computational time can be reduced if multiple quantum computers are available, since the sampling procedures are completely parallelizable. $t_{\text{prepare}}$ highly depends on the type of quantum device to be used and the way to prepare the input state. For example, the Sycamore processor used in Google's quantum supremacy experiment~\cite{arute2019quantum} can achieve $N_{\text{shot}}=\SI{1e6}{}$ in 200 seconds for a quantum circuit with 53 qubits and 20 repetitions of entangling operations, which corresponds to $N_{\text{shot}}\sim\SI{4e8}{}$ in a day. Hence, Fig.~\ref{fig:scaling-b} implies that the sampling cost is affordable for \ce{Cr2} with several tens of qubits, while it may be challenging at the moment to achieve $\epsilon=\SI{0.001}{Hartree}$ for a hydrogen chain with, say, 50 qubits. We remark that the sampling cost can be significantly reduced if one can prepare a state $\ket{\Delta\psi}$ that is orthogonal to a classically tractable state $\ket{\psi_c}$ such that, for some complex numbers $\alpha$ and $\beta$, $\ket{\psi_{\text{GS}}}=\alpha \ket{\psi_c} + \beta \ket{\Delta\psi}$ approximates the ground state, and can sample from $\ket{\Delta\psi}$ on a quantum computer. The state $\ket{\psi_c}$ can be the Hartree-Fock state or more intricate states such as the CISD state. For example, if $\abs{\alpha}^2=0.9$, then the sampling cost for a given precision can be reduced by a factor of ten. On the other hand, $\ket{\Delta\psi}$ can be prepared, e.g., by the method of Ref.~\cite{radin2021classically}. \subsection{ Use of QSCI to refine VQE results } \label{ssec:discussion-qsci-refine-vqe} QSCI can be viewed as a post-processing technique for VQE and its variants, when they are used to prepare the input states.
Our methods have the following advantages: \begin{description} \item[Error reduction] By virtue of the classical diagonalization of a Hamiltonian matrix generated classically, the proposed methods can refine the VQE results, as demonstrated in Sec.~\ref{sec:numerical} and Sec.~\ref{sec:noisy-simulation-experiment}. Although results of noiseless VQE simulations are used to prepare the input states in our numerical and experimental studies, our results suggest that QSCI is also effective to refine \textit{dirty} VQE results subject to the statistical and physical errors. Figure~\ref{fig:vqe-experiment} also shows the effectiveness of the post-selection: the rate of the readout error, which is one of the major sources of physical errors, can be reduced from $O(p)$ to $O(p^2)$ with the Jordan-Wigner mapping, as discussed in Appendix~\ref{subsec: post-selection}. Note that QSCI does not require extra gate operations for the measurement, unlike expectation-value estimations in VQE. As already shown in Fig.~\ref{fig:noiseless-vqe}, our method is also effective to improve the quality of the input state even in the absence of physical and statistical errors. This feature may enable one to use ansatzes with shallower circuits, or to reduce the number of optimization steps in VQE, by employing QSCI to improve the final result. \item[Reliability] Our method is free of errors in the sense that the resulting ground-state energy is exact within the subspace spanned by the quantum-selected configurations. This means that the obtained energy is a definite upper bound for the exact ground-state energy, which is not the case in conventional VQE because of physical and statistical errors, as discussed in Sec.~\ref{subsec:ground-state}. 
This is advantageous for comparing the QSCI result obtained on noisy quantum devices with the results of classical variational methods such as CISD or density matrix renormalization group (DMRG)~\cite{white1992,white1993,Schollwock2005}: the variational nature of these methods guarantees that the method that gives the lowest energy is the most accurate one. Similar variational inequalities hold for excited states in the single diagonalization scheme of QSCI, while there is no such guarantee in the sequential diagonalization scheme. Although the latter appeared to be more accurate in our numerical simulation, the former is of great use if one is interested in giving rigorous upper bounds on excited-state energies. \item[Handiness] As one has the classical representations of the eigenstates as output, one can compute the expectation values of a large class of observables with no additional quantum computation. Our method becomes more valuable when more observables are to be evaluated, as exemplified in Fig.~\ref{fig:scaling-b}. Moreover, one can also analyze the classical vectors themselves, which may be useful to study the significance of each Slater determinant. \end{description} \subsection{ Use of QSCI with more general input states } \label{subsec:state-preparation} As discussed in the previous sections, input states for ground state can be prepared by VQE, and those for excited states by its variants, but the proposed methods are applicable to more general input states. Our method can in principle be applied to any kind of input states that can be prepared and sampled on a quantum computer. 
We give an incomplete list of possible preparation schemes for input states in the following: the adiabatic state preparation~\cite{farhi2000quantum, aspuru2005simulated}, the imaginary time evolution~\cite{williams2004probabilistic, terashima2005nonunitary, mcardle2019variational,mao2022measurementbased}, classically-boosted VQE~\cite{radin2021classically}, classically-optimized shallow ansatz circuits~\cite{okada2022identification}, unitary coupled-cluster ansatz circuits with classically-optimized parameters~\cite{mcclean2016theory, romero2018strategies, kuroiwa2023clifford+, hirsbrunner2023beyond}, and parametrized states classically optimized by Clifford circuits~\cite{mitarai2022quadratic, ravi2022cafqa}. Note that the performance of QSCI depends on the quality of the input state and also on the form of the exact eigenstate. For example, if the exact eigenstate is the equal superposition of all the computational basis states, then our algorithm will not perform well. The algorithm can also be useful for a Hamiltonian that has an exactly-known ground state. For example, one can calculate an exact ground-state energy of a system that is solvable with the Bethe ansatz, but there are quantities, such as a class of correlation functions, that cannot be computed efficiently~\cite{verstraete2009quantum}. Our method provides the classical representation of an approximate eigenstate, which means that one can evaluate various physical quantities without additional quantum resource, as we already discussed for states prepared by VQE. The preparation of the Bethe ansatz states on quantum computers is addressed in Refs.~\cite{van2021preparing, sopena2022algebraic}. Moreover, although we proposed the method as a hybrid quantum-classical algorithm, one can apply the method to input states that can be sampled efficiently on classical computers. This is shortly discussed in Sec.~\ref{ssec:outlook}. 
\subsection{ QSCI as selected CI } \label{ssec:discussion-qsci-as-selected-ci} As a selected CI method, the novelty of QSCI comes simply from how to define the subspace on which we construct the subspace Hamiltonian. Quantum computers are used to sample important configurations from the input state, and there is a quantum speed-up when the input state is hard to sample classically. In selected CI methods, the subspace of the Fock space for the diagonalization is either fixed by the method, e.g., CISD, or adaptively chosen according to the algorithm. We have shown experimentally that CISD performs worse even when compared to the QSCI result on the current NISQ device (Fig.~\ref{fig:vqe-experiment}). One of the most advanced methods for sampling dynamically important bases is the adaptive sampling configuration interaction (ASCI) algorithm developed by Tubman and co-workers~\cite{tubman2016deterministic,tubman2020modern}. The idea of systematically selecting important bases based on perturbation theory was developed about 50 years ago~\cite{bender1969pr,whitten1969jcp,huron1973jcp,buenker1974tca,buenker1975tca}, and a selection scheme based on Monte Carlo methods was proposed in the 1990s~\cite{greer1995jcp,greer1998jcpss}. However, systematically selected CI was not widely used in quantum chemistry calculations for many years. Recently, it has undergone rapid development and is now becoming applicable to large-scale quantum chemical simulations~\cite{evangelista2014jcp,holmes2016jctc,schriber2016jcp,holmes2016jctc2,tubman2016deterministic,ohtsuka2017jcp,schriber2017jctc,sharma2017semistochastic,chakraborty2018ijqc,coe2018jctc,coe2019jctc,abraham202jctc,tubman2020modern,zhang2020jctc,zhang2021jctc,chilkuri2021jcc,chilkuri2021jctc,goings2021jctc,pineda2021jctc,jeong2021jctc,coe2023jctc,seth2023jctc}. Indeed, Tubman \textit{et al.} showed that ASCI is capable of handling 34 electrons in 152 spatial orbitals~\cite{tubman2020modern}.
ASCI has hyperparameters that define the size of the search space to adaptively select the configurations, and we will see in Appendix~\ref{ssec:comparison-to-asci} that, with some set of hyperparameters, QSCI can perform better than ASCI. \subsection{Outlook} \label{ssec:outlook} QSCI is applicable to diverse systems, and has many directions for generalizations. \begin{itemize} \item It would be possible to consider a hybrid of the proposed method and another adaptive selected CI method, such as ASCI, by combining the configurations suggested by QSCI with those of the other method. In this way, one could improve the results of the state-of-the-art selected CI methods by using quantum computers. \item QSCI is essentially a selected CI where the configurations are randomly selected according to a probability distribution $p(x) = \abs{\braket{x}{\psi_{\mathrm{in}}}}^2$. A classical counterpart of this approach is called Monte-Carlo configuration interaction (MCCI)~\cite{greer1995jcp,greer1998jcpss}. MCCI does not seem to have been extensively studied since the first proposal in 1995, and the use of a more sophisticated (classical) probability distribution for MCCI is yet to be explored. It would be an interesting future work to use a classically tractable $p(x)$ for MCCI and compare/combine it with QSCI. For example, some of the tensor network states, such as the matrix product states (MPS) and the multi-scale entanglement renormalization ansatz (MERA) states, can be efficiently sampled on classical computers~\cite{ferris2012perfect}. \item Our method, compared to the conventional VQE, has an advantage that it can evaluate physical observables classically with no additional quantum computational cost. One may leverage this feature by using QSCI for the geometry optimization problem of a molecule, or a molecular dynamics calculation. 
In those applications, one may skip the sampling for some iterations, and continue to run with the same state subspace defined by the $R$ electron configurations, thereby reducing the quantum computational cost further. \end{itemize} We remark that the performance of QSCI depends highly on the quality of the input state. It would be great if there is a way to start from an input state with modest quality, and then improve the quality of the input state by an iterative use of QSCI. \section{Conclusion} \label{sec:conclusion} In this work, we proposed QSCI, a class of hybrid quantum-classical algorithms, to find low-lying eigenvalues and eigenstates of a many-electron Hamiltonian. Taking rough approximations of such eigenstates as input, QSCI selects important electron configurations to represent the eigenstates by sampling the input states on quantum computers, and then classically diagonalizes the Hamiltonian in the subspace(s) spanned by the selected configurations to yield better approximations for the eigenstates and their energies. QSCI is robust against noise and statistical fluctuation, as quantum computation is used only to define the subspaces. A quantum speed-up potentially arises in that sampling a quantum state is, in general, classically intractable. We verified the algorithms for ground and excited states of small molecules by numerical simulations and experiment, where the latter was conducted on the quantum device with the 8-qubit quantum circuits. We discussed potential utility of QSCI in various aspects: for instance, taking a state obtained by VQE as the input state, QSCI can be used to refine the VQE result, which may not be accurate enough due to statistical fluctuation, physical noise, and poor optimization; QSCI can be used as a technique for eigenstate tomography, which enables estimation of a variety of observables with no additional quantum computational cost. 
We also argued that QSCI is potentially capable of tackling challenging molecules such as the chromium dimer by exploiting quantum devices with several tens of qubits, assisted by a high-performance classical computing resource for diagonalization. \section{The effect of post-selection \label{subsec: post-selection} } In this section, we discuss the effect of post-selection to mitigate errors in QSCI. To be specific, we consider the post-selection technique introduced in Sec.~\ref{subsec:ground-state}, which exploits the conservation of particle number and spin, targeting the bit-flip noise. In the following, we consider measuring an $N$-qubit computational basis state. We assume that the state is prepared without the effects of noise but each bit of the measurement result is flipped with error probability $p$. This is equivalent to the situation where the bit-flip noise is introduced to each qubit independently after the input state is generated. The probability that the $N$-bit string describing the state is measured correctly is $(1-p)^N$. \subsection{Jordan-Wigner mapping} Let us now assume that we consider an electronic Hamiltonian converted by the Jordan-Wigner mapping. In this case, the number of 1's in the $N$-bit string, which we denote by $n_1$, corresponds to the number of electrons in the system and is sometimes known prior to the calculation for a ground state or an excited state. One can thus perform post-selection on the measurement outcomes, discarding bit strings whose number of 1's is not equal to $n_1$. Although one may still get incorrect results, the probability is reduced. More concretely, the probability to get a result with correct $n_1$ is \begin{equation} (1-p)^N+n_0n_1p^2(1-p)^{N-2}+O(p^4), \end{equation} where we define $n_0:=N-n_1$. 
After the post-selection, the probability to get the correct result is thus \begin{align} &\dfrac{(1-p)^N}{(1-p)^N+n_0n_1p^2(1-p)^{N-2}+O(p^4)}\\ &=\dfrac{1}{1+n_0n_1p^2/(1-p)^2+O(p^4)}\\ &=\dfrac{1}{1+n_0n_1p^2+O(p^3)}\\ &=1-n_0n_1p^2+O(p^3). \end{align} The error rate of getting an incorrect result is reduced from $1-(1-p)^N\sim pN$ to $n_0n_1p^2$ with the ratio being \begin{equation} \dfrac{n_0n_1p^2}{pN}=\left(\dfrac{n_0}{N}\right)\left(\dfrac{n_1}{N}\right)Np \sim O(pN), \end{equation} which is less than one in a sensible situation $Np \ll 1$, where the original success probability $(1-p)^N$ is not too small. Although we here considered a computational basis state as the input state, we expect that the post-selection similarly works for a general input state that is a superposition of computational basis states with some fixed $n_1$. Note that, if one also knows the total spin $S_z$ of electrons prior to the calculation, one can count the number of 1's separately for up- and down-spin electrons, and make the post-selection more efficient. \subsection{Other mappings} For most of the other fermion-qubit mappings, it is not expected that the reduction of the error probability from $O(p)$ to $O(p^2)$ happens. For example, in the parity mapping~\cite{bravyi2002fermionic,seeley2012bravyi} and the Bravyi-Kitaev mapping~\cite{bravyi2002fermionic}, the states $\ket{01}$ and $\ket{11}$ are connected by just one bit flip, but both of them are one-electron states. This bit flip, which cannot be detected by the post-selection, occurs with probability $O(p)$, and thus the error rate after the post-selection is still $O(p)$. The same is true for a Hamiltonian with a reduced number of qubits using symmetries, where there is always a bit flip that does not change the total number of electrons. \section{Details of the algorithms} In this section, we present several detailed discussions on the QSCI algorithms. 
\subsection{Choice of $\beta_i$ parameters and variational inequalities in sequential diagonalization scheme} \label{subsec:details-sequential} Here we discuss the sequential diagonalization scheme, introduced in Sec.~\ref{sssec:method-sequantial}, on how to choose the $\beta_i$ parameters and a potential violation of the variational inequality, following the discussion in Ref.~\cite{higgott2019variational}. Suppose $k$ low-lying eigenstates of $\hat{H}$, $\ket{E_0}, \cdots, \ket{E_{k-1}}$, are known exactly. Then, the effective Hamiltonian to find the $k$-th eigenstate can be exactly constructed as \begin{align} \hat{H}^{(k)\prime} =\hat{H}+ \sum_{i=0}^{k-1}\beta_i \ket{E_i}\bra{E_i}. \end{align} This can be formally expressed as \begin{align} \hat{H}^{(k)\prime} = \sum_{i=0}^{k-1}(E_i + \beta_i) \ket{E_i}\bra{E_i} + \sum_{i\geq k} E_i \ket{E_i}\bra{E_i}, \end{align} where $E_i$ represents the $i$-th eigenvalue of $\hat{H}$ in this appendix. For $\beta_i > E_k - E_i$ ($i=0,\cdots, k-1$), the following inequality holds for an arbitrary $\ket{\psi}$ with $\bra{\psi}\ket{\psi}=1$: \begin{align} \bra{\psi} \hat{H}^{(k)\prime} \ket{\psi} \geq E_k, \end{align} where the equality holds if and only if $\ket{\psi}=\ket{E_k}$ up to a phase factor. In the language of the eigenvalue problem of Eq.~\eqref{eq:eigenvalue-eq}, this implies $E_{R_k}^{(k)\prime}\geq E_k$, where $E_{R_k}^{(k)\prime}$ is the smallest eigenvalue of $\bm{H}_{R_k}^{(k)\prime}$, the subspace matrix for $\hat{H}^{(k)\prime}$, defined in the same way as Eq.~\eqref{eq:sequential-matrix}. In practice, the condition $\beta_i > E_k - E_i$ can be utilized if one has prior knowledge on the energy spectrum, e.g., based on variational quantum algorithms. 
But even without such knowledge, one may still rely on the stronger condition of $\beta_i > 2\sum_j \abs{c_j}$~\cite{higgott2019variational}, which is written in terms of the coefficients $c_j$ of the qubit Hamiltonian $\hat{H}=\sum_j c_j P_j$, expressed by the Pauli strings $P_j$. In reality, the effective Hamiltonian cannot be exactly constructed as the $k$ low-lying eigenstates would be obtained only approximately and, hence, the inequality $E_{R_k}^{(k)}\geq E_k$ is not guaranteed. For instance, in the problem to find the first excited state, the effective Hamiltonian $\hat{H}^{(1)}$ is constructed with $|\psi_{\rm out}^{(0)}\rangle$, the output state for the ground state obtained by the preceding step in sequential diagonalization. Unless the output state perfectly overlaps with the true ground state $\ket{E_0}$, or $|\langle\psi_{\rm out}^{(0)}| E_0\rangle|=1$, there is no guarantee that $\ev{\hat{H}^{(1)}}{\psi}$ is bounded by the exact eigenvalue $E_1$. Instead, $\min_\psi \ev{\hat{H}^{(1)}}{\psi}$ is only bounded as~\cite{higgott2019variational}: \begin{align} E_1 - O((E_1-E_0)\epsilon_0) \leq \min_\psi \ev{\hat{H}^{(1)}}{\psi} \leq E_1 +\beta_0\epsilon_0, \end{align} where $\epsilon_0 = 1 - |\langle\psi_{\rm out}^{(0)}| E_0\rangle|^2$ and $\bra{\psi}\ket{\psi}=1$. A concrete example for breaching the variational inequality is given as follows. Consider a system with the unique ground state, i.e., $E_0 < E_1$. Suppose that one has a poor output state $|\psi_{\rm out}^{(0)}\rangle$ that is orthogonal to the true ground state $\ket{E_0}$, i.e., $\epsilon_0=1$. Then, $\bra{\psi}\hat{H}^{(1)} \ket{\psi} \geq E_0$ for any positive $\beta_0$, where the equality holds if and only if $\ket{\psi}=\ket{E_0}$ up to a phase factor. This means that the variational inequality $E_{R_1}^{(1)}\geq E_1 (> E_0)$ is violated at least in the limit where the subspace $\mc{S}_{R_1}^{(1)}$ is enlarged to cover the part of the Fock space necessary to express $\ket{E_0}$. 
Such a subspace can be constructed, e.g., if the input state is chosen to be $|\psi_{\rm in}^{(1)}\rangle = \ket{E_0}$ with a sufficiently large $R_1$. \subsection{An optimal shot allocation for evaluating expectation values of multiple observables in conventional method} \label{subsec:appendix-scaling-multiple-operators} In this subsection, we describe the details of the numerical estimation of computational cost in Sec.~\ref{subsec:scaling} for evaluating multiple operators. In the numerical simulation, we considered a situation where we want to calculate the expectation values of the nuclear gradient $\left\{\pdv{\hat{H}}{x_i} \mid i=1,\dots,3N_{\text{atom}} \right\}$ and the nuclear Hessian $\left\{\pdv{\hat{H}}{x_i}{x_j} \mid i,j=1,\dots,3N_{\text{atom}} \right\}$ along with the Hamiltonian $\hat{H}(\left\{x_i\right\})$, where $x_i$ are the nuclear coordinates and $N_\mr{atom}$ is the number of atoms in the molecule. The most naive way of doing it would be to calculate each expectation value completely separately. This is, though, too naive to be considered as the optimal strategy; all the observables are linear combinations of operators $a^\dagger_i a_j$ and $a^\dagger_i a_j a^\dagger_k a_l$ in the fermionic basis, and the expectation values of these operators can be reused among the operators. Before discussing the optimal strategy for evaluating expectation values of multiple observables, let us review the one for a single observable, following the discussion in Ref.~\cite{rubin2018application}. Consider a quantum state $\ket{\psi}$ and the expectation value of an operator $\hat{O}$ which can be written as a sum of operators $\hat{O}_l$: \begin{equation} \hat{O}=\sum_{l=1}^{L}\hat{O}_l. \end{equation} Each term $\hat{O}_l$ can be either a Pauli string or a sum of Pauli strings that commute with each other, which admits the projective measurement on eigenvalues of each $\hat{O}_l$. 
We denote the variance of each term $\hat{O}_l$ per one shot by $\sigma_l^2:=\text{Var}(\hat{O}_l) := \ev{\hat{O}_l^2}{\psi}-\ev{\hat{O}_l}{\psi}^2$. By measuring each term $\hat{O}_l$ with a number of shots $M_l$, the observed expectation value has the variance $\sum_l \sigma_l^2/M_l$. Employing the method of Lagrange multiplier with the Lagrangian \begin{equation} \mathcal{L}=\sum_l M_l +\lambda \left( \sum_l \dfrac{\sigma_l^2}{M_l} -\epsilon^2\right), \end{equation} one can get the optimal allocation of the number of shots with the total variance of the expectation value fixed to $\epsilon^2$, which is \begin{equation} M_l\propto \sigma_l. \end{equation} In general, $\sigma_l$ is not exactly known a priori, so one may use $\sigma_l$ for Haar random states to get a reasonable strategy. One may also try to improve the strategy by dividing the shot budget for one evaluation of an expectation value into several iterations: one can simply evaluate the expectation value with a mildly optimized strategy in the first iteration, and then, in the rest of the iterations, one can adjust the strategy by calculating $\sigma_l$ by using the expectation values obtained in the previous iterations. Generalizing the above discussion, let us consider a situation where one calculates the expectation values of a set of operators $\left\{\hat{O}^{(i)} \mid i=1,\dots,n\right\}$. We assume that $\hat{O}^{(i)}$ is decomposed as \begin{equation} \hat{O}^{(i)}=\sum_{l=1}^{L} \hat{O}^{(i)}_l, \end{equation} where all of $\left\{\hat{O}^{(i)}_l\mid i=1,\dots,n\right\}$ are simultaneously measurable for each $l$, i.e., $[\hat{O}^{(i)}_l, \hat{O}^{(j)}_l] = 0$ for any $i,j$. In our numerical simulation, the grouping was done by firstly taking the sum of all the observables $\hat{O}^{(i)}$ with each Pauli string with negative coefficient multiplied by $-1$ to make it positive. Then the greedy qubit-wise grouping of Refs.~\cite{mcclean2016theory, crawford2021efficient} was used. 
Our aim here is to find a good strategy to estimate the expectation values of all the operators $\hat{O}^{(i)}$ with statistical error less than $\epsilon$. Note that one can always rescale the observables so that the required precision is the same for all observables even when one requires different precision for different operators. To get an analytical solution, we choose the following Lagrangian with a slightly modified constraint, \begin{equation} \mathcal{L}=\sum_l M_l +\lambda \left(\sum_l \sum_i \left(\dfrac{{\sigma_l ^{(i)}}^2}{M_l}\right)-\epsilon_{\text{tot}}\right), \end{equation} where $\epsilon_{\text{tot}}$ can be $n\times \epsilon^2$ but it turns out that the choice of $\epsilon_{\text{tot}}$ does not affect the final result. By solving the extremal condition of this Lagrangian, one can get the best shot allocation that minimizes the total number of shots, while keeping the sum of the variances of all the operators less than $\epsilon_{\text{tot}}$. The result implies that \begin{equation} M_l\propto \sqrt{\sum_i {\sigma_l^{(i)}}^2}. \end{equation} By estimating the variance of each operator $\hat{O}^{(i)}$ with this shot allocation, and by adjusting the total number of shots so that the statistical error of each operator is $\epsilon$ at worst, one can obtain the total number of shots $\sum_l M_l$ with the desired precision for all the operators. This may not be the optimal shot allocation to achieve the statistical error $\epsilon$ for each operator as we are minimizing the total variance rather than the maximum value of the variances, but this will give a reasonable strategy that is analytically available. There is one comment to make on the evaluation of nuclear Hessians. 
In the following, the expectation value is always taken by an ansatz state $\ket{\psi(\bm{\theta}(\left\{x_i\right\}))}$ parametrized by the ansatz parameters $\bm{\theta}(\left\{x_i\right\})$; we assume that $\bm{\theta}(\left\{x_i\right\})$ is optimized so that $\ket{\psi(\bm{\theta}(\left\{x_i\right\}))}$ has the minimum energy within the ansatz for each $\left\{x_i\right\}$. We denote the energy expectation value of the state by $E(\left\{x_i\right\})$. In the case of the nuclear gradient, \begin{equation} \pdv{E(\left\{x_i\right\})}{x_i}=\expval{\pdv{\hat{H}(\left\{x_i\right\})}{x_i}} \end{equation} holds thanks to the Hellmann-Feynman theorem, and it suffices to compute the right-hand side to obtain the nuclear gradient of the energy. In the case of the nuclear Hessian of the energy~\cite{mitarai2020theory}, on the other hand, it is in general necessary to evaluate the contribution of derivatives acting on the state as well as on the Hamiltonian operator, \begin{equation} \pdv{E(\left\{x_i\right\})}{x_i}{x_j}=\expval{\pdv{\hat{H}(\left\{x_i\right\})}{x_i}{x_j}}+\dots \end{equation} In our numerical simulation, we ignored the contribution of the derivatives acting on the state for simplicity. In the case of QWC, this contribution requires additional quantum resources. On the other hand, in the case of QSCI, one can generate and diagonalize the Hamiltonians at small finite distance $x_i\to x_i\pm \delta$ to get the derivatives of the state within the same selected subspace of the Fock space with no additional quantum resources. If we take this contribution into account properly, the advantage of QSCI will increase. It should also be noted that, although we evaluate $O(N_{\text{atom}}^2)$ observables in the numerical simulations for the hydrogen chain in the main text, due to the rich geometrical symmetry of the molecules, many of the observables are zero as an operator. 
It is likely that, for more generic molecules, the crossing-point of QSCI and QWC comes at a larger number of qubits. \section{Details of numerical simulations and experiments} \label{sec:appendix-details-of-sim-and-exp} In this section, we explain details of the numerical simulations and the experiment on quantum hardware in the main text. For all the molecules examined in this study, the second-quantized electronic Hamiltonian under the Born-Oppenheimer approximation is generated by OpenFermion~\cite{mcclean2020openfermion} interfaced with PySCF~\cite{sun2018pyscf} using the Hartree-Fock orbitals with the STO-3G minimal basis set, unless otherwise stated. The electronic Hamiltonians are mapped to qubit ones by the Jordan-Wigner transformation. The molecular geometries used in our study are shown in Table~\ref{tab: geometries}. Stable geometries for diatomic molecules are taken from the CCCBDB database~\cite{johnson2022nist} and Ref.~\cite{wang2016relativistic}, while those for the other molecules are taken from PubChem~\cite{kim2023pubchem}, except for the hydrogen chains which are not in their stable geometries. We list the details specific to each of the simulations and the experiment in the following. \begin{table*}[] \caption{Geometries of molecules. ``$(\mr{X}, (x,y,z))$'' denotes the three-dimensional coordinates $x,y,z$ of an atom X in units of \AA. 
\label{tab: geometries} } \begin{tabular}{c|p{12cm}} \hline \hline Molecule & Geometry \\ \hline \ce{H2O} & (O, (0, 0, 0)), (H, (0.2774, 0.8929, 0.2544)), (H, (0.6068, -0.2383, -0.7169)) \\ \ce{H}$_n$ ($n=4,6,8,10,12)$ & (H, (0, 0, 0)), (H, (0, 0, 1.0), \dots, (H, (0, 0, $n \times 1.0$)) \\ \ce{LiH} & (Li, (0, 0, 0)), (H, (0, 0, 1.595))\\ \ce{N2} & (N, (0, 0, 0)), (N, (0, 0, 1.1))\\ \ce{O2} & (O, (0, 0, 0)), (O, (0, 0, 1.2))\\ \ce{F2} & (F, (0, 0, 0)), (F, (0, 0, 1.4))\\ \ce{Cl2} & (Cl, (0, 0, 0)), (Cl, (0, 0, 2.0))\\ \ce{HCl} & (H, (0, 0, 0)), (Cl, (0, 0, 1.3))\\ \ce{CO} & (C, (0, 0, 0)), (O, (0, 0, 1.1))\\ \ce{Cr2} & (Cr, (0, 0, 0)), (Cr, (0, 0, 1.6))\\ \ce{Benzene} &(C, (-1.2131, -0.6884, 0)), (C, (-1.2028, 0.7064, 0.0001)), (C, (-0.0103, -1.3948, 0)), (C, (0.0104, 1.3948, -0.0001)), (C, (1.2028, -0.7063, 0)), (C, (1.2131, 0.6884, 0)), (H, (-2.1577, -1.2244, 0)), (H, (-2.1393, 1.2564, 0.0001)), (H, (-0.0184, -2.4809, -0.0001)), (H, (0.0184, 2.4808, 0)), (H, (2.1394, -1.2563, 0.0001)), (H, (2.1577, 1.2245, 0))\\ \ce{Naphthalene} &(C, (0, -0.7076, 0)), (C, (0, 0.7076, 0.0001)), (C, (1.225, -1.3944, 0.0001)), (C, (1.225, 1.3944, 0)), (C, (-1.225, -1.3943, 0)), (C, (-1.225, 1.3943, 0)), (C, (2.4327, -0.6958, 0)), (C, (2.4327, 0.6959, -0.0001)), (C, (-2.4327, -0.6958, -0.0001)), (C, (-2.4327, 0.6958, 0)), (H, (1.2489, -2.4822, 0.0001)), (H, (1.2489, 2.4821, -0.0001)), (H, (-1.2489, -2.4822, -0.0001)), (H, (-1.249, 2.4821, 0.0001)), (H, (3.3733, -1.239, -0.0001)), (H, (3.3732, 1.2391, -0.0001)), (H, (-3.3733, -1.239, -0.0001)), (H, (-3.3732, 1.239, 0))\\ \ce{Anthracene} &(C, (-1.225, 0.706, 0.0001)), (C, (-1.2251, -0.7061, 0.0001)), (C, (1.2251, 0.7061, 0.0002)), (C, (1.2251, -0.7061, 0.0001)), (C, (0, 1.3937, 0.0001)), (C, (0, -1.3938, 0)), (C, (-2.4504, 1.393, -0.0001)), (C, (-2.4505, -1.393, 0)), (C, (2.4505, 1.3929, 0)), (C, (2.4505, -1.3929, 0)), (C, (-3.6587, 0.6956, -0.0001)), (C, (-3.6588, -0.6955, -0.0001)), (C, (3.6587, 0.6956, -0.0002)), (C, (3.6587, 
-0.6956, -0.0002)), (H, (0, 2.4838, 0)), (H, (0, -2.4839, -0.0001)), (H, (-2.4742, 2.4808, -0.0001)), (H, (-2.4744, -2.4809, 0)), (H, (2.4742, 2.4808, 0)), (H, (2.4743, -2.4808, 0)), (H, (-4.5989, 1.2394, -0.0003)), (H, (-4.5991, -1.2391, -0.0002)), (H, (4.5989, 1.2393, -0.0003)), (H, (4.5989, -1.2393, -0.0004))\\ \ce{Tetracene} & (C, (0, 0.7045, -0.0002)), (C, (0, -0.7046, -0.0001)), (C, (-2.451, 0.7058, 0)), (C, (-2.4511, -0.7058, 0.0002)), (C, (2.4511, 0.7057, 0.0001)), (C, (2.4511, -0.7058, -0.0001)), (C, (1.2254, 1.3923, -0.0001)), (C, (1.2254, -1.3924, -0.0003)), (C, (-1.2254, 1.3923, -0.0002)), (C, (-1.2255, -1.3923, 0.0002)), (C, (-3.6764, 1.3928, -0.0001)), (C, (-3.6764, -1.3929, 0.0002)), (C, (3.6764, 1.3929, 0.0003)), (C, (3.6765, -1.3929, -0.0001)), (C, (-4.8846, 0.6957, -0.0001)), (C, (-4.8847, -0.6955, 0.0001)), (C, (4.8846, 0.6957, 0.0004)), (C, (4.8847, -0.6956, -0.0001)), (H, (1.2253, 2.4825, -0.0001)), (H, (1.2254, -2.4825, -0.0003)), (H, (-1.2254, 2.4824, -0.0003)), (H, (-1.2255, -2.4824, 0.0003)), (H, (-3.6999, 2.4807, -0.0002)), (H, (-3.7001, -2.4808, 0.0003)), (H, (3.6999, 2.4807, 0.0004)), (H, (3.7001, -2.4807, -0.0003)), (H, (-5.8248, 1.2393, -0.0002)), (H, (-5.8249, -1.2392, 0.0002)), (H, (5.8248, 1.2394, 0.0005)), (H, (5.8249, -1.2392, -0.0002))\\ \hline \hline \end{tabular} \end{table*} \subsection{Noiseless simulation for ground state} \label{subsec:setup-noiseless-vqe} \begin{figure} \caption{Real-valued symmetry-preserving ansatz with $n$ qubits and depth $d$.} \label{fig:rsp-ansatz} \end{figure} In Sec.~\ref{subsec:ground-state-simulation-with-noiseless-vqe}, the \ce{H2O} molecule with six active electrons and five active orbitals, is chosen to find the ground state by QSCI. 
In the VQE calculation for preparing the input states, the BFGS optimizer is employed through the scientific library SciPy~\cite{virtanen2020scipy}, and the real-valued symmetry-preserving ansatz~\cite{ibe2022calculating} is used to construct parametric quantum circuits with depth 10 (Fig.~\ref{fig:rsp-ansatz}). The initial state of the ansatz circuits is set to be the Hartree-Fock state, and the initial parameters in the optimization are randomly chosen. \subsection{Noiseless simulations for excited states} \label{subsec:setup-noiseless-vqd} In Sec.~\ref{subsec:simulation-excited-h2o}, QSCI is demonstrated for the same \ce{H2O} molecule but to find excited states. To prepare the input states, the VQD calculations are performed in the same setup as the previous VQE calculation, but with the penalty terms~\cite{mcclean2016theory,ryabinkin2018constrained,kuroiwa2021penalty} added to the Hamiltonian for constraining the resulting states to have $S_z=0$ and $N_e=6$; specifically, the following operator (in atomic units) is added to the Hamiltonian \begin{equation} 3.0 (\hat{S}_z)^2 + 3.0 (\hat{N}_e-6)^2, \end{equation} where $\hat{S}_z$ is the operator for the total electron spin in $z$-direction, and $\hat{N}_e$ for the particle number operator of electrons in the active space. Furthermore, the overlap terms to constrain the state to be orthogonal to lower energy eigenstates~\cite{higgott2019variational} are added with coefficients of unity (in Hartree). For the sequential diagonalization scheme of QSCI, the coefficients $\beta_i$ for ensuring orthogonality are also set to unity. \begin{figure} \caption{Ry ansatz with 8 qubits. All the rotational gates have independent parameters. 
The depth is set to be 8 in our experiment.} \label{fig:ryansatz} \end{figure} \subsection{Noisy simulation and experiment} \label{ssec:setup-experiment} For the noisy simulation and experiment in Sec.~\ref{sec:noisy-simulation-experiment}, the input states are prepared by noiseless VQE simulations. The VQE calculations are performed with the BFGS optimizer and Ry ansatz (Fig.~\ref{fig:ryansatz}) with depth 8. Other details are described in the main text. \section{Supplemental numerical results} In this section, we provide additional numerical results to supplement the contents in Sec.~\ref{sec:numerical}. \subsection{Scaling of computational costs with various molecules} \label{ssec:more-results-scaling} Figure~\ref{fig:all-scaling} shows the scaling of the classical and quantum computational costs, discussed in Sec.~\ref{subsec:scaling}, for different types of molecules. Here, we test three kinds of molecules: hydrogen chains, diatomic molecules, and aromatic molecules. The data for hydrogen chains are exactly the same as in the main text. Diatomic molecules are \ce{N2}, \ce{O2}, \ce{F2}, \ce{Cl2}, \ce{HCl}, \ce{CO}, and \ce{Cr2} with cc-pVQZ basis, and we tested them with various active spaces just as in the main text for \ce{Cr2}. To test with larger molecules, four aromatic molecules are chosen: benzene, naphthalene, anthracene, and tetracene. The Hamiltonian is generated by using the Hartree-Fock orbitals with STO-3G basis. The active space of $n$ orbitals and $n$ electrons with varying $n$ was employed for the diatomic and aromatic molecules. The geometries of these tested molecules are summarized in Table~\ref{tab: geometries}. As can be seen in Fig.~\ref{fig:all-scaling}, hydrogen chains with various numbers of atoms show the worst scalings, while \ce{Cr2} is one of the least expensive systems among others. 
\begin{figure*} \caption{Estimated $R$ and $1/\abs{c_R} \label{fig:all-scaling} \end{figure*} \subsection{Sampling simulations with various molecules} \label{ssec:appendix-sampling} In this subsection, we present similar results as Fig.~\ref{fig:conventional} but with various other molecules. The results, shown in Fig.~\ref{fig:all-sampling}, show the same features as \ce{H6} in the main text, such as the small standard deviation for QSCI and $1/\abs{c_R}^2$ giving an accurate estimation of the number of shots for given accuracy $\epsilon$. One can also see that the standard deviation is almost constant for hydrogen chains with various numbers of atoms, while the absolute error is highly dependent on the number of atoms. Comparing the three 12-qubit systems, it can be seen that the difference between the standard deviation and the absolute error depends on the system. \begin{figure*} \caption{Sampling simulation with various molecules. See Fig.~\ref{fig:conventional} \label{fig:all-sampling} \end{figure*} \subsection{Accuracy of expectation values of observables other than the Hamiltonian in QSCI} \label{ssec:appendix-multiple-observable-accuracy} \begin{figure} \caption{Histograms of absolute errors of nuclear gradients and Hessians for \ce{H4} \label{fig:appendix-multiple-observables} \end{figure} Here, we examine the accuracy of the expectation values of observables other than the Hamiltonian, estimated for the output state obtained by QSCI calculation. Figure~\ref{fig:appendix-multiple-observables} shows the histograms for absolute errors of the expectation values for the gradient and Hessian, where the absolute error for an observable $\hat{O}$ is defined by \begin{equation} \abs{\ev{\hat{O}}{{\psi_{\rm out}}}-\ev{\hat{O}}{\psi_\text{exact}}}. 
\end{equation} Here, $\ket{{\psi_{\rm out}}}$ is the output state of QSCI calculation with the idealized sampling from the exact ground state with $R$ given in Fig.~\ref{fig:scaling-a} for each error tolerance $\epsilon$ for energy, and $\ket{\psi_\text{exact}}$ is the exact ground state. The observables $\hat{O}$ are set to be the nuclear gradient $\pdv{\hat{H}}{x_i}$ $(i=1,\dots,3N_\text{atom})$ and the Hessian $\pdv{\hat{H}}{x_i}{x_j}$ $(i,j=1,\dots,3N_\text{atom})$, where $N_\text{atom}$ is the number of atoms in the molecule and $x_i$ are coordinates of the nuclei. The absolute error is shown in the unit of Hartree, Hartree/\AA, or $\text{Hartree}/\text{\AA}^2$, depending on the observables. Although there are some observables (i.e., components of the gradient or Hessian) whose expectation values exhibit larger absolute errors than that of the energy, the expectation values of the majority of the observables have similar accuracy as the energy. \subsection{Bond length dependence} \label{ssec:appendix-bond-length} \begin{figure} \caption{Estimated $R$ and $1/\abs{c_R} \label{fig:pec} \end{figure} The Hartree-Fock calculation is known to perform better for a stable geometry of a molecule than for the dissociation limit, so it is worth studying if QSCI also performs worse in the dissociation limit. Figure~\ref{fig:pec} shows the result of the same numerical analysis as Fig.~\ref{fig:scaling-a}, but for various bond lengths of \ce{H2O} molecules. The Hamiltonian is generated by the Hartree-Fock orbitals using STO-3G basis without specifying the active space, and is of 14-qubit after the Jordan-Wigner mapping. The bond lengths of two H-O bonds are taken to be equal, and the H-O-H angle is fixed to \ang{104.45}. The result implies that, although there is some dependency on the bond length for larger $\epsilon$, the dependency disappears for smaller $\epsilon$. 
It can be expected from the result that the potential energy surface calculated by QSCI has a relatively constant accuracy, at least compared to the Hartree-Fock result, when the error tolerance is not very large. \subsection{Comparison to ASCI} \label{ssec:comparison-to-asci} Here, we investigate whether there is a possibility that QSCI outperforms the state-of-the-art selected CI methods by taking ASCI for illustration. ASCI is a selected CI method solely based on classical computation, which adaptively searches for the optimal subspace of the Fock space for the diagonalization. In Fig.~\ref{fig:asci}, we compare QSCI with ASCI, for which we follow the description in Ref.~\cite{tubman2020modern}. Here, we use the QSCI method with the idealized sampling from the ground state obtained by the exact diagonalization (full-CI) calculation. The target molecule is the linear hydrogen chain \ce{H10} with an equal separation of 1.0~\AA. The basis set is STO-3G and the Hamiltonian with the Hartree-Fock orbitals is mapped to the 20-qubit one by the Jordan-Wigner mapping. For ASCI, in addition to the parameter $R$ (called $N_{\text{tdets}}$ in Ref.~\cite{tubman2020modern}), there are two additional parameters: they are denoted by $\epsilon$ and $N_{\text{cdets}}$ in that paper, and are denoted by $\delta$ and $R_{\text{core}}$, respectively, in the following. The parameters $\delta$ and $R_{\text{core}}$ determine the size of the search space for the iterative search for the new determinants, while the cost for the generation and diagonalization of the Hamiltonian, which is common for both ASCI and QSCI, is determined solely by $R$. We fixed $\delta=\SI{0.05}{Hartree}$ and $r:=R/R_{\text{core}}=10$ or $20$ for ASCI, and ran QSCI and ASCI calculations with various $R$. While in the case of $r=10$ the two methods perform similarly, QSCI performs better for $r=20$, where less computational cost is required for searching for a better set of configurations in ASCI. 
The result shows that, depending on the hyperparameters for ASCI, there is a possibility that QSCI performs better, at least in the case of the idealized sampling from the exact ground state. \begin{figure} \caption{Comparison of ASCI and QSCI for \ce{H10} \label{fig:asci} \end{figure} \end{document}
\begin{document} \title{Simulation of memristive synapses and neuromorphic computing on a quantum computer} \author{Ying Li} \affiliation{Graduate School of China Academy of Engineering Physics, Beijing 100193, China} \begin{abstract} One of the major approaches to neuromorphic computing is using memristors as analogue synapses. We propose unitary quantum gates that exhibit memristive behaviours, including Ohm's law, a pinched hysteresis loop and synaptic plasticity. Hysteresis depending on the quantum phase and long-term plasticity that encodes the quantum state are observed. We also propose a three-layer neural network with the capability of universal quantum computing. Quantum state classification on the memristive neural network is demonstrated. Our results pave the way towards brain-inspired quantum computing. We obtain these results in numerical simulations and experiments on the superconducting quantum computer \textit{ibmq{\textunderscore}vigo}. \end{abstract} \maketitle \section{Introduction} Neuromorphic computing is a brain-inspired computer paradigm in contrast with the von Neumann architecture~\cite{Mead1990, Schuman2017}. According to the biological model of the brain, the information is stored and processed by a highly connected network formed of neurons, which provides the ability of learning, parallel and low energy cost computing, etc. Since the 1940s, it has been realised that how neurons wire up is essential~\cite{Hebb1949}. Besides neuroscience, this observation also motivates the development of computer programming, such as the neural network algorithms widely used in today's machine learning technologies~\cite{Nielsen2015, Goodfellow2016}. In terms of the learning rule of neurons, spike-timing-dependent plasticity (STDP) is a biologically plausible model that has gained great attention in recent years~\cite{Caporale2008, Markram2011, Feldman2012}. 
In STDP, the synapse is strengthened or weakened depending on the temporal order between spikes of pre- and post-synaptic neurons [see Fig.~\ref{fig:neural_network}(a)]. In this way, the brain can establish causal relationships between events. Quantum computing uses quantum phenomena and is superior to classical computing in solving certain problems~\cite{Nielsen2010}. For example, to solve the integer factorisation problem, Shor's quantum algorithm takes polynomial time with respect to the integer size, which is exponentially faster than the most efficient known classical algorithm~\cite{Shor1994}. In the circuit-based universal quantum computer, information is encoded in qubits and processed with unitary gates~\cite{Deutsch1985}. This kind of quantum machines is still under development but already demonstrates the power of surpassing classical computers~\cite{Google, IBM}. Because the quantum computer for large-scale computing is not available yet, variational quantum algorithms are proposed for the near-future applications~\cite{Peruzzo2014, Farhi2014, Li2017}. Quantum neural networks are generalisations of classical artificial neural networks, in which unitary gates in the quantum circuit are taken as variables~\cite{Beer2020, Wan2017, Romero2017, Cao2017, Farhi2018, Mitarai2018, Grant2018, Schuld2020, Killoran2019, Steinbrecher2019}. \begin{figure} \caption{ (a) Pre- and post-synaptic neurons. (b) A memristor. In the quantum regime, we use qubits to represent the input/output current and the resistance of the memristor. (c) Memristive gate $M_\theta$ decomposed into elementary quantum gates, where $R_{\rm z} \label{fig:neural_network} \end{figure} The memristor is a resistor with memory and one of the fundamental two-terminal circuit elements~[see Fig.~\ref{fig:neural_network}(b)]~\cite{Chua1971, Chua1976}. Its resistance decreases or increases depending on the input signal, i.e.~the voltage or current. 
Memristance can explain STDP in biological synapses~\cite{LinaresBarranco2009}. Since the first memristive device was found in 2008~\cite{Strukov2008}, the application as hardware analogue of synapse in neuromorphic computing has been extensively investigated~\cite{Schuman2017}, mainly because memristive devices demonstrate behaviours similar to STDP~\cite{Jo2010, Serb2016}. In this paper, we propose memristor-like unitary quantum gates. These gates have the characteristic memristive property, i.e.~hysteretic resistance state~\cite{Chua1976, Strukov2008}. Given an oscillatory input state, the output-input observables display a pinched hysteresis loop. We find that the loop depends on not only the classical distribution but also the phase of the input quantum state, which reflects the quantum nature of memristive gates. Using these gates to mimic synapses, we observe the long-term potentiation (LTP) and long-term depression (LTD), which are crucial for learning and memory in the neural network~\cite{Caporale2008, Feldman2012}. We show that quantum information can also be encoded in a manner similar to the long-term plasticity. Therefore, a neuromorphic computer based on the memristive gates can process quantum information. \begin{figure*} \caption{ Hysteresis loops of memristive gates. Here $\mean{Z_{\rm C} \label{fig:memristor} \end{figure*} An artificial neural network with three layers is proposed as an example of the neuromorphic system based on memristive quantum gates, as shown in Fig.~\ref{fig:neural_network}(d). Neurons in the input and hidden layers are qubits, and neurons in the output layer are classical bits. Two quantum layers are wired up by memristive gates, and output bits are measurement outcomes of hidden-layer qubits. Compared with the general quantum neural network~\cite{Beer2020}, the number of variational parameters is significantly reduced with respect to the number of neurons and synapses. 
Each connection between an input neuron and a hidden-layer neuron is characterised by two variational parameters (i.e.~weights), and each connection to an output neuron is characterised by only one parameter. We prove that such a three-layer memristive neural network is as powerful as a universal quantum computer~\cite{Deutsch1985} up to a polynomial overhead. The application of the neural network is demonstrated in quantum state classification tasks~\cite{Farhi2018, Grant2018, Schuld2020, Gao2018}. All the results are demonstrated with numerical simulations using QuESTlink~\cite{Jones2019} and experiments on the quantum computer {\it ibmq{\textunderscore}vigo}. An example circuit realisation of memristive quantum gates is given in Fig.~\ref{fig:neural_network}(c). Alternative circuits are used in experiments for minimising the impact of errors. Details of numerical simulations and experiments are in the Appendix. \section{Memristive quantum gates} To find quantum gates with the memristive properties, we introduce a simplified picture of the memristor, which is different from actual memristive devices~\cite{Strukov2008}. When we send the input current to the memristor, the current is transmitted or reflected depending on the state of the memristor, and the state of the memristor evolves depending on the input current. If the input current is from A to B~[see Fig.~\ref{fig:neural_network}(b)], the resistance of the memristor decreases. If the input current is from B to A, the resistance increases. We use one qubit to represent the current state: $\ket{0}_{\rm C}$ and $\ket{1}_{\rm C}$ denote currents from A to B and from B to A, respectively. We use another qubit to represent the resistance state: $\ket{0}_{\rm R}$ and $\ket{1}_{\rm R}$ denote transmission and reflection, respectively. 
In the extreme case, the resistance state can be completely flipped in one shot, then the memristor is the transformation $\ket{0}_{\rm C}\otimes\ket{0}_{\rm R} \rightarrow \ket{0}_{\rm C}\otimes\ket{0}_{\rm R}$, $\ket{0}_{\rm C}\otimes\ket{1}_{\rm R} \rightarrow \ket{1}_{\rm C}\otimes\ket{0}_{\rm R}$, $\ket{1}_{\rm C}\otimes\ket{0}_{\rm R} \rightarrow \ket{1}_{\rm C}\otimes\ket{1}_{\rm R}$ and $\ket{1}_{\rm C}\otimes\ket{1}_{\rm R} \rightarrow \ket{0}_{\rm C}\otimes\ket{1}_{\rm R}$. The key point is that input states and output states of this transformation are both orthogonal. Therefore, it can be a unitary transformation, i.e.~a quantum gate. Now, we consider the general case that the resistance state is rotated by a finite angle of $\pi - 2\theta$ when it is not saturated. The corresponding unitary transformation reads \begin{eqnarray} M_\theta = \left( \begin{array}{cccc} 1 & 0 & 0 & 0 \\ 0 & 0 & 0 & e^{i\theta} \\ 0 & \cos\theta & i\sin\theta & 0 \\ 0 & ie^{-i\theta}\sin\theta & e^{-i\theta}\cos\theta & 0 \end{array}\right), \end{eqnarray} where basis vectors are sorted as $\ket{0}_{\rm C}\otimes\ket{0}_{\rm R}$, $\ket{0}_{\rm C}\otimes\ket{1}_{\rm R}$, $\ket{1}_{\rm C}\otimes\ket{0}_{\rm R}$ and $\ket{1}_{\rm C}\otimes\ket{1}_{\rm R}$. When $\theta = 0$, $M_\theta$ can flip the resistance state in one shot as in the extreme case. When $\theta$ is finite, the gate transforms the input state $\ket{0}_{\rm C}\otimes\ket{1}_{\rm R}$ into $\ket{1}_{\rm C}\otimes(\cos\theta\ket{0}_{\rm R} + ie^{-i\theta}\sin\theta\ket{1}_{\rm R})$, i.e.~the current is reflected, and the resistance state is rotated by a finite angle. It is similar for the input state $\ket{1}_{\rm C}\otimes\ket{0}_{\rm R}$. We can find that the influence of the input current on the resistance state is minimised at $\theta = \frac{\pi}{2}$. Many similar memristive gates can be constructed. For example, we can change the phases $e^{i\theta}$ and $e^{-i\theta}$, and the gate is still memristor-like. 
We choose the phases such that the gate $M_\theta$ can be used for encoding a quantum state and implementing universal quantum computing on the neural network, as we will show later. In some scenarios, we want to use different qubits to represent the states of two terminals A and B. For example, we use two qubits A and B to represent the voltages of two terminals. We can modify the memristive gate by taking $\ket{0}_{\rm C} = \ket{1}_{\rm A}\otimes\ket{0}_{\rm B}$ and $\ket{1}_{\rm C} = \ket{0}_{\rm A}\otimes\ket{1}_{\rm B}$. Then, a three-qubit memristive gate is $\widetilde{M}_\theta = M_\theta \oplus \openone_4$, where $\openone_4$ is the four-dimensional identity matrix acting on the subspace of $\ket{0}_{\rm A}\otimes\ket{0}_{\rm B}\otimes\ket{\mu}_{\rm R}$ and $\ket{1}_{\rm A}\otimes\ket{1}_{\rm B}\otimes\ket{\mu}_{\rm R}$, i.e.~the state of memristor does not change when two terminals have the same voltage. Qubits A and B can also be used to represent the spike timings of two neurons when the resistance qubit mimics the synapse. Memristive quantum gates for multi-state current and resistance can be constructed in a similar way. In this paper, we focus on the two-qubit gate for simplicity. \begin{figure} \caption{ (a) Classical and (b,c) quantum long-term plasticity based on memristive gates. Thin solid curves represent numerical results, and filled circles represent experimental results. Empty circles denote initial values in the experiments. In (b) and (c), we take the same values of the parameter $\theta$. Dashed horizontal lines denote values in the input state of current qubits, i.e.~the steady state. $P = X,Y,Z$ are Pauli operators. The quantum state is successfully encoded when three $\mean{P_{\rm R} \label{fig:LTP_encoding} \end{figure} \section{Memristive behaviour} Let $\rho_{\rm C}$ and $\rho_{\rm R}$ be input states of the current qubit and resistance qubit, respectively. 
Then the output state after the memristive gate is $\rho_{\rm out} = M_\theta \rho_{\rm C}\otimes \rho_{\rm R} M_\theta^\dag$. If we consider mean values of the Pauli operator $Z$, we can find Ohm's law of the memristive gate, i.e.~$\mean{Z_{\rm C}}_{\rm out} = \mean{Z_{\rm R}}_{\rm in} \mean{Z_{\rm C}}_{\rm in}$, where $\mean{Z_{\rm C}}_{\rm in} = \mathrm{Tr}(Z\rho_{\rm C})$, $\mean{Z_{\rm R}}_{\rm in} = \mathrm{Tr}(Z\rho_{\rm R})$ and $\mean{Z_{\rm C}}_{\rm out} = \mathrm{Tr}(Z\otimes I \rho_{\rm out})$ play the roles of voltage, conductance and current, respectively. See Appendix~\ref{app:Ohm}. Here, $I$, $X$, $Y$ and $Z$ are Pauli operators. To demonstrate the hysteretic behaviour, we let the resistance qubit interact with a sequence of current qubits in the input states $\rho_{\rm C}^{(0)},\rho_{\rm C}^{(1)},\ldots,\rho_{\rm C}^{(t)},\ldots$ one by one through memristive gates. These states have an oscillatory observable $\mean{Z_{\rm C}}_{\rm in}(t)$, and $t$ is the label of the time. Driven by current qubits, the resistance state (i.e. the conductance) evolves with $t$, which results in the hysteretic behaviour. The $\mean{Z_{\rm C}}_{\rm out}$-versus-$\mean{Z_{\rm C}}_{\rm in}$ (i.e. current-versus-voltage) hysteresis loops are shown in Fig.~\ref{fig:memristor}. We take $\rho_{\rm C}^{(t)} = \ketbra{\psi(t)}{\psi(t)}$ as pure states, where $\ket{\psi(t)} = \cos\frac{\delta\phi t}{2}\ket{0} + \eta \sin\frac{\delta\phi t}{2}\ket{1}$, and $\eta = 1,i$ in (a) and (b), respectively. In both cases, $\mean{Z_{\rm C}}_{\rm in}(t) = \cos(\delta\phi t)$. However, the phases of quantum states are different. As a result, hysteresis loops have different shapes. \section{Long-term plasticity} In STDP, causal events increase the strength of a synapse, and acausal events decrease the strength, which are called LTP and LTD, respectively. LTP and LTD can be mimicked using the memristor~\cite{Serb2016}. 
In the memristive gate, the resistance state evolves driven by the current qubit. The output state of the resistance qubit is $\mathcal{M}_{\theta,\rho_{\rm C}}(\rho_{\rm R}) = \mathrm{Tr}_{\rm C}(\rho_{\rm out})$, where $\mathrm{Tr}_{\rm C}$ denotes the partial trace on the current qubit, and $\mathcal{M}_{\theta,\rho_{\rm C}}$ is a completely positive map depending on $\theta$ and the input state $\rho_{\rm C}$ of the current qubit. The steady state of the map is $\rho_{\rm s} = \frac{1}{2}(I + \mean{Z_{\rm C}}_{\rm in}Z)$ (see Appendix~\ref{app:SteadyStates}). Therefore, after the interaction with a sequence of current qubits in the same input state, the conductance of the memristor converges to $\mean{Z_{\rm R}}_{\rm s} = \mean{Z_{\rm C}}_{\rm in}$, i.e.~the classical information of the current qubit is encoded into the resistance qubit. To demonstrate LTP and LTD phenomena mimicked using memristive gates, we take $\rho_{\rm C} = \ketbra{0}{0}$ and $\rho_{\rm C} = \ketbra{1}{1}$ to represent causal events in LTP and acausal events in LTD, respectively. We also take $\rho_{\rm C} = \ketbra{+}{+}$ to represent stochastic events (SE) without a definite causal order, where $\ket{\pm} = \frac{1}{\sqrt{2}}(\ket{0}\pm\ket{1})$. The results of numerical simulation and experiments are shown in Fig.~\ref{fig:LTP_encoding}(a). In three-qubit memristive gates, we can use qubits A and B to represent spike timings of two neurons, which will lead to similar results. \section{Encoding quantum states} Memristive gates can also encode quantum information into the resistance qubit. In LTP and LTD processes, only the classical information is encoded because the phase information is not preserved. The current qubit is flipped or not flipped depending on the resistance state. Therefore, the two qubits are correlated in the $Z$ direction in the output state, which damages the phase information. 
To restore the phase, we can measure the output current qubit in the $X$ basis and adjust the phase of the resistance qubit: the identity gate $I$ or phase gate $Z$ on the resistance qubit is performed if the measurement outcome is $\ket{+}$ or $\ket{-}$, respectively. Accordingly, the map on the resistance qubit reads $\mathcal{M}'_{\theta,\rho_{\rm C}}(\rho_{\rm R}) = \mathrm{Tr}_{\rm C}(K_+ \rho_{\rm out} K_+) + \mathrm{Tr}_{\rm C}(K_- \rho_{\rm out} K_-)$, where $K_\eta = \ketbra{\eta}{\eta}\otimes Z^{\frac{1}{2}-\eta\frac{1}{2}}$. The steady state of the map is $\rho'_{\rm s} = \rho_{\rm C}$ (see Appendix~\ref{app:SteadyStates}). Therefore, after the interaction with a sequence of current qubits in the same input state, the resistance state converges to $\rho_{\rm C}$, i.e.~the quantum information is encoded. The quantum state encoding is demonstrated in Figs.~\ref{fig:LTP_encoding}(b)~and~(c). The input state $\rho_{\rm C}$ is $e^{-i\frac{7\pi}{22}Z} e^{-i\frac{3\pi}{10}X} \ket{0}$ in (b) and $e^{-i\frac{\pi}{16}Z} e^{-i\frac{3\pi}{32}X} \ket{0}$ in (c). In the two experiments, the encoding fidelity reaches $97.672\%$ and $97.638\%$ after three memristive gates in (b) and (c), respectively. \begin{figure} \caption{ Universal quantum computing operations on the neural network. Red arrows denote the time sequence. } \label{fig:gates} \end{figure} \section{Artificial neural network} The neural network in Fig.~\ref{fig:neural_network}(d) has three layers. The input layer and hidden layer are formed by $M$ current qubits and $N$ resistance qubits, respectively. Each connection between the two quantum layers has three labels $(i,a,b)$ and two parameters $(\phi_i,\theta_i)$: The $i$-th connection is a composite gate $M_{\theta_i} e^{-i\frac{\phi_i}{2}Y}\otimes I$ on the $a$-th current qubit and $b$-th resistance qubit. Here, the $Y$-axis rotation is on the current qubit. 
We remark that these connections are time-ordered according to $i$ because quantum gates are non-commutative. The output layer is formed by $N$ classical bits. Each resistance qubit and the corresponding classical bit has a connection with only one parameter $\phi_j$: After a $Y$-axis rotation $e^{-i\frac{\phi_j}{2}Y}$, the resistance qubit is measured in the $Z$ basis, and the outcome is the classical bit. \section{Universal quantum computing} To implement the universal quantum computing on the memristive artificial neural network, we initialise input (current) and hidden-layer (resistance) qubits in states $\ket{0}$ and $\ket{+}$, respectively. We can think of that resistance qubits form the register of quantum data, and current qubits conduct the computing. (i) A current qubit can write/read the quantum state of a resistance qubit by taking $\phi = \theta = 0$, as shown in Fig.~\ref{fig:gates}(a), corresponding to transformations $M_0\ket{\psi}\otimes\ket{+} = \ket{+}\otimes\ket{\psi}$ and $M_0\ket{0}\otimes\ket{\psi} = \ket{\psi}\otimes\ket{0}$, respectively. (ii) To perform a single-qubit gate, we let a current qubit carry the qubit state $\ket{\psi}$ and prepare a resistance qubit in the state $\ket{0}$ by using write/read operations. Then, by visiting the resistance qubit twice with parameters shown in Fig.~\ref{fig:gates}(b), we obtain the transform $M_\theta M_0 e^{-i\frac{\phi}{2}Y}\otimes I\ket{\psi}\otimes\ket{0} = I\otimes e^{-i\frac{\theta}{2}Z} e^{-i\frac{\phi}{2}Y} \ket{0}\otimes\ket{\psi}$, which is a universal single-qubit gate. (iii) To perform a two-qubit gate on two resistance qubits, we use a current qubit to read the state of the first qubit $\psi$ and let it interact with the second qubit $\varphi$ [see Fig.~\ref{fig:gates}(c)]. The output current state is written into the third resistance qubit. In this way, a controlled-NOT gate $\Lambda_X$ is performed. 
The corresponding transformation on three resistance qubits is $\ket{\Psi}_{1,2}\otimes\ket{+}_3 \rightarrow \ket{0}_1\otimes \Lambda_X \ket{\Psi}_{2,3}$, where $\ket{\Psi}$ is the input two-qubit state, and the second qubit is the control qubit in $\Lambda_X$. The universal single-qubit gate and controlled-NOT gate form a universal gate set~\cite{Nielsen2010}. Each controlled-NOT gate consumes one current qubit and one resistance qubit. The single-qubit gate can be implemented under the restriction that each current qubit can only visit a resistance qubit at most once. See Appendix~\ref{app:universalQC} for details. Under this restriction, each single-qubit gate consumes three current qubits and two resistance qubits. Therefore, the overhead cost is polynomial. \begin{figure} \caption{ (a) Neural network for the classification of two-qubit states. (b) Probability of the output bit $0$ given optimal parameters. Dashed and solid boxes represent the theoretical and experimental results, respectively. } \label{fig:Ising} \end{figure} \section{Quantum state classification} Now, we use the memristive neural network for the quantum state classification~\cite{Farhi2018, Grant2018, Schuld2020, Gao2018}. Input qubits are prepared in one of quantum states to be classified $\ket{\Phi_k}$. Hidden-layer qubits are initialised in the state $\ket{+}$. The probability distribution of output classical bits $\boldsymbol{\mu}$ is $p_{\boldsymbol{\phi},\boldsymbol{\theta}}(\boldsymbol{\mu}\vert\Phi_k)$ given the input state $\ket{\Phi_k}$, where $\boldsymbol{\phi}$ and $\boldsymbol{\theta}$ are parameters of the neural network. We find the optimal parameters by maximising $\overline{D} = \sum_{k\neq k'} D(p_{\boldsymbol{\phi},\boldsymbol{\theta}}(\bullet\vert\Phi_k),p_{\boldsymbol{\phi},\boldsymbol{\theta}}(\bullet\vert\Phi_{k'}))$. 
Here, $D$ is the trace distance between two distributions~\cite{Nielsen2010}, which characterises how well two states can be distinguished according to the output $\boldsymbol{\mu}$. Two examples are implemented. First, we use a network with two neurons in each layer, i.e.~$M=N=2$ to classify four Bell states. Because Bell states are orthogonal, they are completely distinguishable, which can be achieved by the neural network. Second, we use a network with $M=N=5$ to classify two five-qubit ground states of the quantum Ising model in ferromagnetic and paramagnetic phases~\cite{Sachdev1999}, i.e.~the Greenberger-Horne-Zeilinger state $\ket{\Phi_{\rm ghz}} = \frac{1}{\sqrt{2}}(\ket{0}^{\otimes M}+\ket{1}^{\otimes M})$ and the product state $\ket{\Phi_+} = \ket{+}^{\otimes M}$. These two states are not orthogonal. We find that the maximum distance given by the neural network can reach the quantum upper bound, i.e.~the trace distance between two quantum states~\cite{Nielsen2010}. If we turn off parameters $\boldsymbol{\phi}$ by setting all $\phi$ to zero, only memristive gates are used in the classification. In this case, the distance can reach $0.94792$, which is lower than the upper bound $0.96824$ but is still above the classical value $0.9375$, i.e.~the distance given by a direct measurement in the $Z$ basis on each qubit. Numerical data of the optimisation computing are in Appendix~\ref{app:QSC}. For the experimental implementation, we use the network shown in Fig.~\ref{fig:Ising}(a) to classify two-qubit ground states. In the numerical simulation, the distance can reach the theoretical upper bound $0.70711$, which is reduced to $0.65673$ (but still higher than the classical value $0.5$) in the experiment using optimal parameters. The corresponding distributions are shown in Fig.~\ref{fig:Ising}(b). 
\section{Discussion} We have demonstrated that memristive quantum gates can mimic memristors and synapses, which are essential building blocks of neuromorphic computing. These gates are unitary transformations that are feasible in many physical systems~\cite{Nielsen2010}. Memristive gates are fully quantum compared with the memristance involving the weak measurement and dissipation in quantum systems~\cite{Pfeiffer2016, Salmilehto2017, Sanz2018, GonzalezRaya2020, Maier2015, Li2017PRB}. The experiments are implemented using universal gates on a circuit-based quantum computer {\it ibmq{\textunderscore}vigo}. By engineering the interaction between qubits, it is also possible to realise a memristive gate directly in the time evolution. Synapses based on memristive gates can encode the quantum state in a way similar to the long-term plasticity, therefore, are capable of processing quantum information. We have demonstrated the supervised quantum state classification on the memristive neural network, which can also be used for the unsupervised learning~\cite{Serb2016}. These results pave the way towards the neuromorphic system in the quantum regime, i.e.~a brain-inspired quantum computer. \begin{acknowledgments} This work is supported by National Natural Science Foundation of China (Grant No. 11875050) and NSAF (Grant No. U1930403). YL thanks Tyson Jones for help on using QuESTlink. \end{acknowledgments} \appendix \setcounter{figure}{0} \setcounter{table}{0} \renewcommand\thefigure{S\arabic{figure}} \renewcommand\thetable{S\arabic{table}} \section{Numerical simulation and experiment} We implement numerical simulations using QuESTlink, which is a library based on the framework of Quantum Exact Simulation Toolkit (QuEST). We perform experiments on {\it ibmq{\textunderscore}vigo} via IBM Quantum Experience. The superconducting quantum computer {\it ibmq{\textunderscore}vigo} has five qubits. 
Two-qubit gates are available on nearest neighbouring qubits (0,1), (1,2), (1,3) and (3,4). Only qubits 0,1,2,3 are used in the experiments. In the calibration data from IBM Quantum Experience on 25 Feb 2020, single-qubit-gate error rates are from 0.03\% to 0.07\%, and two-qubit gate error rates are from 0.68\% to 1.18\%, depending on the qubits. We performed experiments on 25-27 Feb 2020. Each circuit runs for 8192 shots in experiments. In all the experiments, circuits are altered from Fig.~1(c) and optimised for minimising the impact of errors on {\it ibmq{\textunderscore}vigo}. In the hysteresis, LTP, LTD and quantum state encoding experiments, the qubit 1 is the resistance qubit, and qubits 0,2,3 are current qubits. In quantum state encoding experiments, we replace the measurement and feedback phase gate with a controlled-NOT gate, and they result in the same effect on the resistance qubit when gates are perfect. In the quantum state classification experiment, qubits 0,1,2 are used, and the roles (resistance or current) of qubits change in the circuit for minimising the number of two-qubit gates. More details will be given in the following sections. \section{Ohm's law} \label{app:Ohm} Consider the transformation of the operator $Z\otimes I$, we have \begin{eqnarray} M_\theta^\dag Z\otimes I M_\theta = Z\otimes Z. \end{eqnarray} Therefore, \begin{eqnarray} \mean{Z_{\rm C}}_{\rm out} &=& \mathrm{Tr}(Z\otimes I \rho_{\rm out}) \notag \\ &=& \mathrm{Tr}(Z\otimes Z \rho_{\rm C}\otimes \rho_{\rm R}) = \mean{Z_{\rm R}}_{\rm in} \mean{Z_{\rm C}}_{\rm in}. \end{eqnarray} \begin{figure*} \caption{ (a) Detailed display of the circuit of the memristive gate $M_{\theta} \label{fig:circuits} \end{figure*} \section{Hysteresis loops} \label{app:Hysteresis} In the numerical simulations, the resistance qubit is initialised in the state $\rho_{\rm R}^{(0)} = \ketbra{+}{+}$, where $\ket{+} = \frac{1}{\sqrt{2}}(\ket{0}+\ket{1})$. 
With this initial state, we compute the output states of the first gate, $\rho_{\rm out}^{(0)} = M_\theta\rho_{\rm C}^{(0)}\otimes\rho_{\rm R}^{(0)}M_\theta^\dag$ and $\rho_{\rm R}^{(1)} = \mathrm{Tr}_{\rm C}(\rho_{\rm out}^{(0)})$; with the output resistance state of the first gate, we compute the output states of the second gate, $\rho_{\rm out}^{(1)} = M_\theta\rho_{\rm C}^{(1)}\otimes\rho_{\rm R}^{(1)}M_\theta^\dag$ and $\rho_{\rm R}^{(2)} = \mathrm{Tr}_{\rm C}(\rho_{\rm out}^{(1)})$; and so on. In this way, we can obtain output states of each gate. Then, at the time $t$, the voltage is $\mean{Z_{\rm C}}_{\rm in}(t) = \mathrm{Tr}(Z\rho_{\rm C}^{(t)})$, the output current is $\mean{Z_{\rm C}}_{\rm out}(t) = \mathrm{Tr}(Z\otimes I\rho_{\rm out}^{(t)})$, and the output conductance is $\mean{Z_{\rm R}}_{\rm out}(t) = \mathrm{Tr}(I\otimes Z\rho_{\rm out}^{(t)})$. In Fig.~2, small gray circles represent the numerical data of $(\mean{Z_{\rm C}}_{\rm in}(t),\mean{Z_{\rm C}}_{\rm out}(t))$ with $\theta = \frac{7\pi}{16}$ and $\delta\phi = \frac{\pi}{32}$, where $t = 0,1,\ldots,\frac{20\pi}{\delta\phi}-1$. Dashed lines represent the numerical data with $\theta = \frac{3\pi}{8}$ and $\delta\phi = \frac{\pi}{4}$. For dashed lines, the numerical simulations are implemented for $t = 0,1,\ldots,\frac{20\pi}{\delta\phi}+1$, however, only the last cycle is plotted, i.e.~$t = \frac{20\pi}{\delta\phi}-7,\frac{20\pi}{\delta\phi}-6,\ldots,\frac{20\pi}{\delta\phi}+1$. The blue dashed lines represent $t = \frac{20\pi}{\delta\phi}-7,\frac{20\pi}{\delta\phi}-6,\frac{20\pi}{\delta\phi}-5$; the yellow dashed lines represent $t = \frac{20\pi}{\delta\phi}-5,\frac{20\pi}{\delta\phi}-4,\frac{20\pi}{\delta\phi}-3$; the green dashed lines represent $t = \frac{20\pi}{\delta\phi}-3,\frac{20\pi}{\delta\phi}-2,\frac{20\pi}{\delta\phi}-1$; and the orange dashed lines represent $t = \frac{20\pi}{\delta\phi}-1,\frac{20\pi}{\delta\phi},\frac{20\pi}{\delta\phi}+1$. 
On {\it ibmq{\textunderscore}vigo}, a qubit has direct gate coupling with at most three other qubits. Therefore, we implement the memristive gates between the resistance qubit and at most three current qubits. To demonstrate a full cycle of each hysteresis loop, we divide the cycle into four segments, i.e.~four experiments, according to the four segments of the dashed lines. In the experiments, we take $\theta = \frac{3\pi}{8}$ and $\delta\phi = \frac{\pi}{4}$, the same as in the numerical simulations of the dashed lines. For the segment starting at $t = s$, we prepare the resistance qubit in the numerically-computed output state $\rho_{\rm R}^{(s)}$, and then we let the resistance qubit interact with three current qubits prepared in states $\rho_{\rm C}^{(s)}$, $\rho_{\rm C}^{(s+1)}$ and $\rho_{\rm C}^{(s+2)}$ one by one. For the first segment (large blue circles), $s = \frac{20\pi}{\delta\phi}-7$; for the second segment (large yellow circles), $s = \frac{20\pi}{\delta\phi}-5$; for the third segment (large green circles), $s = \frac{20\pi}{\delta\phi}-3$; and for the fourth segment (large orange circles), $s = \frac{20\pi}{\delta\phi}-1$. Data $\mean{Z_{\rm C}}_{\rm out}(t)$ are measured in the experiments, and $(\mean{Z_{\rm C}}_{\rm in}(t),\mean{Z_{\rm C}}_{\rm out}(t))$ are plotted as large circles in Fig.~2. If quantum gates are ideal, experimental data should be consistent with the dashed lines. The difference is caused by the noise on {\it ibmq{\textunderscore}vigo}. In the experiments, we decompose the memristive gate into elementary gates as shown in Fig.~\ref{fig:circuits}(c). Data of $(\mean{Z_{\rm C}}_{\rm in}(t),\mean{Z_{\rm R}}_{\rm out}(t))$ are shown in Fig.~\ref{fig:memristorII}. \begin{figure*} \caption{ Hysteresis loops of $(\mean{Z_{\rm C} \label{fig:memristorII} \end{figure*} \section{Steady states of maps} \label{app:SteadyStates} We use the Pauli transfer matrix representation. 
The input state of the current qubit is $\rho_{\rm C} = \frac{1}{2}(I + \rho_{\rm C}^X X + \rho_{\rm C}^Y Y + \rho_{\rm C}^Z Z)$, and the input state of the resistance qubit is $\rho_{\rm R} = \frac{1}{2}(I + \rho_{\rm R}^X X + \rho_{\rm R}^Y Y + \rho_{\rm R}^Z Z)$, where $I$, $X$, $Y$ and $Z$ are Pauli operators. The Pauli transfer matrix of the memristive-gate maps $\mathcal{M}_{\theta,\rho_{\rm C}}$ and $\mathcal{M}'_{\theta,\rho_{\rm C}}$ are \begin{eqnarray} M_{\theta,\rho_{\rm C}} = \left(\begin{array}{cc} 1 & 0 \\ k & E \end{array}\right)~{\rm and}~ M'_{\theta,\rho_{\rm C}} = \left(\begin{array}{cc} 1 & 0 \\ k' & E' \end{array}\right), \end{eqnarray} respectively, where \begin{eqnarray} k = \rho_{\rm C}^Z \left(\begin{array}{c} \cos\theta \sin^2\theta \\ \cos^2\theta \sin\theta \\ \cos^2\theta \end{array}\right),~~ k' = \cos^2\theta \left(\begin{array}{c} \rho_{\rm C}^X \\ \rho_{\rm C}^Y \\ \rho_{\rm C}^Z \end{array}\right), \end{eqnarray} and \begin{widetext} \begin{eqnarray} E = \left(\begin{array}{ccc} \rho_{\rm C}^X\cos\theta - \rho_{\rm C}^Y \sin^3\theta & - \rho_{\rm C}^X \cos^2\theta \sin\theta & - \cos\theta \sin^2\theta \\ \rho_{\rm C}^Y \cos^3\theta & \rho_{\rm C}^X \cos\theta \sin^2\theta - \rho_{\rm C}^Y \sin\theta & - \cos^2\theta \sin\theta \\ - \rho_{\rm C}^Y \cos\theta \sin\theta & \rho_{\rm C}^X \cos\theta \sin\theta & \sin^2\theta \end{array}\right), \end{eqnarray} \begin{eqnarray} E' = \left(\begin{array}{ccc} \sin^2\theta & - \rho_{\rm C}^Z \cos\theta \sin\theta & \rho_{\rm C}^Y \cos\theta \sin\theta \\ \rho_{\rm C}^Z \cos\theta \sin\theta & \sin^2\theta & - \rho_{\rm C}^X \cos\theta \sin\theta \\ - \rho_{\rm C}^Y \cos\theta \sin\theta & \rho_{\rm C}^X \cos\theta \sin\theta & \sin^2\theta \end{array}\right). \end{eqnarray} \end{widetext} We express the output state of the resistance qubit in the form $\rho_{\rm R,out} = \frac{1}{2}(I + \rho_{\rm R,out}^X X + \rho_{\rm R,out}^Y Y + \rho_{\rm R,out}^Z Z)$. 
If $\rho_{\rm R,out} = \mathcal{M}_{\theta,\rho_{\rm C}}(\rho_{\rm R})$, we have \begin{eqnarray} \left(\begin{array}{c} \rho_{\rm R,out}^X \\ \rho_{\rm R,out}^Y \\ \rho_{\rm R,out}^Z \end{array}\right) = E \left(\begin{array}{c} \rho_{\rm R}^X \\ \rho_{\rm R}^Y \\ \rho_{\rm R}^Z \end{array}\right) + k; \end{eqnarray} Similarly, if $\rho_{\rm R,out} = \mathcal{M}'_{\theta,\rho_{\rm C}}(\rho_{\rm R})$, we have \begin{eqnarray} \left(\begin{array}{c} \rho_{\rm R,out}^X \\ \rho_{\rm R,out}^Y \\ \rho_{\rm R,out}^Z \end{array}\right) = E' \left(\begin{array}{c} \rho_{\rm R}^X \\ \rho_{\rm R}^Y \\ \rho_{\rm R}^Z \end{array}\right) + k'. \end{eqnarray} The steady state of the map $\mathcal{M}_{\theta,\rho_{\rm C}}$ is the solution of the equation $\rho_{\rm s} = \mathcal{M}_{\theta,\rho_{\rm C}}(\rho_{\rm s})$. Express the steady state in the form $\rho_{\rm s} = \frac{1}{2}(I + \rho_{\rm s}^X X + \rho_{\rm s}^Y Y + \rho_{\rm s}^Z Z)$, the solution is \begin{eqnarray} \left(\begin{array}{c} \rho_{\rm s}^X \\ \rho_{\rm s}^Y \\ \rho_{\rm s}^Z \end{array}\right) = \left(\begin{array}{c} 0 \\ 0 \\ \rho_{\rm C}^Z \end{array}\right). \end{eqnarray} We remark that $\rho_{\rm C}^Z = \mathrm{Tr}(Z\rho_{\rm C}) = \mean{Z_{\rm C}}_{\rm in}$. Similarly, the steady state of the map $\mathcal{M}'_{\theta,\rho_{\rm C}}$ is the solution of the equation $\rho'_{\rm s} = \mathcal{M}'_{\theta,\rho_{\rm C}}(\rho'_{\rm s})$. Express the steady state in the form $\rho'_{\rm s} = \frac{1}{2}(I + \rho_{\rm s}^{\prime X} X + \rho_{\rm s}^{\prime Y} Y + \rho_{\rm s}^{\prime Z} Z)$, the solution is \begin{eqnarray} \left(\begin{array}{c} \rho_{\rm s}^{\prime X} \\ \rho_{\rm s}^{\prime Y} \\ \rho_{\rm s}^{\prime Z} \end{array}\right) = \left(\begin{array}{c} \rho_{\rm C}^X \\ \rho_{\rm C}^Y \\ \rho_{\rm C}^Z \end{array}\right). \end{eqnarray} Therefore, $\rho'_{\rm s} = \rho_{\rm C}$. \begin{figure} \caption{ Single-qubit gate. Red numbers denote the time sequence. 
} \label{fig:1q_gate} \end{figure} \section{LTP, LTD and quantum-state encoding} \label{app:LTP} In the LTP and LTD numerical simulations, we let the resistance qubit interact with a sequence of current qubits in the input states $\rho_{\rm C}^{(0)},\rho_{\rm C}^{(1)},\ldots,\rho_{\rm C}^{(t)},\ldots$ one by one through memristive gates, the same as in the hysteresis-loop simulations. We take $\rho_{\rm C}^{(t)} = \ketbra{1}{1}$ when $t = 0,1,\cdots,99$, $\rho_{\rm C}^{(t)} = \ketbra{0}{0}$ when $t = 100,101,\cdots,199$, $\rho_{\rm C}^{(t)} = \ketbra{1}{1}$ again when $t = 200,201,\cdots,299$, and $\rho_{\rm C}^{(t)} = \ketbra{+}{+}$ when $t = 300,301,\cdots,399$. The resistance qubit is initialised in the state $\rho_{\rm R}^{(0)} = \ketbra{+}{+}$. With this initial state, we compute the output states of the resistance qubit, i.e.~$\rho_{\rm R}^{(t+1)} = \mathcal{M}_{\theta,\rho_{\rm C}^{(t)}}(\rho_{\rm R}^{(t)})$, where $\theta = \frac{7\pi}{16}$. Then, $\mean{Z_{\rm R}}_{\rm out}(t) = \mathrm{Tr}(Z\rho_{\rm R}^{(t)})$ is computed and plotted as the thin curve in Fig.~3(a). Four LTP and LTD experiments are implemented on {\it ibmq{\textunderscore}vigo}, corresponding to four thick curves (with circles) in Fig.~3(a), respectively. From left to right, in the first experiment, the resistance qubit is initialised in the state $\ket{+}$, and three current qubits are initialised in the state $\ket{1}$; in the second experiment, the resistance qubit is initialised in the state $\ket{1}$, and three current qubits are initialised in the state $\ket{0}$; in the third experiment, the resistance qubit is initialised in the state $\ket{0}$, and three current qubits are initialised in the state $\ket{1}$ again; and in the fourth experiment, the resistance qubit is initialised in the state $\ket{1}$, and three current qubits are initialised in the state $\ket{+}$. We let the resistance qubit interact with three current qubits one by one through the memristive gate. 
The memristive gate is decomposed into elementary gates as shown in Fig.~\ref{fig:circuits}(c). We take $\theta = \frac{\pi}{4}$. After each memristive gate, $\mean{Z_{\rm R}}_{\rm out}$ is measured. In the quantum-state encoding numerical simulations, we let the resistance qubit interact with a sequence of current qubits in the input states $\rho_{\rm C}^{(0)},\rho_{\rm C}^{(1)},\ldots,\rho_{\rm C}^{(t)},\ldots$ one by one through modified memristive gates. The circuit of the modified memristive gate (encoding gate) is shown in Fig.~\ref{fig:circuits}(d). Because we are only interested in the state of the resistance qubit, the modified memristive gate can also be realised using the circuit shown in Fig.~\ref{fig:circuits}(e). The additional controlled-NOT gate is equivalent to a phase gate on the resistance qubit depending on the phase state of the current qubit. We take $\rho_{\rm C}^{(t)} = \ketbra{\psi}{\psi}$, where $\ket{\psi} = e^{-i\frac{7\pi}{22}Z} e^{-i\frac{3\pi}{10}X} \ket{0}$ and $\ket{\psi} = e^{-i\frac{\pi}{16}Z} e^{-i\frac{3\pi}{32}X} \ket{0}$ in the two simulations. The resistance qubit is initialised in the state $\rho_{\rm R}^{(0)} = \ketbra{+}{+}$. With this initial state, we compute the output state of the resistance qubit at each time $t$, i.e.~$\rho_{\rm R}^{(t+1)} = \mathcal{M}'_{\theta,\rho_{\rm C}^{(t)}}(\rho_{\rm R}^{(t)})$, where $\theta = \frac{7\pi}{16}$. Then, the mean values of three Pauli operators $\mean{P_{\rm R}}_{\rm out}(t) = \mathrm{Tr}(P\rho_{\rm R}^{(t)})$ are computed and plotted as thin solid curves in Figs.~3(b)~and~(c), where $P = X,Y,Z$. Two quantum-state encoding experiments are implemented on {\it ibmq{\textunderscore}vigo}, corresponding to two input states $\ket{\psi} = e^{-i\frac{7\pi}{22}Z} e^{-i\frac{3\pi}{10}X} \ket{0}$ and $\ket{\psi} = e^{-i\frac{\pi}{16}Z} e^{-i\frac{3\pi}{32}X} \ket{0}$ of current qubits. 
In each experiment, the resistance qubit is initialised in the state $\ket{+}$, and three current qubits are initialised in the state $\ket{\psi}$. We let the resistance qubit interact with three current qubits one by one through modified memristive gates. The gate is realised using the circuit in Fig.~\ref{fig:circuits}(e), in which the memristive gate is decomposed into elementary gates as shown in Fig.~\ref{fig:circuits}(c). We take $\theta = \frac{\pi}{8}$. After each memristive gate, $\mean{P_{\rm R}}_{\rm out}$ are measured, where $P = X,Y,Z$. The data are plotted as thick curves (with circles) in Figs.~3(b)~and~(c). We can express states of the current qubit and resistance qubit as $\rho_{\rm C}^{(t)} = \frac{1}{2}(I + \rho_{\rm C}^X X + \rho_{\rm C}^Y Y + \rho_{\rm C}^Z Z)$ and $\rho_{\rm R}^{(t)} = \frac{1}{2}(I + \rho_{\rm R}^X X + \rho_{\rm R}^Y Y + \rho_{\rm R}^Z Z)$, respectively. Here, $\rho_{\rm C}^P = \mathrm{Tr}(P\rho_{\rm C}^{(t)})$ and $\rho_{\rm R}^P = \mathrm{Tr}(P\rho_{\rm R}^{(t)})$. Therefore, when $\mathrm{Tr}(P\rho_{\rm C}^{(t)}) = \mathrm{Tr}(P\rho_{\rm R}^{(t)})$, the two states are the same. In Figs.~3(b) and (c), the dashed horizontal lines represent $\mathrm{Tr}(P\rho_{\rm C}^{(t)})$. Because $\rho_{\rm C}^{(t)}$ is a pure state, the fidelity $F = \sqrt{ \mathrm{Tr}(\rho_{\rm C}^{(t)} \rho_{\rm R}^{(t)}) } = \sqrt{ (1+\rho_{\rm C}^X\rho_{\rm R}^X+\rho_{\rm C}^Y\rho_{\rm R}^Y+\rho_{\rm C}^Z\rho_{\rm R}^Z)/2 }$. \begin{figure*} \caption{ Values of the distance in the optimisation computing. Blue dots denote the distance returned in each step. Dashed lines denote the quantum upper bound of the distance. Solid lines denote classical values, i.e.~$D(q(\bullet\vert\Phi_{\rm ghz}),q(\bullet\vert\Phi_+))$. } \label{fig:find} \end{figure*} \section{Universal gates} \label{app:universalQC} Under the restriction that each current qubit can only visit a resistance qubit once if they are connected, the single-qubit gate can be realised as shown in Fig.~\ref{fig:1q_gate}.
The connection-1 prepares the second resistance qubit (from left to right) in the state $\ket{0}$. The connection-2 reads the state of the first resistance qubit $\ket{\psi}$ into the second current qubit. The connection-3 corresponds to the first visit in Fig.~4(c). Then, the connection-4 writes the output state of the second current qubit into the third resistance qubit. The connection-5 reads the state of the third resistance qubit into the third current qubit. The connection-6 corresponds to the second visit in Fig.~4(c). To understand the controlled-NOT gate, we only need to note that the memristive gate with $\theta = 0$, i.e.~$M_0$, is equivalent to a controlled-NOT gate followed by a swap gate, as shown in Fig.~\ref{fig:circuits}(b). \section{Quantum state classification} \label{app:QSC} In the neural networks used for the quantum state classification, the input layer and the hidden layer are fully connected, and connections are sorted as follows. The first input qubit interacts with the hidden-layer qubits one by one, from the first to the last; then, the second input qubit interacts with the hidden-layer qubits one by one, from the first to the last; and so on. In other words, the $a$-th input qubit and the $b$-th hidden-layer qubit are coupled by the $i$-th $Y$-axis rotation and memristive gate, where $i = N(a-1)+b$.
The trace distance between two distributions is \begin{eqnarray} && D(p_{\boldsymbol{\phi},\boldsymbol{\theta}}(\bullet\vert\Phi_k),p_{\boldsymbol{\phi},\boldsymbol{\theta}}(\bullet\vert\Phi_{k'})) \notag \\ &=& \frac{1}{2} \sum_{\boldsymbol{\mu}} \left\vert p_{\boldsymbol{\phi},\boldsymbol{\theta}}(\boldsymbol{\mu}\vert\Phi_k) - p_{\boldsymbol{\phi},\boldsymbol{\theta}}(\boldsymbol{\mu}\vert\Phi_{k'}) \right\vert, \end{eqnarray} where $\boldsymbol{\mu} = (\mu_1,\mu_2,\ldots,\mu_N)$ is a binary vector, $\mu_j$ is the value of the $j$-th output bit, i.e.~the measurement outcome of the $j$-th hidden-layer qubit, and parameter vectors are $\boldsymbol{\phi} = (\phi_1,\phi_2,\ldots,\phi_{(M+1)N})$ and $\boldsymbol{\theta} = (\theta_1,\theta_2,\ldots,\theta_{MN})$. Here, $\phi_{MN+j}$ is the parameter of the $Y$-axis rotation on the $j$-th hidden-layer qubit before the measurement. To distinguish four Bell states \begin{eqnarray} \ket{\Phi_1} &=& \frac{1}{\sqrt{2}} (\ket{0}\otimes\ket{0}+\ket{1}\otimes\ket{1}), \\ \ket{\Phi_2} &=& \frac{1}{\sqrt{2}} (\ket{0}\otimes\ket{0}-\ket{1}\otimes\ket{1}), \\ \ket{\Phi_3} &=& \frac{1}{\sqrt{2}} (\ket{0}\otimes\ket{1}+\ket{1}\otimes\ket{0}), \\ \ket{\Phi_4} &=& \frac{1}{\sqrt{2}} (\ket{0}\otimes\ket{1}-\ket{1}\otimes\ket{0}), \end{eqnarray} we take $M=N=2$, i.e.~each layer has two qubits or classical bits. We find optimal parameters $\boldsymbol{\phi}$ and $\boldsymbol{\theta}$ by maximising the distance function \begin{eqnarray} \overline{D}(\boldsymbol{\phi},\boldsymbol{\theta}) = \sum_{k=1}^3\sum_{k'=k+1}^{4} D(p_{\boldsymbol{\phi},\boldsymbol{\theta}}(\bullet\vert\Phi_k),p_{\boldsymbol{\phi},\boldsymbol{\theta}}(\bullet\vert\Phi_{k'})).~ \end{eqnarray} The value of the average distance $\overline{D}/6$ is plotted in Fig.~\ref{fig:find}(a), which reaches one at the end of the optimisation. The distance $D$ is never larger than $1$, and $D = 1$ means that two states are fully distinguishable with the successful probability one. 
The optimal parameters are $\boldsymbol{\phi} = (0,-0.31973,0,0,-1.5708,0)$ and $\boldsymbol{\theta} = (0,-1.3065,0,0)$. We note that $\frac{\pi}{2} \simeq 1.5708$, and we can find that the distance is one for any values of $\phi_2$ and $\theta_2$. The two ground states $\ket{\Phi_{\rm ghz}}$ and $\ket{\Phi_+}$ are not orthogonal. Therefore, they are not fully distinguishable. The trace distance between the two quantum states is \begin{eqnarray} D(\ket{\Phi_{\rm ghz}},\ket{\Phi_+}) &=& \sqrt{1-\abs{\braket{\Phi_+}{\Phi_{\rm ghz}}}^2} \notag \\ &=& \sqrt{1-\frac{1}{2^{M-1}}}, \end{eqnarray} where $M$ is the number of qubits in the ground states. For any measurement setup, the distance between measurement-outcome distributions of two quantum states is never larger than $D(\ket{\Phi_{\rm ghz}},\ket{\Phi_+})$. Therefore, $D(p_{\boldsymbol{\phi},\boldsymbol{\theta}}(\bullet\vert\Phi_{\rm ghz}),p_{\boldsymbol{\phi},\boldsymbol{\theta}}(\bullet\vert\Phi_+)) \leq D(\ket{\Phi_{\rm ghz}},\ket{\Phi_+})$. If two ground states are directly measured in the $Z$ basis, the measurement-outcome distributions are $q(\boldsymbol{\mu}\vert\Phi_{\rm ghz}) = \frac{\delta_{\boldsymbol{\mu},\boldsymbol{0}}+\delta_{\boldsymbol{\mu},\boldsymbol{1}}}{2}$ and $q(\boldsymbol{\mu}\vert\Phi_+) = \frac{1}{2^M}$, where $\boldsymbol{0} = (0,0,\ldots,0)$ and $\boldsymbol{1} = (1,1,\ldots,1)$. The distance between the two distributions is \begin{eqnarray} D(q(\bullet\vert\Phi_{\rm ghz}),q(\bullet\vert\Phi_+)) = 1-\frac{1}{2^{M-1}}. \end{eqnarray} To distinguish two ground states of five qubits, we take $M=N=5$, i.e.~each layer has five qubits or classical bits. We find optimal parameters $\boldsymbol{\phi}$ and $\boldsymbol{\theta}$ by maximising the distance function \begin{eqnarray} \overline{D}(\boldsymbol{\phi},\boldsymbol{\theta}) = D(p_{\boldsymbol{\phi},\boldsymbol{\theta}}(\bullet\vert\Phi_{\rm ghz}),p_{\boldsymbol{\phi},\boldsymbol{\theta}}(\bullet\vert\Phi_+)). 
\label{eq:dis} \end{eqnarray} The result is plotted in Fig.~\ref{fig:find}(b), and $\overline{D}$ reaches the quantum upper bound $D(\ket{\Phi_{\rm ghz}},\ket{\Phi_+}) \simeq 0.96824$ at the end of the optimisation. If we turn off parameters $\boldsymbol{\phi}$ by setting $\phi_i = 0$ for all $i = 1,2,\ldots,(M+1)N$, we find the optimal $\boldsymbol{\theta}$ by maximising the distance function \begin{eqnarray} \overline{D}(\boldsymbol{\theta}) = D(p_{\boldsymbol{0},\boldsymbol{\theta}}(\bullet\vert\Phi_{\rm ghz}),p_{\boldsymbol{0},\boldsymbol{\theta}}(\bullet\vert\Phi_+)). \end{eqnarray} Here, $\boldsymbol{0}$ is the $(M+1)N$-dimensional zero vector. The result is plotted in Fig.~\ref{fig:find}(c), and $\overline{D}$ reaches $0.94792$ at the end of the optimisation, which is lower than $D(\ket{\Phi_{\rm ghz}},\ket{\Phi_+})$ but above $D(q(\bullet\vert\Phi_{\rm ghz}),q(\bullet\vert\Phi_+)) = 0.9375$. For the experiment, we use a three-qubit neural network shown in Fig.~5(a), i.e.~$M=2$ and $N=1$, to distinguish two-qubit ground states. We find optimal parameters $\boldsymbol{\phi}$ and $\boldsymbol{\theta}$ by maximising the distance function $\overline{D}(\boldsymbol{\phi},\boldsymbol{\theta})$ [Eq.~(\ref{eq:dis})], and the result is plotted in Fig.~\ref{fig:find}(d). The distance $\overline{D}$ reaches the quantum upper bound $D(\ket{\Phi_{\rm ghz}},\ket{\Phi_+}) \simeq 0.70711$ at the end of the optimisation. The optimal parameters are $\boldsymbol{\phi} = (1.5708,1.5708,-0.78540)$ and $\boldsymbol{\theta} = (0,0)$. We note that $\frac{\pi}{2} \simeq 1.5708$ and $\frac{\pi}{4} \simeq 0.78540$. These parameters are used in the experiment. In the experiment of three-qubit neural network implemented on {\it ibmq{\textunderscore}vigo}, we optimise the implementation, i.e.~minimise the number of two-qubit gates, as follows. We can find that only memristive gates $M_\theta$ with $\theta = 0$ are used according to optimal parameters. 
Each gate $M_0$ can be realised using two controlled-NOT gates, as shown in Fig.~\ref{fig:circuits}(b), which is equivalent to a controlled-NOT gate followed by a SWAP gate. Therefore, we can implement the neural network with optimal parameters as shown in Fig.~\ref{fig:circuits}(f): At the beginning, qubit-0 represents the resistance qubit (i.e.~hidden-layer qubit), qubit-1 and qubit-2 represent current qubits (i.e.~input qubits); To perform the first memristive gate, instead of physically performing the SWAP gate, the roles of qubit-0 and qubit-1 are exchanged after the first controlled-NOT gate, i.e.~now qubit-1 represents the resistance qubit, and qubit-0 represents a current qubit; It is similar for the second memristive gate. The distributions of measurement outcomes obtained in the experiment are shown in Fig.~5(b). The distance between distributions of two ground states is $0.65673$, which is lower than the theoretical value $D(\ket{\Phi_{\rm ghz}},\ket{\Phi_+}) \simeq 0.70711$ but above $D(q(\bullet\vert\Phi_{\rm ghz}),q(\bullet\vert\Phi_+)) = 0.5$. \end{document}
\begin{document} \begin{center} {\Large \bf Skellam Type Processes of Order K and Beyond} \end{center} \vone \begin{center} {Neha Gupta}$^{\textrm{a}}$, {Arun Kumar}$^{\textrm{a}}$, {Nikolai Leonenko}$^{\textrm{b}}$ \footnotesize{ $$\begin{tabular}{l} $^{\textrm{a}}$ \emph{Department of Mathematics, Indian Institute of Technology Ropar, Rupnagar, Punjab - 140001, India}\\ $^{b}$Cardiff School of Mathematics, Cardiff University, Senghennydd Road, Cardiff, CF24 4AG, UK \end{tabular}$$} \end{center} \vtwo \begin{center} \noindent{\bf Abstract} \end{center} In this article, we introduce Skellam process of order $k$ and its running average. We also discuss the time-changed Skellam process of order $k$. In particular we discuss space-fractional Skellam process and tempered space-fractional Skellam process via time changes in Poisson process by independent stable subordinator and tempered stable subordinator, respectively. We derive the marginal probabilities, L\'evy measures, governing difference-differential equations of the introduced processes. Our results generalize Skellam process and running average of Poisson process in several directions.\\ \noindent{\it Key words:} Skellam process, subordination, L\'evy measure, Poisson process of order $k$, running average. \section{Introduction} Skellam distribution is obtained by taking the difference between two independent Poisson distributed random variables which was introduced for the case of different intensities $\lambda_1,\; \lambda_2$ by (see \cite{Skellam1946}) and for equal means in \cite{Irwin1937}. For large values of $\lambda_1+\lambda_2$, the distribution can be approximated by the normal distribution and if $\lambda_2$ is very close to $0$, then the distribution tends to a Poisson distribution with intensity $\lambda_1$. Similarly, if $\lambda_1$ tends to 0, the distribution tends to a Poisson distribution with non-positive integer values. 
The Skellam random variable is infinitely divisible since it is the difference of two infinitely divisible random variables (see Prop. $2.1$ in \cite{Steutel2004}). Therefore, one can define a continuous time L\'evy process for the Skellam distribution, which is called the Skellam process. \noindent The Skellam process is an integer-valued L\'evy process and can also be obtained by taking the difference of two independent Poisson processes; its marginal probability mass function (PMF) involves the modified Bessel function of the first kind. The Skellam process has various applications in different areas, such as modeling the intensity difference of pixels in cameras (see \cite{Hwang2007}) and modeling the difference of the number of goals of two competing teams in a football game in \cite{Karlis2008}. Models based on the difference of two point processes are proposed in \cite{Bacry2013a, Bacry2013b, Barndorff2011, Carr2011}. \noindent Recently, time-fractional Skellam processes have been studied in \cite{Kerss2014}; these are obtained by time-changing the Skellam process with an inverse stable subordinator. Further, they provided an application of the time-fractional Skellam process in modeling the arrivals of jumps in high frequency trading data. It is shown that the inter arrival times between the positive and negative jumps follow the Mittag-Leffler distribution rather than the exponential distribution. Similar observations are made in the case of Danish fire insurance data (see \cite{Kumar2019}). Buchak and Sakhno in \cite{Buchak2018} have also proposed the governing equations for time-fractional Skellam processes. Recently, \cite{Ayushi2020} introduced the time-changed Poisson process of order $k$, which is obtained by time changing the Poisson process of order $k$ (see \cite{Kostadinova2012}) by general subordinators. \noindent In this paper we introduce the Skellam process of order $k$ and its running average. We also discuss the time-changed Skellam process of order $k$.
In particular we discuss the space-fractional Skellam process and the tempered space-fractional Skellam process via time changes in the Poisson process by an independent stable subordinator and a tempered stable subordinator, respectively. We obtain closed form expressions for the marginal distributions of the considered processes and other important properties. The Skellam process is used to model the difference between the numbers of goals of two teams in a football match. Similarly, the Skellam process of order $k$ can be used to model the difference between the numbers of points scored by two competing teams in a basketball match, where $k=3.$ The remainder of this paper proceeds as follows: in Section $2$, we introduce all the relevant definitions and results. We also derive the L\'evy density for space- and tempered space-fractional Poisson processes. In Section $3$, we introduce and study the running average of the Poisson process of order $k$. Section 4 is dedicated to the Skellam process of order $k$. Section 5 deals with the running average of the Skellam process of order $k$. In Section 6, we discuss the time-changed Skellam process of order $k$. In Section $7$, we determine the marginal PMF, governing equations for the marginal PMF, L\'evy densities and moment generating functions for the space-fractional Skellam process and the tempered space-fractional Skellam process. \section{Preliminaries} In this section, we collect relevant definitions and some results on the Skellam process, subordinators, the space-fractional Poisson process and the tempered space-fractional Poisson process. These results will be used to define the space-fractional Skellam processes and tempered space-fractional Skellam processes. \subsection{Skellam process} In this section, we revisit the Skellam process and also provide a characterization of it.
Let $S(t)$ be a Skellam process, such that $$ S(t)= N_{1}(t)-N_{2}(t), \; t\geq0,$$ where $N_{1}(t)$ and $N_{2}(t)$ are two independent homogeneous Poisson processes with intensity $\lambda_{1} >0$ and $\lambda_2>0,$ respectively. The Skellam process is defined in \cite{Barndorff2011} and the distribution has been introduced and studied in \cite{Skellam1946}, see also \cite{Irwin1937}. This process is symmetric only when $\lambda_1= \lambda_2$. The PMF $s_{k}(t)=\mathbb{P}(S(t)=k)$ of $S(t)$ is given by (see e.g. \cite{Skellam1946, Kerss2014}) \begin{align}{\label{Skellam_PMF}} s_{k}(t)=e^{-t(\lambda_1+\lambda_2)}{\left(\frac{\lambda_1}{\lambda_2}\right)}^{k/2}I_{|k|}(2t\sqrt{\lambda_1 \lambda_2}),\; k\in \mathbb{Z}, \end{align} where $I_k$ is modified Bessel function of first kind (see \cite{Abramowitz1974}, p. $375$), \begin{equation}\label{Modi_Bessel} I_{k}(z)=\sum_{n=0}^{\infty}\frac{{(z/2)}^{2n+k}}{n!(n+k)!}. \end{equation} The PMF $s_{k}(t)$ satisfies the following differential difference equation (see \cite{Kerss2014}) \begin{equation} \frac{d}{dt}s_{k}(t)= \lambda_{1}(s_{k-1}(t)-s_{k}(t))-\lambda_{2}(s_{k}(t)-s_{k+1}(t)),\;\; k\in \mathbb{Z}, \end{equation} with initial conditions $s_{0}(0)=1$ and $s_{k}(0)=0, \;k\neq0$. The Skellam process is a L\'evy process, its L\'evy density $\nu_S$ is the linear combination of two Dirac delta function, $\nu_S(y)= \lambda_{1}\delta_{\{1\}}(y)+\lambda_{2}\delta_{\{-1\}}(y) $ and the corresponding L\'evy exponent is given by $$ \phi_{S(1)}(\theta)=\int_{-\infty}^{\infty}(1-e^{-\theta y})\nu(y)dy. $$ The moment generating function (MGF) of Skellam process is \begin{align} \mathbb{E}[e^{\theta S(t)}]=e^{-t(\lambda_{1}+\lambda_{2}-\lambda_{1}e^{\theta}-\lambda_{2}e^{-\theta})}, \; \theta \in \mathbb{R}. \end{align} With the help of MGF, one can easily find the moments of Skellam process. In next result, we give a characterization of Skellam process, which is not available in literature as per our knowledge. 
\begin{theorem} Suppose an arrival process has the independent and stationary increments and also satisfies the following incremental condition, then the process is Skellam. \[ \mathbb{P}(S(t+\delta)=m|S(t) = n)= \begin{cases} \lambda_1 \delta + o(\delta), & m >n,\;m= n+1;\\ \lambda_2 \delta + o(\delta), & m< n,\; m= n-1;\\ 1-\lambda_1 \delta-\lambda_2 \delta + o(\delta), & m=n;\\ 0 &\quad {\rm otherwise.}\\ \end{cases} \] \end{theorem} \begin{proof} Consider the interval [0,t] which is discretized with $n$ sub-intervals of size $\delta$ each such that $n\delta =t.$ For $k\geq 0$, we have \begin{align*} \mathbb{P}(S(0,t) = k) &= \sum_{m=0}^{[\frac{n-k}{2}]} \frac{n!}{m!(m+k)!(n-2m-k)!}(\lambda_1\delta)^{m+k}(\lambda_2\delta)^{m}(1-\lambda_1\delta-\lambda_2\delta)^{n-2m-k} \\ &= \sum_{m=0}^{[\frac{n-k}{2}]} \frac{n!}{m!(m+k)!(n-2m-k)!} \left(\frac{\lambda_1 t}{n}\right)^{m+k}\left(\frac{\lambda_2 t}{n} \right)^{m}\left(1-\frac{\lambda_1 t}{n} -\frac{\lambda_2 t}{n}\right)^{n-2m-k}\\ &= \sum_{m=0}^{[\frac{n-k}{2}]} \frac{(\lambda_1 t)^{m+k}(\lambda_2 t)^{m}}{m!(m+k)!} \frac{n!}{(n-2m-k)!n^{2m+k}}\left(1-\frac{\lambda_1 t}{n} -\frac{\lambda_2 t}{n}\right)^{n-2m-k}\\ &= e^{-(\lambda_1+\lambda_2)t} \sum_{m=0}^{\infty}\frac{(\lambda_1 t)^{m+k}(\lambda_2 t)^{m}}{m!(m+k)!}, \end{align*} by taking $n\rightarrow \infty.$ The result follows now by using the definition of modified Bessel function of first kind $I_k$. Similarly, we prove when $k<0.$ \end{proof} \subsection{Poisson process of order $k$ (PPoK)} In this section, we recall the definition and some important properties of Poisson process of order k (PPoK). Kostadinova and Minkova (see \cite{Kostadinova2012}) introduced and studied the PPok. Let $ x_1, x_2, \cdots, x_k $ be non-negative integers and $\zeta_{k} = x_1 + x_2 + \dots + x_k, \; \Pi_{k}! = x_1!x_2!\dots x_k! $ and \begin{equation} \Omega(k,n) = \{ X=(x_1,\ x_2,\ \dots,\ x_k) | x_1 + 2x_2+ \dots + kx_k=n\}. 
\end{equation} Also, let $\{N^{k}(t)\}_{t\geq 0}$ represent the PPoK with rate parameter $\lambda t$; then the probability mass function (pmf) is given by \begin{equation}\label{pmf_ppok} p_{n}^{N^{k}}(t)=\mathbb{P}(N^{k}(t) = n) = \sum_{X\in\Omega(k,n)} e^{-k\lambda t} \frac{(\lambda t)^{\zeta_{k}}}{\Pi_{k}!}. \end{equation} The pmf of $N^{k}(t)$ satisfies the following differential-difference equations (see \cite{Kostadinova2012}) \begin{align}\label{SFPP} \frac{d}{dt}p_{n}^{N^{k}}(t) &= -k \lambda p_{n}^{N^{k}}(t)+\lambda\sum_{j=1}^{n\wedge k}p_{n-j}^{N^{k}}(t),\;\; n=1,2,\ldots \nonumber \\ \frac{d}{dt}p_{0}^{N^{k}}(t)& = -k \lambda p_{0}^{N^{k}}(t), \end{align} with initial condition $p_{0}^{N^{k}}(0) = 1$ and $p_{n}^{N^{k}}(0) = 0$ and $n\wedge k = \min\{k,n\}$. The characteristic function of the PPoK $N^{k}(t)$ is \begin{equation}\label{char_ppok} \phi_{N^{k}(t)}(u)= \mathbb{E}(e^{iuN^{k}(t)})= e^{- \lambda t (k- \sum_{j=1}^{k}e^{iuj})}, \end{equation} where $i=\sqrt{-1}$. The PPoK is a L\'evy process, so it is infinitely divisible, i.e. $\phi_{N^{k}(t)}(u) = (\phi_{N^{k}(1)}(u))^{t}.$ The L\'evy measure for the PPoK is easy to derive and is given by $$ \nu_{N^k}(x) = \lambda \sum_{j=1}^{k}\delta_j(x), $$ where $\delta_j$ is the Dirac delta function concentrated at $j$. The transition probabilities of the PPoK $\{N^{k}(t)\}_{t\geq 0}$ are also given by Kostadinova and Minkova \cite{Kostadinova2012}, \begin{equation} \mathbb{P}(N^{k}(t+\delta)=m|N^{k}(t) = n)= \begin{cases} 1-k\lambda \delta, & m=n;\\ \lambda \delta & m= n+i, i=1,2, \ldots,k;\\ 0 & \quad{\rm otherwise.}\\ \end{cases} \end{equation} The probability generating function (pgf) $G^{N^{k}}(s,t)$ is given by (see \cite{Kostadinova2012}) \begin{equation}\label{pgf_ppok} G^{N^{k}}(s,t) = e^{-\lambda t(k-\sum_{j=1}^{k} s^{j})}.
\end{equation} The mean, variance and covariance function of the PPoK are given by \begin{align} \mathbb{E}[N^{k}(t)] &= \frac{k(k+1)}{2}\lambda t; \nonumber \\ {\rm Var}[N^{k}(t)]& = \frac{k(k+1)(2k+1)}{6}\lambda t; \nonumber\\ {\rm Cov}[N^{k}(t), N^{k}(s)]& = \frac{k(k+1)(2k+1)}{6} \lambda (t\wedge s). \end{align} \subsection{Subordinators} Let $D_{f}(t)$ be a real-valued L\'evy process with non-decreasing sample paths whose Laplace transform has the form $$ \mathbb{E}[e^{-s D_{f}(t)}] = e^{-tf(s)}, $$ where $$ f(s) = bs + \int_{0}^{\infty} (1-e^{-x s}) \nu(dx),\;\; s>0,\; b\geq0,$$ is the integral representation of Bernstein functions (see \cite{Schilling2010}). The Bernstein functions are $C^{\infty}$, non-negative and such that $(-1)^{m}\frac{d^{m}}{dx^m}f(x) \leq 0$ for $m\geq 1$ in \cite{Schilling2010}. Here $\nu$ denotes the non-negative L\'evy measure on the positive half line such that $$ \int_{0}^{\infty}(x \wedge 1) \nu(dx) < \infty, \; \; \nu([0, \infty)) = \infty, $$ and $b$ is the drift coefficient. The right continuous inverse $E_{f}(t) = \inf\{u\geq0 : D_{f}(u) >t\}$ is the inverse and first exit time of $D_{f}(t)$, which is non-Markovian with non-stationary and non-independent increments. Next, we analyze some special cases of L\'evy subordinators with drift coefficient $b = 0$, that is, \begin{equation}\label{Levy_exponent} f(s) = \begin{cases} p\log(1+\frac{s}{\alpha}),\; p>0,\ \alpha >0, &$(gamma subordinator)$;\\ (s+\mu)^{\alpha}-\mu^{\alpha},\;\mu >0,\; 0<\alpha<1,&$(tempered $\alpha$-stable subordinator)$;\\ \delta(\sqrt{2s+\gamma^2}-\gamma),\; \gamma>0,\; \delta>0, & $(inverse Gaussian subordinator)$;\\ s^{\alpha},\; 0<\alpha<1, & $(stable subordinator)$. \end{cases} \end{equation} It is worth noting that among the subordinators given in \eqref{Levy_exponent}, all the integer-order moments of the stable subordinator are infinite.
\subsection{The space-fractional Poisson process} In this section, we discuss main properties of space-fractional Poisson process (SFPP). We also provide the L\'evy density for SFPP which is not discussed in the literature. The SFPP $N_{\alpha}(t)$ was introduced by (see \cite{Orsingher2012}), as follows \begin{equation} N_{\alpha}(t) = \begin{cases} N(D_{\alpha}(t)),\;t\geq 0, & 0<\alpha<1,\\ N(t),\;t\geq0, & \alpha=1. \end{cases} \end{equation} The probability generating function (PGF) of this process is of the form \begin{align}\label{PGF of space} G^{\alpha}(u,t)=\mathbb{E}u^{N_{\alpha}(t)}=e^{{-\lambda}^{\alpha}(1-u)^{\alpha}t},\; |u|\leq1, \; \alpha\in(0,1). \end{align} The PMF of SFPP is \begin{align}\label{space-fractional-PMF} P^{\alpha}(k,t) =\mathbb{P}\{N_{\alpha}(t)=k\} &=\frac{(-1)^k}{k!}\sum_{r=0}^{\infty}\frac{(-\lambda^\alpha)^{r} t^r}{r!}\frac{\Gamma(r\alpha+1)}{\Gamma(r\alpha-k+1)}\nonumber\\ & =\frac{(-1)^k}{k!} {}_{1}\psi_{1} \left[\begin{matrix} (1, \alpha); \\ (1-k, \alpha); \end{matrix}(-\lambda^\alpha t) \right], \end{align} where ${}_{h}\psi_{i}(z)$ is the Fox Wright function (see formula $(1.11.14)$ in \cite{Kilbas2006}). It was shown in \cite{Orsingher2012} that the PMF of the SFPP satisfies the following fractional differential-difference equations \begin{align}\label{SFPP} \frac{d}{dt}P^{\alpha}(k,t) &= -\lambda^\alpha (1-B)^\alpha P^{\alpha}(k,t),\;\; \alpha\in(0,1],\; k=1,2,\ldots \\ \frac{d}{dt}P^{\alpha}(0,t)& = -\lambda^\alpha P^{\alpha}(0,t), \end{align} with initial conditions \begin{equation}\label{intial condition} P(k,0) =\delta_{k}(0)= \begin{cases} 0, & k\neq0,\\ 1, & k=0. \end{cases} \end{equation} The fractional difference operator \begin{equation}\label{Backward_Operator} (1-B)^\alpha = \sum_{j=0}^{\infty}{\alpha \choose j}(-1)^jB^{j} \end{equation} is defined in \cite{Beran1994}, where $B$ is the backward shift operator. 
The characteristic function of SFPP is \begin{equation}\label{charc_SFPP} \mathbb{E}[e^{i\theta N_{\alpha}(t)}] = e^{-\lambda^{\alpha}(1-e^{i\theta})^{\alpha}t}. \end{equation} \begin{proposition} The L\'evy density $\nu_{N_{\alpha}}(x)$ of SFPP is given by \begin{equation}\label{space_levy} \nu_{N_{\alpha}}(x) = \lambda^{\alpha}\sum^{\infty}_{n=1}(-1)^{n+1} {\alpha \choose n} \delta_{n}(x). \end{equation} \end{proposition} \begin{proof} We use the L\'evy-Khintchine formula (see \cite{Sato1999}), \begin{align*} \int_{{\{ 0 \}}^{c}}&(e^{i\theta x}-1)\lambda^{\alpha}\sum^{\infty}_{n=1}(-1)^{n+1} {\alpha \choose n} \delta_{n}(x) dx \\ &=\lambda^{\alpha}\left[ \sum^{\infty}_{n=1}(-1)^{n+1} {\alpha \choose n} e^{i\theta n}+\sum_{n=0}^{\infty}(-1)^{n} {\alpha \choose n} -1 \right]\\ &= \lambda^{\alpha} \sum^{\infty}_{n=0}(-1)^{n+1} {\alpha \choose n} e^{i\theta n} = -\lambda^{\alpha}(1-e^{i\theta})^{\alpha}, \end{align*} which is the characteristic exponent of SFPP from equation \eqref{charc_SFPP}. \end{proof} \subsection{Tempered space-fractional Poisson process} The tempered space-fractional Poisson process (TSFPP) can be obtained by subordinating the homogeneous Poisson process $N(t)$ with the independent tempered stable subordinator $D_{\alpha, \mu}(t)$ (see \cite{Gupta2020}) \begin{equation} N_{\alpha, \mu}(t) = N(D_{\alpha, \mu}(t)),\; \alpha \in(0,1),\; \mu > 0. \end{equation} This process has finite integer-order moments due to the tempered $\alpha$-stable subordinator. The PMF of TSFPP is given by (see \cite{Gupta2020}) \begin{align}{\label{pmf_tem_space}} P^{\alpha, \mu}(k,t) & = (-1)^k e^{t\mu^{\alpha}} \sum_{m=0}^{\infty}\mu^m\sum_{r=0}^{\infty}\frac{(-t)^r}{r!}\lambda^{\alpha r-m}{\alpha r \choose m} {\alpha r -m \choose k}\nonumber\\ &=e^{t\mu^{\alpha}}\frac{(-1)^k}{k!}\sum_{m=0}^{\infty}\frac{\mu^m \lambda^{-m}}{m!} {}_{1}\psi_{1} \left[\begin{matrix} (1, \alpha); \\ (1-k-m, \alpha); \end{matrix}(-\lambda^\alpha t) \right] ,\;k=0,1,\ldots.
\end{align} The governing difference-differential equation is given by \begin{equation}\label{tempered-SFPP} \frac{d}{dt}P^{\alpha, \mu}(k,t) = - ((\mu + \lambda(1-B))^{\alpha} - \mu^{\alpha}) P^{\alpha, \mu}(k,t). \end{equation} The characteristic function of TSFPP, \begin{equation}\label{tempe_char} \mathbb{E}[e^{i\theta N_{\alpha, \mu}(t) }] = e^{- t((\mu + \lambda(1-e^{i\theta}))^{\alpha} - \mu^{\alpha})}. \end{equation} Using a standard conditioning argument, the mean and variance of TSFPP are given by \begin{align}\label{vari_tem} \mathbb{E}(N_{\alpha,\mu}(t)) = \lambda \alpha \mu^{\alpha-1}t,\;\; {\rm Var}(N_{\alpha,\mu}(t)) = \lambda \alpha \mu^{\alpha-1}t + \lambda^2 \alpha(1-\alpha) \mu^{\alpha-2}t. \end{align} \begin{proposition} The L\'evy density $\nu_{N_{\alpha, \mu}}(x)$ of TSFPP is \begin{equation}\label{levy_temp} \nu_{N_{\alpha, \mu}}(x) = \sum_{n=1}^{\infty}\mu^{\alpha-n}{\alpha \choose n}\lambda^{n} \sum_{l=1}^{n} {n \choose l}(-1)^{l+1} \delta_{l}(x),\; \mu > 0. \end{equation} \end{proposition} \begin{proof} Using \eqref{tempe_char}, the characteristic exponent of TSFPP is given by $F(\theta)=((\mu + \lambda(1-e^{i\theta}))^{\alpha} - \mu^{\alpha})$ . We find the L\'evy density with the help of L\'evy-Khintchine formula (see \cite{Sato1999}), \begin{align*} \int_{{\{ 0 \}}^{c}}&(e^{i\theta x}-1)\sum_{n=1}^{\infty}\mu^{\alpha-n}{\alpha \choose n}\lambda^{n} \sum_{l=1}^{n} {n \choose l}(-1)^{l+1} \delta_{l}(x) dx \\ &=\sum_{n=1}^{\infty}\mu^{\alpha-n}{\alpha \choose n}\lambda^{n} \left(\sum_{l=1}^{n} {n \choose l}(-1)^{l+1}e^{i\theta x}-\sum_{l=1}^{n} {n \choose l}(-1)^{l+1} \right)\\ &= \sum_{n=0}^{\infty}\mu^{\alpha-n}{\alpha \choose n}\lambda^{n} \sum_{l=0}^{n} {n \choose l}(-1)^{l+1} \delta_{l}(x)- \mu^{\alpha}\\ & = -((\mu + \lambda(1-e^{i\theta}))^{\alpha} - \mu^{\alpha}), \end{align*} hence proved. \end{proof} \section{Running average of PPoK} In this section, first we introduced the running average of PPoK and their main properties. 
These results will be used further to discuss the running average of SPoK. \begin{definition}[Running average of PPoK] We define the average process $N^{k}_{A}(t)$ by taking the time-scaled integral of the path of the PPoK, \begin{align} N^{k}_{A}(t) = \frac{1}{t}\int_{0}^{t}{N^{k}(s) ds}. \end{align} \end{definition} \noindent We can write the differential equation with initial condition $N^{k}_{A}(0) =0$, $$ \frac{d}{dt}(N^{k}_{A}(t))=\frac{1}{t}N^{k}(t) - \frac{1}{t^2}\int_{0}^{t}{N^{k}(s) ds}. $$ This shows that it has continuous sample paths of bounded total variation. We explore the compound Poisson representation and distribution properties of the running average of PPoK. The characteristic function of $N^{k}_{A}(t)$ is obtained by using Lemma 1 of \cite{Xia2018}. \begin{lemma} If $X_{t}$ is a L\'evy process and $Y_{t}$ its Riemann integral defined by \begin{align*} Y_{t} = \int_{0}^{t}{X_{s} ds}, \end{align*} then the characteristic functions of $X$ and $Y$ satisfy \begin{align} \phi_{Y(t)}(u) = \mathbb{E}[e^{iuY(t)}] = \exp\left( t \int_{0}^{1}\log{\phi_{X(1)}(tuz)} dz\right),\; u\in \mathbb{R}. \end{align} \end{lemma} \begin{proposition} The characteristic function of $N_A^k(t)$ is given by \begin{align}\label{cf_ppok} \phi_{N^{k}_{A}(t)}(u) = \exp\left(-t \lambda \left(k- \sum_{j=1}^{k}\frac{(e^{iuj}-1)}{iuj}\right)\right). \end{align} \end{proposition} \begin{proof} The result follows by applying Lemma $1$ to \eqref{char_ppok} after scaling by $1/t$. \end{proof} \begin{proposition}\label{running_average_cp} The running average process has a compound Poisson representation, such that \begin{align} Y(t) = \sum_{i=1}^{N(t)}X_{i}, \end{align} where $X_i$, $i = 1,2, \ldots$, are independent, identically distributed (iid) copies of a random variable $X$, independent of $N(t)$, and $N(t)$ is a Poisson process with intensity $k \lambda$.
Then \[ Y(t) \stackrel{law}{=} N^{k}_{A}(t). \] Further, the random variable $X$ has the following pdf \begin{equation}\label{pmf_X} f_{X}(x)= \sum_{i=1}^{k}p_{V_{i}}(x) f_{U_{i}}(x) = \frac{1}{k}\sum_{i=1}^{k}f_{U_{i}}(x), \end{equation} where $V_{i}$ follows the discrete uniform distribution over $(0, k)$ and $U_{i}$ follows the continuous uniform distribution over $(0,i),\;i=1,2,\ldots,k.$ \end{proposition} \begin{proof} The pdf of $U_i$ is $f_{U_{i}}(x) = \frac{1}{i}, \; 0 \leq x \leq i.$ Using \eqref{pmf_X}, the characteristic function of $X$ is given by \[ \phi_X(u) = \frac{1}{k}\sum_{j=1}^{k}\frac{(e^{iuj}-1)}{iuj}. \] For fixed $t$, the characteristic function of $Y(t)$ is \begin{equation} \phi_{Y(t)}(u)= e^{-k \lambda t (1-\phi_{X}(u))} = e^{-t \lambda \left(k- \sum_{j=1}^{k}\frac{(e^{iuj}-1)}{iuj}\right)}, \end{equation} which is equal to the characteristic function of the running average of PPoK given in \eqref{cf_ppok}. Hence, by uniqueness of the characteristic function, the result follows. \end{proof} \noindent Using the definition \begin{equation}\label{M_char} m_{r}= \mathbb{E}[X^{r}] = (-i)^r\frac{d^{r}\phi_{X}(u)}{du^{r}}\bigg|_{u=0}, \end{equation} the first two moments for the random variable $X$ given in Proposition \ref{running_average_cp} are $m_{1} = \frac{(k+1)}{4}$ and $m_2= \frac{1}{18}[(k+1)(2k+1)]$. Further, using the mean, variance and covariance of the compound Poisson process, we have \begin{align*} \mathbb{E}[N^{k}_{A}(t)]&=\mathbb{E}[N(t)]\mathbb{E}[X]=\frac{k(k+1)}{4}\lambda t;\\ {\rm Var}[N^{k}_{A}(t)]& = \mathbb{E}[N(t)]\mathbb{E}[X^2]=\frac{1}{18}[k(k+1)(2k+1)]\lambda t;\\ {\rm Cov}[N^{k}_{A}(t),N^{k}_{A}(s)] &= \mathbb{E}[N^{k}_{A}(t)N^{k}_{A}(s)] -\mathbb{E}[N^{k}_{A}(t)]\mathbb{E}[N^{k}_{A}(s)]\\ &=\mathbb{E}[N^{k}_{A}(s)]\mathbb{E}[N^{k}_{A}(t-s)]-\mathbb{E}[N^{k}_{A}(s)^{2}]-\mathbb{E}[N^{k}_{A}(t)]\mathbb{E}[N^{k}_{A}(s)]\\ & = \frac{1}{18}[k(k+1)(2k+1)]\lambda s -\frac{k^2(k+1)^{2}}{16}\lambda^2 s^2, \; s<t.
\end{align*} \begin{remark} Putting $k=1$, the running average of PPoK $N^{k}_{A}(t)$ reduces to running average of standard Poisson process $N_{A}(t)$ (see Appendix in \cite{Xia2018}). \end{remark} \begin{remark} The mean and variance of PPoK and running average of PPoK satisfy, $\mathbb{E}[N^{k}_{A}(t)]/ \mathbb{E}[N^{k}(t)] = \frac{1}{2}$ and $\mathrm{Var}[N^{k}_{A}(t)]/\mathrm{Var}[N^{k}(t)] = \frac{1}{3}$. \end{remark} \noindent Next we discuss the long-range dependence (LRD) property of running average of PPoK. We recall the definition of LRD for a non-stationary process. \begin{definition}[Long range dependence (LRD)]\label{LRD_definition} Let $X(t)$ be a stochastic process which has correlation function for $s \geq t$ for fixed $s$, that satisfies, $$ c_{1}(s) t^{-d} \leq {\rm Cor}(X(t),X(s)) \leq c_{2}(s) t^{-d},$$ for large $t$, $d > 0$, $c_{1}(s) > 0$ and $c_2(s) > 0$. That is, $$ \lim_{t\to\infty} \frac{{\rm Cor}(X(t),X(s))}{t^{-d}} = c(s) $$ for some $c(s) > 0$ and $d > 0$. We say that if $d \in (0, 1)$ then X(t) has the LRD property and if $d \in (1, 2)$ it has short-range dependence (SRD) property \cite{Maheshwari2016}. \end{definition} \begin{proposition} The running average of PPoK has LRD property. \end{proposition} \begin{proof} Let $0 \leq s< t < \infty$, then the correlation function for running average of PPoK $N_A^{k}(t)$ is \begin{align*} {\rm Cor}(N^{k}_{A}(t), N^{k}_{A}(s)) &=\frac{\left(8(2k+1)-9(k+1)k\lambda s\right)s^{1/2}t^{-1/2}}{8(2k+1)}. \end{align*} Then for $d=1/2$, it follows \begin{align*} \lim_{t\to\infty} \frac{{\rm Cor}(N^{k}_{A}(t), N^{k}_{A}(s))}{t^{-d}}& = \frac{\left(8(2k+1)-9(k+1)k \lambda s\right)s^{1/2}}{8(2k+1)}= c(s). \end{align*} \end{proof} \section{Skellam process of order $K$ (SPoK)} In this section, we introduce and study Skellam process of order $K$ (SPoK). \begin{definition}[SPoK] Let $ N^{k}_{1}(t)$ and $N^{k}_{2}(t)$ be two independent PPoK with intensities $ \lambda_{1} >0$ and $ \lambda_{2} >0$. 
The stochastic process \[ S^{k}(t)= N^{k}_{1}(t) - N^{k}_{2}(t) \] is called a Skellam process of order $K$ (SPoK). \end{definition} \begin{proposition} The marginal distribution $R_{m}(t)=\mathbb{P}(S^{k}(t)=m)$ of SPoK $S^{k}(t)$ is given by \begin{equation}\label{pmf_spok} R_{m}(t)=e^{-kt(\lambda_1+\lambda_2)}{\left(\frac{\lambda_1}{\lambda_2}\right)}^{m/2}I_{|m|}(2tk\sqrt{\lambda_1 \lambda_2}),\; m\in \mathbb{Z}. \end{equation} \end{proposition} \begin{proof} For $m\geq 0$, using the pmf of PPoK given in \eqref{pmf_ppok}, it follows that \begin{align*} R_{m}(t) &= \sum^{\infty}_{n=0}\mathbb{P}(N^{k}_{1}(t)=n+m)\mathbb{P}(N^{k}_{2}(t)=n)\mathbb{I}_{m\geq 0}\\ &= \sum_{n=0}^{\infty}\left( \sum_{X=\Omega(k,n+m)} e^{-k\lambda_1 t} \frac{(\lambda_1 t)^{\zeta_{k}}}{\Pi_{k}!}\right)\left( \sum_{X=\Omega(k,n)} e^{-k\lambda_2 t} \frac{(\lambda_2 t)^{\zeta_{k}}}{\Pi_{k}!}\right). \end{align*} Setting $x_{i}= n_{i}$ and $n=x+\sum_{i=1}^{k}(i-1)n_{i}$, we have \begin{align*} R_{m}(t) &=e^{-kt(\lambda_{1} +\lambda_{2})}\sum_{x=0}^{\infty}\frac{(\lambda_2 t)^{x}}{x!}\frac{(\lambda_1 t)^{m+x}}{(m+x)!}\left(\sum_{n_{1}+n_{2}+\ldots +n_{k}=m+x} \frac{(m+x)!}{n_1!\,n_2!\cdots n_k!}\right)\left(\sum_{n_{1}+n_{2}+\ldots +n_{k}=x} \frac{x!}{n_1!\,n_2!\cdots n_k!}\right)\\ &= e^{-kt(\lambda_{1} +\lambda_{2})}\sum_{x=0}^{\infty}\frac{(\lambda_2 t)^{x}}{x!}\frac{(\lambda_1 t)^{m+x}}{(m+x)!} k^{m+x}k^{x}, \end{align*} using the multinomial theorem and the modified Bessel function given in \eqref{Modi_Bessel}. Similarly, it follows for $m<0$. \end{proof} \noindent In the next proposition, we prove the normalizing condition for SPoK.
\begin{proposition} The pmf of $S^{k}(t)$ satisfies the following normalizing condition \[ \sum_{m=-\infty}^{\infty} R_{m}(t) = 1. \] \end{proposition} \begin{proof} Using the property of the modified Bessel function of the first kind \[ \sum_{y=-\infty}^{\infty} \left(\frac{\theta_1}{\theta_2}\right)^{y/2}I_{|y|}(2a\sqrt{\theta_1 \theta_2}) = e^{a(\theta_1 + \theta_2 )}, \] and putting this result in \eqref{pmf_spok}, we obtain \[ \sum_{m=-\infty}^{\infty} R_{m}(t) = e^{-kt(\lambda_1+\lambda_2)}e^{kt(\lambda_1+\lambda_2)} =1. \] \end{proof} \begin{proposition} The L\'evy measure for SPoK is \[ \nu_{S^k}(x) = \lambda_1 \sum_{j=1}^{k}\delta_j(x) + \lambda_2 \sum_{j=1}^{k}\delta_{-j}(x). \] \end{proposition} \begin{proof} The proof follows by using the independence of the two PPoK used in the definition of SPoK. \end{proof} \begin{remark} Using \eqref{pgf_ppok}, the pgf of SPoK is given by \begin{equation}\label{pgf_spok} G^{S^{k}}(s,t) = \sum^{\infty}_{m=-\infty} s^{m}R_{m}(t)= e^{-t\left(k(\lambda_1 + \lambda_2) -\lambda_1\sum_{j=1}^{k}s^{j} -\lambda_2 \sum_{j=1}^{k}s^{-j}\right)}. \end{equation} \noindent Further, the characteristic function of SPoK is given by \begin{equation}\label{char_spok} \phi_{S^{k}(t)}(u) = e^{-t[k(\lambda_1 + \lambda_2) -\lambda_1\sum_{j=1}^{k}e^{iju} -\lambda_2 \sum_{j=1}^{k}e^{-iju}]}. \end{equation} \end{remark} \subsection{SPoK as a pure birth and death process} In this section, we provide the transition probabilities of SPoK at time $t + \delta$, given that we started at time $t$. Over such a short interval of length $\delta \rightarrow 0$, it is nearly impossible to observe more than $k$ events; in fact, the probability of seeing more than $k$ events is $o(\delta)$.
\begin{proposition} The transition probabilities of SPoK are given by \begin{equation} \mathbb{P}(S^{k}(t+\delta)=m|S^{k}(t) = n)= \begin{cases} \lambda_1 \delta + o(\delta), & m >n,\;m= n+i, i=1,2, \ldots,k;\\ \lambda_2 \delta + o(\delta), & m< n,\; m= n-i, i=1,2, \ldots,k;\\ 1-k\lambda_1 \delta-k\lambda_2 \delta + o(\delta), & m=n.\\ \end{cases} \end{equation} Basically, at most k events can occur in a very small interval of time $\delta$. And even though the probability for more than k event is non-zero, it is negligible. \end{proposition} \begin{proof} Note that for $i=1,2,\cdots,k$, we have \begin{align*} \mathbb{P}(S^{k}(t+\delta)= n+i|S^{k}(t) = n) &= \sum_{j=1}^{k-i}\mathbb{P}(\mbox{the first process has i+j arrivals and the second process has j arrivals})\\ & + \mathbb{P}(\mbox{the first process has i arrivals and the second process has 0 arrivals})\\ & = \sum_{j=0}^{k-i} (\lambda_1\delta + o(\delta)) \times (\lambda_2\delta + o(\delta)) + (\lambda_1\delta + o(\delta)) \times (1-k\lambda_2\delta + o(\delta)) \\ & = \lambda_1\delta + o(\delta). \end{align*} Similarly, for $i=1,2,\cdots,k$, we have \begin{align*} \mathbb{P}(S^{k}(t+\delta)= n-i|S^{k}(t) = n) &= \sum_{j=1}^{k-i}\mathbb{P}(\mbox{the first process has j arrivals and the second process has i+j arrivals})\\ & + \mathbb{P}(\mbox{the first process has 0 arrivals and the second process has i arrivals})\\ & = \sum_{j=0}^{k-i} (\lambda_1\delta + o(\delta)) \times (\lambda_2\delta + o(\delta)) + (1-k\lambda_1\delta + o(\delta)) \times (\lambda_2\delta + o(\delta)) \\ & = \lambda_2\delta + o(\delta). 
\end{align*} Further, \begin{align*} \mathbb{P}(S^{k}(t+\delta)= n|S^{k}(t) = n) &= \sum_{j=1}^{k}\mathbb{P}(\mbox{the first process has j arrivals and the second process has j arrivals})\\ & + \mathbb{P}(\mbox{the first process has 0 arrivals and the second process has 0 arrivals})\\ & = \sum_{j=0}^{k} (\lambda_1\delta + o(\delta)) \times (\lambda_2\delta + o(\delta)) + (1-k\lambda_1\delta + o(\delta)) \times (1-k\lambda_2\delta + o(\delta)) \\ & = 1-k\lambda_1\delta - k\lambda_2\delta + o(\delta). \end{align*} \end{proof} \begin{remark} The pmf $R_{m}(t)$ of SPoK satisfies the following difference-differential equation \begin{align}\label{diff_spok} \frac{d}{dt} R_{m}(t) &= -k(\lambda_{1}+\lambda_2)R_{m}(t)+\lambda_{1} \sum_{j=1}^{k} R_{m-j}(t) + \lambda_{2} \sum_{j=1}^{k} R_{m+j}(t)\\ &= -\lambda_{1}\sum_{j=1}^{k}(1-B^{j}) R_{m}(t) - \lambda_{2} \sum_{j=1}^{k}(1-F^{j})R_{m}(t),\;\; m\in \mathbb{Z}, \nonumber \end{align} with initial condition $R_{0}(0) = 1$ and $R_{m}(0) = 0$ for $m \neq 0$. Let $B$ be the backward shift operator defined in \eqref{Backward_Operator} and $F$ be the forward shift operator defined by $F^jX(t) = X(t+j)$, such that $(1-F)^\alpha = \sum_{j=0}^{\infty}{\alpha \choose j}(-F)^{j}$. Multiplying by $s^{m}$ and summing over all $m$ in \eqref{diff_spok}, we get the following differential equation for the pgf \[ \frac{d}{dt}G^{S^{k}}(s,t) = \left(-k(\lambda_1 + \lambda_2) +\lambda_1\sum_{j=1}^{k}s^{j} +\lambda_2 \sum_{j=1}^{k}s^{-j}\right)G^{S^{k}}(s,t). \] \end{remark} \noindent The mean, variance and covariance of SPoK can be easily calculated by using the pgf, \begin{align*} \mathbb{E}[S^{k}(t)]& =\frac{k(k+1)}{2}(\lambda_1 -\lambda_2) t;\\ {\rm Var}[S^{k}(t)]& =\frac{1}{6}\left[k(k+1)(2k+1)\right](\lambda_{1} +\lambda_{2}) t;\\ {\rm Cov}[S^{k}(t),S^{k}(s)] &= \frac{1}{6}\left[k(k+1)(2k+1)\right](\lambda_1 +\lambda_2)s, \;\; s<t. \end{align*} \noindent Next we show the LRD property for SPoK.
\begin{proposition} The SPoK has the LRD property defined in Definition \ref{LRD_definition}. \end{proposition} \begin{proof} The correlation function of SPoK is ${\rm Cor}(S^k(t), S^k(s)) = s^{1/2}t^{-1/2}$ for $s<t$. Hence, taking $d = 1/2$, \[ \lim_{t\to\infty} \frac{{\rm Cor}(S^k(t), S^k(s))}{t^{-1/2}} = \lim_{t\to\infty}\frac{s^{1/2} t^{-1/2}}{t^{-1/2}} = s^{1/2} = c(s). \] Hence SPoK exhibits the LRD property. \end{proof} \section{Running average of SPoK} In this section, we introduce and study a new stochastic L\'evy process, the running average of SPoK. \begin{definition} The following stochastic process, defined by taking the time-scaled integral of the path of the SPoK, \begin{align} S^{k}_{A}(t) = \frac{1}{t}\int_{0}^{t}{S^{k}(s) ds}, \end{align} is called the running average of SPoK. \end{definition} \noindent Next we provide the compound Poisson representation of the running average of SPoK. \begin{proposition} The characteristic function $\phi_{S^{k}_{A}(t)}(u) = \mathbb{E}[e^{iu S^{k}_{A}(t)}]$ of $S^{k}_{A}(t)$ is given by \begin{align}\label{char_raspok} \phi_{S^k_A(t)}(u) = \exp\left[-kt \left\{ \lambda_1 \left(1- \frac{1}{k}\sum_{j=1}^{k}\frac{(e^{iuj}-1)}{iuj} \right)+\lambda_2\left(1- \frac{1}{k}\sum_{j=1}^{k}\frac{(1-e^{-iuj})}{iuj}\right) \right\} \right],\; u\in \mathbb{R}. \end{align} \end{proposition} \begin{proof} The result follows by applying Lemma 1 to equation \eqref{char_spok} after scaling by $1/t$. \end{proof} \begin{remark} It is easily observed that equation \eqref{char_raspok} has a removable singularity at $u=0$. To remove that singularity we can define $\phi_{S^{k}_{A}(t)}(0) =1$. \end{remark} \begin{proposition} Let $Y(t)$ be a compound Poisson process \begin{align} Y(t) = \sum_{n=1}^{N(t)}J_{n}, \end{align} where $N(t)$ is a Poisson process with rate parameter $k(\lambda_1 + \lambda_2) >0$ and $\{J_{n}\}_{n \geq 1}$ are iid random variables, with mixed double uniform distribution $p_{J_1}$, which are independent of $N(t)$.
Then \[ Y(t) \stackrel{law}{=} S^{k}_{A}(t). \] \end{proposition} \begin{proof} Rearranging $\phi_{S^{k}_{A}(t)}(u)$ given in \eqref{char_raspok}, we get \[ \phi_{S^{k}_{A}(t)}(u) = \exp \left[(\lambda_1 + \lambda_2)kt\left(\frac{\lambda_1}{\lambda_1 + \lambda_2}\frac{1}{k}\sum_{j=1}^{k}\frac{(e^{iuj}-1)}{iuj} + \frac{\lambda_2}{\lambda_1 + \lambda_2}\frac{1}{k}\sum_{j=1}^{k}\frac{(1-e^{-iuj})}{iuj} -1 \right)\right]. \] The random variable $J_{1}$, being mixed double uniformly distributed, has density \begin{equation}\label{pmf_J1} p_{J_1}(x)= \sum_{i=1}^{k}p_{V_{i}}(x)f_{U_{i}}(x) = \frac{1}{k}\sum_{i=1}^{k}f_{U_{i}}(x), \end{equation} where $V_{j}$ follows the discrete uniform distribution over $(0,k)$ with pmf $p_{V_{j}}(x)=\mathbb{P}(V_{j}=x) = \frac{1}{k}, \;\; j=1,2,\ldots k,$ and the $U_{i}$ are doubly uniform distributed random variables with density \[ f_{U_{i}}(x) = \frac{1}{i}\left((1-w)1_{[-i,0]}(x) + w 1_{[0, i]}(x)\right), \;\; -i \leq x \leq i, \] where $w = \lambda_1/(\lambda_1 + \lambda_2)$. Further, $0<w<1$ is a weight parameter and $1(\cdot)$ is the indicator function. We obtain the characteristic function of $J_1$ by using the Fourier transformation of \eqref{pmf_J1}, \[ \phi_{J_1} (u) =\frac{\lambda_1}{\lambda_1 + \lambda_2}\frac{1}{k}\sum_{j=1}^{k}\frac{(e^{iuj}-1)}{iuj} + \frac{\lambda_2}{\lambda_1 + \lambda_2}\frac{1}{k}\sum_{j=1}^{k}\frac{(1-e^{-iuj})}{iuj}. \] The characteristic function of $Y(t)$ is \begin{equation} \phi_{Y(t)}(u)= e^{-k (\lambda_1 +\lambda_2) t (1-\phi_{J_1}(u))}; \end{equation} putting the characteristic function $\phi_{J_1}(u)$ in the above expression yields the characteristic function of $S^{k}_{A}(t)$, which completes the proof. \end{proof} \begin{remark} The $q$-th order moments of $J_1$ can be calculated by using \eqref{M_char} and also using the Taylor series expansion of the characteristic function $\phi_{J_1}(u)$ around $0$, such that $$ \frac{(e^{iuj}-1)}{iuj} = 1+\sum_{r=1}^{\infty}\frac{(iuj)^r}{(r+1)!}\;\;\&\;\; \frac{(1-e^{-iuj})}{iuj} = 1+\sum_{r=1}^{\infty}\frac{(-iuj)^r}{(r+1)!}.
$$ We have $m_{1} = \frac{(k+1)(\lambda_1-\lambda_2)}{4(\lambda_1 +\lambda_2)}$ and $m_{2} = \frac{1}{18}[(k+1)(2k+1)]$. Further, the mean, variance and covariance of running average of SPoK are \begin{align*} \mathbb{E}[S^{k}_{A}(t)]&=\mathbb{E}[N(t)]\mathbb{E}[J_1]=\frac{k(k+1)}{4}(\lambda_1 - \lambda_2)t\\ {\rm Var}[S^{k}_{A}(t)]& = \mathbb{E}[N(t)]\mathbb{E}[J_1^2]=\frac{1}{18}[k(k+1)(2k+1)] (\lambda_1 + \lambda_2)t\\ {\rm Cov}[S^{k}_{A}(t), S^{k}_{A}(s)] &= \frac{1}{18}[k(k+1)(2k+1)](\lambda_1-\lambda_2) s -\frac{k^2(k+1)^{2}}{16}(\lambda_1-\lambda_2)^2 s^2. \end{align*} \end{remark} \begin{corollary} For $\lambda_2 = 0$ the running average of SPoK is same as the running average of PPoK, i.e. $$\phi_{S^k_A(t)}(u) = \phi_{N^k_A(t)}(u). $$ \end{corollary} \begin{corollary} For $k=1$ this process behave like the running average of Skellam process. \end{corollary} \begin{corollary} The ratio of mean and variance of SPoK and running average of SPoK are $1/2$ and $1/3$ respectively. \end{corollary} \section{Time-changed Skellam process of order K} We consider time-changed SPoK, which can be obtained by subordinating SPoK $S^{k}(t)$ with the independent L\'evy subordinator $D_{f}(t)$ satisfying $\mathbb{E}[D_{f}(t)]^c < \infty$ for all $c>0$. The time-changed SPoK is defined by $$ Z_{f}(t) = S^{k}(D_{f}(t)),\;\; t\geq 0. $$ Note that the stable subordinator doesn't satisfy the condition $\mathbb{E}[D_{f}(t)]^c < \infty$. The MGF of time-changed SPoK $Z_{f}(t)$ is given by $$ \mathbb{E}[e^{\theta Z_{f}(t)}] = e^{-t f(k(\lambda_1 + \lambda_2) -\lambda_1\sum_{j=1}^{k}e^{\theta j} -\lambda_2 \sum_{j=1}^{k}e^{-\theta j})}. $$ \begin{theorem} The pmf $H_{f}(t) = \mathbb{P} (Z_{f}(t) = m)$ of time-changed SPoK is given by \begin{equation}\label{pmf_tcspok} H_{f}(t) = \sum_{x=\max(0, -m)}^{\infty} \frac{(k \lambda_1)^{m+x} (k \lambda_2)^{x} }{(m+x)! x!} \mathbb{E}[e^{-k(\lambda_1 +\lambda_2)D_{f}(t)} D_{f}^{2m+x}(t)],\; m \in \mathbb{Z}. 
\end{equation} \end{theorem} \begin{proof} Let $h_{f}(x,t)$ be the probability density function of L\'evy subordinator. Using conditional argument \begin{align*} H_{f}(t) &= \int_{0}^{\infty}R_{m}(y) h_{f}(y,t) dy\\ & = \int_{0}^{\infty}e^{-ky(\lambda_1+\lambda_2)}{\left(\frac{\lambda_1}{\lambda_2}\right)}^{m/2}I_{|m|}(2yk\sqrt{\lambda_1 \lambda_2})h_{f}(y,t) dy\\ &=\sum_{x=\max(0, -m)}^{\infty} \frac{(k \lambda_1)^{m+x} (k \lambda_2)^{x} }{(m+x)! x!} \int_{0}^{\infty} e^{-k(\lambda_1 +\lambda_2)y} y^{2m+x} h_{f}(y,t) dy\\ & = \sum_{x=\max(0, -m)}^{\infty} \frac{(k \lambda_1)^{m+x} (k \lambda_2)^{x} }{(m+x)! x!} \mathbb{E}[e^{-k(\lambda_1 +\lambda_2)D_{f}(t)} D_{f}^{2m+x}(t)]. \end{align*} \end{proof} \begin{proposition} The state probability $H_{f}(t)$ of time-changed SPoK satisfies the normalizing condition $$ \sum_{m=-\infty}^{\infty} H_{f}(t) =1. $$ \end{proposition} \begin{proof} Using \eqref{pmf_tcspok}, we have \begin{align*} \sum_{m=-\infty}^{\infty} H_{f}(t)& = \sum_{m=-\infty}^{\infty}\int_{0}^{\infty}e^{-ky(\lambda_1+\lambda_2)}{\left(\frac{\lambda_1}{\lambda_2}\right)}^{m/2}I_{|m|}(2yk\sqrt{\lambda_1 \lambda_2})h_{f}(y,t) dy\\ & = \int_{0}^{\infty} e^{-ky(\lambda_1+\lambda_2)}e^{ky(\lambda_1+\lambda_2)} h_{f}(y,t) dy\\ & = \int_{0}^{\infty}h_{f}(y,t) dy =1. \end{align*} \end{proof} \noindent The mean and covarience of time changed SPoK are given by, \begin{align*} \mathbb{E}[Z_{f}(t)]&=\frac{k(k+1)}{2}(\lambda_1 -\lambda_2) \mathbb{E}[D_{f}(t)]\\ {\rm Cov}[Z_{f}(t),Z_{f}(s)] &= \frac{1}{6}[k(k+1)(2k+1)](\lambda_1 +\lambda_2)) \mathbb{E}[D_{f}(s)]+\frac{k^2(k+1)^2}{4}(\lambda_1-\lambda_2)^2 {\rm Var}[D_{f}(s)]. \end{align*} \section{Space fractional Skellam process and tempered space fractional Skellam process} In this section, we introduce time-changed Skellam processes where time time-change are stable subordinator and tempered stable subordinator. 
These processes give the space-fractional version of the Skellam process similar to the time-fractional version of the Skellam process introduced in \cite{Kerss2014}. \subsection{The space-fractional Skellam process} In this section, we introduce space-fractional Skellam processes (SFSP). Further, for introduced processes, we study main results such as state probabilities and governing difference-differential equations of marginal PMF. \begin{definition}[SFSP] Let $ N_{1}(t)$ and $N_{2}(t)$ be two independent homogeneous Poison processes with intensities $ \lambda_{1} >0$ and $ \lambda_{2} >0,$ respectively. Let $D_{\alpha_1}(t)$ and $D_{\alpha_2}(t)$ be two independent stable subordinators with indices $\alpha_{1} \in (0,1)$ and $\alpha_{2} \in (0,1)$ respectively. These subordinators are independent of the Poisson processes $ N_{1}(t)$ and $N_{2}(t)$. The subordinated stochastic process $$ S_{\alpha_1, \alpha_2}(t)= N_{1}(D_{\alpha_1}(t)) - N_{2}(D_{\alpha_2}(t)) $$ is called a SFSP. \end{definition} \noindent Next we derive the moment generating function (MGF) of SFSP. We use the expression for marginal (PMF) of SFPP given in \eqref{space-fractional-PMF} to obtain the marginal PMF of SFSP. \begin{align*} M_{\theta}(t)=\mathbb{E}[e^{\theta S_{\alpha_1, \alpha_2}(t)}] &= e^{\theta(N_{1}(D_{\alpha_1}(t))- N_{2}(D_{\alpha_2}(t)))} = e^{-t[\lambda_{1}^{\alpha_{1}}(1-e^{\theta})^{\alpha_{1}}+\lambda_{2}^{\alpha_{2}}(1-e^{-\theta})^{\alpha_{2}}]},\;\; \theta \in \mathbb{R}. \end{align*} In the next result, we obtain the state probabilities of the SFSP. 
\begin{theorem} The PMF $H_{k}(t)=\mathbb{P}(S_{\alpha_1, \alpha_2}(t)=k)$ of SFSP is given by \begin{align} H_{k}(t)&= \sum_{n=0}^{\infty}\frac{(-1)^k}{n!(n+k)!}\left({}_{1}\psi_{1} \left[\begin{matrix} (1, \alpha_1); \\ (1-n-k, \alpha_1); \end{matrix}(-{\lambda_1}^{\alpha_1} t) \right]\right) \left({}_{1}\psi_{1} \left[\begin{matrix} (1, \alpha_2); \\ (1-n, \alpha_2); \end{matrix}(-{\lambda_2}^{\alpha_2} t) \right]\right)\mathbb{I}_{k\geq 0}\nonumber\\ &+\sum_{n=0}^{\infty}\frac{(-1)^{|k|}}{n!(n+|k|)!}\left({}_{1}\psi_{1} \left[\begin{matrix} (1, \alpha_1); \\ (1-n, \alpha_1); \end{matrix}(-{\lambda_1}^{\alpha_1} t) \right]\right) \left({}_{1}\psi_{1} \left[\begin{matrix} (1, \alpha_2); \\ (1-n-|k|, \alpha_2); \end{matrix}(-{\lambda_2}^{\alpha_2} t) \right]\right)\mathbb{I}_{k< 0} \end{align} for $k\in \mathbb{Z}$. \end{theorem} \begin{proof} Note that $N_{1}(D_{\alpha_1}(t))$ and $N_{2}(D_{\alpha_2}(t))$ are independent, hence \begin{align*} \mathbb{P}(S_{\alpha_1, \alpha_2}(t)=k) &= \sum^{\infty}_{n=0}\mathbb{P}(N_{1}(D_{\alpha_1}(t))=n+k)\mathbb{P}(N_{2}(D_{\alpha_2}(t))=n)\mathbb{I}_{k\geq 0}\\ &+ \sum^{\infty}_{n=0}\mathbb{P}(N_{1}(D_{\alpha_1}(t))=n)\mathbb{P}(N_{2}(D_{\alpha_2}(t))=n+|k|)\mathbb{I}_{k<0}. \end{align*} Using \eqref{space-fractional-PMF}, the result follows. \end{proof} \noindent In the next theorem, we discuss the governing differential-difference equation of the marginal PMF of SFSP. 
\begin{theorem} The marginal distribution $H_{k}(t)= \mathbb{P}(S_{\alpha_1, \alpha_2}(t)=k)$ of SFSP satisfy the following differential difference equations \begin{align} \frac{d}{dt}H_{k}(t) &= -\lambda_{1}^{\alpha_{1}} (1-B)^{\alpha_{1}} H_{k}(t)-\lambda_{2}^{\alpha_{2}} (1-F)^{\alpha_{2}} H_{k}(t),\;\; k \in \mathbb{Z} \\ \frac{d}{dt}H_{0}(t)& = -\lambda_{1}^{\alpha_{1}}H_{0}(t)-\lambda_{2}^{\alpha_{2}}H_{1}(t), \end{align} with initial conditions $H_{0}(0)=1$ and $H_{k}(0)=0$ for $k\neq0.$ \end{theorem} \begin{proof} The proof follows by using probability generating function. \end{proof} \begin{remark} The MGF of the SFSP solves the differential equation \begin{align} \frac{dM_{\theta}(t)}{dt} = -M_{\theta}(t)(\lambda_{1}^{\alpha_{1}}(1-e^{\theta})^{\alpha_{1}}+\lambda_{2}^{\alpha_{2}}(1-e^{-\theta})^{\alpha_{2}}). \end{align} \end{remark} \begin{proposition} The L\'evy density $\nu_{S_{\alpha_1, \alpha_2}}(x)$ of SFSP is given by \begin{align*}\label{skellam_levy-I} \nu_{S_{\alpha_1, \alpha_2}} (x) = {\lambda_1}^{\alpha_1}\sum^{\infty}_{n_1=1}(-1)^{n_1+1} {\alpha_1 \choose n_1} \delta_{n_1}(x)+\lambda_2^{\alpha_2}\sum^{\infty}_{n_2=1}(-1)^{n_2+1} {\alpha_2 \choose n_2} \delta_{-n_2}(x). \end{align*} \end{proposition} \begin{proof} Substituting the L\'evy densities $\nu_{N_{\alpha_1}}(x)$ and $\nu_{N_{\alpha_2}}(x)$ of $N_{1}(D_{\alpha_1}(t))$ and $N_{2}(D_{\alpha_2}(t))$, respectively from the equation \eqref{space_levy}, we obtain $$\nu_{S_{\alpha_1, \alpha_2}} (x) = \nu_{N_{\alpha_1}}(x) + \nu_{N_{\alpha_2}}(x),$$ which gives the desired result. \end{proof} \subsection{Tempered space-fractional Skellam process (TSFSP)} In this section, we present the tempered space-fractional Skellam process (TSFSP). We discuss the corresponding fractional difference-differential equations, marginal PMFs and moments of this process. \begin{definition}[TSFSP] The TSFSP is obtained by taking the difference of two independent tempered space fractional Poisson processes. 
Let $D_{\alpha_1, \mu_1}(t)$, $D_{\alpha_2, \mu_2}(t)$ be two independent TSS (see \cite{Rosinski2007}) and $N_1(t), N_2(t)$ be two independent Poisson processes which are independent of the TSS. Then the stochastic process \[ S^{\mu1, \mu2}_{\alpha_1,\alpha_2}(t) = N_1(D_{\alpha_1, \mu_1}(t))-N_2(D_{\alpha_2, \mu_2}(t)) \] is called the TSFSP. \end{definition} \begin{theorem} The PMF $H_{k}^{\mu1, \mu2}(t)=\mathbb{P}(S^{\mu1, \mu2}_{\alpha_1,\alpha_2}(t)=k)$ is given by \begin{align} H_{k}^{\mu1, \mu2}(t)&=\sum_{n=0}^{\infty}\frac{(-1)^{k}}{n!(n+k)! }e^{t(\mu_{1}^{\alpha_{1}}+\mu_{2}^{\alpha_{2}})}\left(\sum_{m=0}^{\infty}\frac{\mu_1^m \lambda_1^{-m}}{m!} {}_{1}\psi_{1} \left[\begin{matrix} (1, \alpha_1); \\ (1-n-k-m, \alpha_1); \end{matrix}(-{\lambda_1}^{\alpha_1} t) \right]\right)\times\nonumber\\ & \left(\sum^{\infty}_{l=0}\frac{{\mu_2}^{l}{\lambda_2}^{-l}}{l!}{}_{1}\psi_{1} \left[\begin{matrix} (1, \alpha_2); \\ (1-l-n, \alpha_2); \end{matrix}(-{\lambda_2}^{\alpha_2} t) \right]\right) \end{align} when $k\geq 0$ and similarly for $k<0$, \begin{align} H_{k}^{\mu1, \mu2}(t)&=\sum_{n=0}^{\infty}\frac{(-1)^{|k|}}{n!(n+|k|)! }e^{t(\mu_{1}^{\alpha_{1}}+\mu_{2}^{\alpha_{2}})}\left(\sum_{m=0}^{\infty}\frac{\mu_1^m \lambda_1^{-m}}{m!} {}_{1}\psi_{1} \left[\begin{matrix} (1, \alpha_1); \\ (1-n-m, \alpha_1); \end{matrix}(-{\lambda_1}^{\alpha_1} t) \right]\right)\times\nonumber\\ & \left(\sum^{\infty}_{l=0}\frac{{\mu_2}^{l}{\lambda_2}^{-l}}{l!}{}_{1}\psi_{1} \left[\begin{matrix} (1, \alpha_2); \\ (1-l-n-|k|, \alpha_2); \end{matrix}(-{\lambda_2}^{\alpha_2} t) \right]\right).
\end{align} \end{theorem} \begin{proof} Since $N_{1}(D_{\alpha_1, \mu_1}(t))$ and $N_{2}(D_{\alpha_2, \mu_2}(t))$ are independent, \begin{align*} \mathbb{P}\left(S^{\mu1, \mu2}_{\alpha_1,\alpha_2}(t)=k\right) &= \sum^{\infty}_{n=0}\mathbb{P}(N_1(D_{\alpha_1, \mu_1}(t))=n+k)\mathbb{P}(N_2(D_{\alpha_2, \mu_2}(t))=n)\mathbb{I}_{k\geq 0}\\ &+ \sum^{\infty}_{n=0}\mathbb{P}(N_1(D_{\alpha_1, \mu_1}(t))=n)\mathbb{P}(N_2(D_{\alpha_2, \mu_2}(t))=n+|k|)\mathbb{I}_{k<0}, \end{align*} which gives the marginal PMF of TSFPP by using \eqref{pmf_tem_space}. \end{proof} \begin{remark} We use this expression to calculate the marginal distribution of TSFSP. The MGF is obtained by using the conditioning argument. Let $f_{\alpha, \mu}(x,t)$ be the density function of $D_{\alpha, \mu}(t)$. Then \begin{align}\label{MGF_temp} \mathbb{E}[e^{\theta N(D_{\alpha, \mu}(t))}] &= \int^{\infty}_{0}\mathbb{E}[e^{\theta N(u)}]f_{\alpha, \mu}(u,t)du =e^{-t\{(\lambda(1-e^{\theta})+\mu)^{\alpha}-\mu^{\alpha}\}}. \end{align} Using \eqref{MGF_temp}, the MGF of TSFSP is $$ \mathbb{E}[e^{\theta S^{\mu_1, \mu_2}_{\alpha_1, \alpha_2}(t)}]=\mathbb{E}\left[e^{\theta N_{1}(D_{\alpha_1, \mu_1}(t))}\right]\mathbb{E}\left[e^{\theta N_{2}(D_{\alpha_2, \mu_2}(t))}\right] = e^{-t[\{(\lambda_1(1-e^{\theta})+\mu_1)^{\alpha_1}-\mu_1^{\alpha_1}\}+\{(\lambda_2(1-e^{\theta})+\mu_2)^{\alpha_2}-\mu_2^{\alpha_2}\}]}. $$ \end{remark} \begin{remark} We have $\mathbb{E}[S^{\mu1, \mu2}_{\alpha_1,\alpha_2}(t)] =t(\alpha_1 \mu_1^{\alpha_1 -1}-\alpha_2 \mu_2^{\alpha_2 -1}).$ Further, the covariance of TSFSP can be obtained by using \eqref{vari_tem} and \begin{align*} {\rm Cov}\left[S^{\mu_1, \mu_2}_{\alpha_1, \alpha_2}(t), S^{\mu_1, \mu_2}_{\alpha_1, \alpha_2}(s)\right] & = {\rm Cov}[N_{1}(D_{\alpha_1, \mu_1}(t)), N_{1}(D_{\alpha_1, \mu_1}(s))] + {\rm Cov}[N_{2}(D_{\alpha_2, \mu_2}(t)), N_{2}(D_{\alpha_2,\mu_2}(s))]\\ &= {\rm Var}(N_{1}(D_{\alpha_1, \mu_1}(\min(t,s)))+ {\rm Var}(N_{2}(D_{\alpha_2, \mu_2}(\min(t,s))). 
\end{align*} \end{remark} \begin{proposition} The L\'evy density $\nu_{S^{\mu1, \mu2}_{\alpha_1,\alpha_2}}(x)$ of TSFSP is given by \begin{align*}\label{skellam_levy-I} \nu_{S^{\mu1, \mu2}_{\alpha_1,\alpha_2}}(x) &= \sum_{n_1=1}^{\infty}\mu_1^{\alpha_1-n_1}{\alpha_1 \choose n_1}{\lambda_1}^{n_1} \sum_{l_1=1}^{n_1} {n_1 \choose l_1}(-1)^{l_1+1} \delta_{l_1}(x)\\ & +\sum_{n_2=1}^{\infty}{\mu_2}^{\alpha_2-n_2}{\alpha_2 \choose n_2}{\lambda_2}^{n_2} \sum_{l_2=1}^{n_2} {n_2 \choose l_2}(-1)^{l_2+1} \delta_{-l_2}(x), \; \mu_{1}, \; \mu_{2} > 0. \end{align*} \end{proposition} \begin{proof} The result follows by adding the L\'evy densities of $N_{1}(D_{\alpha_1, \mu_1}(t))$ and $-N_{2}(D_{\alpha_2, \mu_2}(t))$, obtained from equation \eqref{levy_temp}, which leads to \[ \nu_{S^{\mu1, \mu2}_{\alpha_1,\alpha_2}}(x) = \nu_{N_{\alpha_1, \mu_1}}(x) + \nu_{N_{\alpha_2, \mu_2}}(-x). \] \end{proof} \subsection{Simulation of SFSP and TSFSP} We present the algorithm to simulate the sample trajectories for SFSP and TSFSP.
We use Python 3.7 and its libraries \textit{Numpy} and \textit{ Matplotlib} for the simulation purpose.\\ \noindent{\bf Simulation of SFSP:}\\ \textbf{Step-1}: generate independent and uniformly distributed in $[0, 1]$ rvs $U$, $V$ for fix values of parameters;\\ \textbf{Step-2}: generate the increments of the $\alpha$-stable subordinator $D_{\alpha}(t)$ (see \cite{Cahoy2010}) with pdf $f_{\alpha}(x, t)$, using the relationship $D_{\alpha}(t + dt) - D_{\alpha}(t) \stackrel{d}{=} D_{\alpha}(dt) \stackrel{d}= (dt)^\frac{1}{\alpha} D_{\alpha}(1)$, where $$ D_{\alpha}(1) = \frac{\sin(\alpha \pi U)[\sin((1-\alpha)\pi U)]^{1/\alpha -1}}{[\sin(\pi U)]^{1/\alpha} |\log V|^{1/\alpha -1}}; $$ \textbf{Step-3}: generate the increments of Poisson distributed rv $N(D_{\alpha}(dt))$ with parameter $\lambda (dt)^{1/\alpha} D_{\alpha}(1)$;\\ \textbf{Step-4}: cumulative sum of increments gives the space fractional Poisson process $N(D_{\alpha}(t))$ sample trajectories;\\ \textbf{Step-5}: generate $N_{1}(D_{\alpha_1}(t))$, $N_{2}(D_{\alpha_2}(t))$ and subtract these to get the SFSP $S_{\alpha_1, \alpha_2}(t)$.\\ \noindent We next present the algorithm for generating the sample trajectories of TSFSP.\\ \noindent{\bf Simulation of TSFSP:}\\ Use the first two steps of previous algorithm for generating the increments of $\alpha$-stable subordinator $D_{\alpha}(t)$.\\ \textbf{Step-3}: for generating the increments of TSS $D_{\alpha, \mu}(t)$ with pdf $f_{\alpha, \mu}(x, t)$, we use the following steps called ``acceptance-rejection method";\\ \textbf{(a)} generate the stable random variable $D_{\alpha}(dt)$;\\ \textbf{(b)} generate uniform $(0, 1)$ rv $W$ (independent from $D_{\alpha}$);\\ \textbf{(c)} if $W \leq e^{-\mu D_{\alpha}(dt)}$, then $D_{\alpha, \mu}(dt) = D_{\alpha}(dt)$ (``accept"); otherwise go back to (a) (``reject"). 
Note that, here we used that $f_{\alpha, \mu}(x, t) = e^{-\mu x + \mu^{\alpha}t} f_{\alpha}(x, t),$ which implies $\frac{f_{\alpha, \mu}(x, t)(x,dt)}{cf_{\alpha}(x,dt)} = e^{-\mu x}$ for $c = e^{{\mu }^{\alpha} dt}$ and the ratio is bounded between $0$ and $1$;\\ \textbf{step-4}: generate Poisson distributed rv $N(D_{\alpha, \mu}(dt))$ with parameter $\lambda D_{\alpha, \mu}(dt)$ \\ \textbf{step-5}: cumulative sum of increments gives the tempered space fractional Poisson process $N(D_{\alpha, \mu}(t))$ sample trajectories;\\ \textbf{step-6}: generate $N_{1}(D_{\alpha_1, \mu_1}(t))$, $N_{2}(D_{\alpha_2, \mu_2}(t))$, then take difference of these to get the sample paths of the TSFSP$S_{\alpha_1, \alpha_2}^{\mu_1, \mu_2}(t)$. \begin{figure} \caption{The sample trajectories of SFSP and TSFSP} \end{figure} \noindent {\bf Acknowledgments:} NG would like to thank Council of Scientific and Industrial Research(CSIR), India, for the award of a research fellowship. \noindent \end{document}
\begin{document} \title[Homogenization of Pucci PDE]{Approximate homogenization of fully nonlinear elliptic PDEs: estimates and numerical results for Pucci type equations} \author{Chris Finlay} \author{Adam~M. Oberman} \date{\today} \begin{abstract} We are interested in the shape of the homogenized operator $\overline F(Q)$ for PDEs which have the structure of a nonlinear Pucci operator. A typical operator is $H^{a_1,a_2}(Q,x) = a_1(x) \lambda_{\min}(Q) + a_2(x)\lambda_{\max}(Q)$. Linearization of the operator leads to a non-divergence form homogenization problem, which can be solved by averaging against the invariant measure. We estimate the error obtained by linearization based on semi-concavity estimates on the nonlinear operator. These estimates show that away from high curvature regions, the linearization can be accurate. Numerical results show that for many values of $Q$, the linearization is highly accurate, and that even near corners, the error can be small (a few percent) even for relatively wide ranges of the coefficients. \end{abstract} \keywords{Viscosity Solutions, Partial Differential Equations, Homogenization, Pucci Maximal Operator} \maketitle \section{Introduction} In this article we consider fully nonlinear, uniformly elliptic PDEs $F(Q,x)$. We are interested in approximating the homogenized operator $\overline F(Q)$. We focus on Pucci-type PDE operators in two dimensions. The restriction to two dimensions is for computational simplicity and also for visualization purposes. We consider periodic coefficients, although in our numerical experiments we obtained very similar results with random coefficients. The approach we take is to linearize the operator about the value $Q$, and to homogenize the linearized operator $\overline L(Q)$. The solution of the linear homogenization problem can be expressed (and in some cases solved analytically) by averaging against the invariant measure. 
The result is given by \[ \overline {L^Q}(Q) = \int F(Q,x) \rho^Q(x) dx \] where $\rho^Q$ is the invariant measure of the corresponding linear problem. We estimate the linearization error \[ E(Q) \equiv \overline F(Q) - \overline{L^Q}(Q), \] For convex operators, the analysis gives a one sided bound on the error. In general, we obtain upper or lower bounds on the error, which depend on generalized semi-concavity/convexity estimates of $F$, as well as on the solution of the cell problem $u^Q$ for the nonlinear problem. These results are stated in \Cref{th:main} below. For theoretical results on nonlinear homogenization, we refer to the review \cite{Engquist2008} as well as recent works on rates of convergence (for example \cite{Armstrong2014}). There are fewer works which aim to determine the values $\overline F(Q)$. Few analytical results are available. Numerical homogenizing results for Pucci type operators can be found in \cite{caffarelli2008numerical} using a least-squares formulation. We also mention numerical work by \cite{GO04} and \cite{oberman2009homogenization} and \cite{luo2011new} in the first order case, as well as \cite{Froese2009} in the second order linear non-divergence case. The typical operator we consider herein is defined next. Below, we consider more operators, including the usual convex Pucci Maximal operator. \begin{definition}[Fully nonlinear elliptic operator $F(Q,x)$ and linearization]\label{def:F} We are given $F: \mathcal{S}^d \times \mathbb{T}^d \to \R$ which is uniformly elliptic, Lipschitz continuous in the first variable and bounded in the second variable. Suppose for a given $Q$, that $\grad_Q F(Q,x)$ exists for all $x$. Write \begin{equation} \label{LFaffine} L^Q(M,x) = \grad_Q F(Q,x)\cdot(M-Q) +F(Q,x) \end{equation} for the affine approximation to $F$ at $Q$. \end{definition} Given $Q \in \mathcal{S}^d$, write, for $d=2$, $\lambda_{\min}(Q)$ and $\lambda_{\max}(Q)$ for the smaller, and larger eigenvalues of $Q$, respectively. 
\begin{example}[Typical PDE operator] Given $\delta > 0$, and periodic functions $a_1(y), a_2(y) \geq \delta$. Define the homogeneous order one PDE operator \begin{equation} H^{a_1,a_2}(Q,x) = a_1(x) \lambda_{\min}(Q) + a_2(x)\lambda_{\max}(Q) \label{eq:HP} \end{equation} Suppose $Q$ has unit eigenvectors $v_1, v_2$ corresponding to the eigenvalues $\lambda_{\min}(Q),\lambda_{\max}(Q)$, respectively. Then the linearization at $Q$, of $H^{a_1,a_2}$ is given by \begin{equation} L^Q(M,x) = a_1(x) v_1^T M v_1 + a_2(x) v_2^T M v_2 \label{eq:LinearHP} \end{equation} \end{example} \begin{remark}[Typical results] We consider the case of coefficients which are either (i) periodic checkerboards or (ii) random checkerboards. We compute both the nonlinear homogenization $\overline F(Q)$ and the homogenized linear operator $\overline{L^Q}(Q)$. In practice, the numerically computed error is insignificant, less than \num{1e-8} for values of $Q$, away from regions of high curvature of $F$ with respect to $Q$. Areas where the error is significant correspond to regions where the semi-concavity constants are large. A typical result is displayed in Figure~\ref{fig1}. The solid line is a level set of the homogenized linear operator $ \overline{L^Q}(Q)$. The dots are numerical computations of $\overline F(Q)$. The error is very small, except at one point, which corresponds to a corner of the operator. (Dashed lines indicate underlying operators which comprise $F(Q,x)$. Our analysis depends on the shape of $F(Q,x)$ in $Q$, but not on the pattern of the coefficients in $x$. We also considered the case of stripe coefficients. For separable examples, the linear approximation is still effective. However, we also found nonseparable examples where the linear approximation is poor, which we will address in a companion paper \cite{ObermanFinlay_LP} with a closer bound. 
\end{remark} \begin{figure} \caption{Plot of a single level set of $\overline{L^Q} \label{fig1} \end{figure} \subsection{Background: cell problem and linear homogenization} In this section, we review background material on the cell problem for the nonlinear PDE, and on linear homogenization. We also give an exact formula in one dimension for a separable operator. Given $a(y): {\mathbb{T}} \to \R$, positive, $a(y)>0$, write $\HM(a) = \left( \int_{\mathbb{T}} \frac{\,\mathrm{d} y }{a(y)}\right)^{-1}$ for the harmonic mean of $a$. In the linear case, $\overline L$ can be found by averaging against the invariant measure, by solving the adjoint equation (see \cite{bensoussan2011asymptotic} or \cite{Froese2009}), which yields the following formula. \begin{lemma}[Linear Homogenization Formula] \label{formula:HM} The separable linear operator $L(M,x) = a(x)A_0:M + f(x)$ has invariant measure $\rho(x) = \HM(a)/a(x)$ and homogenizes to $\overline L(Q) = \HM(a) A_0:Q + \overline f$, where $\overline f(x) = \int f(x) \,\mathrm{d} \rho(x)$. \end{lemma} For the nonlinear operator $F$, the homogenized operator is given by solving the cell problem, see~\cite{Evans1989}. \begin{definition}[Solution of the cell problem] Given $F$ uniformly elliptic, for each $Q \in {\mathcal{S}^d}$, there is a unique value $\overline F(Q)$ and a periodic function $u^Q(y)$ which is a viscosity solution of the cell problem \begin{equation}\label{def:cell_problem} F(Q + D^2u^Q(y), y) = \overline F(Q). \end{equation} \end{definition} \begin{lemma}[Homogenization of linearized operator]\label{def:linear} Consider the nonlinear elliptic operator $F(Q,x)$, and suppose for a given $Q$, that $\grad_Q F(Q,x)$ exists for all $x$. The corresponding linearization at $Q$ is given by \eqref{LFaffine}. 
Let $\rho^Q$ be the corresponding unique invariant probability measure, which is the solution of the adjoint equation \begin{equation}\label{adjoint-equation} D^2:(\grad_Q F(Q,y)\rho^Q(y)) = 0, \end{equation} interpreted in the weak sense. Then $\overline{L^Q}(Q)$, the homogenized linearized operator evaluated at $Q$, is given by \begin{equation}\label{LbarQ} \overline{L^Q}(Q) = \int_{\mathbb{T}^d} F(Q,y) \,\mathrm{d} \rho^Q(y). \end{equation} \end{lemma} \begin{proof} The invariant measure $\rho^Q$ solves \eqref{adjoint-equation}, see \cite{bensoussan2011asymptotic} or \cite{Froese2009}. Apply \eqref{LFaffine} at $M=Q$ and then integrate against $\rho^Q$ to obtain the result. \end{proof} \section{Main Result} \subsection{Generalized semiconcavity estimates on the operators} Consider the uniformly elliptic operator $F(Q,x)$, where $Q \in \mathcal{S}^d$ and $x \in \mathbb{T}^d$. We assume the following. \begin{ass}[Quadratically dominated for $F(Q,x)$]\label{def:quaddom} Let $F$ be as in Definition~\ref{def:F}. Suppose for a given $Q$, that $\grad_Q F(Q,x)$ exists for all $x$. Write $\| Q \|$ for the Frobenious norm of $Q$. We say that $F$ is \emph{quadratically dominated above} at $Q$ if there is a bounded function $C^+(Q,x): \mathbb{T}^d \to \R$ such that \begin{equation}\label{QDabove} F(M,x) - L^Q(M,x) \leq C^+(Q,x)\frac {\norm{M-Q}^2}2, \quad \text{ for all $(M,x) \in \mathcal{S}^d \times \mathbb{T}^d$} \end{equation} and similarly, $F$ is \emph{quadratically dominated below} at $Q$ if there is a bounded function $C^-(Q,x): \mathbb{T}^d \to \R$ such that \begin{equation}\label{QDbelow} F(M,x) - L^Q(M,x) \geq C^-(Q,x)\frac {\norm{M-Q}^2}2, \quad \text{ for all $(M,x) \in \mathcal{S}^d \times \mathbb{T}^d$} \end{equation} \end{ass} \begin{remark} If $F$ is convex in $Q$, then $C^-(Q,y) = 0$. Similarly if $F$ is concave in $Q$, $C^+(Q,y)=0$. 
More generally if $F$ is semi-concave, or semi-convex in $Q$, then we can set $C^{\pm}(Q,y) = C^\pm(y)$, to be a constant independent of $Q$. However, we require the definition above for when the semi-concavity or semi-convexity conditions in $Q$ do not hold, as is the case for the Pucci-type operators defined below. \end{remark} \begin{example} Let $x\in \R$ and set $f(x) = \max\left\{ a x, b x \right\}$. Since $f$ is convex, we can take $C^-(x) =0$ in \eqref{QDbelow}. We claim that for $x\not=0$, \eqref{QDabove} holds with \begin{equation} \label{eq:CplusEx1} C^+(x) = \frac{\abs{a-b}}{2 x}, \end{equation} and this is the best constant. See \Cref{fig:CQ_max_ab}. \end{example} \begin{proof}[Derivation of \eqref{eq:CplusEx1}] Expand $f(x +y)$ about the point $x$, for $x\not=0$. To test \eqref{QDabove}, replace the inequality with an equality to obtain a quadratic equation. By requiring that there is only one root, we obtain an equation for the discriminant of the quadratic, which can be solved to obtain the result. \end{proof} \begin{figure} \caption{ For the simple example $f(x) = \max\left\{ a x, b x \right\} \label{fig:CQ_max_ab} \end{figure} \subsection{Main Theorem} \begin{theorem} \label{th:main} Suppose $F$ satisfies Assumptions~\ref{def:quaddom} and $u^Q \in C^{2,\alphapha}({\mathbb{T}^d})$ is a classical solution. Let $\overline F(Q)$ be the homogenized operator at $Q$ and let $u^Q$ be the corresponding solution of the cell problem given by~\eqref{def:cell_problem}. Let the homogenization of the linearization of the operator be given by \eqref{LbarQ} and let $\rho^Q(y)$ be the corresponding invariant measure of the linearized problem \eqref{LFaffine}. 
Write \[ \overline{ C^\pm}(Q) = \frac 1 2 \int C^\pm(Q,y) \| D^2u^Q(y) \|^2 \,\mathrm{d} \rho^Q(y) \] Then \begin{equation}\label{eq:bounds} \overline{ C^-}(Q) \leq \overline F(Q) - \overline{L^Q}(Q) \leq \overline{ C^+}(Q) \end{equation} \end{theorem} \begin{remark} In the examples we consider below, $C^\pm(Q,y) \to 0$ as $\dist(|Q|,S) \to \infty$, for the singular set of the operator. This gives control over the homogenization error for many values of $Q$. Another term in the error is $\|D^2u^Q\|$. In the homogeneous order one case, we have $u^Q = 0$ for $Q = 0$, so a continuity argument suggests that we may have control of $\|D^2u^Q\|$ for small values of $Q$. This is the case in one dimension in \cite{ObermanFinlay_LP}, where we obtain an analytical formula for $u^Q_{xx}$ through \eqref{separable}, which gives $\abs{u^Q_{xx}} \leq C|Q|$. The main theorem is a formal result in the sense that it relies on the fact that $u^Q$ is a classical solution, which does not hold in general. If $F$ is convex (or concave), then by a famous theorem of Krylov and Evans \cite{krylov_boundedly_1984,evans_classical_1982}, or~\cite{caffarelli1995fully}, $u^Q \in C^2({\mathbb{T}^d})$. However, in general we are only guaranteed $u^Q \in C^{1,\alphapha}({\mathbb{T}^d})$ \cite{jensen_maximum_1988}. \end{remark} \begin{proof} Subtract the linearization of $F$ at $Q$ evaluated at $Q + D^2u^Q(y)$ from the equation for the cell problem \eqref{def:cell_problem}, to obtain \begin{equation} \label{eq:subtractlin} \overline F(Q) - L^Q(Q+D^2u^Q,y) = F(Q+D^2u^Q,y) - L^Q(Q+D^2u^Q,y). \end{equation} From Assumption~\eqref{def:quaddom}, \begin{equation} \overline F(Q) - L^Q(Q+D^2u^Q,y) \leq C^+(Q,y) \frac{\| D^2 u^Q \|^2}{2} \label{eq:Fb_above} \end{equation} and \begin{equation} \overline F(Q) - L^Q(Q+D^2u^Q,y) \geq C^-(Q,y) \frac{\| D^2 u^Q \|^2}{2}. \label{eq:Fb_below} \end{equation} Now integrate \cref{eq:Fb_above,eq:Fb_below} against the invariant measure $\rho^Q$. 
This yields the upper and lower bounds \eqref{eq:bounds}, where we have used the fact that for all $\phi \in C^2({\mathbb{T}^d})$, \begin{equation} \label{LbarAtPhi} \int_{\mathbb{T}^d} {L^Q}(Q + D^2\phi,y) \,\mathrm{d}\rho^Q(y) = \int_{\mathbb{T}^d} F(Q,y)\,\mathrm{d}\rho^Q(y), \end{equation} which follows from integration by parts, since $\rho^Q$ solves the adjoint equation~\eqref{adjoint-equation}. \end{proof} \subsection{Applications of the main result} We give two applications of the main result. In the first example, where the operator is separable, we have an analytical formula for $\overline{L^Q}(Q)$. In this case the estimates also simplify. In the second, nonseparable example, we can find $\overline{L^Q}(Q)$ by solving a single linear homogenization problem, with coefficients given by the linearization \eqref{eq:LinearHP}. \begin{corollary} \label{formula:sep} Consider the separable, purely second order operator \[ F(Q, y) = a(y) F_0(Q) \] for $y \in \R^d$. Suppose that $F_0$ is quadratically dominated with constants $C^-(Q)$ and $C^+(Q)$. Then, \begin{equation}\label{eq:corbounds} \overline{ C^-}(Q) \leq \overline{F}(Q) - \HM(a) F_0(Q) \leq \overline{ C^+}(Q) \end{equation} where \[ \overline{ C^\pm}(Q) = \frac 1 2 C^\pm(Q)\HM(a) \int_{\mathbb{T}^d} \| D^2u^Q(y) \|^2 \frac{1}{a(y)} \,\mathrm{d} y. \] \end{corollary} \begin{proof} 1. The formula for the linearization, \[ \overline{L^Q}(Q) = \HM(a) F_0(Q) \] follows from the Linear Homogenization Formula (Lemma \ref{formula:HM}). 2. From linearization, we have that $\rho^Q(y) = \HM(a)/a(y)$. Using the definition, then the generalized semiconvexity/concavity constants for $F(Q,y)$ are given by \[ C^+(Q,y) = a(y)C^+(Q), \quad\text{ and }\quad C^-(Q,y) = a(y)C^-(Q). \] Passing the constants and the invariant measure into Theorem~\ref{th:main} gives the bounds provided by \eqref{eq:corbounds}, since the coefficients $a(y)$ cancel. 
\end{proof} \begin{remark} In a companion paper \cite{ObermanFinlay_LP}, we show that for convex operators in one dimension, \begin{equation}\label{separable} \overline F(Q) = \overline{L^Q}(Q) = \HM(a) F_0(Q). \end{equation} \end{remark} \begin{corollary} Consider the operator $H^{a_1,a_2}$ given by \eqref{eq:HP}. Then \begin{multline*} \left| \overline{H^{a_1,a_2}}(Q) - \overline {L^Q}(Q) \right| \\ \leq \frac{1}{2\abs{\lambda_{\min}(Q)-\lambda_{\max}(Q)}} \int \abs{a_1(y)-a_2(y)} \, \| D^2u^Q(y) \|^2 \, d\rho^Q(y) \end{multline*} \end{corollary} \begin{proof} We apply Theorem~\ref{th:main} to $H^{a_1,a_2}$ given by \eqref{eq:HP}. The linearization is given by \eqref{eq:LinearHP}. The invariant measure of the linear problem is given by the solution of \eqref{adjoint-equation} and the homogenized linear operator is given by \eqref{LbarQ} from Lemma~\ref{def:linear}. The main step is to work out the generalized semi-concavity constants. We claim. \begin{equation} C^+(Q,x) = \frac{ (a_2(x)-a_1(x))^+}{\abs{\lambda_{\min}-\lambda_{\max} }}, \quad\text{ and }\quad C^-(Q,x) = \frac{(a_1(x)-a_2(x))^-}{\abs{\lambda_{\min}-\lambda_{\max}} }. \label{HSCconstant} \end{equation} To prove this we proceed in steps. 1. First, take $q\in \R^2$ and set $f(q) = \max(q_1, q_2)$. Then $L^q(y) = \grad f(q)\cdot y$ away from the singular set $q_1=q_2$, since the function is homogeneous of order one. The constant $C^-(q) = 0$, since $f$ is convex. We claim the optimal choice for $C^+(q)$ is given by \[ C^+(q) = \frac{1}{\abs{q_2 - q_1}} \] for $q_1\not= q_2$. To see this, we require \[ \max(y_1,y_2) \leq \grad f(q)\cdot y + \frac{C^+(q)}{2} \abs{y-q}^2. \] It is easily verified that the extremal case for the inequality occurs when $(y_1,y_2) = (q_2, q_1)$, which leads to the condition \[ \abs{q_1-q_2} \leq {C^+(q)} \abs{q_1-q_2}^2. \] giving the result. 2. Let $f(q_1, q_2) = a_1\min(q_1, q_2) + a_2\max(q_1, q_2)$. Rewrite $f(q_1, q_2) = a_1(q_1 + q_2) + (a_2-a_1) \max(q_1, q_2)$. 
We can always subtract an affine function when computing the constants. So the constants for $f$ are the same as the constants for $(a_2-a_1) \max(q_1, q_2)$. In this case, using the result of step 1, we obtain \[ C^+(x) = \frac{(a_2-a_1)^+}{\abs{q_1-q_2}}, \qquad C^-(x) = \frac{(a_1-a_2)^-}{\abs{q_1-q_2}} \] 3. Next consider for the two by two matrix $Q$, $h(Q,x) = a_1(x) \min(q_{11}, q_{22}) + a_2(x) \max(q_{11}, q_{22})$. Then the previous step shows that the constants for $h$ are given by the previous ones (with $q_{11}$ replacing $q_1$ and $q_{22}$ replacing $q_2$. Finally, since $H^{a_1, a_2}$ depends only on the eigenvalues of $Q$, without loss of generality, we can choose a coordinate system where $Q$ is diagonal when computing the generalized semiconcavity constants. It remains to show that the generalized semi-concavity condition holds for a matrix, $M$. If $M$ is diagonal the condition holds. But if $M$ is not diagonal, then the change in the norm $\| M - Q\|^2$ can be controlled by a constant, or absorbed into the definition of the norm. \end{proof} \section{Computational Setting} For our numerical experiments, we consider a wider class of separable and non-separable operators. \subsection{PDE Operators} \begin{definition}[Pucci-type operators]\label{defnPucci} For $\delta > 0$ and given functions $0 < \delta \leq a(y)\leq A(y)$. Write $b(y) = A(y)-a(y)$. Also write $t^+ = \max(t,0)$. Define, for $d=2$, the standard Pucci maximal operator, the Pucci-type operator, the smoothed Pucci-type operator, and a Monge-Ampere type operator respectively as \begin{align} \label{Pucci_2d} P^{A,a}(Q,y) &= a(y) \tr Q + b(y)\left ( \lambda_{\min}^+(Q) + \lambda_{\max}^+(Q) \right ) \\ \label{eq:OurPucci} F^{A,a}(Q,y) &= a(y) \tr Q + b(y) \lambda_{\max}^+(Q). \\ F_k^{A,a}(Q,y) &= a(y) \tr Q + b(y)\mathcal S_k(\lambda_{\min}(Q),\lambda_{\max}(Q),0) \label{eq:smoothP} \\ M(Q,y) &= a(y)\left( \tr(Q) + \lambda_{\min}^+(Q) \lambda^+_{\max}(Q) \right). 
\end{align} Here, for $x \in \R^m$, $S_k(x)$ is the smoothed maximum function, \begin{equation} \mathcal S_k(x) = \frac{\sum_{i=1}^m x_i \exp(k x_i)}{\sum_{i=1}^m \exp(k x_i)}. \label{eq:smoothmax} \end{equation} The function $S_k$ goes to the max as $k\to \infty$, and to the average as $k\to 0$. \end{definition} \begin{definition}[Periodic checkerboard, stripes, and random checkerboard coefficients] Define \begin{equation}a_0(y) = \begin{cases}1,~ y\in B\\r,~y\in W,\end{cases}\label{eq:a0}\end{equation} with $r>1$. The sets $B$ and $W$ are either black and white squares of a checkerboard; alternating black and white stripes of equal width; or a `random' checkerboard, with black and white squares distributed with equal probability, uniformly. \end{definition} \begin{remark}[Representation and visualization of the operators] The definition above agrees with the usual definition of the Pucci operator, \begin{equation} P^{A,a}(Q,y) = \sup \{ M : Q \mid a(y) I \ll M \ll A(y) I \}. \label{eq:RealPucci} \end{equation} We can also rewrite \[ F^{A,a}(Q,y) = \begin{cases} a(y) \tr Q & \text{$Q$ negative definite} \\ H^{A,a}(Q,x), & \text{otherwise}. \end{cases} \] \end{remark} \begin{figure} \caption{Example level sets} \label{fig:visualize} \caption{Pucci level sets} \label{fig:pucci-level-sets} \caption{Figure \ref{fig:visualize} \label{fig:pucci-figs} \end{figure} \begin{example} For the Pucci operator $P^{A,a}(Q,x)$ given by \eqref{Pucci_2d}, by convexity, $C^-(Q,x) = 0$ and \begin{equation} C^+(Q,x) = \begin{cases} \max\left\{ \frac{b(x)}{\tr(Q)},\frac{A(x)}{2\lambda_{\min}} \right\}, &\text{ if } \lambda_{\min}, \lambda_{\max} > 0 \\ \frac{b(x)}{2 \min(\abs{\lambda_{\min}},\abs{\lambda_{\max}} )}, &\text{ otherwise}. 
\end{cases} \label{eq:CQ_pucci} \end{equation} \end{example} \subsection{Numerical Method details} In order to compute the errors, as a function of $Q$, we used a grid in the $\lambda_1-\lambda_2$ plane, and computed the linear and nonlinear homogenization at the grid points. Typical values can be see in Figure~\ref{fig:pucci-level-sets}, where black points indicate the grid values of $Q$ tested. We remark on the numerical methods used throughout. To compute $\overline{F} (Q)$ directly, we discretized with finite differences and solved the parabolic equation $u_t + F(Q+D^2u,y)$ using an explicit Euler method, to iteratively compute a steady state solution. We discretized using a convergent monotone scheme \cite{ObermanSINUM} and also using standard finite differences. The accuracy of the monotone scheme was less than the standard finite differences, so we implemented a filtered scheme \cite{froese2013convergent}. In practice, the filtered scheme always selected the accurate scheme, so in this instance, perhaps because the solutions are $C^2$ and periodic, standard finite differences appear to converge. For all the computations, to avoid trivial solutions, we solved with a right hand side function equal to a constant, and then subtracted the same constant from $\overline F(Q)$. The computational domain was the torus $[0,1]^2$, divided into $20\times20$ equal squares, each with 16 grid points per square. \begin{remark}[Comparison with \cite{caffarelli2008numerical}] The problem of homogenizing $a_0(y) F^{A,a}(Q)$, was considered in \cite{caffarelli2008numerical}. In their case, the spatial coefficient $a_0(y)$ varies periodically and smoothly between 2 and 3, and their homogenized value for $\overline a_0$ was 2.5 (which was the average of the coefficient $a_0(x,y) = \cos(\pi x)\cos(\pi y)$). Our results using these coefficients was $\overline a_0 = 2.486$, which is very close to the average. 
However with coefficients which are more spread out, we obtain values far from the average. \end{remark} \begin{figure} \caption{Error} \label{fig:real-pucci-error2} \caption{$\max_y C^+(Q,y)$} \label{fig:CQ} \caption{Homogenization of a separable Pucci example operator, $a(y) P^{3,1} \label{fig:real-pucci-chkbd-r=2} \end{figure} \section{Numerical results}\label{sec:Numerics} \subsection{Numerical Results: separable operators}\label{sec:numSepPucci} Here we check the homogenization error of the bound for separable operators in two dimensions, from Corollory~\ref{formula:sep}. We are in the convex case, so the lower bound is zero. We performed numerical simulations on four operators, see Definition~\ref{defnPucci}. \begin{itemize} \item $a_0(y) P^{3,1}(Q)$ \item $a_0(y) F^{3,1}(Q)$ \item $a_0(y) F^{3,1}_k(Q)$, with $k=10$ and $k=0.1$ \item $a_0(y) M(Q)$ \end{itemize} In \Cref{fig:real-pucci-chkbd-r=2} we compare the error $\overline F(Q) - \overline L^Q(Q)$ for a separable Pucci operator on a checkerboard, we also illustrate the constant $C^+(Q,y)$. In this case we have an analytical formula for $\overline L^Q(Q)$). This figure illustrates the Main Theorem: when the constant is large the error from the linearization is high. The error is less than \num{1e-6} outside of a small region about the axis, and on the order of $0.1$ near the axis. In \Cref{fig:smoothmax}, we show how the error $\overline F(Q) - \overline L^Q(Q)$ decreases as the operator becomes smoother. The operator with the smallest maximum curvature (\Cref{fig:psmooth10}) exhibits the smallest error. As the operator becomes less smooth, the error increases. For the smoothest operator the global error is at most one percent (in the range of values shown in the figure). For the two sharper operators, there is still very high accuracy away from the highest curvature regions. 
We see that the error of the smooth operator, $F^{3,1}_{10}(Q)$, is slightly smaller than the non-smooth operator's error
The location where the large error is located depends on the interplay between the operator and the direction of the stripes. Given that in this example the homogenized operator is $\mathcal O(1)$, the error here is particularly large. In a companion paper, we will derive a closer lower bound for $\overline{F^{A,a}}(Q)$, using the optimal invariant measure of the nonlinear operator. Figure \ref{fig:ma} shows error for $a_0(y) (\tr(Q) + \text{MA}(Q))$ on both stripes and a periodic checkerboard. For the Monge Ampere type operator on checkerboard, error is on the order of \num{1e-2} in the first quadrant, where the curvature is bounded. Elsewhere the error is negligible. As $r$ (the scaling coefficient of $a_0(y)$) grows, so does the error. As expected, for the two Pucci type operators on checkerboard, away from the regions where the curvature is unbounded, the error is negligible: this is where the operators are linear. Although we do not show it, for all figures, the error profile on the random checkerboard is nearly identical to the periodic checkerboard. \subsection{Numerical Results: non-separable operators}\label{sec:iso} Now we consider nonseparable coefficients for $F^{A,a}(Q,y)$, refer to Definition \ref{defnPucci}. For both periodic and random checkerboard coefficients, the numerically computed values of $\overline {F^{A,a}}(Q)$ depend only on the eigenvalues of $Q$, not on the eigenvectors. In addition, $\overline{F^{A,a}}(Q)$ is homogeneous order one. So the entire function $\overline{F^{A,a}}(Q)$ is determined by the 1-level set of $\overline{F^{A,a}}(Q)$ for diagonal matrices $Q$. We write \begin{equation}\label{formula:guess} \overline{L^Q}(Q) = \overline A \lambda^+(Q) + \overline a \lambda^-(Q) \end{equation} where the coefficients are obtained by numerical homogenization of the linearized operator \eqref{eq:LinearHP} when $Q$ had at least one positive eigenvalue. (In the negative definite case the operator is linear and the error was insignificant). 
We found that error was within 5\% for a range of values of $A$ and $a$ with coefficients which vary by a factor of 10. In Figure \ref{fig:nonseparable} we show the error on a periodic checkerboard, with \[ F^{A,a}(Q,y) = \begin{cases} \tr(Q),\, y\in B\\ F^{4,1},\,y\in W. \end{cases} \] The error is on the order of \num{1e-1} near the line $\lambda^+=\lambda^-$ in the first quadrant; on the order of \num{1e-2} in the second and fourth quadrants; and negligible otherwise. In Figure~\ref{fig:nonseparable} we plot the error against the numerically homogenized value for an the nonconvex operator alternating between $F^{2,1}$ and $F^{1,\frac{4}{3}}$ on a periodic checkerboard. \begin{figure} \caption{Error for the non-separable operator on a periodic checkerboard. Left: alternating between $F^{1,1} \label{fig:nonseparable} \end{figure} \subsubsection{Further experiments} We let $A$ and $a$ each take two positive values in periodic checkerboard pattern. In the second, we let $A$ and $a$ each take two positive values in a random checkerboard, drawn randomly from a Bernoulli trial with probability $p$. We checked both when $p=\frac{1}{2}$ and other values of $p$. When $p=\frac{1}{2}$ the homogenized operator on the random checkerboard is identical to the homogenization on the periodic checkerboard. Finally, we also checked the case when $A$ and $a$ are each drawn from a uniform distribution with positive support. In all of these cases, the numerically homogenized operator is (numerically) isotropic, homogeneous order one, and agrees closely with $\overline F$ in the approximate formula \eqref{formula:guess}. \section{Conclusions} We studied the error between the homogenization of the linearized operator and the fully nonlinear homogenization. We obtained upper and lower bounds on the error in terms of the generalized semiconvavity constants of the operator. We also performed numerical calculations. 
For the class of operators we studied, linearization was very accurate for a wide range of values of $Q$, with negligible error in some cases. The numerically computed errors were small, and concentrated around regions of high curvature in $Q$ of the operator $F(Q,x)$. Errors grew with the degree of nonlinearity and with the range of the coefficients. The numerical results are consistent with the bounds, although in some cases the error was smaller than was predicted by the bounds. \end{document}
\begin{document} \parskip 4pt \baselineskip 16pt \title[Bernoulli numbers and sums of powers of integers of higher order] {Bernoulli numbers and sum of powers of integers of higher order} \author[Andrei K. Svinin]{Andrei K. Svinin} \address{Andrei K. Svinin, Matrosov Institute for System Dynamics and Control Theory of Siberian Branch of Russian Academy of Sciences, P.O. Box 292, 664033 Irkutsk, Russia} \operatorname{\mathrm{e}}mail{[email protected]} \author[Svetlana V. Svinina]{Svetlana V. Svinina} \address{Svetlana V. Svinina, Matrosov Institute for System Dynamics and Control Theory of Siberian Branch of Russian Academy of Sciences, P.O. Box 292, 664033 Irkutsk, Russia} \operatorname{\mathrm{e}}mail{[email protected]} \date{\today} \begin{abstract} We give an expression of polynomials for higher sums of powers of integers via the higher order Bernoulli numbers. \operatorname{\mathrm{e}}nd{abstract} \maketitle \section{Introduction} As is known the sum of powers of integers \cite{Graham}, \cite{Knut} \[ S_m(n):=\sum_{q=1}^{n}q^{m} \] can be computed with the help of some appropriate polynomial $\hat{S}_m(n)$ for any $m\geq 0$. Exponential generating function for the sums $S_m(n)$ is given by \begin{equation} S(n, t)=\sum_{q=1}^{n}e^{qt}=\frac{e^{(n+1)t}-e^t}{e^t-1}. \label{genf} \operatorname{\mathrm{e}}nd{equation} Expanding in series (\ref{genf}) yields an infinite set of polynomials $\{\hat{S}_m(n) : m\geq 0\}$, that is, \[ S(n, t)=\sum_{q\geq 0}\hat{S}_{q}(n)\frac{t^q}{q!}. \] It is a classical result that these polynomials can be expressed as \cite{Jacobi} \begin{equation} \hat{S}_{m}(n)=\frac{1}{m+1}\sum_{q=0}^m(-1)^q{m+1\choose q}B_{q}n^{m+1-q}, \label{2} \operatorname{\mathrm{e}}nd{equation} where $B_q$ are the Bernoulli numbers that can be derived from the exponential generating function \begin{equation} \frac{t}{e^t-1}=\sum_{q\geq 0}B_q\frac{t^q}{q!}. 
\label{Bernoulli} \operatorname{\mathrm{e}}nd{equation} It follows from (\ref{Bernoulli}) that the Bernoulli numbers satisfy the recurrence relation \begin{equation} \sum_{q=0}^{m}{m+1\choose q}B_{q}=\delta_{0, m}. \label{rec-rel} \operatorname{\mathrm{e}}nd{equation} This relation is in fact the simplest one of many known recurrence relations involving the Bernoulli numbers (see, for example, \cite{Agoh} and references therein). One can derive, for example, an infinite number of recurrence relations of the form \begin{equation} \sum_{q=0}^{m}{m+k\choose q}S(m+k-q, k)B_{q}=\frac{m+k}{k}S(m+k-1, k-1),\;\; \forall k\geq 1. \label{rec-rel1} \operatorname{\mathrm{e}}nd{equation} In this paper we investigate a class of sums that correspond to a $k$-th power of generating function (\ref{genf}) for $k\geq 1$. Our main result is a formula for polynomials allowing to calculate these sums. It turns out that these polynomials are expressed via the higher Bernoulli numbers. \section{The power sums of higher order} Let us now consider a power of the generating function (\ref{genf}): \[ \left(S(n, t)\right)^k:=\sum_{q\geq 0}S_{q}^{(k)}(n)\frac{t^q}{q!}. \] We have \begin{equation} \left(S(n, t)\right)^k=\left(\sum_{q=1}^{n}e^{qt}\right)^k=\sum_{q=k}^{kn}{k\choose q}_ne^{qt}. \label{1} \operatorname{\mathrm{e}}nd{equation} The coefficients ${k\choose q}_n$ obviously generalizing the binomial coefficients originated from Abraham De Moivre and Leonard Euler works \cite{Moivre}, \cite{Euler} and extensively studied in the literature due to their applicability. From (\ref{1}), we see that it is a generating function for the sums of the form \begin{equation} S_{m}^{(k)}(n):=\sum_{q=0}^{k(n-1)}{k\choose q}_{n}\left(k+q\right)^m. \label{sums} \operatorname{\mathrm{e}}nd{equation} It is natural to call (\ref{sums}) the sums of powers of integers of higher order. 
Expanding \[ \left(\frac{e^{(n+1)t}-e^t}{e^t-1}\right)^k=\sum_{q\geq 0}\hat{S}_{q}^{(k)}(n)\frac{t^q}{q!}, \] we get an infinite number of polynomials $\hat{S}_{m}^{(k)}(n)$. Our goal in the paper is to prove that \begin{equation} \hat{S}_{m}^{(k)}(n)=\frac{1}{{m+k\choose k}}\sum_{q=0}^{m}(-1)^q{m+k\choose q}B_q^{(k)}S(m+k-q, k)n^{m+k-q}, \label{higher-polynomials} \end{equation} where $B_q^{(k)}$ are the higher-order Bernoulli numbers defined as \begin{equation} \frac{t^k}{(e^t-1)^k}=\sum_{q\geq 0}B^{(k)}_q\frac{t^q}{q!}. \label{Bern-high} \end{equation} The Bernoulli numbers of higher order appeared in \cite{Norlund} in connection with a theory of finite differences and were then investigated by many authors from different points of view (see, for example, \cite{Carlitz}). These numbers are known to satisfy \cite{Norlund} \[ B_n^{(k+1)}=\frac{k-n}{k}B_n^{(k)}-nB_{n-1}^{(k)}. \] The number $B^{(k)}_n$ with fixed $n\geq 0$ turns out to be some polynomial in $k$. Polynomials of this kind are known as N\"orlund polynomials. One can find a number of these polynomials in \cite{Norlund}. For convenience, we have written out several N\"orlund polynomials in the Appendix. The numbers $S(n, k)$ in (\ref{higher-polynomials}) are the Stirling numbers of the second kind that satisfy the recurrence relation \begin{equation} S(n, k)=S(n-1, k-1)+kS(n-1, k) \label{rr} \end{equation} with appropriate boundary conditions \cite{Weisstein2}, \cite{Graham}. It is easy to prove that the higher-order Bernoulli numbers satisfy the recurrence relation \begin{equation} \sum_{q=0}^{m}{m+k\choose q}S(m+k-q, k)B_{q}^{(k)}=\delta_{0, m}.
\label{impl} \end{equation} The most general relation involving (\ref{rec-rel}), (\ref{rec-rel1}) and (\ref{impl}) as particular cases is \begin{equation} \sum_{q=0}^{m}{m+k\choose q}S(m+k-q, k)B_{q}^{(r)}=\frac{{m+k\choose k}}{{m+k-r\choose k-r}}S(m+k-r, k-r),\;\; \forall k\geq r. \label{impl1} \end{equation} As is known, $S(m+k, k)$, for any fixed $m\geq 0$, is expressed as a polynomial $f_m(k)$ of degree $2m$, which satisfies the identity \[ f_m(k)-f_m(k-1)=kf_{m-1}(k) \] following from the identity (\ref{rr}). Therefore we can replace $S(m+k-q, k)$ by $f_{m-q}(k)$ in (\ref{higher-polynomials}). In the literature the polynomials $f_m(k)$ are known as the Stirling polynomials \cite{Gessel}, \cite{Jordan}. These are known to be expressed via the N\"orlund polynomials as (see, for example, \cite{Adelberg}) \[ f_m(k)={m+k\choose m}B_m^{(-k)}. \] The following proposition also gives the relationship of the higher Bernoulli numbers with the Stirling numbers. \begin{proposition} One has \begin{equation} B_m^{(k)}=\sum_{q=1}^{m} \frac{s(q+k, k)}{{q+k\choose k}}S(m, q). \label{8} \end{equation} \end{proposition} In (\ref{8}), $s(n, k)$ stands for the Stirling numbers of the first kind \cite{Weisstein1}. It is evident that in the case $k=1$, (\ref{8}) becomes \[ B_m=\sum_{q=1}^{m}(-1)^{q} \frac{q!}{q+1}S(m, q), \] while in the case $k=2$, it takes the following form: \[ B_m^{(2)}=2\sum_{q=1}^{m}(-1)^{q} \frac{(q+1)!H_{q+1}}{(q+1)(q+2)}S(m, q), \] where $H_m$ are the harmonic numbers defined by $H_m:=\sum_{q=1}^{m}1/q$.
To prove (\ref{higher-polynomials}), we need the following lemma: \begin{lemma} \label{le2} By virtue of (\ref{impl1}) we have \begin{eqnarray} R_m^{(k, r)}(n)&:=&\sum_{q=0}^{m}(-1)^{q}{m+k\choose q}S(m+k-q, k)\hat{S}_q^{(r)}(n)\label{le1}\\ &=&\frac{1}{{k\choose r}}\sum_{j=0}^{m}(-1)^j{m+k\choose m+k-r-j}S(m+k-r-j, k-r)\nonumber\\ &&\times S(r+j, r)n^{r+j},\;\; \forall m\geq 0,\;\;k\geq r. \label{lee} \end{eqnarray} \end{lemma} It should be remarked that in the case $k=r$, (\ref{lee}) becomes \begin{equation} R_m^{(k, k)}(n)=(-1)^mS(m+k, k)n^{m+k}. \label{rec-rel2} \end{equation} \noindent \textbf{Proof of lemma \ref{le2}}. We can rewrite (\ref{le1}) as \[ R_{m}^{(k, r)}(n)=\sum_{0\leq j\leq q \leq m}a_qb_{q, j}n^{r+q-j}, \] where \[ a_{q}:=\frac{{m+k\choose q}}{{r+q\choose r}}S(m+k-q, k) \] and \[ b_{q, j}:=(-1)^{q-j}{r+q\choose j}B_j^{(r)}S(r+q-j, r). \] Let $\tilde{j}=q-j$ and \[ b_{q, \tilde{j}}=(-1)^{\tilde{j}}{r+q\choose q- \tilde{j}}B_{q- \tilde{j}}^{(r)}S(r+\tilde{j}, r). \] In what follows, for simplicity, let us write $\tilde{j}$ without the tilde. Making use of the identity \[ {r+q\choose q- j}={r+q\choose r+j}={q\choose j}\frac{{r+q\choose r}}{{r+j\choose r}}, \] we get \[ a_{q}b_{q, j}=(-1)^{j}\frac{S(r+j, r)}{{r+j\choose r}}{m+k\choose q}{q\choose j}S(m+k-q, k)B_{q- j}^{(r)} \] and therefore \begin{eqnarray} R_{m}^{(k, r)}(n)&=&\sum_{0\leq j\leq q \leq m}a_qb_{q, j}n^{r+j}\nonumber\\ &=&\sum_{0\leq j\leq m}(-1)^{j}\frac{S(r+j, r)}{{r+j\choose r}}n^{r+j}\sum_{j\leq q\leq m}{m+k\choose q}{q\choose j}S(m+k-q, k)B_{q- j}^{(r)}.\nonumber \end{eqnarray} In turn, making use of the identity \[ {m+k\choose q}{q\choose j}={m+k\choose j}{m+k-j\choose q-j}, \] we get \begin{eqnarray} R_{m}^{(k, r)}(n)&=&\sum_{0\leq j\leq m}(-1)^{j}\frac{S(r+j, r)}{{r+j\choose r}}{m+k\choose j}n^{r+j}\nonumber\\ &&\times\sum_{j\leq q\leq m}{m+k-j\choose q-j}S(m+k-q, k)B_{q- j}^{(r)}.
\nonumber \end{eqnarray} Finally, by virtue of (\ref{impl1}), we get \begin{eqnarray} &&\sum_{j\leq q\leq m}{m+k-j\choose q-j}S(m+k-q, k)B_{q- j}^{(r)}\nonumber\\ &&\;\;\;\;\;\;\;\;\;\;\; =\sum_{0\leq q\leq m-j}{m+k-j\choose q}S(m+k-j-q, k)B_{q}^{(r)}\nonumber\\ &&\;\;\;\;\;\;\;\;\;\;\; =\frac{{m+k-j\choose k}}{{m+k-j-r\choose k-r}}S(m+k-j-r, k-r)\nonumber \end{eqnarray} and hence \begin{eqnarray} R_{m}^{(k, r)}(n)&=&\sum_{0\leq j\leq m}(-1)^{j}\frac{{m+k\choose j}}{{r+j\choose r}}\frac{{m+k-j\choose k}}{{m+k-j-r\choose k-r}}S(m+k-j-r, k-r)\nonumber\\ &&\times S(r+j, r)n^{r+j}\nonumber\\ &=&\frac{1}{{k\choose r}}\sum_{0\leq j\leq m}(-1)^{j}{m+k\choose m+k-j-r}S(m+k-j-r, k-r)\nonumber\\ &&\times S(r+j, r)n^{r+j}.\nonumber \end{eqnarray} Therefore the lemma is proved. $\Box$ The recurrence relation, for example (\ref{rec-rel2}), uniquely determines an infinite set of polynomials $\{\hat{S}_m^{(k)}(n) : m\geq 0\}$. We have written out some of them in the Appendix. For example, $\hat{S}_{0}^{(k)}(n)=n^k$. On the other hand, \[ S_{0}^{(k)}(n):=\sum_{q=0}^{k(n-1)}{k\choose q}_{n}=\Biggl(\sum_{q=0}^{n-1}t^q\Biggr)^{k}\Biggr|_{t=1}=n^k. \] \begin{lemma} \label{le3} The higher sums $S_m^{(k)}(n)$ satisfy the same recurrence relations as in lemma \ref{le2}, that is, \begin{eqnarray} &&\sum_{q=0}^{m}(-1)^{q}{m+k\choose q}S(m+k-q, k)S_q^{(r)}(n) \nonumber \\ &&\;\;\;=\frac{1}{{k\choose r}}\sum_{j=0}^{m}(-1)^j{m+k\choose m+k-r-j}S(m+k-r-j, k-r)S(r+j, r)n^{r+j}. \label{id} \end{eqnarray} \end{lemma} In the case $k=r=1$, (\ref{id}) becomes the well-known identity for the sums of powers \cite{Riordan}. \noindent \textbf{Proof of lemma \ref{le3}}. This lemma is proved by using standard arguments.
Let us replace the argument of the generating function $t\rightarrow -t$ to get \begin{equation} \sum_{q\geq 0}(-1)^qS^{(r)}_{q}(n)\frac{t^q}{q!}=(-1)^r\left(\frac{e^{-nt}-1}{e^{t}-1}\right)^{r}. \label{id1} \end{equation} Multiplying both sides of (\ref{id1}) by $(e^t-1)^k$ and taking into account that \[ (e^t-1)^k=k!\left(\sum_{q\geq 0} S(q, k)\frac{t^q}{q!}\right), \] we get (\ref{id}). $\Box$ Now, we are in a position to prove our theorem. \begin{theorem} One has \[ S_{m}^{(k)}(n)=\hat{S}_{m}^{(k)}(n), \] where $\hat{S}_{m}^{(k)}(n)$ are the polynomials (\ref{higher-polynomials}). \end{theorem} \noindent \textbf{Proof}. This theorem is a simple consequence of lemma \ref{le2} and lemma \ref{le3} since the sums ${S}_m^{(k)}(n)$ satisfy the same recurrence relations as the polynomials $\hat{S}_m^{(k)}(n)$. $\Box$ \section{The relationship of the sums $S_{m}^{(k)}(n)$ to other sums} In \cite{Svinin} we considered sums of the form \begin{equation} \mathcal{S}_{m}^{(k)}(n):=\sum_{\{\lambda\}\in B_{k, kn}}\left(\lambda_1^{m}+(\lambda_2-n)^{m}+\cdots+(\lambda_k-kn+n)^{m}\right), \label{sums11} \end{equation} where it is supposed that $m$ is odd. Here $B_{k, kn}:=\{\lambda_q : 1\leq \lambda_1\leq \cdots \leq \lambda_k\leq kn\}$. Let us remark that there are some terms of the form $r^m$ with negative $r$ in (\ref{sums11}). It is evident that in this case $r^m=-|r|^m$. By this rule, the sum (\ref{sums11}) can be rewritten as \begin{equation} \mathcal{S}_{m}^{(k)}(n)=\sum_{q=0}^{kn}c_q(k, n)q^m \label{defin} \end{equation} with some integer coefficients $c_q(k, n)$. It was conjectured in \cite{Svinin} that in the case of odd $m$ the sums $\mathcal{S}_{m}^{(k)}(n)$ and $S_{m}^{(k)}(n)$ are related with each other by \begin{equation} \mathcal{S}_{m}^{(k)}(n)=\sum_{q=0}^{k-1}{k(n+1)\choose q} S_{m}^{(k-q)}(n).
\label{relationsh} \end{equation} Let us define the sums $\mathcal{S}_{m}^{(k)}(n)$ with even $m$ by (\ref{defin}). It is evident that the conjectural relation (\ref{relationsh}) is valid for both odd and even $m$. More exactly, actual calculations show that \[ \mathcal{S}_{m}^{(k)}(n)-\sum_{q=0}^{k-1}{k(n+1)\choose q} S_{m}^{(k-q)}(n)=c_0(k, n)\delta_{m, 0}. \] \section*{Appendix} \subsection*{N\"orlund polynomials} The first six of the N\"orlund polynomials are given by \[ B_0^{(k)}=1,\;\; B_1^{(k)}=-\frac{1}{2}k,\;\; B_2^{(k)}=\frac{1}{12}k\left(3k-1\right),\;\; B_3^{(k)}=-\frac{1}{8}k^2\left(k-1\right), \] \[ B_4^{(k)}=\frac{1}{240}k\left(15k^3-30k^2+5k+2\right),\;\; B_5^{(k)}=-\frac{1}{96}k^2\left(k-1\right)\left(3k^2-7k-2\right). \] \subsection*{The polynomials $\hat{S}_m^{(k)}(n)$} The first six of these polynomials are given by \[ \hat{S}_0^{(k)}(n)=n^k,\;\; \hat{S}_1^{(k)}(n)=\frac{k}{2}n^k\left(n+1\right),\;\; \] \[ \hat{S}_2^{(k)}(n)=\frac{k}{12}n^k(n+1)\left((3k+1)n+3k-1\right), \] \[ \hat{S}_3^{(k)}(n)= \frac{k^2}{8}n^k(n+1)^2\left((k+1)n+k-1\right), \] \begin{eqnarray} \hat{S}_{4}^{(k)}(n)&=&\frac{k}{240}n^k(n+1)\left((15k^3+30k^2+5k-2)n^3+(45k^3+30k^2-5k+2)n^2\right. \nonumber\\ &&\left.+(45k^3-30k^2-5k-2)n+15k^3-30k^2+5k+2\right),\nonumber \end{eqnarray} \begin{eqnarray} \hat{S}_{5}^{(k)}(n)&=&\frac{k^2}{96}n^k(n+1)^2\left((3k^3+10k^2+5k-2)n^3+(9k^3+10k^2-5k+2)n^2\right.\nonumber\\ &&\left.+(9k^3-10k^2-5k-2)n+3k^3-10k^2+5k+2\right).\nonumber \end{eqnarray} \begin{thebibliography}{99} \bibitem{Adelberg} A. Adelberg, A finite difference approach to degenerate Bernoulli and Stirling polynomials, {\em Disc. Math.} \textbf{140} (1995), 1--21. \bibitem{Agoh} T. Agoh, K. Dilcher, Shortened recurrence relations for Bernoulli numbers, {\em Disc. Math.}, \textbf{309} (2009), 887--898. \bibitem{Carlitz} L.
Carlitz, Some theorems on Bernoulli numbers of higher order, {\em Pacific J. Math.}, \textbf{2} (1952), 127--139. \bibitem{Graham} R.~L. Graham, D.~E. Knuth and O. Patashnik, Concrete mathematics, Addison-Wesley, 1989. \bibitem{Euler} L. Euler, De evolutione potestatis polynomialis cuiuscunque $\left(1+x+x^2+\cdots\right)^n$, {\em Nova Acta Academiae Scientarum Imperialis Petropolitinae}, \textbf{12} (1801), 47--57. \bibitem{Gessel} I. Gessel, R.~P. Stanley, Stirling polynomials, {\em J. Comb. Theor., Ser. A} \textbf{24} (1978), 24--33. \bibitem{Jacobi} C.~G. Jacobi, De usu legitimo formulae summatoriae Maclaurinianae, {\em J. Reine Angew. Math.}, \textbf{12} (1834), 263--272. \bibitem{Jordan} K. Jord\'an, Calculus of finite differences, American Mathematical Soc., 1965. \bibitem{Knut} D.~E. Knuth, Johann Faulhaber and the Sums of Powers, Mathematics of Computation, American Mathematical Society, \textbf{61} (1993), 277--294. \bibitem{Moivre} A. De Moivre, The doctrine of chances: or, A method of calculating the probabilities of events in play, Chelsea Publishing Company, 1756. \bibitem{Norlund} N.~E. N\"orlund, Vorlesungen \"uber Differenzenrechnung, Berlin, 1924. \bibitem{Riordan} J. Riordan, Combinatorial identities, New York: Wiley, 1968. \bibitem{Svinin} A.~K. Svinin, Conjectures involving a generalization of the sums of powers of integers, {\em Exp. Math.}, accepted for publication. \bibitem{Weisstein1} E.~W. Weisstein, Stirling Number of the First Kind, From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/StirlingNumberoftheFirstKind.html \bibitem{Weisstein2} E.~W. Weisstein, Stirling Number of the Second Kind, From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/StirlingNumberoftheSecondKind.html \end{thebibliography} \end{document}
\begin{document} \newtheorem{defn}{Definition}[section] \newtheorem{thm}{Theorem}[section] \newtheorem{prop}{Proposition}[section] \newtheorem{exam}{Example}[section] \newtheorem{cor}{Corollary}[section] \newtheorem{rem}{Remark}[section] \newtheorem{lem}{Lemma}[section] \newcommand{\C}{\mathbb{C}} \newcommand{\K}{\mathbb{K}} \newcommand{\Z}{\mathbb{Z}} \def\a{{\alpha}} \def\b{{\beta}} \def\d{{\delta}} \def\g{{\gamma}} \def\l{{\lambda}} \def\gg{{\mathfrak g}} \def\c{\mathcal } \title{The classification of Leibniz superalgebras of nilindex $n+m$ ($m\neq0$)} \author{J. R. G\'{o}mez, A.Kh. Khudoyberdiyev and B.A. Omirov} \address{[J.R. G\'{o}mez] Dpto. Matem\'{a}tica Aplicada I. Universidad de Sevilla. Avda. Reina Mercedes, s/n. 41012 Sevilla. (Spain)} \email{[email protected]} \address{[A.Kh. Khudoyberdiyev -- B.A. Omirov] Institute of Mathematics and Information Technologies of Academy of Uzbekistan, 29, F.Hodjaev srt., 100125, Tashkent (Uzbekistan)} \email{[email protected] --- [email protected]} \thanks{The first author was supported by the PAI, FQM143 of the Junta de Andaluc\'{\i}a (Spain) and the last author was supported by grant NATO-Reintegration ref. CBP.EAP.RIG.983169} \maketitle \begin{abstract} In this paper we investigate the description of the complex Leibniz superalgebras with nilindex $n+m$, where $n$ and $m$ ($m\neq 0$) are the dimensions of the even and odd parts, respectively. In fact, such superalgebras with characteristic sequence equal to $(n_1, \dots, n_k | m_1, \dots, m_s)$ (where $n_1+\dots +n_k=n, \ m_1+ \dots + m_s=m$) for $n_1\geq n-1$ and $(n_1, \dots, n_k | m)$ were classified in works \cite{FilSup}--\cite{C-G-O-Kh1}.
Here we prove that in the case of $(n_1, \dots, n_k| m_1, \dots, m_s)$, where $n_1\leq n-2$ and $m_1 \leq m-1$, the Leibniz superalgebras have nilindex less than $n+m.$ Thus, we complete the classification of Leibniz superalgebras with nilindex $n+m.$ \end{abstract} \textbf{Mathematics Subject Classification 2000}: 17A32, 17B30, 17B70, 17A70. \textbf{Key Words and Phrases}: Lie superalgebras, Leibniz superalgebras, nilindex, characteristic sequence, natural gradation. \section{Introduction} During many years the theory of Lie superalgebras has been actively studied by many mathematicians and physicists. A systematic exposition of the basics of Lie superalgebra theory can be found in \cite{Kac}. Many works have been devoted to the study of this topic, but unfortunately most of them do not deal with nilpotent Lie superalgebras. In the works \cite{2007Yu}, \cite{GL}, \cite{G-K-N} the problem of the description of some classes of nilpotent Lie superalgebras has been studied. It is well known that Lie superalgebras are a generalization of Lie algebras. In the same way, the notion of Leibniz algebras, which were introduced in \cite{Lod}, can be generalized to Leibniz superalgebras \cite{Alb}, \cite{Liv}. Some elementary properties of Leibniz superalgebras were obtained in \cite{Alb}. In the work \cite{G-K-N} the Lie superalgebras with maximal nilindex were classified. Such superalgebras are two-generated and their nilindex is equal to $n+m$ (where $n$ and $m$ are the dimensions of the even and odd parts, respectively). In fact, there exists a unique Lie superalgebra of maximal nilindex. This superalgebra is a filiform Lie superalgebra (the characteristic sequence equals $(n-1,1 | m)$) and we mention the paper \cite{2007Yu}, where some crucial properties of filiform Lie superalgebras are given.
For nilpotent Leibniz superalgebras the description of the case of maximal nilindex (nilpotent Leibniz superalgebras distinguished by the feature of being single-generated) is not difficult and was done in \cite{Alb}. However, the description of Leibniz superalgebras of nilindex $n+m$ is a very problematic one and it requires solving many technical tasks. Therefore, they can be studied by applying restrictions on their characteristic sequences. In the present paper we consider Leibniz superalgebras with characteristic sequence $(n_1, \dots, n_k | m_1, \dots, m_s)$ ($n_1\leq n-2$ and $m_1\leq m-1$) and nilindex $n+m.$ Recall that such superalgebras for $n_1\geq n-1$ or $m_1=m$ have already been classified in the works \cite{FilSup}--\cite{C-G-O-Kh1}. Namely, we prove that a Leibniz superalgebra with characteristic sequence equal to $(n_1, \dots, n_k | m_1, \dots, m_s)$ ($n_1\leq n-2$ and $m_1\leq m-1$) has nilindex less than $n+m.$ Therefore, we complete the classification of Leibniz superalgebras with nilindex $n+m.$ It should be noted that in our study the natural gradation of the even part of a Leibniz superalgebra played one of the crucial roles. In fact, we used some properties of naturally graded Lie and Leibniz algebras for obtaining a convenient basis of the even part of the superalgebra (the so-called adapted basis). Throughout this work we shall consider spaces and (super)algebras over the field of complex numbers. By asterisks $(*)$ we denote the appropriate coefficients at the basic elements of a superalgebra. \section{Preliminaries} Recall the notion of Leibniz superalgebras. \begin{defn} A $\mathbb{Z}_2$-graded vector space $L=L_0\oplus L_1$ is called a Leibniz superalgebra if it is equipped with a product $[-, -]$ which satisfies the following conditions: 1. $[L_{\alpha},L_{\beta}]\subseteq L_{\alpha+\beta(mod\ 2)},$ 2.
$[x, [y, z]]=[[x, y], z] - (-1)^{\alpha\beta} [[x, z], y]-$ Leibniz superidentity,\\ for all $x\in L,$ $y \in L_{\alpha},$ $z \in L_{\beta}$ and $\alpha,\beta\in \mathbb{Z}_2.$ \end{defn} The vector spaces $L_0$ and $L_1$ are said to be the even and odd parts of the superalgebra $L$, respectively. Evidently, the even part of a Leibniz superalgebra is a Leibniz algebra. Note that if in a Leibniz superalgebra $L$ the identity $$[x,y]=-(-1)^{\alpha\beta} [y,x]$$ holds for any $x \in L_{\alpha}$ and $y \in L_{\beta},$ then the Leibniz superidentity can be transformed into the Jacobi superidentity. Thus, Leibniz superalgebras are a generalization of Lie superalgebras and Leibniz algebras. The set of all Leibniz superalgebras with the dimensions of the even and odd parts respectively equal to $n$ and $m$ we denote by $Leib_{n,m}.$ For a given Leibniz superalgebra $L$ we define the descending central sequence as follows: $$ L^1=L,\quad L^{k+1}=[L^k,L], \quad k \geq 1. $$ \begin{defn} A Leibniz superalgebra $L$ is called nilpotent if there exists $s\in\mathbb{N}$ such that $L^s=0.$ The minimal number $s$ with this property is called the nilindex of the superalgebra $L.$ \end{defn} \begin{defn} The set $$\mathcal{R}(L)=\left\{ z\in L\ |\ [L, z]=0\right\}$$ is called the right annihilator of a superalgebra $L.$ \end{defn} Using the Leibniz superidentity it is easy to see that $\mathcal{R}(L)$ is an ideal of the superalgebra $L$. Moreover, the elements of the form $[a,b]+(-1)^{\alpha \beta}[b,a]$ ($a \in L_{\alpha}, \ b \in L_{\beta}$) belong to $\mathcal{R}(L)$. The following theorem describes nilpotent Leibniz superalgebras with maximal nilindex.
\begin{thm} \label{t1} \cite{Alb} Let $L$ be a Leibniz superalgebra of $Leib_{n,m}$ with nilindex equal to $n+m+1.$ Then $L$ is isomorphic to one of the following non-isomorphic superalgebras: $$ [e_i,e_1]=e_{i+1},\ 1\le i\le n-1, \ m=0;\quad \left\{ \begin{array}{ll} [e_i,e_1]=e_{i+1},& 1\le i\le n+m-1, \\ {[}e_i,e_2{]}=2e_{i+2}, & 1\le i\le n+m-2,\\ \end{array}\right. $$ (omitted products are equal to zero). \end{thm} \begin{rem} {\em From the assertion of Theorem \ref{t1} we have that in the case of a non-trivial odd part $L_1$ of the superalgebra $L$ there are two possibilities for $n$ and $m$, namely, $m=n$ if $n+m$ is even and $m=n+1$ if $n+m$ is odd. Moreover, it is clear that a Leibniz superalgebra has the maximal nilindex if and only if it is single-generated.} \end{rem} Let $L=L_0\oplus L_1$ be a nilpotent Leibniz superalgebra. For an arbitrary element $x\in L_0,$ the operator of right multiplication $R_x\colon L \rightarrow L$ (defined as $R_x(y)=[y,x]$) is a nilpotent endomorphism of the space $L_i,$ where $i\in \{0, 1\}.$ Taking into account the property of complex endomorphisms we can consider the Jordan form for $R_x.$ For the operator $R_x$ denote by $C_i(x)$ ($i\in \{0, 1\}$) the descending sequence of its Jordan block dimensions. Consider the lexicographical order on the set $C_i(L_0)$. \begin{defn} \label{d4} A sequence $$C(L)=\left( \left.\max\limits_{x\in L_0\setminus L_0^2} C_0(x)\ \right|\ \max\limits_{\widetilde x\in L_0\setminus L_0^2} C_1\left(\widetilde x\right) \right) $$ is said to be the characteristic sequence of the Leibniz superalgebra $L.$ \end{defn} Similarly to \cite{GL} (corollary 3.0.1) it can be proved that the characteristic sequence is invariant under isomorphism.
Since Leibniz superalgebras from $Leib_{n,m}$ with nilindex $n+m$ and with characteristic sequences equal to $(n_1, {\delta}ots, n_k | m_1, {\delta}ots, m_s)$ either $n_1{\gamma}eq n-1$ or $m_1=m$ were already classified, we shall reduce our investigation to the case of the characteristic sequence $(n_1, {\delta}ots, n_k| m_1, {\delta}ots m_s),$ where $n_1{\lambda}eq n-2$ and $m_1 {\lambda}eq m-1$ From the Definition \ref{d4} we have that a Leibniz algebra $L_0$ has characteristic sequence $(n_1, {\delta}ots, n_k).$ Let $l \in \mathbb{N}$ be a nilindex of the Leibniz algebra $L_0.$ Since $n_1 {\lambda}eq n-2,$ then we have $l {\lambda}eq n-1$ and Leibniz algebra $L_0$ has at least two generators (the elements which belong to the set $L_0\setminus L_0^2$). For the completeness of the statement below we present the classifications of the papers \cite{FilSup}--\cite{C-G-O-Kh} and \cite{G-K-N}. $Leib_{1,m}:$ $$\small {\lambda}eft\{{\beta}egin{array}{l} [y_i,x_1]=y_{i+1}, \ \ 1{\lambda}eq i {\lambda}eq m-1. \end{array}\right.$$ $Leib_{n,1}:$ $$ \small{\lambda}eft\{{\beta}egin{array}{ll} [x_i,x_1]=x_{i+1},& 1 {\lambda}eq i {\lambda}eq n-1,\\{} [y_1,y_1]={\alpha}lpha x_n, & {\alpha}lpha = \{0, \ 1\}.\end{array}\right.$$ $Leib_{2,2}:$ $$\small{\beta}egin{array}{ll} {\lambda}eft\{{\beta}egin{array}{l} [y_1,x_1]=y_2, \\ {[}x_1,y_1]={\delta}isplaystyle \frac12 y_2, \\[2mm] {[}x_2,y_1]={\delta}isplaystyle y_2, \\[2mm] [y_1,x_2] = 2y_2, \\ {[}y_1,y_1]=x_2, \\ \end{array}\right.& {\lambda}eft\{{\beta}egin{array}{l} [y_1,x_1]=y_2, \\ {[}x_2,y_1]={\delta}isplaystyle y_2, \\[2mm] {[}y_1,x_2]= 2y_2, \\ {[}y_1,y_1]=x_2. \\ \end{array}\right. \end{array}$$ $Leib_{2,m}, \ m \ \rm{is \ odd}:$ $$\small{\beta}egin{array}{ll} &\\[2mm] {\lambda}eft\{{\beta}egin{array}{ll} [x_1,x_1]=x_2, \ & m{\gamma}eq 3, \\{} [y_i,x_1]=y_{i+1},& 1{\lambda}eq i{\lambda}eq m-1,\\{} [x_1,y_i]=-y_{i+1},&1{\lambda}eq i{\lambda}eq m-1,\\{} [y_i,y_{m+1-i}]=(-1)^{j+1}x_2, & 1{\lambda}eq i{\lambda}eq \frac{m+1}2. 
\end{array}\right.& {\lambda}eft\{{\beta}egin{array}{ll} [y_i,x_1]= [x_1, y_i]= -y_{i+1},& 1{\lambda}eq i{\lambda}eq m-1,\\{} [y_{m+1-i},y_i]=(-1)^{j+1}x_2, & 1{\lambda}eq i{\lambda}eq \frac{m+1}2. \end{array}\right. \end{array}$$ In order to present the classification of Leibniz superalgebras with characteristic sequence $(n-1,1 | m)$, $n {\gamma}eq 3$ and nilindex $n+m$ we need to introduce the following families of superalgebras: $${\beta}f Leib_{n,n-1}:$$ $L({\alpha}lpha_4, {\alpha}lpha_5, {\lambda}dots, {\alpha}lpha_n, \theta):$ $$ {\lambda}eft\{{\beta}egin{array}{ll} [x_1,x_1]=x_3,& \\[1mm] {[}x_i,x_1]=x_{i+1},& 2 {\lambda}e i {\lambda}e n-1, \\[1mm] {[}y_j,x_1]=y_{j+1},& 1 {\lambda}e j {\lambda}e n-2, \\[1mm] {[}x_1,y_1]= \frac12 y_2,& \\[1mm] {[}x_i,y_1]= \frac12 y_i, & 2 {\lambda}e i {\lambda}e n-1, \\[1mm] {[}y_1,y_1]=x_1,& \\[1mm] {[}y_j,y_1]=x_{j+1},& 2 {\lambda}e j {\lambda}e n-1, \\[1mm] {[}x_1,x_2]={\alpha}lpha_4x_4+ {\alpha}lpha_5x_5+ {\lambda}dots + {\alpha}lpha_{n-1}x_{n-1}+ \theta x_n,& \\[1mm] {[}x_j,x_2]= {\alpha}lpha_4x_{j+2}+ {\alpha}lpha_5x_{j+3}+ {\lambda}dots + {\alpha}lpha_{n+2-j}x_n,& 2 {\lambda}e j {\lambda}e n-2, \\[1mm] {[}y_1,x_2]= {\alpha}lpha_4y_3+ {\alpha}lpha_5y_4+ {\lambda}dots + {\alpha}lpha_{n-1}y_{n-2}+\theta y_{n-1},& \\[1mm] {[}y_j,x_2]= {\alpha}lpha_4y_{j+2}+ {\alpha}lpha_5y_{j+3}+ {\lambda}dots + {\alpha}lpha_{n+1-j}y_{n-1},& 2 {\lambda}e j {\lambda}e n-3. 
\end{array} \right.$$ $G({\beta}eta_4,{\beta}eta_5, {\lambda}dots, {\beta}eta_n, {\gamma}amma):$ $$ {\lambda}eft\{{\beta}egin{array}{ll} [x_1,x_1]=x_3, \\[1mm] {[}x_i,x_1]=x_{i+1},& 3 {\lambda}e i {\lambda}e n-1, \\[1mm] {[}y_j,x_1]=y_{j+1}, & 1 {\lambda}e j {\lambda}e n-2, \\[1mm] {[}x_1,x_2]= {\beta}eta_4x_4+{\beta}eta_5x_5+{\lambda}dots+{\beta}eta_nx_n,& \\[1mm] {[}x_2,x_2]= {\gamma}amma x_n,& \\[1mm] {[}x_j,x_2]= {\beta}eta_4x_{j+2}+{\beta}eta_5x_{j+3}+{\lambda}dots+{\beta}eta_{n+2-j}x_n,& 3{\lambda}e j{\lambda}e n-2, \\[1mm] {[}y_1,y_1]=x_1,& \\[1mm] {[}y_j,y_1]=x_{j+1},& 2 {\lambda}e j {\lambda}e n-1, \\[1mm] {[}x_1,y_1]= \frac12 y_2,& \\[1mm] {[}x_i,y_1]= \frac12 y_i,& 3{\lambda}e i{\lambda}e n-1, \\[1mm] {[}y_j,x_2]= {\beta}eta_4y_{j+2}+{\beta}eta_5y_{j+3}+ {\lambda}dots + {\beta}eta_{n+1-j}y_{n-1},& 1{\lambda}e j{\lambda}e n-3. \end{array} \right.$$ $${\beta}f Leib_{n,n}:$$ $M({\alpha}lpha_4, {\alpha}lpha_5, {\lambda}dots, {\alpha}lpha_n, \theta, \tau):$ $$ {\lambda}eft\{ {\beta}egin{array}{ll} [x_1,x_1]=x_3,& \\[1mm] {[}x_i,x_1]=x_{i+1},& 2 {\lambda}e i {\lambda}e n-1, \\[1mm] {[}y_j,x_1]=y_{j+1}, & 1 {\lambda}e j {\lambda}e n-1, \\[1mm] {[}x_1,y_1]= \frac12 y_2,& \\[1mm] {[}x_i,y_1]= \frac12 y_i, & 2 {\lambda}e i {\lambda}e n, \\[1mm] {[}y_1,y_1]=x_1,& \\[1mm] {[}y_j,y_1]=x_{j+1},& 2 {\lambda}e j {\lambda}e n-1, \\[1mm] {[}x_1,x_2]={\alpha}lpha_4x_4+ {\alpha}lpha_5x_5+ {\lambda}dots + {\alpha}lpha_{n-1}x_{n-1}+ \theta x_n,& \\[1mm] {[}x_2,x_2]={\gamma}amma_4x_4,&\\[1mm] {[}x_j,x_2]= {\alpha}lpha_4x_{j+2}+ {\alpha}lpha_5x_{j+3}+ {\lambda}dots + {\alpha}lpha_{n+2-j}x_n,&3 {\lambda}e j {\lambda}e n-2, \\[1mm] {[}y_1,x_2]= {\alpha}lpha_4y_3+ {\alpha}lpha_5y_4+ {\lambda}dots + {\alpha}lpha_{n-1}y_{n-2}+\theta y_{n-1}+\tau y_n,& \\[1mm] {[}y_2,x_2]= {\alpha}lpha_4y_4+ {\alpha}lpha_5y_4+ {\lambda}dots + {\alpha}lpha_{n-1}y_{n-1}+\theta y_n,& \\[1mm] {[}y_j,x_2]= {\alpha}lpha_4y_{j+2}+ {\alpha}lpha_5y_{j+3}+ {\lambda}dots + {\alpha}lpha_{n+2-j}y_{n},& 3 
{\lambda}e j {\lambda}e n-2.\end{array} \right.$$ $H({\beta}eta_4, {\beta}eta_5, {\lambda}dots,{\beta}eta_n, {\delta}elta , {\gamma}amma ):$ $$ {\lambda}eft\{ {\beta}egin{array}{ll} [x_1,x_1]=x_3,& \\[1mm] {[}x_i,x_1]=x_{i+1},& 3 {\lambda}e i {\lambda}e n-1, \\[1mm] {[}y_j,x_1]=y_{j+1}, & 1 {\lambda}e j {\lambda}e n-2, \\[1mm] {[}x_1,x_2]= {\beta}eta_4x_4+{\beta}eta_5x_5+{\lambda}dots+{\beta}eta_nx_n,& \\[1mm] {[}x_2,x_2]= {\gamma}amma x_n, &\\[1mm] {[}x_j,x_2]= {\beta}eta_4x_{j+2}+{\beta}eta_5x_{j+3}+{\lambda}dots+{\beta}eta_{n+2-j}x_n,& 3{\lambda}e j{\lambda}e n-2, \\[1mm] {[}y_1,y_1]=x_1,& \\[1mm] {[}y_j,y_1]=x_{j+1},& 2 {\lambda}e j {\lambda}e n-1, \\[1mm] {[}x_1,y_1]= \frac12 y_2,& \\[1mm] {[}x_i,y_1]= \frac12 y_i,& 3{\lambda}e i{\lambda}e n-1, \\[1mm] {[}y_1,x_2]= {\beta}eta_4y_3+{\beta}eta_5y_4+ {\lambda}dots + {\beta}eta_ny_{n-1}+{\delta}elta y_n,& \\[1mm] {[}y_j,x_2]= {\beta}eta_4y_{j+2}+{\beta}eta_5y_{j+3}+ {\lambda}dots + {\beta}eta_{n+2-j}y_n,& 2{\lambda}e j{\lambda}e n-2. \end{array} \right.$$ Analogously, for the Leibniz superalgebras with characteristic sequence $(n | m-1,1)$, $n {\gamma}eq 2$ we introduce the following families of superalgebras: $${\beta}f Leib_{n,n+1}:$$ $E{\lambda}eft( {\gamma}amma, {\beta}eta_{{\lambda}eft[ \frac{n+4}2\right]}, {\beta}eta_{{\lambda}eft[ \frac{n+4}2\right]+1}, {\lambda}dots, {\beta}eta_n,{\beta}eta\right):$ $$ {\lambda}eft\{ {\beta}egin{array}{ll} [x_i,x_1]=x_{i+1},& 1 {\lambda}e i {\lambda}e n-1, \\[1mm] [y_j,x_1]=y_{j+1}, & 1 {\lambda}e j {\lambda}e n-1, \\[1mm] [x_i,y_1]=\frac12 y_{i+1}, &1{\lambda}e i{\lambda}e n-1, \\[1mm] [y_j,y_1]=x_{j}, & 1{\lambda}e j{\lambda}e n, \\[1mm] [y_{n+1},y_{n+1}]={\gamma}amma x_n, & \\[1mm] [x_i,y_{n+1}]=\sum{\lambda}imits_{k={\lambda}eft[\frac{n+4}2\right]}^{n+1-i} {\beta}eta_k y_{k-1+i}, & 1{\lambda}e i{\lambda}e {\lambda}eft[ \frac{n-1}2\right], \\[1mm] [y_1,y_{n+1}]=-2\sum{\lambda}imits_{k={\lambda}eft[\frac{n+4}2\right]}^{n} {\beta}eta_k x_{k-1}+{\beta}eta x_n,& \\[1mm] 
[y_j,y_{n+1}]=-2\sum{\lambda}imits_{k={\lambda}eft[\frac{n+4}2\right]}^{n+2-j} {\beta}eta_k x_{k-2+j},& 2{\lambda}e j{\lambda}e {\lambda}eft[\frac{n+1}2\right]. \\[1mm] \end{array} \right.$$ $${\beta}f Leib_{n,n+2}:$$ $ F{\lambda}eft( {\beta}eta_{{\lambda}eft[\frac{n+5}2\right]}, {\beta}eta_{{\lambda}eft[\frac{n+5}2\right]+1}, {\lambda}dots,\right.$ ${\lambda}eft. {\beta}eta_{n+1}\right): $ $$ {\lambda}eft\{ {\beta}egin{array}{ll} [x_i,x_1]=x_{i+1},& 1 {\lambda}e i {\lambda}e n-1, \\[1mm] [y_j,x_1]=y_{j+1}, & 1 {\lambda}e j {\lambda}e n, \\[1mm] [x_i,y_1]=\frac12 y_{i+1}, &1{\lambda}e i{\lambda}e n, \\[1mm] [y_j,y_1]=x_{j}, & 1{\lambda}e j{\lambda}e n, \\[1mm] [x_i,y_{n+2}]=\sum{\lambda}imits_{k={\lambda}eft[\frac{n+5}2\right]}^{n+2-i} {\beta}eta_k y_{k-1+i}, & 1{\lambda}e i{\lambda}e {\lambda}eft[ \frac{n}2\right], \\[1mm] [y_j,y_{n+2}]=-2\sum{\lambda}imits_{k={\lambda}eft[\frac{n+5}2\right]}^{n+2-j} {\beta}eta_k x_{k-2+j}, & 1{\lambda}e j{\lambda}e {\lambda}eft[ \frac{n}2\right] \end{array} \right.$$ Let us introduce also the following operators which act on $k$-dimensional vectors: $$ {\beta}egin{array}{rl} j & \\ V^0_{j,k}({\alpha}lpha_1, {\alpha}lpha_2,{\lambda}dots, {\alpha}lpha_k) = ( 0, {\lambda}dots, 0, 1, & {\delta}elta \sqrt[j]{{\delta}elta ^{j+1}} S_{m,j}^{j+1} {\alpha}lpha_{j+1}, {\delta}elta \sqrt[j]{{\delta}elta ^{j+2}} S_{m,j}^{j+2} {\alpha}lpha_{j+2}, {\lambda}dots , {\delta}elta \sqrt[j]{{\delta}elta ^{k}} S_{m,j}^{k} {\alpha}lpha_{k} ) ; \\ \end{array} $$ $$ {\beta}egin{array}{rl} j & \\ V^1_{j,k}({\alpha}lpha_1, {\alpha}lpha_2,{\lambda}dots, {\alpha}lpha_k) = ( 0, {\lambda}dots, 0, 1, & S_{m,j}^{j+1} {\alpha}lpha_{j+1}, S_{m,j}^{j+2} {\alpha}lpha_{j+2}, {\lambda}dots , S_{m,j}^{k} {\alpha}lpha_{k} ) ; \\ \end{array} $$ $$ {\beta}egin{array}{rl} j & \\ V^2_{j,k}({\alpha}lpha_1, {\alpha}lpha_2,{\lambda}dots, {\alpha}lpha_k) = ( 0, {\lambda}dots, 0, 1, & S_{m,2j+1}^{2(j+1)+1} {\alpha}lpha_{j+1}, S_{m,2j+1}^{2(j+2)+1} {\alpha}lpha_{j+2}, 
{\lambda}dots , S_{m,2j+1}^{2k+1} {\alpha}lpha_{k} ) ; \\ \end{array} $$ $$ V^0_{k+1,k}({\alpha}lpha_1, {\alpha}lpha_2,{\lambda}dots, {\alpha}lpha_k) = V^1_{k+1,k}({\alpha}lpha_1, {\alpha}lpha_2,{\lambda}dots, {\alpha}lpha_k) = V^2_{k+1,k}({\alpha}lpha_1, {\alpha}lpha_2,{\lambda}dots, {\alpha}lpha_k) = (0, 0, {\lambda}dots, 0); $$ $$ W_{s,k}(0,0,{\lambda}dots,\stackrel{j-1}{0},\stackrel{j}{1},S_{m,j}^{j+1}{\alpha}lpha_{j+1},S_{m,j}^{j+2}{\alpha}lpha_{j+2},{\lambda}dots, S_{m,j}^k{\alpha}lpha_k,{\gamma}amma)= $$ $$ =( 0, 0,{\lambda}dots, \stackrel{j}{1} ,0,{\lambda}dots,\stackrel{s+j}{1}, S_{m,s}^{s+1}{\alpha}lpha_{s+j+1}, S_{m,s}^{s+2}{\alpha}lpha_{s+j+2},{\lambda}dots, S_{m,s}^{k-j}{\alpha}lpha_k, S_{m,s}^{k+6-2j}{\gamma}amma), $$ $$ W_{k+1-j,k}(0,0,{\lambda}dots,\stackrel{j-1}{0},\stackrel{j}{1},S_{m,j}^{j+1}{\alpha}lpha_{j+1},S_{m,j}^{j+2}{\alpha}lpha_{j+2},{\lambda}dots, S_{m,j}^k{\alpha}lpha_k,{\gamma}amma)=$$ $\qquad =(0,0,{\lambda}dots,\stackrel{j}{1},0,{\lambda}dots,1),$ $$ W_{k+2-j,k}(0,0,{\lambda}dots,\stackrel{j-1}{0},\stackrel{j}{1},S_{m,j}^{j+1} {\alpha}lpha_{j+1},S_{m,j}^{j+2}{\alpha}lpha_{j+2},{\lambda}dots, S_{m,j}^k{\alpha}lpha_k,{\gamma}amma)=$$ $\qquad =(0,0,{\lambda}dots,\stackrel{j}{1},0,{\lambda}dots,0), $ \\ where $k\in N,$ ${\delta}elta=\pm 1,$ $1{\lambda}e j{\lambda}e k,$ $1{\lambda}e s{\lambda}e k-j,$ and ${\delta}isplaystyle S_{m,t}=\cos\frac{2\pi m}t+i\sin\frac{2\pi m}t$ $(m=0,1,{\lambda}dots, t-1).$ Below we present the complete list of pairwise non-isomorphic Leibniz superalgebras with $n+m:$ with characteristic sequence equal to $(n-1,1|m):$ $$ {\beta}egin{array}{l} L{\lambda}eft( V^1_{j,n-3}{\lambda}eft( {\alpha}lpha_4,{\alpha}lpha_5,{\lambda}dots, {\alpha}lpha_n\right),S_{m,j}^{n-3}\theta\right),\qquad \ \ 1{\lambda}e j{\lambda}e n-3, \\[2mm] L(0,0,{\lambda}dots,0,1), \ L(0,0,{\lambda}dots,0), \ G(0,0,{\lambda}dots,0,1), \ G(0,0,{\lambda}dots,0), \\[2mm] G{\lambda}eft( W_{s,n-2}{\lambda}eft( V^1_{j,n-3}{\lambda}eft( 
{\beta}eta_4,{\beta}eta_5,{\lambda}dots,{\beta}eta_n\right),{\gamma}amma\right)\right),\quad 1{\lambda}e j{\lambda}e n-3,\ 1{\lambda}e s{\lambda}e n-j, \\[2mm] M{\lambda}eft( V^1_{j,n-2}{\lambda}eft( {\alpha}lpha_4,{\alpha}lpha_5,{\lambda}dots,{\alpha}lpha_n\right),S_{m,j}^{n-3}\theta\right), \qquad \ 1{\lambda}e j{\lambda}e n-2, \\[2mm] M(0,0,{\lambda}dots,0,1), \ M(0,0,{\lambda}dots,0), \ H(0,0,{\lambda}dots,0,1), \ H(0,0,{\lambda}dots,0), \\[2mm] H{\lambda}eft( W_{s,n-1}{\lambda}eft( V^1_{j,n-2}{\lambda}eft( {\beta}eta_4,{\beta}eta_5,{\lambda}dots,{\beta}eta_n\right),{\gamma}amma\right)\right),\quad 1{\lambda}e j{\lambda}e n-2,\ 1{\lambda}e s{\lambda}e n+1-j, \\\end{array} $$ with characteristic sequence equal to $(n|m-1,1)$ if $n$ is odd (i.e. $n=2q-1$): $$ {\beta}egin{array}{lll} E{\lambda}eft(1,{\delta}elta{\beta}eta_{q+1}, V_{j,q-2}^0({\beta}eta_{q+2}, {\beta}eta_{q+3}, {\lambda}dots, {\beta}eta_n),0\right), & {\delta}isplaystyle {\beta}eta_{q+1}\ne \pm\frac12, & 1{\lambda}e j{\lambda}e q-1, \\[2mm] E{\lambda}eft(1,{\beta}eta_{q+1}, V_{j,q-1}^0({\beta}eta_{q+2}, {\beta}eta_{q+3}, {\lambda}dots, {\beta}eta_n,{\beta}eta)\right), & {\beta}eta_{q+1}={\delta}isplaystyle \pm\frac12, & 1{\lambda}e j{\lambda}e q, \\[2mm] E(0,1,V_{j,q-2}^0({\beta}eta_{q+2}, {\beta}eta_{q+3}, {\lambda}dots, {\beta}eta_n),0), & 1{\lambda}e j{\lambda}e q-1, & \\[2mm] E(0,0, W_{s,q-1}(V^1_{j,q-1}({\beta}eta_{q+2}, {\beta}eta_{q+3}, {\lambda}dots, {\beta}eta_n, {\beta}eta))), & 1{\lambda}e j{\lambda}e q-1, & 1{\lambda}e s{\lambda}e q-j, \\[2mm] E(0,0,{\lambda}dots,0); \\ \end{array} $$ if $n$ is even (i.e. $n=2q$): $$ {\beta}egin{array}{lll} E(1,V^2_{j,q-1}({\beta}eta_{q+2}, {\beta}eta_{q+3}, {\lambda}dots, {\beta}eta_n,), 0), & 1{\lambda}e j{\lambda}e q, & \\[2mm] E(0, W_{s,q}(V^1_{j,q}({\beta}eta_{q+2}, {\beta}eta_{q+3}, {\lambda}dots, {\beta}eta_n,{\beta}eta))), & 1{\lambda}e j{\lambda}e q, & 1{\lambda}e s{\lambda}e q+1-j, \\[2mm] E(0,0,{\lambda}dots,0). 
\\ \end{array} $$ $$ F{\lambda}eft( W_{s,n+2-{\lambda}eft[\frac{n+5}2\right]} {\lambda}eft( V^1_{j, n+2-{\lambda}eft[\frac{n+5}2\right]} {\lambda}eft( {\beta}eta_{{\lambda}eft[ \frac{n+5}2 \right]}, {\beta}eta_{{\lambda}eft[\frac{n+5}2\right]+1}, {\lambda}dots, {\beta}eta_{n+1}\right)\right)\right), $$ where $1{\lambda}e j{\lambda}e n+2-{\delta}isplaystyle {\lambda}eft[\frac{n+5}2\right],$ $1{\lambda}e s{\lambda}e n+3-{\delta}isplaystyle {\lambda}eft[ \frac{n+5}2\right]-j,$ $$ F(0,0,{\lambda}dots,0). $$ For a given Leibniz algebra $A$ of the nilindex $l$ we put $gr(A)_i = A^i / A^{i+1}, \quad 1 {\lambda}eq i {\lambda}eq l-1$ and $gr(A) = gr(A)_1 \oplus gr(A)_2 \oplus {\delta}ots \oplus gr(A)_{l-1}.$ Then $[gr(A)_i, gr(A)_j] \subseteq gr(A)_{i+j}$ and we obtain the graded algebra $gr(A).$ {\beta}egin{defn} {\lambda}abel{d5} The gradation constructed in this way is called the natural gradation and if a Leibniz algebra $G$ is isomorphic to $gr(A)$ we say that the algebra $G$ is naturally graded Leibniz algebra. \end{defn} \section{The main result} Let $L$ be a Leibniz superalgebra with characteristic sequence $(n_1, {\delta}ots, n_k| m_1, {\delta}ots, m_s),$ where $ n_1 {\lambda}eq n-2,$ $m_1 {\lambda}eq m-1$ and of nilindex $n+m.$ Since the second part of the characteristic sequence of the Leibniz superalgebra $L$ is equal to $(m_1, {\delta}ots, m_s)$ then by the definition of the characteristic sequence there exists a nilpotent endomorphism $R_x$ ($x\in L_0\setminus L_0^2$) of the space $L_1$ such that its Jordan form consists of $s$ Jordan blocks. Therefore, we can assume the existence of an adapted basis $\{y_1, y_2, {\delta}ots, y_m\}$ of the subspace $L_1,$ such that $$ {\lambda}eft\{{\beta}egin{array}{ll} [y_j,x]=y_{j+1}, \ & j \notin \{m_1, m_1+m_2, {\delta}ots, m_1+m_2+ {\delta}ots + m_s\},\\{} [y_j,x]=0,& j \in \{m_1, m_1+m_2, {\delta}ots, m_1+m_2+ {\delta}ots + m_s\}. \end{array}\right. 
\eqno(1) $$ for some $x \in L_0\setminus L_0^2.$ Further we shall use a homogeneous basis $\{x_1, \dots, x_n\}$ with respect to the natural gradation of the Leibniz algebra $L_0,$ which also agrees with the lower central sequence of $L.$ The main result of the paper is that the nilindex of the Leibniz superalgebra $L$ with characteristic sequence $(n_1, \dots, n_k| m_1, \dots, m_s),$ $ n_1 \leq n-2, m_1 \leq m-1$ is less than $n+m.$ According to Theorem \ref{t1} we have the description of single-generated Leibniz superalgebras, which have nilindex $n+m+1.$ If the number of generators is greater than two, then the superalgebra has nilindex less than $n+m.$ Therefore, we should consider the case of two-generated superalgebras. The possible cases for the generators are: 1. Both generators lie in $L_0,$ i.e. $dim(L^2)_0 = n-2$ and $dim(L^2)_1 = m;$ 2. One generator lies in $L_0$ and another one lies in $L_1,$ i.e. $dim(L^2)_0 = n-1$ and $dim(L^2)_1 = m-1;$ 3. Both generators lie in $L_1,$ i.e. $dim(L^2)_0 = n$ and $dim(L^2)_1 = m-2.$ Moreover, a two-generated superalgebra $L$ has nilindex $n+m$ if and only if $dim L^k = n+m-k$ for $2 \leq k \leq n+m.$ Since $m\neq 0$ we omit the case where both generators lie in the even part. \subsection{The case of one generator in $L_0$ and another one in $L_1$} \ Since $dim(L^2)_0 = n-1$ and $dim(L^2)_1 = m-1$ then there exists some $m_j,$ $0 \leq j \leq s-1$ (here we assume $m_0=0$) such that $y_{m_1+ \dots + m_j + 1} \notin L^2.$ By a shifting of basic elements we can assume that $m_j=m_0,$ i.e. the basic element $y_1$ can be chosen as a generator of the superalgebra $L.$ Of course, by this shifting the condition $m_1 \geq m_2 \geq \dots \geq m_s$ from the definition of the characteristic sequence can be broken, but further we shall not use this condition.
Let $L=L_0 \oplus L_1$ be a two-generated Leibniz superalgebra from $Leib_{n,m}$ with characteristic sequence equal to $(n_1, \dots, n_k| m_1, \dots, m_s)$ and let $\{x_1, \dots, x_n, y_1, \dots, y_m\}$ be a basis of $L.$ \begin{lem}\label{l1} Let one generator lie in $L_0$ and another one lie in $L_1.$ Then $x_1$ and $y_1$ can be chosen as generators of $L.$ Moreover, in equality (1) instead of the element $x$ we can take $x_1.$ \end{lem} \begin{proof} As mentioned above, $y_1$ can be chosen as the first generator of $L$. If $x\in L\setminus L^2$ then the assertion of the lemma is evident. If $x\in L^2$ then there exists some $i_0$ ($2\leq i_0$) such that $x_{i_0}\in L\setminus L^2.$ Set $x_1'=Ax + x_{i_0}$ for $A\neq 0$; then $x_1'$ is a generator of the superalgebra $L$ (since $x_1'\in L\setminus L^2$). Moreover, making a transformation of the basis of $L_1$ as follows $$ \left\{\begin{array}{ll} y_j'= y_j, \ & j \in \{1, m_1+1, \dots, m_1+m_2+ \dots + m_{s-1}+1\},\\{} y_j'= [y_{j-1}',x_1'],& j \notin \{1, m_1+1, \dots, m_1+m_2+ \dots + m_{s-1}+1\}, \end{array}\right.$$ and taking a sufficiently large value of the parameter $A,$ we preserve the equality (1). Thus, in the basis $\{x_1', x_2, \dots, x_n, y_1', y_2', \dots, y_m'\}$ the elements $x_1'$ and $y_1'$ are generators. \end{proof} Due to Lemma \ref{l1}, further we shall suppose that $\{x_1, y_1\}$ are generators of the Leibniz superalgebra $L.$ Therefore, $$L^2 = \{x_2, x_3, \dots, x_n, y_2, y_3, \dots, y_m\}.$$ Let us introduce the notations: $$[x_i,y_1]= \sum\limits_{j=2}^m \alpha_{i,j}y_j,\ 1 \le i \le n, \ \ \ [y_i,y_1]= \sum\limits_{j=2}^n \beta_{i,j}x_j, \ 1 \le i \le m.
\eqno (2)$$ Without loss of generality we can assume that $y_{m_1+{\delta}ots+m_i+1}\in L^{t_i}\setminus L^{{t_i}+1}$, where $t_i<t_j$ for $1{\lambda}eq i<j{\lambda}eq s-1.$ Firstly we consider the case of $dim(L^3)_0 = n-1,$ then $dim(L^3)_0 = n-2.$\\[0,5mm] {{\beta}f Case $dim(L^3)_0 = n-1$}.\\[0,5mm] In this subcase we have $$L^3 = \{x_2, x_3, {\delta}ots, x_n, y_3, {\delta}ots, y_{m_1}, B_1y_2 +B_2y_{m_1+1}, y_{m_1+2}, {\delta}ots, y_m\},$$ where $(B_1,B_2)\neq(0,0).$ Analyzing the way the element $x_2$ can be obtained, we conclude that there exist $i_0 \ (2 {\lambda}eq i_0 {\lambda}eq m)$ such that $[y_{i_0},y_1]= \sum{\lambda}imits_{j=2}^n {\beta}eta_{i_0,2}x_j, \ {\beta}eta_{i_0,2} \neq 0.$ Let us show that $i_0 \notin\{m_1+1, {\delta}ots, m_1+ {\delta}ots + m_{s-1}+1\}.$ It is known that the elements $y_{m_1+ m_2+1}, {\delta}ots, y_{m_1+ {\delta}ots + m_{s-1}+1}$ are generated from the products $[x_i, y_1], \ (2 {\lambda}eq i {\lambda}eq n).$ Due to nilpotency of $L$ we get $i_0 \notin \{m_1+m_2+1, {\delta}ots, m_1+ {\delta}ots + m_{s-1}+1\}.$ If $y_{m_1+1}$ is generated by $[x_1, y_1],$ i.e. 
in the expression $[x_1, y_1] = \sum{\lambda}imits_{j=2}^m {\alpha}lpha_{1,j}y_j$ ${\alpha}lpha_{1, m_1+1} \neq 0$ then we consider the product $$[[x_1, y_1],y_1] = [\sum{\lambda}imits_{j=2}^m {\alpha}lpha_{1,j}y_j, y_1] = {\alpha}lpha_{1, m_1+1}{\beta}eta_{m_1+1,2}x_2 + \sum{\lambda}imits_{i {\gamma}eq 3} (*)x_i.$$ On the other hand, $$[[x_1, y_1],y_1] = \frac 1 2 [x_1,[y_1, y_1]] = \frac 1 2 [x_1, \sum{\lambda}imits_{j=2}^n {\beta}eta_{1,j}x_j] = \sum{\lambda}imits_{i {\gamma}eq 3} (*)x_i$$ Comparing the coefficients at the corresponding basic elements we obtain ${\alpha}lpha_{1, m_1+1}{\beta}eta_{m_1+1,2}=0,$ which implies ${\beta}eta_{m_1+1,2}=0.$ It means that $i_0 \neq m_1+1.$ Therefore, ${\beta}eta_{i_0,2} \neq 0,$ where $i_0 \notin\{m_1+1, {\delta}ots, m_1+ {\delta}ots + m_{s-1}+1\}.$ \ \textbf{Case $y_2 \notin L^3.$} Then $B_2 \neq 0.$ Let $h\in\mathbb{N}$ be a number such that $x_2 \in L^h\setminus L^{h+1},$ that is $$ L^h = \{x_2, x_3, {\delta}ots, x_n, y_h,{\delta}ots, y_{m_1}, B_1y_2 +B_2y_{m_1+1}, y_{m_1+2}, {\delta}ots, y_m\}, \ h {\gamma}eq 3,$$ $$L^{h+1} = \{x_3, x_4, {\delta}ots, x_n, y_h, {\delta}ots, y_{m_1}, B_1y_2 +B_2y_{m_1+1}, y_{m_1+2},{\delta}ots, y_m\}.$$ Since the elements $B_1y_2 +B_2y_{m_1+1}, y_{m_1+ m_2+1}, {\delta}ots, y_{m_1+ {\delta}ots +m_{s-1}+1}$ are generated from the multiplications $[x_i, y_1], 2 {\lambda}eq i {\lambda}eq n$ it follows that $h {\lambda}eq m_1 +1.$ So, $x_2$ can be obtained only from product $[y_{h-1},y_1]$ and thereby ${\beta}eta_{h-1,2} \neq 0.$ Making the change $x_2'= \sum{\lambda}imits_{j=2}^n {\beta}eta_{h-1,j}x_j$ we can assume that $[y_{h-1}, y_1] = x_2.$ Let now $p$ is a number such that $y_h \in L^{h+p}\setminus L^{h+p+1}.$ Then for the powers of superalgebra $L$ we have the following $$L^{h+p} = \{x_{p+2}, x_{p+3}, {\delta}ots, x_n, y_h, {\delta}ots, y_{m_1}, B_1y_2 +B_2y_{m_1+1}, y_{m_1+2}, {\delta}ots, y_m\}, \ p {\gamma}eq 1,$$ $$L^{h+p+1} = \{x_{p+2}, x_{p+3}, {\delta}ots, x_n, y_{h+1}, 
{\delta}ots, y_{m_1}, B_1y_2 +B_2y_{m_1+1}, y_{m_1+2}, {\delta}ots, y_m\}.$$ In the following lemma the useful expression for the products $[y_i, y_j]$ is presented. {\beta}egin{lem}{\lambda}abel{le2} The equality: $$[y_i, y_j] = (-1)^{h-1-i}C_{j-1}^{h-1-i}x_{i+j+2-h} + \sum{\lambda}imits_{t > i+j+2-h}(*)x_t, \eqno (3)$$ $1 {\lambda}eq i {\lambda}eq h-1, \ h-i {\lambda}eq j {\lambda}eq min\{h-1, h-1+p-i\},$ holds. \end{lem} {\beta}egin{proof} The proof is deduced by the induction on $j$ at any value of $i.$ \end{proof} For the natural number $p$ we have the following {\beta}egin{lem}{\lambda}abel{l3} Under the above conditions $p=1.$ \end{lem} {\beta}egin{proof} Assume the contrary, i.e. $p> 1.$ Then we can suppose $$[x_i, x_1] = x_{i+1}, \ 2 {\lambda}eq i {\lambda}eq p, \quad [x_{p+1}, y_1] = \sum{\lambda}imits_{j = h}^m{\alpha}lpha_{p+1,j} y_j, \quad {\alpha}lpha_{p+1,h} \neq 0.$$ Using the equality (3) we consider the following chain of equalities $$[y_1, [y_{h-1},x_1]] = [[y_1, y_{h-1}],x_1] - [[y_1,x_1], y_{h-1}]= (-1)^{h-2}x_3 + \sum{\lambda}imits_{t {\gamma}eq 4}(*)x_t-$$ $$-(-1)^{h-3}(h-2)x_3 + \sum{\lambda}imits_{t {\gamma}eq 4}(*)x_t= (-1)^{h}(h-1)x_3 + \sum{\lambda}imits_{t {\gamma}eq 4}(*)x_t.$$ If $h {\lambda}eq m_1,$ then $[y_1, [y_{h-1},x_1]] = [y_1, y_h]$. Since $y_h \in L^{h+p}$ and $p > 1$ then in the decomposition of $[y_1, y_h]$ the coefficient at the basic elements $x_2$ and $x_3$ are equal to zero. Therefore, from the above equalities we get a contradiction with assumption $p>1.$ If $h = m_1 +1,$ then $[y_1, [y_{h-1},x_1]] = 0$ and we also obtain the irregular equality $(-1)^{h}(h-1)x_3 + \sum{\lambda}imits_{t {\gamma}eq 4}(*)x_t=0.$ Therefore, the proof of the lemma is completed. 
\end{proof} We summarize our main result in the considered cases in the following \begin{thm}\label{t2} Let $L=L_0 \oplus L_1$ be a Leibniz superalgebra from $Leib_{n,m}$ with characteristic sequence equal to $(n_1, \dots, n_k | m_1, \dots, m_s),$ where $n_1\leq n-2, \ m_1\leq m-1$ and let $dim(L^3)_0 = n-1$ with $y_2 \notin L^3.$ Then $L$ has a nilindex less than $n+m.$ \end{thm} \begin{proof} Let us assume the contrary, i.e. that the nilindex of the superalgebra $L$ is equal to $n+m.$ Then according to Lemma \ref{l3} we have $$L^{h+2} = \{x_3, \dots, x_n, y_{h+1},\dots, y_{m_1}, B_1y_2 +B_2y_{m_1+1}, y_{m_1+2}, \dots, y_m\}.$$ Since $y_h \notin L^{h+2},$ it follows that $$ \alpha_{2,h}\neq 0, \quad \alpha_{i,h}=0 \quad \mbox{for}\quad i>2.$$ Consider the product $$[[y_{h-1}, y_1], y_1] = \frac 1 2 [ y_{h-1}, [ y_1, y_1]] = \frac 1 2 [y_{h-1}, \sum\limits_{i=2}^n\beta_{1,i}x_i] .$$ The element $y_{h-1}$ belongs to $L^{h-1}$ and the elements $x_2, x_3, \dots, x_n$ lie in $L^3.$ Hence $\frac 1 2 [y_{h-1}, \sum\limits_{i=2}^n\beta_{1,i}x_i] \in L^{h+2}.$ Since $y_h \notin L^{h+2},$ we obtain that $[[y_{h-1}, y_1], y_1] =\sum\limits_{j\geq h+1}(*)y_j.$ On the other hand, $$[[y_{h-1}, y_1], y_1] = [x_2, y_1] = \alpha_{2,h}y_h + \sum\limits_{j= h+1}^m\alpha_{2,j}y_j.$$ Comparing the coefficients of the basic elements we obtain $ \alpha_{2,h}=0,$ which is a contradiction with the assumption that the superalgebra $L$ has nilindex equal to $n+m,$ and therefore the assertion of the theorem is proved. \end{proof} \textbf{Case $y_2 \in L^3.$} Then $B_2=0$ and the following theorem is true.
\begin{thm}\label{t3} Let $L=L_0 \oplus L_1$ be a Leibniz superalgebra from $Leib_{n,m}$ with characteristic sequence equal to $(n_1, \dots, n_k | m_1, \dots, m_s),$ where $n_1\leq n-2, \ m_1\leq m-1$ and let $dim(L^3)_0 = n-1$ with $y_2 \in L^3.$ Then $L$ has a nilindex less than $n+m.$ \end{thm} \begin{proof} We shall prove the assertion of the theorem by contradiction, i.e. we assume that the nilindex of the superalgebra $L$ is equal to $n+m.$ The condition $y_2 \in L^3$ implies $$L^3 = \{x_2, x_3, \dots, x_n, y_2, \dots, y_{m_1}, y_{m_1+2}, \dots, y_m\}.$$ Then $\alpha_{1, m_1+1} \neq 0$ and $\alpha_{i, m_1+1} = 0$ for $i \geq 2.$ The element $y_2$ is generated from the products $[x_i, y_1],$ $i\geq 2,$ which implies $y_2 \in L^4.$ Since $[y_{m_{1} +1}, y_1]=[[x_1,y_1],y_1]=\frac{1}{2}[x_1,[y_1,y_1]]=\frac{1}{2}[x_1,\sum(*)x_i]$ and $x_2$ is a generator of the Leibniz algebra $L_0,$ the element $x_2$ cannot be generated from the product $[y_{m_{1} +1}, y_1].$ Thereby $x_2$ also belongs to $L^4.$ Consider the equality $$[[x_1, y_1], x_1] = [x_1,[ y_1, x_1]] + [[x_1, x_1], y_1] = [x_1, y_2] -[\sum\limits_{i\geq 3}(*)x_i, y_1].$$ From this it follows that the product $[[x_1, y_1], x_1]$ belongs to $L^5$ (and therefore belongs to $L^4$).
On the other hand, $$[[x_1, y_1], x_1] = [\sum{\lambda}imits_{j=2}^m {\alpha}lpha_{1,j} y_j, x_1] = {\alpha}lpha_{1,2}y_3 + {\delta}ots + {\alpha}lpha_{1,m_1-1}y_{m_1} +{\alpha}lpha_{1,m_1+1}y_{m_1+2} + {\delta}ots +{\alpha}lpha_{1,m-1}y_{m}.$$ Since ${\alpha}lpha_{1, m_1+1} \neq 0,$ we obtain that $y_{m_1+2} \in L^4.$ Thus, we have $L^4 = \{x_2, x_3, {\delta}ots, x_n, \\ y_2, {\delta}ots, y_{m_1}, y_{m_1+2}, {\delta}ots, y_m\},$ that is $L^4 = L^3.$ It is a contradiction to nilpotency of the superalgebra $L.$ Thus, we get a contradiction with assumption that the superalgebra $L$ has nilindex equal to $n+m$ and therefore the assertion of the theorem is proved. \end{proof} From Theorems \ref{t2} and \ref{t3} we obtain that Leibniz superalgebra $L$ with condition $dim (L^3)_0 = n-1$ has nilindex less than $n+m.$ The investigation of the Leibniz superalgebra with property $dim (L^3)_0 = n-2$ shows that the restriction to nilindex depends on the structure of the Leibniz algebra $L_0.$ Below we present some necessary remarks on nilpotent Leibniz algebras. Let $A = \{z_1, z_2, {\delta}ots, z_n\}$ be an $n$-dimensional nilpotent Leibniz algebra of nilindex $l$ ($l < n$). Note that algebra $A$ is not single-generated. {\beta}egin{prop} {\lambda}abel{c1} \cite{C-G-O-Kh1} Let $gr(A)$ be a naturally graded non-Lie Leibniz algebra. Then $dim A^3 {\lambda}eq n-4.$ \end{prop} The result on nilindex of the superalgebra under the condition $dim(L^3)_0 = n-2$ is established in the following two theorems. {\beta}egin{thm}{\lambda}abel{t4} Let $L=L_0 \oplus L_1$ be a Leibniz superalgebra from $Leib_{n,m}$ with characteristic sequence $(n_1, {\delta}ots, n_k | m_1, {\delta}ots m_s),$ where $n_1{\lambda}eq n-2, \ m_1{\lambda}eq m-1,$ $dim(L^3)_0 = n-2$ and $dim L_0^3 {\lambda}eq n-4.$ Then $L$ has a nilindex less than $n+m.$ \end{thm} {\beta}egin{proof} Let us assume the contrary, i.e. 
the nilindex of the superalgebra $L$ is equal to $n+m.$ According to the condition $dim(L^3)_0 = n-2$ we have $$ L^3 = \{x_3, x_4, {\delta}ots, x_n, y_2, y_3, {\delta}ots, y_m\}.$$ From the condition $dim L_0^3 {\lambda}eq n-4$ it follows that there exist at least two basic elements, that do not belong to $L_0^3.$ Without loss of generality, one can assume $x_3, x_4 \notin L_0^3.$ Let $h$ be a natural number such that $x_3 \in L^{h+1}\setminus L^{h+2},$ then we have $$L^{h+1} = \{x_3, x_4, {\delta}ots, x_n, y_h, y_{h+1}, {\delta}ots, y_m\}, \ h {\gamma}eq 2, \ {\beta}eta_{h-1, 3} \neq 0.$$ $$L^{h+2} = \{x_4, {\delta}ots, x_n, y_h, y_{h+1}, {\delta}ots, y_m\}.$$ Let us suppose $x_3 \notin L_0^2.$ Then we have that $x_3$ can not be obtained by the products $[x_i, x_1],$ with $2 {\lambda}eq i{\lambda}eq n.$ Therefore, it is generated by products $[y_j, y_1], 2 {\lambda}eq j {\lambda}eq m,$ which implies $h {\gamma}eq 3$ and ${\alpha}lpha_{2,2}\neq 0.$ If $h=3,$ then ${\beta}eta_{2,3}\neq 0.$ Consider the chain of equalities $$[[x_2, y_1], y_1] = [\sum {\lambda}imits_{j=2}^m {\alpha}lpha_{2,j}y_j, y_1] = \sum {\lambda}imits_{j=2}^m {\alpha}lpha_{2,j}[y_j, y_1]= {\alpha}lpha_{2,2}{\beta}eta_{2,3}x_3 + \sum {\lambda}imits_{i{\gamma}eq 4}(*)x_i.$$ On the other hand, $$[[x_2, y_1], y_1] = \frac 1 2 [x_2, [ y_1, y_1]] = \frac 1 2 [x_2, \sum {\lambda}imits_{i=2}^n{\beta}eta_{1,i}x_i] = \frac 1 2 \sum {\lambda}imits_{i=2}^n{\beta}eta_{1,i} [x_2, x_i] = \sum {\lambda}imits_{i{\gamma}eq 4}(*)x_i.$$ Comparing the coefficients at the corresponding basic elements, we get a contradiction with $ {\beta}eta_{2,3} = 0.$ Thus, $h {\gamma}eq 4.$ Since $y_2 \in L^3$ and $h {\gamma}eq 4$ we have $y_{h-2} \in L^{h-1},$ which implies $[y_{h-2}, y_2] \in L^{h+2} = \{x_4, {\delta}ots, x_n, y_h, y_{h+1}, {\delta}ots, y_m\}.$ It means that in the decomposition $[y_{h-2}, y_2]$ the coefficient at the basic element $x_3$ is equal to zero. 
On the other hand, $$[y_{h-2}, y_2] = [y_{h-2}, [y_1, x_1]] = [[y_{h-2}, y_1], x_1] - [[y_{h-2}, x_1], y_1] =$$ $$=[ \sum{\lambda}imits_{i=2}^n {\beta}eta_{h-2,i}x_i, x_1] - [y_{h-1}, y_1]= - {\beta}eta_{h-1,3}x_3 + \sum{\lambda}imits_{i{\gamma}eq 4}(*)x_i.$$ Hence, we get ${\beta}eta_{h-1,3} = 0,$ which is obtained from the assumption $x_3 \notin L_0^2.$ Therefore, we have $x_3, x_4 \in L_0^2\setminus L_0^3.$ The condition $x_4 \notin L_0^3$ deduce that $x_4$ can not be obtained by the products $[x_i, x_1],$ with $3 {\lambda}eq i{\lambda}eq n.$ Therefore, it is generated by products $[y_j, y_1], h {\lambda}eq j {\lambda}eq m.$ Hence, $L^{h+3} = \{x_4, {\delta}ots, x_n, y_{h+1}, {\delta}ots, y_m\}$ and $y_h \in L^{h+2} \setminus L^{h+3},$ which implies ${\alpha}lpha_{3,h} \neq 0.$ Let $p$ ($ p {\gamma}eq 3$) be a natural number such that $x_4 \in L^{h+p} \setminus L^{h+p+1}.$ Suppose that $p=3.$ Then ${\beta}eta_{h,4}\neq 0.$ Consider the chain of equalities $$[[x_3, y_1], y_1] = [\sum {\lambda}imits_{j=h}^m {\alpha}lpha_{3,j}y_j, y_1] = \sum {\lambda}imits_{j=h}^m {\alpha}lpha_{3,j}[y_j, y_1]= {\alpha}lpha_{3,h}{\beta}eta_{h,4}x_4 + \sum {\lambda}imits_{i{\gamma}eq 5}(*)x_i.$$ On the other hand, $$[[x_3, y_1], y_1] = \frac 1 2 [x_3, [ y_1, y_1]] = \frac 1 2[x_3, \sum {\lambda}imits_{i=2}^n{\beta}eta_{1,i}x_i] = \frac 1 2 \sum {\lambda}imits_{i=2}^n{\beta}eta_{1,i} [x_3, x_i] = \sum {\lambda}imits_{i{\gamma}eq 5}(*)x_i.$$ Comparing the coefficients at the corresponding basic elements in these equations we get ${\alpha}lpha_{3,h}{\beta}eta_{h,4} = 0,$ which implies $ {\beta}eta_{h,4} = 0.$ It is a contradiction with assumption $p=3.$ Therefore, $p{\gamma}eq 4$ and for the powers of descending lower sequences we have $$L^{h+p-2} = \{x_4, {\delta}ots, x_n, y_{h+p-4}, {\delta}ots, y_m\},$$ $$L^{h+p-1} = \{x_4, {\delta}ots, x_n, y_{h+p-3}, {\delta}ots, y_m\},$$ $$L^{h+p} = \{x_4, {\delta}ots, x_n, y_{h+p-2}, {\delta}ots, y_m\},$$ $$L^{h+p+1} = \{x_5, {\delta}ots, x_n, 
y_{h+p-2}, {\delta}ots, y_m\}.$$ It is easy to see that in the decomposition $[y_{h+p-3}, y_1] = \sum{\lambda}imits_{i=4}^n {\beta}eta_{h+p-3,i}x_i$ we have ${\beta}eta_{h+p-3,4} \neq 0.$ Consider the equalities $$[y_{h+p-4}, y_2] = [y_{h+p-4}, [y_1, x_1]] = [[y_{h+p-4}, y_1], x_1] - [[y_{h+p-4}, x_1], y_1] =$$ $$=[ \sum{\lambda}imits_{i=4}^n {\beta}eta_{h+p-3,i}x_i, x_1] - [y_{h+p-3}, y_1]= - {\beta}eta_{h+p-3,4}x_4 + \sum{\lambda}imits_{i{\gamma}eq 5}(*)x_i.$$ Since $y_{h+p-4} \in L^{h+p-2}, y_2 \in L^3$ and ${\beta}eta_{h+p-3,4} \neq 0,$ then the element $x_4$ should lie in $L^{h+p+1},$ but it contradicts to $L^{h+p+1} = \{x_5, {\delta}ots, x_n, y_{h+p-2}, {\delta}ots, y_m\}.$ Thus, the superalgebra $L$ has a nilindex less than $n+m.$ \end{proof} From Theorem \ref{t4} we conclude that Leibniz superalgebra $L= L_0 \oplus L_1$ with the characteristic sequence $(n_1, {\delta}ots, n_k| m_1, {\delta}ots, m_s),$ where $n_1{\lambda}eq n-2, \ m_1{\lambda}eq m-1$ and nilindex $n+m$ can appear only if $dim L_0^3 {\gamma}eq n-3.$ Taking into account the condition $n_1 {\lambda}eq n-2$ and properties of naturally graded subspaces $gr(L_0)_1,$ $gr(L_0)_2$ we get $dim L_0^3 = n-3.$ Let $dim L_0^3 =n-3.$ Then $$gr(L_0)_1 = \{\overline{x}_1, \overline{x}_2\}, \ gr(L_0)_2 = \{\overline{x}_3\}.$$ From Proposition \ref{c1} the naturally graded Leibniz algebra $gr(L_0)$ is a Lie algebra, i.e. the following multiplication rules hold $$ {\lambda}eft\{ {\beta}egin{array}{l} [\overline{x}_1,\overline{x}_1]=0, \\{} [\overline{x}_2,\overline{x}_1]=\overline{x}_3, \\{} [\overline{x}_1,\overline{x}_2]=-\overline{x}_3, \\ {}[\overline{x}_2,\overline{x}_2]=0. \end{array}\right. 
$$ Using these products for the corresponding products in the Leibniz algebra $L_0$ with the basis $\{x_1, x_2, {\delta}ots, x_n\}$ we have $$ {\lambda}eft\{ {\beta}egin{array}{l} [x_1,x_1]={\gamma}amma_{1,4}x_4 + {\gamma}amma_{1,5}x_5 + {\delta}ots + {\gamma}amma_{1,n}x_n, \\{} [x_2,x_1]=x_3, \\{} [x_1,x_2]=-x_3 + {\gamma}amma_{2,4}x_4 + {\gamma}amma_{2,5}x_5 + {\delta}ots + {\gamma}amma_{2,n}x_n, \\ {}[x_2,x_2]={\gamma}amma_{3,4}x_4 + {\gamma}amma_{3,5}x_5 + {\delta}ots + {\gamma}amma_{3,n}x_n. \end{array} \right. \eqno(4)$$ {\beta}egin{thm}{\lambda}abel{t5} Let $L=L_0\oplus L_1$ be a Leibniz superalgebra from $Leib_{n,m}$ with characteristic sequence $(n_1, {\delta}ots, n_k | m_1, {\delta}ots, m_s),$ where $n_1{\lambda}eq n-2, \ m_1{\lambda}eq m-1,$ $dim (L^3)_0=n-2$ and $dimL_0^3=n-3.$ Then $L$ has a nilindex less than $n+m.$ \end{thm} {\beta}egin{proof} Let us suppose the contrary, i.e. the nilindex of the superalgebra $L$ equals $n+m.$ Then from the condition $dim (L^3)_0=n-2$ we obtain $$ L^2 = \{x_2, x_3,{\delta}ots, x_n, y_2, {\delta}ots, y_m\},$$ $$L^3 = \{x_3, x_4, {\delta}ots, x_n, y_2, {\delta}ots, y_m\}.$$ $$L^4 \supset \{x_4, {\delta}ots, x_n, y_3, {\delta}ots, y_{m_1}, B_1y_2 + B_2y_{m_1+1}, y_{m_1+2} {\delta}ots, y_m\}, \quad (B_1, B_2) \neq (0,0).$$ Suppose $x_3 \notin L^4.$ Then $$L^4 = \{x_4, {\delta}ots, x_n, y_2, {\delta}ots, y_{m_1}, y_{m_1+1}, {\delta}ots, y_m\}.$$ Let $B'_1y_2 + B'_2y_{m_1+1}$ be an element which earlier disappear in the descending lower sequence for $L$. Then this element can not to be generated from the products $[x_i, y_1], \ 2 {\lambda}eq i {\lambda}eq n.$ Indeed, since $x_3 \notin L^4,$ the element can not to be generated from $[x_2, y_1].$ Due to structure of $L_0$ the elements $x_i, (3 {\lambda}eq i {\lambda}eq n)$ are in $L_0^2,$ i.e. 
they are generated by the linear combinations of the products of elements from $L_0.$ The equalities $$[[x_i, x_j], y_1] = [x_i,[ x_j, y_1]] + [[x_i, y_1], x_j] = [x_i,\sum{\lambda}imits_{t=2}^m {\alpha}lpha_{j,t}y_t] + [\sum{\lambda}imits_{t=2}^m {\alpha}lpha_{i,t}y_t, x_i]$$ derive that the element $B'_1y_2 + B'_2y_{m_1+1}$ can not be obtained by the products $[x_i, y_1], 3 {\lambda}eq i {\lambda}eq n.$ However, it means that $x_3\in L^4.$ Thus, we have $$L^4 = \{x_3, x_4, {\delta}ots, x_n, y_3, {\delta}ots, y_{m_1}, B_1y_2 + B_2y_{m_1+1}, y_{m_1+2}, {\delta}ots, y_m\},$$ where $(B_1, B_2) \neq (0,0)$ and $B_1B'_2 - B_2B'_1 \neq 0.$ The simple analysis of descending lower sequences $L^3$ and $L^4$ implies $$[x_2, y_1] = {\alpha}lpha'_{2,2}(B'_1y_2 + B'_2y_{m_1+1}) + {\alpha}lpha'_{2,m_1+1}(B_1y_2 + B_2y_{m_1+1})+ \sum{\lambda}imits_{{\beta}egin{array}{c}j=3\\j\neq m_1+1 \end{array}} ^m {\alpha}lpha_{2,j}y_j,\quad {\alpha}lpha'_{2,2} \neq 0.$$ Let $h$ be a natural number such that $x_3 \in L^{h+1}\setminus L^{h+2},$ i.e. 
$$L^h = \{x_3, x_4, {\delta}ots, x_n, y_{h-1}, y_h, {\delta}ots, y_{m_1}, B_1y_2 + B_2y_{m_1+1}, y_{m_1+2}, {\delta}ots, y_m\}, h {\gamma}eq 3,$$ $$L^{h+1} = \{x_3, x_4, {\delta}ots, x_n, y_h, y_{h+1},{\delta}ots, y_{m_1}, B_1y_2 + B_2y_{m_1+1}, y_{m_1+2}, {\delta}ots, y_m\},$$ $$L^{h+2} = \{x_4, {\delta}ots, x_n, y_h, y_{h+1},{\delta}ots, y_{m_1}, B_1y_2 + B_2y_{m_1+1}, y_{m_1+2}, {\delta}ots, y_m\}.$$ If $h=3,$ then $[B'_1y_2 + B'_2y_{m_1+1}, y_1] = {\beta}eta'_{2,3}x_3 + \sum{\lambda}imits_{i{\gamma}eq 4}(*)x_4,$ ${\beta}eta'_{2,3} \neq 0$ and we consider the product $$[[x_2, y_1], y_1] = [ {\alpha}lpha'_{2,2}(B'_1y_2 + B'_2y_{m_1+1}) +{\alpha}lpha'_{2,m_1+1}(B_1y_2 + B_2y_{m_1+1})+ \sum{\lambda}imits_{{\beta}egin{array}{c}j=3\\j\neq m_1+1 \end{array}}^m {\alpha}lpha_{2,j}y_j, y_1] =$$ $$= {\alpha}lpha'_{2,2}[B'_1y_2 + B'_2y_{m_1+1},y_1] + {\alpha}lpha'_{2,m_1+1}[B_1y_2 + B_2y_{m_1+1},y_1]+ $$ $$+\sum{\lambda}imits_{{\beta}egin{array}{c}j=3\\j\neq m_1+1 \end{array}}^m {\alpha}lpha_{2,j}[y_j, y_1] = {\alpha}lpha'_{2,2} {\beta}eta'_{2,3}x_3 + \sum{\lambda}imits_{i{\gamma}eq 4}(*)x_4 .$$ On the other hand, due to (4) we have $$[[x_2, y_1], y_1] = \frac 1 2 [x_2, [ y_1, y_1]] = \frac 1 2 [x_2, \sum{\lambda}imits_{i=2}^n{\beta}eta_{1,i}x_i] = \sum{\lambda}imits_{i{\gamma}eq 4}(*)x_i.$$ Comparing the coefficients at the corresponding basic elements we get equality ${\alpha}lpha'_{2,2}{\beta}eta'_{2,3} = 0,$ i.e. 
we have a contradiction with supposition $h=3.$ If $h {\gamma}eq 4,$ then we obtain ${\beta}eta'_{h-1,3} \neq 0.$ Consider the chain of equalities $$[y_{h-2}, y_2] = [y_{h-2}, [y_1, x_1]] = [[y_{h-2}, y_1], x_1] - [[y_{h-2}, x_1], y_1] =$$ $$= [ \sum{\lambda}imits_{i=3}^n{\beta}eta_{h-2,i}x_i , x_1] - [y_{h-1}, y_1] = - {\beta}eta_{h-1,3}x_3 + \sum{\lambda}imits_{i{\gamma}eq 4}(*)x_i.$$ Since $y_{h-2} \in L^{h-1}$ and $y_2 \in L^3$ then $x_3 \in L^{h+2} = \{x_4, {\delta}ots, x_n, y_{h-1}, {\delta}ots, y_m\},$ which is a contradiction with the assumption that the nilindex of $L$ is equal to $n+m.$ \end{proof} {\beta}egin{rem} In this subsection we used product $[y_1,x_1]=y_2.$ However, it is not difficult to check that the obtained results are also true under the condition $[y_1,x_1]=0.$ \end{rem} \subsection{The case of both generators lie in $L_1$} {\beta}egin{thm}{\lambda}abel{t6} Let $L=L_0 \oplus L_1$ be a Leibniz superalgebra from $Leib_{n,m}$ with characteristic sequence equal to $(n_1, {\delta}ots, n_k | m_1,{\delta}ots, m_s),$ where $n_1{\lambda}eq n-2, \ m_1{\lambda}eq m-1$ and let both generators lie in $L_1.$ Then $L$ has a nilindex less than $n+m.$ \end{thm} {\beta}egin{proof} Since both generators of the superalgebra $L$ lie in $L_1,$ they are linear combinations of the elements $\{y_1, y_{m_1+1}, {\delta}ots, y_{m_1+{\delta}ots+m_{s-1}+1}\}.$ Without loss of generality we may assume that $y_1$ and $y_{m_1+1}$ are generators. Let $L^{2t} = \{x_i, x_{i+1}, {\delta}ots, x_n, y_j, {\delta}ots, y_m\}$ for some natural number $t$ and let $z \in L$ be an arbitrary element such that $z \in L^{2t} \setminus L^{2t+1}.$ Then $z$ is obtained by the products of even number of generators. 
Hence $z \in L_0$ and $L^{2t+1} = \{x_{i+1}, {\delta}ots, x_n, y_j, {\delta}ots, y_m\}.$ In a similar way, having $L^{2t+1} = \{x_{i+1}, {\delta}ots, x_n, y_j, {\delta}ots, y_m\}$ we obtain $L^{2t+2} = \{x_{i+1}, {\delta}ots, x_n, y_{j+1}, {\delta}ots, y_m\}.$ From the above arguments we conclude that $n = m-1$ or $n = m-2$ and $$L^3 = \{x_2, {\delta}ots, x_n, y_2, y_3, {\delta}ots, y_{m_1}, y_{m_1+2}, {\delta}ots, y_m\}.$$ Applying the above arguments we get that an element of form $B_1y_2 + B_2y_{m_1+2} + B_3y_{m_1+m_2+1}$ disappears in $L^4.$ Moreover, there exist two elements $B'_1y_2 + B'_2y_{m_1+2} + B'_3y_{m_1+m_2+1}$ and $B''_1y_2 + B''_2y_{m_1+2} + B''_3y_{m_1+m_2+1}$ which belong to $L^4,$ where $$rank {\lambda}eft({\beta}egin{array}{lll} B_1&B_2&B_3\\ B'_1&B'_2&B'_3\\ B''_1&B''_2&B''_3\end{array}\right) =3.$$ Since $x_2$ does not belong to $L^5$ then the elements $B'_1y_2 + B'_2y_{m_1+2} + B'_3y_{m_1+m_2+1},$ $B''_1y_2 + B''_2y_{m_1+2} + B''_3y_{m_1+m_2+1}$ lie in $L^5.$ Hence, from the notations $$[x_1, y_1] = {\alpha}lpha_{1,2}(B_1y_2 + B_2y_{m_1+2} + B_3y_{m_1+m_2+1}) + {\alpha}lpha_{1,m_1+2}(B'_1y_2 + B'_2y_{m_1+2} + B'_3y_{m_1+m_2+1}) +$$$$+ {\alpha}lpha_{1,m_1+m_2+1}(B''_1y_2 + B''_2y_{m_1+2} + B''_3y_{m_1+m_2+1})+ \sum{\lambda}imits_{j=3, j\neq m_1+2, m_1+m_2+1}^m {\alpha}lpha_{1,j}y_j.$$ $$[x_1, y_{m_1+1}] = {\delta}elta_{1,2}(B_1y_2 + B_2y_{m_1+2} + B_3y_{m_1+m_2+1}) + {\delta}elta_{1,m_1+2}(B'_1y_2 + B'_2y_{m_1+2} + B'_3y_{m_1+m_2+1}) +$$$$+ {\delta}elta_{1,m_1+m_2+1}(B''_1y_2 + B''_2y_{m_1+2} + B''_3y_{m_1+m_2+1})+ \sum{\lambda}imits_{j=3, j\neq m_1+2, m_1+m_2+1}^m {\delta}elta_{1,j}y_j,$$ we have $({\alpha}lpha_{1,2},{\delta}elta_{1,2}) \neq (0,0).$ Similarly, from the notations $$[B_1y_2 + B_2y_{m_1+2} + B_3y_{m_1+m_2+1}, y_1 ] = {\beta}eta_{2,2}x_2 + {\beta}eta_{2,3}x_3 + {\delta}ots + {\beta}eta_{2,n}x_n,$$ $$[B_1y_2 + B_2y_{m_1+2} + B_3y_{m_1+m_2+1}, y_{m_1+1}] = {\gamma}amma_{2,2}x_2 + {\gamma}amma_{2,3}x_3 + {\delta}ots + 
{\gamma}amma_{2,n}x_n,$$ we obtain the condition $({\beta}eta_{2,2}, {\gamma}amma_{2,2}) \neq (0, 0).$ Consider the product $$[x_1, [y_1, y_1]] = 2 [[x_1, y_1], y_1] = 2{\alpha}lpha_{1,2}[B_1y_2 + B_2y_{m_1+2} + B_3y_{m_1+m_2+1}, y_1]+$$ $$ +2{\alpha}lpha_{1,m_1+2}[B'_1y_2 + B'_2y_{m_1+2}+B'_3y_{m_1+m_2+1}, y_1] +$$$$ 2{\alpha}lpha_{1,m_1+m_2+1}[B''_1y_2 + B''_2y_{m_1+2} + B''_3y_{m_1+m_2+1},y_1]+$$ $$+2\sum{\lambda}imits_{j=3, j\neq m_1+2, m_1+m_2+1}^m {\delta}elta_{1,j}[y_j,y_1]=2 {\alpha}lpha_{1,2}{\beta}eta_{2,2}x_2 + \sum{\lambda}imits_{i{\gamma}eq 3}(*)x_i .$$ On the other hand, $$[x_1, [y_1, y_1]] = [x_1, {\beta}eta_{1,1}x_1 + {\beta}eta_{1,2}x_2 + {\delta}ots + {\beta}eta_{1,n}x_n] = \sum{\lambda}imits_{i{\gamma}eq 3}(*)x_i.$$ Comparing the coefficients at the basic elements in these equations we obtain ${\alpha}lpha_{1,2}{\beta}eta_{2,2} = 0.$ Analogously, considering the product $[x_1, [y_{m_1+1}, y_{m_1+1}]],$ we obtain ${\delta}elta_{1,2}{\gamma}amma_{2,2} = 0.$ From this equations and the conditions $({\beta}eta_{2,2}, {\gamma}amma_{2,2}) \neq (0, 0),$ $({\alpha}lpha_{1,2},{\delta}elta_{1,2}) \neq (0,0)$ we easily obtain that the solutions are ${\alpha}lpha_{1,2}{\gamma}amma_{2,2} \neq 0, {\beta}eta_{2,2} ={\delta}elta_{1,2} =0$ or ${\beta}eta_{2,2}{\delta}elta_{1,2}\neq 0, {\alpha}lpha_{1,2} = {\gamma}amma_{2,2}=0.$ Consider the following product $$[[x_1, y_1], y_{m_1+1}] = [x_1,[ y_1, y_{m_1+1}]] - [[x_1, y_{m_1+1}], y_1] =-{\delta}elta_{1,2}{\beta}eta_{2,2}x_2+ \sum{\lambda}imits_{i{\gamma}eq 3}(*)x_i.$$ On the other hand, $$[[x_1, y_1], y_{m_1+1}] = {\alpha}lpha_{1,2}{\gamma}amma_{2,2}x_2+ \sum{\lambda}imits_{i{\gamma}eq 3}(*)x_i.$$ Comparing the coefficients of the basic elements in these equations we obtain irregular equation ${\alpha}lpha_{1,2}{\gamma}amma_{2,2} = -{\beta}eta_{2,2}{\delta}elta_{1,2}.$ It is a contradiction with supposing the nilindex of the superalgebra equal the $n+m.$ And the theorem is proved. 
\end{proof} Thus, the results of Theorems \ref{t2}--\ref{t6} show that the Leibniz superalgebras with nilindex $n+m$ ($m\neq 0$) are the superalgebras mentioned in Section 2. Hence, we have completed the classification of the Leibniz superalgebras with nilindex $n+m.$ \begin{thebibliography}{99} \bibitem{Alb} Albeverio S., Ayupov Sh.A., Omirov B.A. {\it On nilpotent and simple Leibniz algebras.} Comm. in Algebra, 33(1), 2005, p. 159--172. \bibitem{2007Yu} Bordemann M., G\'{o}mez J.R., Khakimdjanov Yu., Navarro R.M. {\it Some deformations of nilpotent Lie superalgebras}, J. Geom. and Phys., 57, 2007, p. 1391--1403. \bibitem{FilSup} Ayupov Sh. A., Khudoyberdiyev A. Kh., Omirov B. A. {\it The classification of filiform Leibniz superalgebras of nilindex $n+m$}, Acta Math. Sinica (English Series), vol. 25(1), 2009, p. 171--190. \bibitem{C-G-N-O} Camacho L.M., G\'{o}mez J.R., Navarro R.M., Omirov B.A. {\it Classification of some nilpotent class of Leibniz superalgebras}, to appear in Acta Math. Sinica (English Series), 2009, arXiv:math/0611636. \bibitem{C-G-O-Kh} Camacho L.M., G\'{o}mez J.R., Omirov B.A., Khudoyberdiyev A.Kh. {\it On complex Leibniz superalgebras of nilindex $n+m$}, submitted to J. Geom. and Phys., arXiv:0812.2156. \bibitem{C-G-O-Kh1} Camacho L.M., G\'{o}mez J.R., Omirov B.A., Khudoyberdiyev A.Kh. {\it On the description of Leibniz superalgebras of nilindex $n+m$}, arXiv:0902.2884v1.
\bibitem{GL} Gilg M. {\it Super-alg\`{e}bres de Lie nilpotentes:} PhD thesis. University of Haute Alsace, 2000. -- 126 p. \bibitem{G-K-N} G\'{o}mez J.R., Khakimdjanov Yu., Navarro R.M. {\it Some problems concerning to nilpotent Lie superalgebras}, J. Geom. and Phys., 51(4), 2004, p. 473--486. \bibitem{Kac} Kac V.G. {\it Lie superalgebras}, Advances in Math., vol. 26(1), 1977, p. 8--96. \bibitem{Liv} Livernet M. {\it Rational homotopy of Leibniz algebras}, Manuscripta Mat., vol. 96, 1998, p. 295--315. \bibitem{Lod} Loday J.-L. {\it Une version non commutative des alg\`{e}bres de Lie: les alg\`{e}bres de Leibniz}, Ens. Math., 39, 1993, p. 269--293. \end{thebibliography} \end{document}
\begin{document} \begin{titlepage} \title{Robust Phase Transitions for Heisenberg and Other Models on General Trees} \author{Robin Pemantle\thanks{Research partially supported by a Presidential Faculty Fellowship and a Sloan Foundation Fellowship.} \and Jeffrey E. Steif\thanks{Research supported by grants from the Swedish Natural Science Research Council and from the Royal Swedish Academy of Sciences.} \and \\ \it University of Wisconsin-Madison and Chalmers University of Technology } \date{} \maketitle \begin{abstract} We study several statistical mechanical models on a general tree. Particular attention is devoted to the classical Heisenberg models, where the state space is the $d$--dimensional unit sphere and the interactions are proportional to the cosines of the angles between neighboring spins. The phenomenon of interest here is the classification of phase transition (non-uniqueness of the Gibbs state) according to whether it is {\it robust}. In many cases, including all of the Heisenberg and Potts models, occurrence of robust phase transition is determined by the geometry (branching number) of the tree in a way that parallels the situation with independent percolation and usual phase transition for the Ising model. The critical values for robust phase transition for the Heisenberg and Potts models are also calculated exactly. In some cases, such as the $q\ge 3$ Potts model, robust phase transition and usual phase transition do not coincide, while in other cases, such as the Heisenberg models, we conjecture that robust phase transition and usual phase transition are equivalent. In addition, we show that symmetry breaking is equivalent to the existence of a phase transition, a fact believed but not known for the rotor model on $Z\!\!\!Z^2$. \end{abstract} \noindent AMS 1991 subject classifications.
Primary 60K35, 82B05, 82B26. \\ Key words and phrases. phase transitions, symmetry breaking, Heisenberg models.\\ Running head: Phase transitions for Heisenberg models. \end{titlepage} \setcounter{equation}{0} \section{Definition of the model and main results} \label{sec:one} Particle systems on trees have produced the first and most tractable examples of certain qualitative phenomena. For example, the contact process on a tree has multiple phase transitions (\cite{Pem,Lig2,Sta}), and the critical temperature for the Ising model on a tree is determined by its branching number or Hausdorff dimension (\cite{Ly1,EKPS,PP}), which makes the Ising model intimately related to independent percolation, whose critical value is also determined by the branching number (see \cite{Ly2}). In this paper we study several models on general infinite trees, including the classical Heisenberg and Potts models. Our aim is to exhibit a distinction between two kinds of phase transitions, {\it robust} and {\it non-robust}, as well as to investigate conditions under which robust phase transitions occur. In many cases, including the Heisenberg and Potts models, the existence of a robust phase transition is determined by the branching number. However, in some cases (including the $q > 2$ Potts model), the critical temperature for the existence of usual phase transition is not determined by the branching number. Thus robust phase transition behaves in a more universal manner than non-robust phase transition, being a function of the branching number alone, as it is for usual phase transition for independent percolation and the Ising model. Although particle systems on trees do not always predict the qualitative behavior of the same particle system on high-dimensional lattices, it seems likely that there is a lattice analogue of non-robust phase transition, which would make an interesting topic for further research.
Another unresolved question is whether there is ever a non-robust phase transition for the Heisenberg models (see Conjecture~\ref{conj:PS}). We proceed to define the general statistical ensemble on a tree and to state the main results of the paper. Let $G$ be a compact metrizable group acting transitively by isometries on a compact metric space $({\bf S},d)$. It is well known that there exists a unique $G$--invariant probability measure on ${\bf S}$, which we denote by $dx$. An {\bf energy function} is any nonconstant function $H : {\bf S} \times {\bf S} \rightarrow \hbox{I\kern-.2em\hbox{R}}$ that is symmetric, continuous, and $d$--invariant in that $H(x,y)$ depends only on $d(x,y)$. This implies that $$ H(x,y)=H(gx,gy) \,\,\forall \, x,y\in {\bf S}, \,g\in G. $$ ${\bf S}$ together with its $G$--action and the function $H$ will be called a {\bf statistical ensemble}. Several examples with which we will be concerned are as follows. \begin{eg} \label{eg:ising} The Ising model. Here ${\bf S} = \{ 1 , -1 \}$ acted on by itself (multiplicatively), $d$ is the usual discrete metric, $dx$ is uniform on ${\bf S}$, and $H (x , y) = - xy$. \end{eg} \begin{eg} \label{eg:potts} The Potts model. Here ${\bf S} = \{ 0 , 1 , \ldots , q-1 \}$ for some integer $q > 1$, $G$ is the symmetric group $S_q$ with its natural action, $d$ is the usual discrete metric, $dx$ is uniform on ${\bf S}$, and $H (x , y) = 1 - 2 \delta_{x,y}$. This reduces to the Ising model when $q = 2$. \end{eg} \begin{eg} \label{eg:rotor} The rotor model. Here ${\bf S}$ is the unit circle, acted on by itself by translations, $d(\theta , \phi) = 1- \cos (\theta - \phi)$, $dx$ is normalized Lebesgue measure, and $H (\theta , \phi) = - \cos (\theta - \phi)$.
\end{eg} \begin{eg} \label{eg:spherical} The Heisenberg models for $d \ge 1$. In the $d$--dimensional Heisenberg model, ${\bf S}$ is the unit sphere $S^d$, $G$ is the special orthogonal group with its natural action; $d(x,y)$ is $1-x\cdot y$, $dx$ is normalized surface measure, and $H (x , y)$ is again the negative of the dot product of $x$ and $y$. When $d=1$, we recover the rotor model. \end{eg} Let $A$ be any finite graph, with vertex and edge sets denoted by $V(A)$ and $E(A)$ respectively, and let ${\cal J} : E (A) \rightarrow \hbox{I\kern-.2em\hbox{R}}^+$ be a function mapping the edge set of $A$ to the nonnegative reals, which we call {\bf interaction strengths}. We now assume that ${\bf S}$, $G$ and $H$ are given and fixed. \begin{defn} \label{defn:Gibbs} The {\bf Gibbs measure} with interaction strengths ${\cal J}$ is the probability measure $\mu=\mu^{{\cal J}}$ on ${\bf S}^{V(A)}$ whose density with respect to product measure $dx^{V(A)}$ is given by $$ { \exp (- H^{\cal J} (\eta)) \over Z},\,\,\,\, \eta\in {\bf S}^{V(A)} $$ where $$ H^{\cal J}(\eta) = \sum_{e = \overline{xy} \in E(A)} {\cal J} (e) H(\eta (x) , \eta (y)) ,$$ and $Z = \int \exp (- H^{\cal J}(\eta)) \, dx^{V(A)}$ is a normalization. \end{defn} In statistical mechanics, one wants to define Gibbs measures on infinite graphs $A$, in which case the above definition of course does not make sense. We follow the usual approach (see~\cite{Ge}), in which one introduces boundary conditions and takes a weak limit of finite subgraphs increasing to $A$.
Since the precise nature of the boundary conditions play a role here (we know this to be true at least for the Potts model with $q > 2$), we handle boundary conditions with extra care and, unfortunately, notation. We give definitions in the case of a rooted tree, though the extensions to general locally finite graphs are immediate. By a {\bf tree}, we mean any connected loopless graph $\Gamma$ where every vertex has finite degree. One fixes a vertex $o$ of $\Gamma$ which we call the {\bf root}, obtaining a {\bf rooted tree}. The vertex set of $\Gamma$ is denoted by $V(\Gamma)$. If $x$ is a vertex, we write $|x|$ for the number of edges on the shortest path from $o$ to $x$ and for two vertices $x$ and $y$, we write $|x-y|$ for the number of edges on the shortest path from $x$ to $y$. For vertices $x$ and $y$, we write $x \le y$ if $x$ is on the shortest path from $o$ to $y$, $x < y$ if $x \le y$ and $x \ne y$, and $x \thetao y$ if $x \le y$ and $|y|=|x|+1$. For $x \in V(\Gamma)$, the tree $\Gamma (x)$ denotes the subtree of $\Gamma$ rooted at $x$ consisting of $x$ and all of its descendents. We also define $\bar{d}eltaartial\Gamma$, which we refer to as the boundary of $\Gamma$, to be the set of infinite self-avoiding paths starting from $o$. Throughout the paper, the following assumption is in force. \noindent{\bf ASSUMPTION:} For all trees considered in this paper, the number of children of the vertices will be assumed bounded and we will denote this bound by $B$. A {\bf cutset} $C$ is a finite set of vertices not including $o$ such that every self-avoiding infinite path from $o$ intersects $C$ and such that there is no pair $x , y \in C$ with $x < y$. Given a cutset $C$, $\Gamma \begin{equation}gin{array}ckslash C$ has one finite component (which contains $o$) which we denote by $C^i$ (``i'' for inside) and we let $C^o$ (``o'' for outside) denote the union of the infinite components of $\Gamma \begin{equation}gin{array}ckslash C$. 
We say that a sequence $\{ C_n \}$ of cutsets approaches $\infty$ if for all $v \in \Gamma$, $v \in C_n^i$ for all sufficiently large $n$. Boundary conditions will take the form of specifications of the value of $\eta$ at some cutset $C$. Let $\delta$ be any element of ${\bf S}^C$. The Gibbs measure with boundary condition $\delta$ is the probability measure $\mu^\delta_C = \mu^{{\cal J} , \delta}_C$ on ${\bf S}^{C^i}$ whose density with respect to product measure $dx^{C^i}$ is given by \begin{equation} \label{eq:Gibbs} { \exp (- H^{{\cal J} , \delta}_C (\eta)) \over Z},\,\,\,\, \eta\in {\bf S}^{C^i} \end{equation} where $$ H^{{\cal J} , \delta}_C (\eta) = \sum_{e = \overline{xy} \in E(\Gamma) \atop x,y \in C^i} {\cal J} (e) H(\eta (x) , \eta (y)) + \sum_{e = \overline{xy} \in E(\Gamma) \atop x \in C^i, y \in C} {\cal J} (e) H(\eta (x) , \delta (y)) $$ and $Z = \int \exp (- H^{{\cal J} , \delta}_C (\eta)) \, dx^{C^i}$ is a normalization. When we don't include the second summand above, we call this the {\it free} Gibbs measure on $C^i$, denoted by $\mu^{\rm free}_C$, where ${\cal J}$ is suppressed in the notation. As we will see in Lemma~\ref{lem:free}, the free measure does not depend on $C$ except for its domain of definition, so we can later also suppress $C$ in the notation. \begin{defn} \label{defn:gibbstree} A probability measure $\mu$ on ${\bf S}^{V(\Gamma)}$ is called a {\bf Gibbs state} for the interactions ${\cal J}$ if for each cutset $C$, the conditional distribution on $C^i$ given the configuration $\delta'$ on $C\cup C^o$ is given by $\mu_C^{{\cal J} , \delta}$ where $\delta$ is the restriction of $\delta'$ to $C$.
(A similar definition is used for general graphs.) Both in the case of lattices and trees (or for any graph), we say that a statistical ensemble {\bf exhibits a phase transition (PT) for the interaction strengths ${\cal J}$} if there is more than one Gibbs state for the interaction strengths ${\cal J}$. \mbox{\rm \small e}nd{defn} In the next section we will prove \begin{equation}gin{lem} \labelel{lem:free} Fix interaction strengths ${\cal J}$ and let $C$ and $D$ be any two cutsets of $\Gamma$. Then the projections of $\mu^{\rm free}_C$ and $\mu^{\rm free}_D$ to ${\bf S}^{C^i \cap D^i}$ are equal. Hence the measures $\mu^{\rm free}_C$ have a weak limit as $C \rightarrow \infty$, denoted $\mu^{\rm free}$. \mbox{\rm \small e}nd{lem} For general graphs, the measures $\mu^{\rm free}_C$ are not compatible in this way. Also, one has the following fact, which follows from Theorems~4.17 and~7.12 in \cite{Ge}. \begin{equation}gin{lem} \labelel{lem:limits} If $\{C_n\}$ is a sequence of cutsets approaching $\infty$ and if for each $n$, $\bar{d}eltata_n\in {\bf S}^{C_n}$, then any weak subsequential limit of the sequence $\{\mu_{C_n}^{{\cal J},\bar{d}eltata_n}\}_{n\ge 1}$ is a Gibbs state for the interactions ${\cal J}$. In addition, if all such possible limits are the same, then there is no phase transition. (A similar statement holds for graphs other than trees.) \mbox{\rm \small e}nd{lem} We pause for a few remarks about more general graphs, before restricting our discussion to trees for the rest of the paper. Lemma~\ref{lem:free} does not apply to graphs with cycles, so the existence of a unique weak limit $\mu^{\rm free}$ is not guaranteed there, but Lemma~\ref{lem:limits} together with compactness tells us that there always is at least one Gibbs state. The state of knowledge about the rotor model (Example~\ref{eg:rotor}) on more general graphs is somewhat interesting. 
It is known (see~\cite{Ge}, p.178 and p.434) that for $Z\!\!\!Z^d$, $d \leq 2$, all Gibbs states are rotationally invariant when ${\cal J}\mbox{\rm \small e}quiv J$ for any $J$ (and it is believed but not known that there is a unique Gibbs state for the rotor model in this case) while for $d \geq 3$, there are values of $J$ for which the rotor model with ${\cal J}\mbox{\rm \small e}quiv J$ has a Gibbs state whose distribution at the origin is not rotationally invariant (and hence there is more than one Gibbs state). In statistical mechanics, this latter phenomenon is referred to as a {\it continuous symmetry breaking} since we have a continuous state space (the circle) where the interactions are invariant under a certain continuous symmetry (rotations) but there are Gibbs states which are not invariant under this symmetry. We also mention that it is proved in~\cite{C} that for the rotor model with ${\cal J}\mbox{\rm \small e}quiv J$ for any $J$ on any graph of bounded degree for which simple random walk is recurrent, all the Gibbs states are rotationally invariant. (This was then extended in~\cite{MW} where the condition of boundedness of the degree is dropped and the group involved is allowed to be more general than the circle.) This however is not a sharp criterion: in~\cite{E}, a graph (in fact a tree) is constructed for which simple random walk is transient but such that there is no phase transition in the rotor model when ${\cal J}\mbox{\rm \small e}quiv J$ for any $J$. (This will also follow from Theorem~\ref{th:0hd} below together with the easy fact that there are trees with branching number 1 for which simple random walk is transient.) However, Y.\ Peres has conjectured a sharp criterion, Conjecture~\ref{conj:peres} below, for which our Corollary~\ref{cor:perestree} together with the discussion following it provides some corroboration. For the rest of this paper, we will restrict to trees. 
It is usually in this context that the most explicit results can be obtained and our basic goal is to determine whether there is a phase transition by comparing the interaction strengths with the ``size'' (branching number) of our tree. It turns out that we can only partially answer this question but the question which we can answer more completely is whether there is a {\it robust} phase transition, a concept which we will introduce shortly. \begin{equation}gin{defn} \labelel{defn:notation} Given ${\cal J},C$ and $\bar{d}eltata$ defined on $C$, let $f^{{\cal J},\bar{d}eltata}_{C , o}$ (or $f^{\bar{d}eltata}_{C , o}$ if ${\cal J}$ is understood) denote the marginal density of $\mu^{{\cal J} , \bar{d}eltata}_{C}$ at the root $o$. \mbox{\rm \small e}nd{defn} For any tree, recall that $\Gamma (v)$ denotes the subtree rooted at $v$, so that the tree $\Gamma(v)$ has vertex set $\{ w \in \Gamma : v \leq w\}$. If $v\in C^i$ and we intersect $C$ with $\Gamma (v)$, we obtain a cutset $C(v)$ for $\Gamma(v)$. We now extend Definition~\ref{defn:notation} to other marginals as follows. \begin{equation}gin{defn} \labelel{defn:marginals} With ${\cal J},C$ and $\bar{d}eltata$ as in Definition~\ref{defn:notation} and $v \in C^i$, define $f_{C,v}^{{\cal J} , \bar{d}elta}$ by replacing $\Gamma$ by $\Gamma (v)$, $C$ with $C(v)$, ${\cal J}$ with ${\cal J}$ restricted to $E (\Gamma (v))$, $\bar{d}eltata$ with $\bar{d}eltata$ restricted to $C(v)$ and $o$ with $v$ in Definition~\ref{defn:notation}. \mbox{\rm \small e}nd{defn} It is important to note that $f_{C,v}^{{\cal J},\bar{d}elta}$ is not the density of the projection of $\mu_C^{{\cal J},\bar{d}elta}$ onto vertex $v$, but rather the density of a Gibbs measure with similar boundary conditions on the smaller graph $\Gamma (v)$. 
\begin{equation}gin{defn} \labelel{defn:SB} A statistical ensemble on a tree $\Gamma$ exhibits a {\bf symmetry breaking (SB) for the interactions ${\cal J}$} if there exists a Gibbs state such that the marginal distribution at some vertex $v$ is not $G$--invariant (or equivalently is not $\bar{d}x$). \mbox{\rm \small e}nd{defn} The following proposition which will be proved in Section~\ref{sec:prelims} is interesting since it establishes the equivalence of PT and SB for general trees and general statistical ensembles, something not known for general graphs, see the remark below. \begin{equation}gin{prop} \labelel{prop:SB=} Consider a statistical ensemble on a tree $\Gamma$ with interactions ${\cal J}$. The following four conditions are equivalent. \\ (i) There exists a vertex $v$ such that for any sequence of cutsets $C_n\thetao\infty$, there exist boundary conditions $\bar{d}elta_n$ on $C_n$ such that $$ \inf_n \|f_{C_n,v}^{\bar{d}elta_n}-1\|_\infty \neq 0. $$ (ii) There exists a vertex $v$, a sequence of cutsets $C_n\thetao\infty$ and boundary conditions $\bar{d}elta_n$ on $C_n$ such that $$ \inf_n \|f_{C_n,v}^{\bar{d}elta_n}-1\|_\infty \neq 0. $$ (iii) The system satisfies SB. \\ (iv) The system satisfies PT. \mbox{\rm \small e}nd{prop} We now fix a distinguished element in ${\bf S}$, hereafter denoted ${\hat{0}}$. The notation $\mu^{{\cal J} , +}_C$ denotes $\mu^{{\cal J} , \bar{d}eltata}_C$ when $\bar{d}eltata$ is the constant function ${\hat{0}}$. In the case ${\cal J} \mbox{\rm \small e}quiv J$, we denote this simply $\mu^{J,+}_C$. We will be particularly concerned about whether $\mu^{{\cal J} , +}_C \rightarrow \mu^{\rm free}$ weakly, as $C \rightarrow \infty$. 
\begin{equation}gin{defn} \labelel{defn:SB+} A statistical ensemble on a tree $\Gamma$ exhibits a {\bf symmetry breaking with plus boundary conditions (SB+) for the interactions ${\cal J}$} if there exists a vertex $v$ and a sequence of cutsets $C_n\thetao\infty$ such that $$ \inf_n \|f_{C_n,v}^{{\cal J},+}-1\|_\infty \neq 0. $$ \mbox{\rm \small e}nd{defn} Note that by symmetry, SB+ does not depend on which point of ${\bf S}$ is chosen to be ${\hat{0}}$. In Section~\ref{sub:spherical} we will prove: \begin{equation}gin{pr} \labelel{pr:rotor equiv} For the rotor model on a tree, SB is equivalent to SB+. \mbox{\rm \small e}nd{pr} We conjecture but cannot prove the stronger statement: \begin{equation}gin{conj} \labelel{conj:SB} For any Heisenberg model on any graph, SB is equivalent to SB+. \mbox{\rm \small e}nd{conj} \noindent{\mbox{\rm \small e}m Remarks:} $(i)$ By Proposition~\ref{prop:SB=}, we have that SB+ implies SB for any statistical ensemble on a tree. While Proposition~\ref{prop:SB=} tells us that PT and SB are equivalent for any statistical ensemble on a tree, we note that such a result is not even known for the rotor model on $Z\!\!\!Z^2$ where it has been established that for all $J$, all Gibbs states are rotationally invariant for ${\cal J}\mbox{\rm \small e}quiv J$ but where it has not been established that there is no phase transition. A weaker form of the above conjecture would be that SB+ and SB are equivalent for all Heisenberg models on trees. This is Problem~\ref{pblm:all spheres} in~Section~\ref{sec:anal}. An extension to graphs with cycles would seem to entail a different kind of reasoning, perhaps similar to the inequalities of Monroe and Pearce~\cite{MP} which fall just short of proving Conjecture~\ref{conj:SB} for the rotor model. 
\\ \noindent{$(ii)$} The fact that PT and SB+ are equivalent when the rotor model is replaced by the Ising model is an immediate consequence of the fact that the probability measure is stochastically increasing in the boundary conditions. More generally, it is also the case that PT and SB+ are equivalent for the Potts models (see \cite{ACCN}). We now consider the idea of a {\it robust phase transition} where we investigate if the boundary conditions on a cutset have a nontrivial effect on the root even when the interactions along the cutset are made arbitrarily small but fixed. Given parameters $J>0$ and $J' \in (0,J]$ and a cutset $C$ of $\Gamma$, let $ {\cal J} ( J', J , C)$ be the function on $E(\Gamma)$ which is $J$ on edges in $C^i$ and $J'$ on edges connecting $C^i$ to $C$ (the values elsewhere being irrelevant). Let $f^{J',J , +}_{C_n , o}$ denote the marginal at the root $o$ of the measure $\mu^{J',J, +}_C:=\mu^{{\cal J} (J', J , C) , +}_C$. \begin{equation}gin{defn} \labelel{defn:robustPT} The statistical ensemble on the tree $\Gamma$ has a {\bf robust phase transition (RPT) for the parameter $J>0$} if for every $J'\in (0,J]$ $$ \inf_C \|f^{J',J , +}_{C , o} - 1\|_\infty \neq 0 \, $$ where the $\inf$ is taken over all cutsets $C$. \mbox{\rm \small e}nd{defn} \noindent{\mbox{\rm \small e}m Remarks:} In the case ${\cal J} \mbox{\rm \small e}quiv J$, by taking $J'=J$, it is clear that a RPT implies SB+ (which in turn implies SB and PT). Note that in this case, RPT is stronger than SB+ not only because $J'$ can be any number in $(0,J]$ and the root $o$ must play the role of $v$ but also because in SB+, we only require that for {\it some} sequence of cutsets going to infinity, the marginal at the vertex $v$ stays away from uniform while in RPT, we require this for {\it all} cutsets going to infinity. 
We note also that with some care, this definition makes sense for general graphs, and that the issue of robustness of phase transition on general graphs is worth investigating, although we do not do so here. Our first theorem gives criteria based on $J$ and the branching number of $\Gamma$ (which will now be defined) for robust phase transition to occur for the Heisenberg models. A little later on, we will have an analogous result for the Potts models. In \cite{F}, Furstenberg introduced the notion of the Hausdorff dimension of a tree (or more accurately of the boundary of the tree). This was further investigated by Lyons~(\cite{Ly2}) using the term branching number instead. The {\bf branching number} of a tree $\Gamma$, denoted $\thetaextstyle {br} (\Gamma)$, is a real number greater than or equal to one that measures the average number of branches per vertex of the tree. More precisely, the {\bf branching number} of $\Gamma$ is defined by $$\thetaextstyle{br}\,\Gamma:=\inf\left\{\lambda>0;\inf\limits_{C} \sum_{x \in C}\lambda^{-|x|} = 0 \right \} \;$$ where the second infimum is over all cutsets $C$. The branching number is a measure of the average number of branches per vertex of $\Gamma$. It is less than or equal to $\liminf_{n \thetao \infty} M_n^{1/n}$, where $M_n := | \left \{x \in \Gamma ; |x| = n \right \}|$, and takes more of the structure of $\Gamma$ into account than does this latter growth rate. For sufficiently regular trees, such as homogeneous trees or, more generally, Galton-Watson trees, $\thetaextstyle{br}\, \Gamma = \lim_{n\thetao\infty} M_n^{1/n}$ (\cite{Ly2}). We also mention that the branching number is the exponential of the Hausdorff dimension of $\bar{d}eltaartial\Gamma$ where the latter is endowed with the metric which gives distance $e^{-k}$ to two paths which split off after $k$ steps. As indicated earlier, the branching number has been an important quantity in previous investigations. 
More specifically, in \cite{Ly1} and \cite{Ly2}, the critical values for independent percolation and for phase transition in the Ising model on general trees are explicitly computed in terms of the branching number. For each $J\ge 0$, define a continuous strictly positive probability density function $K_J : {\bf S} \rightarrow \hbox{I\kern-.2em\hbox{R}}^+$ by \begin{equation} \label{eq:KJ} K_J (u): = C(J)^{-1} \exp (- J H (u , {\hat{0}})) \end{equation} where $C(J) = \int \exp (- J H(w,{\hat{0}})) \, dx (w)$ is a normalizing constant, and more generally let $K_{J,y} : {\bf S} \rightarrow \hbox{I\kern-.2em\hbox{R}}^+$ be given by \begin{equation} \label{eq:KJy} K_{J,y} (u): = C(J)^{-1} \exp (- J H (u , y)) \end{equation} (noting that $K_{J,{\hat{0}}}=K_{J}$). Let ${\cal K}_J$ denote the convolution operator on the space $L^2 ({\bf S} , dx)$ given by the formula \begin{equation} \label{eq:conv} {\cal K}_J f (u) : = \int_{{\bf S}} f(x) K_{J,x} (u) dx(x) \,\, . \end{equation} Note that by the assumed invariance $\int_{{\bf S}} \exp (- J H(w,y)) \, dx (w)$ is independent of $y$ and that $f\ge 0$ and $\int_{{\bf S}} f(x) dx(x)=1$ imply that ${\cal K}_J f\ge 0$ and $\int_{{\bf S}} {\cal K}_J f(x) dx(x)=1$. We extend the above notation to cover the case where $f$ is a pointmass $\delta_y$ at $y$ by defining in that case \begin{equation} \label{eq:pointconv} {\cal K}_J \delta_y (u) : = K_{J , y}(u) . \end{equation} We will now give the exact critical parameter $J$ for RPT for the Heisenberg models. For any $d\ge 1$, let $$ \rho^d(J):= {\int_{-1}^1 r e^{Jr}(1-r^2)^{{d \over 2}-1} dr \over \int_{-1}^1 e^{Jr}(1-r^2)^{{d \over 2}-1} dr }.
$$ When $d=1$ (rotor model), this is (by a change of variables) the first Fourier coefficient of $K_J$ ($\int_{{\bf S}} K_J(\theta) \cos (\theta) d\theta$) which is perhaps more illustrative. When $d=2$, this is the first Legendre coefficient of $e^{Jr}$ (properly normalized) and for $d\ge 3$, this is the first so-called ultraspherical coefficient of $e^{Jr}$ (properly normalized). \begin{th} \label{th:main} Let $d\ge 1$. \\ (i) If $\textstyle {br} (\Gamma) \rho^d(J) <1$, then the $d$--dimensional Heisenberg model on $\Gamma$ with parameter $J$ does not exhibit a robust phase transition. \\ (ii) If $\textstyle{br}(\Gamma) \rho^d(J) >1$, then the $d$--dimensional Heisenberg model on $\Gamma$ with parameter $J$ exhibits a robust phase transition. \end{th} \noindent{\em Remark:} It is easy to see that $\lim_{d\to\infty} \rho^d(J)=0$ which says that it is harder to obtain a robust phase transition on higher dimensional spheres. This is consistent with the fact that it is in some sense harder to have a phase transition for the rotor model than in the Ising model (0-dimensional sphere); this latter fact can be established using the ideas in \cite{PS}. A simple computation shows that the derivative of $\rho^d(J)$ with respect to $J$ is the variance of a random variable whose density function is proportional to $e^{Jr}(1-r^2)^{d/2 -1}$ on $[-1,1]$, thereby obtaining the following lemma. \begin{lem} \label{lem:inc} For any $d\ge 1$, we have that $\rho^d(J)$ is a strictly increasing function of $J$. \end{lem} Theorem \ref{th:main} and Lemma \ref{lem:inc} together with the fact that for any $d\ge 1$, $\rho^d(J)$ is a continuous function of $J$ which approaches 0 as $J\to 0$ and approaches 1 as $J\to \infty$ give us the following corollary.
\begin{equation}gin{cor} \labelel{cor:critical} For any Heisenberg model with $d\ge 1$ and any tree $\Gamma$ with branching number larger than 1, let $J_c=J_c(\Gamma, d)$ be such that $\thetaextstyle{br}(\Gamma) \rho^d(J_c)=1$. Then there is a robust phase transition for the $d$--dimensional Heisenberg model on $\Gamma$ if $J> J_c$ and there is no such robust phase transition for $J< J_c$. \mbox{\rm \small e}nd{cor} For the Heisenberg models, we believe that phase transition and robust phase transition coincide and therefore we have the following conjecture. \begin{equation}gin{conj} \labelel{conj:PS} For any $d\ge 1$, if $\thetaextstyle {br} (\Gamma) {\cal R}dJ <1$, then the $d$--dimensional Heisenberg model on $\Gamma$ with parameter $J$ does not exhibit a phase transition. \mbox{\rm \small e}nd{conj} We can however obtain the following weaker form of this conjecture which is valid for all statistical ensembles. \begin{equation}gin{th} \labelel{th:0hd} If $\thetaextstyle {br} (\Gamma) = 1$, then there is no phase transition for any statistical ensemble on $\Gamma$ with bounded ${\cal J}$. \mbox{\rm \small e}nd{th} Theorems \ref{th:main}(ii) and \ref{th:0hd} together with the facts that RPT implies PT and that for any $d\ge 1$, $\lim_{J\thetao\infty} {\cal R}dJ = 1$ immediately yield the following corollary. \begin{equation}gin{cor} \labelel{cor:perestree} For any Heisenberg model with $d\ge 1$ and for any tree $\Gamma$, there is a \bar{d}eltat for the tree $\Gamma$ for some value of the parameter $J$ if and only if $\thetaextstyle {br} (\Gamma)>1$. \mbox{\rm \small e}nd{cor} Since it is known (see \cite{Ly2}) that $\thetaextstyle {br} (\Gamma)>1$ if and only if there is some $p< 1$ with the property that when performing independent percolation on $\Gamma$ with parameter $p$, there exists a.s.\ an infinite cluster on which simple random walk is transient, the above corollary yields the following conjecture of Y. 
Peres for the special case of trees of bounded degree. \begin{conj} \label{conj:peres} For any graph $A$, the rotor model exhibits a phase transition for some $J$ if and only if there is some $p< 1$ with the property that performing independent bond percolation on $A$ with parameter $p$, there exists a.s.\ an infinite cluster on which simple random walk is transient. \end{conj} Recall that the rotor model on the graph $A$ exhibits no SB for any parameter $J$ if $A$ is recurrent for simple random walk, which is of course consistent with the above conjecture. Note that, on the other hand, the standard Ising model does exhibit a phase transition on $Z\!\!\!Z^2$, a graph which is recurrent (as are its subgraphs) for simple random walk. The next result states the critical value for RPT for the Potts models. \begin{th} \label{th:potts} Consider the Potts model with $q \ge 2$ and let $$\alpha_J = {e^J - e^{-J} \over e^J + (q-1) e^{-J}} \, .$$ (i) If $\textstyle {br} (\Gamma) \alpha_J <1$, then the Potts model on $\Gamma$ with parameter $J$ does not exhibit a robust phase transition. \\ (ii) If $\textstyle{br}(\Gamma) \alpha_J >1$, then the Potts model on $\Gamma$ with parameter $J$ exhibits a robust phase transition. \end{th} \noindent{\em Remarks:}\\ $(i)$ $d\alpha_J/dJ >0$ and so there is a critical value of $J$ depending on $\textstyle{br}(\Gamma)$ analogous to that in Corollary~\ref{cor:critical} for the Heisenberg models. \\ \noindent{$(ii)$} Note that when $q=2$ (the Ising model), this formula agrees with the formula for the Heisenberg models when one formally sets $d=0$ in the formula $$ \rho^d(J)=\int_{S^d} (x \cdot {\hat{0}}) K_J (x) \, dx(x), $$ the latter being obtained by a change of variables. To point out the subtlety involved in Conjecture \ref{conj:PS}, we continue to discuss the Potts model, a case in which the analogue of Conjecture~\ref{conj:PS} fails.
Our final result tells us that phase transitions (unlike robust phase transitions) in the Potts model with $q > 2$ cannot be determined by the branching number. \begin{th} \label{th:2trees} Given any integer $q >2$, there exist trees $\Gamma_1$ and $\Gamma_2$ and a nontrivial interval $I$ such that $\textstyle {br} (\Gamma_1) < \textstyle {br} (\Gamma_2)$ and for any $J\in I$, there is a phase transition for the $q$--state Potts model with parameter $J$ on $\Gamma_1$ but no such phase transition on $\Gamma_2$. \end{th} \noindent{\em Remarks:}\\ $(i)$ $\Gamma_1$ and $\Gamma_2$ can each be taken to be spherically symmetric which means that for all $k$, all vertices at the $k$th generation have the same number of children. \\ \noindent{$(ii)$} In the case $q=2$, more is known. In \cite{Ly1}, the critical value for phase transition in the Ising model is found and corresponds to what is obtained in Theorem~\ref{th:potts} above. It follows that there is never a non-robust phase transition except possibly at the critical value. However, a sharp capacity criterion exists~\cite{PP} for phase transition for the Ising model (settling the issue of phase transition at the critical parameter) and using this criterion, one can show that phase transition and robust phase transition correspond even at criticality. The arguments of~\cite{PP} cannot be extended to the Potts model for $q > 2$ because the operator ${\cal K}_J$, acting on a certain likelihood function, when conjugated by the logarithm is not concave in this case. Theorems~\ref{th:potts} and~\ref{th:2trees} together tell us that there is indeed a non-robust phase transition when $q > 2$ for a nontrivial interval of $J$. The rest of the paper is devoted to the proofs of the above results.
In Section~\ref{sec:prelims}, we collect several lemmas that apply to general statistical ensembles, including the basic recursion formula (Lemma~\ref{lem:rec}) that allows us to analyze general statistical ensembles on trees, prove Lemma~\ref{lem:free} and Proposition~\ref{prop:SB=} as well as provide some background concerning Heisenberg models (showing that they satisfy the more general hypotheses of Theorems~\ref{th:gen ii} and~\ref{th:gen i} given later on) and the more general notion of distance regular spaces. Section~\ref{sec:proofs} is devoted to the proofs of Theorems~\ref{th:gen ii} and~\ref{th:gen i}. In Section~\ref{sec:anal}, we use these theorems to find the critical parameters for robust phase transition in the Heisenberg and Potts models, Theorems~\ref{th:main} and~\ref{th:potts}, as well as prove Proposition~\ref{pr:rotor equiv}. Section~\ref{sec:zero} discusses the special case of trees of branching number 1, proving Theorem~\ref{th:0hd}. Finally, in Section~\ref{sec:potts}, Theorem~\ref{th:2trees} is proved. \setcounter{equation}{0} \section{Basic background results} \label{sec:prelims} In this section, we collect various background results which will be needed to prove the results described in the introduction. We begin with a subsection describing results pertaining to trees that hold for general statistical ensembles. After discussing the concept of a distance regular space in Section~\ref{sub:drs}, we specialize to Heisenberg models (the most relevant family of continuous distance regular models) in Section~\ref{sub:sphere} and then to distance regular graphs in Section~\ref{sub:finite}. \subsection{The fundamental recursion and other lemmas} \label{sub:rec} We start off with two lemmas exploiting the recursive structure of trees. Let ${\bf S},G$ and $H$ be a statistical ensemble. Let $A_1$ and $A_2$ be two disjoint finite graphs, with distinguished vertices $v_1 \in V(A_1)$ and $v_2 \in V(A_2)$.
Let ${\cal J}_1$ and ${\cal J}_2$ be interaction functions for $A_1$ and $A_2$, i.e., positive functions on $E(A_1)$ and $E(A_2)$ respectively. For any $C_1 \subseteq V(A_1) \setminus \{ v_1 \}$ (possibly empty) and any $C_2 \subseteq V(A_2)$, and for any $\bar{d}eltata_1 \in {\bf S}^{C_1}$ and $\bar{d}eltata_2 \in {\bf S}^{C_2}$, we have measures $\mu_i := \mu^{{\cal J}_i , \bar{d}eltata_i}_{C_i}$, $i = 1, 2$ on ${\bf S}^{V(A_i)\setminus C_i}$ defined (essentially) by~(\ref{eq:Gibbs}). Abbreviate $H_{C_i}^{{\cal J}_i,\bar{d}elta_i}$ (which has the obvious meaning) by $H_i$. Let $A$ be the union of $A_1$ and $A_2$ together with an edge connecting $v_1$ and $v_2$. Let $C = C_1 \cup C_2$, ${\cal J}$ extend each ${\cal J}_i$ and the value of the new edge be given the value $J$, $\bar{d}eltata$ extend each $\bar{d}eltata_i$ and denote $\mu^{{\cal J} , \bar{d}eltata}_C$ (a probability measure on ${\bf S}^{(V(A_1)\setminus C_1)\cup (V(A_2)\setminus C_2)}$) by $\mu$ and $H^{{\cal J} , \bar{d}eltata}_C$ (again having the obvious meaning) by $H$. The identity \begin{equation}gin{equation} \labelel{eq:Hdecomp} H = H_1 + H_2 + J H (\mbox{\rm \small e}ta (v_1) , \mbox{\rm \small e}ta (v_2)) \mbox{\rm \small e}nd{equation} leads to the following lemma. \begin{equation}gin{lem} \labelel{lem:decomps} The measure $\mu$ satisfies \begin{equation}gin{equation} \labelel{eq:mudecomp} {d\mu \over d (\mu_1 \thetaimes \mu_2)} = c \mbox{\rm \small e}xp [- J H(\mbox{\rm \small e}ta_1 (v_1) , \mbox{\rm \small e}ta_2 (v_2))] , \mbox{\rm \small e}nd{equation} where $$c = \left [ \int \int \mbox{\rm \small e}xp (- J H (\mbox{\rm \small e}ta_1 (v_1) , \mbox{\rm \small e}ta_2 (v_2))) \, d\mu_1 (\mbox{\rm \small e}ta_1) \, d\mu_2 (\mbox{\rm \small e}ta_2) \right ]^{-1}$$ is a normalizing constant. Let $f_i$ denote the marginal density of $\mu_i$ at $v_i$, $i = 1 , 2$, and $f$ denotes the marginal density of $\mu$ at $v_1$. 
Then the projection $\mu^{(1)}$ of $\mu$ onto ${\bf S}^{V(A_1)\setminus C_1}$ satisfies \begin{equation}gin{equation} \labelel{eq:mudecomp2} \mu^{(1)} = c \int \int \mu_{1 , y} f_1 (y) f_2 (z) \mbox{\rm \small e}xp (- J H(y,z)) \, \bar{d}x (z) \, \bar{d}x (y) \mbox{\rm \small e}nd{equation} for some normalizing constant $c$, where $\mu_{1 , y}$ denotes the conditional distribution of $\mu_1$ given $\mbox{\rm \small e}ta (v_1) = y$. Consequently, \begin{equation}gin{equation} \labelel{eq:fdecomp} f (y) = c f_1 (y) \int f_2 (z) \mbox{\rm \small e}xp (- J H(y,z)) \, \bar{d}x (z) \, , \mbox{\rm \small e}nd{equation} where $c$ normalizes $f$ to be a probability density. \mbox{\rm \small e}nd{lem} \noindent{\bf Proof.} The relation~(\ref{eq:mudecomp}) follows from~(\ref{eq:Hdecomp}) and the defining equation~(\ref{eq:Gibbs}). From this it follows that the measure $\mu$ on pairs $(\mbox{\rm \small e}ta_1 , \mbox{\rm \small e}ta_2)$ makes $\mbox{\rm \small e}ta_1$ and $\mbox{\rm \small e}ta_2$ conditionally independent given $\mbox{\rm \small e}ta_1 (v_1)$ and $\mbox{\rm \small e}ta_2 (v_2)$. Hence the conditional distribution of $\mu^{(1)}$ given $\mbox{\rm \small e}ta_1 (v_1) = y$ and $\mbox{\rm \small e}ta_2 (v_2) = z$ is just $\mu_{1 , y}$. Next, (\ref{eq:mudecomp}) and the last fact yield~(\ref{eq:mudecomp2}). The marginal of $\mu_{1, y}$ at $v_1$ is just $\bar{d}eltata_y$, and so~(\ref{eq:mudecomp2}) yields~(\ref{eq:fdecomp}). $ \Box$ A tree $\Gamma$ may be built up from isolated vertices by the joining operation described in the previous lemma. The decompositions in Lemma~\ref{lem:decomps} may be applied inductively to derive a fundamental recursion for marginals. This recursion, Lemma~\ref{lem:rec} below, expresses the marginal distribution at the root of $\Gamma$ as a pointwise product of marginals at the roots of each of the generation 1 subtrees, each convolved with a kernel $K_J$. 
The normalized pointwise product will be ubiquitous throughout what follows, so we introduce notation for it. \begin{equation}gin{defn} If $f_1 , \ldots , f_k$ are nonnegative functions on ${\bf S}$ with $\int f_i \, \bar{d}x = 1$ for each $i$, let \\ $\bar{d}eltaoi_k (f_1 , \ldots , f_k)$ denote the normalized pointwise product, $$\bar{d}eltaoi_k (f_1 , \ldots , f_k) (x) = {\bar{d}eltarod_{i=1}^k f_i (x) \over \int \bar{d}eltarod_{i=1}^k f_i (y) \, \bar{d}x (y)}$$ whenever this makes sense, e.g., when each $f_i$ is in $L^k (\bar{d}x)$ and the product is not almost everywhere zero. Let $\bar{d}eltaoi$ denote the operator which for each $k$ is $\bar{d}eltaoi_k$ on each $k$-tuple of functions. There is an obvious associativity property, namely $\bar{d}eltaoi (\bar{d}eltaoi (f,g) , h) = \bar{d}eltaoi (f,g,h)$, which may be extended to arbitrarily many arguments. \mbox{\rm \small e}nd{defn} \begin{equation}gin{lem}[Fundamental recursion] \labelel{lem:rec} Given a tree $\Gamma$, a cutset $C$, interactions ${\cal J}$, boundary condition $\bar{d}eltata$ and $v\in C^i$, let $\{ w_1 , \ldots , w_k \}$ be the children of $v$. Let $J_1 , \ldots , J_k$ denote the values of ${\cal J} (v , w_1) , \ldots , {\cal J} (v , w_k)$. Then \begin{equation}gin{equation} \labelel{eq:recurse} f_{C,v}^{{\cal J},\bar{d}elta} = \bar{d}eltaoi ({\cal K}_{J_1} f^{{\cal J} , \bar{d}eltata}_{C , w_1} , \ldots , {\cal K}_{J_k} f^{{\cal J} , \bar{d}eltata}_{C , w_k}) \, , \mbox{\rm \small e}nd{equation} where when $w_i\in C$, $f^{{\cal J} , \bar{d}eltata}_{C , w_i}$ is taken to be the point mass at $\bar{d}eltata(w_i)$ and convention~(\ref{eq:pointconv}) is in effect. \mbox{\rm \small e}nd{lem} \noindent{\bf Proof.} Passing to the subtree $\Gamma (v)$, we may assume without loss of generality that $v = o$. Also assume without loss of generality that $w_1 , \ldots , w_k$ are numbered so that for some $s$, $w_i \in C^i$ for $i \leq s$ and $w_i \in C$ for $i > s$. 
For $i\le s$, let $C(w_i) = C \cap \Gamma (w_i)$. For such $i$, by definition, $f_i := f^{{\cal J} , \bar{d}elta}_{C,w_i}$ is the marginal at $w_i$ of the measure $\mu_i := \mu^{{\cal J}, \bar{d}elta}_{C(w_i) , w_i}$ on configurations on $\Gamma (w_i)\cap C^i$, where ${\cal J}$ and $\bar{d}elta$ are restricted to $E (\Gamma (w_i))$ and $C(w_i)$ respectively. Let $\Gamma_r$ denote the induced subgraph of $\Gamma$ whose vertices are the union of $\{ o \}$, $\Gamma (w_1) , \ldots , \Gamma (w_r)$. We prove by induction on $r$ that the density $g_r$ at the root of $\Gamma_r$ of the analogue of $\mu^{{\cal J} , \bar{d}eltata}_C$ for $\Gamma_r$ is equal to $$\bar{d}eltaoi ({\cal K}_{J_1} f^{{\cal J} , \bar{d}eltata}_{C , w_1} , \ldots , {\cal K}_{J_r} f^{{\cal J} , \bar{d}eltata}_{C , w_r}) \, ;$$ The case $r = k$ is the desired conclusion. To prove the $r=1$ step, use~(\ref{eq:fdecomp}) with $v_1 = o$, $A_1 = \{ o \}$, $C_1 = \mbox{\rm \small e}mptyset$, $v_2 = w_1$, $A_2 = \Gamma (w_1)$ and $C_2 = C(w_1)$. If $w_1 \in C$, the $r=1$ case is trivially true, so assume $s \geq 1$. The measure $\mu_1$ is uniform on ${\bf S}$ since $C(v) = \mbox{\rm \small e}mptyset$. Thus from~(\ref{eq:fdecomp}) we find that $$g_1 (y) = c \int e^{-J_1 H (y,z)} f_1 (z) \, dz = ({\cal K}_{J_1} f_1) (y)$$ which proves the $r = 1$ case. For $1 < r \leq s$, use~(\ref{eq:fdecomp}) with $A_1 = \Gamma_{r-1}$, $v_1 = o$, $C_1 = \Gamma_{r-1} \cap C$, $A_2 = \Gamma (w_r)$, $v_2 = w_r$ and $C_2 = \Gamma (w_r) \cap C$. Using~(\ref{eq:fdecomp}) we find that \begin{equation}gin{eqnarray*} g_r (y) & = & c g_{r-1} (y) \int e^{-J_r H (y,z)} f_r (z) \, \bar{d}x (z) \\[1ex] & = & c g_{r-1} (y) ({\cal K}_{J_r} f_r) (y) \\[1ex] & = & (\bar{d}eltaoi (g_{r-1} , {\cal K}_{J_r} f_r)) (y) \, . \mbox{\rm \small e}nd{eqnarray*} By associativity of $\bar{d}eltaoi$ the induction step is completed for $r \leq s$. 
Finally, if $r > s$, then the difference between $H (\mbox{\rm \small e}ta)$ on $\Gamma_{r-1}$ and $H (\mbox{\rm \small e}ta)$ on $\Gamma_r$ is just $- J_r H(\mbox{\rm \small e}ta (o) , \bar{d}eltata (w_r))$, so $$g_r (y) = c g_{r-1} (y) \mbox{\rm \small e}xp (- J_r H(y , \bar{d}eltata (w_r))) = \left ( \bar{d}eltaoi (g_{r-1} , {\cal K}_{J_r} f_r) \right ) (y)$$ by the convention~(\ref{eq:pointconv}), and associativity of $\bar{d}eltaoi$ completes the induction as before. $ \Box$ Another consequence of Lemma~\ref{lem:decomps} is Lemma~\ref{lem:free}, giving the existence of a natural and well defined free boundary measure. \noindent{\bf Proof of Lemma~\bar{d}eltarotect{\ref{lem:free}}.} Observe that in~(\ref{eq:mudecomp2}), if $f_2 \mbox{\rm \small e}quiv 1$ then the integral against $z$ is independent of $y$, so one has $\mu^{(1)} = \mu_1$. Let $F$ be any cutset and $w \in F^i$ be chosen so each of its children $v_1 , \ldots , v_k$ is in $F$. Applying our observation inductively to eliminate each child of $w$ in turn, we see that the projection of $\mu^{\rm free}_F$ onto ${\bf S}^{F^i \setminus \{ w \}}$ is just $\mu^{\rm free}_{F'}$ where $F' = F \cup \{ w \} \setminus \{ v_1 , \ldots , v_k \}$. Given cutsets $C$ and $D$ with $D \cap C^i \neq \mbox{\rm \small e}mptyset$, choose $v \in D \cap C^i$ and $w \geq v$ maximal in $C^i$. Then all children of $w$ are in $C$. Applying the previous paragraph with $F = C$, we see that $\mu^{\rm free}_C$ agrees with $\mu^{\rm free}_{F'}$. Continually reducing in this way, we conclude that on $C^i \cap D^i$ $\mu^{\rm free}_C$ agrees with $\mu^{\rm free}_Q$ where $Q$ is the exterior boundary of $C^i \cap D^i$. The same argument shows that $\mu^{\rm free}_D$ agrees with $\mu^{\rm free}_Q$, which finishes the proof of the lemma. 
$ \Box$ According to Lemma~\ref{lem:rec}, if, for $J>0$, we define ${\bf P}P (J)$ to be the smallest class of densities containing each $K_{J' , y}$ for $J' \in (0,J]$ and $y \in {\bf S}$ and closed under ${\cal K}_{J'}$ for $J' \in (0,J]$ and $\bar{d}eltaoi$, then, when ${\cal J}$ is strictly positive and bounded by $J$, each density $f^{{\cal J} , \bar{d}elta}_{C , v}$ is an element of ${\bf P}P(J)$. Similarly, if ${\bf P}P_+(J)$ is taken to be the smallest class of densities containing each $K_{J'}$ for $J' \in (0,J]$ and closed under ${\cal K}_{J'}$ for $J' \in (0,J]$ and $\bar{d}eltaoi$, then, when ${\cal J}$ is strictly positive and bounded by $J$, each density $f^{{\cal J} , +}_{C , v}$ is an element of ${\bf P}P_+(J)$. We also let ${\bf P}P:=\bigcup_{J> 0}{\bf P}P(J)$ and ${\bf P}P_+:=\bigcup_{J> 0}{\bf P}P_+(J)$. This leads to the following lemma whose proof is left to the reader. \begin{equation}gin{lem} \labelel{lem:unifbd} Suppose the interaction strengths $\{ {\cal J} (e) \}$ are bounded above by some constant. Then there exist constants $0 < B_{\rm min} < B_{\rm max}$ such that for every $C , \bar{d}eltata$ and $v \in C^i$, the one-dimensional marginal of $\mu^{\bar{d}eltata}_C$ at $v$ is absolutely continuous with respect to $\bar{d}x$ with a density function in $[B_{\rm min} , B_{\rm max}]$. It follows, since the above properties are closed under convex combinations, that all one-dimensional marginals of any Gibbs state have densities in $[B_{\rm min} , B_{\rm max}]$. Similarly, the $k$-dimensional marginals have densities in the interval $[B_{\rm min}^{(k)} , B_{\rm max}^{(k)}]$ for some constants $0< B_{\rm min}^{(k)} < B_{\rm max}^{(k)}$. In addition, the family of all one--dimensional densities which arise as above is an equicontinuous family. 
\mbox{\rm \small e}nd{lem} The usefulness of the equicontinuity property is that the following easily proved lemma (whose proof is also left to the reader) tells us that in determining weak convergence to $\bar{d}x$, it is equivalent to look to see if there is convergence in $L^\infty$ of the associated densities to 1. \begin{equation}gin{lem} \labelel{lem:converge} Let $(X,d)$ be a compact metric space and $\mu$ a probability measure on $X$ with full support. If $\{f_n\}$ is an equicontinuous family of probability densities (with respect to $\mu$), then $$ \lim_{n\thetao\infty} \|f_n-1\|_{\infty} = 0 \mbox{ if and only if } \lim_{n\thetao\infty} f_n d\mu = \mu \mbox{ weakly }. $$ \mbox{\rm \small e}nd{lem} Using this, we can prove the equivalence of phase transition and symmetry breaking on trees (Proposition~\ref{prop:SB=}). \noindent{\bf Proof of Proposition~\ref{prop:SB=}.} (i) implies (ii) is trivial. For (ii) implying (iii), assume we have a vertex $v$, a sequence of cutsets $C_n\thetao\infty$ and boundary conditions $\bar{d}elta_n$ on $C_n$ such that $$ \inf_n \|f_{C_n,v}^{\bar{d}elta_n}-1\|_\infty \neq 0. $$ Clearly we obtain the same result if we change $\bar{d}elta_n$ on $C_n\setminus \Gamma(v)$ to anything, in particular, if we take no (i.e., free) boundary condition there. We then take any weak limit of these measures as $n\thetao\infty$. This will yield a Gibbs state and by the first line of the proof of Lemma~\ref{lem:free}, together with Lemma~\ref{lem:converge}, the marginal density at $v$ of this Gibbs state is not 1, which proves (iii). (iii) implies (iv) is also trivial of course. To see that (iv) implies (i), note that if there is PT, then there exists an extremal Gibbs state $\mu\neq \mu^{\rm free}$. Choose a cutset $C$ such that $\mu\neq \mu^{\rm free}$ when restricted to $C^i$. 
If (i) fails, then for all $v\in C$, there exists a sequence of cutsets $C_n\thetao\infty$ such that for all boundary conditions $\bar{d}elta_n$ on $C_n$ we have that \begin{equation}gin{equation} \labelel{eq:ivgivesi} \inf_n \|f_{C_n,v}^{\bar{d}elta_n}-1\|_\infty = 0. \mbox{\rm \small e}nd{equation} Clearly, because of the geometry, $\{C_n\}$ can be chosen independent of $v$. Since $\mu$ is extremal, it is known (see Theorem 7.12(b) in \cite{Ge}, p. 122) that there exist boundary conditions $\bar{d}elta_n'$ on $C_n$ so that $\mu_{C_n}^{\bar{d}elta_n'} \rightarrow \mu$ weakly. However, by (\ref{eq:ivgivesi}) and Lemma~\ref{lem:rec}, $\mu$ must equal $\mu^{\rm free}$ on $C^i$, a contradiction. $ \Box$ \subsection{Distance regular spaces} \labelel{sub:drs} Our primary interest in this paper is in the Heisenberg models. Nevertheless, it turns out that many of the properties of the Heisenberg model hold in the more general context of distance regular spaces. A {\bf distance regular graph} is a finite graph for which the size of the set $\{ z : d(x,z) = a , d (y,z) = b \}$ depends on $x$ and $y$ only through the value of $d(x,y)$ where $d(x,y)$ is the usual graph distance between $x$ and $y$. We generalize this by saying that the metric space $({\bf S},d)$ with probability measure $\bar{d}x$ is {\bf distance regular} if the law of the pair $(d(x,Z) , d(y,Z))$ when $Z$ has law $\bar{d}x$ depends only on $d(x,y)$. In particular, when the action of $G$ on ${\bf S}$ is distance transitive (in addition to preserving $d$ and $\bar{d}x$), meaning that $(x,y)$ can be mapped to any $(x' , y')$ with $d(x,y) = d(x' , y')$, it follows easily that $({\bf S},d, \bar{d}x)$ is distance regular. All the examples we have mentioned so far are distance transitive (and hence distance regular) except for the rotor model which is still distance regular. 
(For an example of a graph showing that the full automorphism group acting distance transitively is strictly stronger than the assumption of distance regularity, see~\cite{AVLF} or {\it Additional Result} {\bf 23b} of~\cite{Big}.) We present some of the background in this generality not because we are fond of gratuitous generalization but because we find the reasoning clearer, and because it seems reasonable that someone in the future might study a particle system whose spin states are elements of some distance regular space, such as real projective space or the discrete $n$-cube. The primary consequence of distance regularity is that it allows one to define a commutative convolution on a certain subspace of $L^2$. \begin{equation}gin{defn} Let $L^2 ({\bf S})$ denote the space $L^2 (\bar{d}x)$, and let $L^2 ({\bf S})g$ denote the space of functions $f \in L^2 ({\bf S})$ for which $f(x)$ depends only on $d(x , {\hat{0}})$. For $f \in L^2 ({\bf S})g$, define a function $\bar{d}eltasib$ on $\{d({\hat{0}},y)\}_{y\in {\bf S}}$ by $\bar{d}eltasib (r) := f(x)$ where $x$ is such that $d({\hat{0}},x) = r$. \mbox{\rm \small e}nd{defn} \begin{equation}gin{defn} If $({\bf S},\bar{d}x)$ is distance regular, define a commutative convolution operation on $L^2 ({\bf S})g \thetaimes L^2 ({\bf S})g$ by $$f * h (x) := \int_{{\bf S}} h(y) \bar{d}eltasib (d(x,y)) \, \bar{d}x (y) = \int_{[0,\infty)^2} \bar{d}eltasib (u) \overline{h} (v) \, d\bar{d}eltai_x(u,v)$$ where $\bar{d}eltai_x$ is the law of $(d(x,Z) , d({\hat{0}} , Z))$ for a variable $Z$ with law $\bar{d}x$. It is clear from the definition of a distance regular space that $(d(x,Z) , d({\hat{0}},Z))$ and $(d({\hat{0}},Z) , d(x,Z))$ are equal in distribution implying that $f * h =h * f$ and that, since $\bar{d}eltai_x$ only depends on $d(x,{\hat{0}})$, $f,h\in L^2 ({\bf S})g$ implies that $f * h \in L^2 ({\bf S})g$. \mbox{\rm \small e}nd{defn} The following lemma is straightforward and left to the reader. 
\begin{equation}gin{lem} \labelel{lem:dt} For all $J\ge 0$, $K_J\in L^2 ({\bf S})g$ and for all $h\in L^2 ({\bf S})$, ${\cal K}_J(h)(x)$ (defined in~(\ref{eq:conv})) is equal to $\int_{{\bf S}} h(y) \overline{K_J} (d(x,y)) \, \bar{d}x (y)$. In particular, if $({\bf S} , \bar{d}x)$ is distance regular, then the operators ${\cal K}_J$ map $L^2 ({\bf S})g$ into itself and ${\cal K}_J(h) =K_J * h$ for all $h \in L^2 ({\bf S})g$. \mbox{\rm \small e}nd{lem} We believe that for most distance regular spaces, one can verify the necessary hypotheses of Theorems~\ref{th:gen ii} and~\ref{th:gen i} below in the same way as we will do for the Heisenberg models in detail in the next section. Doing this however would take us too far afield and so we content ourselves with pointing out to the reader that much of this probably can be done, and after analyzing the Heisenberg models in Section~\ref{sub:sphere}, explain how to carry much of this out in the context of distance regular graphs in Section~\ref{sub:finite}. \subsection{Heisenberg models} \labelel{sub:sphere} In this subsection, we consider Example \ref{eg:spherical} in Section~\ref{sec:one} and so we have ${\bf S} = S^d$, $d \geq 1$, the unit sphere in $(d+1)$--dimensional Euclidean space with the corresponding $G, d, \bar{d}x$ and $H$. Recall that this is distance transitive for $d\ge 2$ (and hence distance regular) and distance regular for $d=1$. The following lemma allows us to set up coordinates in which our bookkeeping will be manageable. It is certainly well known. \begin{equation}gin{lem} \labelel{lem:spherical} For any $d\ge 1$, there exist real--valued functions $\bar{d}eltasi_0 , \bar{d}eltasi_1 , \bar{d}eltasi_2 , \ldots \in L^2 ({\bf S})g$ $({\bf S}=S^d)$, orthogonal under the inner product $\langle f,g \rangle = \int_{{\bf S}} f \overline{g} \, \bar{d}x$, such that $\bar{d}eltasi_n$ is a polynomial of degree exactly $n$ in $x\cdot {\hat{0}}$, and such that the following properties hold. 
\\ (1) $\bar{d}eltasi_0 (x) \mbox{\rm \small e}quiv 1$ and $\bar{d}eltasi_1 (x) = x \cdot {\hat{0}}$. \\ (2) $1 = \bar{d}eltasi_j ({\hat{0}}) = \sup_{x \in {\bf S}} |\bar{d}eltasi_j (x)|$, for all $j$. \\ (3) $\bar{d}eltasi_i \bar{d}eltasi_j = \sum_{r\ge 0} q^r_{ij} \bar{d}eltasi_r$, where the coefficients $q^r_{ij}$ are nonnegative and $\sum_r q^r_{ij} = 1$. \\ (4) $\bar{d}eltasi_i * \bar{d}eltasi_j = \gamma_j \bar{d}eltata_{ij} \bar{d}eltasi_j$, where $\gamma_j := \bar{d}eltasi_j * \bar{d}eltasi_j ({\hat{0}}) = \int \bar{d}eltasi_j^2(x) \, \bar{d}x(x)$. \\ (5) The functions $\bar{d}eltasi_j$ are eigenfunctions of any convolution operator, that is, $f * \bar{d}eltasi_j = c \bar{d}eltasi_j$ for any $f \in L^2 ({\bf S})g$. \\ (6) Any $f \in L^2 ({\bf S})g$ can be written as a convergent series $f(x) = \sum_{j\ge 0} a_j (f) \bar{d}eltasi_j(x)$ (in the $L^2$ sense), where the complex numbers $a_j(f)$ are given by $a_j(f) : = \gamma_j^{-1} \int f(x) \bar{d}eltasi_j (x) \, \bar{d}x (x) .$ \\ (7) For $f,g \in L^2 ({\bf S})g$, we have $a_j(f * g)= \gamma_j a_j(f) a_j(g)$. \mbox{\rm \small e}nd{lem} \noindent{\bf Proof.} For each $\alphapha, \begin{equation}ta >-1$, define the Jacobi polynomials $\{{\bf P}^{(\alphapha , \begin{equation}ta)}_n(r)\}_{n\ge 0}$ by \begin{equation}gin{equation} \labelel{eq:rod} (1 - r)^\alphapha (1 + r)^\begin{equation}ta {\bf P}_n^{(\alphapha , \begin{equation}ta)} (r) = {(-1)^n \over 2^n n!} {d^n \over dr^n} \left [ (1 - r)^{n + \alphapha} (1 + r)^{n + \begin{equation}ta} \right ] \, . \mbox{\rm \small e}nd{equation} (The Jacobi polynomials are usually defined differently in which case~(\ref{eq:rod}) becomes what is known as Rodrigues' formula but we shall use~(\ref{eq:rod}) as our definition; when $\alphapha=\begin{equation}ta$, which is the case relevant to us, these are the ultraspherical polynomials.) 
For any given $d\ge 1$, we let, for $n\ge 0$, $$ \bar{d}eltasi_n(x):= {{\bf P}^{({d \over 2}-1 ,{d \over 2}-1)}_n (x\cdot {\hat{0}}) \over {\bf P}^{({d \over 2}-1 ,{d \over 2}-1)}_n (1)}. $$ By p.254 in \cite{R}, ${\bf P}^{(\alphapha , \begin{equation}ta)}_n$ is a polynomial of degree exactly $n$. By p.259 in \cite{R}, the collection $\{{\bf P}^{(\alphapha , \begin{equation}ta)}_n\}_{n\ge 0}$ are orthogonal on $[-1,1]$ with respect to the weight function $(1-r)^\alphapha (1+r)^\begin{equation}ta$. A change of variables then shows that the $\bar{d}eltasi_n$'s are orthogonal in $L^2 ({\bf S})$. (1) is then an easy calculation, the first equality in (2) is trivial while the second equality is in \cite{R}, p.278 and 281. (3) is in~\cite{Askey74}, p.41. (4) and (5) follow from the Funk--Hecke Theorem (\cite{N}, p.195) (the calculation of $\gamma_j$ being trivial). Since the subspace generated by the $\{{\bf P}^{({d \over 2}-1 ,{d \over 2}-1)}_n(r)\}$'s are uniformly dense in $C([-1,1])$ by the Stone-Weierstrass Theorem, it easily follows that the subspace generated by the $\bar{d}eltasi_n$'s are uniformly dense in $L^2 ({\bf S})g\cap C({\bf S})$. Hence the $\bar{d}eltasi_n$'s are a basis for $L^2 ({\bf S})g$ and (6) follows. Finally, (4) and (6) together yield (7). $ \Box$ Note that for all $f,g\in L^2 ({\bf S})g$, we have that $fg\in L^2 ({\bf S})g$ provided $fg\in L^2 ({\bf S})$. Since $\bar{d}eltasi_n$ is a polynomial of degree exactly $n$ in $x\cdot {\hat{0}}$, the greatest $r$ for which $q^r_{ij} \neq 0$ must be $i + j$. From this and the nonnegativity of the $q^r_{ij}$'s, it follows that for $\lambda > 0$ the function $e^{\lambda \bar{d}eltasi_1(x)} = \sum_{n \geq 0} \lambda^n \bar{d}eltasi_1 (x)^n / n!$ has \begin{equation}gin{equation} \labelel{eq:viii} a_j (e^{\lambda \bar{d}eltasi_1}) > 0 , \mbox{ for all } j\ge 0. 
\mbox{\rm \small e}nd{equation} It follows from Lemmas~\ref{lem:rec}, \ref{lem:dt} and \ref{lem:spherical}(3,4) that ${\bf P}P_+\subseteq L^2 ({\bf S})g$ and that for all $g \in {\bf P}P_+$, \begin{equation}gin{equation} \labelel{eq:ix} a_j (g) > 0, \mbox{ for all } j\ge 0. \mbox{\rm \small e}nd{equation} \begin{equation}gin{defn} Define the $A$ norm on $L^2 ({\bf S})g$ by $$||f||_A = \sum_{j\ge 0} |a_j (f)| ,$$ provided it is finite. \mbox{\rm \small e}nd{defn} From the fact that $\sum_{r\ge 0} q^r_{ij} = 1$, one can easily show that for all $f,g\inL^2 ({\bf S})g$ with $fg\inL^2 ({\bf S})g$, \begin{equation}gin{equation} \labelel{eq:submult} ||fg||_A \leq ||f||_A ||g||_A , \mbox{\rm \small e}nd{equation} and that equality holds if $f , g \in {\bf P}P_+$. An easy computation also shows that $||e^{\lambda \bar{d}eltasi_1(x)}||_A =e^{\lambda} <\infty$ for all $\lambda \ge 0$ and hence by Lemmas~\ref{lem:rec} and~\ref{lem:spherical}(4) and~(\ref{eq:submult}), $||f||_A<\infty$ for all $f\in{\bf P}P_+$. Also, it follows from~(\ref{eq:ix}), Lemma~\ref{lem:spherical}(2,6), the fact that $\int f \, \bar{d}x = 1$ for all $f \in {\bf P}P_+$ and the fact that ${\bf P}P_+\subseteq L^2 ({\bf S})g$ that for $f \in {\bf P}P_+$, \begin{equation}gin{equation} \labelel{eq:x} 1 + ||f - 1||_A = ||f||_A = f ({\hat{0}}) = ||f||_\infty = 1 + ||f - 1||_\infty . \mbox{\rm \small e}nd{equation} The last equality is obtained by observing that $\le$ is clear while $ ||g||_\infty \le ||g ||_A $ for all $g\in L^2 ({\bf S})g$ is also clear. \begin{equation}gin{lem} \labelel{lem:taylor} There exists a function $o$ with $\bar{d}isplaystyle{\lim_{h \thetao 0} {o(h) \over h} = 0}$ such that for all $h_1 , \ldots , h_k \in {\bf P}P_+$ with $k \leq B$, \begin{equation}gin{equation} \labelel{eq:xi} || \bar{d}eltaoi (h_1 , \ldots , h_k) - 1 - \sum_{i=1}^k (h_i - 1)||_A \leq o(\max_i ||h_i - 1||_A) , \mbox{\rm \small e}nd{equation} provided $\max_i ||h_i - 1||_A\le 1$. 
\mbox{\rm \small e}nd{lem} \noindent{\bf Proof.} Write \begin{equation}gin{equation} \labelel{eq:new01} || \bar{d}eltarod_{i=1}^k h_i - 1 - \sum_{i=1}^k (h_i - 1)||_A = || \sum_{{A\subseteq\{1,\ldots,k\}\atop |A|\ge 2}} \bar{d}eltarod_{i\in A}(h_i-1)||_A. \mbox{\rm \small e}nd{equation} Then $\max_i ||h_i - 1|| \leq 1$ and submultiplicativity~(\ref{eq:submult}) of $|| \cdot ||_A$ implies this is at most $$ 2^k (\max_i ||h_i - 1||_A)^2 .$$ Next, since $\int (h_i - 1) \, \bar{d}x = 0$ for $1 \leq i \leq k$, we similarly obtain $$\left|\int \bar{d}eltarod_{i=1}^k h_i - 1\right| \leq 2^k (\max_i ||h_i - 1||_A)^2.$$ We then have $$ || \bar{d}eltaoi (h_1 , \ldots , h_k) - \bar{d}eltarod_{i=1}^k h_i||_A = {1\over \int \bar{d}eltarod_{i=1}^k h_i}\left|\int \bar{d}eltarod_{i=1}^k h_i - 1\right| || \bar{d}eltarod_{i=1}^k h_i ||_A \leq 4^k (\max_i ||h_i - 1||_A)^2 ,$$ since $|| \bar{d}eltarod_{i=1}^k h_i ||_A\le 2^k$ and $\int \bar{d}eltarod_{i=1}^k h_i\ge 1$ by the positivity of the $q^r_{ij}$ and~(\ref{eq:ix}). A use of the triangle inequality completes the proof. $ \Box$ We note five facts that follow easily from the above, but which will be useful later on in generalizing our results. Let $ spherically symmetric p$ be the linear subspace of $L^2 ({\bf S})g$ spanned by ${\bf P}P_+$, $ spherically symmetric pj$ be the linear subspace of $L^2 ({\bf S})g$ spanned by ${\bf P}P_+(J)$ and $|| {\cal K}_{J'} ||_A$ denote the operator norm of ${\cal K}_{J'}$ on $( spherically symmetric p,||\,\,||_A)$. 
\begin{equation} \label{eq:xii}
\lim_{J' \to 0} ||K_{J'} - 1||_A = 0 ;
\end{equation}
\begin{equation} \label{eq:present iii}
c_1:=\sup_{f\in {\cal S} ,f \neq 1} {||f - 1||_\infty \over ||f - 1||_A} < \infty ;
\end{equation}
\begin{equation} \label{eq:present iii'}
c_2:=\inf_{f\in{\cal P}_+,f\neq 1} {||f - 1||_\infty \over ||f - 1||_A} > 0 ;
\end{equation}
\begin{equation} \label{eq:xiii}
\mbox{ For all } J' \geq 0, \,\,\, || {\cal K}_{J'} ||_A \leq 1;
\end{equation}
There exist $a,b\in {\bf S}$ such that for all $f\in {\cal P}_+$,
\begin{equation} \label{eq:xiiii}
f(a)=\sup_{x\in{\bf S}} f(x) \mbox{ and } f(b)=\inf_{x\in{\bf S}} f(x).
\end{equation}
(\ref{eq:xiii}), for example, follows immediately from Lemmas~\ref{lem:dt} and~\ref{lem:spherical}(7) and the fact that $|\gamma_n a_n(g)|\le 1$ for any probability density function $g\in L^2_{\hat{0}}({\bf S})$.

The results on Heisenberg models presented thus far are parallel to the results obtainable for any finite distance regular graph (see the next subsection). One useful result that is not true for general distance regular models depends on the following obvious geometric property of the sphere:
$$|\{ z : d(x,z) \leq a , d(y,z) \leq b \}|$$
is a nonincreasing function of $d(x,y)$ for any fixed $a$ and $b$, where $|\,\,|$ denotes surface measure.
[Proof: For $S^1$, this is obvious. For $S^d$, $d\ge 2$, by symmetry, we can assume that $x=(0,\ldots,0,1)$ and $y=(\cos\theta,0,\ldots, 0, \sin\theta)$ (both vectors with $d+1$ coordinates). Write $S^d$ as
$$ \cup_{u\in [-1,1]^{d-1}} A_u $$
where
$$ A_u:=S^d\cap\{(a_1,\ldots,a_{d+1}):(a_2,\ldots,a_{d})=u\}. $$
Each $A_u$ is a circle (or is empty) and so essentially by the 1--dimensional case, we have the desired behaviour on each $A_u$ (using 1--dimensional Lebesgue measure) and by Fubini's Theorem, we obtain the desired result on $S^d$.]

Calling a function $f\in L^2_{\hat{0}}({\bf S})$ nonincreasing if the corresponding radial function $\bar{f}$ (given by $f(x)=\bar{f}(d(x,{\hat{0}}))$) is nonincreasing, the latter property of the sphere can be seen to be equivalent to the property that ${\bf 1}_{d(x , {\hat{0}}) \leq a} * {\bf 1}_{d(x , {\hat{0}}) \leq b}$ is nonincreasing, and by taking linear combinations, this is equivalent to $f * g$ being nonincreasing for all nonincreasing $f$ and $g$ in $L^2_{\hat{0}}({\bf S})$. Since $K_J$ is nonincreasing for all $J$, it follows from the fundamental recursion that
\begin{equation} \label{eq:xiv}
f \in {\cal P}_+ \Rightarrow f \mbox{ is nonincreasing} .
\end{equation}

\begin{lem} \label{lem:incr}
For any positive nonincreasing $f \in L^2_{\hat{0}}({\bf S})$,
$$ \left|\int_{{\bf S}} f \psi_n \, dx\right| \le \int_{{\bf S}} f \psi_1 \, dx $$
for all $n \ge 1 $.
\end{lem}

\noindent{\bf Proof.}
It suffices to prove this for functions of the form $f(x) = {\bf 1}_{\{x \cdot {\hat{0}} \geq t\}}$ with $t\in [-1,1]$. We rely on explicit formulae for the functions $\{ \psi_n \}$. Letting $\alpha = d/2 - 1$, a change of variables yields
$$\int_{{\bf S}} f \psi_n \, dx = s_d^{-1}\int_t^1 {P_n^{(\alpha , \alpha)} (r) \over P_n^{(\alpha , \alpha)} (1)} (1 - r^2)^\alpha \, dr,$$
where
$$ s_d=\int_{-1}^1 (1-r^2)^\alpha \, dr $$
and $P_n^{(\alpha , \alpha)}$ is the Jacobi polynomial defined earlier.
Taking the indefinite integral of each side in~(\ref{eq:rod}) with $\beta = \alpha$ yields
\begin{eqnarray*}
\int (1 - r)^\alpha (1 + r)^\alpha P_n^{(\alpha , \alpha)} (r) \, dr & = & {(-1)^n \over 2^n n!} {d^{n-1} \over dr^{n-1}} \left [ (1 - r)^{n + \alpha} (1 + r)^{n + \alpha} \right ] \\[2ex]
& = & {- 1 \over 2n} (1 - r^2)^{\alpha + 1} P_{n-1}^{(\alpha + 1 , \alpha + 1)} (r)\, .
\end{eqnarray*}
Evaluating at 1 and $t$ gives
\begin{eqnarray*}
\int_{{\bf S}} f \psi_n \, dx & = & s_d^{-1}\int_t^1 {P_n^{(\alpha , \alpha)} (r) (1 - r^2)^\alpha \over P_n^{(\alpha , \alpha)} (1)} \, dr \\[2ex]
& = & s_d^{-1} {P_{n-1}^{(\alpha + 1 , \alpha + 1)} (t) (1 - t^2)^{\alpha + 1} \over 2 n P_n^{(\alpha , \alpha)} (1)} .
\end{eqnarray*}
When $n = 1$, using~(\ref{eq:rod}), this is just $s_d^{-1}(1 - t^2)^{\alpha + 1} / 2(1+\alpha)$. Dividing, we get
$${ \int_{{\bf S}} f \psi_n \, dx \over \int_{{\bf S}} f \psi_1 \, dx }
= {P_{n-1}^{(\alpha + 1 , \alpha + 1)} (t) (1+\alpha) \over n P_n^{(\alpha , \alpha)} (1)}
= {P_{n-1}^{(\alpha + 1 , \alpha + 1)} (t) \over P_{n-1}^{(\alpha + 1 , \alpha + 1)} (1)} \cdot
{P_{n-1}^{(\alpha + 1 , \alpha + 1)} (1) \over n P_n^{(\alpha , \alpha)} (1)}\cdot(1+\alpha).$$
The first term in the product is bounded in absolute value by 1. By \cite{Askey74}, p.7,
$$P_n^{(\alpha , \alpha)} (1) = {\alpha + n \choose n} ,$$
and so we see that the second term is $1 / (\alpha + 1)$, completing the proof of the lemma.
$\Box$

\noindent{\em Remark:} The case $d = 1$ can also be handled by a rearrangement lemma.
\begin{defn} \label{defn:op}
Define a linear functional $L$ on $L^2_{\hat{0}}({\bf S})$ by $L (g):= \int_{{\bf S}}g(x)\psi_1(x) \, dx(x)$ $(= \gamma_1 a_1 (g))$ and set ${\bf Op}_J = L(K_J)$. (Recall that $\psi_1,\gamma_1$ and $a_1$ are defined in Lemma~\ref{lem:spherical}.)
\end{defn}

It follows from Lemmas~\ref{lem:dt},~\ref{lem:spherical}(7) and~\ref{lem:incr},~(\ref{eq:xiv}) and an easy computation that
\begin{equation} \label{eq:ixix}
||{\cal K}_J f-1||_A\le {\bf Op}_J ||f-1||_A \mbox{ for all } f\in{\cal P}_+(J).
\end{equation}
In the following inequalities, we denote $\rho := {\bf Op}_J$. For $f \in {\cal P}_+(J)$, it also follows easily that
\begin{equation} \label{eq:(a)}
L ({\cal K}_J f - 1 ) \geq \rho L (f - 1)
\end{equation}
and that there is a constant $c_3$ such that for all $f\in {\cal S}_J$,
\begin{equation} \label{eq:(c)}
|L(f)| \leq c_3 ||f||_A .
\end{equation}
(We can of course take $c_3$ to be 1, but we leave the condition written in this more general form for use as a hypothesis in Theorem~\ref{th:gen i}.)

Putting together the results of Lemmas~\ref{lem:spherical} and~\ref{lem:incr}, as well as~(\ref{eq:viii}),~(\ref{eq:ix}) and~(\ref{eq:xiv}), gives the following corollary.

\begin{cor} \label{cor:(b)}
For all $J\ge 0$, there is a constant $c_4 > 0$ such that for all $f \in {\cal P}_+(J)$,
\begin{equation} \label{eq:(b)}
L (f) \geq c_4 ||f - 1||_A .
\end{equation}
\end{cor}

\noindent{\bf Proof.}
Fix $f \in {\cal P}_+(J)$. If $f = K_{J'}$ for some $J' \in (0, J]$, we argue as follows.
As $||e^{\lambda \psi_1(x)}||_A =e^{\lambda}$ (which we mentioned earlier) and $K_{J'}(x)=e^{J'\psi_1(x)}/\int e^{J'\psi_1(x)}\,dx(x)$, we have
\begin{eqnarray*}
||K_{J'} - 1||_A & = & ||K_{J'} ||_A -1 \\[2ex]
& = & {e^{J'}\over \int e^{J'\psi_1(x)}\,dx(x)} -1 \\[2ex]
& \le & e^{2J'} -1.
\end{eqnarray*}
Next,
\begin{eqnarray*}
L (K_{J'}) & = & {1\over \int e^{J'\psi_1(x)}\,dx(x)} \int e^{J'\psi_1(x)}\psi_1(x)\,dx(x) \\[2ex]
& = & {1\over \int e^{J'\psi_1(x)}\,dx(x)} \sum_{k=0}^\infty {(J')^k\over k!} \int \psi_1^{k+1}(x)\,dx(x).
\end{eqnarray*}
By Lemma~\ref{lem:spherical}(3), all terms in the sum are nonnegative and by Lemma~\ref{lem:spherical}(4), the $k=1$ term is $J'\gamma_1$. Hence $L (K_{J'})\ge J'\gamma_1/e^{J'}$. Since
$$ \inf_{J'\in (0,J]} {J'\gamma_1 \over e^{J'}(e^{2J'} -1) } >0, $$
we can find a $c_4$ in this case.

Otherwise, by the fundamental recursion, we may represent $f$ as $\Phi ({\cal K}_{J_1} h_1 , \ldots , {\cal K}_{J_k} h_k)$ with each $h_i$ either in ${\cal P}_+(J)$ or equal to $\delta_{{\hat{0}}}$ and each $J_i\in (0,J]$. Define $g_i = {\cal K}_{J_i} h_i - 1$. Let $m :=\inf_{0< J'\le J} a_1 (K_{J'}) / \sum_{n > 0} a_n (K_{J'})$, which is strictly positive by the above. It follows that if $h_i\in {\cal P}_+(J)$ (the case $h_i=\delta_{{\hat{0}}}$ is already done),
$${L (g_i) \over ||g_i ||_A} = {a_1 (K_{J_i}) a_1 (h_i) \gamma_1^2\over \sum_{n > 0} a_n (K_{J_i}) a_n (h_i) \gamma_n} \geq m\gamma_1$$
by Lemma~\ref{lem:spherical}(7) and since $a_1(h_i)\gamma_1 \ge a_n(h_i)\gamma_n$ for all $n\ge 1$ by Lemma~\ref{lem:incr} and~(\ref{eq:xiv}). Let $h = \prod_{i=1}^k {\cal K}_{J_i} h_i$. Then $L (h) = L(1 + \sum_{i=1}^k g_i + Q)$, where $Q$ is a sum of monomials in $\{ g_i \}$.
Using $q^r_{ij} \ge 0$ and~(\ref{eq:ix}), we have that $L(Q) \geq 0$, and hence
\begin{equation} \label{eq:new02}
L(h) \geq \sum_{i=1}^k L(g_i) \geq m \gamma_1\sum_{i=1}^k ||g_i ||_A .
\end{equation}
On the other hand, for any $B$ and $M$, there is $C = C(M,B)$ such that if $x_1 , \ldots , x_k \in (0,M)$ with $k\le B$, then
$$ -1 + \prod_{i=1}^k (1 + x_i) \leq C \sum_{i=1}^k x_i .$$
Next, the positivity of the $q^r_{ij}$ implies $\int_{{\bf S}} h(x) \, dx(x) = a_0 (h) \geq 1$. It follows that
$$||h - 1||_A = -1+ ||h ||_A = -1 + \prod_{i=1}^k ||g_i + 1||_A \leq C \sum_{i=1}^k ||g_i||_A $$
for some constant $C$, since $||g_i+1 ||_A=||g_i ||_A +1$ and $||g_i+1 ||_A$ clearly has a universal upper bound. [To see the latter statement, one notes that
$$ \sup_{0<J' \le J} ||K_{J'}||_A <\infty $$
and
$$ ||K_{J'} *f||_A \le ||K_{J'} ||_A $$
for any probability density function $f\in L^2_{\hat{0}}({\bf S})$ (by Lemma~\ref{lem:spherical}(7)); together with~(\ref{eq:submult}) and the fact that we never have more than $B$ terms in our pointwise products, this implies that
$$ \sup_{f\in {\cal P}_+(J)}||f||_A \le \left(\sup_{0<J' \le J} ||K_{J'}||_A \right)^B <\infty. $$]
Putting this together with~(\ref{eq:new02}) gives
$${L(h) \over ||h - 1||_A} \geq {m \gamma_1\over C} \, .$$
Finally, letting $f = h / \left(\int_{{\bf S}} h(x) \, dx(x)\right)$, we obtain
\begin{eqnarray*}
||h - 1||_A & \ge & \sum_{n\ge 1} a_n(h) \\[2ex]
& = & \sum_{n\ge 1} \left[\int_{{\bf S}} h(x)\,dx(x)\right] a_n(f) \\[2ex]
& = & \left[\int_{{\bf S}} h(x)\,dx(x)\right] ||f - 1||_A.
\end{eqnarray*}
Hence
$$ {L(f)\over ||f - 1||_A} \ge {L(h)\over ||h - 1||_A} \ge {m \gamma_1\over C} $$
and we're done.
$\Box$

\subsection{Distance regular graphs} \label{sub:finite}

For the remainder of this section, we suppose that ${\bf S}$ is the vertex set of a finite, connected, distance regular graph, that $d(x,y)$ is the graph distance, and that the energy $H (x,y)$ depends only on $d(x,y)$. The Potts models fit into this framework, with the respective graphs being the complete graph $K_q$ on $q$ vertices. All the results we need follow in fact from an even weaker assumption, namely that ${\bf S}$ is an {\it association scheme}. For the definition of association schemes and the proofs of the relevant results, see~\cite{BCN} or~\cite{Ter98}. By developing the analogue of Lemma~\ref{lem:spherical} for distance regular graphs, we will illustrate the extent to which our results are independent of the special properties of the Heisenberg model.

We have a distinguished element ${\hat{0}} \in {\bf S}$ and the measure $dx$ will of course be normalized counting measure $|{\bf S}|^{-1} \sum_{x \in {\bf S}} \delta_x$. The spaces $L^2 ({\bf S})$ and $L^2_{\hat{0}}({\bf S})$ are then simply finite dimensional vector spaces with respective dimensions $|{\bf S}|$ and $1+D$, where $D$ is the diameter of the graph ${\bf S}$. Denote by $M({\bf S})$ the space of matrices with rows and columns indexed by ${\bf S}$, thought of as linear maps from $L^2 ({\bf S})$ to $L^2 ({\bf S})$. Associated with each function $f \in L^2_{\hat{0}}({\bf S})$ is the matrix $M_f \in M({\bf S})$ whose $(x,y)$ entry is $\bar{f} (d(x,y))$, whence the matrix $M_f$ corresponds to the linear operator $h \mapsto h * f$ given in Section~\ref{sub:drs}. The following analogue of Lemma~\ref{lem:spherical} is derived from Section~2.4 of~\cite{Ter98}; a published reference is Section~2.3 of~\cite{BCN}.
\begin{lem} \label{lem:scheme}
There exists a basis of real--valued functions $\psi_0 , \ldots , \psi_D$ of $L^2_{\hat{0}}({\bf S})$ orthogonal under the inner product $\langle f , g\rangle = |{\bf S}|^{-1} \sum_x f(x) \overline{g(x)}$ with the following properties. \\
(1) $\psi_0 (x) \equiv 1$. \\
(2) $\psi_j ({\hat{0}}) = 1 = \sup_x |\psi_j (x)|$ for all $j$. \\
(3) $\psi_i \psi_j = \sum_{r=0}^D q^r_{ij} \psi_r$ for some nonnegative coefficients $q^r_{ij}$ with $\sum_r q^r_{ij} = 1$. \\
(4) $\psi_i * \psi_j = \gamma_j \delta_{ij} \psi_j$, where $\gamma_j : = \psi_j * \psi_j ({\hat{0}}) = |{\bf S}|^{-1} \sum_x \psi_j (x)^2$. \\
(5) The functions $\psi_j$ are eigenfunctions of any convolution operator, that is, $M_f \psi_j = c \psi_j$ for any $f \in L^2_{\hat{0}}({\bf S})$. \\
(6) For $f \in L^2_{\hat{0}}({\bf S})$, we have $f = \sum_{j=0}^D a_j(f) \psi_j$, where $a_j : = \gamma_j^{-1} |{\bf S}|^{-1} \sum_x f(x) \psi_j (x)$. \\
(7) For $f,g \in L^2_{\hat{0}}({\bf S})$, we have $a_j(f * g)= \gamma_j a_j(f) a_j(g)$. \\
(8) For $f\in L^2_{\hat{0}}({\bf S})$ which is positive and nonincreasing, $|\langle f , \psi_i\rangle|\le \langle f , \psi_1\rangle $ for each $i\ge 1$.
\end{lem}

If we place the norm $\sum_{j=0}^D |a_j(f)|$ on ${\cal S}_J$, essentially all of the hypotheses in Theorems~\ref{th:gen ii} and~\ref{th:gen i} (to come later) are immediate, noting that all norms are equivalent on finite dimensional spaces. If the analogue of~(\ref{eq:xiv}) holds, then letting $L (g):= |{\bf S}|^{-1}\sum_{x\in{\bf S}}g(x)\psi_1(x)$ and both ${\bf Op}_J $ and $\rho$ be $L(K_J)$, one can easily show that {\it all} of the hypotheses in Theorems~\ref{th:gen ii} and~\ref{th:gen i} hold.
As far as~(\ref{eq:xiv}), it trivially holds for the complete graph, where the diameter $D$ is equal to 1, and in any case the reader is left with only one condition to check.

\setcounter{equation}{0}
\section{Two Technical Theorems} \label{sec:proofs}

We now state two general results from which Theorems~\ref{th:main} and~\ref{th:potts} will follow.

\begin{th} \label{th:gen ii}
Let $\Gamma$ be any tree (with bounded degree). For the $d$--dimensional Heisenberg model with $d\ge 1$, if $J > 0$ and
$${\rm br} (\Gamma) \cdot {\bf Op}_J < 1,$$
then there is no robust phase transition for the parameter $J$, where ${\bf Op}_J$ is given in Definition~\ref{defn:op} (${\bf Op}_J$ implicitly depends on $d$).

More generally, if $J>0$ and if $({\bf S} , G , H)$ is any statistical ensemble with a norm $|| \cdot ||$ on ${\cal S}_J$ satisfying~(\ref{eq:xi}),~(\ref{eq:xii}),~(\ref{eq:present iii}) and~(\ref{eq:xiii}), and there exists a number ${\bf Op}_J \in (0,1)$ satisfying (\ref{eq:ixix}) and ${\rm br} (\Gamma) \cdot {\bf Op}_J < 1$, then there is no robust phase transition for the parameter $J$.
\end{th}

\begin{th} \label{th:gen i}
Let $\Gamma$ be any tree (with bounded degree). For the $d$--dimensional Heisenberg model with $d\ge 1$, if $J > 0$ and
$${\rm br} (\Gamma) \cdot {\bf Op}_J > 1,$$
then there is a robust phase transition for the parameter $J$, where ${\bf Op}_J$ is as above.
More generally, if $J>0$ and if $({\bf S} , G , H)$ is any statistical ensemble with a norm $|| \cdot ||$ on ${\cal S}_J$ satisfying~(\ref{eq:xi}),~(\ref{eq:present iii}), (\ref{eq:present iii'}),~(\ref{eq:xiii}) and~(\ref{eq:xiiii}), and if $L$ is a linear functional on ${\cal S}_J$ which vanishes on the constants and satisfies~(\ref{eq:(a)}),~(\ref{eq:(c)}) and~(\ref{eq:(b)}) for a constant $\rho > 0$, then ${\rm br} (\Gamma) \cdot \rho > 1$ implies a robust phase transition for the parameter $J$.
\end{th}

To prove these results, we begin with a purely geometric lemma on the existence of cutsets of uniformly small content below the branching number.

\begin{lem} \label{lem:globalcut}
Assume that ${\rm br} (\Gamma) < d$. Then for all $\epsilon >0$, there exists a cutset $C$ such that
$$ \sum_{x\in C}({1 \over d})^{|x|} \le \epsilon $$
and for all $v\in C^i\cup C$,
\begin{equation}\label{eqn:goodcut}
\sum_{x\in C\cap \Gamma (v)}({1 \over d})^{|x|-|v|} \le 1.
\end{equation}
\end{lem}

\noindent{\bf Proof.}
Since ${\rm br} (\Gamma) < d$, for any given $\epsilon >0$, there exists a cutset $C$ such that
$$ \sum_{x\in C}({1 \over d})^{|x|} \le \epsilon. $$
We can assume that $C$ is a minimal cutset with this property with respect to the partial order given by $C_1 \preceq C_2$ if for all $v\in C_1$, there exists $w\in C_2$ such that $v\le w$. We claim that this cutset satisfies~(\ref{eqn:goodcut}). If this property failed for some $v$, we let $C'$ be the modified cutset obtained by replacing $C\cap \Gamma (v)$ by $v$ (and leaving $C\cap \Gamma^c_v$ unchanged). As~(\ref{eqn:goodcut}) clearly holds for $w\in C$, we must have that $v\not\in C$, in which case $C'\neq C$.
We then have
\begin{eqnarray*}
\sum_{x\in C'}({1 \over d})^{|x|} & = & \sum_{x\in C\cap \Gamma (v)^c}({1 \over d})^{|x|}+({1 \over d})^{|v|} \\[1ex]
& < & \sum_{x\in C\cap \Gamma (v)^c}({1 \over d})^{|x|}+ ({1 \over d})^{|v|} \sum_{x\in C\cap \Gamma (v)}({1 \over d})^{|x|-|v|} \\[1ex]
& = & \sum_{x\in C\cap \Gamma (v)^c}({1 \over d})^{|x|}+ \sum_{x\in C\cap \Gamma (v)}({1 \over d})^{|x|} \\[1ex]
& = & \sum_{x\in C}({1 \over d})^{|x|} \\[1ex]
& \le & \epsilon,
\end{eqnarray*}
contradicting the minimality of $C$ since clearly $C'\preceq C$.
$\Box$

We now proceed with the proofs of Theorems~\ref{th:gen ii} and~\ref{th:gen i}.

\noindent{\bf Proof of Theorem \ref{th:gen ii}.}
Since in Section~\ref{sub:sphere} the Heisenberg models have been shown to satisfy all of the more general hypotheses of this theorem, we need only prove the last statement of the theorem, where we have a given $J>0$, a given $\|\,\,\|$ on ${\cal S}_J$ and a given ${\bf Op}_J$ satisfying the required conditions.

By~(\ref{eq:xi}), for any $\epsilon > 0$, there is an $\epsilon_0 > 0$ such that for all $k \leq B$ and all $h_1 , \ldots , h_k \in {\cal P}_+(J)$ with $\|h_i - 1\| \leq \epsilon_0$ for all $i$, we have that
\begin{equation} \label{eq:star2}
\| \Phi_k (h_1 , \ldots , h_k) - 1 \| \leq (1 + \epsilon) \sum_{i=1}^k \|h_i - 1\| \, .
\end{equation}
Choose $\epsilon > 0$ so that $(1 + \epsilon)^{-1} > {\rm br} (\Gamma) \cdot {\bf Op}_J$ and choose $\epsilon_0$ as above. By~(\ref{eq:xii}), we can choose $J'>0$ small enough so that $\| K_{J'} -1 \| \leq \epsilon_0 {\bf Op}_J$.
Use Lemma~\ref{lem:globalcut} to choose a sequence of cutsets $\{ C_n \}$ for which
$$\lim_{n \rightarrow \infty} \sum_{x \in C_n} [(1 + \epsilon) {\bf Op}_J ]^{|x|} = 0$$
and for all $n$ and all $v \in C_n^i \cup C_n$,
\begin{equation} \label{eq:star43}
\sum_{x \in C_n \cap \Gamma (v)} [(1 + \epsilon) {\bf Op}_J ]^{|x| - |v|} \leq 1.
\end{equation}
We now show by induction that for all $n$ and all $v \in C_n^i$,
\begin{equation} \label{eq:ind}
\|f^{J' ,J, +}_{C_n , v} - 1\| \leq \epsilon_0 \sum_{x \in C_n \cap \Gamma (v)} [(1 + \epsilon) {\bf Op}_J ]^{|x| - |v|} \, .
\end{equation}
Indeed, from Lemma~\ref{lem:rec}, letting $w_1 , \ldots , w_k$ be the children of $v$,
$$\|f^{J',J , +}_{C_n , v} - 1\| = \| \Phi ({\cal K}_{J_1''} f^{J',J , +}_{C_n , w_1}, \ldots , {\cal K}_{J_k''} f^{J',J , +}_{C_n , w_k}) - 1 \| \, $$
where $J_i''$ is $J$ if $w_i\in C_n^i$ and $J'$ otherwise. When $w_i \in C_n$, the choice of $J'$ guarantees that $\|{\cal K}_{J_i''} f^{J',J , +}_{C_n , w_i} - 1\| \leq \epsilon_0{\bf Op}_J\leq \epsilon_0$, while when $w_i \notin C_n$, the induction hypothesis together with~(\ref{eq:star43}) guarantees that $\|f^{J',J , +}_{C_n , w_i} - 1\| \le \epsilon_0$, which implies that $\|{\cal K}_{J_i''} f^{J',J , +}_{C_n , w_i} - 1\| \le \epsilon_0$ by~(\ref{eq:xiii}).
Hence, from~(\ref{eq:star2}),
$$\|f^{J',J , +}_{C_n , v} - 1\| \leq (1 + \epsilon) \sum_{w_i \in C_n} \| {\cal K}_{J'} f^{J',J , +}_{C_n , w_i} - 1 \| + (1 + \epsilon) \sum_{w_i \notin C_n} \|{\cal K}_J f^{J',J , +}_{C_n , w_i} - 1\|.$$
The summands in the first sum are at most $\epsilon_0 {\bf Op}_J$ while those in the second sum are, by~(\ref{eq:ixix}), at most ${\bf Op}_J \|f^{J',J , +}_{C_n , w_i} - 1\| $. Therefore, using the induction hypothesis on the second term, we obtain
\begin{eqnarray*}
\|f^{J',J , +}_{C_n , v} - 1\| & \leq & \sum_{i=1}^k (1 + \epsilon) \epsilon_0 {\bf Op}_J \sum_{x \in C_n \cap \Gamma (w_i)} \left [ (1 + \epsilon ) {\bf Op}_J \right ]^{|x| - |w_i|} \\[2ex]
& = & \epsilon_0 \sum_{x \in C_n \cap \Gamma (v)} \left [ (1 + \epsilon) {\bf Op}_J \right ]^{|x| - |v|} ,
\end{eqnarray*}
completing the induction.

Finally, the theorem follows by taking $v = o$, letting $n \rightarrow \infty$, and using~(\ref{eq:present iii}).
$\Box$

For the proof of Theorem~\ref{th:gen i}, it is easiest to isolate the following two lemmas.
\begin{lem} \label{lem:first}
Under the more general hypotheses of Theorem~\ref{th:gen i} (with a given $J >0$, a given $\|\,\,\|$ on ${\cal S}_J$, a given $L$ and a given $\rho$ satisfying the required conditions), for all $\alpha>0$, there exists $\beta>0$ so that if $h_1,\ldots, h_k\in {\cal P}_+(J)$ with $k\le B$ and $\|h_i-1\| < \beta$ for each $i$, then
$$ L \left [ (\Phi_k ({\cal K}_J h_1 , \ldots , {\cal K}_J h_k)) - 1 \right ] \geq {1 \over 1 + \alpha} \sum_{i=1}^k L ({\cal K}_J h_i - 1) .$$
\end{lem}

\noindent{\bf Proof.}
In~(\ref{eq:xi}), choose $\beta<1$ so that
$$o(h)\le h \left(1-{1 \over (1+\alpha)}\right) {c_4 \over c_3} $$
for all $h\in (0,\beta)$, with $c_3$ and $c_4$ as in~(\ref{eq:(c)}) and~(\ref{eq:(b)}). If $h_1,\ldots, h_k\in {\cal P}_+(J)$ are such that $\|h_i-1\| < \beta$, then $\|{\cal K}_J h_i-1\| < \beta$ by~(\ref{eq:xiii}). We can now write
\begin{equation} \label{eq:U1}
\Phi_k({\cal K}_J h_1,\ldots,{\cal K}_J h_k)-1- {1 \over (1+\alpha)}\sum_{i=1}^k ({\cal K}_J h_i-1)
\end{equation}
as
\begin{equation} \label{eq:U2}
\left(1-{1 \over (1+\alpha)}\right)\sum_{i=1}^k ({\cal K}_J h_i-1) +U
\end{equation}
where by assumption,
\begin{eqnarray} \label{eq:new03}
\|U\| & \le & o(\max_i \|{\cal K}_J h_i-1 \|) \\[2ex]
& \le & \left(1-{1 \over (1+\alpha)}\right) {c_4 \over c_3} \max_i \|{\cal K}_J h_i-1 \| \nonumber \\[2ex]
& \le & \left(1-{1 \over (1+\alpha)}\right){c_4 \over c_3} \sum_{i=1}^k \|{\cal K}_J h_i-1 \|. \nonumber
\end{eqnarray}
Letting $a$ be the quantity~(\ref{eq:U1}), we see that
\begin{eqnarray*}
L(a) & = & L \left [ \left ( 1 - {1 \over (1+\alpha)} \right ) \sum_{i=1}^k ({\cal K}_J h_i-1) \right ] + L(U) \\
& \ge & \left ( 1 - {1 \over (1+\alpha)} \right ) c_4 \sum_{i=1}^k \|{\cal K}_J h_i-1\| -c_3 \|U\| \\
& \geq & 0
\end{eqnarray*}
by~(\ref{eq:(c)}),~(\ref{eq:(b)}) and~(\ref{eq:new03}), which is the conclusion of the lemma.
$\Box$

The next lemma tells us that in ``one step'', we can't move from being ``far away'' from uniform to being ``very close'' to uniform.

\begin{lem} \label{lem:second}
Under the more general hypotheses of Theorem~\ref{th:gen i} (with a given $J >0$, a given $\|\,\,\|$ on ${\cal S}_J$, a given $L$ and a given $\rho$ satisfying the required conditions), for all $\beta>0$ and $J'\in (0,J]$, there exists a $\gamma<\beta$ such that if $\| \Phi_k ( {\cal K}_{J_1''} h_1 , \ldots , {\cal K}_{J_k''} h_k) - 1 \| <\gamma$ with $h_1,\ldots, h_k\in {\cal P}_+(J)\cup \{\delta_{{\hat{0}}}\}$ and $k\le B$, and with $J_i''$ being $J$ if $h_i\in {\cal P}_+(J)$ and $J'$ if $h_i=\delta_{{\hat{0}}}$, then each $h_i$ is not $\delta_{{\hat{0}}}$ and $\sum_{i=1}^k \| h_i - 1\| < \beta$.
\end{lem}

\noindent{\bf Proof.}
Choose $\gamma\in (0,\min\{\beta,1/c_1\})$ so that
$$ {2c_1 c_3 B\gamma \over\rho c_2 c_4 (1-c_1\gamma)} < \beta $$
and
$$ \min\{||K_J-1||,||K_{J'}-1||\} > {2c_1 \gamma \over (1-c_1\gamma)c_2} $$
where $c_1,c_2,\rho,c_3$ and $c_4$ come from~(\ref{eq:present iii}),~(\ref{eq:present iii'}),~(\ref{eq:(a)}),~(\ref{eq:(c)}) and~(\ref{eq:(b)}) respectively.
We first show that if $h_1,\ldots,h_k\in{\cal P}_+(J)$, with $k\le B$, then $\| \Phi_k (h_1 , \ldots , h_k) - 1 \| <\gamma< 1/c_1$ implies that for all $i$,
$$ \| h_i - 1 \| <{2c_1\gamma \over (1-c_1\gamma)c_2}. $$
[Proof:
$$ ||h_i-1||\le c_2^{-1}||h_i-1||_\infty \le c_2^{-1}\left({\max h_i\over \min h_i}-1\right) $$
$$ \le c_2^{-1}\left({\max \prod_i h_i\over \min \prod_i h_i}-1\right) = c_2^{-1}\left({\max \Phi_k (h_1 , \ldots , h_k)\over\min \Phi_k (h_1 , \ldots , h_k) }-1\right) $$
where the second inequality is straightforward and the third inequality comes from~(\ref{eq:xiiii}). Next, $\| \Phi_k (h_1 , \ldots , h_k) - 1 \| <\gamma< 1/c_1$ implies $|| \Phi_k (h_1 , \ldots , h_k) - 1 ||_\infty\le c_1\gamma$, which implies the last expression is at most
$$ c_2^{-1}\left({1+c_1\gamma\over 1-c_1\gamma}-1\right)= c_2^{-1}{2c_1\gamma\over 1-c_1\gamma}. $$]
It follows that if $\| \Phi_k ( {\cal K}_{J_1''} h_1 , \ldots , {\cal K}_{J_k''} h_k) - 1 \| <\gamma$, then
$$ \| {\cal K}_{J_i''} h_i-1 \| <{2c_1 \gamma \over (1-c_1\gamma)c_2} $$
for each $i$, which implies that $h_i\in{\cal P}_+(J)$ (as opposed to being $\delta_{{\hat{0}}}$). Hence $J_i''$ is $J$ for all $i$. Now from~(\ref{eq:(a)})--(\ref{eq:(b)}) we have
$$||{\cal K}_{J} h_i - 1|| \geq {\rho c_4 ||h_i - 1|| \over c_3}$$
and we obtain the conclusion of the lemma.
$\Box$

\noindent{\bf Proof of Theorem~\ref{th:gen i}.}
Since in Section~\ref{sub:sphere} the Heisenberg models have been shown to satisfy all of the more general hypotheses of this theorem, we need only prove the last statement of the theorem, where we have a given $J >0$, a given $\|\,\,\|$ on ${\cal S}_J$, a given $L$ and a given $\rho$ satisfying the required conditions. Choose an $\alpha > 0$ so that ${\rm br} (\Gamma) \cdot \rho > 1 + \alpha$.
Choosing $\beta$ from Lemma \ref{lem:first}, we have, under our assumptions, that for all $h_1 , \ldots , h_k \in {\cal P}_+(J)$ with $k\le B$ and $\|h_i-1\| < \beta$ for each $i$,
\begin{equation} \label{eq:star4old}
L \left [ (\Phi_k ({\cal K}_J h_1 , \ldots , {\cal K}_J h_k)) - 1 \right ] \geq {1 \over 1 + \alpha} \sum_{i=1}^k L ({\cal K}_J h_i - 1) \geq {\rho \over 1 + \alpha} \sum_{i=1}^k L (h_i - 1).
\end{equation}
Now, if there is no robust phase transition, then by~(\ref{eq:present iii'}) there must exist $J'\in (0,J]$ and a sequence of cutsets $\{ C_n \}$ going to infinity such that $\lim_{n \rightarrow \infty} \|f^{J',J , +}_{C_n , o} - 1\| = 0$. Using Lemma \ref{lem:second}, choose $\gamma<\beta$ corresponding to $\beta$ and $J'$. Next, by our choice of $\alpha$, we have
$$I := \inf_C \sum_{x \in C} \left ( {\rho \over 1 + \alpha} \right)^{|x|} > 0$$
where the infimum is over all cutsets. We now choose $n$ so that
$$ \|f^{J',J , +}_{C_n , o} - 1\| < \min \left\{ \gamma , {c_4 \gamma I \over c_3} \right\}, $$
where $c_3$ and $c_4$ come from~(\ref{eq:(c)}) and~(\ref{eq:(b)}) respectively. We then define $\Gamma'$ to be the component of the set
$$\{ v \in C_n^i : \|f^{J',J , +}_{C_n , v} - 1\| < \gamma \}$$
that contains $o$ and let $C$ be the exterior boundary of $\Gamma'$ (that is, the set of $x \notin \Gamma'$ neighboring some $y \in \Gamma'$). By the choice of $\gamma$, $C \subseteq C_n^i$ and for each $v\in C^i\cup C$, the density $f^{J',J , +}_{C_n , v}$ is in
$$ {\cal P}_+(J) \cap \{f: \|f-1\| < \beta\}.
$$ Using~(\ref{eq:star4old}) and induction, we see that $$L (f^{J',J , +}_{C_n , o} - 1) \geq \sum_{x \in C} \left ( {\rho \over 1 + \alphapha} \right )^{|x|} L (f^{J',J , +}_{C_n , x} - 1) .$$ By definition of $\Gamma' , C$ and $I$ and the fact that $L (f-1) \geq c_4 \|f - 1\|$ on ${\bf P}P_+(J)$, we see that $$L (f^{J',J , +}_{C_n , o} - 1) \geq c_4 \gamma I .$$ Hence $$\|f^{J',J , +}_{C_n , o} - 1\| \geq {c_4 \over c_3} \gamma I .$$ This contradicts the choice of $n$, proving that there is indeed a robust phase transition. $ \Box$ \setcounter{equation}{0} \section{Analysis of specific models} \labelel{sec:anal} \subsection{Heisenberg models} \labelel{sub:spherical} For the Heisenberg models, recall that ${\bf S} = S^d$, $d \geq 1$, and $H(x,y) = - x \cdot y$. The operator ${\cal K}J$ is convolution with the function $K_J (x) = c e^{ J x \cdot {\hat{0}}}$, where $c$ is a normalizing constant. \noindent{\bf Proof of Theorem~\bar{d}eltarotect{\ref{th:main}}.} A change of variables shows that $L(K_J)={\cal R}dJ$ and so the result follows from Theorems~\ref{th:gen ii} and~\ref{th:gen i}. $ \Box$ For the rotor model, we now prove the equivalence of SB and SB+. \noindent{\bf Proof of Proposition~\bar{d}eltarotect{\ref{pr:rotor equiv}}.} We have already seen the representation $$f = \sum_{n \geq 0} a_n (f) \bar{d}eltasi_n, $$ for functions $f \in L^2 ({\bf S})g$. In the case of the rotor model, where ${\bf S} = S^1$ and we take ${\hat{0}}$ to be $(1,0)$, the space $L^2 ({\bf S})g$ is the space of even functions of $\thetaheta \in [-\bar{d}eltai , \bar{d}eltai]$ and $\bar{d}eltasi_n = \cos (n \thetaheta)$. We now turn to the full Fourier decomposition $f = \sum_{n \in Z\!\!\!Z} b_n (f) e^{i n \thetaheta}$, where $b_n (f) = \int_0^{2\bar{d}eltai} f(\thetaheta) e^{-i n \thetaheta} \, d\thetaheta/ (2 \bar{d}eltai)$. Let $C$ be any cutset and $\bar{d}elta$ be a set of boundary conditions on $C$. Let ${\cal J}$ be any set of interaction strengths. 
It suffices to show that
$$||f^{{\cal J} , \delta}_{C,w} - 1||_\infty \leq ||f^{{\cal J} , +}_{C,w}-1||_\infty$$
for all $w\in C^i$. For $v \in C$ and $n\in Z\!\!\!Z$, let $x_{v,n} = b_n (K_{{\cal J}(e) , \delta (v)})$, where $e$ is the edge from $v$ to its parent.

\noindent{\em Claim}: For all $y\in C^i$, the Fourier coefficients $\int_0^{2\pi} e^{i n \theta} \, d\mu^{{\cal J} , \delta}_{C,y}(\theta)$, which we denote by $\{ u_{y , n} : n \in Z\!\!\!Z \}$, are sums of monomials in $\{ x_{v,n} \}_{v\in C, n\in Z\!\!\!Z}$ with nonnegative coefficients.

{\it Proof:} Let $w \in C^i$ have children $w_1 , \ldots , w_r \in C^i$ and $w_{r+1} , \ldots , w_k \in C$. Then the Fourier coefficients $\{ u_{w,n} : n \in Z\!\!\!Z \}$ are the convolution of the $k - r$ series $\{ x_{v , n} : n \in Z\!\!\!Z \}$ as $v$ ranges over $w_{r+1} , \ldots , w_k$, also convolved with the series $\{ b_n (K_{{\cal J} (\overline{wv})}) u_{v,n} : n \in Z\!\!\!Z \}$ as $v$ ranges over $w_1 , \ldots , w_r$. Since $b_n (K_J) \geq 0$, this establishes the claim via induction and the fundamental recursion.

Now write $x_{v,n}^+$ for the Fourier coefficients $b_n (K_{{\cal J} (e)})$, where $e$ is as before. Since $K_{J , e^{i \alpha}} (x) = K_J (e^{-i \alpha} x)$, it follows that
$$|x_{v,n}| = |x_{v,n}^+| .$$
But $x_{v,n}^+$ is real because $K_J$ is even, and has been shown to be nonnegative. Thus
$$|x_{v,n}| = x_{v,n}^+ ,$$
and it follows from the claim that each $u_{w,n}$ has modulus bounded above by the corresponding $u_{w,n}^+$ when plus boundary conditions are taken. Hence
$$ ||f^{{\cal J} , \delta}_{C,w} - 1||_\infty \le ||f^{{\cal J} , \delta}_{C,w} - 1||_A \leq \sum_{n \neq 0} |u_{w,n}| \leq \sum_{n \neq 0} u_{w,n}^+ = ||f^{{\cal J} , +}_{C,w} - 1||_A = ||f^{{\cal J} , +}_{C,w} - 1||_\infty,$$
proving the proposition.
$\Box$

\noindent{\em Remark:} Although we have used special properties of the Fourier decomposition on $L^2 (S^1)$, there exist similar decompositions for $S^d$. We believe that a parallel argument can probably be constructed, bounding the modulus of the sum of the coefficients of spherical harmonics of a given order by the coefficients one obtains for the analogous monomials in the values $a_n (K_{{\cal J} (e)})$, whose coefficients are necessarily nonnegative by the nonnegativity of the connection coefficients $q^r_{ij}$. Thus we are led to state:

\begin{pblm} \label{pblm:all spheres}
Prove a version of Proposition~\ref{pr:rotor equiv} for general Heisenberg models on trees.
\end{pblm}

\subsection{The Potts model} \label{sub:potts}

\noindent{\bf Proof of Theorem}~\ref{th:potts}.
We will obtain this result from Theorems~\ref{th:gen ii} and~\ref{th:gen i}. For (i), letting $||\,\,||$ be the $L_\infty$ norm on ${\cal S}_J$ and ${\bf Op}_J=\alpha_J$, all of the hypotheses in Theorem~\ref{th:gen ii} except~(\ref{eq:ixix}) are clear. The function $K_J$ is given by
$$K_J (x) = c \exp (J (2 \delta_{x,0} - 1))$$
where $c = (e^J + (q-1) e^{-J})^{-1}$. The operator ${\cal K}_J$ is linear and
$${\cal K}_J \delta_j = c e^J \delta_j + \sum_{i \neq j} c e^{-J} \delta_i \, .$$
Hence in the basis $\delta_0 , \ldots , \delta_{q-1}$, the matrix representation of ${\cal K}_J$ is $c (e^J - e^{-J}) I + c e^{-J} M$, where $M$ is the matrix of all ones. On the orthogonal complement of the constant functions, ${\cal K}_J$ is $c (e^J - e^{-J}) I$, and~(\ref{eq:ixix}) follows, proving (i) by an application of Theorem~\ref{th:gen ii}.

For (ii), let $||\,\,||$ be the same as above, $\rho=\alpha_J$ and $L(h)=h(0)-h(1)$.
It is then immediate to check that all of the hypotheses in Theorem~\ref{th:gen i} hold and we may conclude (ii) by an application of Theorem~\ref{th:gen i}. $ \Box$ \setcounter{equation}{0} \section{Proof of Theorem~\protect\ref{th:0hd}.} \label{sec:zero} By Proposition~\ref{prop:SB=} and the fact that any subtree of a tree with branching number 1 also has branching number 1, it suffices to show: \begin{quote} For any $\Gamma$ with $\textstyle {br} (\Gamma) = 1$, and any bounded ${\cal J}$, there is a sequence of cutsets $\{ C_n \}$ such that for any sequence $\{ \delta_n \}$ of boundary conditions on $\{ C_n \}$, \[ \lim_{n\to\infty}\| f^{{\cal J} , \delta_n}_{C_n , o} - 1\|_\infty = 0 . \] \end{quote} It is convenient to work with a different measure of size, the \emph{Max/Min} measure, defined as follows. (This arose already in the proof of Lemma~\ref{lem:second}.) For any continuous strictly positive function $f$ on ${\bf S}$, let \[ \|f\|_M := \frac{\max_{x \in {\bf S}} f(x)}{\min_{x \in {\bf S}} f(x)} \, . \] It is immediate to see: \begin{lem} \label{lem:mmequiv} For any sequence $\{ h_n \}$ of continuous probability densities, $\|h_n - 1\|_\infty \rightarrow 0$ if and only if $\log \|h_n\|_M \rightarrow 0$. \end{lem} Next, we examine the effect of ${\cal K}_J$ on $\|f\|_M$.
\begin{equation}gin{lem} \labelel{lem:unifmm} For any statistical ensemble $({\bf S} , G , H)$, any $J_{\rm max}$ and any $T > 0$ there is an $\mbox{\rm \small e}psilonsilon > 0$ such that for any continuous strictly positive function $f$ with $\|f\|_M \leq T$, and any $J \leq J_{\rm max}$, $$\log \|{\cal K}J f\|_M \leq (1 - \mbox{\rm \small e}psilonsilon) \log \| f \|_M \, .$$ \mbox{\rm \small e}nd{lem} \noindent{\bf Proof.} Fix $H, J$ and $f$ and assume without loss of generality that $\int f \, \bar{d}x = 1$ since the {\mbox{\rm \small e}m Max/Min} measure is unaffected by multiplicative constants. Let $[a,b]$ be the smallest closed interval containing the range of $f$ and $[c,d]$ contain the range of $K_J$ with $a,c >0$. Since $f$ is a probability density, $a < 1 < b$ (we rule out the trivial case $f \mbox{\rm \small e}quiv 1$). Since $K_J = c + (1-c) g$ for some probability density $g$, it follows that for any $x \in {\bf S}$, $$c + (1-c) a \leq {\cal K}J f(x) \leq c + (1-c) b .$$ As $J$ varies over $[0 , J_{\rm max}]$, $\min_x K_J (x)$ is bounded below by some $c_0 > 0$, so for all such $J$, $$c_0 + (1-c_0) a \leq {\cal K}J f(x) \leq c_0 + (1-c_0) b$$ and so $$\| {\cal K}J f \|_M \leq {c_0 + (1 - c_0) b \over c_0 + (1 - c_0) a} \, .$$ Setting $R = \|f\|_M - 1$, we have $b = (1+R) a$ and so $$\| {\cal K}J f\|_M \leq {c_0 + (1 - c_0) (1 + R) a \over c_0 + (1 - c_0) a} = 1 + R {(1 - c_0) a \over c_0 + (1 - c_0) a} \leq 1 + R (1 - c_0) \, .$$ Thus \begin{equation}gin{equation} \labelel{eq:u} \|{\cal K}J f\|_M \leq 1 + (1 - c_0) \left ( \|f\|_M - 1 \right ) \, . \mbox{\rm \small e}nd{equation} The function $\log (1 + (1 - c_0) u) / \log (1 + u)$ is bounded above by some $1 - \mbox{\rm \small e}psilonsilon < 1$ as $u$ varies over $(0 , T-1]$, and setting $u = \|f\|_M - 1$ in~(\ref{eq:u}) gives $$\log \| {\cal K}J f\|_M \leq \log (1 + (1 - c_0) (\|f\|_M - 1)) \leq (1 - \mbox{\rm \small e}psilonsilon) \log \|f\|_M \, ,$$ proving the lemma. 
$ \Box$ Proceeding with the proof of Theorem~\ref{th:0hd}, let $C$ be a cutset with no vertices in the first generation, $$\bar{d}eltaartial C = \{ v \in C^i : \mbox{\rm \small e}xists w \in C~{\rm with }\,\, v \thetao w \}, $$ and $\bar{d}elta$ be defined on $C$. Clearly, for continuous strictly positive functions $h_1 , \ldots , h_k$, $$ \| \bar{d}eltaoi (h_1 , \ldots , h_k) \|_M \leq \bar{d}eltarod_{i=1}^k \| h_i \|_M \, . $$ We have also previously seen (Lemma~\ref{lem:unifbd}) that all densities that arise are uniformly bounded away from 0 and $\infty$ and hence there is a uniform bound on the $\| \,\, \|_M $ that arise. We can therefore choose $\mbox{\rm \small e}psilonsilon$ from Lemma~\ref{lem:unifmm}. Next for any $v\in C^i\setminus \bar{d}eltaartial C$, applying the fundamental recursion gives \begin{equation}gin{eqnarray*} \log \|f^{{\cal J} , \bar{d}eltata}_{C,v} \|_M & = & \log \| \bar{d}eltaoi ({\cal K}_{{\cal J} (\overline{vw_1})} f^{{\cal J} , \bar{d}eltata}_{C , w_1} , \ldots , {\cal K}_{{\cal J} (\overline{vw_k})} f^{{\cal J} , \bar{d}eltata}_{C , w_k} \|_M \\[2ex] & \leq & \sum_{i=1}^k \log \| {\cal K}_{{\cal J} (\overline{vw_i})} f^{{\cal J} , \bar{d}eltata}_{C , w_i} \|_M \\[2ex] & \leq & \sum_{i=1}^k (1 - \mbox{\rm \small e}psilonsilon) \log \| f^{{\cal J} , \bar{d}eltata}_{C , w_i} \|_M. \mbox{\rm \small e}nd{eqnarray*} Working backwards, we find that for any cutset $C$, $$\log \|f^{{\cal J} , \bar{d}eltata}_{C , o}\|_M \leq \sum_{w \in \bar{d}eltaartial C} (1 - \mbox{\rm \small e}psilonsilon)^{|w|} \log \|f^{{\cal J} , \bar{d}eltata}_{C , w}\|_M \, .$$ Since $\thetaextstyle {br} (\Gamma) = 1$ one can choose a sequence of cutsets $\{ C_n \}$ such that $\sum_{w \in \bar{d}eltaartial C_n} (1 - \mbox{\rm \small e}psilonsilon)^{|w|} \rightarrow 0$. 
The uniform bound on $\|f^{{\cal J} , \bar{d}eltata}_{C , w}\|_M$ implies that for any sequence of functions $\bar{d}elta_n$ on $C_n$, $$ \lim_{n\thetao\infty}\log \|f^{{\cal J} , \bar{d}eltata_n}_{C_n , o} \|_M =0, $$ which along with Lemma~\ref{lem:mmequiv} proves the theorem. $ \Box$ Olle H\"aggstr\"om pointed out to us that this result could also be obtained using ideas from disagreement percolation. \setcounter{equation}{0} \section{Proof of Theorem {\bar{d}eltarotect{\ref{th:2trees}}}.} \labelel{sec:potts} While we assume that $q$ is an integer, the case of nonintegral $q$ can be made sense of via the random cluster representation, and it is worth noting here that the break between $q=2$ and $q=3$ happens at $q = 2 + \mbox{\rm \small e}psilonsilon$. See~\cite{Ha} for a discussion of the qualitative differences between the random cluster model on a tree when $q \leq 2$ as opposed to $q > 2$. \begin{equation}gin{lem} \labelel{lem:robust} Assume that all of the hypotheses of Theorem~\ref{th:gen ii} are in force (in particular,~(\ref{eq:ixix}) and $\thetaextstyle {br} (\Gamma) \cdot {\bf Op}_J < 1$ hold and so there is no RPT for the parameter $J$) and in addition that $\sup_{y\in {\bf S}} \|K_{J,y}\| < \infty$ and~(\ref{eq:ixix}) holds for all $f\in{\bf P}P(J)$ (instead of just ${\bf P}P_+(J)$). Then there is a tree $\Gamma'$ with $\thetaextstyle {br} (\Gamma') = \thetaextstyle {br} (\Gamma)$ such that $\Gamma'$ has no PT for the parameter $J$. \mbox{\rm \small e}nd{lem} \noindent{\bf Proof.} We mimic the proof of Theorem~\ref{th:gen ii}. Choose $\mbox{\rm \small e}psilonsilon$, $\mbox{\rm \small e}psilonsilon_0$ and cutsets $\{ C_n \}$ as in the proof of Theorem~\ref{th:gen ii} where we can assume that the cutsets $\{ C_n \}$ are disjoint. Choose an integer $m$ sufficiently large so that the $m$-fold iterated convolution operator ${\cal K}J^m$ satisfies $||{\cal K}J^m \bar{d}elta_y-1|| \leq \mbox{\rm \small e}psilonsilon_0 {\bf Op}_J$. 
For each increasing sequence $\{ n(k) : k = 1 , 2 , \ldots \}$ of integers, define a tree $\Gamma'$ by replacing each edge from an element of $C_{n(k)}$ to its parent by $m$ edges in series, for all cutsets in the sequence $\{ C_{n(k)} \}$. It is not too great an abuse of notation to let $C_n$ denote the cutset of $\Gamma'$ consisting of the same vertices as before. It is now possible to establish~(\ref{eq:ind}) for all $v \in D$, where $D$ is the set of vertices in $\Gamma'$ that are in $C^i$ and in $\Gamma$ (i.e., are not in a chain of parallel edges that was added). The only adjustment in the proof is as follows. Use Lemma~\ref{lem:rec} to represent $f^{J , +}_{C_n , v}$ in terms of $f^{J,+}_{C_n , w}$ where $w$ are the children of $v$ in $\Gamma$ rather than in $\Gamma'$, i.e., we leap the whole chain of $m$ edges at once. Then the case $w \in C_n$ that was handled by the choice of $J'$ is replaced by a case $w \in \Gamma' \setminus \Gamma$, which is handled by the choice of $m$. In fact, (\ref{eq:ind}) holds when + is replaced by any boundary condition as the exact same proof shows. By choosing $\{ n(k) \}$ sufficiently sparse, we can ensure that $\thetaextstyle {br} (\Gamma') = \thetaextstyle {br} (\Gamma)$. Fixing any such choice of $\{ n(k) \}$, it follows that there is no phase transition by the above together with Proposition~\ref{prop:SB=}. $ \Box$ We proceed now with the description of a counterexample. For $\Gamma_1$, we choose the homogeneous binary tree, where each vertex has precisely 2 children. Recall from Section~\ref{sub:potts} that under + boundary conditions, the functions $f^{J , +}_{C , v}$ all lie in a one-dimensional set. The most convenient parameterization for the segment is by the log-likelihood ratio of state ${\hat{0}}$ to the other states. Thus the probability measure $a \bar{d}eltata_0 + \sum_{i=1}^{q-1} ((1-a)/(q-1)) \bar{d}eltata_i$ is mapped to the value $\log [(q-1) a / (1-a)]$. 
Let $g(v)$ denote the log-likelihood ratio at $v$ under some interaction strength and boundary conditions. The recursion~(\ref{eq:recurse}) of Lemma~\ref{lem:rec} boils down to $$g(v) = \sum_{v \rightarrow w} \bar{d}eltahi (g(w)) ; \;\;\;\; \bar{d}eltahi (z) := \log { p e^z + 1-p \over {1-p \over q-1} e^z + (1 - {1-p \over q-1})} \, ,$$ where \begin{equation}gin{equation} \labelel{eq:p} p := e^J / (e^J + (q-1) e^{-J}) \, . \mbox{\rm \small e}nd{equation} Taking a Taylor expansion to the second order gives $$\bar{d}eltahi (z) = \left ( p - {1-p \over q-1} \right ) z + {1-p \over 2 (q-1)^2} [p(q-1)^2 - (q-1) + (1-p)] z^2 + O(z^3) .$$ To see that the second derivative is positive at 0 for $q > 2$, first take the $q$-derivative of the $z^2$ coefficient which is $[q+2p-3](1-p)/(2(q-1)^3)$. The definition of $p$ and the fact that $J > 0$ imply that $p > 1/q \geq 1/(2(q-1))$. Since $x+1/(x-1) -3 >0$ on $(2,\infty)$ and $2p> 1/(q-1)$, it follows that the $z^2$ coefficient has a positive $q$-derivative for $q \ge 2$, and is therefore positive for all $q > 2$. (This also implies that for $q\in (2-\bar{d}elta,2)$ for some $\bar{d}elta$, the function $\bar{d}eltahi$ is concave (see~\cite{PP} for a detailed analysis of the critical case $q=2$).) The Taylor expansion gives $\bar{d}eltahi'(0) = p - (1-p)/(q-1)$. Note that $p_0: = (q+1)/(2q)$ satisfies $p_0 - (1 - p_0) / (q-1) = 1/2$. The value of $p_0$ is chosen to make $\bar{d}eltahi' (0) = 1/2$; by convexity of $\bar{d}eltahi$ near zero, there is an interval $I := (p_0 - \mbox{\rm \small e}psilonsilon , p_0)$ such that for $p \in I$, the equation $\bar{d}eltahi (z) = z/2$ has a positive solution, call it $z(p)$. Take $\mbox{\rm \small e}psilonsilon>0$ so small that $p_0 -\mbox{\rm \small e}psilonsilon > 1/q$. For any $1>p > 1/q$ there is a unique $J > 0$ such that~(\ref{eq:p}) holds. 
If $p \in I$, then $z(p)$ is a fixed point for the function $2 \bar{d}eltahi$ and it is easy to see by induction that under + boundary conditions on the binary tree, one will always have $g(v) \geq z(p)$. Thus we have shown that $\Gamma_1$ has a phase transition for any $J$ such that $p \in I$. To find $\Gamma_2$, we examine the connection between $p_0$ and $\| {\cal K}J \|$ where for the rest of the proof, the operator norm refers to the $L^\infty$ norm on the orthogonal complement of the constants. Observe that $$p - {1-p \over q-1} = {e^J \over e^J + (q-1) e^{-J}} - {e^{-J} \over e^J + (q-1) e^{-J}} = \| {\cal K}J \|$$ by the computation in Section~\ref{sub:potts}. Thus $p_0$ is chosen to make $\| {\cal K}J \| = 1/2$ and for any $p \in I$, $\| {\cal K}J \| < 1/2$. Fix any $J$ so that $p \in I$, and let $\Gamma$ be any tree with $$2 = \thetaextstyle {br} (\Gamma_1) < \thetaextstyle {br} (\Gamma) < \| {\cal K}J \|^{-1}.$$ Let $\Gamma'$ be as in Lemma~\ref{lem:robust} and set $\Gamma_2 = \Gamma'$. Then there is no phase transition on $\Gamma_2$ for the chosen parameters, and since we have seen there is a phase transition for $\Gamma_1$, this completes the proof of Theorem~\ref{th:2trees}. $ \Box$ \noindent {\bf Acknowledgements.} We thank Richard Askey for discussions and showing us the proof of Lemma~\ref{lem:incr}, J\"{o}ran Bergh, Yuval Peres and Paul Terwilliger for discussions, Anton Wakolbinger for providing us with reference \cite{E} and the referee for a correction and some suggestions. \begin{equation}gin{thebibliography}{9} \bibitem{AVLF} Adel'son-Vel'skii, G., Veisfeiler, B., Leman, A. and Faradzev, I. (1969). Example of a graph without a transitive automorphism group. {\mbox{\rm \small e}m Soviet Math. Dokl.} {\bf 10} 440--441. \bibitem{ACCN} Aizenman, M., Chayes, J. T., Chayes, L. and Newman, C. M. (1988) Discontinuity of the magnetization in one--dimensional $1/|x-y|^2$ Ising and Potts models, {\mbox{\rm \small e}m J. Stat. Phy.} {\bf 50} 1--40. 
\bibitem{Askey74} Askey, R. (1974). {\mbox{\rm \small e}m Orthogonal Polynomials and Special Functions}. S.I.A.M. Regional conferences in applied mathematics no. 21, J.W. Arrowsmith, Ltd.: Bristol, England. \bibitem{BCN} Brouwer., A., Cohen, A. and Neumaier, A. (1989). {\mbox{\rm \small e}m Distance Regular Graphs}. Modern Surveys in Mathematics, Ser. 3, Bd. 18. Springer-Verlag: New York. \bibitem{Big} Biggs, N. (1993). {\mbox{\rm \small e}m Algebraic Graph Theory, 2nd Ed.} Cambridge University Press:Cambridge. \bibitem{C} Cassi, D. (1992). Phase transition and random walks on graphs: a generalization of the Mermin--Wagner theorem to disordered lattices, fractals, and other discrete structures. {\mbox{\rm \small e}m Phys. Rev. Lett.} {\bf 68} 3631--3634. \bibitem{E} Eisele, M. (1994). {\mbox{\rm \small e}m Phase transitions may be absent on graphs with transient random walks}. {\mbox{\rm \small e}m Unpublished manuscript.} \bibitem{EKPS} Evans, W., Kenyon, C., Peres, Y. and Schulman, L.J. (1998). Broadcasting on trees and the Ising model. {\mbox{\rm \small e}m Preprint.} \bibitem{F} Furstenberg, H. (1970). Intersections of Cantor sets and transversality of semigroups. In {\mbox{\rm \small e}m Problems in analysis. Sympos. in Honor of Salomon Bochner, Princeton Univ. (R. C. Gunning, ed.)} 41--59. Princeton Univ. Press, Princeton, N.J. \bibitem{Ge} Georgii, H.-O. (1988). {\mbox{\rm \small e}m Gibbs Measures and Phase Transitions}. de Gruyter: New York. \bibitem{Ha} H\"aggstr\"om, O. (1996). The random-cluster model on a homogeneous tree. {\mbox{\rm \small e}m Probab.\ Theory Related Fields} {\bf 104} 231--253. \bibitem{Lig2} Liggett, T. M. (1996). Multiple transition points for the contact process on a binary tree. {\mbox{\rm \small e}m Ann. Probab.} {\bf 24} 1675--1710. \bibitem{Ly1} Lyons, R. (1989). The Ising model and percolation on trees and tree-like graphs. {\mbox{\rm \small e}m Commun. Math. Phys.} {\bf 125} 337--353. \bibitem{Ly2} Lyons, R. (1990). 
Random walks and percolation on trees. {\mbox{\rm \small e}m Ann. Probab.} {\bf 18} 931--958 \bibitem{MW} Merkl, F., and Wagner, H. (1994). Recurrent random walks and the absence of continuous symmetry breaking on graphs. {\mbox{\rm \small e}m J. Stat. Phy.} {\bf 75} 153--165. \bibitem{MP} Monroe, J. L., and Pearce, P. A. (1979). Correlation inequalities for vector spin models. {\mbox{\rm \small e}m J. Stat. Phy.} {\bf 21} 615--633. \bibitem{N} Natterer, F. (1986). {\mbox{\rm \small e}m The Mathematics of Computerized Tomography}. John Wiley, Stuttgart. \bibitem{PS} Patrascioiu A. and Seiler, E. (1992). Phase structure of two-dimensional spin models and percolation. {\mbox{\rm \small e}m J. Stat. Phy.} {\bf 69} 573--595. \bibitem{Pem} Pemantle, R. (1992). The contact process on trees. {\mbox{\rm \small e}m Ann. Probab.} {\bf 20} 2089--2116. \bibitem{PP} Pemantle, R., and Peres, Y., Recursions on trees and the Ising model, {\mbox{\rm \small e}m Preprint.} \bibitem{R} Rainville, E. D. (1960). {\mbox{\rm \small e}m Special Functions}. MacMillan, New York. \bibitem{Sta} Stacey, A. (1996). The existence of an intermediate phase for the contact process on trees. {\mbox{\rm \small e}m Ann. Probab.} {\bf 24} 1711--1726. \bibitem{Ter98} Terwilliger, P. (1998). {\mbox{\rm \small e}m Unpublished lecture notes}. \mbox{\rm \small e}nd{thebibliography} \noindent \begin{equation}gin{tabbing} enoughs \= fffffffffffffffffffffenoughennnnnnnnnnnnnnn \= \kill \> Robin Pemantle \> Jeffrey E.~Steif \\ \> Department of Mathematics \> Department of Mathematics \\ \> University of Wisconsin-Madison \> Chalmers University of Technology \\ \> Van Vleck Hall \> S--41296 Gothenburg \\ \> 480 Lincoln Drive \> Sweden \\ \> Madison, WI 53706 \> [email protected] \\ \> [email protected] \mbox{\rm \small e}nd{tabbing} \mbox{\rm \small e}nd{document}
\begin{document} \title[Unreasonable ineffectiveness of mathematics in biology]{A mathematician's view of the unreasonable ineffectiveness of mathematics in biology.} \date{06 February 2021} \author{Alexandre Borovik} \address{Department of Mathematics, University of Manchester, UK} \email{[email protected]} \thanks{This is the last pre-publication version of the paper.} \maketitle \begin{abstract} This paper discusses, from a mathematician's point of view, the thesis formulated by Israel Gelfand, one of the greatest mathematicians of the 20th century, and one of the pioneers of mathematical biology:\\ \begin{quote}\small \emph{ There is only one thing which is more unreasonable than the unreasonable effectiveness of mathematics in physics, and this is the unreasonable ineffectiveness of mathematics in biology.}\\ \end{quote} \noindent \textsc{Disclaimer.} The author writes in his personal capacity and views expressed do not represent position of any other person, corporation, organisation, or institution. \end{abstract} \section{Israel Gelfand and his views on the role of mathematics in biology} Perhaps a disclaimer is necessary: I am a mathematician, not a biologist. I was invited to write this paper because I found myself in a strange role of a custodian of a particular saying by Israel Gelfand, one of the greatest mathematicians of the 20th century, and a pioneer of mathematical biology. My blog \cite{borovik-blog} became its principal source: \begin{quote}\small Eugene Wigner wrote a famous essay \cite{wigner} on the unreasonable effectiveness of mathematics in natural sciences. He meant physics, of course. There is only one thing which is more unreasonable than the unreasonable effectiveness of mathematics in physics, and this is the unreasonable ineffectiveness of mathematics in biology. \end{quote} I wish to confirm that, indeed, I heard these words from Israel Gelfand in private conversations (and more than once) in about 1995--2005. 
Beyond that, everything in this paper is my opinion or my reconstruction of Gelfand's view of science and life from my conversations with him; I understand of course that my assessments could be very lopsided. However, when writing this paper, I located and read papers of a few of Gelfand's earliest collaborators in biology and medicine~\cite{Arshavsky,Vasiliev,Vorobiev} and was pleased to discover that my reconstructions were concordant with their memories of him. This gives me hope that my story contains a reasonable approximation to the truth. I welcome two papers in this volume, by Blanchard and Longo \cite{Longo} and Rodin \cite{Rodin}, which touch on the role of mathematics in biology from perspectives close, but not identical, to mine. I found some further justification of my position in the book \emph{Contemporary Debates in Philosophy of Biology} \cite{Debates}, which lists 10 questions and, for each question, contains two papers with completely opposite answers. This paper is an attempt to answer the question \begin{quote} \emph{Should we accept Israel Gelfand's assessment of the role of mathematics in biology?} \end{quote} And my answer is \begin{quote} Yes, we should, for the time being: mathematics is still too weak for playing in biology the role it ought to play. \end{quote} I will be happy to see a detailed refutation of my thesis which addresses a number of my concerns raised in the present paper. Also I think that my stories told here are of general human interest and may even be useful for historians and philosophers of science. It was not my aim to write any kind of a systematic survey. References are sparse and random and used only as illustrations. \section{The story starts} I met Gelfand in 1991 at Rutgers University in the USA, and he immediately dragged me into a research collaboration which lasted for more than a decade and was partially summarised in our monograph~\cite{CoxeterMatroids}.
Because of Gelfand's peculiar style of work\footnote{See \cite{topaz}, a brief sketch of Gelfand written by a bemused American mathematician.}, I, although a pure mathematician myself, was often present during his long conversations with other mathematicians, with mathematical physicists, and with his biologist collaborators, first of all, with Alexander Kister. Gelfand's conversations with biologists were mostly about the spacial structure of proteins\footnote{\cite{gelfand1996} is one of the papers produced by Gelfand and Kister in that period. I understand nowadays this type of analysis is heavily computer-based and classified as computational biology.}. In our first conversation Gelfand asked me about my early childhood mathematical experience, and, specifically, about what moved me to study mathematics. In my answer I mentioned mathematics correspondence schools which sent to me cute little books on mathematics for schoolchildren, including some books for children written by him. Gelfand looked at me with suspicion and asked me what I had learned from his little books. My answer: \begin{quote}\small ``the general principle: always start solving a problem by looking at the simplest possible example'' \end{quote} delighted him. This was indeed his principle, he was proud of it, and he systematically applied it throughout all his mathematical work -- but perhaps not in biology: I will return to that later, in Section \ref{sec:adequate}. I had never heard the words ``mathematical biology'' from Gelfand -- he always used just ``biology''; in a similar vein, he never used the words ``mathematical physics'' or ``theoretical physics'' -- just ``physics''. However, Gelfand did a lot of highly nontrivial mathematics and was one of the most influential mathematicians of the 20th century -- in his thinking, the simplest possible example almost instantly led to very deep mathematics. 
He also was a mathematical physicist -- and of a very applied kind: for example, he was a mathematical advisor to Andrei Sakharov in the Soviet H-bomb project and was the head of the team which carried out critically important calculations \cite[p. 185]{sakharov}); not surprisingly, he had deep knowledge of quantum physics. Gelfand also was one of the pioneers of mathematical biology and had experience of 50 years of research in that absolutely new, at his time, area. Sakharov suggests in his memoirs \cite[p. 219]{sakharov} that the long years of Gelfand's work in mathematical biology may have been motivated by the tragic early death of his son of leukemia (biologists who worked with Gelfand\cite{Vasiliev, Vorobiev} give more detail of this deeply human story). \section{The controversy and its potential resolution} I hope I have explained why Gelfand's remark was not made off the cuff and deserves some attention. But his view was contrasted by the \textsc{Wikipedia}\footnote{\textsc{Wikipedia}, \href{https://en.wikipedia.org/wiki/Unreasonable_ineffectiveness_of_mathematics}{Unreasonable ineffectiveness of mathematics}, downloaded 07 Feb 2021.} with the equally strongly expressed opinion of the legendary Leonard Adleman (the `A' in RSA), a mathematician, computer scientist, and cryptographer: \begin{quote}\small [In the 1990's] biology was no longer the science of things that smelled funny in refrigerators (my view from undergraduate days in the 1960s [\dots ]). The field was undergoing a revolution and was rapidly acquiring the depth and power previously associated exclusively with the physical sciences. Biology was now the study of information stored in DNA -- strings of four letters: A, T, G, and C and the transformations that information undergoes in the cell. There was mathematics here! \cite[p. 14]{adleman} \end{quote} I agree, there is mathematics there. DNA computing, pioneered by Adleman, is a part of mathematics and is fantastic new computer science. 
But his story is more about application of biology to computer science than application of mathematics to biology. The same could perhaps be said about some other recent development, say, the study of ``artificial life'' \cite{kovitz}. Also, we have to take into account the fantastic progress of biology, and especially genomics, over the last 20 years, which perhaps makes Gelfand's thesis outdated. It suffices to mention a very recent example: a detailed epigenomic map of non-protein coding segments of human DNA associated with human diseases\footnote{From \href{https://news.mit.edu/2021/epigenomic-map-reveals-circuitry-human-disease-regions-0203}{Epigenomic map reveals circuitry of 30,000 human disease regions, MIT News of February 3, 2021}: \begin{quote}\footnotesize What we're delivering is really the circuitry of the human genome. Twenty years later, we not only have the genes, we not only have the noncoding annotations, but we have the modules, the upstream regulators, the downstream targets, the disease variants, and the interpretation of these disease variants \end{quote} -- says Manolis Kellis, a professor of computer science, a member of MIT's Computer Science and Artificial Intelligence Laboratory and of the Broad Institute of MIT and Harvard, and the senior author of the new study.} \cite{Boix}. However, \begin{itemize} \item Gelfand's thesis deserves a discussion. It should, and can be, discussed without undermining in any way the successes and heroic efforts of mathematical biologists (Gelfand, after all, was one of them) and bioinformaticians. \item In his paper, Wigner had in mind pretty highbrow mathematics -- he himself is famous for classifying elementary particles in terms of unitary representations of Lie groups. There is one more thing which is more unreasonable than the unreasonable effectiveness of ``higher'' mathematics in physics -- and this is the unreasonable effectiveness of arithmetic (even mental arithmetic) in physics.
\item The fantastic, explosive growth of genomics, and studies of RNA and DNA is the evidence for existence of a natural affinity of these parts of biology and mathematics / computer science. \item But there is more than affinity between mathematics and physics: by their origin, they are twin sisters. \item Biology is much more complex than physics. \item At its present form, mainstream mathematics approaches the limits of its potential applicability to biology. To be useful in the future, mathematics needs to change dramatically -- and there are good intrinsic reasons for that within mathematics itself. \end{itemize} In this paper, I will try to touch, briefly, on all these points -- but not always in the same order. \section{The unreasonable effectiveness of mental arithmetic in physics} It is likely that for Gelfand one of the benchmarks of mathematics' success in applied physics was the creation of the hydrogen bomb -- and he supplied the exceptionally difficult computational part for it. He produced sufficiently precise numerical estimates for processes within the physical event which, most likely, had never before happened on the surface of the Earth -- radiation implosion. Calculations required digital electronic computers, the first ever -- they were designed and built specifically for that purpose. But the road to the dawn of the computer era went through tens of thousands of manual (frequently back-of-envelope) calculations and quick mental estimates, say, of physical magnitudes which had never been measured before -- with the aim to get some idea of the size of a measurement device needed and the precision of the measurement required. In physicists' folklore, questions of that kind were known as \emph{Fermi problems} and could be asked about anything in the world, as Enrico Fermi did, when recruiting young physicists in the Manhattan Project while being unable, for reasons of secrecy, give them any indication of what their future work was about. 
Instead, he was asking them something like \begin{quote}\small How many piano tuners are in Chicago? \end{quote} and invited the interviewees to think aloud, and assessed their reasoning. Enrico Fermi's report \emph{My Observations During the Explosion at Trinity on July 16, 1945} remains the mother of all mental estimates in physics: \begin{quote}\small About $40$ seconds after the explosion, the air blast reached me. I tried to estimate its strength by dropping from about six feet small pieces of paper before, during, and after the passage of the blast wave. Since, at the time, there was no wind, I could observe very distinctly and actually measure the displacement of the pieces of paper that were in the process of falling while the blast was passing. The shift was about $2\frac{1}{2}$ meters, which, at the time, I estimated to correspond to the blast that would be produced by ten thousand tons of T.N.T. \end{quote} The energy output of the first ever nuclear explosion was calculated, on the spot, and by mental arithmetic, from observation of pieces of paper falling on the ground -- and estimated correctly, as proper measurements confirmed.\footnote{Physicists I spoke to told me they believed that Fermi's calculation was most likely based on the so-called \emph{dimensional analysis} rooted in the scale invariance frequently present in physical phenomena. Kolmogorov's deduction of his ``5/3'' Law (Section \ref{section:Komogorov} and the Appendix) was also done that way.} Gelfand was definitely familiar with physicists' love for this kind of mental trick. He told me that he once met Sakharov, who told him: ``You know, on the way here, I did some mental calculation and was surprised to discover that the Sun produces, per unit of mass, less energy than produced in a pile of rotting manure''\footnote{This observation deserves to be more widely known.
Life on Earth exists thanks to a steady supply of energy from a natural thermonuclear fusion reactor, safe, clean, stable, reliable, cheap -- our Sun. It is tempting to assume that the promised thermonuclear reactors (already decades in development) will offer the same benefits. But the Sun's power-to-mass ratio is a bit disappointing. And here is a Fermi problem for the reader: estimate the size of a pile of manure which would provide an adequate power supply to your home (lights, heating / air conditioning, hot water, all appliances, etc., and add a couple of all-electric cars to the equation), and estimate at what rate the heap has to be replenished.}. On hearing this from Gelfand, I was also surprised and did my own calculations -- Sakharov was (of course) right. Later I told the story to my astrophysicist friends -- they were astonished, made their calculations (much faster than I did, I have to admit) -- and were completely perplexed. So, this is the way physicists (well, at least experimental physicists) are thinking -- how could it happen to be so effective? My proposed answer is in the next section. \section{Twin sisters: Physics and Mathematics} \label{twin-sisters} I will be using the definition (or description) of mathematics given by Davis and Hersh \cite[p. 399]{Davis-Hersh}: \begin{quote} \small mathematics is the study of mental objects with reproducible properties. \end{quote} The famous mathematician David Mumford uses this formulation in his paper \cite[p. 199]{Mumford} and further comments on it: \begin{quote}\small I love this definition because it doesn't try to limit mathematics to what has been called mathematics in the past but really attempts to say why certain communications are classified as math, others as science, others as art, others as gossip. Thus reproducible properties of the physical world are science whereas reproducible mental objects are math.
\end{quote} Mumford's observation can be directly incorporated in (my own) definition: \begin{quote} \small mathematics is the study of mental objects and constructions with reproducible properties which imitate the causality structures of the physical world, and are expressed in the human language of social interactions. \end{quote} The most basic elements of the causality structures of the world are schemes for expression of observations of the world so self-evident that they are never mentioned in physics. For example, if you have some spoons and some forks in your cupboard and you can arrange them in pairs, with no spoon and no fork being singled out, and if you then mix spoons and forks in a box and start matching them in pairs again, it \emph{must} be a perfect match. Please notice the word \emph{must} -- its basic use is for expressing relations between people; please also notice that words like `must', `forces', `follows', `defines', `holds' etc.\ normally used for description of actions of people and relations between people, play an essential role in any mathematical narrative.\footnote{Without this emphasis on the social interactions language it would be impossible to explain a fact frequently ignored in discussions of mathematics: the surprisingly loose and informal language used by mathematicians when they talk about mathematics between themselves -- it has almost nothing in common with the language of published mathematical texts.} What we see in the example with spoons and forks is the mathematical concept of the one-to-one correspondence between finite sets -- as it appears ``in the wild''. A mental construction on top of one-to-one correspondence produces natural numbers, arithmetic operations, and the order relation.
They are interesting for their universal applicability: \begin{itemize} \item the number of my children is smaller than the number of protons in the nucleus of Lithium, \item which, in its turn, is smaller than the number of Galilean moons\footnote{Galilean moons can be objectively defined as satellites of Jupiter visible from Earth via a primitive telescope or standard binoculars.} of Jupiter; \item which is the same as the number of bases of DNA. \end{itemize} This is a true statement about four groups of objects in the real world which have absolutely no ``real world'' connections between them. \begin{quote} \textbf{The humble natural numbers are already a huge abstraction}.\footnote{At least one human culture was documented as having no concept of number and no number words in the language: that is of Pirah\~{a} people in the Amazon rainforest \cite[p.260]{Everett}.} \end{quote} The question about the ``unreasonable effectiveness'' has to be asked already about arithmetic, with an obvious answer: yes, arithmetic is effective in biology -- every time we have to count some distinctive and stable objects. It is a summary of experience accumulated by humanity over millennia: the causality structures of the physics universe are so robust that their consequences could be developed within mathematics independently from physics -- and remain consistent (that is, do not generate contradictions). Moreover, these mathematical developments could happen to be useful for description and modeling of physical phenomena. 
Ptolemean astronomy was built on the basis of spherical geometry, highly developed by that time (born from the needs of astronomy, by the way), in the absence of some key inputs from astronomic observations and from physics which became available only much later -- still, it provided a reasonable approximation to the observed movement of planets in the sky.\footnote{David Khudaverdian kindly explained to me that he does not see any problems with transferring, from the plane to the sphere, of his algorithm (and his computer programme) for approximate reproduction, by a linkage mechanism, and with preservation of the velocity of the point, of movement of a point along a plane curve, see \url{https://david.wf/linkage/theory.html}. It would be interesting to see what this algorithm would do with a kind of data that Ptolemeus could use. This is just a remark on how far we moved from the time of Ptolemeus.} At their birth, quantum mechanics and general relativity theory already had their mathematical machinery essentially ready and waiting to be used (perhaps with one important exception, as I'll explain in a minute). What is important, the efficiency of mathematics in the description and explanation of the real world was demonstrated at least two millennia ago at the level of arithmetic, primitive algebra and geometry. This is a well established historic fact.\footnote{Leonard Adleman was already mentioned here. He is co-inventor of RSA, one of the most widely used cryptographic systems, critically important for the world system of financial transactions, among many other uses. The belief in the security of RSA entirely depends on the assumption that factorisation of integers into products of prime numbers is an exceptionally hard problem. This is a historic observation extracted from two millennia of human experience with arithmetic. There is still no proof.} This justifies the motto coined by my colleague Robert A.
Wilson: \begin{quote}\small Mathematics: solving tomorrow's problems yesterday. \cite{Wilson} \end{quote} Of course, occasionally mathematics has to solve today's problems. This had happened with the theory of distributions (or generalised functions): they were invented (or made popular) by one of the founders of quantum physics, Paul Dirac (including the famous $\delta$-function), and were quickly and smoothly incorporated into mathematics; Gelfand was one of the principal contributors to the new theory. Regarding Gelfand's statement about mathematics and biology, I think he felt that he faced a much more serious challenge: the existing mathematics was not directly applicable in biology: some new mathematics was needed. I will return to that point in Section~\ref{sec:adequate}. And now I wish to offer a mental experiment. Imagine that over the last 11 thousand years (that is, the period of stable climate following upon the last ice age which allowed the human civilisation to develop) the atmospheric conditions on Earth were different: the skies were always covered, even in the absence of clouds, by a very light haze, not preventing the development of agriculture, but obscuring the stars and turning the sun and the moon into amorphous light spots. Would mathematics have had a chance to develop beyond basic arithmetic and geometry sufficient for measuring fields and keeping records of harvest? I doubt that. Civilisations which developed serious mathematics also had serious astronomy (it was an equivalent of our theoretical physics). But I claim even more: the movement of stars in the sky was the paradigm of precision and reproducibility, the two characteristic features of mathematics. Where else could humans learn the concept of absolute precision? Speaking about mathematics and physics as twin sisters, it is almost impossible not to mention the most extreme point of view on relations between the two sciences. 
It belongs to the famous mathematician Vladimir Arnold \cite{arnold-teaching}: \begin{quote}\small \emph{Mathematics is part of Physics.\\ Physics is an experimental discipline, one of the natural sciences.\\ Mathematics is the part of Physics where experiments are cheap.} \end{quote} Not every mathematician would agree with that. But I think it is important to put this extreme formulation on record, especially in the context of this paper. \section{My own doubts about the role of mathematics in biology} \subsection{My mathematical background} Everything said in the rest of this paper is my own opinion as a mathematician with 45 years of diverse experiences in mathematics. Over the last 25 years I was engaged -- in parallel with some mainstream and hard core pure mathematics which I was always doing -- with the study of various probabilistic and non-deterministic methods for solving problems in algebra. This made me quite receptive to David Mumford's idea \cite{Mumford} that the future of mathematics is stochastic. I mention this because I believe in the stochastic nature of the underlying laws of biology, whether they are expressed mathematically or not. This is a huge theme, and in this paper, my arguments are only indicated, not expanded in any detail. \subsection{Biology as a study of algorithms} Speaking about biology, and especially molecular biology, it is important to understand that it is not a natural science in the same sense as physics. It does not study the relatively simple laws of the world. It studies objects which do not exist in physics, and cannot be meaningfully reduced to physical phenomena: \emph{algorithms}. It has to deal with molecular algorithms (such as, say, the transcription from DNA to RNA and further translation into synthesis of proteins which ensures the correct spatial shape and the correct functioning of the protein molecule -- and this chain of transformations continues all the way down to specific patterns of neuron firing). 
Of course I agree with Adleman \cite{adleman} that this part: \begin{quote}\small ``\emph{the transformations that information undergoes in the cell}'' \end{quote} can be understood mathematically (or by means of computer science).\footnote{The design of the BioNTech/Pfizer vaccine is enthusiastically greeted by cryptanalysis / computer security geeks who immediately started to ask interesting questions, see Bert Hubert \cite{Hubert} -- but we also should not forget the tremendous work of molecular biologists which made the success possible. Cryo-electron microscopy resulted in the structure analysis of the SARS-CoV2 spike protein in complex with its cognate cell receptor \cite{Wrapp}, which, in its turn, made possible the design of the stabilized spike protein mutant that has been successfully adapted for the vaccine design for both RNA-based BioNTech/Pfizer and Moderna vaccines.} Adleman's paper was written in 1998 and stayed within the Central Dogma of molecular biology. He occasionally made even more restrictive statements: \begin{quote} \small\emph{ The polymerase enables DNA to reproduce, which in turn allows cells to reproduce and ultimately allows you to reproduce. For a strict reductionist, the replication of DNA by DNA polymerase is what life is all about.} \cite[p. 54]{adleman} \end{quote} Let us stay for a minute under the umbrella of the Central Dogma. \subsection{Irreversibility} First of all, we need to take into consideration that there are many stages of the transformations ``that information undergoes in the cell'', and each of them has its own mechanisms for re-encoding the information into a different ``language''. Each transformation could happen to be a one-way function or procedure, with sufficiently clear ways of performing it, but without rules for reversing the transformation. Why? Because all these sophisticated and subtle mechanisms were developed in the course of evolution.
The clarity and precision of transformation were obvious selection criteria -- otherwise organisms could not leave viable descendants, and, most likely, could not even function themselves. But it appears that the existence of rules and mechanisms for reversing each particular transformation had never been a survival criterion. But if some property was not a survival criterion, why should we expect that it dominates the population? If it was inherited from previous stages of evolution, and lost its usefulness, it is likely that it will be suppressed by mechanisms controlling gene expression. (Here we start to deviate from the Central Dogma.) Avoiding terminology from complexity theory and cryptography, one may still say \begin{quote} \textbf{The transformations that information undergoes in the cell form a cascade of functions which could happen to be not effectively reversible.} \end{quote} Without giving a precise definition, I wish to remark that in mathematics such transformations (functions) are called \emph{one-way functions}. A canonical (alleged) example of a one-way function is multiplication of integers: it is very easy to multiply two integer numbers $p$ and $q$; if $n = p \times q$, finding factors $p$ and $q$ when given only $n$, is believed to be an impossibly difficult problem, especially if $p$ and $q$ are very large prime numbers. The catch is that it is not proven that factorisation is difficult, it is only a historic observation, the total of experiences accumulated by mathematicians over 2,000 years. The security of the famous RSA cryptosystem, the backbone of electronic finance, is a belief, not a fact. Almost all mathematicians believe that one-way functions exist, but this remains a conjecture, it is not a theorem, it is not proved. Moreover, almost all functions are likely to be one-way -- but there is no proof of that. On that matter, mathematics is still at square zero.
To summarise, \begin{quote} \textbf{Mathematics of nowadays has no tools (and perhaps will never have) for reversing transformations of unknown provenance and of that size of inputs.} \end{quote} But, inverting everything that can be reversed is one of the paradigms of mainstream mathematics; even if you are not a mathematician, recall how many hours you spent at school solving all these equations and systems of equations; this was about it: reversing mathematical operations and inverting functions. Moreover, more could be said: \begin{quote} \textbf{Being understood by some species which would come into existence perhaps hundreds of millions of years later had never been a selection criterion for molecular algorithms at any stage of their evolution.} \end{quote} This basic remark suggests that the current success of biology is a fantastic achievement which could never have been taken for granted. The further we are from the Central Dogma and the more information transfer paths are discovered in the cell and between cells, the more complicated and difficult for analysis things become. In particular, if something appears to be reversed, this is not a full inverse map -- loss of information is inevitable. For example, suppression of genes is an obvious removal (perhaps temporary) of no longer needed information. \subsection{Black boxes} My co-author \c{S}\"{u}kr\"{u} Yal\c{c}\i nkaya and I have a toy model for one-way information flows; it is called \emph{black box algebra} and we are preparing a monograph on it \cite{BY-monograph}. Black box algebra studies categories where objects are some finite mathematical (or computer science, which in this context does not matter) structures called black boxes. Elements of black boxes are binary strings, mathematical operations (perhaps partial) inside of a black box are performed and predicates evaluated by efficient (in some specific meaning) algorithms.
Morphisms are maps from one black box to another which preserve operations and values of predicates in the black boxes and are performed by efficient algorithms. There is also a more subtle and flexible relation between black boxes: we say that a black box $Y$ is \emph{interpreted} in a black box $X$ if there is an effective map $\alpha: Y \longrightarrow X$ such that for every partial operation (here, for the sake of simplicity of explanation -- binary) ``$\otimes$'' on $Y$ there is an efficient map $\beta: X \times X \longrightarrow X$ such that \[ \alpha(y_1 \otimes y_2) = \beta(\alpha(y_1),\alpha(y_2)), \] with a similar property holding for predicates. The crucial feature of the theory is that we do not expect that the inverse morphisms can also be computed efficiently -- morphisms could happen to be one-way functions. Also, we do not know what is inside of a black box, we can only sample some of its random elements and observe their behaviour and interaction with other elements from the sample. In all that there are some analogies with what we see in a cell at a molecular level when we try to look at it from a mathematical point of view. Black box algebra has happened to be critically important for solving, by probabilistic methods, some difficult problems in computational algebra \cite{BY2018,haystack} and is a natural tool for analysis of the so-called homomorphic encryption \cite{BY-homomorphic}. Some famous intractable problems of algebraic cryptography -- factorisation of integers, the discrete logarithm problem in finite fields and on elliptic curves -- naturally live in the domain of black box algebra. This shows that this new field of algebra is immensely difficult.
This also supports the nagging feeling that in the world around us almost every process is not reversible (after all, there is the universal phenomenon of aging followed by the inevitable death), and, moreover, its mathematical description as a function or algorithm (if found) has no efficiently computable inverse. \section{Some further comments on mathematics and evolution} Once I did some work on genetic (or evolutionary) algorithms in mathematics \cite{BBB,BB}. As it happens in experimental work, not all observations made found their way to publication, especially because my collaborators and I focused on the convergence, in some special cases, of the evolution of a population of non-deterministic algorithms for solving a particular mathematical problem to a known deterministic algorithm, that is, to an algorithm constructed by humans. For the purpose of this discussion, the cases where the evolution did not progress as we wished would be much more interesting -- and these were the majority of the cases: the geometry of the search space was too complex, and the evolution of an algorithm stuck in a cul-de-sac of a local maximum. This raises a natural question: why did the evolution of life on Earth produce, and continue to produce, something that apparently works? Most of the molecular algorithms of life were shaped at the stage of prokaryotes and their immensely complex co-evolution with viruses \cite{Koonin2017,Koonin}. This took, most likely, hundreds of millions of years, with billions of generations. This number of generations can be reproduced on modern supercomputers. However -- and this was the principal difference from any form of computation that technology might allow us to do -- this was happening in huge search spaces. 
The probability of mutations and chances for survival of one of them in subsequent generations grow with the size of the population.\footnote{Only very recently, almost a year after the start of the pandemic, I had finally had a chance to hear a politician (Shadow Health Secretary in the UK Parliament) referring to this basic principle in the debate in the British parliament about the pandemic. Still, this is a colossal success of popularisation of science.} Any evolution -- an artificial evolution of some artificial entities, or the natural evolution of life -- is blind. In a small number of cases it finds optimal solutions with respect to certain relatively simple constraints and survival criteria -- the same way as water flows down the slope. For example, all animals living in water, if they have to be able to move faster than their prey or predators, have distinctive streamlined shapes dictated by (physical) laws of fluid dynamics.\footnote{``About 60\% of the recognized virus taxa have icosahedral capsids, which is unsurprising because the icosahedron has the largest volume to surface area ratio, closest to that of a sphere, the most thermodynamically favorable three-dimensional shape, and generates the maximum enclosed volume for shells comprised of a given size subunit. [\dots ] The other side of the coin, however, is that similar capsid geometries do not necessarily reflect homologous relationships between viruses: for example, icosahedral capsids emerged at least 11 times during virus evolution from unrelated CPs with drastically different folds.'' \cite[pp. 4--5]{Koonin}.} However, the functioning of a cell means the simultaneous satisfaction of thousands of constraints and criteria.
And experiments show that in problems with multiple constraints evolution does not find an optimal, or even close to optimal, but just a survivable solution.\footnote{Gregory Cherlin, who read an early draft of my paper, commented at that point: \begin{quote}\tiny It is probably looking for solutions to $NP$-complete problems and even with much space is still going to get trapped. I understand that even the shape of a foam in theoretical physics is a solution to an $NP$-complete problem and nature does not actually produce that shape, even under the laws of physics. \end{quote} I share his concern; if $P\ne NP$ (as almost every mathematician expects), mathematisation of biology is likely to be a long slog. Notice that existence of one-way functions implies $P\ne NP$. $P$ vs. $NP$ is one of the Clay Mathematics Institute Millennium Prize Problems, seven problems judged to be among the most important open questions in mathematics.} In short, the surviving solution could be in one of myriads of local optima, sufficiently good to ensure reasonably high probability of survival. Lucky strikes could be so rare that the huge search space and millions of years of evolution produce just one survivable solution, which, as a result, dominates the living world, and is perceived by us as something special.\footnote{Of course, we award this special status, first and foremost, to \emph{ourselves}. There is an almost universal belief that humanity is the crown of God's / Evolution's creation. Ephesians 2:10 is given in the New Living Version as ``\emph{For we are God's masterpiece}''. The translators of the (older) King James Version were a bit more modest: ``\emph{For we are his workmanship}''. It seems that the self-esteem of \emph{H. 
Sapiens} as a species improves with time.} But it might happen that there is absolutely no reasonably compact external characterization which allows us to distinguish it from other possible solutions, and that its phylogeny (if we will ever know it) is its only explanation. With the exception of relatively rare periods of regression, evolution progresses bottom up, from simple to complex. In modern mathematics the situation is different. Of course, new theories frequently generalise, and are built upon, older theories. However, in concrete research projects and in proving specific theorems mathematics usually works in the opposite direction: from the more general and abstract down to filling in concrete details. This is how mathematicians \emph{write} proofs after they got them. I co-authored a theorem with a proof of 500 pages -- it was published as a book \cite{ABC}. Believe me: this could not be done using the ``bottom up'' approach. The same ``top down'' approach is used in project management: clear identification of priorities and the target, and then planning back to the present position -- with special attention to identification of time critical paths. The military in more advanced countries reached real sophistication and efficiency in ``reverse thinking'', both in operational planning and in logistics. In the UK, the army remains the last resort for saving the government's pathetic attempts to manage its response to the COVID epidemic. I doubt that the evolution of life had ever done critical path analysis. In short, evolution of life has nothing in common with human problem solving, nothing in common with design and development of mathematical algorithms by mathematicians or computer scientists. And computers are of no help. I spent considerable time solving, by non-deterministic methods, mathematical equivalents of the search for the proverbial needle in a haystack \cite{haystack}.
The biggest structure of that kind where my co-author \c{S}\"{u}kr\"{u} Yal\c{c}\i nkaya and I managed to compute significant and important substructures, and say something sensible about them, contained about $10^{960}$ elements. The Observable Universe contains around $10^{80}$ electrons. We were computing in something which was $10^{880}$ times bigger than the Observable Universe. The total number of prokaryotes which ever existed on the Earth is nothing in comparison with that. We were successful because we knew what we were looking for, used the powerful global symmetries of the system which we studied, and were able to restrict our work to just a handful of carefully chosen elements. Also, individual elements were much simpler than any bacteria or archaea; our elements were about $1,000$ bytes long and \emph{had no structure}: we worked with just labels of, or pointers to, random elements -- but bacterial DNA contains millions of base pairs\footnote{The information content of the messenger RNA of BioNTech/Pfizer vaccine is just above 1 kilobyte.}, and has structure which has to be taken into account if we (humans) try to analyse the DNA molecule. But evolution does not analyse the structure of DNA -- it just checks whether a mutation is advantageous for survival, neutral, or disadvantageous, and these checks are probabilistic by their nature. And let me repeat: evolution is blind. Evolution does not know what it is looking for. It works via random mutations or exchange of genetic information (again random). For a human mind, even assisted by computers, to navigate the resulting mess -- is a challenging task. \section{Once more about the unreasonable effectiveness of mathematics in physics} \label{section:Komogorov} In the previous Section, non-reversibility of transformations in information flows in cells was highlighted as the principal difficulty for analysing them mathematically.
So it would be useful to look at one of the most extreme cases of the unreasonable effectiveness of mathematics in physics, Andrei Kolmogorov's\footnote{By the way, Gelfand was a student of Kolmogorov.} analysis of an incomprehensibly chaotic (and non-reversible, one-way) phenomenon -- \emph{turbulence} -- and try to find: \emph{where is the catch}? The deduction of Kolmogorov's seminal ``$5/3$'' Law for the energy distribution in turbulent fluid \cite{kolmogorov} is so simple that it can be done in a few lines using only school level algebra (that kind of derivation can be found in \cite[Section 8.4]{MuM}; I borrow some details from there). The turbulent flow of a liquid is a cascade of vortices; the flow in every vortex is made of smaller vortices, all the way down the scale to the point where the viscosity of the fluid turns the kinetic energy of motion into heat. So, assume that we are in a steady state, that is, we have a balanced energy flow. Kolmogorov asked the question: \emph{what is the share of energy carried by vortices of a particular size}? He got an answer by an elegant short argument based on the important assumption of \emph{self-similarity} or \emph{scaling invariance} which amounted to saying that \begin{quote}\small The way bigger vortices are made from smaller ones is the same throughout the range of wave numbers, from the biggest vortices (say, like a cyclone covering the whole continent) to a smaller one (like a whirl of dust on a street corner). \cite{arnold} \end{quote} So, this was the catch! And here is Kolmogorov's formula: \[ E(k) \approx C \epsilon^{2/3}k^{-5/3} \] where $E(k)$ is the \emph{energy density}, $\epsilon$ is \emph{the energy flow}, and $k$ is the \emph{wave number}, while the constant $C$ is dimensionless and is usually close to $1$ (details are in \cite[Section 8.4]{MuM} or in the Appendix to this paper). The status of Kolmogorov's celebrated result is quite remarkable. 
In the words of an expert on turbulence, Alexander Chorin \cite{chorin}, \begin{quote} \small Nothing illustrates better the way in which turbulence is suspended between ignorance and light than the Kolmogorov theory of turbulence, which is both the cornerstone of what we know and a mystery that has not been fathomed. \end{quote} \begin{quote}\small The same spectrum [\dots] appears in the sun, in the oceans, and in man-made machinery. The ``5/3'' Law is well verified experimentally and, by suggesting that not all scales must be computed anew in each problem, opens the door to practical modelling. \end{quote} \normalsize Vladimir Arnold \cite{arnold} reminds us that the main premises of Kolmogorov's argument remain unproven---after more than 60 years! Even worse, Chorin points to the rather disturbing fact that \begin{quote}\small Kolmogorov's spectrum often appears in problems where his assumptions clearly fail. [\dots] The ``5/3'' Law can now be derived in many ways, often under assumptions that are antithetical to Kolmogorov's. Turbulence theory finds itself in the odd situation of having to build on its main result while still struggling to understand it. \end{quote} This is an interesting case indeed: a remarkable success of mathematics which also shows its limitations. And limitations are obvious: this is only a summary description of one (although important) aspect of a stochastic phenomenon, Figure~\ref{Hokusai}. In biology, we frequently need something more detailed than that. \begin{figure}\label{Hokusai} \end{figure} A testimony from Sakharov about the role of self-similarity in physics is illuminating: \begin{quote} \small Soon after we began the project, I'd come up with an approximate analysis of the important processes specific to the Third Idea\footnote{\emph{The Third Idea} was a more advanced design of an H Bomb, much more powerful than the previous versions.}.
Mathematically, these were the so-called \emph{self-similar solutions} for certain partial differential equations. [\dots ] \end{quote} For Sakharov, this provided sufficient grounds for gearing up the project: \begin{quote} \small Relying on intuition, and without waiting for the resolution of all theoretical questions or the final calculations, I issued instructions and explained to the designers which specifications were critical, and which could be adjusted. Through frequent visits, I established close professional relations with the technical personnel employed in the design sector. I came to appreciate the difficult, painstaking nature of their work, and the specialized knowledge and talent it required. \end{quote} This is very interesting: an approximate solution based on the assumption of scale invariance was sufficient for starting the project, but not enough for its completion: \begin{quote} \small Nevertheless, we needed something better than analyses of individual processes using simplified assumptions. Mathematicians at the Installation and in Moscow worked out new methods for performing complicated calculations by computer. A team headed by Izrail Gelfand, a corresponding member of the Academy of Sciences, played a critical role. I worked closely with Gelfand and his group to develop the basic programs, and we established an excellent rapport despite Gelfand's habit of flying into a rage and shouting at his colleagues (and sometimes at me as well). After a flare-up, he would stride up and down his office in silence for a few minutes. When he had regained his composure, he would return to work and even, on occasion, apologize for his outburst. Still, I got the impression that Gelfand's colleagues loved him, and that he had a paternal attitude toward them. \end{quote} Basically, Gelfand and his team resolved the extremely difficult problem numerically, by computer calculations, and without use of the oversimplifying assumption of self-similarity. 
This is what I call project management. Importantly, different levels of mathematical modelling were needed at different stages of the development of the project. At that time, Sakharov was about 35, Gelfand about 45 years old. \section{Lack of ``global'' scaling invariance in biology} Molecular level processes within a cell are quite different by their nature from the interaction of cells within a living tissue, and the way an organism is built from its parts is again different. These levels of structural hierarchy developed at different stages of evolution, under different external conditions, and every time evolution had to use not very suitable means for solving new problems. When scaling invariance is observed in a living organism, for example, in the form of phyllotaxis \cite{Jean,Lamport,Swinton}, it is usually restricted to a single level of structural hierarchy. Not surprisingly, phyllotaxis has happened to be open to mathematical insights, and the first serious mathematical study was done by Alan Turing \cite{turing}; he even used one of the first digital electronic computers for related calculations \cite{Swinton}. The absence of scaling invariance is another obstacle to the effective use of mathematics in biology. \section{The natural affinity between mathematics and genomics -- and its limits} \label{sec:affinity} \noindent Returning to the definition of mathematics as ``the study of mental objects with reproducible properties'' (Section \ref{twin-sisters}), I wish to focus on the word ``reproducible''.\footnote{This section is only a very brief exposition of the much more detailed \cite[Chapter 11]{MuM}, which also contains an analysis of some concrete historic examples.} \subsection{Memes} The term \emph{meme} was made popular by Richard Dawkins \cite{dawkins} and introduced into mainstream philosophy and cultural studies by Daniel Dennett \cite{dennett}.
Memes are intended to play the same role in the explanation of {the} evolution of culture (and {the} reproduction of individual objects of culture) as genes {do in the} evolution of life (correspondingly, {the} reproduction of individual organisms). The concepts of `meme' and `meme complex' (the latter introduced by Susan Blackmore \cite{Blackmore}) still look more like metaphors rather than rigorously defined scientific terms and have been irreparably undermined by adoption of the word `meme' in social media parlance. In memetics, specific case studies and applications (like the one described in \cite{koza}) are still more interesting than a rather vacuous general theory. But in discussion of the transmission and reproduction of mathematics, the meme metaphor has non-trivial aspects. As I argue in \cite[Chapter 11]{MuM}, mathematical memes play a crucial role in many meme complexes of human culture: they increase the precision of reproduction of the complex, thus giving an evolutionary advantage to the complex, and, of course, to the memes themselves. Remarkably, the memes may remain invisible, unnoticed for centuries and not recognized as rightly belonging to mathematics. This is the characteristic property of ``mathematical'' memes: \begin{quote}\small If a meme has the intrinsic property that it increases the precision of reproduction and error correction of the meme complexes it belongs to, and if it does that without resorting to external social or cultural restraints, then it is likely to be an object or construction of mathematics. \end{quote} As Ian Stewart put it, \begin{quote}\small Mathematics is the ultimate in technology transfer. \cite{stewart} \end{quote} Indeed mathematics studies mental objects with reproducible properties which happen to be built according to highly precise reproducible rules, with the precision of reproduction being checked by specific mechanisms, which, in their turn, can also be precisely reproduced and shared. 
These rules can themselves be treated as mathematical objects (this is done in branches of mathematics called mathematical logic and proof theory) and are governed by metarules, etc. Mathematical objects can reproduce themselves only because they are built hierarchically. Simple or atomic objects (definitions, formulae, elementary arguments, etc.), form more complicated entities (theorems and their proofs) which, in their turn, are arranged into theories. When comparing mathematics with other cultural systems, we see that some degree of precision of replication can usually be found in systems which are relatively simple (like fashion, say). Precision can also be linked to a certain rigidity of the system and an institutionalized resistance to change, as in the case of religion. We do not offer hecatombs to Zeus, but, after 2000 years or so, we still use Euclidean geometry -- and this has happened without anything resembling the endless religious wars of human history. Mathematics is so stable as a cultural complex because it has an extremely powerful intrinsic capability for error detection and error correction. The difficulty of explaining the astonishing power of self-correction of mathematics by external factors, social or cultural, is analyzed, in considerable detail, in \cite{Azzouni}. I claim that the only possible explanation lies in the nature of mathematical memes themselves. To summarise the role of mathematical objects in the evolution of human culture, they are memes which happened to be successful and spread because of the following properties: \begin{itemize} \item They have extreme resilience and precision of reproduction. \item When included in meme complexes (collections of memes which have better chances for reproduction when present in the genotype as a group), they increase the precision of reproduction of the complex as a whole. We will call memes with this property \emph{correctors}. 
\item This error correcting property is intrinsic to mathematics, its implementation involves only other mathematical objects, concepts, and procedures -- it does not depend on external social or cultural restraints. \end{itemize} \subsection{Mathematics is huge -- but in comparison with what? } People outside the mathematical community cannot imagine how big mathematics is. Davis and Hersh point out that between 100 000 and 200 000 new theorems are published every year in mathematical journals around the world. A poem can exist on its own; although it requires readers who know its language and can understand its allusions, it does not necessarily refer to or quote other poems. A mathematical theorem, as a rule, explicitly refers to other theorems and definitions and, from the instant of its conception in a mathematician's mind, is integrated into the huge system of mathematical knowledge. This system remains unified, tightly connected, and cohesive: if you take two papers at random, say, one on mathematical logic and one on probability theory, you may easily conclude that they have nothing in common. However, a closer look at the Mathematics Subject Classification reveals discipline 03B48: Probability and inductive logic. We see that, despite all this diversity, there is an almost incomprehensible unity of mathematics. It can be compared only with the diversity and the unity of life. Indeed, all life forms on Earth, in all their mind-boggling variety, are based on the same mechanisms of replication of DNA and RNA, and all that genomic stuff looks like mathematics. It is not surprising at all that mathematics and computer science proved to be efficient there. As I have already said earlier, the trouble with mathematics is likely to start at higher levels of structure of living matter. Also the comparison with biology is not really in favour of mathematics: it is minuscule in comparison with Life. 
Allocating, say, 10 kB of \LaTeX\ code to the proof of a theorem, 200,000 theorems become 2GB of \LaTeX\ files. What is 2GB on biology's scale? Nothing. And there is one more issue: texts are only one of the media of social transfer of mathematics. A text is alive only while there are people who \emph{wish} and can understand it; alas, their number, per paper, is in single figures. From my experience as a journal editor, I can say that finding a reviewer for a mathematical paper submitted to a journal is becoming increasingly difficult. In the next section I say more about the emerging crisis in mathematics as a cultural and social system. \subsection{But it looks as if mathematics is reaching the limits of human comprehension} Mathematics continues to grow, and if you look around, you see that mathematical results and concepts involved in practical applications are much deeper and more abstract and difficult than ever before. And we have to accept that the mathematics hardwired and coded, say, in a smartphone, is beyond the reach of the vast majority of graduates from mathematics departments in our universities. The cutting edge of mathematical research moves further and further away from the stagnating mathematics education. From the point of view of an aspiring PhD student, mathematics looks like New York in the \v{C}apek Brothers' book \emph{A Long Cat Tale} \cite[p. 44]{Capeks}: \begin{quote}\small And New York -- well, houses there are so tall that they can't even finish building them. Before the bricklayers and tilers climb up them on their ladders, it is noon, so they eat their lunches and start climbing down again to be in their beds by bedtime. And so it goes on day after day. \end{quote} Joseph and Karel \v{C}apek were the people who coined the word `\emph{robot}' for a specific socio-economic phenomenon: a device or machine whose purpose is to replace a human worker. 
Almost a century ago, they were futurologists -- long before the word `futurology' was invented. Mathematics badly needs its own specialised mathematical robots -- first of all, for checking proofs, which are becoming impossibly long and difficult. One of the more notorious examples is the Classification of the Finite Simple Groups (CFSG), one of the central results of 20th-century algebra. In particular, the CFSG underpins quite a number of results and methods in finite combinatorics, critically important for any systematic development of mathematical biology -- after all, no matter how huge they are, protein molecules are built of finitely many atoms. The original proof of the CFSG, still with holes, was spread over more than 100 journal papers of total length about 15 thousand pages. A proper and structured proof has been published, volume by volume, since 1994 \cite{CSFG}. At the present time, 8 volumes out of the originally estimated 12 are published, volume 9 is in print, volume 10 is in preparation, plus 1220 pages of two volumes of an important part of the proof which was developed separately \cite{ASquasithin}. I personally know, I think, almost every person in the world who can read and understand this proof. The youngest of them is Inna Capdeboscq, one of the authors of volume 9; very soon she will be the only non-retired mathematician who understands the proof of the CFSG. We have to admit that mathematics faces an existential crisis. Without switching to systematic use of computer-based proof assistants, and corresponding changes in the way mathematics is published and taught, mathematics will not be able to face the challenges of biology -- moreover, it is likely to enter a spiral of decay. \section{The search for the adequate mathematical language} \label{sec:adequate} Israel Gelfand once said to me: \begin{quote}\small Many people think that I am slow, almost stupid. Yes, it takes time for me to understand what people are saying to me. 
To understand a mathematical fact, you have to translate it into a mathematical language which you know. Most mathematicians use three, four languages. But I am an old man and know too many languages. When you tell me something from combinatorics, I have to translate what you say in the languages of representation theory, integral geometry, hypergeometric functions, cohomology, and so on, in too many languages. This takes time. \cite[p. 67]{MuM} \end{quote} Gelfand's love of ``simplest possible examples'' as well as his insistence on being constantly reminded of the most basic definitions was not a caprice: he used these examples as pointers toward \emph{the most adequate mathematical language} for describing and solving a particular problem; if several languages had to be used, he used definitions as synchronisation markers for smooth translation from one language to another. I heard from Gelfand these particular words: ``adequate mathematical language'' many times. I was excited to find the term ``adequate language'' prominently featuring in reminiscences about him written by his colleague in neurophysiology, Yuri Arshavsky. \begin{quote}\small The widely accepted concept, presently known as the connectionist concept, that the brain is a form of computational machinery consisting of simple digital elements was particularly alien to I.M. Gelfand. Everybody in this audience knows that, according to I.M. Gelfand, the main problem of science is the problem of ``adequate language.'' For a formulation of adequate logic there must be language that does not simplify a real situation. His viewpoint was that the situation in which neuroscientists use the language of electrical spikes and synaptic connections as the only language in their interaction with the nervous system, should unavoidably lead to principal roadblocks in understanding the higher, cognitive functions of the brain. 
Computational models of cognitive functions, even those looking flawlessly logical and convincing, are usually incorrect, because they use non-adequate language. I.M. Gelfand believed that the language of cognitive neuroscience should be shifted away from the commonly-accepted ``network'' language to the intracellularly-oriented direction. My guess is that this was among reasons for I.M. Gelfand to shift his biological interests from neurophysiology to cell biology. He used to ask us -- a group of young electrophysiologists -- whether we really believed that neurons do not have, metaphorically speaking, a ``soul,'' but only electrical potentials. In other words, Gelfand's idea was that the highest levels of the brain include complex, ``smart'' neurons, performing their own functions and that the whole cognitive function is the result of cooperative work of these complex neurons. As far as I know, most of Gelfand's colleagues have been admired by his fantastic intuition in mathematics. I think that Gelfand's idea that neurons can have not only electrical potentials, but also a ``soul'' shows that his intuition extended far beyond mathematics. \cite{Arshavsky} \end{quote} I strongly recommend Arshavsky's short paper \cite{Arshavsky}; in effect it explains \begin{quote}\small the unreasonable ineffectiveness of mathematics in neurophysiology \end{quote} and explains the shift of Gelfand's interests to cellular biology. The ``adequate language'' philosophy was not reductionist in the sense that he refused to work within a single structural layer of living matter. This was his philosophy in mathematics, too. For example, he insisted that every decent mathematical theory should have a proper combinatorial underpinning (this is why he dragged me into writing a book \cite{CoxeterMatroids} about some exceptionally simple, at first glance, combinatorial objects -- he needed them for his more serious projects). 
And, as a fleeting remark: Gelfand's work in medicine also was a quest for an adequate language \cite{Kulikowski}. Gelfand applied the same ideology to biology. Here, he did not already have a suitable mathematical language at hand -- it had to be developed; perhaps more than one language was needed. The underlying combinatorial theory also did not exist. There was an additional difficulty: unfortunately, in biology the simplest possible examples, which would be natural starting points for this development, are not so simple. The foundational combinatorics underpinning a description of molecular processes in the cell (of course, if it exists) has to be multidimensional -- just look at the number of degrees of freedom of a large molecule. I have a feeling that an appropriate multidimensional combinatorics emerges in the works of Alina Vdovina, see, for example, \cite{Vdovina}, one of her several works in which she uses, in various contexts, ubiquitous combinatorial structures made of a group acting on (or associated in other subtle ways with) a $CW$-complex; at a naive level, these new multidimensional combinatorial structures have rich (local) symmetries and rich and complex branching. \section{What kind of new mathematics may help?} Since my paper is not particularly intended for mathematicians, this section is very brief. To summarise, my conclusion is that mathematics, as we know it, is unlikely to be effective in biology. We will need to develop some new mathematics for that. First of all, we need \begin{itemize} \item stronger emphasis on stochasticity -- Mumford wrote about that 20 years ago in his paper \emph{The dawning of the age of stochasticity} \cite{Mumford}, and \item new multidimensional combinatorics. \end{itemize} Also, we need dramatic, fundamental changes in the everyday work of mathematicians and in the functioning of mathematics as a cultural system; using a biological simile, these have to be changes at the cellular level. 
In my opinion, mathematics for biology will be born from the synthesis of three colossal tasks: \begin{description} \item[Stream A] Rebuild mainstream pure mathematics as a computer-based discipline, with routine use of proof assistants and proof checkers (specialist software packages which implement methods of proof theory). Make sure that the use of proof assistants covers all kinds of stochastic stuff and non-deterministic methods in mathematics. \item[Stream B] Introduce methods of AI (artificial intelligence) into computer-based pure mathematics. \item[Stream C] Move beyond statistics-based AI, machine learning, data science etc.\footnote{These directions have their share of issues, see, for example, \cite{D'Amour}.} and develop a new kind of AI which also uses methods of proof theory to provide not only answers, but also structured human-readable explanations and justifications. If necessary, this new AI should be able to generate language and symbolism for these explanations. \end{description} The most prominent programme for realisation of Stream A is Vladimir Voevodsky's \emph{Univalent Foundations of Mathematics} \cite{UFM,voevodsky-origins} -- see Andrei Rodin's paper in this volume \cite{Rodin} for discussion of its possible role in biology. For a very recent example of other developments, see \cite{abraham}. I doubt that the next generation of mathematicians would be willing to handle proofs 500 pages long without computer support. Without proof assistants, further progress of mathematics will simply stop, and any talk of mathematics for biology will become meaningless. A very recent paper \cite{polu} gives a taste of Stream B. Stream C appears to be a hot ticket in FinTech, with well-funded start-ups (such as \href{http://www.hylomorph-solutions.com/}{Hylomorph Solutions}) fighting for a killer product. 
Of course, realisation of this modest proposal will require a dramatic reform of mathematics education (which is dangerous, judging by the grotesque failures of previous attempts). \section*{Appendix: Kolmogorov's ``5/3'' Law} I borrow this fragment from my book \emph{Mathematics under the Microscope} \cite[Section 8.4]{MuM}. The deduction of Kolmogorov's seminal ``$5/3$'' law for the energy distribution in a turbulent fluid \cite{kolmogorov} is so simple that it can be done in a few lines. It remains the most striking and beautiful example of dimensional analysis in mathematics. I was lucky to study at a good secondary school where my physics teacher, Anatoly Mikhailovich Trubachov, derived the ``$5/3$'' law in one of his improvised lectures. The turbulent flow of a liquid consists of vortices; the flow in every vortex is made of smaller vortices, all the way down the scale to the point when the viscosity of the fluid turns the kinetic energy of motion into heat (Figure~\ref{Hokusai}). If there is no influx of energy (like the wind whipping up a storm in Hokusai's woodcut), the energy of the motion will eventually dissipate and the water will stand still. So, assume that we have a balanced energy flow, the storm is already at full strength and stays that way. The motion of a liquid is made of waves of different lengths; Kolmogorov asked the question, what is the share of energy carried by waves of a particular length? Here is a somewhat simplified description of his analysis. We start by making a list of the quantities involved and their dimensions. First, we have the \emph{energy flow} (let me recall, in our setup it is the same as the dissipation of energy). The dimension of energy is \[ \frac{\mbox{mass} \cdot \mbox{length}^2}{\mbox{time}^2} \] (remember the formula $K = mv^2/2$ for the kinetic energy of a moving material point). It will be convenient to make all calculations \emph{per unit of mass}. 
Then the energy flow $\epsilon$ has dimension \[ \frac{\mbox{energy}}{\mbox{mass}\cdot \mbox{time}} = \frac{\mbox{length}^2}{\mbox{time}^3}. \] For counting waves, it is convenient to use the \emph{wave number}, that is, the number of waves fitting into the unit of length. Therefore the wave number $k$ has dimension \[ \frac{1}{\mbox{length}}. \] Finally, the \emph{energy spectrum} $E(k)$ is the quantity such that, given the interval \[ \Delta k= k_1-k_2 \] between the two wave numbers, the energy (per unit of mass) carried by waves in this interval should be approximately equal to $E(k_1)\Delta k$. Hence the dimension of $E$ is \[ \frac{\mbox{energy}}{\mbox{mass}\cdot \mbox{wave number}} = \frac{\mbox{length}^3}{\mbox{time}^2}. \] To make the next crucial calculations, Kolmogorov made the major assumption that amounted to saying that\footnote{This formulation is a bit cruder than most experts would accept; I borrow it from Arnold \cite{arnold}.}: \begin{quote}\small The way bigger vortices are made from smaller ones is the same throughout the range of wave numbers, from the biggest vortices (say, like a cyclone covering the whole continent) to a smaller one (like a whirl of dust on a street corner). \end{quote} Then we can assume that the energy spectrum $E$, the energy flow $\epsilon$ and the wave number $k$ are linked by an equation which does not involve anything else. Since the three quantities involved have completely different dimensions, we can combine them only by means of an equation of the form \[ E(k) \approx C \epsilon^x \cdot k^y. \] And now the all-important scaling considerations come into play. In the equation above, $C$ is a constant. Since the equation should remain the same for small-scale and for global-scale events, the shape of the equation should not depend on the choice of units of measurement, hence the constant $C$ should be dimensionless. 
Let us now check how the equation looks in terms of dimensions: \[ \frac{\mbox{length}^3}{\mbox{time}^2} = \left(\frac{\mbox{length}^2}{\mbox{time}^3} \right)^x \cdot \left(\frac{1}{\mbox{length}} \right)^y. \] After equating lengths with lengths and times with times and solving the resulting system of two simultaneous linear equations in $x$ and $y$, we get \[ x = \frac{2}{3} \;\; \mbox{ and} \;\; y = -\frac{5}{3}. \] Therefore we come to \emph{Kolmogorov's\ ``$5/3$'' Law}: \[ E(k) \approx C \epsilon^{2/3}k^{-5/3}. \] As simple as that. Basically, I reproduced here the stuff which I first learnt in one of the improvised lectures of my physics teacher at a secondary school, Anatoly Mikhailovich Trubachov -- he derived the ``$5/3$'' Law as one of his examples of the usefulness of dimensional analysis. It is claimed that people like Enrico Fermi, Stanislaw Ulam (co-inventor, with Edward Teller, of the American H Bomb), or Andrei Sakharov could do dimensional analysis off the top of their heads and use it for producing quick, on-the-hoof estimates of various physical quantities or qualitative description of physical processes. By my time it became a part of mainstream culture in physics -- it could be explained to schoolchildren. It is so much simpler than biology\dots \section*{A comment on bibliography} This paper is not a systematic survey; bibliographic references are relatively random and serve only for illustrative purposes. \end{document}
\begin{document} \markboth{V. Aiello, D. Guido, T. Isola}{Spectral triples on irreversible $C^*$-dynamical systems} \title{Spectral triples on irreversible $C^*$-dynamical systems} \author{Valeriano Aiello} \address{Mathematisches Institut, Universit\"at Bern, Alpeneggstrasse 22, 3012 Bern, Switzerland \\ [email protected]} \author{Daniele Guido} \address{ Dipartimento di Matematica, Universit\`a di Roma ``Tor Vergata'', I--00133 Roma, Italy \\ [email protected]} \author{Tommaso Isola} \address{ Dipartimento di Matematica, Universit\`a di Roma ``Tor Vergata'', I--00133 Roma, Italy \\ [email protected]} \maketitle \begin{abstract} Given a spectral triple on a $C^*$-algebra $\mathcal A$ together with a unital injective endomorphism $\alpha$, the problem of defining a suitable crossed product $C^*$-algebra endowed with a spectral triple is addressed. The proposed construction is mainly based on the works of Cuntz and \cite{Skalski}, and on our previous papers \cite{AiGuIs01,AGI3}. The embedding of $\alpha(\mathcal A)$ in $\mathcal A$ can be considered as the dual form of a covering projection between noncommutative spaces. A main assumption is the expansiveness of the endomorphism, which takes the form of the local isometricity of the covering projection, and is expressed via the compatibility of the Lip-norms on $\mathcal A$ and $\alpha(\mathcal A)$. \end{abstract} \keywords{Crossed product, spectral triple, noncommutative coverings, Lip-semiboundedness} \ccode{Mathematics Subject Classification 2010: 58B34, 46LXX, 47L65.} \section{Introduction} How to promote a spectral triple on an algebra to a spectral triple on a crossed product $C^*$-algebra has been the subject of various papers \cite{Skalski,Paterson,BMR,GaGr,IoMa}; the same has been recently done for the structure of compact quantum metric spaces \cite{KaKy}. 
The aim of this paper is to tackle the following question: is it possible to extend the construction of a spectral triple on a crossed product $C^*$-algebra based on a spectral triple on the base algebra to the case of crossed products with a single endomorphism? Even though we do not yet have a general answer to this problem, we are able to propose a procedure -- some steps of which can be completely described, while for others we can give several examples -- which explains what we expect to be the general case. Before describing our plan, we draw attention to a feature of our construction, namely we more or less explicitly assume that our endomorphism is in a sense expansive, a notion which has been often considered both in the commutative and in the noncommutative case, see e.g. \cite{DGMW}. Such a property has important consequences: the compact resolvent property for the Dirac operator forces the spectral triple on the crossed product to be semifinite, and the bounded commutator property requires a reduction of the crossed product $C^*$-algebra, namely a new definition of crossed product by an endomorphism. Indeed, even though there are now various notions of crossed product of a $C^*$-algebra with an endomorphism, see e.g. \cite{Murphy,Exel,KwLe}, we essentially follow a path outlined by Cuntz \cite{Cuntz} and then further developed by Stacey \cite{StaceyCrossed}, but we are forced to adapt it to the case of expansive endomorphisms. According to Cuntz, given a $C^*$-algebra ${\mathcal A}$ together with a unital injective endomorphism $\alpha$, one constructs a direct system of $C^*$-algebras ${\mathcal A}_n$ with endomorphisms $\alpha_n$, whence the direct limit $C^*$-algebra ${\mathcal A}_\infty$ is obtained. 
The key point is that the endomorphism $\alpha$ of ${\mathcal A}$ becomes an automorphism $\alpha_\infty$ on ${\mathcal A}_\infty$, so that one may define the crossed product ${\mathcal A}\rtimes_\alpha{\mathbb N}$ as the crossed product ${\mathcal A}_\infty\rtimes_{\alpha_\infty}{\mathbb Z}$. Let us note that this definition gives back the original algebra when $\alpha$ is an automorphism. The first and second steps of our construction have been studied in \cite{AiGuIs01,AGI3}, where one assumes that a spectral triple ${\mathcal T}$ on ${\mathcal A}$ is given. Let us observe that unital injective endomorphisms of a $C^*$-algebra ${\mathcal A}$ can be seen as noncommutative self-coverings of the underlying noncommutative space; the first step is then to endow any of the $C^*$-algebras ${\mathcal A}_n$ described above with a spectral triple ${\mathcal T}_n$ which makes the self-covering locally isometric or, equivalently, such that the Lip-norms induced by the Dirac operators are compatible with the connecting maps (this property can and will be weakened in some cases, cf. Section \ref{UHF}). This means that the sequence of covering spaces consists of dilated copies of the original space. This request is the reason for the expansivity mentioned above. Even if we do not give a general procedure for this step, this is not a difficult task in all the examples considered in \cite{AiGuIs01,AGI3}. The second step consists of constructing a spectral triple ${\mathcal T}_\infty$ on the direct limit ${\mathcal A}_\infty$ which is in some sense naturally associated with the original spectral triple on ${\mathcal A}$. We note here that the algebra ${\mathcal A}_\infty$ can be naturally seen as the solenoid algebra associated with the pair $({\mathcal A},\alpha)$, see \cite{AiGuIs01,AGI3,DGMW,LP2} for related constructions. 
In the abelian case, an intrinsic notion of solenoid, called compact universal cover, has been studied in \cite{Plaut} in great generality. Coming back to ${\mathcal T}_\infty$, we wish to define it as a suitable limit of the triples ${\mathcal T}_n$ on ${\mathcal A}_n$. This step is far from being obvious, firstly because there is no general procedure to define a limit of a sequence of spectral triples (however in some circumstances one may follow \cite{FloGho}), secondly because the situations we consider are quite different, ranging from regular coverings associated with an action of an abelian group to (possibly ramified) coverings with trivial group of deck transformations. Examples illustrating this step are contained in \cite{AiGuIs01,AGI3} and briefly described below. In all cases, the coverings becoming wider and wider, the spectra of the Dirac operators become more and more closely packed, so that the limit no longer has compact resolvent. However, a corresponding rescaling of the traces gives rise to a (semicontinuous semifinite) trace on a suitable $C^*$-algebra ${\mathcal B}$ of geometric operators, which contains ${\mathcal A}_\infty$ and the resolvents of the limiting Dirac operator, finally producing a semifinite spectral triple on ${\mathcal A}_\infty$. This means in particular that the semifiniteness property is true already at the level of ${\mathcal A}_\infty$, and therefore determines the analogous semifiniteness property for the spectral triple on the crossed product. The third and final step, which is the main object of this paper, consists in defining a new kind of crossed product of a $C^*$-algebra w.r.t. an endomorphism, which can be seen as a variant of the crossed product considered by Cuntz in \cite{Cuntz} and Stacey in \cite{StaceyCrossed}, and which turns out to be tailored to accommodate a spectral triple in the case of expansive endomorphisms. 
The notion of this new crossed product with an endomorphism is given in Definition \ref{1.1}. On the one hand it is a universal object, therefore defines a unique object up to isomorphisms, on the other hand, as shown in Theorem \ref{prop:crossedProd}, it coincides with a reduction by a projection of the $C^*$-algebra crossed product defined in \cite{StaceyCrossed}, Proposition 1.13. While the latter is nothing else than the crossed product of ${\mathcal A}_\infty$ with ${\mathbb Z}$ w.r.t. $\alpha_\infty$, our notion can be considered as the crossed product of ${\mathcal A}_\infty$ with ${\mathbb N}$ w.r.t. $\alpha_\infty$. Indeed, while Stacey's crossed product with an endomorphism reduces to the usual crossed product for an automorphism $\alpha$, ours produces a ``corner'' of it, in such a way that only positive powers of $\alpha$ are implemented. The advantage of such a choice is to allow the weakening of the request of metric equicontinuity (Lip-boundedness in our paper) of \cite{Skalski}, which, for an action $\alpha$ of ${\mathbb Z}$ and a Lipschitz element $a$ reads $\displaystyle \sup_{n\in{\mathbb Z}} L(\alpha^{-n}(a)) < \infty$ and makes sense for automorphisms, to a condition on $\alpha$ that we call Lip-semiboundedness, namely $\displaystyle \sup_{n\in{\mathbb N}} L(\alpha^{-n}(a)) < \infty$. More precisely, in Section \ref{subsec:CrossedProd}, we first generalize the construction of a spectral triple on a crossed product described in \cite{Skalski} to the case of a semifinite spectral triple, maintaining the Lip-boundedness assumption, and then modify it by replacing the crossed product of Cuntz-Stacey with our crossed product, and noting that in this case the request of the endomorphism being Lip-semibounded is sufficient to guarantee the bounded commutator property of the spectral triple, cf. 
Theorem \rhoef{triple-cross-prod-N}. Moreover, such theorem shows that the metric dimension of the crossed product spectral triple equals the metric dimension of the base triple increased by 1. On the same grounds, a theory for the action of suitable semigroups (e.g. ${\muathbb N}^k$) can be established, but this will not be discussed here. In the last section of this paper we show that the self-coverings considered in \cite{AiGuIs01,AGI3} satisfy the Lip-semiboundedness condition, hence give rise to a semifinite spectral triple on the crossed product. The first example deals with the self-covering of a $p$-torus, which is a regular covering. Given a purely expanding integer valued matrix $B$, the covering projection goes from ${\muathbb R}^p/B{\muathbb Z}^p$ to ${\muathbb R}^p/{\muathbb Z}^p$ and the canonical Dirac operator on the covering makes the covering projection locally isometric. A natural embedding of the $C^*$-algebra ${\muathcal A}_n$ in ${\muathcal B}({\muathcal H}_0)\otimes M_{{\muolt}^n}({\muathbb C})$ gives rise to the embedding of the direct limit $C^*$-algebra ${\muathcal A}_\infty$ in ${\muathcal B}({\muathcal H}_0)\otimes \muathrm{UHF}_{\muolt}$, which is the algebra ${\muathcal B}$ mentioned above, where $r=|\delta} \def\D{\Deltaet(B)|$ and $\muathrm{UHF}_r$ denotes the infinite tensor product of $M_r({\muathbb C})$. Moreover, the Dirac operators $D_n$ converge in the norm resolvent sense to a Dirac operator affiliated with ${\muathcal B}({\muathcal H}_0)\otimes \muathrm{UHF}_{\muolt}$. This structure produces a semifinite spectral triple on ${\muathcal A}_\infty$, as shown in \cite{AiGuIs01}. Theorem \rhoef{teo-p-toro} shows that the condition of Lip-semiboundedness is satisfied, hence we get a semifinite spectral triple on our crossed product with ${\muathbb N}$. The second example treats the case of regular noncommutative coverings of the rational rotation algebra with abelian group of deck transformations as defined in \cite{AiGuIs01}. 
The procedure and the results are essentially the same as the previous example, but the condition $r \equiv_q \pim 1$ has to be further assumed in order to get a self-covering. The third example concerns the UHF-algebra with the covering map given by the shift endomorphism and the spectral triple described in \cite{Chris}. In this case the Lip-norms given by the spectral triples are not compatible, namely $\|[D_n,\alphalpha^n(a)]\|\nue\|[D_0,a]\|$ for $a$ Lipschitz in ${\muathcal A}_0$, however $\|[D_{n+p},\alphalpha^p(a)]\|$ is bounded in $p$ (indeed converges) for any Lipschitz element in ${\muathcal A}_n$. Again we show that the condition of Lip-semiboundedness is satisfied, cf. Theorem \rhoef{UHFcrossedprod}. The fourth and last example describes the crossed product associated with a ramified covering of the fractal called Sierpi\'nski gasket. Such covering is not given by an action of a group of deck transformations. Here the spectral triple on ${\muathcal A}$ is the one described in \cite{GuIs16}, and the spectral triples on ${\muathcal A}_n$ make the covering maps locally isometric. The $C^*$-algebra ${\muathcal B}$ containing both ${\muathcal A}_\infty$ and the resolvents of $D_\infty$ is an algebra of geometric operators acting on the $\ell^2$ space on the edges of the infinite Sierpi\'nski gasket with one boundary point \cite{Tep}. The proof of the condition of Lip-semiboundedness is contained in Theorem \rhoef{teo-gasket}. In all cases, by Theorem \rhoef{triple-cross-prod-N}, the spectral triples are finitely summable and their metric dimension is equal to the metric dimension of ${\muathcal T}$ plus 1, namely it is the sum of the metric dimension of ${\muathcal T}$ and the growth of ${\muathbb N}$. 
Finally, we mention that even though in all of our examples the functional given by the norm of the commutator with the Dirac operator is a Lip-norm in the sense of Rieffel \cite{Rieffel99} on ${\mathcal A}$, such a property does not hold for the spectral triple on the crossed product. In fact any distance on the state space of a unital $C^*$-algebra inducing the weak$^*$-topology should necessarily be bounded, and this is not the case for our construction. The reason is that the expansiveness of the endomorphism $\alpha$ produces larger and larger (quantum) covering spaces and eventually an unbounded solenoid space. This property leads to an analogous unboundedness for the distance on the state space of the crossed product $C^*$-algebra. \section{Crossed products for $C^*$-algebras} \subsection{Preliminaries} \textbf{Inductive limit}. We begin by recalling the construction of the inductive limit $C^*$-algebra, due to Takeda \cite{Takeda}, for the particular case of interest in this paper, to fix some notation. Let ${\mathcal A}$ be a unital $C^*$-algebra, $\alpha\in \End({\mathcal A})$ an injective, unital $*$-endomorphism. Consider the following inductive system \begin{equation} \label{eq:CstarIndLim1} \begin{CD} A_0 @>{\varphi_0}>> A_1 @>{\varphi_1}>> \cdots \end{CD} \end{equation} where, for all $n\in{\mathbb N}=\{0,1,2,\ldots\}$, $A_n={\mathcal A}$, $\varphi_n=\alpha$, and define, for $m<n$, $\varphi_{nm} \colon A_m \to A_n$ by $\varphi_{nm} := \varphi_{n-1}\circ \cdots \circ \varphi_m \equiv \alpha^{n-m}$, and $\varphi_{mm}:=\id$. 
Consider the direct product $\pirod_{n=0}^\infty A_n$, with pointwise operations, and set $$ A_\infty := \lambda} \def\La{\Lambdaeft\{ (a_n) \in \pirod_{n=0}^\infty A_n : \exists m\in{\muathbb N} \tauextrm{ such that } a_n=\varphi_{nm}(a_m)= \alpha^{n-m}(a_m), n\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq m \rhoight\}/\!\varsigmagma} \def\S{\Sigmaim \; ,$$ where $(a_n)\varsigmagma} \def\S{\Sigmaim(b_n) \iff a_n=b_n$ for all large enough $n$. Then, $A_\infty$ is a $^*$-algebra. For all $n\in{\muathbb N}$, define $\varphi_{\infty n}: a\in A_n\muapsto \varphi_{\infty n}(a)\in A_\infty$, where $\varphi_{\infty n}(a) \equiv (a_k)$, and $$ a_k := \betaegin{cases} 0, & k<n,\\ \varphi_{kn}(a)=\alpha^{k-n}(a), & k\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq n. \end{cases} $$ We can introduce a norm $p$ on $A_\infty$ given by $$ p(a):= \lambda} \def\La{\Lambdaimsup\lambda} \def\La{\Lambdaimits_{n\tauo\infty} \Arrowvert \varphi_{nm}(a_m) \Arrowvert = \nuorm{a_m}\; , $$ if $a=\varphi_{\infty m}(a_m)$, which is independent of the representative, and is a $C^*$-norm. Upon completion, we get the desired inductive limit $C^*$-algebra, which is denoted ${\muathcal A}_\infty \equiv \varinjlim A_n$. \varsigmagma} \def\S{\Sigmamallskip \tauextbf{Crossed product}. Let us recall the definition of the crossed product by an automorphism, in the case of unital $C^*$-algebras, to fix some notation. Let ${\muathcal A}$ be a unital $C^*$-algebra, $\alpha \in \Aut({\muathcal A})$ an automorphism. Denote by $C_c({\muathcal A},{\muathbb Z},\alpha)$ the $^*$-algebra of functions $f: {\muathbb Z} \tauo {\muathcal A}$ with finite support, pointwise addition and scalar multiplication, with product $(fg)(n):= \varsigmagma} \def\S{\Sigmaum_{k\in{\muathbb Z}} f(k) \alpha^k(g(n-k))$, and involution $f^*(n):= \alpha^n(f(-n)^*)$, $f,g\in C_c({\muathcal A},{\muathbb Z},\alpha)$, $n\in{\muathbb Z}$. 
Define a norm on $C_c({\muathcal A},{\muathbb Z},\alpha)$ by $\Arrowvert f \Arrowvert_1 := \varsigmagma} \def\S{\Sigmaum_{n\in{\muathbb Z}} \Arrowvert f(n)\Arrowvert$, and denote by $\ell^1({\muathcal A},{\muathbb Z},\alpha)$ the Banach $^*$-algebra obtained by completing $C_c({\muathcal A},{\muathbb Z},\alpha)$ with respect to this norm. A different description of $\ell^1({\muathcal A},{\muathbb Z},\alpha)$ is obtained by introducing the functions $\delta} \def\D{\Deltad_n(k):= \delta} \def\D{\Delta_{k, n}$. Then, $\ell^1({\muathcal A},{\muathbb Z},\alpha)$ is the set of all sums $\varsigmagma} \def\S{\Sigmaum_{n\in{\muathbb Z}} a_n \delta} \def\D{\Deltad_n$, with $a_n\in{\muathcal A}$, for all $n\in{\muathbb Z}$, and $\varsigmagma} \def\S{\Sigmaum_{n\in{\muathbb Z}} \Arrowvert a_n\Arrowvert <+\infty$. Let now $\pii$ be a representation of ${\muathcal A}$ on ${\muathcal H}$, $V$ a unitary operator on ${\muathcal H}$, such that $\pii(\alpha(a)) = V \pii(a) V^*$, $a\in{\muathcal A}$. The triple $({\muathcal H}, \pii,V)$ is called a covariant representation of $({\muathcal A},\alpha)$. Then, the integrated form of $({\muathcal H}, \pii,V)$ is the representation $\pii\rhotimes V$ of $C_c({\muathcal A},{\muathbb Z},\alpha)$ on ${\muathcal H}$ given by \betaegin{eqnarray} \lambda} \def\La{\Lambdaabel{IntegratedForm} \pii\rhotimes V(\varsigmagma} \def\S{\Sigmaum_{n\in{\muathbb Z}} a_n\delta} \def\D{\Deltad_n) &:=& \varsigmagma} \def\S{\Sigmaum_{n\in{\muathbb Z}} \pii(a_n) V^n. \end{eqnarray} It can be proved (\cite{Ped} Proposition 7.6.4) that there is a bijection between the set of non-degenerate covariant representations $({\muathcal H}, \pii,V)$ of $({\muathcal A},\alpha)$ on a Hilbert space ${\muathcal H}$, and the set of non-degenerate continuous representations of $\ell^1({\muathcal A},{\muathbb Z},\alpha)$ on ${\muathcal H}$. 
Define the universal representation $\pii_u$ of $\ell^1({\muathcal A},{\muathbb Z},\alpha)$ to be the direct sum of all non-degenerate continuous representations of $\ell^1({\muathcal A},{\muathbb Z},\alpha)$ on Hilbert spaces. The crossed product of ${\muathcal A}$ by the action $\alpha$ of ${\muathbb Z}$ is the $C^*$-algebra ${\muathcal A}\rhotimes_\alpha{\muathbb Z}$ obtained as the norm closure of $\pii_u(\ell^1({\muathcal A},{\muathbb Z},\alpha))$. \varsigmagma} \def\S{\Sigmamallskip \tauextbf{Reduced crossed product}. Since ${\muathbb Z}$ is an amenable group, a different description (\cite{Ped}, 7.7.7) of the crossed product (called the reduced crossed product, in the case of non amenable groups) can be given. Let $\pii$ be a faithful, non-degenerate representation of ${\muathcal A}$ on ${\muathcal H}$, set $\widetilde{\muathcal H} := \ell^2({\muathbb Z},{\muathcal H}) \equiv \{ \xii : {\muathbb Z}\tauo{\muathcal H} | \varsigmagma} \def\S{\Sigmaum_{n\in{\muathbb Z}} \Arrowvert \xii(n) \Arrowvert^2 <+\infty \}$, and, for $n\in{\muathbb Z}$, $a\in{\muathcal A}$, $\xii\in\widetilde{\muathcal H}$, \betaegin{align*} (\upsilonu \xii)(n) := \xii(n-1), \qquad (\widetilde{\pii}(a)\xii )(n) := \pii( \alpha^{-n}(a) )(\xii(n)). \end{align*} Observe that, $\widetilde{\pii}(\alpha(a))= \upsilonu\widetilde{\pii}(a) \upsilonu^* $, $a\in {\muathcal A}$. Therefore, $(\widetilde{\muathcal H}, \widetilde{\pii},\upsilonu)$ is a covariant representation of $({\muathcal A},{\muathbb Z},\alpha)$, and the representation $\widetilde{\pii} \rhotimes \upsilonu$ is called a regular representation of $\ell^1({\muathcal A},{\muathbb Z},\alpha)$. In particular, if $a = \varsigmagma} \def\S{\Sigmaum_{n\in{\muathbb Z}} a_n \delta} \def\D{\Deltad_n \in C_c({\muathcal A},{\muathbb Z},\alpha)$, then $(\widetilde{\pii} \rhotimes \upsilonu (a) \xii )(n) = \varsigmagma} \def\S{\Sigmaum_{k\in{\muathbb Z}} \pii( \alpha^{-n}(a_k)) (\xii(n-k))$, $n\in{\muathbb Z}$. 
Define the universal regular representation $\lambda_u$ of $\ell^1({\mathcal A},{\mathbb Z},\alpha)$ to be the direct sum of all regular representations of $\ell^1({\mathcal A},{\mathbb Z},\alpha)$ on Hilbert spaces. The (reduced) crossed product of ${\mathcal A}$ by the action $\alpha$ of ${\mathbb Z}$ is the $C^*$-algebra obtained as the norm closure of $\lambda_u(\ell^1({\mathcal A},{\mathbb Z},\alpha))$. Observe that (\cite{Ped}, 7.7.4), if $\pi_u$ is the universal representation of ${\mathcal A}$, then ${\mathcal A}\rtimes_\alpha{\mathbb Z}$ coincides with the norm closure of $\widetilde{\pi_u}\rtimes U (\ell^1({\mathcal A},{\mathbb Z},\alpha))$. Therefore, we get ${\mathcal A} \rtimes_{\alpha} {\mathbb Z} = \langle\, \widetilde{\pi_u}({\mathcal A}),U \,\rangle$, where $\langle\, \widetilde{\pi_u}({\mathcal A}),U \,\rangle$ stands for the $C^*$-algebra generated by $\widetilde{\pi_u}({\mathcal A})$ and $U$. \smallskip \textbf{Lift of a spectral triple to a crossed product}. First of all we recall the definition of spectral triples. \begin{definition}\label{deftripla} An odd spectral triple $({\mathcal L},{\mathcal H},D)$ consists of a Hilbert space ${\mathcal H}$, an algebra ${\mathcal L}$ acting (faithfully) on it, and a self-adjoint operator $D$ on the same Hilbert space such that $a\,\mathrm{dom}(D) \subset \mathrm{dom}(D)$ and $[D, a]$ is bounded for any $a\in{\mathcal L}$, and with $D$ having compact resolvent. A spectral triple is said to be even if there exists a self-adjoint unitary operator $\Gamma$ such that $\pi(a)\Gamma = \Gamma\pi(a)$, $\forall a\in{\mathcal A}$, and $D\Gamma=-\Gamma D$. 
\end{definition} In \cite{BMR}, Bellissard, Marcolli and Reihani show how to lift a spectral triple from a unital $C^*$-algebra ${\mathcal A}$, endowed with an automorphism $\alpha$, to the crossed product ${\mathcal A}\rtimes_\alpha {\mathbb Z}$. Their setting is generalised in (\cite{Skalski}, Theorem 2.8) to the case of the action of a discrete group. In the particular case of an automorphism, one obtains \begin{definition}\label{equi} Let ${\mathcal A}$ be a unital $C^*$-algebra, $\alpha\in \Aut({\mathcal A})$ a unital automorphism, and $({\mathcal L},{\mathcal H},D)$ a spectral triple on ${\mathcal A}$ such that $\alpha({\mathcal L})\subset{\mathcal L}$. The automorphism is said to be Lip-bounded if \[ \sup_{n\in{\mathbb Z}} \| [D,\alpha^{-n}(a)] \| < \infty, \qquad \forall a\in{\mathcal L}. \] \end{definition} The previous notion was introduced in \cite{Skalski}, where it is called the metric equicontinuity of the action. \begin{theorem} Let ${\mathcal A}$ be a unital $C^*$-algebra, $({\mathcal L},{\mathcal H},D)$ an odd spectral triple on ${\mathcal A}$, and $\alpha\in \Aut({\mathcal A})$ a unital Lip-bounded automorphism. 
Set \begin{flalign*} & {\mathcal L}_\rtimes := {}^*\mathrm{alg}(\widetilde{\pi_u}({\mathcal L}),U), & \qquad & {\mathcal H}_\rtimes := {\mathcal H} \otimes \ell^2({\mathbb Z}) \otimes {\mathbb C}^2, & \\ & D_\rtimes := D \otimes I \otimes \varepsilon_1 + I \otimes D_{\mathbb Z} \otimes \varepsilon_2, & \qquad & \Gamma_\rtimes := I \otimes I \otimes \varepsilon_3, & \end{flalign*} where ${}^*\mathrm{alg}(\widetilde{\pi_u}({\mathcal L}),U)$ is the $^*$-algebra generated by $\widetilde{\pi_u}({\mathcal L})$ and $U$, $(D_{\mathbb Z} \xi)(n) := n\xi(n)$, $\forall \xi\in\ell^2({\mathbb Z})$, and \begin{align}\label{pauli} \varepsilon_1 := \begin{pmatrix} 0 & 1 \\ 1 & 0 \end{pmatrix}, \; \varepsilon_2 := \begin{pmatrix} 0 & -i \\ i & 0 \end{pmatrix}, \; \varepsilon_3 := \begin{pmatrix} 1 & 0\\ 0 & -1 \end{pmatrix} \end{align} are the Pauli matrices. \smallskip Then $({\mathcal L}_\rtimes,{\mathcal H}_\rtimes,D_\rtimes,\Gamma_\rtimes)$ is an even spectral triple on ${\mathcal A}\rtimes_\alpha {\mathbb Z}$. \end{theorem} \begin{remark} In \cite{KaKy} a more general notion than Lip-boundedness of an automorphism is introduced, there called quasi-isometricity (see their Definition 4.4), and it is shown (in Example 4.5) that quasi-isometric automorphisms arise naturally in differential geometry. \end{remark} \subsection{A new definition of crossed product by an endomorphism} There are many different definitions of the crossed product with an endomorphism, see e.g. \cite{Murphy}, \cite{Exel}, and the very general one given in \cite{KwLe}. We will work with a modification of the one introduced in \cite{Cuntz,StaceyCrossed}. Indeed, Cuntz (\cite{Cuntz}, pag. 
101) considers the inductive sequence \eqref{eq:CstarIndLim1}, and its inductive limit C*-algebra ${\muathcal A}_\infty$, which is endowed with an automorphism $\alpha_\infty$, uniquely defined by the diagram \eqref{eq:CstarIndLim2} \betaegin{equation} \lambda} \def\La{\Lambdaabel{eq:CstarIndLim2} \xiymatrix{ & {\muathcal A} \alphar[rr]^{ \alpha } \alphar[dd]^{\alpha} && {\muathcal A} \alphar[rr]^{ \alpha } \alphar[dd]^{\alpha} && {\muathcal A} \alphar[rr]^{ \alpha } \alphar[dd]^{\alpha} && {\muathcal D}ots \alphar[r] & {\muathcal A}_\infty \alphar[dd]^{\alpha_\infty} \\ &&&&&&&&\\ & {\muathcal A} \alphar[rr]^{ \alpha } \alphar[uurr]^{id} && {\muathcal A} \alphar[rr]^{ \alpha } \alphar[uurr]^{id} && {\muathcal A} \alphar[rr]^{ \alpha } \alphar[uurr]^{id} && {\muathcal D}ots \alphar[r] & {\muathcal A}_\infty } \end{equation} where the diagonal maps define the inverse $\alpha_\infty^{-1}$. Then Cuntz defined ${\muathcal A}\rhotimes_\alpha {\muathbb N} := q({\muathcal A}_\infty \rhotimes_{\alpha_\infty} {\muathbb Z})q$, where $q\in{\muathcal A}_\infty$ is the image of $1\in{\muathcal A}$, and turns out to be $q=1$ in our case, since $\alpha$ is unital. Subsequently, Stacey \cite{StaceyCrossed} characterised ${\muathcal A}\rhotimes_\alpha {\muathbb N}$ as the solution of a universal problem. In this paper, our interest is in lifting suitable spectral triples from $({\muathcal A},\alpha)$, where $\alpha\in \End({\muathcal A})$, to ${\muathcal A} \rhotimes_\alpha {\muathbb N}$. Since we already know how to lift a spectral triple from $({\muathcal A},\alpha)$ to $({\muathcal A}_\infty,\alpha_\infty)$, at least in some examples \cite{AiGuIs01,AGI3}, and the lift from $({\muathcal A}_\infty,\alpha_\infty)$ to ${\muathcal A}_\infty \rhotimes_{\alpha_\infty} {\muathbb Z}$ is well known \cite{BMR}, we found only natural to use Cuntz' definition of the crossed product ${\muathcal A}\rhotimes_\alpha {\muathbb N}$. 
Unfortunately, the spectral triples $({\mathcal L}_\infty,{\mathcal H}_\infty,D_\infty)$ on $({\mathcal A}_\infty,\alpha_\infty)$ we constructed in \cite{AiGuIs01,AGI3} satisfy, besides $\alpha_\infty({\mathcal L}_\infty)\subset {\mathcal L}_\infty$, only $\sup_{n\in{\mathbb N}} \| [D_\infty,\alpha_\infty^{-n}(a)] \| < \infty$, $\forall a\in{\mathcal L}_\infty$. This fact forces us to introduce a modification in Cuntz' procedure, namely to consider ${\mathcal A} \rtimes_\alpha{\mathbb N} := p({\mathcal A}_\infty \rtimes_{\alpha_\infty} {\mathbb Z})p$, where $p \in {\mathcal B}(\ell^2({\mathbb Z},{\mathcal H}_u))$ is the projection on the non-negative ``frequencies'' \begin{equation} \label{eq:pProjection} (p \xi)(n) = \begin{cases} \xi(n), & n\geq 0,\\ 0, & n<0. \end{cases} \end{equation} Actually, we prefer to define our version of the crossed product by an endomorphism, in the same spirit as Stacey, as the solution to a universal problem, see Definition \ref{1.1}, and then prove in Theorem \ref{prop:crossedProd} that it coincides with $p({\mathcal A}_\infty \rtimes_{\alpha_\infty} {\mathbb Z})p$. \begin{definition}\label{Def:CovRep} Let ${\mathcal A}$ be a unital $C^*$-algebra, $\alpha\in \End({\mathcal A})$ a $^*$-endomorphism. Let $\pi\colon{\mathcal A}\to {\mathcal B}({\mathcal H})$ be a representation, $W\in {\mathcal B}({\mathcal H})$ an isometry. We say that $({\mathcal H},\pi,W)$ is a covariant representation of $({\mathcal A},\alpha)$ on ${\mathcal H}$, if \begin{align*} \pi(\alpha(a))W & = W\pi(a), \quad a\in {\mathcal A},\\ W^kW^{*k} & \in \pi({\mathcal A})', \quad k\in{\mathbb N}. \end{align*} \end{definition} \begin{definition} \label{1.1} Let ${\mathcal A}$ be a unital $C^*$-algebra, $\alpha\in \End({\mathcal A})$ an injective, unital $*$-endomorphism. 
The crossed product of ${\mathcal A}$ with ${\mathbb N}$ by $\alpha$ is a unital $C^*$-algebra ${\mathcal B}$, together with a unital $^*$-monomorphism $\iota_{\mathcal A}\colon {\mathcal A}\to{\mathcal B}$, and an isometry $t\in{\mathcal B}$, such that \begin{enumerate} \item[$(1)$] ${\mathcal B}$ is the $C^*$-algebra generated by $\iota_{\mathcal A}({\mathcal A})$ and $t$, \item[$(2)$] $\iota_{\mathcal A}(\alpha(a))t = t\iota_{\mathcal A}(a)$, $a\in{\mathcal A}$, \item[$(3)$] $t^k(t^*)^k$ commutes with $\iota_{\mathcal A}({\mathcal A})$, $k\in{\mathbb N}$, \item[$(4)$] for every covariant representation $({\mathcal H},\pi,W)$ of $({\mathcal A},\alpha)$, there exists a non-degenerate representation $\widehat{\pi}$ of ${\mathcal B}$ on ${\mathcal H}$, such that $\widehat{\pi}\circ \iota_{\mathcal A} = \pi$, and $\widehat{\pi}(t)=W$. \end{enumerate} \end{definition} We denote by ${\mathcal A} \rtimes_\alpha {\mathbb N}$ the above algebra ${\mathcal B}$. We have defined our crossed product as a universal object, which guarantees its uniqueness. For its existence, we will prove in Proposition \ref{prop:crossedProd} that it is a reduction by a projection of the $C^*$-algebra crossed product defined by Cuntz in \cite{Cuntz}. \subsection{Existence of the universal object} Let us now consider the commutative diagram \eqref{eq:CstarIndLim2}. It follows from (\cite{WO}, Theorem L.2.1) that the vertical maps determine a $^*$-homomorphism $\alpha_\infty\colon{\mathcal A}_\infty\to{\mathcal A}_\infty$, and the diagonal maps define the inverse of $\alpha_\infty$. \begin{proposition} \label{prop:exists} Let ${\mathcal A}$ be a unital $C^*$-algebra, $\alpha$ a unital, injective $^*$-endomorphism of ${\mathcal A}$. Then, there exists a covariant representation $({\mathcal H},\pi,W)$ of $({\mathcal A},\alpha)$. 
\end{proposition} \betaegin{proof} Let $\rhoepr$ be a faithful representation of ${\muathcal A}_\infty \rhotimes_{\alpha_\infty} {\muathbb Z}$ on a Hilbert space $H$. If $\pii_u$ is the universal representation of ${\muathcal A}_\infty$, let $\wt{\pii_u} : {\muathcal A}_\infty \tauo {\muathcal A}_\infty \rhotimes_{\alpha_\infty} {\muathbb Z}$, $\upsilonu \in {\muathcal U}( {\muathcal A}_\infty \rhotimes_{\alpha_\infty} {\muathbb Z})$ be such that ${\muathcal A}_\infty \rhotimes_{\alpha_\infty} {\muathbb Z} = \lambda} \def\La{\Lambdaangle \wt{\pii_u}({\muathcal A}_\infty), \upsilonu \rhoangle$, $\pii := \rhoepr \circ \wt{\pii_u} \circ \varphi_{\infty 0} : {\muathcal A} \tauo {\muathcal B}(H)$, which is a representation of ${\muathcal A}$ on $H$, and $W:= \rhoepr(\upsilonu) \in {\muathcal B}(H)$, which is a unitary operator acting on $H$. Moreover, for all $a\in{\muathcal A}$, $k\in{\muathbb N}$, by using that $\varphi_{\infty 0}\circ\alpha=\alpha_\infty \circ \varphi_{\infty 0}$ and $\wt{\pii_u}(\alpha_\infty(x))=\upsilonu\wt{\pii_u}(x)\upsilonu^*$, we get \betaegin{align*} \pii(\alpha(a)) W & = (\rhoepr\circ \wt{\pii_u} \circ \varphi_{\infty 0}(\alpha(a)) {\muathcal D}ot \rhoepr(\upsilonu) ) = \rhoepr( \wt{\pii_u} \circ \alpha_\infty \circ \varphi_{\infty 0}(a) {\muathcal D}ot \upsilonu ) \\ & = \rhoepr( \upsilonu {\muathcal D}ot \wt{\pii_u} \circ \varphi_{\infty 0}(a)) = (\rhoepr(\upsilonu)) (\rhoepr\circ \wt{\pii_u} \circ \varphi_{\infty 0}(a)) \\ & = W\pii(a), \\ W^kW^{*k} & = \rhoepr(\upsilonu^k \upsilonu^{*k}) = 1 \in \pii({\muathcal A})'. \end{align*} \end{proof} We now prove that any covariant representation of $({\muathcal A},\alpha)$ lifts to a covariant representation of $({\muathcal A}_\infty,\alpha_\infty)$. 
\betaegin{proposition} \lambda} \def\La{\Lambdaabel{prop:RepIndLim} Let ${\muathcal A}$ be a unital $C^*$-algebra, $\alpha$ a unital, injective $^*$-endomorphism of ${\muathcal A}$, and denote by ${\muathcal A}_\infty$ the $C^*$-algebra inductive limit of the inductive system \eqref{eq:CstarIndLim1}, and denote by $\alpha_\infty$ the automorphism of ${\muathcal A}_\infty$ induced by $\alpha$. Let $({\muathcal H},\pii,W)$ be a covariant representation of $(A,\alpha)$, and denote by ${\muathcal H}_\infty \equiv \varinjlim H_n$ the Hilbert space inductive limit of the inductive system \betaegin{equation} \lambda} \def\La{\Lambdaabel{HilbertIndLim} \betaegin{CD} H_0 @ > S_0 >> H_1 @ > S_1 >> {\muathcal D}ots \end{CD} \end{equation} where, for all $n\in{\muathbb N}$, $H_n:={\muathcal H}$, $S_n:= W$. Then, there exist $W_\infty\in{\muathcal U}({\muathcal H}_\infty)$, and a covariant representation $({\muathcal H}_\infty, \pii_\infty, W_\infty)$ of $({\muathcal A}_\infty,\alpha_\infty)$, such that \betaegin{align*} \pii_\infty\circ \varphi_{\infty n}(a) S_{\infty n} & = S_{\infty n} \pii(a), \quad n\in{\muathbb N}o, a\in{\muathcal A}, \\ W_\infty S_{\infty 0} & = S_{\infty 0} W, \end{align*} where $S_{\infty n}: \xii\in H_n \muapsto (\xii_k)\in {\muathcal H}_\infty$, $\xii_k:= \betaegin{cases} 0, & k<n, \\ W^{k-n}\xii, & k\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq n. 
\end{cases}$ \end{proposition} \betaegin{proof} Denote by $W_\infty$ the unitary operator on the inductive limit ${\muathcal H}_\infty \equiv \varinjlim H_n$ defined by the following diagram \betaegin{equation*} \xiymatrix{ & {\muathcal H} \alphar[rr]^{ W } \alphar[dd]^{ W } && {\muathcal H} \alphar[rr]^{ W } \alphar[dd]^{ W } && {\muathcal H} \alphar[rr]^{ W } \alphar[dd]^{ W } && {\muathcal D}ots \alphar[r] & {\muathcal H}_\infty \alphar[dd]^{ W_\infty } \\ &&&&&&&&\\ & {\muathcal H} \alphar[rr]^{ W } \alphar[uurr]^{id} && {\muathcal H} \alphar[rr]^{ W } \alphar[uurr]^{id} && {\muathcal H} \alphar[rr]^{ W } \alphar[uurr]^{id} && {\muathcal D}ots \alphar[r] & {\muathcal H}_\infty } \end{equation*} so that $W_\infty S_{\infty n} = S_{\infty,n-1}$, for all $n\in{\muathbb N}$, $n\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq 1$, and $W_\infty S_{\infty 0} = S_{\infty 0}W$. Introduce a map $\piiinf_0:{\muathcal A} \tauo {\muathcal B}({\muathcal H}_\infty)$ by \betaegin{align*} \piiinf_0(a) S_{\infty m}\xii := S_{\infty m} \pii(\alpha^m(a)) \xii, \quad a\in{\muathcal A}, m\in{\muathbb N}, \xii\in H_m \equiv {\muathcal H}, \end{align*} which is well defined, because, if $S_{\infty m}\xii = S_{\infty,m-1}\eta = S_{\infty m}W\eta$, then $\xii=W\eta$, and \betaegin{align*} S_{\infty m} \pii(\alpha^m(a)) \xii & = S_{\infty m} \pii(\alpha^m(a)) W\eta = S_{\infty m} W\pii(\alpha^{m-1}(a))\eta = S_{\infty,m-1} \pii(\alpha^{m-1}(a))\eta. \end{align*} Let us prove that $\piiinf_0$ is a representation of ${\muathcal A}$. Indeed, for $a,b\in{\muathcal A}$, we get, for all $m\in{\muathbb N}$, $\xii\in H_m$, \betaegin{align*} \piiinf_0(ab) S_{\infty m}\xii & = S_{\infty m} \pii(\alpha^m(ab)) \xii = S_{\infty m} \pii(\alpha^m(a)) \pii(\alpha^m(b)) \xii \\ & = \piiinf_0(a) S_{\infty m} \pii(\alpha^m(b)) \xii = \piiinf_0(a) \piiinf_0(b) S_{\infty m} \xii. 
\end{align*} Moreover, for $a\in{\muathcal A}_\infty$, $\xii,\eta\in{\muathcal H}$, $m,n\in{\muathbb Z}$, we get, if $n<m$, \betaegin{align*} (S_{\infty m}\xii, \piiinf_0(a)^*S_{\infty n}\eta) & = (\piiinf_0(a)S_{\infty m}\xii, S_{\infty n}\eta) = (S_{\infty m}\pii(\alpha^m(a))\xii, S_{\infty n}\eta) \\ & = (S_{\infty m}\pii(\alpha^m(a))\xii, S_{\infty m}S_{mn}\eta) = (\pii(\alpha^m(a))\xii, S_{mn}\eta) \\ & = (\xii, \pii(\alpha^m(a^*))W^{m-n}\eta) = (\xii, W^{m-n}\pii(\alpha^n(a^*))\eta) \\ & = (S_{\infty m}\xii, S_{\infty n}\pii(\alpha^n(a^*))\eta) = (S_{\infty m}\xii, \piiinf_0(a^*)S_{\infty n}\eta). \end{align*} Setting, for all $n\in{\muathbb N}$, $\piiinf_n := \tauextrm{Ad}(W_\infty^*)^n\circ\piiinf_0$, we get, for $m\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq n+1$, \betaegin{align*} \piiinf_{n+1}(\alpha(a)) S_{\infty m} & = (W_\infty^*)^{n+1}\piiinf_0(\alpha(a))W_\infty^{n+1} S_{\infty m} = (W_\infty^*)^{n+1} \piiinf_0(\alpha(a)) S_{\infty,m-n-1} \\ & = (W_\infty^*)^{n+1} S_{\infty,m-n-1} \pii(\alpha^{m-n}(a)) = (W_\infty^*)^{n} S_{\infty,m-n} \pii(\alpha^{m-n}(a)) \\ & = (W_\infty^*)^{n} \piiinf_0(a) S_{\infty,m-n} = (W_\infty^*)^{n} \piiinf_0(a) W_\infty^n S_{\infty m} = \piiinf_n(a) S_{\infty m}, \end{align*} so that $\piiinf_{n+1}(\alpha(a)) = \piiinf_n(a)$. Therefore, the following diagram commutes \betaegin{equation*} \betaegin{CD} A_0 @ > \varphi_0 >> A_1 @ > \varphi_1 >> A_2 @ > \varphi_2 >> {\muathcal D}ots @ >>> {\muathcal A}_\infty \\ @V \piiinf_0 VV @V \piiinf_1 VV @V \piiinf_2 VV @. @V \pii_\infty VV \\ {\muathcal B}({\muathcal H}_\infty) @ > \id >> {\muathcal B}({\muathcal H}_\infty) @> \id >> {\muathcal B}({\muathcal H}_\infty) @> \id >> {\muathcal D}ots @>>> {\muathcal B}({\muathcal H}_\infty) \end{CD} \end{equation*} so that there is a unique $^*$-homomorphism $\pii_\infty:{\muathcal A}_\infty\tauo{\muathcal B}({\muathcal H}_\infty)$ such that $\pii_\infty\circ \varphi_{\infty n} = \piiinf_n$, for all $n\in{\muathbb N}$. 
Therefore, for all $n\in{\muathbb N}$, $a\in{\muathcal A}$, we have \betaegin{align} \pii_\infty\circ \varphi_{\infty n}(a) S_{\infty n} & = \piiinf_n(a)S_{\infty n} = W_\infty^{*n}\piiinf_0(a) W_\infty^n S_{\infty n} = W_\infty^{*n}\piiinf_0(a) S_{\infty 0} \nuotag \\ & = W_\infty^{*n} S_{\infty 0} \pii(a) = S_{\infty n} \pii(a). \lambda} \def\La{\Lambdaabel{allaccia} \end{align} Finally, for all $n\in{\muathbb N}$, $n\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq 1$, $a\in A_n = {\muathcal A}$, we have \betaegin{align*} \pii_\infty\circ\alpha_\infty\circ\varphi_{\infty n}(a) & = \pii_\infty\circ\varphi_{\infty n}\circ\alpha(a) = \piiinf_n\circ\alpha(a) = \piiinf_{n-1}(a) \\ & = \tauextrm{Ad}(W_\infty) \circ \piiinf_n(a) = \tauextrm{Ad}(W_\infty) \circ \pii_\infty\circ \varphi_{\infty n} (a), \end{align*} so that $\pii_\infty\circ \alpha_\infty = \tauextrm{Ad}(W_\infty) \circ \pii_\infty$, that is $({\muathcal H}_\infty,\pii_\infty,W_\infty)$ is a covariant representation of $({\muathcal A}_\infty,\alpha_\infty)$. \end{proof} {\muathbb I}gskip We recall that in the construction of ${\muathcal A}_\infty \rhotimes_{\alpha_\infty} {\muathbb Z}$ we denoted by $\pii_u$ the universal representation of ${\muathcal A}_\infty$ on ${\muathcal H}_u$, so that ${\muathcal A}_\infty \rhotimes_{\alpha_\infty} {\muathbb Z} = \lambda} \def\La{\Lambdaangle\, \wt{\pii_u}({\muathcal A}_\infty),\upsilonu \,\rhoangle$. \nuoindent Define the projection $p\in {\muathcal B}( \ell^2({\muathbb Z},{\muathcal H}_u) )$ as in \eqref{eq:pProjection}, so that $p\wt{\pii_u}(a)=\wt{\pii_u}(a)p$, $a\in{\muathcal A}_\infty$, and set $t:= p\upsilonu p \equiv \upsilonu p$, so that $t^*t = p$, and $t\wt{\pii_u}(a) = \wt{\pii_u}(\alpha_\infty(a))t$, $a\in {\muathcal A}_\infty$. 
Set $\zeta_{\mathcal A}(a) := \wt{\pi_u}\circ \varphi_{\infty 0}(a)p$, which is a representation of ${\mathcal A}$ on $p\ell^2({\mathbb Z},{\mathcal H}_u)$, and denote by $C^*({\mathcal A},\alpha,{\mathbb N})$ the $C^*$-algebra generated by $\zeta_{\mathcal A}({\mathcal A})$ and $t$ on $p\ell^2({\mathbb Z},{\mathcal H}_u)$.

\begin{proposition} \label{prop:generate}
For any $a\in{\mathcal A}$, $k\in{\mathbb N}$, we have that
\begin{itemize}
\item[$(1)$] $\zeta_{\mathcal A}(\alpha(a))t = t\zeta_{\mathcal A}(a)$,
\item[$(2)$] $t^k(t^*)^k \zeta_{\mathcal A}(a) = \zeta_{\mathcal A}(a) t^k(t^*)^k$,
\item[$(3)$] $C^*({\mathcal A},\alpha,{\mathbb N}) \equiv \langle\, \zeta_{\mathcal A}({\mathcal A}), t \,\rangle = \langle\, p\wt{\pi_u}({\mathcal A}_\infty)p, pup \,\rangle = \langle\, t^{*m}\zeta_{\mathcal A}(a)t^n: a\in{\mathcal A}, m,n\in{\mathbb N} \,\rangle = \langle\, \zeta_{\mathcal A}(a)t^mt^{*n}: a\in{\mathcal A}, m,n\in{\mathbb N} \,\rangle$.
\end{itemize}
\end{proposition}

\begin{proof}
$(1)$ Indeed, for all $a\in{\mathcal A}$,
\begin{align*}
\zeta_{\mathcal A}(\alpha(a))t & = \wt{\pi_u}\circ\varphi_{\infty 0}\circ\alpha(a)t = \wt{\pi_u}\circ\alpha_\infty\circ\varphi_{\infty 0}(a)t = t\wt{\pi_u}\circ\varphi_{\infty 0}(a)p = t\zeta_{\mathcal A}(a).
\end{align*}

\noindent $(2)$ Indeed, since $t^k = u^kp = pu^kp$, we get
\begin{align*}
t^k(t^*)^k \zeta_{\mathcal A}(a) & = u^kp(u^*)^k \wt{\pi_u} \circ\varphi_{\infty 0}(a)p = u^kp \wt{\pi_u} \circ\alpha_\infty^{-k} \circ \varphi_{\infty 0}(a) (u^*)^kp \\
& = u^k \wt{\pi_u} \circ\alpha_\infty^{-k} \circ \varphi_{\infty 0}(a) p(u^*)^kp = \wt{\pi_u} \circ \varphi_{\infty 0}(a) u^k p(u^*)^kp \\
& = \wt{\pi_u} \circ \varphi_{\infty 0}(a) pu^k p(u^*)^kp = \zeta_{\mathcal A}(a) t^k(t^*)^k.
\end{align*}

\noindent $(3)$ Indeed, ${\mathcal A}_\infty = \overline{\mathrm{span}}\, \{ \varphi_{\infty m}(a) : a\in{\mathcal A}, m\in{\mathbb N} \}$, and
\begin{align*}
p\wt{\pi_u}\circ \varphi_{\infty m}(a)p & = p\wt{\pi_u}\circ \varphi_{\infty m}(a) u^{-m}u^{m}p = p u^{-m} \wt{\pi_u} \circ \alpha_\infty^m \circ\varphi_{\infty m}(a) u^{m} p \\
& = p u^{-m} \wt{\pi_u} \circ \varphi_{\infty m} \circ \alpha^m(a) u^{m}p = p u^{-m} \wt{\pi_u} \circ \varphi_{\infty 0}(a) u^{m}p = t^{*m} \zeta_{\mathcal A}(a) t^{m},
\end{align*}
so that $\langle\, p\wt{\pi_u}({\mathcal A}_\infty)p, pup \,\rangle = \langle\, t^{*m}\zeta_{\mathcal A}(a)t^n: a\in{\mathcal A}, m,n\in{\mathbb N} \,\rangle = \langle\, \zeta_{\mathcal A}(a)t^mt^{*n}: a\in{\mathcal A}, m,n\in{\mathbb N} \,\rangle = \langle\, \zeta_{\mathcal A}({\mathcal A}), t \,\rangle$.
\end{proof}

We want to prove that $C^*({\mathcal A},\alpha,{\mathbb N})$ is isomorphic to the crossed product of ${\mathcal A}$ with $\alpha$ by ${\mathbb N}$. Actually, property $(1)$ in Definition \ref{1.1} follows by definition, while properties $(2)$ and $(3)$ have been proved in Proposition \ref{prop:generate}. Unfortunately, the proof of property $(4)$ in Definition \ref{1.1} will force us to a long detour. First of all, we need a $C^*$-algebra which contains ${\mathcal A}_\infty\rtimes_{\alpha_\infty}{\mathbb Z}$ and a projection on the ``positive frequencies'' of ${\mathbb Z}$, and to which we can lift, in a canonical way, any representation of ${\mathcal A}_\infty$. We start with some preliminary results.
Denote by ${\mathbb Z}_\infty:={\mathbb Z}\cup\{+\infty\}$ the spectrum of the $C^*$-algebra of functions on ${\mathbb Z}$, vanishing at $-\infty$, and having finite limit for $n\to+\infty$, and let $\beta$ be the automorphism of $C_0({\mathbb Z}_\infty)$ given by $\beta(f)(n):= f(n-1)$, $n\in{\mathbb Z}$. It follows from \cite{Sakai}, Proposition 1.22.3, that ${\mathcal A}_\infty \otimes C_0({\mathbb Z}_\infty) \cong C_0({\mathbb Z}_\infty,{\mathcal A}_\infty)$, that is, two-sided sequences of elements in ${\mathcal A}_\infty$, vanishing at $-\infty$, and having norm-limit for $n\to+\infty$. It follows from \cite{Take}, Proposition IV.4.22, that there is a unique automorphism $\gamma\in \Aut( C_0({\mathbb Z}_\infty;{\mathcal A}_\infty) )$ such that $\gamma(a\otimes f) = \alpha_\infty(a)\otimes \beta(f)$, $a\in{\mathcal A}_\infty$, $f\in C_0({\mathbb Z}_\infty)$. In Proposition \ref{prop:reprOfCrossed}, we construct a representation of $C_0({\mathbb Z}_\infty;{\mathcal A}_\infty) \rtimes_{\gamma} {\mathbb Z}$ on $\ell^2({\mathbb Z},{\mathcal H}_u)$. Let $\rho_u$ be the representation of $C_0({\mathbb Z}_\infty)$ on ${\mathcal H}_u$ given by $\rho_u(f)\xi = f(0)\xi$, $f\in C_0({\mathbb Z}_\infty)$, $\xi\in{\mathcal H}_u$. It follows from \cite{Take}, Proposition IV.4.7, that there is a unique representation $\sigma_u$ of $C_0({\mathbb Z}_\infty;{\mathcal A}_\infty)$ on ${\mathcal H}_u$, such that $\sigma_u(a\otimes f) = \pi_u(a)\rho_u(f)$, $a\in{\mathcal A}_\infty$, $f\in C_0({\mathbb Z}_\infty)$.
Introduce the representations $\wt{\rho_u}$ of $C_0({\mathbb Z}_\infty)$ and $\wt{\sigma_u}$ of $C_0({\mathbb Z}_\infty;{\mathcal A}_\infty)$ on $\ell^2({\mathbb Z},{\mathcal H}_u)$ given by, for $a\in{\mathcal A}_\infty$, $f\in C_0({\mathbb Z}_\infty)$, $\xi \in \ell^2({\mathbb Z},{\mathcal H}_u)$, $n\in{\mathbb Z}$,
\begin{align*}
(\wt{\rho_u}(f) \xi)(n) & := \rho_u( \beta^{-n}(f) ) \xi(n) = f(n)\xi(n), \\
(\wt{\sigma_u}(a\otimes f) \xi)(n) & := \sigma_u( \gamma^{-n}(a\otimes f) ) \xi(n).
\end{align*}

\begin{proposition} \label{prop:reprOfCrossed}
\begin{itemize}
\item[$(1)$] $u \wt{\rho_u}(f)u^* = \wt{\rho_u}(\beta(f))$, $f\in C_0({\mathbb Z}_\infty)$.
\item[$(2)$] The representation $\wt{\sigma_u}$ of $C_0({\mathbb Z}_\infty;{\mathcal A}_\infty)$ on $\ell^2({\mathbb Z},{\mathcal H}_u)$ is faithful, and
\begin{align*}
\wt{\sigma_u}( C_0({\mathbb Z}_\infty;{\mathcal A}_\infty) ) & = \langle\, \wt{\pi_u}({\mathcal A}_\infty), \wt{\rho_u}(C_0({\mathbb Z}_\infty)) \,\rangle, \\
u \wt{\sigma_u}(a\otimes f)u^* & = \wt{\sigma_u}(\gamma(a\otimes f)), \quad a\in{\mathcal A}_\infty, f\in C_0({\mathbb Z}_\infty).
\end{align*}
\item[$(3)$] The regular representation $\chi:=\wt{\sigma_u} \rtimes U$ of $C_0({\mathbb Z}_\infty,{\mathcal A}_\infty) \rtimes_{\gamma} {\mathbb Z}$, induced from $\sigma_u$ on $\ell^2({\mathbb Z},{\mathcal H}_u)$, is faithful.
\end{itemize}
\end{proposition}

\begin{proof}
$(1)$ is a computation.
\noindent $(2)$ It is easy to see that $(\wt{\sigma_u}(g)\xi)(k) = \pi_u(\alpha_\infty^{-k}(g(k)))\xi(k)$, $k\in{\mathbb Z}$, $\xi\in\ell^2({\mathbb Z},{\mathcal H}_u)$, $g\in C_0({\mathbb Z}_\infty,{\mathcal A}_\infty)$, from which it follows that $\wt{\sigma_u}$ is faithful. Moreover, for $a\in{\mathcal A}_\infty$, $f\in C_0({\mathbb Z}_\infty)$, one has
\begin{align*}
\wt{\sigma_u}(\gamma( a\otimes f)) & = \wt{\sigma_u}(\alpha_\infty(a)\otimes \beta(f)) = \wt{\pi_u}(\alpha_\infty(a)) \wt{\rho_u}(\beta(f)) \\
& = u \wt{\pi_u}(a) u^* u \wt{\rho_u}(f) u^* = u \wt{\sigma_u}(a\otimes f) u^*.
\end{align*}

\noindent $(3)$ This follows from \cite{Will}, Theorem 7.13.
\end{proof}

It follows from the previous Proposition that ${\mathcal C}:= \langle\, \wt{\pi_u}({\mathcal A}_\infty), \wt{\rho_u}(C_0({\mathbb Z}_\infty)), u \,\rangle \subset {\mathcal B}(\ell^2({\mathbb Z},{\mathcal H}_u))$ is isomorphic, via $\chi^{-1}$, to $({\mathcal A}_\infty \otimes C_0({\mathbb Z}_\infty)) \rtimes_{\gamma} {\mathbb Z}$, and contains $C^*({\mathcal A},\alpha,{\mathbb N})$. It follows from its construction that we can lift canonically to ${\mathcal C}$ any representation of ${\mathcal A}_\infty$, as we prove in Proposition \ref{prop:Sigma}. We now begin the proof of property $(4)$ in Definition \ref{1.1}. In rough terms, starting from a covariant representation $\pi$ of $({\mathcal A},\alpha)$ on a Hilbert space ${\mathcal H}$, we construct a covariant representation $\pi_\infty$ of $({\mathcal A}_\infty,\alpha_\infty)$ on ${\mathcal H}_\infty$.
Then we construct a suitable representation $\rho_\infty$ of $C_0({\mathbb Z}_\infty)$ on ${\mathcal H}_\infty$, which allows us to construct a representation $\sigma_\infty$ of $C_0({\mathbb Z}_\infty;{\mathcal A}_\infty)$ on ${\mathcal H}_\infty$, and then a representation $\sigma_\infty \rtimes W_\infty$ of $C_0({\mathbb Z}_\infty;{\mathcal A}_\infty) \rtimes_\gamma {\mathbb Z}$, viz.\ a representation $\pi_{\mathcal C}$ of ${\mathcal C}$, on ${\mathcal H}_\infty$, that we can restrict to $C^*({\mathcal A},\alpha,{\mathbb N})$, and compress to a representation $\widehat{\pi}$ on ${\mathcal H}$ that satisfies property $(4)$ in Definition \ref{1.1}. In order to help the reader with the understanding of the following statements and proofs, we exhibit two tables with the $C^*$-algebras considered, and their representations on the various Hilbert spaces
\begin{center}
\begin{tabular}{ | c | c | c | c | c | }
\hline
& $\Aut(\cdot)$ & ${\mathcal H}_\infty$ & ${\mathcal H}_u$ & $\ell^2({\mathbb Z};{\mathcal H}_u)$ \\ \hline
${\mathcal A}_\infty$ & $\alpha_\infty$ & $\pi_\infty$ & $\pi_u$ & $\widetilde{\pi_u}$ \\ \hline
$C_0({\mathbb Z}_\infty)$ & $\beta$ & $\rho_\infty$ & $\rho_u$ & $\widetilde{\rho_u}$ \\ \hline
$C_0({\mathbb Z}_\infty;{\mathcal A}_\infty)$ & $\gamma\equiv \alpha_\infty\otimes\beta$ & $\sigma_\infty$ & $\sigma_u$ & $\widetilde{\sigma_u}$ \\ \hline
${\mathcal A}_\infty \rtimes_{\alpha_\infty} {\mathbb Z}$ & - & $\pi_\infty \rtimes W_\infty$ & - & $\widetilde{\pi_u} \rtimes U$ \\ \hline
$C_0({\mathbb Z}_\infty;{\mathcal A}_\infty) \rtimes_{\gamma} {\mathbb Z}$ & - & $\sigma_\infty \rtimes W_\infty$ & - & $\chi\equiv \widetilde{\sigma_u} \rtimes U$ \\ \hline
${\mathcal C} \equiv \chi(C_0({\mathbb Z}_\infty;{\mathcal A}_\infty) \rtimes_{\gamma} {\mathbb Z})$ & - & $\pi_{\mathcal C} \equiv (\sigma_\infty \rtimes W_\infty)\circ \chi^{-1}$ & - & id \\ \hline
\end{tabular}
\end{center}
and
\begin{center}
\begin{tabular}{ | c | c | c | c | c | }
\hline
& $\End(\cdot)$ & ${\mathcal H}$ & ${\mathcal H}_\infty$ & $p\ell^2({\mathbb Z};{\mathcal H}_u)$ \\ \hline
${\mathcal A}$ & $\alpha$ & $\pi$ & $\psi_0$ & $\zeta_{\mathcal A} \equiv \widetilde{\pi_u}\circ\varphi_{\infty0}(\cdot)p$ \\ \hline
$C^*({\mathcal A},\alpha,{\mathbb N})$ & - & $\widehat{\pi}\equiv S_{\infty0}^*\pi_{\mathcal C}(\cdot)S_{\infty0}$ & $\pi_{\mathcal C}|_{C^*({\mathcal A},\alpha,{\mathbb N})}$ & id \\ \hline
\end{tabular}
\end{center}
Let $({\mathcal H},\pi,W)$ be a covariant representation of $({\mathcal A},\alpha)$, and recall from Proposition \ref{prop:RepIndLim} that there exist $W_\infty\in{\mathcal U}({\mathcal H}_\infty)$, and a covariant representation $({\mathcal H}_\infty, \pi_\infty, W_\infty)$ of $({\mathcal A}_\infty,\alpha_\infty)$, on ${\mathcal H}_\infty \equiv \varinjlim H_n$, the Hilbert space inductive limit of the inductive system \eqref{HilbertIndLim}, such that $\pi_\infty\circ \varphi_{\infty n}(a) S_{\infty n} = S_{\infty n} \pi(a)$, for all $n\in{\mathbb N}$, $a\in{\mathcal A}$, and $W_\infty S_{\infty 0} = S_{\infty 0} W$. We now construct a representation $\rho_\infty$ of $C_0({\mathbb Z}_\infty)$ on ${\mathcal H}_\infty$ such that $[\pi_\infty(a),\rho_\infty(f)]=0$, for all $a\in{\mathcal A}_\infty$, $f\in C_0({\mathbb Z}_\infty)$.

\begin{proposition} \label{prop:spectralFamily}
Set $P_0:= S_{\infty 0}S_{\infty 0}^*$, $P_n:= \textrm{Ad}(W_\infty^n)(P_0)$, $n\in{\mathbb Z}$.
Then
\begin{itemize}
\item[$(1)$] $\{ P_n:n\in{\mathbb Z}\}$ is a decreasing family of projections in ${\mathcal B}({\mathcal H}_\infty)$,
\item[$(2)$] there exists $P_{+\infty}:= \lim_{n\to+\infty} P_n$, in the strong operator topology of ${\mathcal B}({\mathcal H}_\infty)$,
\item[$(3)$] $\lim_{n\to-\infty} P_n = 1$, in the strong operator topology of ${\mathcal B}({\mathcal H}_\infty)$,
\item[$(4)$] $\{ P_n : n\in{\mathbb Z}_\infty \} \subset \pi_\infty({\mathcal A}_\infty)'$.
\end{itemize}
\end{proposition}

\begin{proof}
$(1)$ Let $n\in{\mathbb Z}$. If $n\geq0$, then
$$
P_n = W_\infty^nS_{\infty 0}S_{\infty 0}^*W_\infty^{n*} = S_{\infty 0}W^nW^{n*}S_{\infty 0}^* \geq S_{\infty 0}W^{n+1}(W^{*})^{n+1}S_{\infty 0}^* = P_{n+1}.
$$
If $n=-k\leq 0$, then
$$
P_n = W_\infty^{*k}S_{\infty 0}S_{\infty 0}^*W_\infty^{k} = S_{\infty k}S_{\infty k}^* = S_{\infty,k+1}WW^*S_{\infty,k+1}^* \leq S_{\infty,k+1}S_{\infty,k+1}^* = P_{n-1}.
$$

\noindent $(2)$ follows from $(1)$.

\noindent $(3)$ We have to prove that $\lim_{k\to+\infty} S_{\infty k}S_{\infty k}^* = 1$, in the strong operator topology, and it suffices to prove it on the dense subset of ${\mathcal H}_\infty$ spanned by $\{ S_{\infty n}\xi : n\in{\mathbb N}, \xi\in{\mathcal H}\}$. Let us fix $n\in{\mathbb N}$, $\xi\in{\mathcal H}$, and compute, for $k>n$, $S_{\infty k}S_{\infty k}^*S_{\infty n}\xi = S_{\infty k}S_{\infty k}^*S_{\infty k}S_{kn}\xi = S_{\infty k}S_{kn}\xi = S_{\infty n}\xi$, and the thesis follows.

\noindent $(4)$ Let us first prove that $\pi_\infty(x) P_0 = P_0 \pi_\infty(x)$ for $x \in {\mathcal A}_\infty$. It suffices to show the equality for $x\in \{ \varphi_{\infty n}(a) : n\in{\mathbb N},a\in{\mathcal A}\}$.
We have, from equation \eqref{allaccia},
\begin{align*}
\pi_\infty \circ \varphi_{\infty n}(a)P_0 & = \pi_\infty\circ \varphi_{\infty n}(a)S_{\infty 0}S_{\infty 0}^* = \pi_\infty\circ \varphi_{\infty n}(a)S_{\infty n}W^nS_{\infty 0}^* \\
& = S_{\infty n} \pi(a) W^n W^{*n} S_{\infty n}^* = S_{\infty n} W^n W^{*n} \pi(a) S_{\infty n}^* \\
& = S_{\infty 0} W^{*n} S_{\infty n}^* \pi_\infty \circ \varphi_{\infty n}(a) = P_0 \pi_\infty \circ \varphi_{\infty n}(a).
\end{align*}
Then, for any $x \in {\mathcal A}_\infty$, $k\in{\mathbb Z}$,
\begin{align*}
\pi_\infty(x) P_k & = \pi_\infty(x)W_\infty^kP_0W_\infty^{*k} = W_\infty^k\pi_\infty(\alpha_\infty^{-k}(x)) P_0W_\infty^{*k} \\
& = W_\infty^kP_0\pi_\infty(\alpha_\infty^{-k}(x)) W_\infty^{*k} = W_\infty^kP_0W_\infty^{*k} \pi_\infty(x) = P_k \pi_\infty(x).
\end{align*}
Finally, $P_{+\infty} \in \pi_\infty({\mathcal A}_\infty)'$, because of $(2)$.
\end{proof}

\begin{proposition} \label{prop:repC0}
There exists a representation $\rho_\infty$ of $C_0({\mathbb Z}_\infty)$ on ${\mathcal H}_\infty$, such that, for any $f\in C_0({\mathbb Z}_\infty)$,
\begin{align*}
\rho_\infty(f) & \in\pi_\infty({\mathcal A}_\infty)', \\
\rho_\infty(\beta(f)) & = W_\infty \rho_\infty(f)W_\infty^*.
\end{align*}
\end{proposition}

\begin{proof}
Set $E_n:= P_n-P_{n+1}$, $n\in{\mathbb Z}$, $E_{+\infty}:=P_{+\infty}$. Then, $\{ E_n: n\in{\mathbb Z}_\infty\}$ is a spectral family on ${\mathcal H}_\infty$, and $E_{n+1} = W_\infty E_n W_\infty^*$, $n\in{\mathbb N}$, $E_{+\infty} = W_\infty E_{+\infty} W_\infty^*$. Define, for $f\in C_0({\mathbb Z}_\infty)$, $\rho_\infty(f):= \sum_{n\in{\mathbb Z}_\infty} f(n)E_n$, where the series converges in the strong operator topology of ${\mathcal B}({\mathcal H}_\infty)$.
Then, $\rho_\infty$ is a representation of $C_0({\mathbb Z}_\infty)$ on ${\mathcal H}_\infty$, such that $\rho_\infty(f)\in\pi_\infty({\mathcal A}_\infty)'$, for any $f\in C_0({\mathbb Z}_\infty)$, and $\rho_\infty(\beta(f)) = W_\infty \rho_\infty(f)W_\infty^*$, $f\in C_0({\mathbb Z}_\infty)$.
\end{proof}

\begin{proposition} \label{prop:Sigma}
\begin{itemize}
\item[$(1)$] There is a unique representation $\sigma_\infty$ of $C_0({\mathbb Z}_\infty;{\mathcal A}_\infty)$ on ${\mathcal H}_\infty$, such that $\sigma_\infty(a\otimes f) = \pi_\infty(a)\rho_\infty(f)$, $a\in{\mathcal A}_\infty$, $f\in C_0({\mathbb Z}_\infty)$. Moreover, $\sigma_\infty(\gamma(g)) = W_\infty \sigma_\infty(g)W_\infty^*$, $g\in C_0({\mathbb Z}_\infty;{\mathcal A}_\infty)$.
\item[$(2)$] There is a unique representation $\sigma_\infty \rtimes W_\infty$ of $C_0({\mathbb Z}_\infty;{\mathcal A}_\infty)\rtimes_{\gamma} {\mathbb Z}$ on ${\mathcal H}_\infty$ such that $\sigma_\infty \rtimes W_\infty(g\delta_n) = \sigma_\infty(g)W_\infty^n$, $g\in C_0({\mathbb Z}_\infty;{\mathcal A}_\infty)$, $n\in{\mathbb Z}$.
\end{itemize}
\end{proposition}

\begin{proof}
$(1)$ This follows from \cite{Take}, Proposition IV.4.7.

\noindent $(2)$ This follows from \cite{Ped}, Proposition 7.6.4.
\end{proof}

Let us set $\pi_{\mathcal C} := (\sigma_\infty \rtimes W_\infty) \circ \chi^{-1}$, which is a representation of ${\mathcal C}$ on ${\mathcal H}_\infty$.
\begin{proposition} \label{prop:repr}
\begin{itemize}
\item[$(1)$] $\pi_{\mathcal C}( x ) = \pi_\infty \rtimes W_\infty( x )$, for all $x\in{\mathcal A}_\infty \rtimes_{\alpha_\infty} {\mathbb Z} \equiv \langle\, \wt{\pi_u}({\mathcal A}_\infty), u \,\rangle$,
\item[$(2)$] $\pi_{\mathcal C}(p) = P_0 = S_{\infty 0}S_{\infty 0}^*$.
\end{itemize}
\end{proposition}

\begin{proof}
It follows from Proposition \ref{prop:reprOfCrossed} that, for $\sum_{n\in{\mathbb Z}} (a_n\otimes f_n) \delta_n \in C_c( C_0({\mathbb Z}_\infty;{\mathcal A}_\infty), {\mathbb Z}, \gamma)$, we have $\chi( \sum_{n\in{\mathbb Z}} (a_n\otimes f_n) \delta_n ) = \sum_{n\in{\mathbb Z}} \wt{\pi_u}(a_n)\wt{\rho_u}(f_n)u^n$, so that
\begin{align*}
\pi_{\mathcal C} \bigg( \sum_{n\in{\mathbb Z}} \wt{\pi_u}(a_n)\wt{\rho_u}(f_n) u^n \bigg) & = \sigma_\infty \rtimes W_\infty \bigg( \sum_{n\in{\mathbb Z}} (a_n\otimes f_n) \delta_n \bigg) \\
& = \sum_{n\in{\mathbb Z}} \sigma_\infty( a_n \otimes f_n) W_\infty^n = \sum_{n\in{\mathbb Z}} \pi_\infty( a_n) \rho_\infty( f_n) W_\infty^n.
\end{align*}

\noindent $(1)$ Indeed, with $\{e_n:n\in{\mathbb N}\}$ an approximate unit of $C_0({\mathbb Z}_\infty)$, we get, for all $a\in{\mathcal A}_\infty$, $k\in{\mathbb Z}$,
\begin{align*}
\pi_{\mathcal C}( \wt{\pi_u}(a) u^k ) & = \lim_{n\to\infty} \pi_{\mathcal C}( \wt{\pi_u}(a)\wt{\rho_u}(e_n) u^k) = \lim_{n\to\infty} \pi_\infty(a) \rho_\infty(e_n)W_\infty^k \\
& = \pi_\infty(a)W_\infty^k = \pi_\infty \rtimes W_\infty( \wt{\pi_u}(a) u^k ),
\end{align*}
and the thesis follows.

\noindent $(2)$ If $f(n) = \begin{cases} 0, & n<0,\\ 1, & n\geq 0, \end{cases}\ $ then $\pi_{\mathcal C}(p) = \pi_{\mathcal C}(\wt{\rho_u}(f)) = \rho_\infty(f) = P_0$.
\end{proof}

Let us still denote by $\pi_{\mathcal C}$ the restriction of $\pi_{\mathcal C}$ to the subalgebra $C^*({\mathcal A},\alpha,{\mathbb N}) \equiv \langle\, p\wt{\pi_u}({\mathcal A}_\infty)p, pup \,\rangle$ of ${\mathcal C} \equiv \langle\, \wt{\pi_u}({\mathcal A}_\infty), \wt{\rho_u}(C_0({\mathbb Z}_\infty)), u \,\rangle$.

\begin{theorem} \label{prop:crossedProd}
$C^*({\mathcal A},\alpha,{\mathbb N})$ satisfies all the properties in Definition \ref{1.1}, namely is the crossed product of ${\mathcal A}$ with ${\mathbb N}$ by $\alpha$.
\end{theorem}

\begin{proof}
As it was already noticed, property $(1)$ in Definition \ref{1.1} follows by definition, while properties $(2)$ and $(3)$ have been proved in Proposition \ref{prop:generate}. It remains to prove property $(4)$.
Let $({\mathcal H},\pi,W)$ be a covariant representation of $({\mathcal A},\alpha)$, and recall from Proposition \ref{prop:RepIndLim} that there exist $W_\infty\in{\mathcal U}({\mathcal H}_\infty)$, and a covariant representation $({\mathcal H}_\infty, \pi_\infty, W_\infty)$ of $({\mathcal A}_\infty,\alpha_\infty)$, on ${\mathcal H}_\infty \equiv \varinjlim H_n$, the Hilbert space inductive limit of the inductive system \eqref{HilbertIndLim}, such that $\pi_\infty\circ \varphi_{\infty n}(a) S_{\infty n} = S_{\infty n} \pi(a)$, for all $n\in{\mathbb N}$, $a\in{\mathcal A}$, and $W_\infty S_{\infty 0} = S_{\infty 0} W$. Let $\pi_{\mathcal C}$ be the representation of $C^*({\mathcal A},\alpha,{\mathbb N})$ on ${\mathcal H}_\infty$ constructed in Proposition \ref{prop:repr}. Let us now prove that $P_0\in \pi_{\mathcal C}(C^*({\mathcal A},\alpha,{\mathbb N}))'$, that is, $\pi_{\mathcal C}(C^*({\mathcal A},\alpha,{\mathbb N})) S_{\infty 0}{\mathcal H} \subset S_{\infty 0}{\mathcal H}$. Because of Proposition \ref{prop:generate} it is enough to prove that $\pi_{\mathcal C}(t)S_{\infty 0}{\mathcal H} \subset S_{\infty 0}{\mathcal H}$, $\pi_{\mathcal C}(t^*)S_{\infty 0}{\mathcal H} \subset S_{\infty 0}{\mathcal H}$, and $\pi_{\mathcal C}(\zeta_{\mathcal A}(a))S_{\infty 0}{\mathcal H} \subset S_{\infty 0}{\mathcal H}$, for all $a\in{\mathcal A}$.
Indeed, for all $a\in{\mathcal A}$, $\xi\in{\mathcal H}$, we have
\begin{align*}
\pi_{\mathcal C}(t)S_{\infty 0}\xi & = \pi_{\mathcal C}(pup)S_{\infty 0}\xi = P_0W_\infty P_0S_{\infty 0}\xi \in S_{\infty 0}{\mathcal H}, \\
\pi_{\mathcal C}(t^*)S_{\infty 0}\xi & = \pi_{\mathcal C}(pu^* p)S_{\infty 0}\xi = P_0W_\infty^* P_0S_{\infty 0}\xi \in S_{\infty 0}{\mathcal H}, \\
\pi_{\mathcal C}( \zeta_{\mathcal A}(a) )S_{\infty 0}\xi & = \pi_{\mathcal C}\circ \wt{\pi_u} \circ \varphi_{\infty 0} (a) P_0S_{\infty 0}\xi = \pi_\infty \circ \varphi_{\infty 0} (a) S_{\infty 0}\xi \\
& = S_{\infty 0} \pi(a)\xi \in S_{\infty 0}{\mathcal H}.
\end{align*}
Recall from the proof of Proposition \ref{prop:RepIndLim} that there is a representation $\psi_0$ of ${\mathcal A}$ on ${\mathcal H}_\infty$ such that $\psi_0(a)S_{\infty 0} = S_{\infty 0}\pi(a)$, $a\in{\mathcal A}$, and $\pi_\infty\circ\varphi_{\infty 0} = \psi_0$. Finally, define
\begin{align*}
\widehat{\pi}(x):= S_{\infty 0}^* \pi_{\mathcal C}(x)S_{\infty 0},\quad x\in C^*({\mathcal A},\alpha,{\mathbb N}),
\end{align*}
which is a representation of $C^*({\mathcal A},\alpha,{\mathbb N})$ on ${\mathcal H}$, because $P_0\in \pi_{\mathcal C}(C^*({\mathcal A},\alpha,{\mathbb N}))'$. Then,
\begin{align*}
\widehat{\pi}(t) & = S_{\infty 0}^* \pi_{\mathcal C}(t)S_{\infty 0} = S_{\infty 0}^* P_0W_\infty P_0S_{\infty 0} = S_{\infty 0}^* W_\infty S_{\infty 0} \\
& = S_{\infty 0}^* S_{\infty 0}W = W,
\end{align*}
and, for all $a\in{\mathcal A}$,
\begin{align*}
\widehat{\pi}(\zeta_{\mathcal A}(a)) & = S_{\infty 0}^* \pi_{\mathcal C}( \wt{\pi_u} \circ \varphi_{\infty 0} (a) p) S_{\infty 0} = S_{\infty 0}^* \pi_\infty \circ \varphi_{\infty 0} (a) S_{\infty 0}S_{\infty 0}^* S_{\infty 0} \\
& = S_{\infty 0}^* \psi_0 (a) S_{\infty 0} = S_{\infty 0}^* S_{\infty 0}\pi(a) = \pi(a).
\end{align*}
\end{proof}

\subsection{An example: the noncommutative torus}

As mentioned in the introduction, the crossed product ${\mathcal A} \rtimes_\alpha {\mathbb N}$ given in Definition \ref{1.1} coincides with a reduction by a projection of the ordinary crossed product when $\alpha$ is an automorphism. We now give two equivalent descriptions of ${\mathcal A} \rtimes_\alpha {\mathbb N}$, when ${\mathcal A}=C({\mathbb R}/{\mathbb Z})$ and $\alpha$ is a rotation by $2\pi \vartheta$, where $\vartheta$ is irrational. The first description is the following. As is known, the noncommutative torus $A_\vartheta$ can be described as the crossed product $C({\mathbb R}/{\mathbb Z})\rtimes_{\alpha_\vartheta}{\mathbb Z}$, where $(\alpha_\vartheta(f))(t)=f(t-\vartheta)$. Given the Hilbert space $H=\ell^2({\mathbb Z},L^2({\mathbb R}/{\mathbb Z}))$, the representation $\pi\colon C({\mathbb R}/{\mathbb Z})\to {\mathcal B}(H)$, $(\pi(f)\xi)(n)=\alpha_\vartheta^{-n}(f)\xi(n)$, and the unitary $V$ acting on $H$ as $(V\xi)(n)=\xi(n-1)$, $A_\vartheta$ can be identified with the $C^*$-algebra generated by $V$ and $\pi(C({\mathbb R}/{\mathbb Z}))$ on the Hilbert space $H$. Since $C({\mathbb R}/{\mathbb Z})$ is generated as a $C^*$-algebra by the unitary $U_0=\exp(2\pi i t)$, $A_\vartheta$ is generated by the unitary $V$ and the unitary $U$ given by $(U\xi)(n)=\exp(2\pi i n\vartheta)U_0\xi(n)$. It is easy to check that $UV=\exp(2\pi i\vartheta)VU$.
Since $\alpha_\vartheta$ is an automorphism, Theorem \ref{prop:crossedProd} implies that $C({\mathbb R}/{\mathbb Z})\rtimes_{\alpha_\vartheta}{\mathbb N}$ is the reduction of $C({\mathbb R}/{\mathbb Z})\rtimes_{\alpha_\vartheta}{\mathbb Z}$ by the projection $p$ on the Hilbert space $H_+=\ell^2({\mathbb N}_0,L^2({\mathbb R}/{\mathbb Z}))$. We have proved the following theorem.

\begin{theorem}
The $C^*$-algebra $C({\mathbb R}/{\mathbb Z})\rtimes_{\alpha_\vartheta}{\mathbb N}$ can be identified with the $C^*$-algebra generated by the unitary $U$ and the isometry $pVp$ acting on $H_+$.
\end{theorem}

We now provide a description of ${\mathcal A} \rtimes_\alpha {\mathbb N}$ as a universal object.

\begin{theorem}
The $C^*$-algebra $C({\mathbb R}/{\mathbb Z})\rtimes_{\alpha_\vartheta}{\mathbb N}$ coincides with the universal $C^*$-algebra generated by a unitary $U$ and an isometry $V$ satisfying the condition $UV=\exp(2\pi i\vartheta)VU$.
\end{theorem}

\begin{proof}
By definition, the universal $C^*$-algebra generated by a unitary $U$ and an isometry $V$ satisfying the condition $UV=\exp(2\pi i\vartheta)VU$ is the unique $C^*$-algebra $B$ satisfying the following universal property: for any triple $({\mathcal H},u,v)$, where ${\mathcal H}$ is a Hilbert space, $u$ is a unitary and $v$ is an isometry acting on ${\mathcal H}$ satisfying $uv=\exp(2\pi i\vartheta)vu$, there exists a representation $\pi\colon B\to {\mathcal B}({\mathcal H})$ such that $\pi(U)=u$ and $\pi(V)=v$. By definition, also $C({\mathbb R}/{\mathbb Z})\rtimes_{\alpha_\vartheta}{\mathbb N}$ satisfies a universal property, given by properties $(1) - (4)$ of Definition 2.4.
Therefore, given a triple $({\mathcal H},u,v)$ as above, we get indeed a covariant representation $({\mathcal H},\rho, v)$ of $(C({\mathbb R}/{\mathbb Z}),\alpha_\vartheta)$, where we set $\rho(f)=f(u)$; in fact, the commutation relations imply that $v^kv^{*k}u=u v^kv^{*k}$. The properties of $C({\mathbb R}/{\mathbb Z})\rtimes_{\alpha_\vartheta}{\mathbb N}$ imply the thesis.
\end{proof}

\begin{remark}
If $\vartheta$ is rational, the projection $p$ in the first description is the identity and, therefore, $C({\mathbb R}/{\mathbb Z})\rtimes_{\alpha_\vartheta}{\mathbb N}$ coincides with $A_\vartheta$.
\end{remark}

\section{Some results on semifinite spectral triples}

In this section we discuss some generalizations of results well-known for type I spectral triples. Some of these results have already been proved in \cite{Jordans} and some are new. First of all we recall the following definitions:

\begin{definition}
Let $({\mathcal M},\tau)$ be a von Neumann algebra with a normal semifinite faithful (n.s.f.) trace, $T\, \widehat{\in}\, {\mathcal M}$ a self-adjoint operator\footnote{By $T\, \widehat{\in}\, {\mathcal M}$ we mean that the operator $T$ is affiliated with ${\mathcal M}$. Another common notation is $T\, \eta\, {\mathcal M}$.}. We use the notation $e_T(\Omega)$ for the spectral projection of $T$ relative to the measurable set $\Omega\subset {\mathbb R}$, $\lambda_t(T):=\tau(e_{|T|}(t,+\infty))$, $\Lambda_t(T):=\tau(e_{|T|}[0,t))$, $\mu_t(T):=\inf \{s>0: \lambda_s(T)\leq t\}$, $t>0$. The operator $T$ is said to be $\tau$-measurable if $\lambda_t(T) \to 0$, $t\to+\infty$, and $\tau$-compact if $\mu_t(T) \to 0$, $t\to+\infty$, or equivalently, $\lambda_t(T)<+\infty$, $\forall\; t>0$.
\end{definition}

\begin{definition}
Let ${\mathcal A}$ be a unital $C^*$-algebra. An odd semifinite spectral triple $({\mathcal L},{\mathcal H},D; {\mathcal M},\tau)$ on ${\mathcal A}$, with respect to a semifinite von Neumann algebra ${\mathcal M}\subset{\mathcal B}({\mathcal H})$ endowed with a n.s.f. trace $\tau$, is given by a unital, norm-dense, $^*$-subalgebra ${\mathcal L}\subset{\mathcal A}$, a (separable) Hilbert space ${\mathcal H}$, a faithful representation $\pi\colon{\mathcal A}\to{\mathcal B}({\mathcal H})$ such that $\pi({\mathcal A})\subset{\mathcal M}$, and an unbounded self-adjoint operator $D\, \widehat{\in}\, {\mathcal M}$ such that
\begin{itemize}
\item[$(1)$] $(1+D^2)^{-1}$ is a $\tau$-compact operator, \textit{i.e.} $\lambda_t((1+D^2)^{-1})<+\infty$, $\forall\; t>0$ or, equivalently, $\Lambda_t(D)<+\infty$, $\forall \; t>0$,
\item[$(2)$] $\pi(a)(\mathrm{dom}\, D) \subset \mathrm{dom}\, D$, and $[D,\pi(a)] \in{\mathcal M}$, for all $a\in{\mathcal L}$.
\end{itemize}
\par\noindent The spectral triple $({\mathcal L},{\mathcal H},D; {\mathcal M},\tau)$ is even if, in addition,
\begin{itemize}
\item[$(3)$] there is a self-adjoint unitary operator (\textit{i.e.} a ${\mathbb Z}_2$-grading) $\Gamma\in{\mathcal M}$ such that $\pi(a)\Gamma = \Gamma\pi(a)$, $\forall a\in{\mathcal A}$, and $D\Gamma=-\Gamma D$.
\par The spectral triple $({\mathcal L},{\mathcal H},D; {\mathcal M},\tau)$ is finitely summable if, in addition,
\item[$(4)$] there exists a $\delta>0$ such that $\tau((1+D^2)^{-\delta/2})<+\infty$.
\end{itemize}
\end{definition}

\begin{definition}
Given a finitely summable semifinite spectral triple $({\mathcal L},{\mathcal H},D;{\mathcal M},\tau)$, the number $d=\inf\{\alpha>0:\tau((1+D^2)^{-\alpha/2})<+\infty\}$ is called the metric or Hausdorff dimension of the triple, since it is the unique exponent, if any, such that the logarithmic Dixmier trace is finite non-zero on $(1+D^2)^{-\alpha/2}$ (cf.\ \cite{GuIs09}, Theorem 2.7).
\end{definition}

We note that the usual definition of spectral triple, which was recalled in Definition \ref{deftripla}, can be recovered by taking ${\mathcal M}={\mathcal B}({\mathcal H})$.

\begin{proposition}\label{dimension-Lambda}
Let $({\mathcal L},{\mathcal H},D;{\mathcal M},\tau)$ be a finitely summable semifinite spectral triple. Then $d=\limsup_{t\to\infty}\frac{\log\Lambda_t(D)}{\log t}$.
\end{proposition}
\begin{proof}
We first observe that, by \cite{FaKo}, Proposition 2.7,
\[
\tau((1+D^2)^{-\alpha/2})=\int_0^{+\infty}\mu_t((1+D^2)^{-\alpha/2})\,dt
=\int_0^{+\infty}\mu^\alpha_t((1+D^2)^{-1/2})\,dt.
\]
Therefore,
\begin{align*}
d&=\left(\liminf_{t\to\infty}\frac{\log\mu_t((1+D^2)^{-1/2})}{\log(1/t)}\right)^{-1}
=\limsup_{s\to0}\frac{\log\lambda_s((1+D^2)^{-1/2})}{\log(1/s)}\\
&=\limsup_{t\to\infty}\frac{\log\Lambda_t((1+D^2)^{1/2})}{\log t}
=\limsup_{t\to\infty}\frac{\log\Lambda_t(D)}{\log t},
\end{align*}
where the first equality follows by \cite{GuIs09}, Theorem 1.4, the second by \cite{GuIs05}, Proposition 1.13, the third by the definition of $\Lambda$, and the last by simple estimates.
\end{proof}

\subsection{The case of the tensor product}
Let us recall the definition of the tensor product of semifinite spectral triples.
\begin{definition}
Let ${\mathcal A}_1,{\mathcal A}_2$ be unital $C^*$-algebras, with respective semifinite spectral triples ${\mathcal T}_1:=({\mathcal L}_1,{\mathcal H}_1,D_1,\Gamma_1;{\mathcal M}_1,\tau_1)$, ${\mathcal T}_2:=({\mathcal L}_2,{\mathcal H}_2,D_2,\Gamma_2;{\mathcal M}_2,\tau_2)$, and define ${\mathcal T}_1\times{\mathcal T}_2 \equiv ({\mathcal L},{\mathcal H},D,\Gamma;{\mathcal M},\tau)$ as follows:

if ${\mathcal T}_1$ and ${\mathcal T}_2$ are both even,
\[
\begin{array}{ccc}
{\mathcal L}:={\mathcal L}_1 \odot {\mathcal L}_2, & {\mathcal H}:= {\mathcal H}_1 \otimes {\mathcal H}_2, & D := D_1 \otimes I_2 + \Gamma_1 \otimes D_2, \\
\Gamma:=\Gamma_1\otimes \Gamma_2, & {\mathcal M} := {\mathcal M}_1 \otimes {\mathcal M}_2, & \tau := \tau_1 \otimes \tau_2,
\end{array}
\]

if ${\mathcal T}_1$ is even and ${\mathcal T}_2$ is odd,
\[
\begin{array}{ccc}
{\mathcal L}:={\mathcal L}_1 \odot {\mathcal L}_2, & {\mathcal H}:= {\mathcal H}_1 \otimes {\mathcal H}_2, & D := D_1 \otimes I_2 + \Gamma_1 \otimes D_2, \\
\Gamma:=I_1\otimes I_2, & {\mathcal M} := {\mathcal M}_1 \otimes {\mathcal M}_2, & \tau := \tau_1 \otimes \tau_2,
\end{array}
\]

if ${\mathcal T}_1$ is odd and ${\mathcal T}_2$ is even,
\[
\begin{array}{ccc}
{\mathcal L}:={\mathcal L}_1 \odot {\mathcal L}_2, & {\mathcal H}:= {\mathcal H}_1 \otimes {\mathcal H}_2, & D := D_1 \otimes \Gamma_2 + I_1 \otimes D_2, \\
\Gamma:=I_1\otimes I_2, & {\mathcal M} := {\mathcal M}_1 \otimes {\mathcal M}_2, & \tau := \tau_1 \otimes \tau_2,
\end{array}
\]

if ${\mathcal T}_1$ and ${\mathcal T}_2$ are both odd,
\[
\begin{array}{ccc}
{\mathcal L}:={\mathcal L}_1 \odot {\mathcal L}_2, & {\mathcal H}:= {\mathcal H}_1 \otimes {\mathcal H}_2 \otimes{\mathbb C}^2, & D := D_1 \otimes I_2 \otimes \varepsilon_1 + I_1 \otimes D_2 \otimes \varepsilon_2, \\
\Gamma:=I_1\otimes I_2\otimes \varepsilon_3, & {\mathcal M} := {\mathcal M}_1 \otimes {\mathcal M}_2\otimes M_2({\mathbb C}), & \tau := \tau_1 \otimes \tau_2\otimes Tr,
\end{array}
\]
where $\varepsilon_1$, $\varepsilon_2$, $\varepsilon_3$ are the Pauli matrices, see \eqref{pauli}.
\end{definition}

\begin{proposition}\label{tensorProductTriple}
Let ${\mathcal A}_1,{\mathcal A}_2$ be unital $C^*$-algebras, with respective semifinite spectral triples ${\mathcal T}_1:=({\mathcal L}_1,{\mathcal H}_1,D_1,\Gamma_1;{\mathcal M}_1,\tau_1)$, ${\mathcal T}_2:=({\mathcal L}_2,{\mathcal H}_2,D_2,\Gamma_2;{\mathcal M}_2,\tau_2)$. Then ${\mathcal T}_1 \times {\mathcal T}_2$ is a semifinite spectral triple on the spatial tensor product ${\mathcal A}_1 \otimes {\mathcal A}_2$. Moreover, the Hausdorff dimension $d$ of ${\mathcal T}_1 \times {\mathcal T}_2$ satisfies $d\leq d_1 + d_2$, where $d_1, d_2$ are the Hausdorff dimensions of the factor spectral triples.
Finally, if $\displaystyle\lim_{t\to\infty}\frac{\log\Lambda_t(D_1)}{\log t}$ exists, the equality $d= d_1+d_2$ holds.
\end{proposition}
\begin{proof}
In case ${\mathcal T}_1$ and ${\mathcal T}_2$ are not both odd, the result is proved in \cite{Jordans}, Theorem 2.13 and Lemma 2.19. In the remaining case, one can proceed analogously. We now give an alternative proof of the formula for the Hausdorff dimension, valid in all cases. Since $D^2=D_1^2\otimes I+I\otimes D_2^2$ in all cases, if $d$ denotes the dimension of $({\mathcal L},{\mathcal H},D;{\mathcal M},\tau)$, we have that
\begin{align*}
d & = \limsup_{t\to\infty}\frac{\log\Lambda_t(D)}{\log t}
= \limsup_{t\to\infty}\frac{\log\tau(e_{D}(-t,t))}{\log t} \\
& = \limsup_{t\to\infty}\frac{\log\tau(\chi_{[0,t^2)}(D_1^2\otimes I+I\otimes D_2^2))}{\log t}.
\end{align*}
If $\sigma_i$ denotes the spectrum of $D_i$, $i=1,2$, the representations of $C_0(\sigma_i)$ on ${\mathcal H}_i$ with image in ${\mathcal M}_i$ given by functional calculus, $i=1,2$, together with the Radon measures $\nu_i$ on $\sigma_i$ induced by the traces $\tau_i$, $i=1,2$, give rise to a representation $j$ of $C_0(\sigma_1\times\sigma_2)$ on ${\mathcal H}_1\otimes{\mathcal H}_2$ with image in ${\mathcal M}_1\otimes{\mathcal M}_2$, together with the Radon measure $\nu:=\nu_1\otimes\nu_2$ on $\sigma_1\times\sigma_2$ induced by the trace $\tau:=\tau_1\otimes\tau_2$, such that $j(f_1\otimes f_2)=f_1(D_1)\otimes f_2(D_2)$ and $\int f_1\otimes f_2\, d\nu=\tau_1(f_1(D_1))\, \tau_2(f_2(D_2))$. Then, denoting by $B_r$ the disk of radius $r$ centered at the origin of the plane, and by $Q_r$ the square $[-r,r]\times[-r,r]$ in the plane,
\[
\chi_{[0,t^2)}(D_1^2\otimes I+I\otimes D_2^2) = j(\chi_{B_t}).
\]
Then the inclusions $Q_{t/\sqrt2}\subset B_t\subset Q_t$ give the inequalities
\[
\Lambda_{t/\sqrt2}(D_1)\cdot\Lambda_{t/\sqrt2}(D_2)
= \nu(Q_{t/\sqrt2}) \leq \nu(B_t)\leq\nu(Q_t)
= \Lambda_t(D_1)\cdot\Lambda_t(D_2),
\]
from which we get
\begin{align*}
\liminf_{t\to\infty}\frac{\log\Lambda_t(D_1)}{\log t}+\limsup_{t\to\infty}\frac{\log\Lambda_t(D_2)}{\log t}
& \leq \limsup_{t\to\infty}\frac{\log\Lambda_t(D)}{\log t} \\
& \leq \limsup_{t\to\infty}\frac{\log\Lambda_t(D_1)}{\log t}+\limsup_{t\to\infty}\frac{\log\Lambda_t(D_2)}{\log t}.
\end{align*}
\end{proof}

\subsection{The cases of the crossed products}\label{subsec:CrossedProd}
Let ${\mathcal A}$ be a unital $C^*$-algebra, $\alpha\in \Aut({\mathcal A})$ a unital automorphism, and $({\mathcal L},{\mathcal H},D;{\mathcal M},\tau)$ a semifinite spectral triple on ${\mathcal A}$. Assume that $\alpha$ is Lip-bounded, that is, $\alpha({\mathcal L})\subset{\mathcal L}$ and, for any $a\in{\mathcal L}$, $\sup_{n\in{\mathbb Z}} \| [D,\alpha^{-n}(a)] \| < \infty$.
Then, following \cite{BMR}, we can construct a semifinite spectral triple $({\mathcal L}_\rtimes,{\mathcal H}_\rtimes,D_\rtimes;{\mathcal M}_\rtimes,\tau_\rtimes)$ on the crossed product $C^*$-algebra ${\mathcal A}\rtimes_\alpha {\mathbb Z} = \langle \widetilde{\pi_u}({\mathcal A}),U \rangle$, which is defined as follows:
\begin{itemize}
\item[$(1)$] if $({\mathcal L},{\mathcal H},D,\Gamma;{\mathcal M},\tau)$ is even,
\begin{flalign*}
& {\mathcal L}_\rtimes := {}^*\mathrm{alg}(\widetilde{\pi_u}({\mathcal L}),U), & \qquad & {\mathcal H}_\rtimes := {\mathcal H} \otimes \ell^2({\mathbb Z}), & \\
& D_\rtimes := D \otimes I + \Gamma \otimes D_{\mathbb Z}, & \qquad & \Gamma_\rtimes := I\otimes I, & \\
& {\mathcal M}_\rtimes := {\mathcal M} \otimes {\mathcal B}(\ell^2({\mathbb Z})), & \qquad & \tau_\rtimes := \tau \otimes Tr, &
\end{flalign*}
where ${}^*\mathrm{alg}(\widetilde{\pi_u}({\mathcal L}),U)$ is the $^*$-algebra generated by $\widetilde{\pi_u}({\mathcal L})$ and $U$, $(D_{\mathbb Z} \xi)(n) := n\xi(n)$, $\forall \xi\in\ell^2({\mathbb Z})$, and $Tr$ is the usual trace on ${\mathcal B}(\ell^2({\mathbb Z}))$,
\item[$(2)$] if $({\mathcal L},{\mathcal H},D;{\mathcal M},\tau)$ is odd,
\begin{flalign*}
& {\mathcal L}_\rtimes := {}^*\mathrm{alg}(\widetilde{\pi_u}({\mathcal L}),U), & \qquad & {\mathcal H}_\rtimes := {\mathcal H} \otimes \ell^2({\mathbb Z}) \otimes {\mathbb C}^2, & \\
& D_\rtimes := D \otimes I \otimes \varepsilon_1 + I \otimes D_{\mathbb Z} \otimes \varepsilon_2, & \qquad & \Gamma_\rtimes := I \otimes I \otimes \varepsilon_3, & \\
& {\mathcal M}_\rtimes := {\mathcal M} \otimes {\mathcal B}(\ell^2({\mathbb Z})) \otimes M_2({\mathbb C}), & \qquad & \tau_\rtimes := \tau \otimes Tr \otimes tr, &
\end{flalign*}
where $tr$ is the normalized trace on $M_2({\mathbb C})$.
\end{itemize} In case $\alpha$ satisfies a weaker condition, we have the following result. \betaegin{definition}\lambda} \def\La{\Lambdaabel{semiequi} Let ${\muathcal A}$ be a unital C*-algebra, $\alpha\in \Aut({\muathcal A})$ a unital automorphism, $({\muathcal L},{\muathcal H},D)$ a spectral triple on ${\muathcal A}$ such that $\alpha({\muathcal L})\varsigmagma} \def\S{\Sigmaubset{\muathcal L}$. The automorphism is said to be Lip-semibounded if $$ \varsigmagma} \def\S{\Sigmaup_{n\in{\muathbb N}} \| [D,\alpha^{-n}(a)] \| < \infty, \qquad \varphiorall a\in{\muathcal L}. $$ \end{definition} \betaegin{proposition} Let ${\muathcal A}$ be a unital $C^*$-algebra, $\alpha\in \Aut({\muathcal A})$ a unital automorphism, $({\muathcal L},{\muathcal H},D;{\muathcal A}m,\tau)$ a semifinite spectral triple on ${\muathcal A}$, and assume $\alpha$ is Lip-semibounded. Then we can construct a semifinite spectral triple $({\muathcal L}_\rhotimes,{\muathcal H}_\rhotimes,D_\rhotimes;{\muathcal A}m_\rhotimes,\tau_\rhotimes)$ on the crossed product $C^*$-algebra ${\muathcal A}\rhotimes_\alpha {\muathbb N} = \lambda} \def\La{\Lambdaangle \zetaci_{\muathcal A}({\muathcal A}),t \rhoangle$, which is defined as follows: \betaegin{itemize} \item[$(1)$] if $({\muathcal L},{\muathcal H},D,\G;{\muathcal A}m,\tau)$ is even, \betaegin{flalign*} & {\muathcal L}_\rhotimes := {}^*\muathrm{alg}(\zetaci_{\muathcal A}({\muathcal L}),t), & \qquad & {\muathcal H}_\rhotimes := {\muathcal H} \otimes \ell^2({\muathbb N}), & \\ & D_\rhotimes := D \otimes I + \G \otimes D_{\muathbb N}, & \qquad & \G_\rhotimes := I\otimes I, & \\ & {\muathcal A}m_\rhotimes := {\muathcal A}m \otimes {\muathcal B}(\ell^2({\muathbb N})), & \qquad & \tau_\rhotimes := \tau \otimes Tr, & \end{flalign*} where ${}^*\muathrm{alg}(\zetaci_{\muathcal A}({\muathcal L}),t)$ is the $^*$-algebra generated by $\zetaci_{\muathcal A}({\muathcal L})$ and $t$, $(D_{\muathbb N} \xii)(n) := n\xii(n)$, $\varphiorall \xii\in\ell^2({\muathbb N})$, and 
$Tr$ is the usual trace on ${\muathcal B}(\ell^2({\muathbb N}))$, \item[$(2)$] if $({\muathcal L},{\muathcal H},\G;{\muathcal A}m,\tau)$ is odd, \betaegin{flalign*} & {\muathcal L}_\rhotimes := {}^*\muathrm{alg}(\zetaci_{\muathcal A}({\muathcal L}),t), & \qquad & {\muathcal H}_\rhotimes := {\muathcal H} \otimes \ell^2({\muathbb N}) \otimes {\muathbb C}^2, & \\ & D_\rhotimes := D \otimes I \otimes \varepsilon_1 + I \otimes D_{\muathbb N} \otimes \varepsilon_2, & \qquad & \G_\rhotimes := I \otimes I \otimes \varepsilon_3, & \\ & {\muathcal A}m_\rhotimes := {\muathcal A}m \otimes {\muathcal B}(\ell^2({\muathbb N})) \otimes M_2({\muathbb C}), & \qquad & \tau_\rhotimes := \tau \otimes Tr \otimes tr, & \end{flalign*} where $tr$ is the normalized trace on $M_2({\muathbb C})$. \nuoindent Moreover, in both cases, if $\varsigmagma} \def\S{\Sigmaom$ is the dimension of the original spectral triple, then the dimension of the new spectral triple is $\varsigmagma} \def\S{\Sigmaom+1$. \end{itemize} \end{proposition} \betaegin{proof} We only prove the even case, the odd case being similar. Let us first observe that, since $\alpha$ is an automorphism, ${\muathcal A}_\infty = {\muathcal A}$, $\alpha_\infty=\alpha$, and $\zetaci_{\muathcal A}(a) = \wt{\pii_u}(a)p$, $\varphiorall a\in{\muathcal A}$. Let $\pii:{\muathcal A}\tauo{\muathcal B}({\muathcal H})$ be the representation implied by the spectral triple $({\muathcal L},{\muathcal H},D,\G;{\muathcal A}m,\tau)$, and consider $(\widetilde{\pii}(a)\xii)(n) := \pii(\alpha^{-n}(a))\xii(n)$, $\varphiorall a\in{\muathcal A}$, $\xii\in{\muathcal H}\otimes\ell^2({\muathbb N})$, $n\in{\muathbb N}$, which is a representation of ${\muathcal A}$ on ${\muathcal H}\otimes\ell^2({\muathbb N})$, and the shift operator $$ (W\xii)(n) := \betaegin{cases} 0, & n=0,\\ \xii(n-1), & n\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq 1. 
\end{cases}\ $$ Then, it is easy to see that $({\muathcal H}\otimes\ell^2({\muathbb N}),\widetilde{\pii},W)$ is a covariant representation of $({\muathcal A},\alpha,{\muathbb N})$ on ${\muathcal H}\otimes\ell^2({\muathbb N})$, in the sense of Definition \rhoef{Def:CovRep}. Therefore it induces a non-degenerate representation $\widehat{\pii}$ of ${\muathcal A}\rhotimes_\alpha {\muathbb N} = \lambda} \def\La{\Lambdaangle \zetaci({\muathcal A}), t \rhoangle$ on ${\muathcal H}\otimes\ell^2({\muathbb N})$, such that $\widehat{\pii}\circ \zetaci_{\muathcal A} = \widetilde{\pii}$, and $\widehat{\pii}(t)=W$. Hence $\widehat{\pii}({\muathcal A}\rhotimes_\alpha {\muathbb N}) \varsigmagma} \def\S{\Sigmaubset {\muathcal A}m_\rhotimes$, while the facts that $D_\rhotimes\widehat{\in} {\muathcal A}m_\rhotimes$, and $(1+D_\rhotimes^2)^{-1}$ is $\tau_\rhotimes$-compact follow from Proposition \rhoef{tensorProductTriple}. It remains to prove that $\| [ D_\rhotimes, \widehat{\pii}(a) ] \| < \infty$, $\varphiorall a\in{\muathcal L}_\rhotimes$. Since the commutators $[\G\otimes D_{\muathbb N},\widehat{\pii}(a)]$ and $[D\otimes I,W]$ vanish, while $\| [\G\otimes D_{\muathbb N}, W] \| \lambda} \def\La{\Lambdaeq 1$, it is enough to estimate the commutators $\| [D\otimes I,\widehat{\pii}(a) ] \| = \| \delta} \def\D{\Deltaiag \{ [D,\pii(\alpha^{-n}(a)) ] : n\in{\muathbb N} \} \| = \varsigmagma} \def\S{\Sigmaup_{n\in{\muathbb N}} \| [D,\pii(\alpha^{-n}(a)) ] \| < \infty$, and the claim follows. \nuoindent We now prove the statement about the dimension, which in turn implies (again) the $\tauau$-compactness of the resolvent. By Proposition \rhoef{dimension-Lambda}, the Hausdorff dimension of $D_\rhotimes$ is given by \betaegin{displaymath} \lambda} \def\La{\Lambdaimsup_{t\tauo +\infty}\varphirac{\lambda} \def\La{\Lambdaog(\Lambda_t(D_\rhotimes))}{\lambda} \def\La{\Lambdaog t}. 
\end{displaymath}
We observe that $\Lambda_{t}(D_{\mathbb N})=[t]$ and thus
\[
\limsup_{t\to \infty} \frac{\log \Lambda_t (D_{\mathbb N})}{\log t}=\lim_{t\to \infty} \frac{\log ([t])}{\log t}=1.
\]
Now, by applying Proposition \ref{tensorProductTriple}, we are done.
\end{proof}
The next result has to do with the case of crossed products with respect to endomorphisms.
\begin{theorem}\label{triple-cross-prod-N}
Let ${\mathcal A}$ be a unital $C^*$-algebra, $\alpha\in \End({\mathcal A})$ an injective, unital $*$-endomorphism, ${\mathcal A}_\infty=\varinjlim {\mathcal A}$ the inductive limit described in \eqref{eq:CstarIndLim1}, and $({\mathcal L}_\infty,{\mathcal H}_\infty,D_{\infty};{\mathcal M}_\infty,\tau_\infty)$ a semifinite spectral triple of dimension $p$ on ${\mathcal A}_\infty$. If the morphism $\alpha_\infty\in {\rm Aut}({\mathcal A}_\infty)$ is Lip-semibounded, then there exists a semifinite spectral triple $({\mathcal L}_\rtimes,{\mathcal H}_\rtimes,D_\rtimes;{\mathcal M}_\rtimes,\tau_\rtimes)$ of dimension $p+1$ on the crossed product $C^*$-algebra ${\mathcal A}\rtimes_\alpha {\mathbb N}$.
\end{theorem}
\begin{proof}
Note that ${\mathcal A}\rtimes_\alpha {\mathbb N}={\mathcal A}_\infty\rtimes_{\alpha_\infty} {\mathbb N}$. Now the claim follows by applying the previous proposition.
\end{proof}

\section[Spectral triples for crossed products]{Spectral triples for crossed products generated by self-coverings}
In this section we exhibit some examples of semifinite spectral triples for crossed products with respect to an endomorphism: the self-covering of a $p$-torus, the self-covering of the rational rotation algebra, the endomorphism of the UHF algebra given by the shift, and the self-covering of the Sierpi\'nski gasket.
In this paper we consider two pictures of the inductive limits. One is what we call the Cuntz picture. The other one deals with an increasing sequence of algebras ${\muathcal A}_i$ with the morphisms $\varphi_i: {\muathcal A}_i\tauo {\muathcal A}_{i+1}$ being the inclusions, which entails that the morphisms $\alphalpha_i: {\muathcal A}_i\tauo{\muathcal A}_i$ are injective. The following result gives a more detailed description of the second picture. \betaegin{proposition}\lambda} \def\La{\Lambdaabel{prop41} Given a family of algebras $\{{\muathcal A}_i\}_{i\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq 1}$, a morphism $\alpha_1: {\muathcal A}_1\tauo{\muathcal A}_1$, a collection of isomorphisms $\beta_i: {\muathcal A}_i\tauo{\muathcal A}_{i+1}$ for all $i\in {\muathbb N}$, one can obtain the following commuting diagram \betaegin{equation*} \xiymatrix{ & {\muathcal A}_1 \alphar[rr]^{ \varphi_1 } \alphar[dd]^{\alpha_1} && {\muathcal A}_2 \alphar[rr]^{ \varphi_2 } \alphar[dd]^{\alpha_2} && {\muathcal A}_3 \alphar[rr]^{ \varphi_3 } \alphar[dd]^{\alpha_3} && {\muathcal D}ots \alphar[r] & {\muathcal A}_\infty \alphar[dd]^{\alpha_\infty} \\ &&&&&&&&\\ & {\muathcal A}_1 \alphar[rr]^{ \varphi_1 } \alphar[uurr]^{\beta_1} && {\muathcal A}_2 \alphar[rr]^{ \varphi_2 } \alphar[uurr]^{\beta_2} && {\muathcal A}_3 \alphar[rr]^{ \varphi_3 } \alphar[uurr]^{\beta_3} && {\muathcal D}ots \alphar[r] & {\muathcal A}_\infty } \end{equation*} where the morphisms $\alpha_i: {\muathcal A}_i\tauo{\muathcal A}_i$ are defined by the formula $\alpha_i :=\beta_{i-1}\circ \alpha_{i-1}\circ \beta_{i-1}^{-1}$ for $i\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq 2$, $\varphi_1 := \betaeta_1\circ \alpha_1$, $\varphi_i := \alpha_{i+1}\circ \beta_i =\beta_i\circ \alpha_i$ for $i\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq 2$. 
Moreover, the morphisms $\{\varphi_i\}_{i\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq 1}$ give rise to an inductive limit that we denote by ${\muathcal A}_\infty$ and the former morphisms $\{\alpha_i\}_{i\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq 1}$ and $\{\beta_i\}_{i\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq 1}$ induce morphisms $\alpha_\infty, \beta_\infty: {\muathcal A}_\infty\tauo{\muathcal A}_\infty$ that are inverses of each other. \end{proposition} \betaegin{proof} The first part of the statement, namely the one concerning the commuting diagram, follows by direct computations. Now we take care of the second part concerning the morphisms $\alpha_\infty$ and $\beta_\infty$. We observe that \betaegin{align*} \alpha_\infty(f_1, f_2, \lambda} \def\La{\Lambdadots) & = (\alpha_1(f_1), \alpha_{2}(f_2),\lambda} \def\La{\Lambdadots)\\ \beta_\infty(f_1, f_2, \lambda} \def\La{\Lambdadots) & = (0,\beta_{1}(f_1), \beta_{2} (f_2),\lambda} \def\La{\Lambdadots) \end{align*} for all $(f_1, f_2,\lambda} \def\La{\Lambdadots)\in{\muathcal A}_\infty$. On the one hand, we have that \betaegin{align*} \alpha_\infty\circ \beta_\infty(f_1, f_2, \lambda} \def\La{\Lambdadots) & = \alpha_\infty(0,\beta_{1}(f_1), \beta_{2} (f_2),\lambda} \def\La{\Lambdadots)\\ & = (0,\alpha_2\circ\beta_{1}(f_1), \alpha_3\circ\beta_{2} (f_2),\lambda} \def\La{\Lambdadots)\; . \end{align*} On the other hand, we have that \betaegin{align*} \beta_\infty\circ\alpha_\infty (f_1, f_2, \lambda} \def\La{\Lambdadots) & = \beta_\infty(\alpha_1(f_1), \alpha_{2}(f_2),\lambda} \def\La{\Lambdadots)\\ & = (0,\beta_1\circ \alpha_1(f_1), \beta_2\circ\alpha_{2}(f_2),\lambda} \def\La{\Lambdadots)\; . \end{align*} Since $ \alpha_{i+1}\circ \beta_i =\beta_i\circ \alpha_i$ we are done. \end{proof} \betaegin{notation}\lambda} \def\La{\Lambdaabel{notazioneBs} Before the discussion of the examples, we introduce some notation. We will consider an invertible matrix $B\in M_p({\muathbb Z})$ and we will set $A:=(B^T)^{-1}$. 
The following exact sequence will play a role in the definition of some of the Dirac operators $$ 0\tauo {\muathbb Z}^p\tauo A{\muathbb Z}^p\tauo \widehat{{\muathbb Z}_B}:= A{\muathbb Z}^p/{\muathbb Z}^p\tauo 0\; . $$ Moreover, we will consider a section $s: \widehat{{\muathbb Z}_B}\tauo A{\muathbb Z}^p$ such that $s({\muathcal D}ot)\in [0,1)^p$. We set $s_h(x):=A^{h-1}s(x)$ as in \cite{AiGuIs01}, p. 1387-1388. Note that $|\widehat{{\muathbb Z}_B}|=|\delta} \def\D{\Deltaet(B)|=:r$. \end{notation} \muedskip \varsigmagma} \def\S{\Sigmaubsection{The crossed product for the self-coverings of the $p$-torus} We begin with the case of tori. The $p$-torus ${\muathbb T}^p :=\muathbb{R}^p/\muathbb{Z}^p$ can be endowed with a Dirac operator acting on the Hilbert space ${\muathcal H}_0 := {\muathbb C}^{2^{[p/2]}} \otimes L^2({\muathbb T}^p,dm)$ $$ D_0 :=-i\varsigmagma} \def\S{\Sigmaum_{a=1}^p \varepsilon_a \otimes \piartial^a, $$ where the matrices $\varepsilon_a= (\varepsilon_a)^*\in M_{2^{[p/2]}}({\muathbb C})$, $\varepsilon_a \varepsilon_b + \varepsilon_b \varepsilon_a=2 \delta} \def\D{\Deltaelta_{a,b}$, furnish a representation of the Clifford algebra for the $p$-torus (see \cite{Spin} for more information on Dirac operators). Then, we may consider the following spectral triple $$ ({\muathcal L}_0 :=C^1({\muathbb T}^p), {\muathcal H}_0, D_0). $$ We recall that the spectral triple considered for the torus is even precisely when $p$ is even. With the above notation and $B\in M_p({\muathbb Z})$, let $\pii:t\in{\muathbb T}^p\muapsto Bt\in{\muathbb T}^p$ be the self-covering, $\alpha(f)(t)=f(Bt)$ the associated endomorphism of ${\muathcal A} =C({\muathbb T}^p)$. Then we consider the inductive system \eqref{eq:CstarIndLim1} and construct the inductive limit ${\muathcal A}_\infty=\delta} \def\D{\Deltaisplaystyle\varinjlim{\muathcal A}_n$. 
An alternative description is given by the following isomorphic inductive family: ${\muathcal A}_n$ consists of continuous $B^n{\muathbb Z}^p$-periodic functions on ${\muathbb R}^p$, and the embedding is the inclusion. In the following we denote by ${\muathbb T}_n$ the $p$-torus ${\muathbb R}^p/B^n{\muathbb Z}^p$. Assume now that $B$ is purely expanding, namely $\|B^n v\|$ goes to infinity for all vectors $v\nueq 0$, hence $\|A\|<1$, where $A=(B^T)^{-1}$. In \cite{AiGuIs01}, we produced a semifinite spectral triple on ${\muathcal A}_\infty=\varinjlim C({\muathbb T}_n)$. More precisely, we constructed a Dirac operator\varphiootnote{The symbol $s_h({\muathcal D}ot)$ denotes the section defined in Notation \rhoef{notazioneBs}.} $D_\infty$ acting on ${\muathcal H}_\infty:= {\muathbb C}^{2^{[p/2]}} \otimes L^2({\muathbb T}^p,dm)\otimes L^2({\muathcal A}r,\tauau)$ $$ D_\infty := D_0 \otimes I - 2\pii \varsigmagma} \def\S{\Sigmaum_{a=1}^p \varepsilon_a \otimes I \otimes {\muathbb I}gg( \varsigmagma} \def\S{\Sigmaum_{h=1}^\infty I^{\otimes h-1} \otimes \delta} \def\D{\Deltaiag(s_{h}({\muathcal D}ot)^a) {\muathbb I}gg), $$ the algebra ${\muathcal L}_\infty := {\muathcal U}p_{n\in{\muathbb N}} C^1({\muathbb T}_n)\varsigmagma} \def\S{\Sigmaubset {\muathcal A}_\infty$ embeds into the injective limit $$ \varinjlim {\muathcal B}({\muathcal H}_0)\otimes M_{{\muolt}^n}({\muathbb C}) = {\muathcal B}({\muathbb C}^{2^{[p/2]}} \otimes L^2({\muathbb T}^p,dm)) \otimes \muathrm{UHF}_r\; , $$ where $\muathrm{UHF}_r$ denotes the infinite tensor product of $M_r({\muathbb C})$, see Section 4.3 for more details. 
The C$^*$-algebra ${\muathcal B}({\muathbb C}^{2^{[p/2]}} \otimes L^2({\muathbb T}^p,dm)) \otimes \muathrm{UHF}_r$ in turn embeds into ${\muathcal A}m_\infty := {\muathcal B}({\muathbb C}^{2^{[p/2]}} \otimes L^2({\muathbb T}^p,dm)) \otimes {\muathcal A}r$, where ${\muathcal A}r$ denotes the unique injective type II$_1$ factor obtained as the weak closure of the UHF algebra in the GNS representation of the unital trace, and we denote by $\tau_\infty := Tr\otimes\tau_{\muathcal A}r$ the trace on ${\muathcal A}m_\infty$. Then $( {\muathcal L}_\infty, {\muathcal H}_\infty,D_\infty; {\muathcal A}m_\infty, \tau_\infty)$ is a finitely summable, semifinite, spectral triple on $\varinjlim {\muathcal A}_n$, with Hausdorff dimension $p$. \betaegin{theorem}\lambda} \def\La{\Lambdaabel{teo-p-toro} Under the above hypotheses and with the notation of the former section, $C({\muathbb T}^p)\rhotimes_\alpha{\muathbb N}$ can be endowed with the finitely summable semifinite spectral triple $({\muathcal L}_\rhotimes, {\muathcal H}_\rhotimes,D_\rhotimes; {\muathcal A}m_\rhotimes, \tau_\rhotimes)$ of Theorem \rhoef{triple-cross-prod-N}, with Hausdorff dimension $p+1$. \end{theorem} \betaegin{proof} In order to construct a spectral triple on $C({\muathbb T}^p)\rhotimes_\alphalpha \muathbb{N}$, according to Theorem \rhoef{triple-cross-prod-N}, we only need to check that $\alphalpha_\infty$ is Lip-semibounded, that is \betaegin{displaymath} \varsigmagma} \def\S{\Sigmaup \{\Arrowvert [D_\infty ,\alphalpha_\infty^{-n}(f)]\Arrowvert, n\in{\muathbb N}\}<\infty, \quad \varphiorall f\in {\muathcal L}_\infty = {\muathcal U}p_{n\in{\muathbb N}} C^1({\muathbb T}_n). \end{displaymath} Let $f\in C^1({\muathbb T}_k)$. 
As observed in \cite{AiGuIs01}, the seminorms $L_{D_\infty}$, $L_{D_1}$, $L_{D_2}$, \lambda} \def\La{\Lambdadots \; are compatible and we have that $$ \| [D_\infty ,\alphalpha_\infty^{-n}(f)] \| = \| [D_0,f\circ B^{-n}] \| $$ Moreover, by using the relation $\varepsilon_a \varepsilon_b + \varepsilon_b \varepsilon_a=2 \delta} \def\D{\Deltaelta_{a,b}$ we obtain the following equalities \betaegin{align*} \| [D_0,f] \|^2 & = \lambda} \def\La{\Lambdaeft\| \varsigmagma} \def\S{\Sigmaum_{a=1}^p \varepsilon_a \otimes \piartial^a(f) \rhoight\|^2\\ & = \lambda} \def\La{\Lambdaeft\| \lambda} \def\La{\Lambdaeft(\varsigmagma} \def\S{\Sigmaum_{a=1}^p \varepsilon_a \otimes \piartial^a(f)\rhoight)^* \lambda} \def\La{\Lambdaeft(\varsigmagma} \def\S{\Sigmaum_{a=1}^p \varepsilon_a \otimes \piartial^a(f)\rhoight)\rhoight\| \\ & = \lambda} \def\La{\Lambdaeft\| \varsigmagma} \def\S{\Sigmaum_{a=1}^p (\varepsilon_a)^2 \otimes |\piartial^a f|^2\rhoight\| = \lambda} \def\La{\Lambdaeft\| \varsigmagma} \def\S{\Sigmaum_{a=1}^p 1 \otimes |\piartial^a f|^2\rhoight\| \end{align*} Now we compute $\| [D_0,f\circ B^{-n}] \|$. 
Setting $X=B^{-n}$ for simplicity, we have that \betaegin{align*} \| [D_0,f\circ X] \|^2 & = \lambda} \def\La{\Lambdaeft\| \varsigmagma} \def\S{\Sigmaum_{a=1}^p \varepsilon_a \otimes \piartial^a(f\circ X) \rhoight\|^2 \\ & = \lambda} \def\La{\Lambdaeft\| \lambda} \def\La{\Lambdaeft(\varsigmagma} \def\S{\Sigmaum_{a=1}^p \varepsilon_a \otimes \piartial^a(f\circ X)\rhoight)^* \lambda} \def\La{\Lambdaeft(\varsigmagma} \def\S{\Sigmaum_{a=1}^p \varepsilon_a \otimes \piartial^a(f\circ X)\rhoight)\rhoight\| \\ & = \lambda} \def\La{\Lambdaeft\| \lambda} \def\La{\Lambdaeft(\varsigmagma} \def\S{\Sigmaum_{a=1}^p \varepsilon_a \otimes \lambda} \def\La{\Lambdaeft( \varsigmagma} \def\S{\Sigmaum_{i=1}^p X_{a,i} (\piartial^i \overline{f})\circ X \rhoight) \rhoight) \lambda} \def\La{\Lambdaeft(\varsigmagma} \def\S{\Sigmaum_{b=1}^p \varepsilon_b \otimes \lambda} \def\La{\Lambdaeft( \varsigmagma} \def\S{\Sigmaum_{j=1}^p X_{b,j}(\piartial^j f)\circ X \rhoight) \rhoight)\rhoight\| \\ & = \lambda} \def\La{\Lambdaeft\| \varsigmagma} \def\S{\Sigmaum_{a=1}^p (\varepsilon_a)^2 \otimes \varsigmagma} \def\S{\Sigmaum_{i,j=1}^p X_{a,i} X_{a,j} (\piartial^i \overline{f})\circ X {\muathcal D}ot (\piartial^j f)\circ X \rhoight.\\ & \quad + \varsigmagma} \def\S{\Sigmaum_{a<b} \varepsilon_a \varepsilon_b \otimes \varsigmagma} \def\S{\Sigmaum_{i,j=1}^p X_{a,i} X_{b,j} (\piartial^i \overline{f})\circ X {\muathcal D}ot (\piartial^j f)\circ X \\ & \quad \lambda} \def\La{\Lambdaeft. 
+ \varsigmagma} \def\S{\Sigmaum_{a>b} \varepsilon_a \varepsilon_b \otimes \varsigmagma} \def\S{\Sigmaum_{i,j=1}^p X_{a,i} X_{b,j} (\piartial^i \overline{f})\circ X {\muathcal D}ot (\piartial^j f)\circ X \rhoight\| \\ & = \lambda} \def\La{\Lambdaeft\| \varsigmagma} \def\S{\Sigmaum_{a=1}^p 1 \otimes \lambda} \def\La{\Lambdaeft( \varsigmagma} \def\S{\Sigmaum_{i,j=1}^p X_{a,i} X_{a,j} (\piartial^i \overline{f})\circ X {\muathcal D}ot (\piartial^j f)\circ X \rhoight) \rhoight\| \\ & = \| {\muathbb I}g( (\nuabla f)\circ X, X^* X (\nuabla f)\circ X {\muathbb I}g)\| \\ & \lambda} \def\La{\Lambdaeq \| X^* X \| \lambda} \def\La{\Lambdaeft\| \varsigmagma} \def\S{\Sigmaum_{a=1}^p 1 \otimes (\piartial^a f)^2\rhoight\| = \| X\|^2 \| [D,f] \|^2 \; . \end{align*} These computations and the hypothesis on $B$ being purely expanding (cf. Proposition 2.6 in \cite{AiGuIs01}) imply that \betaegin{displaymath} \varsigmagma} \def\S{\Sigmaup\{\Arrowvert [D_\infty ,\alphalpha_\infty^{-n}(f)]\Arrowvert, n\in{\muathbb N}\}\lambda} \def\La{\Lambdaeq\varsigmagma} \def\S{\Sigmaup\{ \Arrowvert B^{-n}\Arrowvert \Arrowvert [D_\infty,f]\Arrowvert, n\in{\muathbb N}\}<\infty\; . \end{displaymath} \end{proof} \muedskip \varsigmagma} \def\S{\Sigmaubsection{The crossed product for the self-coverings of the rational rotation algebra} The present example is associated with a regular noncommutative self-covering with finite abelian group of deck transformations \cite{AiGuIs01}. \betaegin{definition} \lambda} \def\La{\Lambdaabel{def-reg-cov} A finite (noncommutative) covering with abelian group is an inclusion of (unital) $C^*$-algebras ${\muathcal A}\varsigmagma} \def\S{\Sigmaubset {\muathcal B}$ together with an action of a finite abelian group $\Gamma$ on ${\muathcal B}$ such that ${\muathcal A}={\muathcal B}^\Gamma$. We will say that ${\muathcal B}$ is a covering of ${\muathcal A}$ with deck transformations given by the group $\Gamma$. 
\end{definition} We are now going to give a description of the rational rotation algebra making small modifications to the description of $A_\vartheta} \def\Th{\Thetaeta$, $\vartheta} \def\Th{\Thetaeta=p/q\in\muathbb{Q}$, seen in \cite{BEEK}. We observe that $A_\vartheta} \def\Th{\Thetaeta$ reduces to $C({\muathbb T}^2)$ in the case $\vartheta} \def\Th{\Thetaeta\in{\muathbb Z}$. Consider the following matrices \betaegin{eqnarray*} (U_0)_{hk} = \delta} \def\D{\Deltaelta_{h,k}e^{2\pii i(k-1) \vartheta} \def\Th{\Thetaeta }, \quad (V_0)_{hk} = \delta} \def\D{\Deltaelta_{h+1,k} +\delta} \def\D{\Deltaelta_{h,q}\delta} \def\D{\Deltaelta_{k,1} \in M_q(\muathbb{C}) \end{eqnarray*} and $W_0(n) := U_0^{n_1}V_0^{n_2}$, for all $n=(n_1,n_2)\in{\muathbb Z}^2$. Let $p',p''\in{\muathbb N}$, $p',p''<q$, be such that $pp'+1=n'q$, $pp''-1=n''q$, for some $n',n''\in{\muathbb N}$, and introduce $P := \betaegin{pmatrix} 0 & p' \\ p'' & 0 \end{pmatrix} $, and $$ \widetilde{\gamma} \delta} \def\D{\Deltaef\G{\Gamma}_n(f)(t):=\tauextrm{ad}(W_0(P n))[f(t+n)]=V_0^{-p''n_1}U_0^{-p'n_2}f(t+n)U_0^{p'n_2}V_0^{p''n_1}, $$ for all $t\in{\muathbb R}^2$, $n\in{\muathbb Z}^2$. We have the following description of $A_\vartheta} \def\Th{\Thetaeta$ (cf. \cite{BEEK}) $$ A_\vartheta} \def\Th{\Thetaeta=\{f\in C(\muathbb{R}^2, M_q(\muathbb{C})) \, : \, f = \widetilde{\gamma} \delta} \def\D{\Deltaef\G{\Gamma}_{n}(f), n\in{\muathbb Z}^2 \}. $$ This algebra comes with a natural trace $$ \tauau(f):= \varphirac{1}{q}\int_{{\muathbb T}_0} \taur(f(t))dt, $$ where we are considering the Haar measure on ${\muathbb T}_0:={\muathbb R}^2/B{\muathbb Z}^2$ and $\taur(A)=\varsigmagma} \def\S{\Sigmaum_i a_{ii}$. We observe that the function $\taur(f(t))$ is ${\muathbb Z}^2$-periodic. 
Define \betaegin{align*} U(t_1,t_2)&:=e^{-2\pii i t_1/q} U_0\\ V(t_1,t_2)&:=e^{-2\pii i t_2/q} V_0 \end{align*} and \betaegin{displaymath} {\muathcal L}_\vartheta} \def\Th{\Thetaeta :=\lambda} \def\La{\Lambdaeft\{\varsigmagma} \def\S{\Sigmaum_{r,s}a_{rs}U^rV^s : (a_{rs})\in S(\muathbb{Z}^2) \rhoight\}, \end{displaymath} where $S(\muathbb{Z}^2)$ is the set of rapidly decreasing sequences. It is clear that the derivations $\piartial_1$ and $\piartial_2$, defined as follows on the generators, extend to ${\muathcal L}_\vartheta} \def\Th{\Thetaeta$ \betaegin{eqnarray*} \piartial_1(U^hV^k)&=&2\pii ihU^hV^k\\ \piartial_2(U^hV^k)&=&2\pii ikU^hV^k. \end{eqnarray*} Moreover, the above derivations extend to densely defined derivations both on $A_\vartheta} \def\Th{\Thetaeta$ and $L^2(A_\vartheta} \def\Th{\Thetaeta,\tauau)$. We still denote these extensions with the same symbols. We may consider the following spectral triple (see \cite{GBFV}) \betaegin{eqnarray*} ({\muathcal L}_0:={\muathcal L}_\vartheta} \def\Th{\Thetaeta , {\muathcal H}_0:={\muathbb C}^{2} \otimes L^2(A_\vartheta} \def\Th{\Thetaeta,\tauau), D_0 :=-i(\varepsilon_1 \otimes \piartial_1+\varepsilon_2 \otimes \piartial_2)), \end{eqnarray*} where $\varepsilon_1, \varepsilon_2$ denote the Pauli matrices. Given the integer-valued matrix $B\in M_2({\muathbb Z})$ such that $\delta} \def\D{\Deltaet(B)\equiv_q 1$, there is an associated endomorphism $\alphalpha: A_\vartheta} \def\Th{\Thetaeta\tauo A_\vartheta} \def\Th{\Thetaeta$ defined by $\alpha(f)(t)=f(Bt)$, \cite{Stacey}. Then, we consider the inductive limit ${\muathcal A}_\infty=\delta} \def\D{\Deltaisplaystyle\varinjlim{\muathcal A}_n$ as in \eqref{eq:CstarIndLim1}. 
As in the case of the torus one can consider the following isomorphic inductive family: ${\muathcal A}_n$ consists of continuous $B^n{\muathbb Z}^2$-invariant matrix-valued functions on ${\muathbb R}^2$, i.e $$ {\muathcal A}_n:=\{f\in C(\muathbb{R}^2, M_q(\muathbb{C})) \, : \, f =\widetilde{\gamma} \delta} \def\D{\Deltaef\G{\Gamma}_{B^n k}(f), k\in{\muathbb Z}^2 \}, $$ with trace $$ \tauau_n(f)=\varphirac{1}{q |\!\delta} \def\D{\Deltaet B^n|}\int_{{\muathbb T}_n}\taur(f(t))dt, $$ and the embedding is unital inclusion $\alphalpha_{n+1,n}: {\muathcal A}_n\hookrightarrow {\muathcal A}_{n+1}$. In particular, ${\muathcal A}_0={\muathcal A}$, and ${\muathcal A}_1={\muathcal B}$. This means that ${\muathcal A}_\infty$ may be considered as a solenoid $C^*$-algebra (cf. \cite{McCord}, \cite{LP2}). On the $n$-th noncommutative covering ${\muathcal A}_n$, the formula of the Dirac operator doesn't change and we can consider the following spectral triple \betaegin{eqnarray*} ({\muathcal L}_\vartheta} \def\Th{\Thetaeta^{(n)} , {\muathbb C}^{2} \otimes L^2({\muathcal A}_n,\tauau), D=-i(\varepsilon_1 \otimes \piartial_1 + \varepsilon_2 \otimes \piartial_2)). \end{eqnarray*} In \cite{AiGuIs01}, we produced a semifinite spectral triple on ${\muathcal A}_\infty=\varinjlim {\muathcal A}_n$. 
More precisely, we constructed a Dirac operator $D_\infty$ acting on ${\muathcal H}_\infty:= {\muathbb C}^{2} \otimes L^2({\muathcal A}_0,\tauau_0)\otimes L^2({\muathcal A}r,\tauau)$ \[ D_\infty := D_0 \otimes I - 2\pii \varsigmagma} \def\S{\Sigmaum_{a=1}^2 \varepsilon_a \otimes I \otimes {\muathbb I}gg( \varsigmagma} \def\S{\Sigmaum_{h=1}^\infty I^{\otimes h-1} \otimes \delta} \def\D{\Deltaiag(s_{h}({\muathcal D}ot)^a) {\muathbb I}gg), \] the algebra ${\muathcal A}_\infty$ embeds into the injective limit $$ \varinjlim {\muathcal B}({\muathbb C}^{2} \otimes L^2({\muathcal A}_0,\tauau_0))\otimes M_{{\muolt}^n}({\muathbb C}) = {\muathcal B}({\muathbb C}^{2} \otimes L^2({\muathcal A}_0,\tauau_0)) \otimes \muathrm{UHF}_{\muolt} $$ which in turn embeds into ${\muathcal A}m_\infty := {\muathcal B}({\muathbb C}^{2} \otimes L^2({\muathcal A}_0,\tauau_0)) \otimes {\muathcal A}r$, which is endowed with the trace $\tau_\infty := Tr\otimes\tau_{\muathcal A}r$. Then $({\muathcal L}_\infty, {\muathcal H}_\infty,D_\infty; {\muathcal A}m_\infty, \tau_\infty)$ is a finitely summable, semifinite, spectral triple on $\varinjlim {\muathcal A}_n$, with Hausdorff dimension $2$ (\cite{AiGuIs01}, Theorem 3.7). \betaegin{theorem} Under the above hypotheses and with the notation of the former section, $A_\vartheta} \def\Th{\Thetaeta\rhotimes_\alpha{\muathbb N}$ can be endowed with the finitely summable semifinite spectral triple $({\muathcal L}_\rhotimes, {\muathcal H}_\rhotimes,D_\rhotimes; {\muathcal A}m_\rhotimes, \tau_\rhotimes)$ of Theorem \rhoef{triple-cross-prod-N}, with Hausdorff dimension $3$. \end{theorem} \betaegin{proof} According to Theorem \rhoef{triple-cross-prod-N} we only need to check that $\alphalpha_\infty$ is Lip-semibounded, that is $$ \varsigmagma} \def\S{\Sigmaup\{\Arrowvert [D_\infty,\alphalpha_\infty^{-n}(f)]\Arrowvert, n\in{\muathbb N}\} < \infty, \quad \varphiorall f\in {\muathcal L}_\infty \; . 
$$
This is true because similar computations to those in the proof of Theorem \ref{teo-p-toro} yield
\begin{align*}
\sup\{\Arrowvert [D_\infty,\alpha_\infty^{-n}(f)]\Arrowvert, n\in{\mathbb N}\} \leq \sup\{ \| B^{-n}\| \Arrowvert [D_\infty,f]\Arrowvert, n\in{\mathbb N}\}.
\end{align*}
The hypothesis of $B$ being purely expanding ensures that $\sup\{\Arrowvert [D_\infty,\alpha_\infty^{-n}(f)]\Arrowvert, n\in{\mathbb N}\}$ is finite.
\end{proof}

\medskip

\subsection{The crossed product for the shift-endomorphism of the UHF-algebra}\label{UHF}

Consider now the case of the UHF-algebra. This algebra is defined as the inductive limit of the following sequence of finite dimensional matrix algebras:
\begin{eqnarray*}
M_0 & = & M_\molt(\mathbb{C})\\
M_n & = & M_{n-1}\otimes M_\molt(\mathbb{C}) \quad n\geq 1,
\end{eqnarray*}
with maps $\phi_{ij}: M_j\to M_i$ given by $\phi_{ij}(a_j)=a_j\otimes 1$. We denote by ${\mathcal A}$ the $C^*$-algebra UHF$_r$ and set $M_{-1}=\mathbb{C}1_{\mathcal A}$ in the inductive limit defining the above algebra. The $C^*$-algebra ${\mathcal A}$ has a unique normalized trace that we denote by $\tau$. Consider the projection $P_n:L^2({\mathcal A},\tau)\to L^2(M_n,{\rm Tr})$, where ${\rm Tr}: M_n \to \mathbb{C}$ is the normalized trace, and define
\begin{eqnarray*}
Q_n&:=& P_n-P_{n-1}, \quad n\geq 0,\\
E(x)&:=&\tau(x)1_{\mathcal A}\,.
\end{eqnarray*}
For any $s>1$, Christensen and Ivan \cite{Chris} defined the following spectral triple for the algebra UHF$_\molt$
\begin{eqnarray*}
({\mathcal L}_0, L^2({\mathcal A},\tau),D_0=\sum_{n\geq 0} \molt^{ns}Q_n )
\end{eqnarray*}
where ${\mathcal L}_0$ is the algebra consisting of the elements of ${\mathcal A}$ with bounded commutator with $D_0$. It was proved that for any such value of the parameter $s$, this spectral triple induces a metric which defines a topology equivalent to the weak$^*$-topology on the state space (\cite[Theorem 3.1]{Chris}).
We consider the endomorphism of ${\mathcal A}$ given by the right shift, $\alpha(x)=1\otimes x$. Then as in \eqref{eq:CstarIndLim1} we may consider the inductive limit ${\mathcal A}_\infty=\displaystyle\varinjlim{\mathcal A}_n$.
As in the previous sections, we have the following isomorphic inductive family: ${\mathcal A}_i$ is defined as
\begin{eqnarray*}
{\mathcal A}_0&=& {\mathcal A};\\
{\mathcal A}_n &=& M_\molt(\mathbb{C})^{\otimes n} \otimes {\mathcal A}_0;\\
{\mathcal A}_\infty &=& \varinjlim {\mathcal A}_i
\end{eqnarray*}
and the embedding is the inclusion. It is easy to see that ${\mathcal A}_\infty$ is again the UHF-algebra of the same type, since the corresponding supernatural number is the same.
In \cite{AiGuIs01}, we produced a semifinite spectral triple on $\varinjlim {\mathcal A}_n$.
More precisely, we defined the following Dirac operator acting on ${\mathcal H}_\infty := L^2({\mathcal A}r, \tau)\otimes L^2({\mathcal A}_0, \tau)$
\begin{equation}
D_{\infty}=I_{-\infty,-1} \otimes D_0+\sum_{k=1}^\infty \molt^{-sk} I_{-\infty,-k-1}\otimes F\otimes E,
\end{equation}
where $I_{-\infty,k}$ is the identity on the factors with indices in $[-\infty, k]$, $F: M_r({\mathbb C})\to M_r({\mathbb C})$ is defined as $F(x):=x-\tr(x)1$ for $x\in M_r({\mathbb C})$, and the algebra ${\mathcal A}_\infty$ embeds in the injective limit
$$
\varinjlim {\mathcal B}(L^2({\mathcal A}_0,\tau)) \otimes M_{{\molt}^n}({\mathbb C})={\mathcal B}(L^2({\mathcal A}_0,\tau))\otimes \mathrm{UHF}_r
$$
Set ${\mathcal L}_\infty = \cup_n{\mathcal L}_n$, ${\mathcal A}m_\infty = {\mathcal A}r \otimes {\mathcal B}(L^2({\mathcal A}_{0},\tau))$, $\tau_\infty:=\tau_{{\mathcal A}r}\otimes Tr$.
Then $({\mathcal L}_\infty,{\mathcal H}_\infty,D_{\infty}; {\mathcal A}m_\infty,\tau_\infty)$ is a finitely summable, semifinite, spectral triple, with Hausdorff dimension $2/s$ (\cite{AiGuIs01}, Theorem 5.6).
\begin{theorem}\label{UHFcrossedprod}
Under the above hypotheses and the notation of the former section, ${\rm UHF}_r\rtimes_\alpha {\mathbb N}$ can be endowed with the finitely summable semifinite spectral triple $({\mathcal L}_\rtimes, {\mathcal H}_\rtimes,D_\rtimes; {\mathcal A}m_\rtimes,\tau_\rtimes)$ of Theorem \ref{triple-cross-prod-N}, with Hausdorff dimension $1+2/s$.
\end{theorem}
\begin{proof}
According to Theorem \ref{triple-cross-prod-N}, in order to construct a spectral triple on ${\mathcal A} \rtimes_\alpha{\mathbb N}$ we only need to check that $\alpha_\infty$ is Lip-semibounded, that is
\begin{displaymath}
\sup\{\Arrowvert [D_\infty,\alpha_\infty^{-k}(f)]\Arrowvert, k\in{\mathbb N}\}< \infty, \quad \forall f\in {\mathcal L}_\infty.
\end{displaymath}
This is true because
\begin{align*}
\Arrowvert [D_\infty,\alpha_\infty^{-k}(f)]\Arrowvert = \molt^{-ks}\Arrowvert [D_\infty,f]\Arrowvert.
\end{align*}
In fact, let $f=(\bigotimes_{k=-\infty}^{-n-1}I)\otimes a\in {\mathcal A}_n$, $\alpha_\infty^{k}(f)=(\bigotimes_{j=-\infty}^{-n+k-1}I)\otimes a\in {\mathcal A}_{n-k}$ for $k\in{\mathbb Z}$.
The Hilbert space on which $D_\infty$ acts is the completion of ${\mathcal A}_\infty$. On this Hilbert space, we consider the right shift on the factors and we denote it by $U_\alpha$. We set $\Phi := \textrm{ad} (U_\alpha)$. Then we have that
\begin{eqnarray*}
[D_\infty,\alpha_\infty^{-k}(f)] &=& \sum_{h\in{\mathbb Z}} \molt^{hs}[Q_h,\left(\bigotimes_{j=-\infty}^{-n-k-1}I\right)\otimes a]\\
&=& \Phi^{-k} \left(\sum_{h\in{\mathbb Z}} \molt^{hs}[Q_{k+h},\left(\bigotimes_{j=-\infty}^{-n-1}I\right)\otimes a]\right)\\
&=& \molt^{-ks} \Phi^{-k}([D_\infty,f])
\end{eqnarray*}
where we used that $\Phi(Q_h)=Q_{h+1}$ and $\Phi\upharpoonright_{{\mathcal A}_\infty} = \alpha_\infty$.
\end{proof}
In the theorem above, we considered the $C^*$-algebra ${\rm UHF}_r\rtimes_\alpha {\mathbb N}$. We note that the crossed product of the UHF of type $2^\infty$ under the action of the bilateral shift, namely the $C^*$-algebra ${\rm UHF}_2 \rtimes_\alpha {\mathbb Z}$, was studied in \cite{BKRS}.
\muedskip \varsigmagma} \def\S{\Sigmaubsection{The crossed product for the self-coverings of the Sierpi\'nski gasket} We conclude this paper with the case of a self-covering of the Sierpi\'nski gasket that was studied by the authors in \cite{AGI3}. The Sierpi\'nski gasket is the self-similar fractal determined by $3$ similarities with scaling parameter 1/2 centered in the vertices $v_0=(0,0)$, $v_1=(1/2,\varsigmagma} \def\S{\Sigmaqrt{3}/2)$, $v_2=(1,0)$, namely the non-empty, compact set $K$, such that $$ K={\muathbb I}gcup_{j=0,1,2}w_j(K), $$ where $w_j$ is the dilation around $v_j$ with contraction parameter $1/2$ (see Figure \rhoef{fig:covering}). Denote by $V_0(K)$ the set $\{v_0, v_1, v_2\}$, and let $E_0(K):=\{ (p,q) : p,q\in V_0, p\nueq q\}$. We call an element of the family $\{w_{i_1} \circ \delta} \def\D{\Deltaots \circ w_{i_k}(K):k\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq0\}$ a {\it cell}, and call its diameter the size of the cell. We call an element of the family $E(K)=\{w_{i_1} \circ \delta} \def\D{\Deltaots \circ w_{i_k}(e):k\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq0, e\in E_{0}(K)\}$ an {\it (oriented) edge} of $K$ and we denote by $e^-$ (resp. $e^+$) the source (resp. the target) of the oriented edge $e$. Note that a cell $C:=w_{i_1} \circ \delta} \def\D{\Deltaots \circ w_{i_k}(K)$ has $\varsigmagma} \def\S{\Sigmaize(C)=2^{-k}$ and, if $e_0\in E_{0}(K)$, then $e=w_{i_1} \circ \delta} \def\D{\Deltaots \circ w_{i_k}(e_0)$ has length $2^{-k}$. In the following we shall consider $K_0:=K$, $E_0:=E(K)$, $K_n:=w_0^{-n}K_0$. Let us now consider the middle point $x_{i,i+1}$ of the segment $(w_0^{-1}v_i,w_0^{-1}v_{i+1})$, $i=0,1,2$, the map $R_{i+1,i}:w_0^{-1}w_{i}K\tauo w_0^{-1}w_{i+1}K$ consisting of the rotation of $\varphirac43\pii$ around the point $x_{i,i+1}$, $i=0,1,2$. 
We then construct the coverings $p:K_1\tauo K$ and $\pihi: K\tauo K$ given by $$ p(x)= \betaegin{cases} x,&x\in K,\\ R_{0,1}(x),&x\in w_0^{-1}w_1 K,\\ R_{0,2}(x),&x\in w_0^{-1}w_2 K, \end{cases} $$ and \betaegin{displaymath} \pihi(x)=\lambda} \def\La{\Lambdaeft\{ \betaegin{array}{ll} w_0^{-1}x & \tauextrm{if $x\in C_0$} \\ R_{0,1}(w_0^{-1}(x)) & \tauextrm{if $x\in C_1$}\\ R_{0,2}(w_0^{-1}(x)) & \tauextrm{if $x\in C_2$}\\ \end{array} \rhoight.\\ \end{displaymath} Note that $p(x)=\pihi(w_0(x))$ for all $x\in K_1$ (see Figure \rhoef{fig:covering}). \betaegin{figure} \varsigmagma} \def\S{\Sigmacalebox{1.75}{\delta} \def\D{\Deltaef\taurianglewidth{2cm} \pigfdeclarelindenmayersystem{Sierpinski triangle}{ \varsigmagma} \def\S{\Sigmaymbol{X}{\pigflsystemdrawforward} \varsigmagma} \def\S{\Sigmaymbol{Y}{\pigflsystemdrawforward} \rhoule{X -> X-Y+X+Y-X} \rhoule{Y -> YY} } \varphioreach \lambda} \def\La{\Lambdaevel in {6}{ \tauikzset{ l-system={step=\taurianglewidth/(2^\lambda} \def\La{\Lambdaevel), order=\lambda} \def\La{\Lambdaevel, angle=-120} } \betaegin{tikzpicture} \varphiill [black] (0,0) -- ++(0:\taurianglewidth) -- ++(120:\taurianglewidth) -- cycle; \delta} \def\D{\Deltaraw [draw=none] (0,0) l-system [l-system={Sierpinski triangle, axiom=X},fill=white]; \nuode (bbb) at (-.05,-0.075) {$\varsigmagma} \def\S{\Sigmacalebox{.5}{$v_0$}$}; \nuode (bbb) at (1,1.85) {$\varsigmagma} \def\S{\Sigmacalebox{.5}{$v_2$}$}; \nuode (bbb) at (2.09,-.075) {$\varsigmagma} \def\S{\Sigmacalebox{.5}{$v_1$}$}; \nuode (bbb) at (0,-.87) {$\varsigmagma} \def\S{\Sigmacalebox{.5}{$\;$}$}; \end{tikzpicture} } } \varsigmagma} \def\S{\Sigmacalebox{3}{ \delta} \def\D{\Deltaef\taurianglewidth{2cm} \varphioreach \lambda} \def\La{\Lambdaevel in {6}{ \tauikzset{ l-system={step=\taurianglewidth/(2^\lambda} \def\La{\Lambdaevel), order=\lambda} \def\La{\Lambdaevel, angle=-120} } \betaegin{tikzpicture} \varphiill [black] (0,0) -- ++(0:\taurianglewidth) -- ++(120:\taurianglewidth) -- cycle; \delta} 
\def\D{\Deltaraw [draw=none] (0,0) l-system [l-system={Sierpinski triangle, axiom=X},fill=white]; \delta} \def\D{\Deltaraw[line width=0.05mm, <-] (.75,-.1) to[out=-90,in=-90] (1.25,-.1); \delta} \def\D{\Deltaraw[line width=0.05mm, <-] (0.25,.6) to[out=135,in=120] (.5,1); \nuode (bbb) at (-.05,-0.075) {$\varsigmagma} \def\S{\Sigmacalebox{.25}{$v_0$}$}; \nuode (bbb) at (.395,.87) {$\varsigmagma} \def\S{\Sigmacalebox{.25}{$x_{2,0}$}$}; \nuode (bbb) at (1.65,.87) {$\varsigmagma} \def\S{\Sigmacalebox{.25}{$x_{1,2}$}$}; \nuode (bbb) at (1,1.85) {$\varsigmagma} \def\S{\Sigmacalebox{.25}{$w_0^{-1}v_2$}$}; \nuode (bbb) at (1,-.075) {$\varsigmagma} \def\S{\Sigmacalebox{.25}{$x_{0,1}=v_1$}$}; \nuode (bbb) at (2.07,-.075) {$\varsigmagma} \def\S{\Sigmacalebox{.25}{$w_0^{-1}v_1$}$}; \nuode (bbb) at (0.1,.89) {$\varsigmagma} \def\S{\Sigmacalebox{.25}{$R_{0,2}$}$}; \nuode (bbb) at (1.1,-.35) {$\varsigmagma} \def\S{\Sigmacalebox{.25}{$R_{0,1}$}$}; \end{tikzpicture} } } \lambda} \def\La{\Lambdaabel{fig:covering} {\muathcal A}ption{The Sierpi\'nski gasket $K=K_0$ and the covering map $p_1=p: K_1\tauo K$.} \end{figure} Similarly, for every $n\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq 0$, we define a family of coverings $p_n: K_{n+1}\tauo K_{n}$ and $\pihi_n: K_n\tauo K_n$ by $p_{n+1} := w_0^{-n} \circ p \circ w_0^{n}$ and $\pihi_n := w_0^{-n} \circ \pihi \circ w_0^{n}$. \betaegin{proposition} The following diagrams are commutative \betaegin{equation*} \xiymatrix{ & K_0 && K_1 \alphar[ll]^{ p_1 } && K_2 \alphar[ll]^{ p_2 } && {\muathcal D}ots \alphar[ll]^{p_3} \\ &&&&&&&&\\ & K_0 \alphar[uu]^{ \pihi_0 } && K_1 \alphar[ll]^{ p_1 } \alphar[uu]^{ \pihi_1 } && K_2 \alphar[ll]^{ p_2 } \alphar[uu]^{ \pihi_2 } && {\muathcal D}ots \alphar[ll]^{ p_3 } } \end{equation*} \end{proposition} \betaegin{proof} Indeed, first note that $\pihi_0 \circ p_1=\pihi \circ p=p\circ \pihi_1$ and $w_0\circ \pihi_1=p_1$, which implies that $p_1\circ\pihi_1=\pihi_0\circ p_1$. 
Then, for any $n\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq 1$ we have \betaegin{align*} p_n \circ \pihi_n & = w_0^{-n+1} \circ p \circ w_0^{n-1} \circ w_0^{-n} \circ \pihi \circ w_0^n= w_0^{-n+1} \circ p \circ w_0^{-1} \circ \pihi \circ w_0 \circ w_0^{n-1} \\ & = w_0^{-n+1} \circ p_1 \circ \pihi_1 \circ w_0^{n-1} = w_0^{-n+1} \circ \pihi_0 \circ p_1 \circ w_0^{n-1} = \pihi_{n-1} \circ p_n. \end{align*} \end{proof} It follows that the maps $\{\pihi_n\}_{n\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq 0}$ induce a map in the projective limit and by functoriality a map on $\varinjlim C(K_i)$ which we denote by $\alphalpha_\infty$. An element $f\in C(K_n)$ can be seen in $\varinjlim C(K_i)$ as the sequence $[f]=(0_{n}, f, f\circ p_{n+1}, f\circ p_{n, n+2}, \lambda} \def\La{\Lambdadots )$, where $p_{n, n+k}:= p_{n+1}\circ {\muathcal D}ots\circ p_{n+k}$. Accordingly the map $\alphalpha_\infty$ reads as $$ \alphalpha_\infty[f] := (0_{n}, f\circ \pihi_n, f\circ p_{n+1} \circ \pihi_{n+1}, f\circ p_{n, n+2} \circ \pihi_{n+2}, \lambda} \def\La{\Lambdadots ). $$ By functoriality each $(\pihi_n)^*: C(K_n)\tauo C(K_n)$ is a proper endomorphism, that is, it is injective, but not surjective. With the notation of Proposition \rhoef{prop41}, we set $\beta_i$ equal to $w_0^*$ for all $i\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq 0$. Thanks to Proposition 3.1, the map $\alphalpha_\infty$ is invertible and its inverse is given by $$ \alphalpha_\infty^{-1}[f] := (0_{n+1}, f\circ w_0, f\circ p_{n+1} \circ w_0, f\circ p_{n, n+2} \circ w_0, \lambda} \def\La{\Lambdadots ). $$ Denote by $E_n:=\{w_0^{-n}e, e\in E(K)\}$, $E_\infty:={\muathcal U}p_{n\gamma} \delta} \def\D{\Deltaef\G{\Gammaeq 0} E_n$, $E^n:=\{e\in E_\infty, \lambda} \def\La{\Lambdaength(e)=2^n\}$, $P^n$ the projection of $\ell_2(E_\infty)$ onto $\ell_2(E^n)$. It was shown in \cite[Sec. 
6]{AGI3} that ${\mathcal A}_\infty:=\varinjlim C(K_n)$ supports a semifinite spectral triple $({\mathcal L}_\infty,{\mathcal H}_\infty, D_\infty; {\mathcal A}m_\infty, \tau_\infty)$, where ${\mathcal A}m_\infty:=\pi_\tau(B_\infty)''$ is a suitable closure of the geometric operators (see \cite[Sec. 5]{AGI3} for a precise definition), $D_\infty:=F|D|: \ell^2(E_\infty)\to \ell^2(E_\infty)$, $F$ is the orientation reversing operator on edges and
$$
|D_\infty|:=\sum_{n\in{\mathbb Z}}2^{-n}P^n.
$$
\begin{theorem}\label{teo-gasket}
Under the above hypotheses and with the notation of the former section, $C(K)\rtimes_\alpha{\mathbb N}$ can be endowed with the finitely summable semifinite spectral triple $({\mathcal L}_\rtimes, {\mathcal H}_\rtimes,D_\rtimes; {\mathcal A}m_\rtimes, \tau_\rtimes)$ of Theorem \ref{triple-cross-prod-N}, with Hausdorff dimension $\log_2 3+1$.
\end{theorem}
\begin{proof}
According to Theorem \ref{triple-cross-prod-N}, in order to construct a spectral triple on $C(K)\rtimes_\alpha \mathbb{N}$ we only need to check that $\alpha_\infty$ is Lip-semibounded, that is
$$
\sup_{k\geq 0} \Arrowvert[D_\infty, \alpha_\infty^{-k}(f)]\Arrowvert <\infty, \quad \forall f\in {\mathcal L}_\infty := \cup_{n\geq 0}\ \mathrm{Lip}(K_n).
$$
We are going to show that for any $f\in C(K_n)$ it holds that
$$
\Arrowvert [D_\infty, \alpha_\infty^{-k}(f)] \Arrowvert =\frac{\Arrowvert [D_\infty, f]\Arrowvert}{2^k} \quad k\in \mathbb{N}.
$$
Indeed, since both $p_n$ and $\phi_n$ are isometries, we have that
\begin{eqnarray*}
\Arrowvert [D_\infty, \alpha_\infty^{-k}(f)] \Arrowvert &=& \left\Arrowvert \oplus_{e\in E_\infty} \frac{\alpha_\infty^{-k}(f)(e^+)-\alpha_\infty^{-k}(f)(e^-)}{l(e)} F \right\Arrowvert \\
&=& \left\Arrowvert \oplus_{e\in E_\infty} \frac{f(w_0^k(e^+))-f(w_0^k(e^-))}{l(e)} F \right\Arrowvert \\
&=& \left\Arrowvert \oplus_{e\in E_\infty} \frac{f(w_0^k(e^+))-f(w_0^k(e^-))}{2^k l(w_0^k(e))} F \right\Arrowvert \\
&=& \left\Arrowvert \oplus_{e'\in E_\infty} \frac{f(e'^+)-f(e'^-)}{2^k l(e')} F\right\Arrowvert \\
&=& \frac{\Arrowvert [D_\infty, f]\Arrowvert}{2^k}.
\end{eqnarray*}
\end{proof}

\section*{Acknowledgement}
We thank the referee for the attentive reading of this article and for useful suggestions.
This work was supported by the following institutions: the ERC Advanced Grant 669240 QUEST ``Quantum Algebraic Structures and Models'', the MIUR PRIN ``Operator Algebras, Noncommutative Geometry and Applications'', the INdAM-CNRS GREFI GENCO, and the INdAM GNAMPA.
V. A. acknowledges the support by the Swiss National Science foundation through the SNF project no. 178756 (Fibred links, L-space covers and algorithmic knot theory).
D. G. and T. I. acknowledge the MIUR Excellence Department Project awarded to the Department of Mathematics, University of Rome Tor Vergata, CUP E83C18000100006.

\begin{thebibliography}{10}

\bibitem{AiGuIs01}
V.~Aiello, D.~Guido, and T.~Isola.
\newblock Spectral triples for noncommutative solenoidal spaces from self-coverings.
\newblock {\em J. Math. Anal. Appl.}, 448(2):1378--1412, 2017.

\bibitem{AGI3}
V.~Aiello, D.~Guido, and T.~Isola.
\newblock A spectral triple for a solenoid based on the {S}ierpinski gasket.
\newblock {\em SIGMA Symmetry Integrability Geom. Methods Appl.}, 17(020):21, 2021.
{\muathbb I}bitem{BMR} J.~Bellissard, M.~Marcolli, and K.~Reihani. \nuewblock A spectral triple for a solenoid based on the sierpinski gasket. \nuewblock arXiv:1008.4617, 2010. {\muathbb I}bitem{BEEK} O.~Bratteli, G.~A. Elliott, D.~E. Evans, and A.~Kishimoto. \nuewblock Noncommutative spheres. {II}. {R}ational rotations. \nuewblock {\em J. Operator Theory}, 27(1):53--85, 1992. {\muathbb I}bitem{BKRS} O.~Bratteli, E.~St{\o}rmer, A.~Kishimoto, and M.~R{\o}rdam. \nuewblock The crossed product of a {UHF} algebra by a shift. \nuewblock {\em Ergodic Theory Dynam. Systems}, 13(4):615--626, 1993. {\muathbb I}bitem{Chris} E.~Christensen and C.~Ivan. \nuewblock Spectral triples for {AF} {$C^*$}-algebras and metrics on the {C}antor set. \nuewblock {\em J. Operator Theory}, 56(1):17--46, 2006. {\muathbb I}bitem{Cuntz} J.~Cuntz. \nuewblock The internal structure of simple {$C^{\alphast} $}-algebras. \nuewblock In {\em Operator algebras and applications, {P}art {I} ({K}ingston, {O}nt., 1980)}, volume~38 of {\em Proc. Sympos. Pure Math.}, pages 85--115. Amer. Math. Soc., Providence, R.I., 1982. {\muathbb I}bitem{DGMW} R.~J. Deeley, M.~Goffeng, B.~Mesland, and M.~F. Whittaker. \nuewblock Wieler solenoids, {C}untz-{P}imsner algebras and {$K$}-theory. \nuewblock {\em Ergodic Theory Dynam. Systems}, 38(8):2942--2988, 2018. {\muathbb I}bitem{Exel} R.~Exel. \nuewblock A new look at the crossed-product of a {$C^*$}-algebra by an endomorphism. \nuewblock {\em Ergodic Theory Dynam. Systems}, 23(6):1733--1750, 2003. {\muathbb I}bitem{FaKo} T.~Fack and H.~Kosaki. \nuewblock Generalized {$s$}-numbers of {$\tauau$}-measurable operators. \nuewblock {\em Pacific J. Math.}, 123(2):269--300, 1986. {\muathbb I}bitem{FloGho} R.~Floricel and A.~Ghorbanpour. \nuewblock On inductive limit spectral triples. \nuewblock {\em Proc. Amer. Math. Soc.}, 147(8):3611--3619, 2019. {\muathbb I}bitem{GaGr} O.~Gabriel and M.~Grensing. \nuewblock Spectral triples and generalized crossed products. 
\nuewblock arXiv:1310.5993, 2013. {\muathbb I}bitem{GBFV} J.~M. Gracia-Bond\'{\i}a, J.~C. V\'{a}rilly, and H.~Figueroa. \nuewblock {\em Elements of noncommutative geometry}. \nuewblock Birkh\"{a}user Advanced Texts: Basler Lehrb\"{u}cher. [Birkh\"{a}user Advanced Texts: Basel Textbooks]. Birkh\"{a}user Boston, Inc., Boston, MA, 2001. {\muathbb I}bitem{GuIs05} D.~Guido and T.~Isola. \nuewblock Singular traces, dimensions and {N}ovikov-{S}hubin invariants. \nuewblock In {\em Operator theoretical methods ({T}imi\c{s}oara, 1998)}, pages 151--171. Theta Found., Bucharest, 2000. {\muathbb I}bitem{GuIs09} D.~Guido and T.~Isola. \nuewblock Dimensions and singular traces for spectral triples, with applications to fractals. \nuewblock {\em J. Funct. Anal.}, 203(2):362--400, 2003. {\muathbb I}bitem{GuIs16} D.~Guido and T.~Isola. \nuewblock Spectral triples for nested fractals. \nuewblock {\em J. Noncommut. Geom.}, 11(4):1413--1436, 2017. {\muathbb I}bitem{Skalski} A.~Hawkins, A.~Skalski, S.~White, and J.~Zacharias. \nuewblock On spectral triples on crossed products arising from equicontinuous actions. \nuewblock {\em Math. Scand.}, 113(2):262--291, 2013. {\muathbb I}bitem{IoMa} B.~Iochum and T.~Masson. \nuewblock Crossed product extensions of spectral triples. \nuewblock {\em J. Noncommut. Geom.}, 10(1):65--133, 2016. {\muathbb I}bitem{Jordans} B.~P.~A. Jordans. \nuewblock Real dimensional spaces in noncommutative geometry. \nuewblock {\em J. Funct. Anal.}, 268(10):2820--2850, 2015. {\muathbb I}bitem{KaKy} J.~Kaad and D.~Kyed. \nuewblock Dynamics of compact quantum metric spaces. \nuewblock {\em Ergodic Theory Dynam. Systems}, 41(7):2069--2109, 2021. {\muathbb I}bitem{KwLe} B.~K. Kwa\'{s}niewski and A.~V. Lebedev. \nuewblock Crossed products by endomorphisms and reduction of relations in relative {C}untz-{P}imsner algebras. \nuewblock {\em J. Funct. Anal.}, 264(8):1806--1847, 2013. {\muathbb I}bitem{LP2} F.~Latr\'{e}moli\`ere and J.~A. Packer. 
\nuewblock Noncommutative solenoids and their projective modules. \nuewblock In {\em Commutative and noncommutative harmonic analysis and applications}, volume 603 of {\em Contemp. Math.}, pages 35--53. Amer. Math. Soc., Providence, RI, 2013. {\muathbb I}bitem{Spin} H.~B. Lawson, Jr. and M.-L. Michelsohn. \nuewblock {\em Spin geometry}, volume~38 of {\em Princeton Mathematical Series}. \nuewblock Princeton University Press, Princeton, NJ, 1989. {\muathbb I}bitem{McCord} M.~C. McCord. \nuewblock Inverse limit sequences with covering maps. \nuewblock {\em Trans. Amer. Math. Soc.}, 114:197--209, 1965. {\muathbb I}bitem{Murphy} G.~J. Murphy. \nuewblock Crossed products of {$C^*$}-algebras by endomorphisms. \nuewblock {\em Integral Equations Operator Theory}, 24(3):298--319, 1996. {\muathbb I}bitem{Paterson} A.~L.~T. Paterson. \nuewblock Contractive spectral triples for crossed products. \nuewblock {\em Math. Scand.}, 114(2):275--298, 2014. {\muathbb I}bitem{Ped} G.~K. Pedersen. \nuewblock {\em {$C^{\alphast} $}-algebras and their automorphism groups}, volume~14 of {\em London Mathematical Society Monographs}. \nuewblock Academic Press, Inc. [Harcourt Brace Jovanovich, Publishers], London-New York, 1979. {\muathbb I}bitem{Plaut} C.~Plaut. \nuewblock Every continuum has a compact universal cover. \nuewblock arXiv:2109.02152, 2021. {\muathbb I}bitem{Rieffel99} M.~A. Rieffel. \nuewblock Metrics on state spaces. \nuewblock {\em Doc. Math.}, 4:559--600, 1999. {\muathbb I}bitem{Sakai} S.~Sakai. \nuewblock {\em {$C\varsigmagma} \def\S{\Sigmap*$}-algebras and {$W\varsigmagma} \def\S{\Sigmap*$}-algebras}. \nuewblock Springer-Verlag, New York-Heidelberg, 1971. \nuewblock Ergebnisse der Mathematik und ihrer Grenzgebiete, Band 60. {\muathbb I}bitem{StaceyCrossed} P.~J. Stacey. \nuewblock Crossed products of {$C^\alphast$}-algebras by {$\alphast$}-endomorphisms. \nuewblock {\em J. Austral. Math. Soc. Ser. A}, 54(2):204--212, 1993. {\muathbb I}bitem{Stacey} P.~J. Stacey. 
\nuewblock Endomorphisms of rational rotation {$C^*$}-algebras. \nuewblock {\em Math. Proc. Cambridge Philos. Soc.}, 127(2):289--294, 1999. {\muathbb I}bitem{Takeda} Z.~Takeda. \nuewblock Inductive limit and infinite direct product of operator algebras. \nuewblock {\em Tohoku Math. J. (2)}, 7:67--86, 1955. {\muathbb I}bitem{Take} M.~Takesaki. \nuewblock {\em Theory of operator algebras. {I}}. \nuewblock Springer-Verlag, New York-Heidelberg, 1979. {\muathbb I}bitem{Tep} A.~Teplyaev. \nuewblock Spectral analysis on infinite {S}ierpi\'{n}ski gaskets. \nuewblock {\em J. Funct. Anal.}, 159(2):537--567, 1998. {\muathbb I}bitem{WO} N.~E. Wegge-Olsen. \nuewblock {\em {$K$}-theory and {$C^*$}-algebras: A friendly approach}. \nuewblock Oxford Science Publications. The Clarendon Press, Oxford University Press, New York, 1993. {\muathbb I}bitem{Will} D.~P. Williams. \nuewblock {\em Crossed products of {$C{^\alphast}$}-algebras}, volume 134 of {\em Mathematical Surveys and Monographs}. \nuewblock American Mathematical Society, Providence, RI, 2007. \end{thebibliography} \end{document}
\begin{document} \title{Measuring a photonic qubit without destroying it } \author{G. J. Pryde} \affiliation{These authors contributed equally to this work} \affiliation{Centre for Quantum Computer Technology, Department of Physics, University of Queensland, Brisbane 4072, Australia} \author{J. L. O'Brien} \affiliation{These authors contributed equally to this work} \affiliation{Centre for Quantum Computer Technology, Department of Physics, University of Queensland, Brisbane 4072, Australia} \author{A. G. White} \affiliation{Centre for Quantum Computer Technology, Department of Physics, University of Queensland, Brisbane 4072, Australia} \author{S. D. Bartlett} \affiliation{Centre for Quantum Computer Technology, Department of Physics, University of Queensland, Brisbane 4072, Australia} \author{T. C. Ralph} \affiliation{Centre for Quantum Computer Technology, Department of Physics, University of Queensland, Brisbane 4072, Australia} \pacs{blah} \begin{abstract} Measuring the polarisation of a single photon typically results in its destruction. We propose, demonstrate, and completely characterise a \emph{quantum non-demolition} (QND) scheme for realising such a measurement non-destructively. This scheme uses only linear optics and photo-detection of ancillary modes to induce a strong non-linearity at the single photon level, non-deterministically. We vary this QND measurement continuously into the weak regime, and use it to perform a non-destructive test of complementarity in quantum mechanics. Our scheme realises the most advanced general measurement of a qubit: it is non-destructive, can be made in any basis, and with arbitrary strength. \end{abstract} \pacs{03.67.Lx, 85.35.-p, 68.37.Ef, 68.43.-h} \maketitle At the heart of quantum mechanics is the principle that the very act of measuring a system disturbs it. 
A quantum non-demolition (QND) scheme seeks to make a measurement such that this inherent \emph{back-action} feeds only into unwanted observables \cite{ca-rmp-52-341,bo-rmp-68-755}. Such a measurement should satisfy the following criteria \cite{gr-nat-396-537}: (1) The measurement outcome is correlated with the input; (2) The measurement does not alter the value of the measured observable; and (3) Repeated measurement yields the same result --- \emph{quantum state preparation} (QSP). Originally proposed for gravity wave detectors, most progress in QND has been in the continuous variable (CV) regime, involving measurement of the field quadrature of bright optical beams \cite{gr-nat-396-537}. Demonstrations at the single photon level have been limited to intra-cavity photons due to the requirement of a strong non-linearity \cite{tu-prl-75-4710,no-nat-400-239}. In addition, there has been no complete characterisation of a QND measurement due to a limited capacity to prepare input states, and thus inability to observe all the required correlations. The importance of single-photon measurements has been highlighted by schemes for optical quantum computation that proceed via a measurement induced non-linearity \cite{kn-nat-409-46,ob-nat}. Such schemes encode quantum information in the state (\emph{eg} polarisation) of single photons --- \emph{photonic qubits}. Measurement of single photon properties is traditionally a strong, destructive measurement employing direct photo-detection. However, quantum mechanics allows general measurements \cite{nielsen} that range from strong to arbitrarily weak --- one obtains full to negligible information --- and can be non-destructive (\emph{eg} QND). Such general measurements are required \cite{sa-pra-39-694} for tests of \emph{wave--particle duality} \cite{bohm}, and other fundamental tests of quantum mechanics \cite{re-quant-ph-0310091,re-quant-ph-0310113}. 
They may also find application in: optical quantum computing \cite{ob-nat,ko-pra-66-063814}; quantum communication protocols \cite{bo-pra-61-050301}; tests of such protocols \cite{we-pra-47-639,na-prl-84-4733}; nested entanglement pumping \cite{du-prl-90-067901}; and quantum feedback \cite{ll-pra-62-012307}. Here we propose, demonstrate, and completely characterise a scheme for the QND measurement of the polarisation of a free propagating single-photon qubit --- a flying qubit. This is achieved non-deterministically by using a measurement induced non-linearity. The measurement can be performed on all possible input states. Eigenstate inputs result in strong correlation with the measurement outcome; coherent superpositions exhibit ``collapse" and corresponding loss of coherence as a result of the measurement. Direct observation of all correlations demonstrates that the criteria (1-3), illustrated in Fig. \ref{schematic}(a), have been satisfied. To quantify the performance against these criteria, we introduce measures that are applicable to \emph{all} QND measurements. Finally, we show how our measurement scheme can be varied continuously from a strong measurement into the regime of a non-destructive weak measurement of polarisation. Using these weak measurements we perform a fundamental test of complementarity using ``which-path'' information without destroying the photon. Our scheme implements a measurement that is non-destructive, can be made in any basis, with arbitrary strength, and is therefore the most advanced general measurement to date. \begin{figure} \caption{A QND measurement of a polarisation encoded single photon qubit. (a) A schematic of a QND measurement. After interaction with the signal $s$, measurement of a meter $m$ provides information about the signal. 
The performance against the requirements (1-3) can be assessed by measuring the correlations indicated by the arrows, and can be quantified by the measurement fidelity $F_M$, the QND fidelity $F_{QND}$, and the QSP fidelity $F_{QSP}$ (defined in the text).} \label{schematic} \end{figure} Our scheme for QND measurement of the polarisation of a single photon in the horizontal ($H$)/vertical ($V$) basis is illustrated in Fig. \ref{schematic}(b). After interaction, a destructive measurement of the polarisation ($H$ or $V$) of an ancillary ``meter" photon realises a QND measurement of the free-propagating ``signal" photon. The required strong optical non-linearity, which couples the signal and meter, is realised using only linear optics and photo-detection following the principles developed for optical quantum computing \cite{kn-nat-409-46,ob-nat}. As with those schemes, our QND measurement is non-deterministic: it succeeds with non-unit probability, but whenever precisely one photon is detected in the meter output, it is known to have succeeded. The key to the operation of this circuit is that the QND device makes a photon number measurement $|n=0,1\rangle$ in the $s_H$ arm of the signal interferometer: the $H$ component of the meter experiences a $\pi$ phase shift \emph{conditional} on the signal being in the state $|H\rangle_s$ (\emph{ie} in the mode $s_H$). This conditional phase shift is realised by a non-classical interference between the two photons at the $\eta$ reflectivity beam splitter (BS) and conditioned on the detection of a single meter photon. When the signal photon is in a polarisation eigenstate, we require that the meter and signal outputs be the same state (ie $|H\rangle_s|H\rangle_m$ or $|V\rangle_s|V\rangle_m$). Consider the modes labelled in Fig. \ref{schematic}(b): the $m_V$ and $s_V$ modes are simply transmitted, while $s_{H_o}\rightarrow-\sqrt{\eta} s_H+\sqrt{1-\eta} m_H$ and $m_{H_o}\rightarrow\sqrt{1-\eta} s_H+\sqrt{\eta} m_H$. 
For the signal in the eigenstate $|V\rangle_s$, the signal and meter do not interact. We require the meter output to be $(|H\rangle_m+|V\rangle_m)/\sqrt{2}$, which is rotated by 45$^{\circ}$ to $|V\rangle_m$ by the half wave plate (HWP) set at 22.5$^{\circ}$. This is realised by preparing the meter state: \begin{displaymath} |D(\eta)\rangle_m=\sqrt{\tfrac{\eta}{1 + \eta}}|V\rangle_m + \sqrt{\tfrac{1}{1 + \eta}}|H\rangle_m. \end{displaymath} The $\sqrt{1-\eta}$ loss experienced by the $H$ component makes the $H$ and $V$ components equal and the signal-meter output state is: \begin{displaymath} |\phi^{V_s}_{out}\rangle= \sqrt{\tfrac{\eta}{1+\eta}}|V\rangle_s(|V\rangle_m+|H\rangle_m)+\sqrt{\tfrac{1-\eta}{1+\eta}}|H\rangle_s|V\rangle_s, \end{displaymath} where the first term represents successful operation, and the second a failure mechanism corresponding to two photons in the signal output and no photons in the meter output. After rotation of the meter state by 45$^\circ$ the successful output state is $|V\rangle_s|V\rangle_m$. When the signal is in the other eigenstate input $|H\rangle_s$ the output state is \begin{displaymath} \label{hsout} |\phi^{H_s}_{out}\rangle= \sqrt{\tfrac{1}{1+\eta}}[(1-2\eta)|H\rangle_s|H\rangle_m-\eta|H\rangle_s|V\rangle_m]+... \end{displaymath} where the terms not shown are ones with two photons in one of the outputs and zero in the other. We require the coefficients be equal so that the meter state is $(|H\rangle_m-|V\rangle_m)/\sqrt{2}$ (which is rotated to $|H\rangle_m$ by the HWP). This is only satisfied for $\eta=\tfrac{1}{3}$, and thus we prepare the meter in the state \begin{displaymath} |D'\rangle_m\equiv|D(\tfrac{1}{3})\rangle_m=\tfrac{1}{2}|V\rangle_m+\tfrac{\sqrt{3}}{2}|H\rangle_m. \end{displaymath} The probability of success for an arbitrary input $\gamma|H\rangle_s+\delta|V\rangle_s$ is $P=(\gamma^2+3\delta^2)/6$. 
The fact that $P$ is dependent on the input state must be taken into account when inferring populations from repeated measurements of identically prepared input states. It is possible to introduce the $\frac{2}{3}$ loss shown in the $s_V$ mode in Fig. \ref{schematic}(b) to make $P=\frac{1}{6}$ independent of $\delta$ and $\gamma$, as done below for weak measurement operation. Successful QND would then be signalled by the detection of a single photon in the meter output and no photon in this extra loss mode. \begin{figure} \caption{A schematic of the experimental setup. Pairs of photons of wavelength $\lambda=702.2$ nm were injected from the left. They were generated in a 5 mm non-linear $\beta$-barium-borate (BBO) crystal through spontaneous parametric down conversion of a $\lambda=351.1$ nm, $P\simeq300$ mW pump beam. The BBO crystal was cut for beam-like \protect\cite{ta-ol-26-843,ku-jmo-48-1997} \label{exp} \end{figure} Figure \ref{exp} outlines our experimental design for realising the schematic circuit of Fig. \ref{schematic}(b). The data collected are coincident counts --- simultaneous detection of a single photon at each of the detectors --- for two reasons. First, in order to characterise the QND measurement we need to measure the polarisation of both the signal and meter photons to determine the correlation between them. By adjusting our analysers we can directly measure the probability $P_{HH}$ of the two photons being horizontally polarised, etc. Second, although in principle measurement of a \emph{single} meter photon would indicate that the measurement worked, currently available single photon counting modules (SPCMs) cannot distinguish between one and many photons and operate with moderate efficiency. Note that wave plates can be used to measure the signal in any basis. \begin{table}[t!] \centering \caption{Experimental values for the joint probabilities for the signal and meter polarization $P_{sm}$. 
For the inputs $|D^-\rangle$ and $|R^-\rangle$ (not shown) results are almost identical to those for $|D^+\rangle$ and $|R^+\rangle$.}\label{prob} \begin{tabular}{|c|c|c|c|c|} \hline Signal input & $|H\rangle_s$ & $|V\rangle_s$ & $|D^+\rangle$ & $|R^+\rangle$ \\ \hline \hline $P_{HH}$ & 0.97(3) & 0.012(3) & 0.44(3) & 0.46(3)\\ \hline $P_{HV}$ & 0.024(3) & 0.00013(7) & 0.016(3) & 0.022(3)\\ \hline $P_{VH}$ & 0.007(1) & 0.18(1) & 0.10(1) & 0.104(8)\\ \hline $P_{VV}$ & 0.0005(3) & 0.81(4) & 0.44(3) & 0.41(2)\\ \hline \end{tabular} \end{table} We prepared the signal in the eigenstates $|H\rangle_s$ and $|V\rangle_s$, and the superposition states \begin{eqnarray*} &&|D^{\pm}\rangle_s\equiv\tfrac{1}{2}|V\rangle_s\pm\tfrac{\sqrt{3}}{2}|H\rangle_s,\ |R^{\pm}\rangle_s\equiv\tfrac{1}{2}|V\rangle_s\pm i\tfrac{\sqrt{3}}{2}|H\rangle_s \end{eqnarray*} (states which give equal probability of measuring $|H\rangle_s$ and $|V\rangle_s$), and measured the probabilities $P_{sm}=P_{HH}$, $P_{HV}$, $P_{VH}$, and $P_{VV}$ (Table \ref{prob}). The QND measurement works most successfully in the case of a $|H\rangle_s$ signal, because it requires only the splitting and recombining of the meter components. In contrast, all other measurements require both classical and non-classical interference. To quantify the performance of a QND measurement relative to the criteria (1-3), we define new measures that can be applied to all input states. These measures each compare two probability distributions $p$ and $q$ over the measurement outcomes $i$, using the (classical) fidelity \begin{displaymath} F(p,q)=(\sum_i\sqrt{p_i q_i})^2. \end{displaymath} For photonic qubits, $i\in\{H,V\}$; also, $F=1$ for identical distributions, $F=\frac{1}{2}$ for uncorrelated distributions, and $F=0$ for anti-correlated distributions. For a QND measurement there are three relevant probability distributions: $p^{in}$ of the signal input, $p^{out}$ of the signal output, and $p^m$ of the measurement. 
These distributions, and hence fidelities, are functions of the signal input state. The requirements (1-3) demand \emph{correlations} between these distributions [see Fig. \ref{schematic}(a)] as follows: (1) The success of the measurement is quantified by the \emph{measurement fidelity} $F_M=F(p^{in},p^m)$, which measures the overlap between the signal input and measurement distributions. For signal eigenstates, we measure $F_M(|H\rangle_s) = P_{HH} + P_{VH} = 0.97\pm0.03$ and $F_M(|V\rangle_s) = P_{VV} + P_{HV} = 0.81\pm0.04$. For all superposition states, $|D^{\pm}\rangle_s$ and $|R^{\pm}\rangle_s$, $F_M>0.99$. (2) For the measurement to be \emph{non-demolition}, the signal output probabilities should be identical to those of the input. This is characterised by the \emph{QND fidelity} $F_{QND}=F(p^{in},p^{out})$. For all signal inputs measured (eigenstates and superpositions), $F_{QND}>0.99$. (3) When the measurement outcome is $i$, a good QSP device gives the signal output state $|i\rangle_s$ with high probability. We denote this conditional probability $p_i^{out}|i$ and define the \emph{QSP fidelity} $F_{QSP}=\sum_i p_i^m p_i^{out}|i$, which is an average fidelity between the expected and observed conditional probability distributions. For our scheme $F_{QSP}=P_{HH}+P_{VV}$. The average for the six inputs quantifies the performance as a QSP device for \emph{any} unknown input, and is $0.88\pm0.05$. This quantity is also known as the \emph{likelihood} $L$ \cite{wo-prd-19-473} of measuring the signal to be $H$ or $V$ given the meter outcome $H$ or $V$, respectively. In the CV regime, $L=0$ due to the continuous spectrum of the measurement outcome. To compare directly with CV experiments, the QSP performance could also be quantified by the \emph{correlation function} \cite{qndnote1} between the signal and meter: $P_{HH} + P_{VV} - P_{HV} - P_{VH}$. This correlation is also referred to as the \emph{knowledge}; for qubits, $K = 2L -1$. 
Both $K$ and $L$ are useful for characterising the weak measurements, which we now describe. \begin{figure} \caption{Non-destructive weak measurement of a polarisation encoded single photon qubit. (a) No measurement: The real part of the output density matrix $\rho$ of the signal qubit for $\alpha=0$ with purity Tr$[\rho^2]$=0.89. (b) Strong measurement: The real part of the output density matrix of the signal qubit for $\alpha=\frac{\sqrt{3}}{2}$.} \label{weak} \end{figure} Along with performing strong measurements of polarisation, our device also allows for non-destructive weak measurements. We vary the input state of the meter $|\Psi\rangle_m=\alpha|H\rangle_m+\beta|V\rangle_m$ to vary the strength of the measurement. We also introduce a $\tfrac{2}{3}$ loss in the $s_V$ mode [as shown in Fig. \ref{schematic}(b)] to balance the measurement statistics. The most interesting behaviour can be seen for an equal superposition signal input, eg $(|H\rangle_s+|V\rangle_s)/\sqrt{2}$. The output state for an arbitrary meter input is then \begin{eqnarray*} |\phi_{out}\rangle&=&\tfrac{1}{2\sqrt{3}}[|H\rangle_s(\sqrt{\tfrac{2}{3}}\alpha|H\rangle_m-\sqrt{2}\beta|V\rangle_m)\\ &+&|V\rangle_s(\sqrt{\tfrac{2}{3}}\alpha|H\rangle_m+\sqrt{2}\beta|V\rangle_m)]+... \end{eqnarray*} where again the terms not shown are failure mechanisms. [The relative minus sign in the $|H\rangle_s$ branch follows from the conditional $\pi$ phase shift, cf.\ the $|H\rangle_s$ eigenstate output above.] We can characterise this weak measurement by measuring the 1-qubit reduced density matrix of the signal output [Fig. \ref{weak}(a) and (b)]. The $H$ and $V$ populations do not change, regardless of the meter input state. However for $|\alpha|=0$ we observe a coherent superposition, while for $\alpha=\frac{\sqrt{3}}{2}$ we have an incoherent mixture as expected when a strong measurement is made. In the intermediate region the signal output is partially mixed. The degree of coherence can also be determined by measuring the visibility $V$ of the linear polarisation fringes as shown in Fig. \ref{weak}(c). 
These results explicitly demonstrate the \emph{decoherence} that would appear in quantum cryptography due to an eavesdropper using weak QND measurement of polarisation, as simulated in \cite{na-prl-84-4733}. The fundamental principle of complementarity \cite{bohm}, in particular wave-particle duality, can be tested with our general measurement in the fashion originally proposed in Ref. \cite{sa-pra-39-694}. In the spatial interferometer of Fig. \ref{exp}, $K$ quantifies the degree of ``which-path" information, and $V$ the quantum indistinguishability. They must satisfy $V^2+K^2\leqslant1$ \cite{en-prl-77-2154}. In Fig. \ref{weak}(a) we plot $K$, $V$, and $K^2+V^2$ for a range of values of $\alpha$. As $K$ increases, $V$ decreases. Ideally our weak QND scheme is optimal: $K^2+V^2=1$ for all meter polarizations. In our experiment $K^2+V^2<1$ due to non-ideal mode matching. The decline of $K^2+V^2$ with increasing $K$ can be attributed to the increasing requirement for non-classical (as well as classical) interference as the strength of the QND measurement is increased. In contradistinction to the non-destructive scheme presented here, previous tests of complementarity have relied on encoding which-path information onto a different degree of freedom of the interfering particles \cite{du-nat-395-33,sc-pra-60-4285}, so that which-path information is only obtained destructively, when the particles are measured. In summary, we have proposed, demonstrated, and characterised a non-deterministic scheme for general measurement of a flying qubit. In addition, we have introduced the first set of fidelity measures to characterise the quality of any QND measurement. Because we are able to measure these fidelities directly and prepare all input states with high fidelity, we have demonstrated the most comprehensive characterisation of a QND measurement to date. 
We find that our device performs well against all three requirements of a QND measurement, with fidelities greater than 80\% for all measures and all input states. Operating in the weak regime, we have performed a non-destructive test of complementarity. \\ We thank J. S. Lundeen and G. J. Milburn for helpful discussions. This work was supported by the Australian government, the Australian Research Council, the US National Security Agency (NSA) and Advanced Research and Development Activity (ARDA) under Army Research Office (ARO) contract number DAAD 19-01-1-0651. \begin{thebibliography}{26} \expandafter\ifx\csname natexlab\endcsname\relax\def\natexlab#1{#1}\fi \expandafter\ifx\csname bibnamefont\endcsname\relax \def\bibnamefont#1{#1}\fi \expandafter\ifx\csname bibfnamefont\endcsname\relax \def\bibfnamefont#1{#1}\fi \expandafter\ifx\csname citenamefont\endcsname\relax \def\citenamefont#1{#1}\fi \expandafter\ifx\csname url\endcsname\relax \def\url#1{\texttt{#1}}\fi \expandafter\ifx\csname urlprefix\endcsname\relax\def\urlprefix{URL }\fi \providecommand{\bibinfo}[2]{#2} \providecommand{\eprint}[2][]{\url{#2}} \bibitem[{\citenamefont{Caves et~al.}(1980)\citenamefont{Caves, Thorne, Drever, Sandberg, and Zimmermann}}]{ca-rmp-52-341} \bibinfo{author}{\bibfnamefont{C.~M.} \bibnamefont{Caves}}, \bibinfo{author}{\bibfnamefont{K.~S.} \bibnamefont{Thorne}}, \bibinfo{author}{\bibfnamefont{R.~W.~P.} \bibnamefont{Drever}}, \bibinfo{author}{\bibfnamefont{V.~D.} \bibnamefont{Sandberg}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Zimmermann}}, \bibinfo{journal}{Rev. Mod. Phys.} \textbf{\bibinfo{volume}{52}}, \bibinfo{pages}{341} (\bibinfo{year}{1980}). \bibitem[{\citenamefont{Bocko and Onofrio}(1996)}]{bo-rmp-68-755} \bibinfo{author}{\bibfnamefont{M.~F.} \bibnamefont{Bocko}} \bibnamefont{and} \bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Onofrio}}, \bibinfo{journal}{Rev. Mod. Phys.} \textbf{\bibinfo{volume}{68}}, \bibinfo{pages}{755} (\bibinfo{year}{1996}). 
\bibitem[{\citenamefont{Grangier et~al.}(1998)\citenamefont{Grangier, Levenson, and Poizat}}]{gr-nat-396-537} \bibinfo{author}{\bibfnamefont{P.}~\bibnamefont{Grangier}}, \bibinfo{author}{\bibfnamefont{J.~A.} \bibnamefont{Levenson}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{J.-P.} \bibnamefont{Poizat}}, \bibinfo{journal}{Nature} \textbf{\bibinfo{volume}{396}}, \bibinfo{pages}{537} (\bibinfo{year}{1998}). \bibitem[{\citenamefont{Turchette et~al.}(1995)\citenamefont{Turchette, Hood, Lange, Mabuchi, and Kimble}}]{tu-prl-75-4710} \bibinfo{author}{\bibfnamefont{Q.~A.} \bibnamefont{Turchette}}, \bibinfo{author}{\bibfnamefont{C.~J.} \bibnamefont{Hood}}, \bibinfo{author}{\bibfnamefont{W.}~\bibnamefont{Lange}}, \bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Mabuchi}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{H.~J.} \bibnamefont{Kimble}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{75}}, \bibinfo{pages}{4710} (\bibinfo{year}{1995}). \bibitem[{\citenamefont{Nogues et~al.}(1999)\citenamefont{Nogues, Rauschenbeutel, Osnaghi, Brune, Raimond, and Haroche}}]{no-nat-400-239} \bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{Nogues}}, \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Rauschenbeutel}}, \bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Osnaghi}}, \bibinfo{author}{\bibfnamefont{N.}~\bibnamefont{Brune}}, \bibinfo{author}{\bibfnamefont{J.~M.} \bibnamefont{Raimond}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Haroche}}, \bibinfo{journal}{Nature} \textbf{\bibinfo{volume}{400}}, \bibinfo{pages}{239} (\bibinfo{year}{1999}). \bibitem[{\citenamefont{Knill et~al.}(2001)\citenamefont{Knill, Laflamme, and Milburn}}]{kn-nat-409-46} \bibinfo{author}{\bibfnamefont{E.}~\bibnamefont{Knill}}, \bibinfo{author}{\bibfnamefont{R.}~\bibnamefont{Laflamme}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{G.~J.} \bibnamefont{Milburn}}, \bibinfo{journal}{Nature} \textbf{\bibinfo{volume}{409}}, \bibinfo{pages}{46} (\bibinfo{year}{2001}). 
\bibitem[{\citenamefont{O'Brien et~al.}(2003)\citenamefont{O'Brien, Pryde, White, Ralph, and Branning}}]{ob-nat} \bibinfo{author}{\bibfnamefont{J.~L.} \bibnamefont{O'Brien}}, \bibinfo{author}{\bibfnamefont{G.~J.} \bibnamefont{Pryde}}, \bibinfo{author}{\bibfnamefont{A.~G.} \bibnamefont{White}}, \bibinfo{author}{\bibfnamefont{T.~C.} \bibnamefont{Ralph}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Branning}}, \bibinfo{journal}{Nature, to appear} (\bibinfo{year}{2003}). \bibitem[{\citenamefont{Nielsen and Chuang}(2000)}]{nielsen} \bibinfo{author}{\bibfnamefont{M.~A.} \bibnamefont{Nielsen}} \bibnamefont{and} \bibinfo{author}{\bibfnamefont{I.~L.} \bibnamefont{Chuang}}, \emph{\bibinfo{title}{Quantum Computation and Quantum Information}} (\bibinfo{publisher}{Cambridge University Press}, \bibinfo{address}{Cambridge}, \bibinfo{year}{2000}), \bibinfo{note}{p 91}. \bibitem[{\citenamefont{Sanders and Milburn}(1989)}]{sa-pra-39-694} \bibinfo{author}{\bibfnamefont{B.~C.} \bibnamefont{Sanders}} \bibnamefont{and} \bibinfo{author}{\bibfnamefont{G.~J.} \bibnamefont{Milburn}}, \bibinfo{journal}{Phys. Rev. A} \textbf{\bibinfo{volume}{39}}, \bibinfo{pages}{694} (\bibinfo{year}{1989}). \bibitem[{\citenamefont{Bohm}(1951)}]{bohm} \bibinfo{author}{\bibfnamefont{D.}~\bibnamefont{Bohm}}, \emph{\bibinfo{title}{Quantum Theory}} (\bibinfo{publisher}{Prentice-Hall}, \bibinfo{year}{1951}). \bibitem[{\citenamefont{Resch et~al.}(2003)\citenamefont{Resch, Lundeen, and Steinberg}}]{re-quant-ph-0310091} \bibinfo{author}{\bibfnamefont{K.}~\bibnamefont{Resch}}, \bibinfo{author}{\bibfnamefont{J.}~\bibnamefont{Lundeen}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Steinberg}}, \bibinfo{journal}{quant-ph/0310091} (\bibinfo{year}{2003}). 
\bibitem[{\citenamefont{Resch and Steinberg}(2003)}]{re-quant-ph-0310113} \bibinfo{author}{\bibfnamefont{K.}~\bibnamefont{Resch}} \bibnamefont{and} \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Steinberg}}, \bibinfo{journal}{quant-ph/0310113} (\bibinfo{year}{2003}). \bibitem[{\citenamefont{Kok et~al.}(2002)\citenamefont{Kok, Lee, and Dowling}}]{ko-pra-66-063814} \bibinfo{author}{\bibfnamefont{P.}~\bibnamefont{Kok}}, \bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Lee}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{J.~P.} \bibnamefont{Dowling}}, \bibinfo{journal}{Phys. Rev. A} \textbf{\bibinfo{volume}{66}}, \bibinfo{pages}{063814} (\bibinfo{year}{2002}). \bibitem[{\citenamefont{Botero and Reznik}(2000)}]{bo-pra-61-050301} \bibinfo{author}{\bibfnamefont{A.}~\bibnamefont{Botero}} \bibnamefont{and} \bibinfo{author}{\bibfnamefont{B.}~\bibnamefont{Reznik}}, \bibinfo{journal}{Phys. Rev. A} \textbf{\bibinfo{volume}{61}}, \bibinfo{pages}{050301} (\bibinfo{year}{2000}). \bibitem[{\citenamefont{Werner and Milburn}(1993)}]{we-pra-47-639} \bibinfo{author}{\bibfnamefont{M.~J.} \bibnamefont{Werner}} \bibnamefont{and} \bibinfo{author}{\bibfnamefont{G.~J.} \bibnamefont{Milburn}}, \bibinfo{journal}{Phys. Rev. A} \textbf{\bibinfo{volume}{47}}, \bibinfo{pages}{639} (\bibinfo{year}{1993}). \bibitem[{\citenamefont{Naik et~al.}(2000)\citenamefont{Naik, Peterson, White, Berglund, and Kwiat}}]{na-prl-84-4733} \bibinfo{author}{\bibfnamefont{D.~S.} \bibnamefont{Naik}}, \bibinfo{author}{\bibfnamefont{C.~G.} \bibnamefont{Peterson}}, \bibinfo{author}{\bibfnamefont{A.~G.} \bibnamefont{White}}, \bibinfo{author}{\bibfnamefont{A.~J.} \bibnamefont{Berglund}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{P.~G.} \bibnamefont{Kwiat}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{84}}, \bibinfo{pages}{4733} (\bibinfo{year}{2000}). 
\bibitem[{\citenamefont{D\"{u}r and Briegel}(2003)}]{du-prl-90-067901} \bibinfo{author}{\bibfnamefont{W.}~\bibnamefont{D\"{u}r}} \bibnamefont{and} \bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Briegel}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{90}}, \bibinfo{pages}{067901} (\bibinfo{year}{2003}). \bibitem[{\citenamefont{Lloyd and Slotine}(2000)}]{ll-pra-62-012307} \bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Lloyd}} \bibnamefont{and} \bibinfo{author}{\bibfnamefont{J.-J.~E.} \bibnamefont{Slotine}}, \bibinfo{journal}{Phys. Rev. A} \textbf{\bibinfo{volume}{65}}, \bibinfo{pages}{012307} (\bibinfo{year}{2000}). \bibitem[{\citenamefont{Takeuchi}(2001)}]{ta-ol-26-843} \bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{Takeuchi}}, \bibinfo{journal}{Opt. Lett.} \textbf{\bibinfo{volume}{26}}, \bibinfo{pages}{843} (\bibinfo{year}{2001}). \bibitem[{\citenamefont{Kurtsiefer et~al.}(2000)\citenamefont{Kurtsiefer, Oberparleiter, and Weinfurter}}]{ku-jmo-48-1997} \bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Kurtsiefer}}, \bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Oberparleiter}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Weinfurter}}, \bibinfo{journal}{J. Mod. Opt} \textbf{\bibinfo{volume}{48}}, \bibinfo{pages}{1997} (\bibinfo{year}{2000}). \bibitem[{\citenamefont{Kurtsiefer et~al.}(2001)\citenamefont{Kurtsiefer, Oberparleiter, and Weinfurter}}]{ku-pra-64-023802} \bibinfo{author}{\bibfnamefont{C.}~\bibnamefont{Kurtsiefer}}, \bibinfo{author}{\bibfnamefont{M.}~\bibnamefont{Oberparleiter}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{H.}~\bibnamefont{Weinfurter}}, \bibinfo{journal}{Phys. Rev. A} \textbf{\bibinfo{volume}{64}}, \bibinfo{pages}{023802} (\bibinfo{year}{2001}). \bibitem[{\citenamefont{Wootters and Zurek}(1979)}]{wo-prd-19-473} \bibinfo{author}{\bibfnamefont{W.~K.} \bibnamefont{Wootters}} \bibnamefont{and} \bibinfo{author}{\bibfnamefont{W.~H.} \bibnamefont{Zurek}}, \bibinfo{journal}{Phys. Rev. 
D} \textbf{\bibinfo{volume}{19}}, \bibinfo{pages}{473} (\bibinfo{year}{1979}). \bibitem[{qnd()}]{qndnote1} \bibinfo{note}{The correlation function is defined $C_{MS} = \langle \hat {O}_{M} \hat {O}_{S} \rangle/(\langle \hat {O}_{m}^{2} \rangle \langle \hat {O}_{s}^{2} \rangle)^\frac{1}{2}$, where $\hat {O}$ is the QND observable: the Stokes operator $\hat{S1}$ for our measurement; the quadrature amplitude $\hat X$ for CV systems.} \bibitem[{\citenamefont{Englert}(1996)}]{en-prl-77-2154} \bibinfo{author}{\bibfnamefont{B.-G.} \bibnamefont{Englert}}, \bibinfo{journal}{Phys. Rev. Lett.} \textbf{\bibinfo{volume}{77}}, \bibinfo{pages}{2154} (\bibinfo{year}{1996}). \bibitem[{\citenamefont{D\"{u}rr et~al.}(1998)\citenamefont{D\"{u}rr, Nonn, and Rempe}}]{du-nat-395-33} \bibinfo{author}{\bibfnamefont{S.}~\bibnamefont{D\"{u}rr}}, \bibinfo{author}{\bibfnamefont{T.}~\bibnamefont{Nonn}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{G.}~\bibnamefont{Rempe}}, \bibinfo{journal}{Nature} \textbf{\bibinfo{volume}{395}}, \bibinfo{pages}{33} (\bibinfo{year}{1998}). \bibitem[{\citenamefont{Schwindt et~al.}(1999)\citenamefont{Schwindt, Kwiat, and Englert}}]{sc-pra-60-4285} \bibinfo{author}{\bibfnamefont{P.~D.~D.} \bibnamefont{Schwindt}}, \bibinfo{author}{\bibfnamefont{P.~G.} \bibnamefont{Kwiat}}, \bibnamefont{and} \bibinfo{author}{\bibfnamefont{B.-G.} \bibnamefont{Englert}}, \bibinfo{journal}{Phys. Rev. A} \textbf{\bibinfo{volume}{60}}, \bibinfo{pages}{4285} (\bibinfo{year}{1999}). \end{thebibliography} \end{document}
\begin{document} \title{A note on continuous ensemble expansions of quantum states} \begin{abstract} Generalizing the notion of relative entropy, the difference between {\em a priori} and {\em a posteriori} relative entropy for quantum systems is drawn. The former, known as quantum relative entropy, is associated with quantum states recognition. The latter---{\em a posteriori} relative quantum entropy is introduced and shown to be related with state reconstruction due to the following property: given a density operator $\rho$, ensembles of pure states with Gibbs distribution with respect to the defined distance are proved to represent the initial state $\rho$ up to an amount of white noise (completely mixed state) which can be made arbitrary small. \end{abstract} In classical probability the {\scshape relative entropy} (or Kullback-Leibler distance) $S(\rho||\sigma)$ of a distribution $\rho=\{p_1,\ldots,p_{n}\}$ with respect to another distribution $\sigma=\{q_1,\ldots,q_{n}\}$ is defined as \begin{equation}\label{ekullleib} S(\rho||\sigma) \;=\; \left\lbrace \begin{array}{ll} -\sum_{k}p_k\log\left(\frac{p_k}{q_k}\right) & \mbox{ if $\support\rho\subseteq\support\sigma$} \\ +\infty & \mbox{ otherwise} \\ \end{array} \right. \end{equation} \noindent Usually, this notion is generalized in quantum information theory (see, {\em e.g.} \cite{ohyapetz} for a review) by analogy with von Neumann entropy, namely, the sum is replaced by operator trace. For two density operators $\rho$ and $\sigma$ the quantum relative entropy $S(\rho||\sigma)$ reads: \begin{equation}\label{edefqre} S(\rho||\sigma) \;=\; \left\lbrace \begin{array}{ll} -\trc\left[\rho\left(\log{}\rho -\log{}\sigma\right)\right] & \mbox{ if $\support\rho\subseteq\support\sigma$} \\ \\ +\infty & \mbox{ otherwise} \end{array} \right. 
\end{equation} \paragraph{Relative entropy and state recognition.} Although relative entropy does not satisfy the triangle inequality and is therefore not a `true' metric, it is a nonnegative convex function of $\{p_k\}$ and equals zero only if the distributions are equal, $\sigma=\rho$. Its relevance to distinguishing probability distributions is vindicated by the Sanov's theorem \cite{coverthomas} which states that the probability $P_{N}(\rho|\sigma)$ that the state $\sigma$ passes the test determining if the state is $\rho$ tends to \begin{equation}\label{esanovclass} P_{N}(\rho|\sigma) \;\rightarrow\; \const\cdot e^{-NS(\rho||\sigma)} \end{equation} \noindent as $N$ (the number of checked samples of $\sigma$) tends to $\infty$. A quantum analog of Sanov theorem was also proved \cite{hi-pe}. This shows that both classical and quantum relative entropy is an adequate tool to {\em recognize} states. \paragraph{Relative entropy from the operationalistic perspective.} In this note I give a quantum {\em operationalistic} analog of Kullback-Leibler distance \eqref{ekullleib}. For that, note that each probability $q_k$ can be treated in two ways \begin{itemize} \item {\em a priori}, that is as the probability of the event associated with the value $q_k$ of $\sigma$ to occur \item {\em a posteriori}, that is, as the probability of the event associated with the value $p_k$ of $\rho$ to occur provided the system was in state $\sigma$ \end{itemize} \noindent Coming to quantum formula according to {\em a priori} interpretation, we get the standard quantum relative entropy \eqref{edefqre} of $\rho$ with respect to $\sigma$. 
Following the second analogy, we get the relative entropy of $\rho$ with respect to the post-measurement state, which is called {\scshape L\"uders state} \cite{lueders}, it reads \begin{equation}\label{edeflueders} \luders{\rho}{\sigma} \;=\; \sum_{k=1}^{n}\limits \raypr{\mathbf{e}_k} \sigma \raypr{\mathbf{e}_k} \end{equation} \noindent and describes the state of the system which was initially in state $\sigma$ after the measurement associated with the density operator $\rho$ was carried out. Now we can introduce the distance between the states---the {\em a posteriori} quantum generalization of relative entropy---as follows \begin{equation}\label{efirstdefmynewdist} \mynewdist{\rho}{\phi} \;=\; S\left( \rho\rVert \luders{\rho}{\sigma} \right) \end{equation} \noindent Note that the value of this distance may be finite when $\sigma=\raypr{\phi}$ is a pure state and $\rho$ is mixed, which is never the case for quantum relative entropy. For further purposes the explicit expression---an analog of \eqref{edefqre}---for the distance function \eqref{efirstdefmynewdist} between a pure state $\phi$ and mixed state $\rho=\sum_{k=1}^{n}\limits p_k\, \raypr{\mathbf{e}_k}$ is to be written down: \begin{equation}\label{edefmynewdist} \mynewdist{\rho}{\phi} \;=\; \left\lbrace \begin{array}{ll} -\sum_{k}p_k\log\frac{p_k}{\lvert\braket{\mathbf{e}_k}{\phi}\rvert^2} & \mbox{ if $\phi$ has nonzero overlap with $\rho$} \\ \\ +\infty & \mbox{ otherwise} \end{array} \right. \end{equation} \noindent---a vector $\phi$ is said to have \cite{gdansk} a {\scshape nonzero overlap} with $\rho$ if $\bracket{\phi}{\rho}{\phi}\neq 0$. 
\paragraph{Reconstructing quantum states by Gibbs ensembles.} Given a full-range density operator $\rho$ in $\mathbf{H}=\mathbb{C}^n$, form a kind of Gibbs ensemble of pure states treating $\mynewdist{\rho}{\phi}$ as distance: \begin{equation}\label{egibbsens} \mu_\rho(\phi) \;=\; K \exp\left(-\frac{n}{\varepsilon} \mynewdist{\rho}{\phi}\right) \end{equation} \noindent where $\varepsilon>0$ is a small parameter and $K$ is a normalization constant with respect to Haar measure $dS$ on the set $\cfield{}S^n$ of unit vectors in $\mathbf{H}$. This ensemble is associated with a density operator \begin{equation}\label{ebigmix} I(\rho) \;=\; \int_{\cfield{}S^n}\limits \mu_\rho(\phi) \, \raypr{\phi} dS \end{equation} \noindent and the following {\scshape reconstruction formula} holds: \begin{equation}\label{emainformula} \int_{\cfield{}S^n}\limits \mu_\rho(\phi) \raypr{\phi} dS \;=\; (1-\varepsilon)\rho+\varepsilon\Lambda \end{equation} \noindent where $\Lambda=\frac{1}{n}\,\mathbb{I}$ is the density matrix of white noise---a completely mixed state. The proof of this formula is a routine integration, see Appendix for details and the explicit expression for the normalization constant. \paragraph{Limit distribution.} The limit distribution in \eqref{emainformula} corresponds to an ensemble $\mathcal{E}$ whose density matrix is exactly $\rho$; let us describe it explicitly. According to \eqref{emainformula}, the support of the limit distribution is the set of unit vectors for which the {\em a posteriori} distance from $\rho$ is zero, that is, \( \support(\mathcal{E}) = \{ \phi_{\theta_1,\ldots,\theta_n}\mid \mynewdist{\rho}{\phi_{\theta_1,\ldots,\theta_n}}=0 \} \) with \[ \phi_{\theta_1,\ldots,\theta_n} \;=\; \sum_{k=1}^{n}\limits e^{-i\theta_k}\sqrt{p_k}\;\ket{\mathbf{e}_k} \] \noindent where $\theta_1,\ldots,\theta_n\in[0,2\pi)$. 
Therefore the resulting distribution is uniform over the $n$-torus $S^1\times\cdots\times S^1$ ($n$ factors) and, as a consequence, any full-range density operator $\rho$ can be represented as the following {\scshape uniform ensemble} \begin{equation}\label{easympdistr} \rho \;=\; \frac{1}{(2\pi)^n} \int_{0}^{2\pi}\limits \cdots \int_{0}^{2\pi}\limits \raypr{\phi_{\theta_1,\ldots,\theta_n}} d\theta_1\cdots d\theta_n \end{equation} \paragraph{An example.} Consider a density operator $\rho$ whose spectral decomposition is \begin{equation}\label{enaitis} \rho=\frac{1}{4}\raypr{0}+\frac{3}{4}\raypr{1} \end{equation} \noindent in two-dimensional real space $\mathbf{H}=\mathbb{R}^2$. The picture below shows three expansions of $\rho$ (note that in case of real state space the uniform ensemble \eqref{easympdistr} is discrete as the phase multiples are $\pm 1$) \unitlength=1pt \begin{center} \begin{tabular}{ccc} \psfig{figure=gibbs.ps} \qquad & \qquad \psfig{figure=uniform.ps} \qquad & \qquad \psfig{figure=classical.ps} \cr Gibbs ensemble \eqref{emainformula} & Uniform ensemble \eqref{easympdistr} & Classical ensemble \eqref{enaitis} \end{tabular} \end{center} \paragraph{Acknowledgments.} The attention offered to this work by the participants of the Friedmann seminar on theoretical physics (St.~Petersburg), in particular, by S.~Krasnikov and A.~Lobashev, is highly appreciated. \begin{thebibliography}{99} \bibitem{coverthomas} Cover, T. M. and Thomas, J. A., {\itshape Elements of Information Theory.} New York: Wiley, 1991 \bibitem{hi-pe} F. Hiai, D. Petz, The proper formula for relative entropy and its asymptotics in quantum probability. {\itshape Communications in Mathematical Physics,} {\bfseries 143} 99 (1991) \bibitem{gdansk} M.Horodecki, Aditi Sen De, Ujjwal Sen, Quantification of quantum correlation of ensemble of states, quant-ph/0310100 \bibitem{lueders} G.L\"uders, \"Uber die Zustands\"anderung durch den Me\ss proze\ss, {\itshape Ann.
Physik}, {\bfseries 8} (6) 322 (1951); English translation `Regarding the state-change due to the measurement process' by K.A.Kirkpatrick is available at quant-ph/0403007 \bibitem{ohyapetz} M.Ohya, D.Petz, {\itshape Quantum entropy and its use}, Springer, 1993 \end{thebibliography} \subsection*{Appendix: the proof of formula \eqref{emainformula}} Introduce the following numerical integral \begin{equation}\label{edefippp} I^{a_{1}\ldots{}a_{n}}_{n} \;=\; \int_{z\in\cfield{}S^n}\limits \prod_{k=1}^n |\braket{\mathbf{e}_k}{z}|^{2a_k} \,dS \end{equation} \noindent for which the following lemma holds \begin{lemma}\label{leippp} For $a_1,\ldots,a_{n}\ge 0$ and $\braket{\mathbf{e}_k}{\mathbf{e}_j}=\delta_{kj}$ \begin{equation}\label{eippp} I^{a_{1}\ldots{}a_{n}}_{n} \;=\; \frac{2\pi^{n}\,\prod_{k=1}^{n}{}\Gamma(a_k+1)}{\Gamma \left({n}+\sum_{k=1}^{n}{}a_k\right)} \end{equation} \end{lemma} \begin{proof} Represent $\mathbb{C}^{n}$ as real space $\mathbb{R}^{2n}$ with coordinates $(r_1,\phi_1;\ldots;r_{n},\phi_{n})$ so that $\braket{\mathbf{e}_k}{z}=z_k=r_k e^{i\phi_k}$.
Write down the integral \eqref{edefippp} as \[ I^{a_{1}\ldots{}a_{n}}_{n} \;=\; (2\pi)^{n}\int_{r_1^2+\cdots+r_{n}^2=1} \prod_{k=1}^{n} r_k^{2a_k} \,dS \] \noindent Getting rid of $r_1=\sqrt{1-r_2^2-\cdots-r_{n}^2}$ we obtain \[ I^{a_{1}\ldots{}a_{n}}_{n} \;=\; (2\pi)^{n}\cdot \int_{r_2^2+\cdots+r_{n}^2\le{}1} \left(1-\sum_{j=2}^{n}r_j^2\right)^{a_1} \left(\prod_{k=2}^{n}{}r_k^{2a_k+1} \,dr_k\right) \] \noindent Now, integrating over $r_{n}$, we obtain \[ I^{a_{1}\ldots{}a_{n}}_{n} \;=\; (2\pi)^{n}\cdot \int_0^1{}dr_{n}\;r_{n}^{2a_{n}+1} \int_{r_2^2+\cdots+r_{{n}-1}^2\le{}1-r_{n}^2} \left(1-r_{n}^2-\sum_{j=2}^{n-1}r_j^2\right)^{a_1} \left(\prod_{k=2}^{{n}-1}r_k^{2a_k+1} \,dr_k\right) \] \noindent which, under the substitution $\xi_k=r_k/\sqrt{1-r_{n}^2}$ for $k=2,\ldots,{n}-1$ and $r_{n}=\sin\alpha$ reads \[ I^{a_{1}\ldots{}a_{n}}_{n} \;=\; (2\pi)^{n}\cdot \int_0^{\pi/2}{}d\sin\alpha\;\sin^{2a_{n}+1}\alpha \cos^{2a_1}\alpha \left(\vphantom{A^A} \cos\alpha\right)^{2 \left(\sum_{k=2}^{{n}-1}a_k\right)+2({n}-2)} \times \] \[ \times \int_{\xi_2^2+\cdots+\xi_{{n}-1}^2\le{}1} \left(1-\sum_{j=2}^{n-1}\xi_j^2 \right)^{a_1} \left(\prod_{k=2}^{{n}-1}\xi_k^{2a_k+1} \,d\xi_k\right) \;=\; \] \[ \;=\; 2\pi \int_0^{\pi/2} \sin^{2a_{n}+1} \left(\vphantom{A^A}\cos\alpha\right)^{2 \left(\sum_{k=1}^{{n}-1}a_k+{n}-2\right)+1}d\alpha \cdot I_{{n}-1}^{a_{1}\ldots{}a_{{n}-1}} \] \noindent Using L\'egendre's formula \begin{equation}\label{elegtrig} \int_{0}^{\pi/2} \sin^{2\alpha+1}x\, \cos^{2\beta+1}x\, dx \;=\; \frac{1}{2}\cdot \frac{\Gamma(\alpha+1) \Gamma(\beta+1)}{\Gamma(\alpha+ \beta+2)} \end{equation} \noindent we obtain the following recurrent expression \begin{equation}\label{eippprecurr} I^{a_{1}\ldots{}a_{n}}_{n} \;=\; \pi\cdot \frac{\Gamma(a_{n}+1)\Gamma\left( {n}-1+\sum_{k=1}^{{n}-1}a_k\right)}{\Gamma\left( {n}+\sum_{k=1}^{n}a_k\right)}\cdot I_{{n}-1}^{a_{1}\ldots{}a_{{n}-1}} \end{equation} \noindent Direct calculations show that the formula \eqref{eippp} satisfies the 
recurrent expression \eqref{eippprecurr}, and it remains to prove the induction base. Do it for $n=1$: \[ I_1^{p} \;=\; \int_0^{2\pi}d\phi\cdot{}1^p \;=\; 2\pi \] \noindent which also accords with \eqref{eippp}. This completes the proof. \end{proof} Now let $A=\sum_{k=1}^{n}a_k\, \raypr{\mathbf{e}_k}$ be a positive self-adjoint operator in $\mathbf{H}=\mathbb{C}^n$. Introduce the operator integral \begin{equation}\label{edefiaa} I(A) \;=\; \int_{\cfield{}S^n}\limits \left( \prod_{k=1}^{n} \lvert \braket{\mathbf{e}_k}{\phi} \rvert^{2a_k} \right) \, \raypr{\phi} dS \end{equation} \noindent for which the following lemma holds. \begin{lemma}\label{leaplusid} For any positive self-adjoint operator $A$ in $\mathbf{H}$ \begin{equation}\label{e44} I(A) \;=\; \frac{2\pi^{n} \prod_{k=1}^{n} \Gamma(1+a_k)}{\Gamma(1+{n}+\trc{A})} \bigl( A+\mathbb{I} \bigr) \end{equation} \end{lemma} \begin{proof} First prove that $I(A)$ is diagonal in the eigenbasis of $A$. For any $j\neq{}k$ the appropriate matrix element reads \[ \bracket{\mathbf{e}_j}{I(A)}{\mathbf{e}_k} \;=\; (2\pi)^{n-2} \iint_0^{2\pi}e^{i(\phi_k-\phi_j)} d\phi_j d\phi_k \int_{r_1^2+\cdots+r_n^2=1} \prod_i r_i^{2a_i} dS \;=\; 0 \] \noindent Now calculate the diagonal elements \[ \bracket{\mathbf{e}_k}{I(A)}{\mathbf{e}_k} \;=\; \int_{\cfield{}S^n} \left( \prod_{i=1}^n |\braket{\mathbf{e}_i}{\phi}|^{2a_i} \right) \, \left|\braket{\mathbf{e}_k}{\phi}\right|^2 \,dS \;=\; I_n^{a_1\ldots(a_k+1)\ldots{}a_n} \] \noindent In the meantime it follows from \eqref{eippp} that \[ I_n^{a_1\ldots(a_k+1)\ldots{}a_n} \;=\; I_n^{a_1\ldots{}a_n}\, \frac{a_k+1}{n+\sum_i{}a_i} \] \noindent then $I(A)=\sum_k{} \bracket{\mathbf{e}_k}{I(A)}{\mathbf{e}_k} \cdot \raypr{\mathbf{e}_k}$, hence \[ I(A) \;=\; \frac{2\pi^{n} \prod\Gamma(a_i+1)}{\Gamma\left( {n}+1+\sum{}a_i\right)} \, \sum_{k=1}^{n} (a_k+1) \raypr{\mathbf{e}_k} \] \noindent which completes the proof.
\end{proof} Denote by $\Lambda$ the density matrix of the white noise---a completely mixed state \[ \Lambda \;=\; \frac{1}{n} \mathbb{I} \;=\; \frac{1}{n} \sum_{k=1}^{n} \raypr{\mathbf{e}_k} \] \noindent then the exact version of formula \eqref{emainformula} is stated by the following theorem. \begin{theorem}\label{thexact} Let $\rho = \sum_{k=1}^{n}p_k\,\raypr{\mathbf{e}_k}$ be a positive ($p_k>0$) operator in $\mathbf{H}=\mathbb{C}^n$. Then for any $\varepsilon\in(0,1)$ \begin{equation}\label{e44a} (1-\varepsilon)\rho+\varepsilon\Lambda \;=\; \frac{\Gamma\left(1+\frac{n}{\varepsilon}\right) e^{-\frac{n(1-\varepsilon)}{\varepsilon} S(\rho)}}{2\pi^{n} \prod_{k=1}^{n} \Gamma\left(1+\frac{n(1-\varepsilon)p_k}{\varepsilon} \right)} \int_{\cfield{}S^n}\limits e^{-\frac{n(1-\varepsilon)}{\varepsilon} \mynewdist{\rho}{\phi} } \, \raypr{\phi} dS \end{equation} \end{theorem} \begin{proof} Consider the formula \eqref{e44} and let $A=\frac{n(1-\varepsilon)}{\varepsilon}\rho$, then $a_k=\frac{n(1-\varepsilon)p_k}{\varepsilon}$ and \[ \frac{2\pi^{n} \prod_{k=1}^{n} \Gamma(1+a_k)}{\Gamma(1+{n}+\trc{A})} \left( \frac{n(1-\varepsilon)}{\varepsilon}\rho+\mathbb{I} \right) \;=\; \int_{\cfield{}S^n}\limits \left( \prod_{k=1}^{n} \lvert \braket{\mathbf{e}_k}{\phi} \rvert^{2\frac{n(1-\varepsilon)p_k}{\varepsilon}} \right) \, \raypr{\phi} dS \] \noindent thus, taking into account that $\trc A = \frac{n(1-\varepsilon)}{\varepsilon}$, we have \[ \frac{n(1-\varepsilon)}{\varepsilon}\rho+\mathbb{I} \;=\; \frac{\Gamma(1+ \frac{n}{\varepsilon})}{2\pi^{n} \prod_{k=1}^{n} \Gamma(\frac{n(1-\varepsilon)p_k}{\varepsilon} +1)} \int_{\cfield{}S^n}\limits e^{\frac{n(1-\varepsilon)}{\varepsilon} \sum_{k=1}^{n} p_k \log \lvert \braket{\mathbf{e}_k}{\phi} \rvert^{2} } \, \raypr{\phi} dS \] \noindent Using the definition \eqref{edefmynewdist} we obtain \( \frac{n(1-\varepsilon)}{\varepsilon}\rho+\mathbb{I}\,=\) \[\;=\; \frac{\Gamma\left(1+\frac{n}{\varepsilon}\right) 
e^{\frac{n(1-\varepsilon)}{\varepsilon} \sum_{k=1}^{n}p_k\log{}p_k}}{2\pi^{n} \prod_{k=1}^{n} \Gamma\left(1+\frac{n(1-\varepsilon)p_k}{\varepsilon} \right)} \int_{\cfield{}S^n}\limits e^{-\frac{n(1-\varepsilon)}{\varepsilon} \sum_{k=1}^{n} p_k \left( \log{}p_k - \log \lvert \braket{\mathbf{e}_k}{\phi} \rvert^{2} \right) } \, \raypr{\phi} dS \] \noindent from which the formula \eqref{e44a} follows directly. \end{proof} I emphasize that the formula \eqref{e44a} is exact. For small $\varepsilon$ we assume $1-\varepsilon\simeq 1$ and obtain the formula \eqref{emainformula}. \end{document}
\begin{document} \baselineskip=24pt \title{\textbf{Scalable Bayesian nonparametric measures for exploring pairwise dependence via Dirichlet Process Mixtures}} \author{{\sc Sarah Filippi$^1$, Chris C. Holmes$^1$ and Luis E. Nieto-Barajas$^{1,2}$}\\[2mm] {\sl $^1$Department of Statistics, University of Oxford, England}\\[2mm] {\sl $^2$Department of Statistics, ITAM, Mexico}\\[2mm] {\tt \small [email protected], [email protected] {\rm and} [email protected]}} \date{} \maketitle \begin{abstract} In this article we propose novel Bayesian nonparametric methods using Dirichlet Process Mixture (DPM) models for detecting pairwise dependence between random variables while accounting for uncertainty in the form of the underlying distributions. A key criterion is that the procedures should scale to large data sets. In this regard we find that the formal calculation of the Bayes factor for a dependent-vs.-independent DPM joint probability measure is not feasible computationally. To address this we present Bayesian diagnostic measures for characterising evidence against a ``null model'' of pairwise independence. In simulation studies, as well as for a real data analysis, we show that our approach provides a useful tool for the exploratory nonparametric Bayesian analysis of large multivariate data sets. \end{abstract} \noindent {\sl Key words}: Bayes nonparametrics, contingency table, dependence measure, hypothesis testing, mixture model, mutual information. \section{Introduction} \label{sec:intro} Identifying dependences among pairs of random variables measured on the same sample, producing datasets of the form $\text{D}=\{(x_i,y_i),\;i=1,\ldots,n\}$, is an important task in modern exploratory data analysis where historically the Pearson correlation coefficient and the Spearman's rank correlation have been used.
More recently there has been a move to the use of non-linear or distribution free methods such as those based on Mutual Information (MI) \citep{cover2012elements,kinney2014equitability}. In this paper we present Bayesian nonparametric methods for screening large data sets for possible pairwise associations (dependencies). Having an explicit probability measure of dependences has numerous advantages both in terms of interpretability and for integration across different experimental conditions and/or within a formal decision theoretic analysis. As data sets become ever larger and more complex we increasingly require Bayesian procedures that can scale to modern applications and this will be a key design criterion here. The main building block of our procedures will be the Dirichlet Process Mixture (DPM) model, which is the most popular Bayesian nonparametric model. We frame the problem of screening for evidence of pairwise dependence as a nonparametric model choice problem with alternatives: \begin{align} \nonumber \mathcal{M}_0:& \text{ $X$ and $Y$ are independent random variables}\\ \label{eq:m0m1} \mathcal{M}_1:& \text{ $X$ and $Y$ are dependent random variables}\;. \end{align} Given a set of measurement pairs $\text{D}$, for $n$ exchangeable observations one could then evaluate the posterior probability for competing models $\text{P}(\mathcal{M}_1 | \text{D}) = 1 - \text{P}(\mathcal{M}_0 | \text{D})$ or consider the Bayes factor $\text{P}(\text{D} \mid \mathcal{M}_0)/\text{P}(\text{D} \mid \mathcal{M}_1)$ which is a measure of the strength of evidence for independence between the two samples against dependence. However with $p$ measurement variables under study there are $\approx \frac{1}{2}p^2$ such pairwise Bayes factors to compute, where even just one such evaluation might be problematic to compute.
This motivates us to explore scalable alternatives to a formal Bayesian testing approach, by deriving summary statistics and functionals of the posterior that can provide strong indication in favour or against independence. Bayesian nonparametric hypotheses testing via Polya tree priors has been the focus of a couple of recent research papers \citep{holmes2015two,filippi2015bayesian}. Here, however, we specify model uncertainty in the distribution of X and Y via DPMs of Gaussians. This provides flexibility while also encompassing smoothness assumptions on the underlying joint distributions. Another advantage is that DPMs have been widely studied in the Bayesian nonparametric literature with excellent open source implementation packages available \citep[e.g.][]{jara2011dppackage}. Moreover, although not explored here, the use of DPMs makes our approach readily extendable to situations when $X$ and $Y$ are themselves collections of multivariate measurements. Here we consider pairwise dependence between univariate measurements where for $\mathcal{M}_0$, independence, the joint distribution factorises into a product of two univariate DPMs on $X$ and $Y$, while for $\mathcal{M}_1$ we can define a joint DPM model on the bivariate measurement space $(X,Y)$. In theory, given a DPM prior on the unknown densities, the Bayes factor can be calculated via the marginal likelihood. However this requires integrating over an infinite dimensional parameter space that does not have a tractable form. Moreover, using computational approaches to approximate the marginal likelihood is highly non-trivial, particularly when considering the need to scale to many thousands of comparisons with large $p$. To overcome this issue we present two new approaches to deriving scalable diagnostic measures corresponding to probabilistic measures of dependence, bypassing the need to calculate Bayes Factors that might not be feasible or desirable. 
Our methods are motivated by two recent proposals in the literature \citep{lock&dunson:13, kamary2014testing}, although neither of these papers consider the problem we address here as outlined below. Our first approach utilises the well known latent allocation, or clustering, structure of the DPM model to induce a partition of the two-dimensional data space. By running a Gibbs sampler under the independence model the cluster allocation of observations to specific mixture components at each iteration can then be used to define a latent contingency table given by the mixture component memberships. For each of these contingency tables we perform a parametric Bayesian independence-vs.-dependence test using conjugate multinomial-Dirichlet priors that lead to explicit analytic forms for the conditional marginal likelihoods. This proposal follows a similar idea considered in \cite{lock&dunson:13} who studied the two-sample testing problem. A key difference in what we present here, in addition to that we consider the problem of pairwise dependence, is that \cite{lock&dunson:13} use a finite mixture model to induce a partition instead of an infinite nonparametric mixture model used here. In our second approach, we adapt a recent procedure of \citep{kamary2014testing}, turning the model choice problem into an estimation problem by writing the competing models under a hierarchy that incorporates both models, $\mathcal{M}^* = \text{p}i \mathcal{M}_1 + (1-\text{p}i) \mathcal{M}_0$. We investigate the specification of $\mathcal{M}^*$ either as a mixture model with mixing component $0 \le \text{p}i \le 1$, or as a predictive linear ensemble of the two sub-models with constraints on the weights. We then estimate $\text{p}i$ which becomes a measure of the evidence for dependence. DPMs are used to obtain the likelihood associated to each of the competing models in $\mathcal{M}^*$, requiring a separate MCMC run for each potential pair of random variables. 
We compare and contrast the two procedures with particular regard to their scalability to large data sets. This latter feature naturally includes the amenity of the methods to simulation with modern parallel computation. We demonstrate that our association measures are scalable and successfully detect some highly non-linear dependences with equivalent performance to the current best conventional methods using mutual information, with the added advantages that fully probabilistic Bayesian methods enjoy. As mentioned above, some of these key advantages includes the ability to integrate results within a formal decision analysis framework, or within optimal experimental design, and the combination of results with other sources of information, or across studies such as arise in meta-analysis. The rest of the paper is as follows. In Section~\ref{sec:DPM} we review the Dirichlet Process and the DPM of Gaussians. In Section~\ref{sec:method} we describe the two approaches to quantify the evidence for dependence using Dirichlet Process Mixtures. In Section~\ref{sec:numerical} we illustrate our approach on the exploratory analysis of a real-world example from the World Health Organisation data set of country statistics and also on simulated data generated from simple models. We conclude the paper with a short discussion in Section~\ref{sec:conclusion}. \section{Dirichlet Process Mixtures} \label{sec:DPM} The Dirichlet process \citep{ferguson:73} is the most important process prior in Bayesian nonparametric statistics. It is flexible enough to approximate (in the sense of weak convergence) any probability law, although the paths of the process are almost surely discrete \citep{blackwell&macqueen:73}. Many years ago this discreteness was considered a drawback but nowadays it is simply a feature that characterises the Dirichlet process. This feature has recently been highly exploited in clustering applications (e.g. \citep{dahl:06}). The Dirichlet process is defined as follows. 
Let $G$ be a probability measure defined on $(\mathcal{X},\mathcal{B})$, where $\mathcal{X}\subset{\rm I}\!{\rm R}^p$ and $\mathcal{B}$ the corresponding Borel's $\sigma$-algebra. Let $G$ be a stochastic process indexed by the elements of $\mathcal{B}$. $G$ is a Dirichlet process with parameters $c$ and $G_0$ if for every measurable partition $(B_1,\ldots,B_k)$ of $\mathcal{X}$, $$(G(B_1),\ldots,G(B_k))\sim\mbox{Dir}(cG_0(B_1),\ldots,cG_0(B_k)).$$ From here we can see that, for every $B\in\mathcal{B}$, $\text{E}\{G(B)\}=G_0(B)$ and $\text{Var}\{G(B)\}=G_0(B)\{1-G_0(B)\}/(c+1)$. Therefore the parameter $c$ is known as precision parameter and $G_0$ as the centering measure. The Dirichlet process when used as a priori induces exchangeability in the data. In notation, let $X_1,\ldots,X_n$ be a sample of random variables such that conditional on $G$, $X_i\mid G\stackrel{\mbox{\scriptsize{iid}}}{\sim} G$. If we further take $G\sim\mathcal{DP}(c,G_0)$ then the marginal distribution of the data $(X_1,\ldots,X_n)$ once the process $G$ has been integrated out, is characterised by what is known as the P\'olya urn \citep{blackwell&macqueen:73}. We start with $X_1\sim G_0$ then \mbox{Be}gin{equation} \label{eq:polyaurn} X_{n}\mid X_1,\ldots,X_{n-1} \sim \frac{cG_0+\sum_{j=1}^{n-1}\text{d}elta_{X_j}}{c+n-1}. \end{equation} Instead of placing the Dirichlet process prior directly on the observable data, it can be used as the law of the parameters of another model (kernel) that generated the data. In notation, let us assume that for each $i=1,\ldots,n$, $$X_i\mid\theta_i\stackrel{\mbox{\scriptsize{ind}}}{\sim} f(x_i\mid\theta_i),$$ with $f$ a parametric density function. We can further take $$\theta_i\mid G \stackrel{\mbox{\scriptsize{iid}}}{\sim} G$$ with $$G\sim\mathcal{DP}(c,G_0).$$ This hierarchical specification can be seen as a mixture of density kernels $f(x\mid\theta)$ with mixing distribution coming from a Dirichlet process, i.e., $\int f(x\mid\theta)G(\text{d}\theta)$. 
This model is known as Dirichlet process mixture (DPM) model and was first introduced by \cite{lo:84} in the context of density estimation and written in hierarchical form by \cite{ferguson:83}. The most typical choice of kernel $f$ is the (multivariate) normal, in which case $\theta_i=(\mu_i,\sigma_i^2)$, with scalars mean and variance, in the univariate case, and $\theta_i=(\text{b}oldsymbol{\mu}_i,\text{b}oldsymbol{\Sigma}_i)$, with mean vector and variance-covariance matrix, in the multivariate case. We will work with this specific kernel throughout this paper. As can be seen by construction \eqref{eq:polyaurn}, in the mixture case, the Dirichlet process induces a joint distribution on the set $(\theta_1,\ldots,\theta_n)$ that allows for ties in the $\theta_i$'s. This in turn induces a clustering structure in the $\theta_i$'s (and $X_i$'s). Posterior inference of the DPM model usually relies on a Gibbs sampler \citep{smith&roberts:93}. At each iteration of the Gibbs sampler the model produces a different clustering structure. The number of clusters is a function of the sample size $n$ and the precision parameter $c$ of the underlying Dirichlet process. The larger the value of $c$, the larger the number of clusters induced. This clustering structure and parameter $c$ will play a central role in one of the independence test procedures that will be described later. \section{Two approaches for measuring dependence} \label{sec:method} As noted in Section 1, the calculation or approximation of the formal Bayes factor under $\mathcal{M}_0$ and $\mathcal{M}_1$ is not feasible when considering a large number of model comparisons. Indeed it may not even be desirable given that our objective is to highlight potential departures from independence rather than answer a formal model choice question. 
In this section we describe two distinct approaches for comparing models $\mathcal{M}_0$ and $\mathcal{M}_1$ defined in \eqref{eq:m0m1} based on DPM models that are computable and scalable to large data. \subsection{Contingency tables approach} The first approach is motivated by the paper from \cite{lock&dunson:13} who turned a two-sample testing problem into a discrete test on the clustered data. Recall that the two-sample testing problem considers the same measurement variable recorded on separate subjects under two different conditions; whereas we are considering different measurement variables recorded on the same subject. Similar to \cite{lock&dunson:13}, our procedure consists in marginally discretising the data into ordered categories and performing a Dirichlet-multinomial independence test on the induced contingency table. This amounts to first clustering the data under $\mathcal{M}_0$ and then exploring for evidence of departure from $\mathcal{M}_0$, toward $\mathcal{M}_1$, by testing for statistical association between the cluster memberships in $X$ and $Y$. Uncertainty in the cluster memberships is accounted for by the DPM defined under $\mathcal{M}_0$, as outlined below. To begin assume that the data are marginally clustered in $K_X$ and $K_Y$ clusters and denote by $\xi_{X,i}\in \{1\ldots,K_X\}$ and $\xi_{Y,i}\in \{1,\text{d}ots,K_Y\}$ the cluster indicators for the data points $x_i$ and $y_i$ respectively, for $i=1,\text{d}ots,n$. Using these cluster indicators, we can construct a contingency table $\text{b}M_{\xi_X,\xi_Y} =\{m_{kl}\}$ of size $K_X\times K_Y$, such that $m_{kl}=\sum_{i=1}^n I(\xi_{X,i}=k,\, \xi_{Y,i}=l)$, for $k=1,\text{d}ots K_X$ and $l=1,\text{d}ots,K_Y$. The contingency table $\text{b}M_{\xi_X,\xi_Y}$ represents a discretised version of the (unnormalised) marginals and joint distribution of the continuous vector $(X,Y)$. 
We can then apply Bayesian independence tests for discrete / categorical variables following \cite{gunel&dickey:74} and \cite{good&crook:87} who proposed a conjugate multinomial-Dirichlet independence test which is described as follows. Let $\text{b}M_{\xi_X,\xi_Y}\sim\mbox{Mult}(n,\text{b}p)$ with $\text{b}p=\{p_{kl}\}$ the matrix of cell probabilities of dimension $K_X\times K_Y$. Consider a conjugate prior distribution $\text{b}p\sim\mbox{Dir}(\text{b}oldsymbol{\alpha})$, with $\text{b}oldsymbol{\alpha}=\{\alpha_{kl}\}$ such that $\sum_{kl}\alpha_{kl}=a$. In practice we suggest to use $\alpha_{kl}=a ( K_XK_Y)^{-1}$ or $\alpha_{kl}=1/2$ for all $1\leq k\leq K_X$ and $1\leq l\leq K_Y$. Under model $\mathcal{M}_1$ the probability of having observed the counts in $\text{b}M_{\xi_X,\xi_Y}$ is \mbox{Be}gin{equation} \label{eq:pm0} \text{P}(\text{b}M_{\xi_X,\xi_Y}\mid\mathcal{M}_1, \xi_{X}, \xi_{Y})=\int \text{P}(\text{b}M_{\xi_X,\xi_Y}\mid\text{b}p) f(\text{b}p)\,\text{d}\text{b}p =\frac{\Gamma(a)}{\Gamma(a+n)}\text{p}rod_{k,l}\frac{\Gamma(\alpha_{kl}+m_{kl})}{\Gamma(\alpha_{kl})}. \end{equation} Under the independent model $\mathcal{M}_0$ the observed counts $\text{b}M_{\xi_X,\xi_Y}$ can be expressed in terms of the marginal counts $\text{b}m_X=\{m_{k\cdot}\}$ and $\text{b}m_Y=\{m_{\cdot l}\}$ whose implied distributions are again multinomial with probability vectors $\text{b}p_X=\{p_{k\cdot}\}$ and $\text{b}p_Y=\{p_{\cdot l}\}$, respectively, with $p_{k\cdot}=\sum_{l}p_{kl}$ and $p_{\cdot l}=\sum_{k}p_{kl}$. The induced prior distributions are also Dirichlet with parameters $\text{b}oldsymbol{\alpha}_X=\{\alpha_{k\cdot}\}$ and $\text{b}oldsymbol{\alpha}_Y=\{\alpha_{\cdot l}\}$. 
Then, the probability of $\text{b}M_{\xi_X,\xi_Y}$ under $\mathcal{M}_0$ becomes \mbox{Be}gin{align} \mbox{N}number \text{P}(\text{b}M_{\xi_X,\xi_Y}\mid\mathcal{M}_0, \xi_{X}, \xi_{Y})&=\int \text{P}(\text{b}m_X\mid\text{b}p_X)f(\text{b}p_X)\,\text{d}\text{b}p_X \int \text{P}(\text{b}m_Y\mid\text{b}p_Y)f(\text{b}p_Y)\,\text{d}\text{b}p_Y \\ \label{eq:pm1} &=\frac{\Gamma^2(a)}{\Gamma^2(a+n)}\text{p}rod_{k}\frac{\Gamma(\alpha_{k\cdot}+m_{k\cdot})}{\Gamma(\alpha_{k\cdot})}\text{p}rod_{l}\frac{\Gamma(\alpha_{\cdot l}+m_{\cdot l})}{\Gamma(\alpha_{\cdot l})}\;, \end{align} where $\alpha_{k\cdot}=\sum_{l}\alpha_{kl}$ and $\alpha_{\cdot l}=\sum_{k}\alpha_{kl}$. To compare evidence in favour of each model, we use expressions \eqref{eq:pm0} and \eqref{eq:pm1} to compute the Bayes factor $BF_{\xi}=\text{P}(\text{b}M_{\xi_X,\xi_Y}\mid\mathcal{M}_0, \xi_{X}, \xi_{Y})/\text{P}(\text{b}M_{\xi_X,\xi_Y}\mid\mathcal{M}_1, \xi_{X}, \xi_{Y})$. Using equal prior probabilities for both models, i.e. $\text{P}(\mathcal{M}_0)=\text{P}(\mathcal{M}_1)=0.5$, we obtain that the posterior probabilities for the independence and dependence models are $\text{P}(\mathcal{M}_1\mid\text{b}M_{\xi_X,\xi_Y})=1/(1+BF_{\xi_X,\xi_Y})=1-\text{P}(\mathcal{M}_0\mid\text{b}M_{\xi_X,\xi_Y}).$ where \mbox{Be}gin{equation} BF_{\xi_X,\xi_Y}=\frac{\Gamma(a)}{\Gamma(a+n)}\text{p}rod_{k}\frac{\Gamma(\alpha_{k\cdot}+m_{k\cdot})}{\Gamma(\alpha_{k\cdot})}\text{p}rod_{l}\frac{\Gamma(\alpha_{\cdot l}+m_{\cdot l})}{\Gamma(\alpha_{\cdot l})}\text{p}rod_{k,l}\frac{\Gamma(\alpha_{kl})}{\Gamma(\alpha_{kl}+m_{kl})}\;. \label{eq:BFinCT-BF} \end{equation} It should also be noted that this contingency table approach would also afford a conditional frequentist test. For example, consider Pearson's chi-squared test of independence \citep{pearson:22}. Denote by $m_{k\cdot}=\sum_l m_{kl}$ and $m_{\cdot l}=\sum_k m_{kl}$ the number of individuals classified in cluster $k$ of $X$ and cluster $l$ of $Y$, respectively. 
Then, the well known test statistic is \mbox{Be}gin{equation} \label{eq:CTpvalue} T=\sum_{k=1}^{K_X}\sum_{l=1}^{K_Y}\frac{(m_{kl}-m_{k\cdot}m_{\cdot l}/n)^2}{m_{k\cdot}m_{\cdot l}/n}\,. \end{equation} Under the null hypothesis $\mathcal{M}_0$ of independence, statistic $T$ follows a $\chi^2$ distribution with $(K_X-1)(K_Y-1)$ degrees of freedom. If the test statistic is improbably large according to that chi-square distribution, then one rejects the null hypothesis $\mathcal{M}_0$ in favour of the dependence hypothesis $\mathcal{M}_1$. The hypothesis testing approach described in this section assumes that the data are marginally clustered. However, these clusters are not known a prior. A Bayesian approach for data clustering is to define a prior distribution over the clustering and then update the posterior based on the evidence provided by the data. Here we make use of the DPM model structure to create an empirical partition of the two-dimensional data space, taking into account the uncertainty on the allocation process. More precisely, we consider two independent DPM prior models for each of the marginal densities with the following specifications: \mbox{Be}gin{equation} \label{eq:prior0a} f_{0,X}(x)\sim\int\mbox{N}(x\mid\theta_X)G_X(\text{d}\theta_X)\;\;\;\text{and}\;\;\; f_{0,Y}(y)\sim\int\mbox{N}(y\mid\theta_Y)G_Y(\text{d}\theta_Y), \end{equation} where $\theta_X=(\mu_X,\sigma_X^2)$ and $\theta_Y=(\mu_Y,\sigma_Y^2)$, with \mbox{Be}gin{equation} \label{eq:prior0b} G_X\sim\mathcal{DP}(c_0,G_0)\;\; \text{and}\;\; G_Y\sim\mathcal{DP}(c_0,G_0) \end{equation} and $G_0=\mbox{N}(\mu\mid \mu_0,\sigma^2/k_0)\,\mbox{IGa}(\sigma^2\mid \nu/2-1,\text{p}si/2)$. The latent clustering structure induced by the DPM models defined by \eqref{eq:prior0a} and \eqref{eq:prior0b}, can then be used to construct a contingency table as described above. Note that in an ideal world one would carefully specify subjective beliefs on the prior marginals for $X$ and $Y$. 
However, when the number of variables is large this is not feasible and we require some default specification as done here, by assuming a common prior after suitable transformation of the data. Although it is clear from the properties of the DP that it induces a partition, in practice it is not easy to determine an optimal one. Fitting a DPM model via a Gibbs sampler provides a partition at each iteration. We can proceed in two different ways. One is to use all potential partitions coming from the MCMC, and for each of them perform the Bayesian independence test and report the expected posterior probability. More precisely, the functional we consider is \mbox{Be}gin{equation} p_\text{dep}=\int \frac{1}{(1+BF_{\xi_X,\xi_Y})}p(\xi_{X}, \xi_{Y})d\xi_Xd\xi_Y\;. \label{eq:pindepCTBF} \end{equation} This is the procedure we recommend and develop below. An alternative approach would be to consider the selection of one of the partitions using an appropriate optimization criterion, for example using the criterion of \cite{dahl:06} who proposes to choose the partition that minimises the squared deviations with respect to the average pairwise clustering matrix, and use that single partition to perform the test, ignoring the uncertainty in the partition structure as in \cite{lock&dunson:13} for the two-sample test. In \textit{Supplementary Material} we provide an empirical comparison between both procedures. In the rest of the paper we will focus on the first alternative that considers all potential partitions; we will refer to this procedure as CT-BF. 
\begin{algorithm} \caption{Independence measure based on Contingency table (CT-BF)} \label{alg:CT} \begin{algorithmic} \REQUIRE Data $\text{D}=\{x_i,y_i\}_{i=1}^n$ \REQUIRE Prior parameters $a$ \REQUIRE Prior parameters for the DPM and number of iterations $N_\text{it}$ \ENSURE Probability of dependence $p_\text{dep}$ \STATE \underline{DPM inference:} \STATE Infer a DPM model for the distribution $f_{0,X}(x)$ using a Gibbs Sampler with $N_\text{it}$ iterations \STATE $\rightarrow$ for each iteration $1\leq j\leq N_\text{it}$, record a vector of cluster indicators $\xi_X^{(j)}$ \STATE Infer a DPM model for the distribution $f_{0,Y}(y)$ using a Gibbs Sampler with $N_\text{it}$ iterations \STATE $\rightarrow$ for each iteration $1\leq j\leq N_\text{it}$, record a vector of cluster indicators $\xi_Y^{(j)}$ \FOR{$1\leq j\leq N_\text{it}$} \STATE Construct a contingency table $\mathbf{M}^{(j)}$ of size $K_X^{(j)}\times K_Y^{(j)}$ based on $\xi_X^{(j)}$ and $\xi_Y^{(j)}$ \STATE $p^{(j)} \leftarrow 1/(1+BF)$ where $BF$ is defined in \eqref{eq:BFinCT-BF}. \ENDFOR \STATE $p_\text{dep}\leftarrow\frac{1}{N_\text{it}}\sum_{j=1}^{N_\text{it}}p^{(j)}$ \end{algorithmic} \end{algorithm} \subsection{Mixture model predictive approach} In this section we consider an alternative approach for testing between the hypotheses \eqref{eq:m0m1}. Motivated by \cite{kamary2014testing} we replace the testing problem with an estimation one by defining a predictive ensemble model $\mathcal{M}^*$ whose components are the competing models $\mathcal{M}_0$ and $\mathcal{M}_1$. To be precise, let $f_0$ and $f_1$ denote the densities of $(X,Y)$ defined by models $\mathcal{M}_0$ and $\mathcal{M}_1$, respectively.
Then we define a predictive mixture model as a linear combination of sub-models of the form \begin{equation} \label{eq:mixmod} f^*(x,y) = \pi f_1(x,y)+(1-\pi) f_0(x,y), \end{equation} where $\pi$ is a free regression parameter with constraint $0\leq \pi\leq 1$ and $f_0(x,y)=f_{0,X}(x)f_{0,Y}(y)$. This model embeds both $\mathcal{M}_0$ and $\mathcal{M}_1$ for values of $\pi$ equal to $0$ or $1$. The main idea of this method is to estimate from the data the mixture parameter $\pi$, which indicates the preference of the data for dependence model $\mathcal{M}_1$. In contrast to the latent contingency table procedure this approach requires the explicit construction of a joint model under hypothesis $\mathcal{M}_1$. Since $f_0$ and $f_1$ are unknown densities, we assume Bayesian nonparametric prior distributions. For $f_{0,X}(x)$ and $f_{0,Y}(y)$ we consider the DPM model defined by equations \eqref{eq:prior0a} and \eqref{eq:prior0b}. For $f_1$ we take a bivariate DPM model defined as \begin{equation} \label{eq:prior1a} f_1(x,y)\sim\int\mbox{N}(x,y\mid\theta_{X,Y})G_{X,Y}(\text{d}\theta_{X,Y}), \end{equation} where $\theta_{X,Y}=(\boldsymbol{\mu},\boldsymbol{\Sigma})$, with \begin{equation} \label{eq:prior1b} G_{X,Y}\sim\mathcal{DP}(c_{1},G_{1}) \end{equation} and $G_{1}=\mbox{N}(\boldsymbol{\mu}\mid \boldsymbol{\mu}_0,(1/k_0)\boldsymbol{\Sigma})\,\mbox{IW}(\boldsymbol{\Sigma}\mid\nu,\boldsymbol{\Psi})$. The parameter $\pi$ also has to be estimated, so we take a prior of the form $\pi\sim\mbox{Be}(a_0,b_0)$.
We ensure that the centring measures $G_0$ and $G_1$ are comparable by setting their hyper-parameters as follows: we have $G_{d-1}=\mbox{N}(\boldsymbol{\mu}\mid \boldsymbol{\mu}_0,(1/k_0)\boldsymbol{\Sigma})\,\mbox{IW}(\boldsymbol{\Sigma}\mid\nu,\boldsymbol{\Psi})$ for $d=1$ and $2$ with $\nu=d+2$, the $d$-dimensional vector $\boldsymbol{\mu}_0\sim\mbox{N}(0_d,c_\mu\;\mathbf{I}_d)$, the $d\times d$-matrix $\boldsymbol{\Psi}\sim\mbox{IW}(\nu,c_\Psi \;\mathbf{I}_d)$, where $\mathbf{I}_d$ is the identity matrix of dimension $d$. The hyper-parameters $c_\mu$, $c_\Psi$ and $k_0$ are set to be equal for $G_0$ and $G_1$. Our objective is to highlight pairwise dependence across many pairs of variables, and order the pairs into those showing evidence from strongest to weakest association. This motivates us to consider a simplified method by assessing the relative posterior predictive evidence under $\mathcal{M}_0$ to that of $\mathcal{M}_1$, by calculating an ensemble model using the posterior predictive probability of the observed data $f_1(x_{new},y_{new}|D)$ and $f_0(x_{new},y_{new}|D)$ separately. In the following we will use the notation $\hat{f}_j(x_{new},y_{new})=f_j(x_{new},y_{new}|D)$, $j=0,1$, to denote the posterior predictive distribution. It is important to note that for all $[p \times (p-1) / 2]$ $X,Y$ pairs we use the same prior, and hence the same model complexity across all pairs, so ranking by the improvement in posterior predictive likelihood under $\mathcal{M}_1$ relative to $\mathcal{M}_0$ should not \emph{a priori} favour certain pairs over others.
This procedure significantly simplifies the inference as we can infer the posterior models by first fitting the three DPM models separately, each using the entire sample data, and then updating the ensemble parameter $\pi$ from its posterior conditional distribution \[ f(\pi\mid\text{D})\propto f(\pi)\prod_i \left(\pi \hat{f}_1(x_i,y_i)+(1-\pi)\hat{f}_0(x_i,y_i)\right)\;, \] which is a simple line search on $[0,1]$. We will refer to this inference procedure as MixMod-ensemble -- see Algorithm \ref{alg:MixMod2steps}. \begin{algorithm} \caption{Independence test MixMod-ensemble} \label{alg:MixMod2steps} \begin{algorithmic} \REQUIRE Data $\text{D}=\{x_i,y_i\}_{i=1}^n$; Prior parameters $a_0$ and $b_0$; Prior parameters for the DPMs \ENSURE Estimate of mixture parameter $\pi$ \STATE \underline{DPMs inference:} \STATE $\hat{f}_{0,X} \leftarrow$ posterior prediction of a DPM for the distribution of $\{x_i\}_i$ averaged over all Gibbs sampler iterations \STATE $\hat{f}_{0,Y}\leftarrow$ posterior prediction of a DPM for the distribution of $\{y_i\}_i$ averaged over all Gibbs sampler iterations \STATE $\hat{f}_{1} \leftarrow$ posterior prediction of a DPM for the distribution of $\{x_i,y_i\}_i$ averaged over all Gibbs sampler iterations \STATE \underline{Estimation of $\hat{\pi}$:} \STATE Define a fine grid of $[0,1]$ with intervals of length $\eta=10^{-4}$ \FOR{$j=0, \dots, \eta^{-1}$} \STATE $\pi^{(j)}\leftarrow j\times \eta$ \STATE $L_j \leftarrow \sum_{i=1}^n\log(\pi^{(j)}\hat{f}_1(x_i,y_i)+(1-\pi^{(j)})\hat{f}_{0,X}(x_i)\hat{f}_{0,Y}(y_i))+\log(\mbox{Be}(\pi^{(j)}\mid a_0,b_0))$ \ENDFOR \STATE $\hat{\pi}\leftarrow \frac{1}{\sum_j\exp(L_j)}\sum_j\pi^{(j)}\exp(L_j)$ \end{algorithmic} \end{algorithm} An alternative approach, more closely resembling \cite{kamary2014testing}, is to consider $\mathcal{M}^*$ as a mixture-model rather than an ensemble model where
with probability $\pi$ the data arises from $f_1$ and with probability $1-\pi$ from $f_0$. \cite{diebolt&robert:94} show that posterior sampling in a mixture model is simplified if we introduce latent variable indicators $\zeta_i\sim\mbox{Ber}(\pi)$ that determine whether observation $i$ comes from $f_1$, when $\zeta_i=1$, or from $f_0$, when $\zeta_i=0$. Conditional on these latent indicators the mixture components $f_0$ and $f_1$ can be updated using only the data points allocated to each model. As noted by \cite{kamary2014testing}, the Gibbs sampler implemented in this way can become quite inefficient if the parameter $\pi$ approaches the boundaries $\{0,1\}$, especially for large sample sizes. We refer to this method as MixMod. For our purposes this requires specifying a Gibbs sampler for the mixture model utilising three DPM models $\{f_1(x,y), f_{0,X}(x), f_{0,Y}(y)\}$ and the mixture allocations for points across all $p \times (p-1) / 2$ pairs. In the paper we will illustrate the performance using MixMod-ensemble, and in the \textit{Supplementary Material} we provide a comparison between MixMod and MixMod-ensemble. Regardless of the posterior inference procedure, different estimators of $\pi$ could be obtained from its posterior distribution. We chose to select the expected value as a statistic of dependence, that is, \begin{equation} \hat{\pi}=\text{E}(\pi\mid\text{D})=\int_0^1\pi\; f(\pi \mid \text{D})d\pi\;. \label{eq:MM} \end{equation} \subsection{Computational tractability} Both of the Bayesian non-parametric approaches proposed here are motivated by the increasing necessity of screening large data sets for possible pairwise dependencies where calculation of the formal Bayes factor under $\mathcal{M}_0$ and $\mathcal{M}_1$ is infeasible or undesirable.
In this section, we discuss some computational advantages of our two methods including their amenity to implementation on modern computing architectures exploiting parallelisation on multi-core standalone machines, or clusters of multi-core and many-core machines, or cloud based computing environments. In relation to parallelisation we see that both methods are divided into two steps: one starts by inferring DPMs using a Gibbs sampler and then performs a dependence test using every iteration of the Gibbs sampler. This decoupling of the inference step and the model comparison step allows us to significantly reduce the computational cost of the procedure. In particular, only a couple of thousand Gibbs sampling iterations are necessary to estimate the predictive posterior densities and posterior distributions over the latent allocation variables. In the R environment for statistical computing \cite{rpackage}, the parallelisation of both approaches is very simple and only consists in replacing the command \textit{apply} by the command \textit{parLapply} from the package \textit{parallel} -- which is included in versions of R following 2.14.0. The R code to run the CT-BF and MixMod-ensemble independence tests is available in the \textit{Supplementary Material}. The CT-BF approach based on the construction of a contingency table is particularly attractive as it is trivially parallelizable and does not involve an explicit DPM model for the joint $f_1(x,y)$ under $\mathcal{M}_1$. With $p$ measurement variables under study, this approach only needs to infer $p$ independent marginal DPMs, recording information from $N_\text{it}$ Gibbs sampling iterations for each of them independently in parallel. The MCMC output from the $p$ models is then combined and we perform $N_\text{it} \times p\times(p-1)/2$ independent tests which, following \eqref{eq:BFinCT-BF}, only involve computing ratios of Gamma functions.
As an illustration, in the example described in more detail in Section~\ref{sec:numerical}, for $p=562$ measurement variables, the first stage of inference on the DPMs takes less than $3$ minutes on a $48$-core machine, and then the resulting $1.5\times 10^8$ pairwise tests of dependence for all pairs of variables are performed in one hour. In comparison, the MixMod-ensemble approach incurs a greater computational overhead as we require bivariate DPMs, $f_1(x,y)$, to be fit for all pairs. In the illustration below the MixMod-ensemble procedure for the $1.5\times 10^8$ pairs takes approximately 36 hours on the same 48-core machine. \section{Numerical Analysis} \label{sec:numerical} \subsection{World Health Organisation dataset} \label{sec:WHO} In this section, we apply the two approaches described in Section \ref{sec:method} to detect dependencies in economic, social and health indicators from the World Health Organisation (WHO). The WHO Statistical Information System (WHOSIS) has recently been incorporated into the Global Health Observatory (GHO) that contains a data repository (\url{http://www.who.int/gho/database/en/}) with mortality and global health estimates, demographic and socioeconomic statistics as well as information regarding health service coverage and risk factors for $194$ countries. We combined these datasets to obtain a set of $562$ statistics per country. We aim at highlighting potential dependencies between these indicators. Scatterplots of some of these indicators are represented in Figure~\ref{fig:whoData}, where for example we see, unsurprisingly, strong dependencies between indicators such as life expectancy at birth and increased life expectancy at age 60 (Pair E). \begin{figure} \includegraphics[width=\textwidth]{pairs.pdf} \caption{Examples of the relationship between economic, social and health indicators provided by the WHO Statistical Information System.
Each dot corresponds to one country.} \label{fig:whoData} \end{figure} We applied both the CT-BF and the MixMod-ensemble test to compute the probability of dependence for all the 157,641 pairs of indicators. The two proposed methods require the specification of several parameters of the prior distributions. The impact of these choices is discussed in the \textit{Supplementary Material}. For the approach based on contingency tables the prior specifications for models \eqref{eq:prior0a} and \eqref{eq:prior0b} are set as follows: $c_0=10$, $\mu_0\sim\mbox{N}(0,1)$, $k_0\sim\mbox{Ga}(1/2,100/2)$, $\nu=3$ and $\psi\sim\mbox{IGa}(1/2,5)$. Note that $c_0$ controls the number of clusters induced, so in order to avoid having partitions with only one cluster we set this parameter at a relatively large value. To specify the Dirichlet prior for the cell probabilities in the contingency table we took $\alpha_{kl}=1/2$, which is the Jeffreys prior in a multinomial model. In experimentation we found that the contingency table can be sensitive to the choice of the parameter $c_0$. This parameter influences the number of clusters in the DPM model and therefore the size of the contingency tables, and it is important to specify a value that induces a reasonable number of clusters. We would recommend exploring several values. Results seem fairly insensitive to the choice of the parameters $\alpha_{kl}$ in the Dirichlet priors. For the approach considering an ensemble mixture model, the parameters $c_0$ and $c_1$ are not fixed but specified by $c_0, c_1\sim\mbox{Ga}(1,1)$ and $\mu_0\sim\mbox{N}(0,100)$. This change was introduced to allow the model to determine the best fit without constraining the number of clusters.
In addition, the prior processes $G_0$ and $G_1$ are defined as follows: $G_{d-1}=\mbox{N}(\boldsymbol{\mu}\mid \boldsymbol{\mu}_0,(1/k_0)\boldsymbol{\Sigma})\,\mbox{IW}(\boldsymbol{\Sigma}\mid\nu,\boldsymbol{\Psi})$ for $d=1$ and $2$ with $\nu=d+2$, the $d$-dimensional vector $\boldsymbol{\mu}_0\sim\mbox{N}(0_d,100\;\mathbf{I}_d)$, the $d\times d$-matrix $\boldsymbol{\Psi}\sim\mbox{IW}(\nu, 0.1\;\mathbf{I}_d)$ and $k_0\sim\mbox{Ga}(1/2,50)$, where $\mathbf{I}_d$ is the identity matrix of dimension $d$. The prior distribution of the mixing proportion $\pi$ was specified by taking $a_0=b_0=1/2$. Our experience is that results are fairly robust to the prior parameter settings (see the \textit{Supplementary Material}). The procedures were implemented in the R environment for statistical computing \cite{rpackage} and make use of the package \emph{DPpackage} \citep{jara2011dppackage}. Chains were run for 10,000 iterations with a burn-in of 1,000, keeping one of every 5th draw for computing estimates. For both approaches the tests were performed only for pairs containing measurements for at least 10 countries. For the CT-BF approach, the $562$ DPMs are inferred using all the available data; however, the contingency tables were constructed taking into account only the countries for which both indicators (in the pair) are available. For the MixMod-ensemble approach, in order to avoid any bias towards one of the two models $\mathcal{M}_0$ or $\mathcal{M}_1$, both the DPMs on the marginals and the DPM on the joint space are inferred only on the countries for which measurements are available for both indicators. Extending the method to handle missing data is a future objective. The measures of dependence obtained following our two approaches, i.e.
$p_\text{dep}$ for CT-BF and $\hat\pi$ for MixMod-ensemble, defined respectively in equations~\eqref{eq:pindepCTBF} and \eqref{eq:MM}, are compared for each pair of variables in Figure~\ref{fig:whoResults} (left panel). Strong dependences (defined as $p_\text{dep}> 0.8$) are detected for 5\% of pairs, and credible independence (i.e. $p_\text{dep}< 0.2$) between 30\% of the indicators. We observe that the two probabilistic measures of dependence generally agree for most of the pairs, with the probability value obtained following the MixMod-ensemble method being generally higher than the probability measure obtained following the CT-BF approach. This elevation in the evidence for dependence is perhaps to be expected as MixMod-ensemble uses the conditional posterior predictive likelihood which will favour the more complex joint model of $f_1(x,y)$. However, the two methods disagree (defined as the probability value obtained following one method being lower than $0.2$ while it is larger than $0.8$ following the other method) for less than 0.36\% of the pairs; and these differences mainly occur when one of the $(X,Y)$ variables is equal to $0$ for more than 20\% of the countries (see for example pairs C and D). On balance we prefer to use the CT-BF approach due to its computational scalability, 1 hour of run-time on a 48-core computer in comparison with 36 hours for MixMod-ensemble in this example. We compared the analysis from the CT-BF to that using a mutual information approach computed using the 20-nearest-neighbours method, as in \cite{kinney2014equitability} (see Figure~\ref{fig:whoResults}, right panel, where the labelled points correspond to plots in Figure~\ref{fig:whoData}). We remark that some pairs of variables with strong dependences under CT-BF have a wide spread of mutual information; in particular we note pairs D and F that have a probability of dependence close to 1 for CT-BF but relatively low MI values.
Visually at least one could argue that associations of the form seen in Figure~\ref{fig:whoResults} D and F may be of potential interest to follow up by the analyst. \begin{figure} \includegraphics[width=\textwidth]{pCT_PMixMod_MI.png} \caption{Performance comparison between the CT-BF and the MixMod-ensemble approaches (left) and the mutual information (right) for every pair of indicators in the WHO dataset. The probabilities of dependence obtained following CT-BF and MixMod-ensemble are respectively $p_\text{dep}$ and $\hat\pi$, defined in equations~\eqref{eq:pindepCTBF} and \eqref{eq:MM} and approximated following Algorithms \ref{alg:CT} and \ref{alg:MixMod2steps}. The letters A to F correspond to the 6 pairs of indicators illustrated in Figure~\ref{fig:whoData}.} \label{fig:whoResults} \end{figure} \subsection{Simulation Study for frequentist power analysis} \label{sec:simulated} In this section we perform a simulation study to examine the frequentist performance of the two proposed tests on some controlled scenarios. The objective is to verify that we are not losing much power against a popular non-probabilistic method based on mutual information, which is optimised for frequentist power. Simulated datasets are generated under the following four different scenarios: \begin{enumerate} \item A bivariate normal model: $(X,Y)\sim\mbox{N}_2(\mathbf{0},\boldsymbol{\Sigma})$ with $\boldsymbol{\Sigma}=\left(\begin{array}{c c} 1 & \rho\\ \rho & 1 \end{array}\right)$, \item A sinusoidal model: $Y = 2\sin(X) + \eta$, with $\eta\sim\mbox{N}(0,\phi^2)$, and $X\sim \mbox{Un}[0,5\pi]$ \item A parabolic model: $Y=2X^2/3 +\eta$, with $\eta\sim\mbox{N}(0,\phi^2)$, and $X\sim\mbox{N}(0,1)$ \item A circular model: $X = 10\cos(\theta) + \eta$ and $Y = 10\sin(\theta) + \eta$, with $\theta\sim\mbox{Un}[0,2\pi]$ and $\eta\sim\mbox{N}(0,\phi^2)$.
\end{enumerate} For the sinusoidal, parabolic and circular models, the parameter $\phi$ controls the level of noise, whereas for the normal model the correlation $\rho$ controls the degree of dependence between the two samples. We generated fifty independent datasets from each model with a sample size $n=250$ with different correlations $\rho\in\{0,0.1,0.3,0.5,0.9\}$, for model (a), and levels of noise $\phi\in\{1,2,3,4,5\}$ for models (b)--(d). Figure~\ref{fig:models} shows one of the fifty simulated datasets as an illustration. For all the simulated datasets we apply our different procedures for testing the hypotheses \eqref{eq:m0m1}. We use the same prior specifications as described in Section~\ref{sec:WHO}. \begin{figure}[H] \centering \includegraphics[width=0.8\textwidth]{Article_plotModels.pdf} \caption{Samples of size $250$ generated from the four scenarios for two levels of correlation $\rho$ in the normal model and two levels of noise $\phi$ in the sinusoidal, parabolic and circular models.} \label{fig:models} \end{figure} To investigate the power of the two approaches, we create ROC curves that compare the rate of true positives (percentage of times the procedure detects dependence among the fifty datasets generated from a dependent model) and false positives (percentage of times the procedure detects dependence among fifty null datasets generated by randomly permuting the indices of the two samples to destroy any dependence) for different threshold values. We also compare the performance of the proposed methods to the current state-of-the-art conventional method, which is based on mutual information (using the $20$ nearest neighbours). The ROC curves are reported in Figure \ref{fig:ROC_simp}; see also the \textit{Supplementary Material} that contains additional, more extensive comparisons.
We observe that the proposed methods have similar performance to the current leading conventional method for data coming from a sinusoidal or a parabolic model. For data generated from the circular model, however, the mutual information method outperforms our approaches. \begin{figure}[H] \includegraphics[width=\textwidth]{Article_type6.pdf} \caption{ROC curves for competing methods as a function of correlation and noise level for models (a)--(d). CT-BF (blue line), MixMod-ensemble (red line) and Mutual Information approximated using the $20$ nearest neighbours (black dotted line).} \label{fig:ROC_simp} \end{figure} \section{Conclusion} We presented two Bayesian nonparametric procedures for highlighting pairwise dependencies between random variables that are scalable to large data sets. The methods make use of standard software in R for implementing DPMs of Gaussians and are designed to exploit modern computer architectures. As such they are readily amenable to applied statisticians interested in exploratory analysis of large data sets. A power analysis shows that the procedures are comparable with current non-Bayesian methods based on mutual information, while having the advantage of being probabilistic in their measurement. \label{sec:conclusion} \bibliographystyle{abbrvnat} \bibliography{paper} {\huge Supplementary Material} \appendix \section{Comparison between variants of the two approaches} In Section~\ref{sec:method} we described two Bayesian nonparametric approaches to highlight dependences between two random variables. For each approach, we mentioned different variants. Here we provide a comparison of these variants on the simulated datasets described in Section~\ref{sec:simulated}. For the approach based on contingency tables, the main method consists in using all potential partitions coming from the Gibbs sampling, performing the test at each iteration and reporting the average probability of dependence (over all the iterations).
An alternative to this approach, as mentioned in Section 3, is based on only one of the partitions selected using an optimisation criterion; we refer to this approach as CT-BF-1-clust. Regarding the mixture model approach, an alternative method to the posterior predictive approach of MixMod-ensemble is a more conventional approach (called MixMod) described in Algorithm~\ref{alg:MixModIt}. It consists in iteratively allocating the data to the independent or the dependent model and inferring each model based only on the data that has been allocated to it. \begin{algorithm} \caption{Independence test MixMod} \label{alg:MixModIt} \begin{algorithmic} \REQUIRE Data $\text{D}=\{x_i,y_i\}_{i=1}^n$; Prior parameters $a_0$ and $b_0$; Prior parameters for the DPMs; Number of iterations $N_\text{it}$ \ENSURE Estimate of mixture parameter $\pi$ \STATE \underline{Initialisation:} \STATE $j\leftarrow 1$ \STATE continue $\leftarrow$ true \STATE $\pi^{(1)}\leftarrow 0.5$ \STATE $\xi\leftarrow$ vector with $n/2$ values equal to $0$ and $n/2$ values equal to $1$ randomly allocated \WHILE{$ j\leq N_\text{it}$ and continue=true} \STATE \underline{Infer DPMs given data allocation:} \STATE $n_\text{it}\leftarrow$ integer randomly sampled from $50$ to $100$ \STATE $f_X^{(j)} \leftarrow$ posterior prediction of a DPM for distribution of $\{x_i, i \text{ s.t. } \xi_i=0\}$ based on the $n_\text{it}$-th Gibbs sampler iteration \STATE $f_Y^{(j)} \leftarrow$ posterior prediction of a DPM for distribution of $\{y_i, i \text{ s.t. } \xi_i=0\}$ based on the $n_\text{it}$-th Gibbs sampler iteration \STATE $f_{XY}^{(j)} \leftarrow$ posterior prediction of a DPM for joint distribution of $\{(x_i,y_i), i \text{ s.t.
} \xi_i=1\}$ based on the $n_\text{it}$-th Gibbs sampler iteration \STATE \underline{Allocation of the data:} \FOR{$1\leq i\leq n$} \STATE $p\leftarrow \left(\pi^{(j)}f_{XY}^{(j)}(x_i,y_i)\right)/\left(\pi^{(j)}f_{XY}^{(j)}(x_i,y_i)+(1-\pi^{(j)})f_X^{(j)}(x_i)f_Y^{(j)}(y_i)\right)$ \STATE $\tilde\xi_i\sim \mbox{Ber}(p)$ \ENDFOR \STATE $\tilde{l_0}\leftarrow \#\{\tilde\xi_i=0\}$ \IF{$\min(\tilde{l_0}, n-\tilde{l_0})\ge 5$} \STATE $\xi_i \leftarrow \tilde\xi_i$ for all $1\leq i\leq n$ \STATE $l_0\leftarrow \#\{\xi_i=0\}$ \ENDIF \STATE $\pi^{(j+1)}\sim \mbox{Be}(a_0+l_0, b_0+n-l_0)$ \STATE $j\leftarrow j+1$ \ENDWHILE \STATE $\hat{\pi}\leftarrow \frac{1}{N_\text{it}-N_\text{it}/10+1}\sum_{j=N_\text{it}/10}^{N_\text{it}}\pi^{(j)}$ \end{algorithmic} \end{algorithm} We compare the four approaches (two alternatives for each approach) in terms of their frequentist power on the simulated examples of Section 4. The ROC curves are reported in Figure~\ref{fig:ROC}. As expected, for all models the statistical power increases for larger correlation values and decreases for larger levels of noise. We observe that the CT-BF approach using all iterations of the Gibbs sampling has significantly more power than the approach using a unique cluster. Regarding the mixture models approach, the posterior predictive two-step approach is slightly more powerful and is computationally much cheaper than the iterative approach. \begin{figure} \includegraphics[width=\textwidth]{Article_type4.pdf} \caption{ROC curves for competing methods as a function of correlation and noise level for models (a)--(d).
CT-BF 1 clust (blue dashed line), CT-BF (blue solid line), MixMod (red dashed line), and MixMod-ensemble (red solid line).} \label{fig:ROC} \end{figure} \section{Sensitivity to prior choices} All the proposed methods require the specification of several hyper-parameters controlling the prior distributions of the DPM and also of the tests themselves. We have investigated the impact of these choices and observed that none of the approaches is sensitive to the choice of the hyper-parameters controlling $G_0$ or $G_1$ in the DPM models. In the following, we illustrate the impact of the other hyper-parameters on simulated data sampled from the sinusoidal model described in Section~\ref{sec:simulated}. The contingency table approach is highly sensitive to the choice of the parameter $c_0$. This parameter influences the number of clusters in the DPM model and therefore the size of the contingency tables. The frequentist power of the method on the simulated sinusoidal example increases with the parameter $c_0$ (see Figure~\ref{fig:Sensitivity:CTROC}). However, the ROC curves are fairly insensitive to the choice of the parameter $a$ controlling the Dirichlet prior for the cell probabilities: $\alpha_{kl}=a$ for all $k$ and $l$. We suggest using $a=0.5$ as that value spreads the probability of dependence in the interval $(0,1)$ for the different levels of additive noise (see Figure~\ref{fig:Sensitivity:CT}). We observe in Figures~\ref{fig:Sensitivity:Mix} and~\ref{fig:Sensitivity:MixROC} that the performance of the MixMod-ensemble approach in terms of power is not sensitive to the choice of the parameters ($a_0$, $b_0$) of the beta prior for the mixture proportion $\pi$. \begin{figure} \includegraphics[width=\textwidth]{SN250Sensitivity_contingency_ROC.pdf} \caption{Power analysis of the CT-BF method illustrated on the simulated sinusoidal example (with different values of $\phi$ corresponding to different lines), varying the parameters $a$ and $c_0$.
} \label{fig:Sensitivity:CTROC} \end{figure} \begin{figure} \includegraphics[width=\textwidth]{SN250Sensitivity_contingency.pdf} \caption{Probability of dependence obtained following the CT-BF approach for different simulated data generated from the sinusoidal example as a function of the noise level ($\phi$). Each panel corresponds to a different value of the parameters $a$ and $c_0$. The box plots illustrate the distribution of the probabilities obtained for 50 Monte-Carlo samples.} \label{fig:Sensitivity:CT} \end{figure} \begin{figure} \includegraphics[width=\textwidth]{SN250Sensitivity_mixture.pdf} \caption{Probability of dependence obtained following the MixMod-ensemble approach for different simulated data generated from the sinusoidal example as a function of the noise level ($\phi$). Each panel corresponds to a different value of the parameters $a_0$ and $b_0$. The box plots illustrate the distribution of the probabilities obtained for 50 Monte-Carlo samples.} \label{fig:Sensitivity:Mix} \end{figure} \begin{figure} \includegraphics[width=\textwidth]{SN250Sensitivity_mixture_ROC.pdf} \caption{Power analysis of the MixMod-ensemble method illustrated on the simulated sinusoidal example (with different values of $\phi$ corresponding to different lines), varying the parameters $a_0$ and $b_0$.} \label{fig:Sensitivity:MixROC} \end{figure} \section{R code} In this section, we provide the R code that has been used to run our two independence tests on the WHO dataset. The data are stored in a matrix of size $194\times 562$, called \textit{data}. The first subsection presents the file containing the main functions of interest and the second subsection shows how to call these routines in parallel.
\subsection{functions.R}
\begin{lstlisting}
ndata=dim(data)[2]
# Prior specifications for CTBF
gammavec = 10
prior=list(alpha=gammavec,m2=rep(0,1),s2=diag(1,1),psiinv2=solve(diag(0.1,1)),nu1=3,nu2=3,tau1=1,tau2=100)
# Prior specifications for MixMod
prior_marg = list(a0=1,b0=10,m2=rep(0,1),s2=diag(100,1), psiinv2=solve(diag(0.1,1)), nu1=3,nu2=3,tau1=1,tau2=100)
prior_joint = list(a0=1,b0=10,m2=rep(0,2),s2=diag(100,2), psiinv2=solve(diag(0.1,2)), nu1=4,nu2=4,tau1=1,tau2=100)
# Information for MCMC scheme
state = NULL
mcmc = list(nburn=100,nsave=1000,nskip=5,ndisplay=10001)
# Function to fit DPMs on the two marginal and on the joint space
extractClusterInfoDPM_ind <-function(indvar){
  print(indvar)
  I=is.finite(data[,indvar])
  x=data[I,indvar]
  x=x-mean(x)
  x=x/sd(x)
  if (length(x)>10){
    id=seq(1,2*length(x),2,)
    tmp=tryCatch({
      x.fit = DPdensity(y=x,prior=prior,mcmc=mcmc,state=state,status=TRUE)
      tmp = matrix(NA,nsave,length(I))
      tmp[,I]=x.fit$save.state$randsave[,id]
      return(tmp)
    }, error=function(e){return(matrix(NA,nsave,length(I)))})
  }else{
    tmp=NA
  }
  return(tmp)
}
# Functions to perform tests based on Contingency Tables
pFromAllEval=function(ind){
  i=indI[ind]
  j=indJ[ind]
  x.means.all=allEval[[i]]
  y.means.all=allEval[[j]]
  if(sum(is.finite(x.means.all))>1 & sum(is.finite(y.means.all))>1){
    I=!(is.na(x.means.all[1,])|is.na(y.means.all[1,]))
    if (length(which(I==T))>10){
      xhere=x.means.all[,I]
      yhere=y.means.all[,I]
      pH1=lapply(seq(nsave), function(k) pFromAllEval1iter(xhere,yhere,k))
      out =mean(as.numeric(pH1))
    }else{
      out=NA
    }
  }else{
    out=NA
  }
  return(out)
}
pFromAllEval1iter = function(x,y,k){
  x.means=round(x[k,],8)
  y.means=round(y[k,],8)
  m=table(y.means,x.means)
  kx=ncol(m)
  ky=nrow(m)
  # computing Dirichlet test
  a=0.5
  mx=apply(m,2,sum)
  my=apply(m,1,sum)
  bm=sum(lgamma(a+m))-lgamma(sum(a+m))
  bmx=sum(lgamma(ky*a+mx))-lgamma(sum(ky*a+mx))
  bmy=sum(lgamma(kx*a+my))-lgamma(sum(kx*a+my))
  b=sum(lgamma(a+m-m))-lgamma(sum(a+m-m))
  bx=sum(lgamma(ky*a+mx-mx))-lgamma(sum(ky*a+mx-mx))
by=sum(lgamma(kx*a+my-my))-lgamma(sum(kx*a+my-my)) bf=exp(bmx-bx+bmy-by+b-bm) p1=1/(1+bf) return(p1) } # Function to perform MixMod-ensemble test MixMod2steps = function(ind){ x=data[,indI[ind]] y=data[,indJ[ind]] I=!(is.na(x)|is.na(y)) x=x[I] y=y[I] x=x-mean(x) y=y-mean(y) x=x/sd(x) y=y/sd(y) # do the two tests if (length(x)>10){ x.fit = DPdensity(y=x,prior=prior_marg,mcmc=mcmc2, state=state,status=TRUE, grid=x) y.fit = DPdensity(y=y,prior=prior_marg,mcmc=mcmc, state=state,status=TRUE, grid=y) xy.fit = DPdensity(y=cbind(x,y),prior=prior_joint,mcmc=mcmc2, state=state,status=TRUE, grid=cbind(x,y)) a0=0.5 b0=0.5 xy_d=diag(xy.fit$dens) interv=0.0001 alphaGrid = seq(interv,1-interv,interv) logpa =rep(NA,length(alphaGrid)) for (it in 1:length(alphaGrid)){ alpha=alphaGrid[it] logpa[it]=sum(log(alpha*xy_d+(1-alpha)*x.fit$dens*y.fit$dens))+log(dbeta(alpha,a0,b0)) } pa=exp(logpa+min(-500-logpa))/sum(exp(logpa+min(-500-logpa))*interv) res=sum(pa*alphaGrid)/length(alphaGrid) }else{ res=NA } return(res) } \end{lstlisting} \subsection{main.R} \mbox{Be}gin{lstlisting} library(parallel) library(DPpackage) source("functions.R") ncore=48 # Construct list of indexes ntmp=matrix(NA,ncol=ndata,nrow=ndata) indI=matrix(seq(ndata),ncol=ndata,nrow=ndata)[upper.tri(ntmp)] indJ=matrix(seq(ndata),ncol=ndata,nrow=ndata, byrow=T)[upper.tri(ntmp)] ######### For the CT-BF approach # for each variable, run DPM cl = makeCluster(ncore) clusterEvalQ(cl, source("functions.R")) allEval=parLapply(cl, seq(ndata), function(ind) extractClusterInfoDPM_ind(ind)) stopCluster(cl) #Run all the tests in parallel cl = makeCluster(ncore) clusterEvalQ(cl, source("functions_parallelCTBF.R")) clusterExport(cl, c("allEval", "indI","indJ")) pH1=parLapply(cl,seq(length(indI)), function(k) pFromAllEval(k)) stopCluster(cl) # reshape the results and write them in a text file pCT=matrix(NA,ndata,ndata) pCT[upper.tri(pCT)]=as.numeric(pH1) write.table(pCT,"WHOres_pCT_parallel.txt") ######### For the MixMod-ensemble approach 
cl = makeCluster(ncore) clusterEvalQ(cl, source("functions.R")) clusterExport(cl, c("indI","indJ")) t1=Sys.time() pH1=parLapply(cl,seq(length(indI)),function(i) tryCatch({tmp=MixMod2steps(i)}, error=function(e){return(NA)})) print(Sys.time()-t1) stopCluster(cl) # reshape the results pMM=matrix(NA,ntot,ntot) pMM[upper.tri(pMM)]=as.numeric(pH1) write.table(pMM,"WHOres_pMixMod_parallel.txt") \end{lstlisting} \end{document}
\begin{document} \newtheorem{theorem}{Theorem} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{claim}[theorem]{Claim} \newtheorem{cor}[theorem]{Corollary} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{definition}{Definition} \newtheorem{question}[theorem]{Question} \newtheorem{remark}[theorem]{Remark} \newcommand{{{\mathrm h}}}{{{\mathrm h}}} \numberwithin{equation}{section} \numberwithin{theorem}{section} \numberwithin{table}{section} \def\mathop{\sum\!\sum\!\sum}{\mathop{\sum\!\sum\!\sum}} \def\mathop{\sum\ldots \sum}{\mathop{\sum\ldots \sum}} \def\mathop{\sum \sum}{\mathop{\sum \sum}} \def\mathop{\int\ldots \int}{\mathop{\int\ldots \int}} \def\hbox{\rlap{$\sqcap$}$\sqcup$}{\hbox{\rlap{$\sqcap$}$\sqcup$}} \def\qed{\ifmmode\hbox{\rlap{$\sqcap$}$\sqcup$}{\mathbf{\,e}}lse{\unskip\nobreak\hfil \penalty50\hskip1em\null\nobreak\hfil\hbox{\rlap{$\sqcap$}$\sqcup$} \parfillskip=0pt\finalhyphendemerits=0{\mathbf{\,e}}ndgraf}\fi} \newfont{\tildeeneufm}{eufm10} \newfont{\seveneufm}{eufm7} \newfont{\fiveeufm}{eufm5} \newfam{\mathbf{\,e}}ufmfam \tildeextfont{\mathbf{\,e}}ufmfam=\tildeeneufm \scriptfont{\mathbf{\,e}}ufmfam=\seveneufm \scriptscriptfont{\mathbf{\,e}}ufmfam=\fiveeufm \def\frak#1{{\fam{\mathbf{\,e}}ufmfam\relax#1}} \newcommand{{\boldsymbol{\lambda}}}{{\boldsymbol{\lambda}}} \newcommand{{\boldsymbol{\mu}}}{{\boldsymbol{\mu}}} \newcommand{{\boldsymbol{\xi}}}{{\boldsymbol{\xi}}} \newcommand{{\boldsymbol{\rho}}}{{\boldsymbol{\rho}}} \def\mathfrak K{\mathfrak K} \def\mathfrak{T}{\mathfrak{T}} \def{\mathfrak A}{{\mathfrak A}} \def{\mathfrak B}{{\mathfrak B}} \def{\mathfrak C}{{\mathfrak C}} \def\mathsf {E}{\mathsf {E}} \def \balpha{\bm{\alpha}} \def \bbeta{\bm{\beta}} \def \bgamma{\bm{\gamma}} \def \blambda{\bm{\lambda}} \def \bchi{\bm{\chi}} \def \bphi{\bm{\varphi}} \def \bpsi{\bm{\psi}} \def{\mathbf{\,e}}qref#1{(\ref{#1})} \def\vec#1{\mathbf{#1}} \def{\mathcal A}{{\mathcal A}} \def{\mathcal B}{{\mathcal B}} \def{\mathcal C}{{\mathcal C}} \def{\mathcal 
D}{{\mathcal D}} \def{\mathcal E}{{\mathcal E}} \def{\mathcal F}{{\mathcal F}} \def{\mathcal G}{{\mathcal G}} \def{\mathcal H}{{\mathcal H}} \def{\mathcal I}{{\mathcal I}} \def{\mathcal J}{{\mathcal J}} \def{\mathcal K}{{\mathcal K}} \def{\mathcal L}{{\mathcal L}} \def{\mathcal M}{{\mathcal M}} \def{\mathcal N}{{\mathcal N}} \def{\mathcal O}{{\mathcal O}} \def{\mathcal P}{{\mathcal P}} \def{\mathcal Q}{{\mathcal Q}} \def{\mathcal R}{{\mathcal R}} \def{\mathcal S}{{\mathcal S}} \def{\mathcal T}{{\mathcal T}} \def{\mathcal U}{{\mathcal U}} \def{\mathcal V}{{\mathcal V}} \def{\mathcal W}{{\mathcal W}} \def{\mathcal X}{{\mathcal X}} \def{\mathcal Y}{{\mathcal Y}} \def{\mathcal Z}{{\mathcal Z}} \newcommand{\rmod}[1]{\: \mbox{mod} \: #1} \def{\mathcal g}{{\mathcal g}} \def{\mathbf{\,e}}{{\mathbf{\,e}}} \def{\mathbf{\,e}}p{{\mathbf{\,e}}_p} \def{\mathbf{\,e}}q{{\mathbf{\,e}}_q} \def{\mathrm{Tr}}{{\mathrm{Tr}}} \def{\mathrm{Nm}}{{\mathrm{Nm}}} \def{\mathrm{E}}{{\mathrm{E}}} \def{\mathrm{T}}{{\mathrm{T}}} \def{\mathbf{S}}{{\mathbf{S}}} \def{\mathrm{lcm}}{{\mathrm{lcm}}} \def\tilde{\tildeilde} \def\overline{\overlineerline} \def\left({\left(} \def\right){\right)} \def\left|{\left|} \def\right|{\right|} \def\fl#1{\left\lfloor#1\right\rfloor} \def\rf#1{\left\lceil#1\right\rceil} \def\flq#1{\langle #1 \rangle_q} \def\qquad \mbox{and} \qquad{\qquad \mbox{and} \qquad} \newcommand{\commIg}[1]{\marginpar{ \begin{color}{magenta} \vskip-\baselineskip \raggedright\footnotesize \itshape\hrule Ig: #1\par \hrule{\mathbf{\,e}}nd{color}}} \newcommand{\commSi}[1]{\marginpar{ \begin{color}{blue} \vskip-\baselineskip \raggedright\footnotesize \itshape\hrule Si: #1\par \hrule{\mathbf{\,e}}nd{color}}} \newcommand{\commB}[1]{\marginpar{ \begin{color}{red} \vskip-\baselineskip \raggedright\footnotesize \itshape\hrule Br: #1\par \hrule{\mathbf{\,e}}nd{color}}} \hyphenation{re-pub-lished} \mathsurround=1pt \defb{b} \overlineerfullrule=5pt \def \F{{\mathbb F}} \def \K{{\mathbb K}} \def \Z{{\mathbb 
Z}} \def \Q{{\mathbb Q}} \def \R{{\mathbb R}} \def \C{{\\mathbb C}} \def\F_p{\F_p} \def \fp{\F_p^*} \defS_{k,\ell,q}(m,n){S_{k,{\mathbf{\,e}}ll,q}(m,n)} \def\cK_p(m,n){{\mathcal K}_p(m,n)} \def\psi_p(m,n){\psi_p(m,n)} \def\cS_{k,\ell,q}(\cM){{\mathcal S}_{k,{\mathbf{\,e}}ll,q}({\mathcal M})} \def\cS_{k,\ell,q}(\cM)N{{\mathcal S}_{k,{\mathbf{\,e}}ll,q}({\mathcal M},{\mathcal N})} \def\cS_{k,\ell,q}(\cA;\cM,\cN){{\mathcal S}_{k,{\mathbf{\,e}}ll,q}({\mathcal A};{\mathcal M},{\mathcal N})} \def\cS_{k,\ell,q}(\cA,\cB;\cM,\cN){{\mathcal S}_{k,{\mathbf{\,e}}ll,q}({\mathcal A},{\mathcal B};{\mathcal M},{\mathcal N})} \def\cS_{k,\ell,q}(\cI,\cJ){{\mathcal S}_{k,{\mathbf{\,e}}ll,q}({\mathcal I},{\mathcal J})} \def\cS_{k,\ell,q}(\cA;\cJ){{\mathcal S}_{k,{\mathbf{\,e}}ll,q}({\mathcal A};{\mathcal J})} \def\cS_{k,\ell,q}(\cA, \cB;\cJ){{\mathcal S}_{k,{\mathbf{\,e}}ll,q}({\mathcal A}, {\mathcal B};{\mathcal J})} \def\cS_{k,q}^*(\cM){{\mathcal S}_{k,q}^*({\mathcal M})} \def\cS_{k,q}^*(\cM)N{{\mathcal S}_{k,q}^*({\mathcal M},{\mathcal N})} \def\cS_{k,q}^*(\cA;\cM,\cN){{\mathcal S}_{k,q}^*({\mathcal A};{\mathcal M},{\mathcal N})} \def\cS_{k,q}^*(\cA,\cB;\cM,\cN){{\mathcal S}_{k,q}^*({\mathcal A},{\mathcal B};{\mathcal M},{\mathcal N})} \def\cS_{k,q}^*(\cI,\cJ){{\mathcal S}_{k,q}^*({\mathcal I},{\mathcal J})} \def\cS_{k,q}^*(\cA;\cJ){{\mathcal S}_{k,q}^*({\mathcal A};{\mathcal J})} \def\cS_{k,q}^*(\cA, \cB;\cJ){{\mathcal S}_{k,q}^*({\mathcal A}, {\mathcal B};{\mathcal J})} \def\cS_{k,p}^*(\cA, \cB;\cJ){{\mathcal S}_{k,p}^*({\mathcal A}, {\mathcal B};{\mathcal J})} \def \xbar{\overlineerline x} \author[B. Kerr] {Bryce Kerr} \address{Department of Pure Mathematics, University of New South Wales, Sydney, NSW 2052, Australia} {\mathbf{\,e}}mail{[email protected]} \author[S. 
Macourt] {Simon Macourt} \address{Department of Pure Mathematics, University of New South Wales, Sydney, NSW 2052, Australia} \email{[email protected]} \keywords{exponential sum, sparse polynomial} \subjclass[2010]{11L07, 11T23} \title[Exponential sums with general weights]{Multilinear exponential sums with a general class of weights} \begin{abstract} In this paper we obtain some new estimates for multilinear exponential sums in prime fields with a more general class of weights than previously considered. Our techniques are based on some recent progress of Shkredov in Additive Combinatorics with roots in Rudnev's point--plane incidence bound. We apply our estimates to obtain new results concerning exponential sums with sparse polynomials and Weyl sums over small generalized arithmetic progressions. \end{abstract} \maketitle \section{Introduction} Given a prime number $p$, subsets ${\mathcal X}_1,\dots,{\mathcal X}_n \subseteq \F^{*}_p$ and sequences of complex numbers $\omega_1(\textbf{x}),\dots,\omega_n(\textbf{x})$, we define the weighted multilinear exponential sum over $n$ variables by \begin{align} \label{eq:S(X1XN)} S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)= \sum_{x_1\in{\mathcal X}_1}\dots \sum_{x_n\in{\mathcal X}_n} \omega_1(\textbf{x}) \dots \omega_n(\textbf{x}) {\mathbf{\,e}}_p(x_1\dots x_n), \end{align} where $\omega_i$ are $(n-1)$-dimensional weights that depend on all but the $i$th variable and ${\mathbf{\,e}}_p(u) =\exp(2 \pi i u/p)$. Assuming each $|\omega_i(\textbf{x})|\le 1$, we are interested in obtaining upper bounds of the form \begin{align*} \left|S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)\right|\le X_1\dots X_np^{-\delta}, \end{align*} where $|{\mathcal X}_i|=X_i$.
The first result in this direction is Vinogradov's bilinear estimate and states that \begin{align} \label{eq:Vin} \left|\sum_{x_1\in X_1}\sum_{x_2\in X_2}\omega_1(x_2)\omega_2(x_1)e_p(x_1x_2) \right|\le p^{1/2}X_1^{1/2}X_2^{1/2}, \end{align} which is nontrivial provided $X_1X_2>p$. For values of $n\ge 3$ progress has been made through Additive Combinatorics, with the first results due to Bourgain, Glibichuk and Konyagin~\cite{BouGliKon} under some restrictions on the sets, weights and number of variables occurring in~\eqref{eq:S(X1XN)}, although their result was general enough to obtain new estimates for sums over small subgroups. Bourgain~\cite{Bour2} extended the results of~\cite{BouGliKon} and obtained an optimal result with respect to the size of $X_1\dots X_n$. In particular, Bourgain showed that for all $\varepsilon>0$ there exists a $\delta>0$ such that \begin{align*} \sum_{x_1\in{\mathcal X}_1}\dots \sum_{x_n\in{\mathcal X}_n} e_p(x_1\dots x_n)\ll X_1\dots X_np^{-\delta}, \end{align*} provided \begin{align*} X_i>p^{\varepsilon}, \quad X_1\dots X_n\ge p^{1+\varepsilon}, \end{align*} and we note that Bourgain gives the dependence of $\delta$ on $\varepsilon$. Recently, Shkredov~\cite{Shkr3} has made significant quantitative improvements to the results of Bourgain by exploiting a direct connection with geometric incidence estimates of Rudnev~\cite{Rud}. Of particular relevance are the results of Petridis and Shparlinski~\cite{PetShp} and Macourt~\cite{Mac1} for recent estimates of three and four dimensional multilinear sums, and Shkredov~\cite{Shkr} for the sharpest current results for exponential sums over subgroups of medium size. We mention that a direct application of the methods from~\cite{PetShp,Mac1} is unable to give bounds for multilinear sums beyond four dimensional sums.
However, in this paper we are able to break through this barrier and apply related techniques to give new non-trivial results for multilinear sums beyond four variables. \newline Given a set ${\mathcal A}\subseteq \F_p$ and an integer $k$ we let $D^{\times}_k({\mathcal A})$ count the number of solutions to the equation \begin{align*} (a_1-a_2)(a_3-a_4)\dots (a_{2k-1}-a_{2k})=(b_1-b_2)(b_3-b_4)\dots (b_{2k-1}-b_{2k}), \end{align*} for $a_i,b_i\in {\mathcal A}$. The quantity $D^{\times}_k({\mathcal A})$ plays an important role in our arguments and we obtain some new estimates for $D^{\times}_k({\mathcal A})$, one of which improves the error term in a result of Shkredov~\cite[Theorem~32]{Shkr3} for sets of cardinality $|{\mathcal A}|\ge p^{1/2}$. We then apply our estimates to obtain some new bounds for sums of the form~\eqref{eq:S(X1XN)} which are motivated by applications to exponential sums with sparse polynomials and Weyl sums over small generalized arithmetic progressions. \newline Given tuples of integers $a_1,\dots,a_t$ and $k_1,\dots,k_t$ we define the $t$-sparse polynomial \begin{align} \label{eq:poly123} \Psi(X) = \sum_{i=1}^t a_iX^{k_i}, \end{align} and consider the exponential sums \begin{align} \label{eq:mult} T_\chi(\Psi) = \sum_{x\in \F^*_p} \chi(x) {\mathbf{\,e}}_p (\Psi(x)). \end{align} Multinomial exponential sums of the form \eqref{eq:mult} have been studied extensively. We first note by the Weil bound, see \cite[Appendix 5, Example 12]{Weil}, that \begin{align*} |T_\chi(\Psi)| \le p^{1/2} \max(k_1, \dots, k_t). \end{align*} When $k_1,\dots,k_t$ are small the above estimate is sharp and we consider the case when $\max(k_1, \dots, k_t)$ grows with $p$.
In this setting, progress on the simplest case of monomials was first made by Shparlinski \cite{Shp1} and was further improved by Heath-Brown and Konyagin~\cite{HBK} using techniques based on Stepanov's method, although the current sharpest estimates are based on Additive Combinatorics, see for example~\cite{BouGliKon,BouKon}. More general sums of the form~{\mathbf{\,e}}qref{eq:mult} were first considered by Mordell~\cite{Mord} and are often referred to as Mordell's exponential sum and we refer the reader to \cite{Bour,CoCoPi1, CoCoPi2, CoPi1, CoPi2, CoPi3, CoPi4} for previous estimates of these sums. We also mention the cases of trinomials and quadrinomials have been given new bounds in \cite{Mac2} and \cite{MSS} and we follow these techniques to reduce to multilinear sums of the form~{\mathbf{\,e}}qref{eq:S(X1XN)}. \newline A second application of our bound for the sums~{\mathbf{\,e}}qref{eq:S(X1XN)} is a new estimate Weyl sums over small generalized arithmetic progressions. Generalized arithmetic progressions are defined as sets of the form $${\mathcal A}=\{\alpha_1h_1+\dots+\alpha_rh_r+\beta \ : \ 1\le h_i\le H_i\}.$$ For ${\mathcal A}$ as above, we define the rank of ${\mathcal A}$ to be $r$ and say that ${\mathcal A}$ is proper if $$|{\mathcal A}|=H_1\dots H_r.$$ Shao~\cite{Shao1} has previously shown that for any polynomial $F$ we have \begin{align} \sum_{a\in {\mathcal A}}{\mathbf{\,e}}_p(F(a))\ll_r p^{1/2+o(1)}, {\mathbf{\,e}}nd{align} which can be considered a P\'{o}lya-Vinogradov type estimate for generalised arithmetic progressions. We use our estimates for~{\mathbf{\,e}}qref{eq:S(X1XN)} to obtain a power saving for Weyl sums over proper generalized arithmetic progressions with an essentially optimal range on the cardinality of ${\mathcal A}$, see Theorem~~\ref{thm:genap}. 
\newline For the entirety of this paper we let $|{\mathcal X}_i|=X_i$, and similarly for other sets $|{\mathcal Y}|=Y$. We also use the notation $A \ll B$ to indicate $A \le c|B|$ for some absolute constant $c$, and similarly $A \ll_k B$ to mean the same where $c$ depends on some parameter $k$. \subsection{Main Results} In what follows we keep notation as in~\eqref{eq:S(X1XN)}. \begin{theorem} \label{thm:multlin2} Let $n\ge 4$ and let ${\mathcal X}_i \subset \F^{*}_p$ be subsets satisfying \begin{align*} |{\mathcal X}_i|=X_i, \quad X_1\ge X_2\ge\dots\ge X_{n}, \end{align*} and \begin{align*} X_1X_n^{1/2} \le p. \end{align*} Then we have \begin{align*} &S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n) \\ &\ll_n X_1\dots X_n \left(\frac{1}{X_1^{1/2}}+\dots+\frac{1}{X_n^{1/2^n}}+p^{\frac{1}{2^n}}X_1^{-\frac{1}{2^n}} X_n^{-\frac{1}{2^{n+1}}}\prod_{i=2}^{n-1} B_{n}({\mathcal X}_i)\right) \end{align*} where \begin{align*} B_n({\mathcal X}) = \left\{ \begin{array}{ll} p^{\frac{1}{2^{2n-3}(n-2)}}X^{-\frac{2^{n-2}+1}{2^{2n-3}(n-2)}+o(1)},& \text{if $ p^{\frac{1}{2}+\frac{1}{2^{n-1}+2}}\ge X \ge p^{\frac{217}{433}}$},\\ X^{-\frac{2^{n-2}-1+2c_1}{2^{2n-3}(n-2)}+o(1)},& \text{if $ p^{\frac{217}{433}} > X\ge p^{\frac{48}{97}}$}, \\ X^{-\frac{2^{n-2}-1+2c_2}{2^{2n-3}(n-2)}+o(1)},& \text{if $ X <p^{\frac{48}{97}}$}, \end{array} \right. \end{align*} and $c_1=\frac{1}{434}$ and $c_2 = \frac{1}{192}$. \end{theorem} We give an example of when Theorem \ref{thm:multlin2} is nontrivial. Suppose $n=6$ and $X_1=X_2=\dots =X_6\le p^{\frac{48}{97}}$. Then we have \begin{align*} S({\mathcal X}_1, \dots, {\mathcal X}_6; \omega_1, \dots, \omega_6) \ll p^{\frac{1}{64}}X_1^{\frac{3110399}{524288}+o(1)}.
{\mathbf{\,e}}nd{align*} One can see that this is stronger than the trivial bound \begin{align*} S({\mathcal X}_1, \dots, {\mathcal X}_6; \omega_1, \dots, \omega_6)\ll X_1^6 {\mathbf{\,e}}nd{align*} for $X_1>p^{8/27}$. In the case of sets of cardinality a little larger than $p^{1/2}$ we can obtain sharper estimates. \begin{theorem} \label{thm:multlin3} Let ${\mathcal X}_i \subset \F_p$ satisfy $|{\mathcal X}_i|=X_i,$ $X_1\ge X_2\ge\dots\ge X_{n}$ \begin{align} \label{thm:multlin3cond} |{\mathcal X}_i|\ge p^{1/2+1/(2^{n+1}-6)}. {\mathbf{\,e}}nd{align} Then we have \begin{align*} &|S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)|\ll_n \\ & X_1\dots X_n\left(\frac{1}{X_1^{1/2}}+\dots+\frac{1}{X_n^{1/2^n}}+p^{o(1)}\left(\frac{p^{1/2}}{(X_1\dots X_n)^{1/n}}\right)^{1/2^n}\right). {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{theorem} The following is a consequence of Theorem~\ref{thm:multlin2}. \begin{theorem} \label{thm:multinom} Let $\Psi(X)$ be a multinomial of the form {\mathbf{\,e}}qref{eq:poly123}, with co-efficients $a_i \in \F^*_p$ for $i=1, \dots , t$. We define \begin{align*} \alpha_{k_i}= \gcd(k_i, p-1) {\mathbf{\,e}}nd{align*} and \begin{align*} \beta_{k_i}= \frac{\alpha_{k_i}}{\gcd(\alpha_{k_i}, \alpha_{k_t})}. {\mathbf{\,e}}nd{align*} Suppose $\beta_{k_1}\ge \dots \ge \beta_{k_{t-1}}$. Then \begin{align*} &T_\chi(\Psi) \\ & \ll p\left( \left(\frac{\alpha_{k_t}}{p-1}\right)^{\frac{1}{2}}+\beta_{k_1}^{\frac{-1}{2^2}} +\dots+\beta_{k_{t-1}}^{\frac{-1}{2^{t}}}+p^{\frac{1}{2^t}}C_t(\alpha_{k_t}) \prod_{i=1}^{t-2}D_t(\beta_{k_i})\right) {\mathbf{\,e}}nd{align*} where \begin{align*} C_t(\alpha) = \left\{ \begin{array}{ll} \alpha^{\frac{3}{2^{t+1}}}p^{-\frac{3}{2^{t+1}}},& \tildeext{if $ \alpha\ge p^{\frac{1}{2}}\log{p}$},\\ \alpha^{\frac{1}{2^{t+1}}}p^{-\frac{1}{2^t}}, &\tildeext{if $\alpha< p^{\frac{1}{2}}\log{p}$}, {\mathbf{\,e}}nd{array} \right. 
{\mathbf{\,e}}nd{align*} and \begin{align*} D_t(\beta) = \left\{ \begin{array}{ll} p^{-\frac{1}{2^t(t-2)}},& \tildeext{if $ \beta \ge p^{\frac{1}{2}}\log{p}$},\\ \beta^{-\frac{1}{2^{t-1}(t-2)}}, & \tildeext{if $\beta< p^{\frac{1}{2}}\log{p}$}. {\mathbf{\,e}}nd{array} \right. {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{theorem} We mention that Theorem \ref{thm:multinom} returns the same bound as \cite[Theorem 1.1]{Mac2} when $t=4$. We also mention the strength in this bound is that it relies on mutual greatest common divisors, rather than the size of the exponents. With this in mind, one can give examples of when this is stronger than all known bounds for a given $t$ by first ensuring that $\alpha_{k_t}$ is small and each of the powers are large. We direct the reader to \cite[Corollary 1.2]{Mac2} for such an example for the case $t=4$. Combining ideas from the proof of Theorem~\ref{thm:multlin2} with estimates of Bourgain for multilinear sums we extend a result of Shao~\cite{Shao1} to the setting of Weyl sums over small generalized arithmetic progressions. \begin{theorem} \label{thm:genap} Let $p$ be prime, ${\mathcal A}\subseteq \F_p$ a proper generalized arithmetic progression of rank $r$ and $F\in \F_p[X]$ a polynomial of degree $d$. For any $\varepsilon>0$ there exists some $\delta>0$ such that if \begin{align} \label{eq:genAPcond} |{\mathcal A}|\ge p^{1/d+\varepsilon}, {\mathbf{\,e}}nd{align} then \begin{align*} \sum_{a\in {\mathcal A}}e_p(F(a))\ll_{r,d} |{\mathcal A}|p^{-\delta}. {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{theorem} We note the condition $|{\mathcal A}|\ge p^{1/d+\varepsilon}$ is sharp which may be seen by considering the example \begin{align*} {\mathcal A}=\{1,2,\dots,\lfloor p^{1/d}/10 \rfloor\}, \quad F(x)=x^{d}, {\mathbf{\,e}}nd{align*} so that \begin{align*} \sum_{a\in {\mathcal A}}e_p(F(a))\gg |{\mathcal A}|. 
\end{align*} \section{Multilinear Exponential Sums} \subsection{Reduction to mean values} \label{sec:mv} The following result is a variant of \cite[Lemma~2.10]{PetShp} which is more suitable for applications to exponential sums when the variables may run through sets of differing cardinalities. \begin{lemma} \label{lem:SXin} Let $n \ge 2$. Suppose $S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)$ is defined as in \eqref{eq:S(X1XN)}. Then \begin{align*} &|S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)|^{2^{n-1}}\ll (X_1\dots X_n)^{2^{n-1}}\left(\frac{1}{X_n^{2^{n-2}}}+\dots+\frac{1}{X_2} \right) \\ & \qquad \qquad + X_1^{2^{n-1}-1}(X_2 \dots X_n)^{2^{n-1}-2} \sum_{\substack{x_2,y_2 \in{\mathcal X}_2 \\ x_2\neq y_2}}\dots \sum_{\substack{x_n, y_n \in{\mathcal X}_n \\ x_n\neq y_n}} \\ &\qquad \qquad \qquad \qquad \qquad \times\left|\sum_{x_1 \in {\mathcal X}_1}{\mathbf{\,e}}_p(x_1(x_2-y_2)\dots (x_n-y_n))\right|. \end{align*} \end{lemma} \begin{proof} We proceed by induction on $n$ and first consider the case $n=2$. Our sums take the form \begin{align*} S({\mathcal X}_1,{\mathcal X}_2,\omega_1,\omega_2)=\sum_{x_1\in {\mathcal X}_1}\sum_{x_2\in {\mathcal X}_2}\omega_1(x_2)\omega_2(x_1)e_p(x_1x_2), \end{align*} and hence by the Cauchy-Schwarz inequality \begin{align*} \left|S({\mathcal X}_1,{\mathcal X}_2,\omega_1,\omega_2)\right|^2\le X_1\sum_{x_1\in {\mathcal X}_1}\left|\sum_{x_2\in {\mathcal X}_2}e_p(x_1x_2) \right|^2. \end{align*} Expanding the square, interchanging summation and isolating the diagonal contribution, we get \begin{align*} \left|S({\mathcal X}_1,{\mathcal X}_2,\omega_1,\omega_2)\right|^2\le X_1^2X_2+X_1 \sum_{\substack{x_2,y_2\in {\mathcal X}_2 \\ x_2\neq y_2}}\left|\sum_{x_1\in {\mathcal X}_1}e_p(x_1(x_2-y_2)) \right|.
{\mathbf{\,e}}nd{align*} Suppose the statement of Lemma~\ref{lem:SXin} is true for some integer $n-1\ge 2$ and consider the sums $S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)$. By the Cauchy-Schwarz inequality \begin{align*} & \left|S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)\right|^2\le X_1\dots X_{n-1} \\ &\sum_{\substack{x_i\in {\mathcal X}_i \\ 1\le i \le n-1}}\left|\sum_{x_n\in {\mathcal X}_n}\omega_1(\tildeextbf{x})\dots \omega_{n-1}(\tildeextbf{x})e_p(x_1\dots x_n) \right|^2, {\mathbf{\,e}}nd{align*} which after expanding the square, interchanging summation and isolating the diagonal contribution results in \begin{align*} \left|S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)\right|^2 &\le \frac{(X_1\dots X_n)^2}{X_n} \\ &+X_1\dots X_{n-1}\sum_{\substack{x_n,y_n\in {\mathcal X}_n \\ x_n\neq y_n}}S(x_n,y_n), {\mathbf{\,e}}nd{align*} where \begin{align*} &S(x_n,y_n)= \\ & \left|\sum_{\substack{x_i\in {\mathcal X}_i \\ 1\le i \le n-1}}\omega_1'(\mathbf{x}',x_n,y_n)\dots \omega_{n-1}'(\mathbf{x}',x_n,y_n)e_p(x_1\dots x_{n-1}(x_n-y_n)) \right|, {\mathbf{\,e}}nd{align*} and $$\tildeextbf{x}'=(x_1,\dots,x_{n-1}), \quad \omega_j(\tildeextbf{x}',x_n,y_n)=\omega_j(\tildeextbf{x}',x_n)\overlineerline \omega_j(\tildeextbf{x}',y_n).$$ By H\"{o}lder's inequality \begin{align*} &\left|S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)\right|^{2^{n-1}}\ll \frac{(X_1\dots X_n)^{2^{n-1}}}{X_n^{2^{n-2}}} \\ &\quad +(X_1\dots X_{n-1})^{2^{n-2}}X_n^{2^{n-1}-2}\sum_{\substack{x_n,y_n\in {\mathcal X}_n \\ x_n\neq y_n}}S(x_n,y_n)^{2^{n-2}}. {\mathbf{\,e}}nd{align*} We next fix some pair $x_n\neq y_n$ and apply our induction hypothesis to the sum $S(x_n,y_n)$. 
This gives \begin{align*} & S(x_n,y_n)^{2^{n-2}}\le (X_1\dots X_{n-1})^{2^{n-2}}\left(\frac{1}{X_{n-1}^{2^{n-3}}}+\dots+\frac{1}{X_2} \right) \\ &+X_1^{2^{n-2}-1}(X_2\dots X_{n-1})^{2^{n-2}-2} \\ &\sum_{\substack{x_i,y_i\in {\mathcal X}_i \\ x_i\neq y_i \\ 2\le i \le n-1}}\left|\sum_{x_1\in {\mathcal X}_1}e_p(x_1(x_2-y_2)\dots(x_{n-1}-y_{n-1})(x_n-y_n)) \right|, {\mathbf{\,e}}nd{align*} which combined with the above implies \begin{align*} &\left|S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)\right|^{2^{n-1}}\le (X_1\dots X_n)^{2^{n-1}}\left(\frac{1}{X_n^{2^{n-2}}}+\dots+\frac{1}{X_2} \right) \\&+X_1^{2^{n-1}-1}(X_2\dots X_n)^{2^{n-1}-2} \\ & \tildeimes \sum_{\substack{x_i,y_i\in {\mathcal X}_i \\ x_i\neq y_i \\ 2\le i \le n-1}}\left|\sum_{x_1\in {\mathcal X}_1}e_p(x_1(x_2-y_2)\dots(x_{n-1}-y_{n-1})(x_n-y_n)) \right|, {\mathbf{\,e}}nd{align*} and completes the proof. {\mathbf{\,e}}nd{proof} We mention that the above proof is independent of the sizes of the $X_i$, and as such the lemma is left without such restrictions. For any set ${\mathcal A} \subset \F_p$ we define \begin{align*} &D^\tildeimes_k({\mathcal A}) \\ &= |\{(a_1-a_2)\dots(a_{2k-1}-a_{2k}) = (b_1-b_2)\dots(b_{2k-1}-b_{2k}) : a_i, b_i \in {\mathcal A}\}|, {\mathbf{\,e}}nd{align*} and extend the notation when variables run through different sets by defining $D^\tildeimes_k({\mathcal X}_1, \dots, {\mathcal X}_k)$ to be the number of solutions to \begin{align*} (w_1-x_1)\dots(w_{k}-x_{k}) = (y_1-z_1)\dots(y_{k}-z_{k}), {\mathbf{\,e}}nd{align*} for $w_i, x_i, y_i, z_i \in {\mathcal X}_i$. Finally, we use the notation $D_k^{\tildeimes, *}$ for the above cases where we exclude the solutions when the equation is $0$ and define \begin{align*} \widetilde D_k^{\tildeimes, *}({\mathcal X}_1,\dots,{\mathcal X}_k)=D_k^{\tildeimes, *}({\mathcal X}_1,\dots,{\mathcal X}_k)-\frac{\left(\prod_{i=1}^kX_i(X_i-1) \right)^2}{p-1}. 
{\mathbf{\,e}}nd{align*} We note that $\widetilde D_k^{\tildeimes, *}$ is the error in approximation of $D_k^{\tildeimes, *}$ by the expected main term. \begin{lemma} \label{lem:Dktimes*} Let ${\mathcal X}_1, \dots ,{\mathcal X}_k \subset \F_p$. Then \begin{align*} D^{\tildeimes,*}_k({\mathcal X}_1, \dots, {\mathcal X}_k) \le (D^{\tildeimes,*}_k({\mathcal X}_1)\dots D^{\tildeimes,*}_k({\mathcal X}_k))^{1/k}. {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{lemma} \begin{proof} We let $K=D^{\tildeimes,*}_k({\mathcal X}_1, \dots, {\mathcal X}_k)$ and express $K$ in terms of multiplicative characters \begin{align*} K&= \sum_{w_1,x_1,y_1,z_1 \in {\mathcal X}_1} \dots \sum_{w_k,x_k,y_k,z_k \in {\mathcal X}_k} \\ &\qquad \quad \frac{1}{p-1}\sum_{\chi \in \Omega} \chi(w_1-x_1)\dots(w_{k}-x_{k}) \overlineerline{\chi}(y_1-z_1)\dots(y_{k}-z_{k}) {\mathbf{\,e}}nd{align*} where $\Omega$ is the set of all distinct characters. Clearly, \begin{align*} K = \frac{1}{p-1}\sum_{\chi \in \Omega}\left| \sum_{w_1,x_1\in {\mathcal X}_1} \chi(w_1-x_1)\right|^2 \dots \left|\sum_{w_k,x_k\in {\mathcal X}_k} \chi(w_k-x_k)\right|^2. {\mathbf{\,e}}nd{align*} Using Holder's inequality, we obtain \begin{align*} K^k &\le \frac{1}{(p-1)^k} \sum_{\chi \in \Omega}\left| \sum_{w_1,x_1\in {\mathcal X}_1} \chi(w_1-x_1)\right|^{2k} \dots \sum_{\chi \in \Omega}\left| \sum_{w_k,x_k\in {\mathcal X}_k} \chi(w_k-x_k)\right|^{2k}\\ &= D^{\tildeimes,*}_k({\mathcal X}_1)\dots D^{\tildeimes,*}_k({\mathcal X}_k). {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{proof} The proof of the following is similar to that of Lemma~\ref{lem:Dktimes*} with summation only over non-principal characters. \begin{lemma} \label{lem:tildeDktimes*} Let ${\mathcal X}_1, \dots ,{\mathcal X}_k \subset \F_p$. Then \begin{align*} \widetilde D^{\tildeimes,*}_k({\mathcal X}_1, \dots, {\mathcal X}_k) \le (\widetilde D^{\tildeimes,*}_k({\mathcal X}_1)\dots \widetilde D^{\tildeimes,*}_k({\mathcal X}_k))^{1/k}. 
{\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{lemma} Using Lemma~\ref{lem:SXin}, Lemma~\ref{lem:Dktimes*} and Lemma~\ref{lem:tildeDktimes*} we give two general results relating estimates for $S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)$ to the quantities $D^\tildeimes_k({\mathcal A})$ and $\widetilde D^\tildeimes_k({\mathcal A})$. We first recall the classic Vinogradov bilinear estimate, see \cite[Equation 1.4]{BouGar} or \cite[Lemma 4.1]{Gar}. \begin{lemma} \label{lem:bilin} For any sets ${\mathcal X}, {\mathcal Y} \subseteq \F_p$ and any $\alpha= (\alpha_{x})_{x\in {\mathcal X}}$, $\beta = \left( \beta_{y}\right)_{y \in {\mathcal Y}}$ with \begin{align*} \sum_{x\in {\mathcal X}}|\alpha_{x}|^2 = A \qquad \mbox{and} \qquad \sum_{y \in {\mathcal Y}}|\beta_{y}|^2 = B, {\mathbf{\,e}}nd{align*} we have \begin{align*} \left |\sum_{x \in {\mathcal X}}\sum_{y \in {\mathcal Y}} \alpha_{x} \beta_{y} {\mathbf{\,e}}p(xy) \right| \le \sqrt{pAB}. {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{lemma} \begin{lemma} \label{lem:SMV} Let $n \ge 2$. Suppose $S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)$ is defined as in {\mathbf{\,e}}qref{eq:S(X1XN)} and that $$X_1\ge X_2\dots \ge X_n.$$ Then \begin{align*} &|S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)|^{2^{n}}\ll (X_1\dots X_n)^{2^{n}}\left(\frac{1}{X_1^{2^{n-1}}}+\dots+\frac{1}{X_{n-1}^2} \right) \\ & \qquad \qquad + pX_n^{2^{n}-1}(X_1 \dots X_{n-1})^{2^{n}-4}(D^{\tildeimes,*}_{n-1}({\mathcal X}_1)\dots D^{\tildeimes,*}_{n-1}({\mathcal X}_{n-1}))^{1/(n-1)}. 
{\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{lemma} \begin{proof} Writing $$S=\sum_{\substack{x_1,y_1 \in{\mathcal X}_1 \\ x_1\neq y_1}}\dots \sum_{\substack{x_{n-1}, y_{n-1} \in{\mathcal X}_{n-1} \\ x_{n-1}\neq y_{n-1}}}\left|\sum_{x_n \in {\mathcal X}_n}{\mathbf{\,e}}p(x_n(x_1-y_1)\dots (x_{n-1}-y_{n-1}))\right|,$$ by Lemma~\ref{lem:SXin} it is sufficient to show that \begin{align*} &S^2\le pX_n(D^{\tildeimes,*}_{n-1}({\mathcal X}_1)\dots D^{\tildeimes,*}_{n-1}({\mathcal X}_{n-1}))^{1/(n-1)}. {\mathbf{\,e}}nd{align*} Let $I(\lambda)$ count the number of solutions to the equation $$\lambda=(x_1-y_1)\dots(x_{n-1}-y_{n-1}), \quad x_i,y_i\in {\mathcal X}_i, \ \ x_i\neq y_i,$$ so that \begin{align*} S=\sum_{\lambda}I(\lambda)\left|\sum_{x_n\in {\mathcal X}_n}e_p(\lambda x_1) \right|, {\mathbf{\,e}}nd{align*} and hence by Lemma~\ref{lem:bilin} \begin{align*} S^2\le \left(\sum_{\lambda}I(\lambda)^2\right)pX_n, {\mathbf{\,e}}nd{align*} and the result follows from Lemma~\ref{lem:Dktimes*} since \begin{align*} \sum_{\lambda}I(\lambda)^2=D^\tildeimes_{n-1}({\mathcal X}_1, \dots, {\mathcal X}_{n-1}). {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{proof} Our next estimate does better in applications over Lemma~\ref{lem:SMV} when our sets have ${\mathcal X}_1,\dots,{\mathcal X}_n$ have large cardinalities. \begin{lemma} \label{lem:SMV1} Let $n \ge 2$. Suppose $S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)$ is defined as in {\mathbf{\,e}}qref{eq:S(X1XN)}. Then we have \begin{align*} &|S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)|^{2^{n}}\ll (X_1\dots X_n)^{2^{n}}\left(\frac{1}{X_1^{2^{n-1}}}+\dots+\frac{1}{X_n} \right) \\ & \qquad \qquad +p^{1/2}(X_1\dots X_n)^{2^n-2}(\widetilde D^{\tildeimes,*}_n({\mathcal X}_1)\dots \widetilde D^{\tildeimes,*}_n({\mathcal X}_n))^{1/2n}. 
{\mathbf{\,e}}nd{align*} \begin{proof} Writing $$S=\sum_{\substack{x_2,y_2 \in{\mathcal X}_2 \\ x_2\neq y_2}}\dots \sum_{\substack{x_n, y_n \in{\mathcal X}_n \\ x_n\neq y_n}}\left|\sum_{x_1 \in {\mathcal X}_1}{\mathbf{\,e}}p(x_1(x_2-y_2)\dots (x_n-y_n))\right|,$$ by Lemma~\ref{lem:SXin} it is sufficient to show that \begin{align*} &S^2\le \frac{(X_1\dots X_n)^4}{X_1^2} \\ & \quad \quad \quad +(X_2\dots X_n)^2p^{1/2}(\widetilde D^{\tildeimes,*}_n({\mathcal X}_1)\dots \widetilde D^{\tildeimes,*}_n({\mathcal X}_n))^{1/2n}. {\mathbf{\,e}}nd{align*} Applying the Cauchy-Schwarz inequality, interchanging summation and isolating the diagonal contribution gives \begin{align} \label{eq:SMVS2} S^2\le X_1(X_2\dots X_n)^4+(X_2\dots X_n)^2\left|\sum_{\lambda=1}^{p-1}I(\lambda)e_p(\lambda)\right|, {\mathbf{\,e}}nd{align} where $I(\lambda)$ counts the number of solutions to the equation \begin{align*} (x_1-y_1)\dots(x_n-y_n)=\lambda, \quad x_i,y_i\in {\mathcal X}_i, \ \ x_i\neq y_i. {\mathbf{\,e}}nd{align*} Let $$\Delta=\frac{X_1(X_1-1)\dots X_n(X_n-1)}{p-1},$$ and write \begin{align*} \sum_{\lambda=1}^{p-1}I(\lambda)e_p(\lambda)=\Delta\sum_{\lambda=1}^{p-1}e_p(\lambda)+\sum_{\lambda=1}^{p-1}(I(\lambda)-\Delta)e_p(\lambda). {\mathbf{\,e}}nd{align*} We have \begin{align} \label{eq:SMVS2-1} \left|\sum_{\lambda=1}^{p-1}I(\lambda)e_p(\lambda)\right|\ll \frac{(X_1\dots X_n)^2}{p}+\sum_{\lambda=1}^{p-1}|I(\lambda)-\Delta|. {\mathbf{\,e}}nd{align} With notation as in Lemma~\ref{lem:tildeDktimes*}, by the Cauchy-Schwarz inequality \begin{align*} \sum_{\lambda=1}^{p-1}|I(\lambda)-\Delta|\le p^{1/2}\left(\sum_{\lambda=1}^{p-1}|I(\lambda)-\Delta|^2\right)^{1/2}=p^{1/2}\widetilde D^{\tildeimes,*}_n({\mathcal X}_1, \dots, {\mathcal X}_n)^{1/2}, {\mathbf{\,e}}nd{align*} and hence \begin{align*} \sum_{\lambda=1}^{p-1}|I(\lambda)-\Delta|\le p^{1/2}(\widetilde D^{\tildeimes,*}_n({\mathcal X}_1)\dots \widetilde D^{\tildeimes,*}_n({\mathcal X}_n))^{1/2n}. 
{\mathbf{\,e}}nd{align*} Combining the above with~{\mathbf{\,e}}qref{eq:SMVS2} and~{\mathbf{\,e}}qref{eq:SMVS2-1} gives \begin{align*} S^2&\le \frac{(X_1\dots X_n)^4}{X_1^3}+\frac{(X_1\dots X_n)^4}{p}\\ &\qquad \qquad +(X_2\dots X_n)^2p^{1/2}(\widetilde D^{\tildeimes,*}_n({\mathcal X}_1)\dots \widetilde D^{\tildeimes,*}_n({\mathcal X}_n))^{1/2n} \\ &\ll \frac{(X_1\dots X_n)^4}{X_1^3}+(X_2\dots X_n)^2p^{1/2}(\widetilde D^{\tildeimes,*}_n({\mathcal X}_1)\dots \widetilde D^{\tildeimes,*}_n({\mathcal X}_n))^{1/2n}, {\mathbf{\,e}}nd{align*} and completes the proof. {\mathbf{\,e}}nd{proof} {\mathbf{\,e}}nd{lemma} \subsection{Estimates for $D^{\tildeimes}_k({\mathcal A})$} In this section we give estimates for $D^{\tildeimes}_k({\mathcal A})$ which will be combined with results from Section~\ref{sec:mv} to obtain estimates for multilinear sums. We first recall the following result \cite[Theorem 32]{Shkr3}. \begin{lemma} \label{lem:DtimesE+} Suppose ${\mathcal A} \subset \F_p$ is a set and $|{\mathcal A}|=A$. For all $k\ge 2$ \begin{align*} D^\tildeimes_k({\mathcal A}) - \frac{A^{4k}}{p} \ll_k (\log A)^4 A^{4k-2-2^{-k+2}}E^+({\mathcal A})^{1/2^{k-1}}. {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{lemma} We then have the following lemma \cite[Theorem 41]{Shkr3}. \begin{lemma} \label{lem:Dtimessmall} Let ${\mathcal A} \subset \F_p$ be a set, $A \le p^{2846/4991}$. Then for any $c < \frac{1}{434}$ one has \begin{align*} D_2^\tildeimes({\mathcal A}) \ll A^{13/2-c}. {\mathbf{\,e}}nd{align*} Furthermore, if $A \le p^{48/97}$, then for any $c_1 < \frac{1}{192}$ one has \begin{align*} D_2^\tildeimes({\mathcal A}) \ll A^{13/2-c_1}. {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{lemma} We first notice that from the proof of \cite[Theorem 32]{Shkr3} we have \begin{align} \label{eq:Dkk-1} D^\tildeimes_k({\mathcal A})-\frac{A^{4k}}{p} \ll_k (\log A)^2 A^{2k+1}\left(D^\tildeimes_{k-1}(A) -\frac{A^{4(k-1)}}{p}\right)^{1/2}. 
\end{align} Using $E^+({\mathcal A}) \le A^3$, combined with Lemma \ref{lem:Dtimessmall} and \eqref{eq:Dkk-1} we have the following corollary. \begin{cor}\label{cor:Dktimes} Suppose ${\mathcal A} \subset \F_p$ is a set and $|{\mathcal A}|=A$. For all $k\ge 2$ \begin{align*} D^\times_k({\mathcal A}) - \frac{A^{4k}}{p} \ll_k (\log A)^4 A^{4k-2+2^{-k+1}}. \end{align*} Similarly if $A\le p^{2846/4991}$, for any $c<\frac{1}{434}$ we have \begin{align*} D^\times_k({\mathcal A}) - \frac{A^{4k}}{p} \ll_k (\log A )^4 A^{4k-2+2^{-k+1}-c2^{-k+2}} \end{align*} and if $A \le p^{48/97}$, for any $c_1 < \frac{1}{192}$ we have \begin{align*} D^\times_k({\mathcal A}) - \frac{A^{4k}}{p} \ll_k (\log A )^4 A^{4k-2+2^{-k+1}-c_12^{-k+2}}. \end{align*} \end{cor} It is clear that we can use the above to give other estimates on $D^\times_k$ using previous estimates on $D^\times_2$. We recall the following result \cite[Lemma 2.6]{Mac1}, which follows from the result of Murphy et al.\ \cite{MPR-NRS} on collinear triples. \begin{lemma} Let ${\mathcal A} \subset \F_p$. Then \begin{align*} D^\times_2({\mathcal A})- \frac{A^8}{p} \ll p^{1/2}A^{11/2}. \end{align*} \end{lemma} Again, we have the following corollary. \begin{cor} \label{cor:Dktimes2} Let ${\mathcal A} \subset \F_p$. Then \begin{align*} D^\times_k({\mathcal A}) - \frac{A^{4k}}{p} \ll_k p^{2^{1-k}}(\log A)^4 A^{4k-2-2^{-k+1}}. \end{align*} \end{cor} We next prepare to give an estimate for $D^{\times}_k({\mathcal A})$ which improves on the above results for sets of cardinality a little larger than $p^{1/2}$. As in Shkredov~\cite{Shkr3}, our main tool is Rudnev's point--plane incidence bound~\cite{Rud}. \begin{lemma} \label{lem:Rud} Let $p$ be an odd prime, ${\mathcal P}\subset \F_p^3$ a set of points and $\Pi$ a collection of planes in $\F_p^3$.
Suppose $|{\mathcal P}|\le |\Pi|$ and that $k$ is the maximum number of collinear points in ${\mathcal P}$. Then the number of point-planes incidences satisfies $${\mathcal I}({\mathcal P},\Pi)\le \frac{|{\mathcal P}||\Pi|}{p}+|{\mathcal P}|^{1/2}|\Pi|+k|{\mathcal P}|.$$ {\mathbf{\,e}}nd{lemma} \begin{lemma} \label{lem:Dtimes} For a prime number $p$ and a subset ${\mathcal A}\subseteq \F_p$ with $|{\mathcal A}|=A$ we have \begin{align*} D_2^{\tildeimes}({\mathcal A})&=\frac{A^8}{p}+O\left(A^6(\log{A})^2+p^{1/2}A^4E_{+}({\mathcal A})^{1/2}(\log{A})^2\right) \\ & \quad \quad \quad+O\left(pA^4(\log{A})^2\right). {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{lemma} \begin{proof} We have \begin{align*} D_2^{\tildeimes}({\mathcal A})=\sum_{\substack{a_i\in {\mathcal A} \\ (a_1-a_2)(a_3-a_4)=(a_5-a_6)(a_7-a_8) \\ a_5\neq a_6 }}1+O(A^6). {\mathbf{\,e}}nd{align*} Let $I(x)$ denote the indicator function of the multiset $$\{ a-a' \ : \ a,a'\in {\mathcal A}\},$$ and let $\widehat I$ denote the Fourier transform of $I$. We note that the Fourier coefficients satisfy \begin{align} \label{eq:Ihat} \widehat I(x)=\left|\sum_{a\in {\mathcal A}}e_p(ax) \right|^2. {\mathbf{\,e}}nd{align} We have \begin{align} \label{eq:DW} \nonumber D_2^{\tildeimes}(A)&=\sum_{\substack{a_i\in {\mathcal A} \\ a_5\neq a_6 }}I\left(\frac{(a_1-a_2)(a_3-a_4)}{(a_5-a_6)} \right)+O(A^6) \\ &=\frac{A^8}{p}+O(A^6)+W, {\mathbf{\,e}}nd{align} where \begin{align*} W=\frac{1}{p}\sum_{y=1}^{p-1}\widehat I(y)\sum_{\substack{a_i \in {\mathcal A} \\ a_5\neq a_6}}e_p(-y(a_1-a_2)(a_3-a_4)(a_5-a_6)^{-1}). 
{\mathbf{\,e}}nd{align*} We have \begin{align*} W&\le \frac{1}{p}\sum_{y=1}^{p-1}\sum_{z=1}^{p}\widehat I(y) \widehat I(z)\sum_{\substack{a_i \in {\mathcal A} \\ (a_1-a_2)y=(a_3-a_4)z \\ a_3\neq a_4}}1 \\ &=\frac{A^5}{p}\sum_{y=1}^{p-1}\widehat I(y)+\frac{1}{p}\sum_{y=1}^{p-1}\sum_{z=1}^{p-1}\widehat I(y) \widehat I(z)\sum_{\substack{a_i \in {\mathcal A} \\ (a_1-a_2)y=(a_3-a_4)z }}1, {\mathbf{\,e}}nd{align*} where we have removed the condition $a_3\neq a_4$ in the last display since by~{\mathbf{\,e}}qref{eq:Ihat} the Fourier coefficients are nonnegative. The above implies \begin{align} \label{eq:WW0} W\le W_0+O(A^6), {\mathbf{\,e}}nd{align} where \begin{align*} W_0=\frac{1}{p}\sum_{y=1}^{p-1}\sum_{z=1}^{p-1}\widehat I(y) \widehat I(z)\sum_{\substack{a_i \in {\mathcal A} \\ (a_1-a_2)y=(a_3-a_4)z}}1. {\mathbf{\,e}}nd{align*} For integer $i\ge 1$ we define the sets \begin{align} \label{eq:Jidef} J(i)=\{ 1\le z \le p \ : \ 2^{i-1}-1\le \widehat I(z)< 2^{i}-1 \}, {\mathbf{\,e}}nd{align} so that \begin{align} \label{eq:WWj} W_0\ll \frac{1}{p}\sum_{1\le i,j \ll \log{A}}2^{i+j}W(i,j), {\mathbf{\,e}}nd{align} where \begin{align*} W(i,j)=\sum_{\substack{a_i \in {\mathcal A}, y\in J(i), z\in J(j) \\ (a_1-a_2)y=(a_3-a_4)z}}1. {\mathbf{\,e}}nd{align*} Fix some pair $(i,j)$ and consider $W(i,j)$. If $|J(i)|\le |J(j)|,$ then we consider the set of points $${\mathcal P}=\{ (a_1y,y,a_3) \ : \ y\in J(i), \ a_1,a_3\in {\mathcal A} \},$$ and the collection of planes $$\Pi = \{ x_1-a_2x_2-zx_3+a_4z=0 \ : \ z\in J(j), \ \ a_2,a_4\in {\mathcal A} \}.$$ We see that $W(i,j)$ is bounded by the number of point-plane incidences between ${\mathcal P}$ and $\Pi$ \begin{align*} W(i,j)\le {\mathcal I}({\mathcal P},\Pi). 
{\mathbf{\,e}}nd{align*} Since the maximum number of collinear points in ${\mathcal P}$ is $\max\{ A,|J(i)|\}$ an application of Lemma~\ref{lem:Rud} gives \begin{align} \begin{split} \label{eq:Wij1} W(i,j)\ll \frac{A^4|J(i)||J(j)|}{p}&+A^3|J(i)|^{1/2}|J(j)|\\ &\qquad \qquad +A^2|J(i)|\max\{ A,|J(i)|\}. {\mathbf{\,e}}nd{split} {\mathbf{\,e}}nd{align} In a similar fashion, if $|J(j)|\le |J(i)|$ then \begin{align} \begin{split} \label{eq:Wij2} W(i,j)\ll \frac{A^4|J(i)||J(j)|}{p}&+A^3|J(j)|^{1/2}|J(i)|\\ & \qquad \qquad +A^2|J(j)|\max\{ A,|J(j)|\}. {\mathbf{\,e}}nd{split} {\mathbf{\,e}}nd{align} This implies that \begin{align*} W(i,j) &\ll \frac{A^4|J(i)||J(j)|}{p}+A^3|J(i)|^{1/2}|J(j)|+A^3|J(j)|^{1/2}|J(i)| \\ & \quad \quad +A^2\min\{|J(i)|^2,|J(j)|^2\} \\ & \ll \frac{A^4|J(i)||J(j)|}{p}+A^3|J(i)|^{1/2}|J(j)|+A^3|J(j)|^{1/2}|J(i)| \\ & \quad \quad +A^2|J(i)||J(j)|, {\mathbf{\,e}}nd{align*} and hence substituting the above into~{\mathbf{\,e}}qref{eq:WWj} we get \begin{align*} W_0 &\ll \frac{A^4}{p^2}\left(\sum_{1\le i \ll \log{A}}2^{i}|J(i)| \right)^2 \\ & \qquad +\frac{A^3}{p}\left(\sum_{1\le i \ll \log{A}}2^i|J(i)|^{1/2} \right)\left(\sum_{1\le i \ll \log{A}}2^{i}|J(i)| \right)\\ &\qquad \qquad +\frac{A^2}{p}\left(\sum_{1\le i \ll \log{A}}2^{i}|J(i)| \right)^2. {\mathbf{\,e}}nd{align*} Recalling~{\mathbf{\,e}}qref{eq:Ihat} and~{\mathbf{\,e}}qref{eq:Jidef}, we have \begin{align*} \sum_{1\le i \ll \log{A}}2^{i}|J(i)|&\ll p+\sum_{2\le i \ll \log{A}}2^{i}|J(i)| \\ & \ll p+\log{A}\sum_{y=1}^{p}|\sum_{a\in {\mathcal A}}e_p(ya)|^2=pA\log{A}, {\mathbf{\,e}}nd{align*} and \begin{align*} \left(\sum_{1\le i \ll \log{A}}2^i|J(i)|^{1/2} \right)^2&\ll p+\log{A}\sum_{2\le i \ll \log{A}}2^{2i}|J(i)|\\ &\ll p+(\log{A})^2\sum_{y=1}^{p}\left|\sum_{a\in {\mathcal A}}e_p(ya) \right|^4, {\mathbf{\,e}}nd{align*} so that \begin{align*} \sum_{1\le i \ll \log{A}}2^i|J(i)|^{1/2}\ll p^{1/2}E_{+}({\mathcal A})^{1/2}\log{A}. 
\end{align*} This implies \begin{align*} W\ll A^6(\log{A})^2+p^{1/2}A^4E_{+}({\mathcal A})^{1/2}(\log{A})^2+pA^4, \end{align*} and hence by~\eqref{eq:DW} and~\eqref{eq:WW0} \begin{align*} D_2^{\times}({\mathcal A})&=\frac{A^8}{p}+O\left(A^6(\log{A})^2\right)+O\left(p^{1/2}A^4E_{+}({\mathcal A})^{1/2}(\log{A})^2\right) \\ &+O(pA^4(\log{A})^2), \end{align*} which completes the proof. \end{proof} We next establish a recurrence-type inequality similar to~\cite[Theorem~32]{Shkr3}. \begin{lemma} \label{lem:Dktimes} For a prime number $p$ and a subset ${\mathcal A}\subseteq \F_p$ with $|{\mathcal A}|=A$ we have \begin{align*} D_k^{\times}({\mathcal A})=\frac{A^{4k}}{p}+O_k\left(\left(A^{4k-2}+pA^{4k-4}+p^{1/2}A^{2k}D^{\times}_{k-1}({\mathcal A})^{1/2}\right)\log^2{A}\right). \end{align*} \end{lemma} \begin{proof} Let $D'_k({\mathcal A})$ count the number of solutions to the equation $$(a_{1,1}-a_{1,2})\dots (a_{k,1}-a_{k,2})= (a_{k+1,1}-a_{k+1,2})\dots(a_{2k,1}-a_{2k,2}),$$ with variables $a_{1,1},\dots,a_{2k,2}\in {\mathcal A}$ satisfying $$a_{1,1}\neq a_{1,2}, \quad a_{k+1,1}\neq a_{k+1,2},$$ so that \begin{align} \label{eq:DD'} D^{\times}_k({\mathcal A})=D'_k({\mathcal A})+O(A^{4k-2}). \end{align} Let $I(y)$ denote the indicator function of the multiset $$\{ (a_{2,1}-a_{2,2})\dots (a_{k,1}-a_{k,2}) \ : a_{2,1},\dots,a_{k,2}\in {\mathcal A}\},$$ and let $\widehat I(y)$ denote the Fourier transform of $I$.
We have \begin{align*} D'_k&({\mathcal A})=\sum_{\substack{a_{j,1},a_{j,2}\in {\mathcal A} \\ a_{1,1}\neq a_{1,2}\\ a_{k+1,1}\neq a_{k+1,2}}}I((a_{k+1,1}-a_{k+1,2})\dots(a_{2k,1}-a_{2k,2})(a_{1,1}-a_{1,2})^{-1}) \\ &=\frac{1}{p}\sum_{y=1}^{p-1}\widehat I(y)\\ & \sum_{\substack{a_{j,1},a_{j,2}\in {\mathcal A} \\ a_{1,1}\neq a_{1,2}\\ a_{k+1,1}\neq a_{k+1,2}}}e_p\left(-y(a_{k+1,1}-a_{k+1,2})\dots(a_{2k,1}-a_{2k,2})(a_{1,1}-a_{1,2})^{-1} \right) \\ &=\frac{1}{p}\sum_{z=1}^p\sum_{y=1}^{p-1}\widehat I(y)\widehat I(-z)\sum_{\substack{a_{i,j}\in {\mathcal A} \\ y(a_{1,1}-a_{1,2})=z(a_{2,1}-a_{2,2}) \\ a_{j,1}\neq a_{j,2}, \ j=1,2}}1, {\mathbf{\,e}}nd{align*} which implies that \begin{align} \label{eq:DkW0} D'_k({\mathcal A})=\frac{A^{4k}}{p}+W_0+O(A^{4k-2}), {\mathbf{\,e}}nd{align} where $$W_0=\frac{1}{p}\sum_{z=1}^{p-1}\sum_{y=1}^{p-1}\widehat I(y)\widehat I(-z)\sum_{\substack{a_{i,j}\in {\mathcal A} \\ y(a_{1,1}-a_{1,2})=z(a_{2,1}-a_{2,2}) \\ a_{j,1}\neq a_{j,2}, \ j=1,2}}1.$$ For integer $i\ge 1$ we define $$J(i)=\{ y\in \F_p^* \ : \ 2^{i-1}-1\le |\widehat I(y)|\le 2^i-1 \},$$ so that \begin{align} \label{eq:W0d1} W_0\ll \frac{1}{p}\sum_{\substack{i,j \ll \log{A^{2k}}}}2^{i+j}W(i,j), {\mathbf{\,e}}nd{align} where \begin{align*} W(i,j)=\sum_{\substack{a_{i,j}\in {\mathcal A}, \\ y\in J(i), z\in J(j) \\ y(a_{1,1}-a_{1,2})=z(a_{2,1}-a_{2,2}) \\ a_{j,1}\neq a_{j,2}, \ j=1,2}}1. {\mathbf{\,e}}nd{align*} Using Lemma~\ref{lem:Rud} as in the proof of Lemma~\ref{lem:Dtimes}, we see that \begin{align} \label{eq:Wij123123} W(i,j) & \ll \frac{A^4|J(i)||J(j)|}{p}+A^3|J(i)|^{1/2}|J(j)|+A^3|J(j)|^{1/2}|J(i)| \\ & \quad \quad +A^2|J(i)||J(j)| \nonumber. 
{\mathbf{\,e}}nd{align} We have \begin{align*} &\sum_{i\ll \log{A}}2^{i}|J(i)|\\ &\qquad \ll p+\sum_{y=1}^{p-1}\left|\sum_{\substack{a_{i,1},a_{i,2}\in {\mathcal A} \\ 1\le i \le k-1}}e_p(y(a_{1,1}-a_{1,2})\dots (a_{k-1,1}-a_{k-1,2})) \right| \\ &\qquad \le p+\sum_{\substack{a_{i,1},a_{i,2}\in {\mathcal A} \\ 2\le i \le k-1}}\sum_{y=1}^{p-1}\left|\sum_{a\in A}e_p(y(a_{2,1}-a_{2,2})\dots (a_{k-1,1}-a_{k-1,2})a) \right|^2 \\ &\qquad \ll pA^{2k-3}, {\mathbf{\,e}}nd{align*} and \begin{align*} & \sum_{i\ll \log{A}}2^{i}|J(i)|^{1/2}\ll p^{1/2}\\ &+\left( \log{A}\sum_{y=1}^{p-1}\left|\sum_{\substack{a_{i,1},a_{i,2}\in {\mathcal A} \\ 1\le i \le k-1}}e_p(y(a_{1,1}-a_{1,2})\dots (a_{k-1,1}-a_{k-1,2})) \right|^2 \right)^{1/2}, {\mathbf{\,e}}nd{align*} so that \begin{align*} \sum_{i\ll \log{A}}2^{i}|J(i)|^{1/2}\ll_k (\log{A})^{1/2}p^{1/2}D^{\tildeimes}_{k-1}({\mathcal A})^{1/2}. {\mathbf{\,e}}nd{align*} Combining the above with~{\mathbf{\,e}}qref{eq:W0d1} and~{\mathbf{\,e}}qref{eq:Wij123123} we see that \begin{align*} W_0\ll_k \left(A^{4k-2}+pA^{4k-4}+p^{1/2}A^{2k}D^{\tildeimes}_{k-1}({\mathcal A})^{1/2}\right)\log^2{A}, {\mathbf{\,e}}nd{align*} and hence by~{\mathbf{\,e}}qref{eq:DD'} and~{\mathbf{\,e}}qref{eq:DkW0} \begin{align*} D_k^{\tildeimes}(A)&=\frac{A^{4k}}{p}\\ &\quad +O_k\left(\left(A^{4k-2}+pA^{4k-4}+p^{1/2}A^{2k}D^{\tildeimes}_{k-1}({\mathcal A})^{1/2}\right)\log^2{A}\right), {\mathbf{\,e}}nd{align*} which completes the proof. {\mathbf{\,e}}nd{proof} Combining Lemma~\ref{lem:Dtimes} and Lemma~\ref{lem:Dktimes} with an induction argument gives the following Corollary. \begin{cor} \label{cor:Dk1} For a prime number $p$ and a subset ${\mathcal A}\subseteq \F_p$ with $|{\mathcal A}|=A \ge p^{1/2}$ we have \begin{align*} &D_k^{\tildeimes}({\mathcal A})=\frac{A^{4k}}{p}\\ & \quad +O_k\left(\left(A^{4k-2}+p^{1-2^{-(k-1)}}A^{4k-4}E_{+}({\mathcal A})^{2^{-(k-1)}}\right)\log^4{A}\right). 
{\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{cor} Using the trivial bound $E_{+}({\mathcal A})\le A^3$ in Corollary~\ref{cor:Dk1} gives the following sharp asymptotic formula for $D_k^{\tildeimes}({\mathcal A})$ for sets of cardinality a little larger than $p^{1/2}$. \begin{cor} \label{cor:Dksharp} For any $k\ge3$ and $A\ge p^{1/2+1/(2^{k+1}-6)}$ we have \begin{align*} D_k^{\tildeimes}({\mathcal A})=\frac{A^{4k}}{p}+O_k\left(A^{4k-2}\log^4{A}\right) . {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{cor} We define $N({\mathcal X},{\mathcal Y},{\mathcal Z})$ to be the number of solutions to \begin{align*} x_1(y_1-z_1)=x_2(y_2-z_2) {\mathbf{\,e}}nd{align*} with $x_1,x_2 \in {\mathcal X}, y_1, y_2 \in {\mathcal Y}$ and $z_1,z_2 \in {\mathcal Z}$. We now recall \cite[Corollary 2.4]{PetShp}. \begin{lemma} \label{lem:NXYZ} Let ${\mathcal X}, {\mathcal Y}, {\mathcal Z} \subset \F^*_p$ with $|{\mathcal X}|=X, |{\mathcal Y}|=Y, |{\mathcal Z}|=Z$ and $M=\max(X,Y,Z)$. Then \begin{align*} N({\mathcal X},{\mathcal Y},{\mathcal Z}) \ll \frac{X^2Y^2Z^2}p + X^{3/2}Y^{3/2}Z^{3/2}+MXYZ. {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{lemma} \iffalse \begin{theorem} \label{thm:multlin} Let ${\mathcal X}_i \subset \F_p$ with $|{\mathcal X}_i|=X_i$ and $X_1\ge X_2\ge\dots\ge X_n$ and $ X_{2}^{2-2^{-n+2}}(\log_2(X_{2}))^{-4} < p$ . Then \begin{align*} &S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n) \\ &\qquad \ll p^{\frac{1}{2^n}}X_1^{\frac{2^n-1}{2^n}}(X_{2}\dots X_n)^{1+o(1)-\frac{2-2^{-n+2}}{(n-1)2^n}}\\ &\qquad \qquad \qquad \qquad \qquad \qquad \qquad + X_1\dots X_{n-1}X_n^{\frac{2^n-2}{2^n}}. 
{\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{theorem} \begin{proof} By \ref{lem:SXin} we have \begin{align*} |S|^{2^{n-1}}&= |S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)|^{2^{n-1}} \\ & \qquad \qquad \le X_1^{2^{n-1}-1}(X_2 \dots X_n)^{2^{n-1}-2} \sum_{x_2,y_2 \in{\mathcal X}_2}\dots \sum_{x_n, y_n \in{\mathcal X}_n} \\ &\qquad \qquad \qquad \qquad \qquad \left|\sum_{x_1 \in {\mathcal X}_1}{\mathbf{\,e}}p(x_1(x_2-y_2)\dots (x_n-y_n))\right|. {\mathbf{\,e}}nd{align*} Separating the number of solutions when $x_i-y_i=0$, $i=2, \dots, n$ and using the Cauchy inequality we have \begin{align*} |S|^{2^{n-1}}&\ll_n X_1^{2^{n-1}-1}(X_2 \dots X_n)^{2^{n-1}-2} \sum_{\substack{x_2,y_2 \in{\mathcal X}_2 \\ x_2 \neq y_2}}\dots \sum_{\substack{x_n, y_n \in{\mathcal X}_n \\ x_n\neq y_n}} \\ &\qquad \qquad \qquad \qquad \left|\sum_{x_1 \in {\mathcal X}_1}{\mathbf{\,e}}p(x_1(x_2-y_2)\dots (x_n-y_n))\right| \\ &\qquad \qquad \qquad \qquad \qquad + (X_1X_n)^{2^{n-1}-1}(X_2 \dots X_{n-1})^{2^{n-1}} \\ &\ll_n X_1^{2^{n-1}-1}(X_2 \dots X_n)^{2^{n-1}-2} \sum_{\lambda \in \F^*_p} J(\lambda) \left| \sum_{x_1 \in {\mathcal X}_1 } {\mathbf{\,e}}p(\lambda x)\right|\\ &\qquad \qquad \qquad \qquad \qquad +(X_1 \dots X_{n-1})^{2^{n-1}}X_n^{2^{n-1}-1} \\ |S|^{2^{n}}&\ll_n X_1^{2^{n}-2}(X_2 \dots X_n)^{2^n-4}K \sum_{\lambda \in \F^*_p} \left| \sum_{x_1 \in {\mathcal X}_1 } {\mathbf{\,e}}p(\lambda x)\right|^2\\ &\qquad \qquad \qquad \qquad \qquad + (X_1 \dots X_{n-1})^{2^{n}}X_n^{2^{n}-2} {\mathbf{\,e}}nd{align*} where $J(\lambda)$ is the number of solutions to \begin{align*} (x_2-y_2)\dots (x_n-y_n) = \lambda, \qquad \qquad \lambda \in \F^*_p {\mathbf{\,e}}nd{align*} and \begin{align*} K = \sum_{\lambda \in \F^*_p} J(\lambda)^2. 
{\mathbf{\,e}}nd{align*} We use the orthogonality of exponential functions to get \begin{align*} \sum_{\lambda\in \F_p}\left| \sum_{x_1 \in {\mathcal X}_1 } {\mathbf{\,e}}p(\lambda x)\right|^2 = pX {\mathbf{\,e}}nd{align*} and now apply Corollary \ref{cor:Dktimes} and Lemma \ref{lem:Dktimes*} to obtain \begin{align} \label{eq:S2n} S^{2^n} &\ll p^{\frac{n-r}{n-1}}X_1^{2^{n}-1}(X_2\dots X_r)(X_{r+1}\dots X_n)^{2^n-\frac{2+o(1)-2^{-n+2}}{(n-1)}} \\ &\qquad \qquad \qquad \qquad \qquad + (X_1 \dots X_{n-1})^{2^{n}}X_n^{2^{n}-2}. {\mathbf{\,e}}nd{align} We now recall the classical bilinear bound. \begin{align*} \sum_{x_1 \in {\mathcal X}_1} \sum_{x_2 \in {\mathcal X}_2} {\mathbf{\,e}}p (x_1x_2) \ll (pX_1X_2)^{1/2}. {\mathbf{\,e}}nd{align*} Therefore, we have \begin{align} \label{eq:Bilinn} |S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)| \le p^{1/2}X_1^{1/2}X_2^{1/2}X_3\dots X_n. {\mathbf{\,e}}nd{align} We wish to compare {\mathbf{\,e}}qref{eq:S2n} with {\mathbf{\,e}}qref{eq:Bilinn} to see when it is nontrivial. We consider $X_2\ge p^{\frac{1}{2-2^{-n+2}}}$, now it is clear that {\mathbf{\,e}}qref{eq:Bilinn} is stronger when \begin{align*} &p^{\frac{n-r}{n-1}}X_1^{2^{n}-1}(X_2\dots X_r)^{2^n}(X_{r+1}\dots X_n)^{2^n-\frac{2-2^{-n+2}}{(n-1)}} \\ &\qquad \qquad \qquad \qquad \qquad \ge p^{2^{n-1}}X_1^{2^{n-1}}X_2^{2^{n-1}}(X_3\dots X_n)^{2^n} {\mathbf{\,e}}nd{align*} or equivalently \begin{align*} p^{\frac{n-r}{n-1}}X_1^{2^{n}-1}X_2^{2^n} \ge p^{2^{n-1}}X_1^{2^{n-1}}X_2^{2^{n-1}} (X_{r+1}\dots X_n)^{\frac{2-2^{-n+2}}{(n-1)}} {\mathbf{\,e}}nd{align*} Now, \begin{align} \label{eq:compbil} p^{\frac{n-r}{n-1}} X_1^{2^{n}-1}X_2^{2^n} &\ge p^{2^{n-1}} X_1^{2^{n-1}}X_2^{2^{n-1}} \cdot p^{\frac{n-r}{n-1}} X_1 \cdot \\ & = p^{2^{n-1}}X_1^{2^{n-1}}X_2^{2^{n-1}}\cdot p^{\frac{n-r}{n-1}}(X_1^{n-1})^{\frac{1}{n-1}} \\ & \ge p^{2^{n-1}} X_1^{2^{n-1}}X_2^{2^{n-1}} \cdot X_1^{3 - 2^{2 - n} + \frac{2 (-1 + 2^{1 - n}) (-1 + r)}{-1 + n}}. 
{\mathbf{\,e}}nd{align} One can check that the final term involving $X_1$ has degree greater than $1$ for all possible $r$. Hence, \begin{align*} p^{\frac{n-r}{n-1}} X_1^{2^{n}-1}X_2^{2^n} &\ge p^{2^{n-1}} X_1^{2^{n-1}}X_2^{2^{n-1}}\cdot X_1^{\frac{n-1}{n-1}} \\ & \ge p^{2^{n-1}} X_1^{2^{n-1}}X_2^{2^{n-1}} \cdot (X_{\lceil n/2\rceil} \dots X_n)^{\frac{2}{n-1}}. {\mathbf{\,e}}nd{align*} Similarly, one can check that for $r>n/2$ that the final term of {\mathbf{\,e}}qref{eq:compbil} has degree greater than $2$. Therefore, \begin{align*} p^{\frac{n-r}{n-1}} X_1^{2^{n}-1}X_2^{2^n} &\ge p^{2^{n-1}} X_1^{2^{n-1}}X_2^{2^{n-1}} (X_{3}\dots X_n)^{\frac{2-2^{-n+2}}{(n-1)}}. {\mathbf{\,e}}nd{align*} Hence, the classical bilinear bound is always stronger when $X_2$ is large. Therefore, we let $X_2 < p^{\frac{1}{2-2^{-n+2}}}$. This completes the proof. {\mathbf{\,e}}nd{proof} We note that choosing $n=3, 4$ return the bounds of \cite[Theorem 1.3, 1.4]{PetShp} with the loss of a logarithmic term. We also give the following example. Suppose $n=6$ and $X_1=X_2=\dots=X_n$. Then \begin{align} \label{eq:n=6} S({\mathcal X}_1, \dots, {\mathcal X}_6;\omega_1, \dots, \omega_6) \ll p^{\frac{1}{64}}X_1^{5+977/1024}. {\mathbf{\,e}}nd{align} We compare {\mathbf{\,e}}qref{eq:n=6} to the trivial bound and see it is non-trivial for $X_1 > p^{16/47+o(1)}$. We also compare it to the classical bilinear bound and see it is stronger for $X_1<p^{496/977+o(1)}$. \fi \subsection{Proof of Theorem \ref{thm:multlin2}} \begin{proof} Let \begin{align*} S= S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n). 
{\mathbf{\,e}}nd{align*} By Lemma \ref{lem:SXin}, after permuting the variables, we have \begin{align*} |S|^{2^{n-1}} &\ll (X_1 \dots X_{n})^{2^{n-1}}\left(\frac{1}{X_{n-1}}+\dots+\frac{1}{X_{1}^{2^{n-2}}}\right) \\ &\qquad + (X_1\dots X_{n-1})^{2^{n-1}-2}X_n^{2^{n-1}-1}\sum_{\substack{x_1,y_1 \in{\mathcal X}_1 \\ x_1 \neq y_1}}\dots \sum_{\substack{x_{n-1}, y_{n-1} \in{\mathcal X}_{n-1}\\ x_{n-1} \neq y_{n-1}}} \\ &\qquad \qquad \qquad \qquad \qquad \left|\sum_{x_n \in {\mathcal X}_n}{\mathbf{\,e}}p(x_n(x_1-y_1)\dots (x_{n-1}-y_{n-1}))\right|. {\mathbf{\,e}}nd{align*} We now collect together $(x_2-y_2)\dots(x_{n-1}-y_{n-1})=\lambda$ and denote the number of solutions to this equation to be $J(\lambda)$. Similarly we collect $x_1(x_n-y_n) = \mu$ and we denote the number of solutions to this equation to be $I(\mu)$. Hence, \begin{align*} |S|^{2^{n-1}}&\ll_n (X_1 \dots X_{n})^{2^{n-1}}\left(\frac{1}{X_{n-1}}+\dots+\frac{1}{X_{1}^{2^{n-2}}}\right)\\ &\qquad + (X_1 \dots X_{n-1})^{2^{n-1}-2}X_n^{2^{n-1}-1} \sum_{\lambda \in \F^*_p} J(\lambda) \left| \sum_{\mu \in \F_p } I(\mu) {\mathbf{\,e}}p(\lambda \mu)\right|\\ &= (X_1 \dots X_{n})^{2^{n-1}}\left(\frac{1}{X_{n-1}}+\dots+\frac{1}{X_{1}^{2^{n-2}}}\right)\\ &\qquad +(X_1 \dots X_{n-1})^{2^{n-1}-2}X_n^{2^{n-1}-1} \sum_{\lambda \in \F^*_p} \sum_{\mu \in \F_p } J(\lambda) {\mathbf{\,e}}ta_\lambda I(\mu) {\mathbf{\,e}}p(\lambda \mu) {\mathbf{\,e}}nd{align*} for some complex weight ${\mathbf{\,e}}ta_\lambda$ with $|{\mathbf{\,e}}ta_\lambda|=1$. Now, by Lemma \ref{lem:NXYZ} with $X=Y=X_1, Z=X_n$ we have \begin{align*} \sum_{\mu \in \F_p} I(\mu)^2 = N({\mathcal X}_n, {\mathcal X}_1, {\mathcal X}_1) \ll X_1^{3}X_n^{3/2}. {\mathbf{\,e}}nd{align*} Similarly, \begin{align*} \sum_{\lambda \in \F_p^*} J(\lambda)^2 = D^{\tildeimes,*}_{n-2}({\mathcal X}_2, \dots, {\mathcal X}_{n-1}). 
\end{align*} We apply Corollaries~\ref{cor:Dktimes} and~\ref{cor:Dktimes2} combined with Lemma~\ref{lem:Dktimes*} along with Lemma~\ref{lem:bilin} to obtain \begin{align*} |S|^{2^{n-1}}&\ll_n (X_1 \dots X_{n})^{2^{n-1}}\left(\frac{1}{X_{n-1}}+\dots+\frac{1}{X_{1}^{2^{n-2}}}\right) \\ & \qquad \qquad+ (X_1\dots X_n)^{2^{n-1}}p^{1/2} X_1^{-1/2}X_n^{-1/4}\left(\prod_{i=2}^{n-1}B_n({\mathcal X}_i)^{2^{n-1}}\right). \end{align*} This completes the proof. \end{proof} \subsection{Proof of Theorem \ref{thm:multlin3}} We note that the conditions~\eqref{thm:multlin3cond} and Corollary~\ref{cor:Dksharp} imply that $$\widetilde D_{n}^{\times,*}({\mathcal X}_i)\ll (\log{p})^{4}X_i^{4n-2},$$ and hence by Lemma~\ref{lem:SMV1} \begin{align*} &|S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n)|^{2^{n}}\ll (X_1\dots X_n)^{2^{n}}\left(\frac{1}{X_1^{2^{n-1}}}+\dots+\frac{1}{X_n} \right) \\ & \qquad \qquad +(\log{p})^{4}p^{1/2}(X_1\dots X_n)^{2^n-1/n}, \end{align*} from which the desired result follows. \section{Multinomial Exponential Sums} \subsection{Preliminaries} The aim of this section is to extend the results of \cite{Mac2} and \cite{MSS} beyond the cases of trinomials and quadrinomials, to more general multinomial sums. We recall the following bound of \cite{MSS}. \begin{lemma} \label{Dtimesgroup} Let ${\mathcal G} \subseteq \F_p^*$ be a multiplicative subgroup with $|{\mathcal G}|=G$. Then \begin{align*} D_2^\times({\mathcal G}) - \frac{G^8}{p} \ll \left\{ \begin{array}{ll} p^{1/2} G^{\frac{11}{2}},& \text{if $ G \ge p^{\frac{2}{3}}$},\\ G^7 p^{-\frac{1}{2}} , & \text{if $p^{\frac{2}{3}} > G \ge p^{\frac{1}{2}}\log p$},\\ G^6 \log G, & \text{if $G< p^{\frac{1}{2}}\log p$}. \end{array} \right. \end{align*} \end{lemma} Combining with \eqref{eq:Dkk-1} and observing which term dominates we get the following corollary.
\begin{cor} \label{cor:Dtimesgroup} Let ${\mathcal G} \subseteq\F_p^*$ be a multiplicative subgroup with $|{\mathcal G}|=G$. Then \begin{align*} D_k^\tildeimes({\mathcal G})\ll \left\{ \begin{array}{ll} G^{4k}p^{-1} &\tildeext{if $G \ge p^{\frac{1}{2}}\log p$},\\ G^{4k-2+o(1)}, & \tildeext{if $G< p^{\frac{1}{2}}\log p$}. {\mathbf{\,e}}nd{array} \right. {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{cor} We also have the following result as a consequence of \cite[Lemma 2.4]{Mac2}. \begin{lemma} \label{lem:NGHgroup} Let ${\mathcal G}, {\mathcal H} \subset \F^*_p$ be multiplicative subgroups with cardinalities $G,H$ respectively with $G \ge H$. Then, \begin{align*} N({\mathcal H}, {\mathcal G}, {\mathcal G}) \ll \left\{ \begin{array}{ll} H^2G^{\frac{7}{2}}p^{-\frac{1}{2}} &\tildeext{if $G \ge p^{\frac{1}{2}}\log p$},\\ H^2G^{\frac{5}{2}+o(1)}, & \tildeext{if $G< p^{\frac{1}{2}}\log p$}. {\mathbf{\,e}}nd{array} \right. {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{lemma} We then have the following result on multilinear exponential sums over subgroups, which may be of independent interest to the reader. \begin{lemma} \label{lem:multlingroup} Let ${\mathcal X}_i \subset \F_p$ be multiplicative subgroups with $|{\mathcal X}_i|=X_i$, $X_1\ge X_2\ge\dots\ge X_{n}$, $n\ge 4$. Then \begin{align*} S({\mathcal X}_1, \dots, {\mathcal X}_n; \omega_1, \dots, \omega_n) &\ll_n (X_1\dots X_n)p^{\frac{1}{2^n}} A_n({\mathcal X}_1) \prod_{i=2}^{n-1} B_n({\mathcal X}_i) \\ &\qquad + (X_1\dots X_n)\left(\frac{1}{X_n^{1/2}}+\dots+\frac{1}{X_1^{1/2^n}} \right) {\mathbf{\,e}}nd{align*} where \begin{align*} A_n({\mathcal X}_1) = \left\{ \begin{array}{ll} X_1^{-\frac{1}{2^{n+1}}}p^{-\frac{1}{2^{n+1}}}, & \tildeext{if $X_1 \ge p^{\frac{1}{2}}\log p$}, \\ X_1^{-\frac{3}{2^{n+1}}+o(1)}, & \tildeext{if $X_1 < p^{\frac{1}{2}}\log p$,} {\mathbf{\,e}}nd{array} \right. 
\end{align*} and \begin{align*} B_n({\mathcal X}_i) = \left\{ \begin{array}{ll} p^{-\frac{1}{2^{n}(n-2)}},& \text{if $ X_i \ge p^{\frac{1}{2}}\log p$},\\ X_i^{-\frac{1}{2^{n-1}(n-2)}},& \text{if $X_i< p^{\frac{1}{2}}\log p$.} \end{array} \right. \end{align*} \end{lemma} \begin{proof} The proof follows that of Theorem \ref{thm:multlin2}, however we use Corollary \ref{cor:Dtimesgroup} and Lemma \ref{lem:NGHgroup} in place of their relevant results on arbitrary sets. \end{proof} \subsection{Proof of Theorem \ref{thm:multinom}} Let $\alpha_{k_i} = \gcd(k_i, p-1)$ for each $i=1, \dots, t$. We then let ${\mathcal G}_{\alpha_i}$ be the subgroup of $\F^*_p$ generated by the elements of order $\alpha_{k_i}$. Then \begin{align*} T_\chi(\Psi) &= \frac{1}{\alpha_{k_1} \dots \alpha_{k_{t-1}}} \sum_{x_1 \in {\mathcal G}_{\alpha_1}}\dots \sum_{x_{t-1} \in {\mathcal G}_{\alpha_{t-1}}}\\ & \qquad \qquad \qquad \qquad \sum_{x_t \in \F^*_p} \chi(x_1\dots x_t)e_p(\Psi(x_1 \dots x_t)) \\ &= \frac{1}{\alpha_{k_1} \dots \alpha_{k_{t-1}}} \sum_{x_1 \in {\mathcal G}_{\alpha_1}}\dots \sum_{x_{t-1} \in {\mathcal G}_{\alpha_{t-1}}} \sum_{x_t \in \F^*_p} \chi(x_1\dots x_t)\\ & e_p(a_1(x_2\dots x_t)^{k_1})\dots e_p(a_{t-1}(x_1\dots x_{t-2}x_t)^{k_{t-1}})e_p(a_t(x_1\dots x_t)^{k_t})\\ &= \frac{1}{\alpha_{k_1} \dots \alpha_{k_{t-1}}} \sum_{x_1 \in {\mathcal G}_{\alpha_1}}\dots\sum_{x_{t-1} \in {\mathcal G}_{\alpha_{t-1}}}\\ & \qquad \sum_{x_t \in \F^*_p} \omega_1(\mathbf{x}) \dots \omega_t (\mathbf{x}) e_p(a_t(x_1\dots x_t)^{k_t}). \end{align*} Now the image ${\mathcal X}_t = \{x_t^{k_t} : x_t \in \F^*_p\}$ of non-zero $k_t$-th powers contains $(p-1)/\alpha_{k_t}$ elements, each appearing with multiplicity $\alpha_{k_t}$.
Similarly, the images ${\mathcal X}_i = \{x_i^{k_t} : x_i \in {\mathcal G}_{\alpha_{k_i}}\}$ contain $\alpha_{k_i}/\gcd(\alpha_{k_i}, \alpha_{k_t})$ elements, each appearing with multiplicity $\gcd(\alpha_{k_i}, \alpha_{k_t})$, for $i=1, \dots, t-1$. Hence, we apply Lemma \ref{lem:multlingroup} to obtain \begin{align*} &T_\chi(\Psi) \ll_t \frac{\alpha_{k_t}}{\beta_{k_1} \dots \beta_{k_{t-1}}} \cdot\left (p^{\frac{1}{2^t}}\beta_{k_{t-1}} A_t\left(\frac{p-1}{\alpha_{k_t}}\right) \prod_{i=1}^{t-2}B_t(\beta_{k_i}) \right) \\ & + \frac{\alpha_{k_t}}{\beta_{k_1} \dots \beta_{k_{t-1}}} \cdot \frac{p-1}{\alpha_{k_t}}\beta_{k_1} \dots\beta_{k_{t-1}}\left( \left(\frac{\alpha_{k_t}}{p-1}\right)^{\frac{1}{2}}+\beta_{k_1}^{\frac{-1}{2^2}} +\dots+\beta_{k_{t-1}}^{\frac{-1}{2^{t}}}\right). {\mathbf{\,e}}nd{align*} By simplifying we reach the required result. \iffalse \section{Twisted Character Sums} We define the twisted character sums \begin{align*} Q_{\psi, \chi}({\mathcal X}_1, \dots, {\mathcal X}_n) = \sum_{x_1 \in {\mathcal X}_1} \dots \sum_{x_n \in {\mathcal X}_n}\psi(x_1\dots x_n) \chi(x_1\dots x_n+1) {\mathbf{\,e}}nd{align*} where $\psi$ and $\chi$ are non-trivial additive and multiplicative characters respectively of $\F_q$, the finite field of $q$ elements with characteristic $p$. We also define$E^\tildeimes_{n}({\mathcal X}_1, \dots, {\mathcal X}_n)$ to be the number of solutions to \begin{align*} x_1\dots x_n = y_1\dots y_n, \qquad \tildeext{where $x_i,y_i \in {\mathcal X}_i$}. {\mathbf{\,e}}nd{align*} We have the following result. \begin{theorem} Let ${\mathcal X}_1, \dots, {\mathcal X}_n \in \F_q$ with $X_1\ge X_2\ge \dots \ge X_n$. Then, \begin{align*} Q_{\psi, \chi}({\mathcal X}_1, \dots, {\mathcal X}_n) \ll q^{1/4}X_1^{1/2}X_2\dots X_n + q^{1/2}X_1^{1/2}E^\tildeimes_{n-1}({\mathcal X}_2, \dots, {\mathcal X}_n)^{1/2}. 
{\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{theorem} \begin{proof} By the Cauchy inequality we have \begin{align*} &Q_{\psi, \chi}({\mathcal X}_1, \dots, {\mathcal X}_n)^2\\ &\qquad \le X_1 \sum_{x_1\in {\mathcal X}_1}\left| \sum_{x_2 \in {\mathcal X}_2}\dots \sum_{x_n \in {\mathcal X}_n} \psi(x_1\dots x_n) \chi(x_1\dots x_n+1) \right|^2 \\ & \qquad \le X_1 \sum_{x_1\in {\mathcal X}_1} \sum_{x_2, y_2 \in {\mathcal X}_2}\dots \sum_{x_n,y_n \in {\mathcal X}_n} \\ & \qquad \qquad \psi(x_1 (x_2\dots x_n-y_2\dots y_n)) \chi(x_1 x_2\dots x_n+1)\overlineerline{\chi}(x_1y_2\dots y_n +1) \\ &\qquad \le X_1\sum_{x_2, y_2 \in {\mathcal X}_2}\dots \sum_{x_n,y_n \in {\mathcal X}_n} \chi\left(\frac{x_2\dots x_n}{y_2\dots y_n}\right) \sum_{x_1\in \F^*_q} \\ &\qquad \psi(x_1 (x_2\dots x_n-y_2\dots y_n)) \chi(x_1+ x_2^{-1}\dots x_n^{-1})\overlineerline{\chi}(x_1+y_2^{-1}\dots y_n^{-1}). {\mathbf{\,e}}nd{align*} The inner sum satisfies the Weil bound as long as $x_2\dots x_n \neq y_2\dots y_n$. Hence, \begin{align*} Q_{\psi, \chi}({\mathcal X}_1, \dots, {\mathcal X}_n)^2 \ll q^{1/2}X_1X_2^2 \dots X_n^2 + qX_1E^\tildeimes_{n-1}({\mathcal X}_2, \dots ,{\mathcal X}_n)^{1/2}. {\mathbf{\,e}}nd{align*} This completes the proof. {\mathbf{\,e}}nd{proof} Taking $E_{n-1}^\tildeimes({\mathcal X}_2, \dots, {\mathcal X}_n) \le X_2X_3^2 \dots X_n^2$ we have the following corollary. \begin{cor} Let ${\mathcal X}_1, \dots, {\mathcal X}_n \in \F_q$ with $X_1\ge X_2\ge \dots \ge X_n$. Then, \begin{align*} Q_{\psi, \chi}({\mathcal X}_1, \dots, {\mathcal X}_n) \ll q^{1/4}X_1^{1/2}X_2\dots X_n + q^{1/2}X_1^{1/2}X_2^{1/2}X_3 \dots X_n. {\mathbf{\,e}}nd{align*} {\mathbf{\,e}}nd{cor} \fi \section{Weyl Sums Over Generalized Arithmetic Progressions} \subsection{Preliminaries} We will require an estimate for the ${\mathbf{\,e}}ll_1$ norm of the Fourier transform of proper generalized arithmetic progressions which is due to Shao~\cite{Shao1}. 
\begin{lemma} \label{lem:genAPf} Let ${\mathcal A}\subseteq \F_p$ be a proper generalized arithmetic progression of rank $r$ and let $\widehat {\mathcal A}(z)$ denote the Fourier transform of ${\mathcal A}$. Then we have \begin{align*} \sum_{z=1}^{p}|\widehat {\mathcal A}(z)|\ll_r p(\log{p})^r. \end{align*} \end{lemma} The following is due to Bourgain~\cite[Theorem~A]{Bour2}. \begin{lemma} \label{lem:bourml} Let $0<\delta<1/4$ and $r\ge 2$. There exists some $\delta'$ such that if $p$ is a sufficiently large prime and $A_1,\dots,A_r\subseteq \F_p$ satisfy \begin{align*} & |A_i|>p^{\delta} \\ &\prod_{i=1}^{r}|A_i|>p^{1+\delta}, \end{align*} then \begin{align*} \left|\sum_{x_1\in A_1,\dots, x_r\in A_r}e_p(x_1\dots x_r) \right|\ll |A_1|\dots |A_r|p^{-\delta'}. \end{align*} \end{lemma} \subsection{Proof of Theorem~\ref{thm:genap}} Considering the sum \begin{align*} S=\sum_{a\in {\mathcal A}}e_p(F(a)), \end{align*} we note that for any $a_2,\dots,a_d\in {\mathcal A}$ \begin{align} \label{eq:Sgap} S&=\frac{1}{p}\sum_{z=1}^{p}\left(\sum_{a\in {\mathcal A}}e_p(-za)\right) \\ & \times\sum_{a_1\in d{\mathcal A}}e_p(F(a_1-a_2-\dots-a_d)+z(a_1-a_2-\dots-a_d)), \end{align} where $d{\mathcal A}$ denotes the $d$-fold sumset \begin{align*} d{\mathcal A}=\{ a_1+\dots+a_d \ : a_i\in {\mathcal A} \}, \end{align*} so that \begin{align} \label{eq:dAbound} |d{\mathcal A}|\ll |{\mathcal A}|.
\end{align} Averaging~\eqref{eq:Sgap} over $a_2,\dots,a_d\in {\mathcal A}$ and using Lemma~\ref{lem:genAPf} gives \begin{align} \label{eq:SthmGenAP} S\ll \frac{1}{p}\sum_{z=1}^{p}|\widehat {\mathcal A}(z)|\frac{|T(z)|}{|{\mathcal A}|^{d-1}}\ll_r \frac{p^{o(1)}|T(z_0)|}{|{\mathcal A}|^{d-1}}, \end{align} for some $z_0\in \F_p$, where \begin{align*} T(z)=\sum_{\substack{a_1\in d{\mathcal A} \\ a_2,\dots,a_d\in {\mathcal A}}}e_p(F(a_1-a_2-\dots-a_d)+z(a_1-a_2-\dots-a_d)). \end{align*} Since $F$ has degree $d$, we may write \begin{align*} F(z)=\sum_{i=0}^{d}b_iz^{i}, \end{align*} and hence \begin{align*} & F(a_1-a_2-\dots-a_d)=\sum_{i=0}^{d}b_i(a_1-a_2-\dots-a_d)^{i} \\ & \quad \quad \quad =F_1(a_2,\dots,a_d)+\dots+F_d(a_1,\dots,a_{d-1})+(-1)^{d-1}b_da_1\dots a_d, \end{align*} for some sequence of polynomials $F_1,\dots,F_d$ where $F_i$ is independent of the variable $a_i$. This implies that \begin{align*} &T(z_0)= \\ & \sum_{\substack{a_1\in d{\mathcal A} \\ a_2,\dots,a_d\in {\mathcal A}}}\omega_1(a_2,\dots,a_d)\dots \omega_d(a_1,\dots,a_{d-1})e_p((-1)^{d-1}b_da_1\dots a_d), \end{align*} for some sequence of weights $\omega_1,\dots,\omega_d$ with $\omega_i$ independent of $a_i$. By Lemma~\ref{lem:SXin} \begin{align*} |T(z_0)|^{2^{d-1}}& \ll_d |{\mathcal A}|^{d2^{d-1}-1} \\ &+|{\mathcal A}|^{d2^{d-1}-2d+1}\sum_{\substack{a_j,a_j'\in {\mathcal A} \\ j\ge 2}}\left|\sum_{a_1\in d{\mathcal A}}e_p(b_da_1(a_2-a_2')\dots (a_d-a_d')) \right|, \end{align*} and by the Cauchy--Schwarz inequality \begin{align*} &|T(z_0)|^{2^{d}}\ll_d |{\mathcal A}|^{d2^{d}-2} \\ & +|{\mathcal A}|^{d2^{d}-2d}\left|\sum_{\substack{a_j,a_j'\in {\mathcal A} \\ j\ge 2}}\sum_{a_1,a_1'\in d{\mathcal A}}e_p(b_d(a_1-a_1')(a_2-a_2')\dots (a_d-a_d')) \right|.
\end{align*} This implies that \begin{align*} |T(z_0)|^{2^{d}}\ll_d |{\mathcal A}|^{d2^{d}-2}+|{\mathcal A}|^{d2^{d}-d}\left|\sum_{\substack{a_j\in {\mathcal A}+c_j \\ j\ge 2}}\sum_{a_1\in d{\mathcal A}+c_1}e_p(b_da_1\dots a_d) \right|, \end{align*} for some $c_1,\dots,c_d\in \F_p$. We note that the assumption~\eqref{eq:genAPcond} implies that the conditions of Lemma~\ref{lem:bourml} are satisfied and hence \begin{align*} |T(z_0)|^{2^{d}}\ll_d |{\mathcal A}|^{d2^{d}}p^{-\delta'}, \end{align*} for some $\delta'>0$ depending on $\varepsilon$ and the result follows from~\eqref{eq:SthmGenAP}. \begin{thebibliography}{9999} \bibitem{Bour} J. Bourgain, {\it Mordell's exponential sum estimate revisited}, J. Amer. Math. Soc. {\bf 18} (2005), no. 2, 477--499. \bibitem{Bour1} \bysame, \emph{Sum-product theorems and exponential sum bounds in residue classes for general modulus}, C. R. Math. Acad. Sci. Paris \textbf{344} (2007), no.~6, 349--352. \MR{2310668} \bibitem{Bour2} \bysame, \emph{Multilinear exponential sums in prime fields under optimal entropy condition on the sources}, Geom. Funct. Anal. \textbf{18} (2009), no.~5, 1477--1502. \MR{2481734} \bibitem{Bour3} \bysame, \emph{Estimates on polynomial exponential sums}, Israel J. Math. \textbf{176} (2010), 221--240. \MR{2653193} \bibitem{Bour4} \bysame, \emph{On exponential sums in finite fields}, An irregular mind, Bolyai Soc. Math. Stud., {\bf 21}, 2010, pp.~219--242. \MR{2815603} \bibitem{BouGar} J.~Bourgain and M.~Z. Garaev, \emph{On a variant of sum-product estimates and explicit exponential sum bounds in prime fields}, Math. Proc. Cambridge Philos. Soc. \textbf{146} (2009), no.~1, 1--21. \MR{2461864} \bibitem{BouGliKon} J.~Bourgain, A. A.~Glibichuck and S. V. Konyagin, \emph{Estimates for the number of sums and products and for exponential sums in fields of prime order}, J. Lond.
Math. Soc. \textbf{2}, (2006), 380--398. \bibitem{BouKon} J. Bourgain, S. Konyagin, {\it Estimates for the number of sums and products and for exponential sums over subgroups in fields of prime order}, C. R. Acad. Sci. Paris {\bf 337} (2003), 75--80. \bibitem{CoCoPi1} T. Cochrane, J. Coffelt, and C. Pinner, \emph{A further refinement of {M}ordell's bound on exponential sums}, Acta Arith. \textbf{116} (2005), no.~1, 35--41. \MR{2114903} \bibitem{CoCoPi2} \bysame, \emph{A system of simultaneous congruences arising from trinomial exponential sums}, J. Th\'eor. Nombres Bordeaux \textbf{18} (2006), no.~1, 59--72. \MR{2245875} \bibitem{CoPi1} T. Cochrane and C. Pinner, \emph{An improved {M}ordell type bound for exponential sums}, Proc. Amer. Math. Soc. \textbf{133} (2005), no.~2, 313--320. \MR{2093050} \bibitem{CoPi2} \bysame, \emph{Using {S}tepanov's method for exponential sums involving rational functions}, J. Number Theory \textbf{116} (2006), no.~2, 270--292. \MR{2195926} \bibitem{CoPi3} \bysame, \emph{Bounds on fewnomial exponential sums over {$\Bbb Z_p$}}, Math. Proc. Cambridge Philos. Soc. \textbf{149} (2010), no.~2, 217--227. \MR{2670213} \bibitem{CoPi4} \bysame, \emph{Explicit bounds on monomial and binomial exponential sums}, Q. J. Math. \textbf{62} (2011), no.~2, 323--349. \MR{2805207} \bibitem{Gar} M.~Z. Garaev, \emph{Sums and products of sets and estimates for rational trigonometric sums in fields of prime order}, Uspekhi Mat. Nauk \textbf{65} (2010), no.~4(394), 5--66. \MR{2759693} \bibitem{HBK} D.R. Heath-Brown and S.V. Konyagin, {\it New bounds for Gauss sums derived from $k$-th powers and for Heilbronn's exponential sum}, Q. J. Math. {\bf 51} (2000), no. 2, 221--235. \bibitem{Mac2} S.~{Macourt}, \emph{{Bounds on exponential sums with quadrinomials}}, J. Number Theory \textbf{193} (2018), 118--127.
\bibitem{Mac1} \bysame, \emph{Incidence results and bounds of trilinear and quadrilinear exponential sums}, SIAM J. Discrete Math. \textbf{32} (2018), no.~2, 815--825. \bibitem{MSS} S.~{Macourt}, I.~D. {Shkredov}, and I.~E. {Shparlinski}, \emph{{Multiplicative energy of shifted subgroups and bounds on exponential sums with trinomials in finite fields}}, Canad. J. Math. \textbf{70} (2018), no.~6, 1319--1338. \bibitem{Mord} L.J. Mordell, {\it On a sum analogous to a Gauss sum}, Q. J. Math. {\bf 3} (1932), 161--162. \bibitem{MPR-NRS} B.~{Murphy}, G.~{Petridis}, O.~{Roche-Newton}, M.~{Rudnev}, and I.~D. {Shkredov}, \emph{{New results on sum-product type growth over fields}}, arXiv:1702.01003 \bibitem{PetShp} G.~{Petridis} and I.~E. {Shparlinski}, \emph{{Bounds on trilinear and quadrilinear exponential sums}}, J. Anal. Math. (to appear). \bibitem{Rud} M. Rudnev, \emph{On the number of incidences between planes and points in three dimensions,} Combinatorica, 2017, doi:10.1007/s00493-016-3329-6. \bibitem{Shao1} X. Shao, {\it On character sums and exponential sums over generalized arithmetic progressions}, Bull. Lond. Math. Soc., {\bf 45}, (3), (2013), 541--550. \bibitem{Shkr} I.~D.~Shkredov, {\it On exponential sums over multiplicative subgroups of medium size,} Finite Fields Appl., {\bf 30} (2014), 72--87. \bibitem{Shkr3} \bysame, \emph{{On asymptotic formulae in some sum-product questions}}, arXiv:1802.09066 \bibitem{Shp1} I.~E. Shparlinski, \emph{On bounds of Gaussian sums}, Mat. Zametki \textbf{50} (1991), 122--130. \bibitem{Weil} Andr\'e Weil, \emph{Basic number theory}, Classics in Mathematics, Springer-Verlag, Berlin, 1995, Reprint of the second (1973) edition. \MR{1344916} \end{thebibliography} \end{document}
\begin{document} \title{The Relationship Between Agnostic Selective Classification, Active Learning and the Disagreement Coefficient} \author{ \name Roei Gelbhart \email [email protected]\\ \addr Department of Computer Science\\ Technion -- Israel Institute of Technology\\ \name Ran El-Yaniv \email [email protected]\\ \addr Department of Computer Science\\ Technion -- Israel Institute of Technology} \maketitle \begin{abstract}A selective classifier $(f,g)$ comprises a classification function $f$ and a binary selection function $g$, which determines if the classifier abstains from prediction, or uses $f$ to predict. The classifier is called pointwise-competitive if it classifies each point identically to the best classifier in hindsight (from the same class), whenever it does not abstain. The quality of such a classifier is quantified by its rejection mass, defined to be the probability mass of the points it rejects. A ``fast'' rejection rate is achieved if the rejection mass is bounded from above by $\tilde{O}(1/m)$ where $m$ is the number of labeled examples used to train the classifier (and $\tilde{O}$ hides logarithmic factors). Pointwise-competitive selective (PCS) classifiers are intimately related to disagreement-based active learning and it is known that in the realizable case, a fast rejection rate of a known PCS algorithm (called Consistent Selective Strategy) is equivalent to an exponential speedup of the well-known CAL active algorithm. We focus on the agnostic setting, for which there is a known algorithm called LESS that learns a PCS classifier and achieves a fast rejection rate (depending on Hanneke's disagreement coefficient) under strong assumptions. We present an improved PCS learning algorithm called ILESS for which we show a fast rate (depending on Hanneke's disagreement coefficient) without any assumptions. Our rejection bound smoothly interpolates the realizable and agnostic settings.
The main result of this paper is an equivalence between the following three entities: (i) the existence of a fast rejection rate for any PCS learning algorithm (such as ILESS); (ii) a poly-logarithmic bound for Hanneke's disagreement coefficient; and (iii) an exponential speedup for a new disagreement-based active learner called {{\textrm{Active-ILESS}}}. \end{abstract} \begin{keywords} Active learning, selective prediction, disagreement coefficient, selective sampling, selective classification, reject option, pointwise-competitive, selective classification, statistical learning theory, PAC learning, sample complexity, agnostic case \end{keywords} \section{Introduction} \label{sec:intro} \textbf{Selective classification} is a unique and extreme instance of the broader concept of confidence-rated prediction \citep{Chow70,VovkGS05,BartlettW08,yuan2010classification,cortes2016boosting,wiener2012pointwise,kocak2016conjugate,zhang2014beyond}. Given a training sample consisting of $m$ labeled instances, the learning algorithm is required to output a \emph{selective classifier} \citep{ElYaniv10}, defined to be a pair $(f,g)$, where $f$ is a prediction function, chosen from some hypothesis class ${\cal F}$, and $g: {\cal X} \to \{0,1\}$ is a \emph{selection function}, serving as a qualifier for $f$ as follows: for any $x$, if $g(x) =1$, the classifier predicts $f(x)$, and otherwise it abstains. The general performance of a selective classifier is quantified in terms of its \emph{coverage} and \emph{risk}, where coverage is the probabilistic mass of non-rejected instances, and risk is the normalized average loss of $f$ restricted to non-rejected instances. Let $f^*$ be any (unknown) true risk minimizer\footnote{We assume that there exists an $f^{*}$ in ${\cal F}$. 
Otherwise, we can artificially define $f^*$ to be any function whose risk is sufficiently close to $\inf_{f \in {\cal F}}(R(f))$, for instance, not greater than a small multiplicative factor from this infimum.} in ${\cal F}$ for the given problem. The selective classifier $(f,g)$ is said to be \emph{pointwise-competitive} if, for each $x$ with $g(x)=1$, it must hold that $f(x) =f^*(x)$ for all $f^* \in {\cal F}$ \cite{WienerE14}. Thus, pointwise-competitiveness w.h.p. over choices of the training sample, is a highly desirable property: it guarantees, for each non-rejected test point, the best possible classification obtainable using the best in-hindsight classifier from $\mathcal{F}$. We don't restrict $g$ to be from any specific hypothesis class, however, because we use disagreement based selective prediction, the selection of $\mathcal{F}$ will limit the possibilities of $g$. The scenario of a predefined decision functions hypothesis class is investigated in \cite{cortes2016learning}. Pointwise-competitive selective classification ({\rm PCS}) was first considered in the realizable case \citep{ElYaniv10}, for which a simple consistent selective strategy (CSS) was shown to achieve a bounded and monotonically increasing (with $m$) coverage in various non-trivial settings. Note that in the realizable case, any {\rm PCS} strategy attains zero risk (over the sub-domain it covers). These results were recently extended to the agnostic setting \citep{WienerE14,wiener2011agnostic} with a related but different algorithm called \emph{low-error selective strategy (LESS)}, for which a number of coverage bounds were shown. These bounds relied on the fact that the underlying probability distribution and the hypothesis class ${\cal F}$ will satisfy the so-called ``$(\beta_1,\beta_2)$-Bernstein property'' \cite{BarMenPhi04}. The coverage bounds in \citep{WienerE14,wiener2011agnostic} are dependent on the parameters $\beta_1,\beta_2$. 
This Bernstein property assumption (as presented in \cite{BarMenPhi04}), which allows for better concentration, can be problematic. First, it is defined with respect to a unique true risk minimizer $f^*$, a property which is unlikely to hold in noisy agnostic settings. Moreover, for arbitrary ${\cal F}$, even for the 0/1 loss function, it is not necessarily known whether the Bernstein property can hold at all.\footnote{It was mentioned in \cite{WienerE14} that, under the Tsybakov noise condition \cite{Tsybakov04}, the desired property holds, but this is guaranteed only for cases in which the Bayes classifier is within ${\cal F}$, which is a fairly strong assumption in itself.} We removed the Bernstein assumption from our analysis. Assuming that a selective classifier is w.h.p. pointwise-competitive, our key goal is a small rejection rate. We will say that a learner has a \textbf{fast $R^*$ rejection rate}, if w.h.p. the rejection rate is bounded by $$ {\rm polylog}(\frac{1}{R(f^*)+1/m}) \cdot R(f^*) + \frac{{\rm polylog}(m,d,1/\delta)}{m}. $$ Selective classification is very closely related to the field of \textbf{active learning (AL)}. In active learning, the learner can actively influence the learning process by selecting the points to be labeled. The incentive for introducing this extra flexibility is to reduce labeling efforts. A key question in theoretical studies of AL is how many label requests are sufficient to learn a given (unknown) target concept to a specified accuracy, a quantity called \emph{label complexity}. For an AL algorithm satisfying the ``passive example complexity'' property (consuming the same number of labeled/unlabeled examples as a passive algorithm for achieving the same error; see Definition \ref{passive example complexity}), we will say it has \textbf{$R^*$ exponential speedup}, if w.h.p. the number of labels it requests is bounded by $$ {\rm polylog}(\frac{1}{R(f^*)+1/m}) \cdot R(f^*)m+ {\rm polylog}(m,d,1/\delta). 
$$ The connection between AL and confidence-rated prediction is quite intuitive. A (pointwise-competitive) selective classifier $P$ can be straightforwardly used as the querying component of an active learning algorithm. This reduction is most naturally demonstrated in the stream-based AL model: at each iteration, the active algorithm trains a selective classifier on the currently available labeled samples, and then decides to query a newly introduced (unlabeled) point $x$ if $P$ abstains on $x$. Hanneke's \textbf{disagreement coefficient} \cite{Hanneke07} (see Definition~\ref{disagreementCoefficient}), is a well-known parameter of the hypothesis class and the marginal distribution; it is used in most of the known label complexity bounds \cite{hsu:thesis, Hanneke07, ailon2011active}. The disagreement coefficient is the supremum, over $r$, of the ratio between the disagreement mass of the functions that are $r$-distanced from $f^*$ and $r$. PCS classification is based on using generalization bounds to estimate the empirical error of $f^*$, and more specifically, its distance from the empirical error of the ERM. Whenever there is a unanimous agreement of all the functions that reside within a ball around the ERM, the classifier chooses to classify. Thus, the abstain rate is dependent on the disagreement mass of the functions within the ball. The radius of the ball depends on the generalization bounds. The generalization bounds we use are of the form $\tilde{O}(1/m)$ for the realizable case (we consider the realizable case here for simplicity). After observing $m$ examples, we can bound the disagreement mass of a ball around the ERM, by multiplying the radius of the ball, which is $\tilde{O}(1/m)$, with the disagreement coefficient. Thus, if for example, the disagreement coefficient is bounded by a constant, the abstain rate of some PCS algorithms can be bounded by $\tilde{O}(1/m)$ for the realizable case.
This gives a basic idea of the disagreement coefficient, which will be formally presented later on. Note that in principle, the disagreement coefficient can be replaced by another important quantity, namely, the \textbf{version space compression set size}, recently shown to be equivalent to it \cite{JMLR:v16:wiener15a,el2015version}. Specifically, an $O({\rm polylog}(m)\log(1/\delta))$ version space compression set size minimal bound was shown in \cite[Corollary 11]{JMLR:v16:wiener15a} to be equivalent to an $O({\rm polylog}(1/r))$ disagreement coefficient. The first contribution of this paper is a novel selective classifier, called {{\rm{ILESS}}}, which utilizes a tighter generalization error bound than LESS and depends on $R(f^*)$ (and interpolates the agnostic and realizable cases). Most importantly, the new strategy can be analyzed completely without the Bernstein condition. We derive an active learning algorithm, called {{\textrm{Active-ILESS}}}, corresponding to our selective classifier, {{\rm{ILESS}}}. {{\textrm{Active-ILESS}}} is constructed to work in a stream-based AL model and its querying function is extremely conservative: for each unlabeled example, the algorithm requests its label if and only if the labeling of the optimal classifier (from the same class) on this point cannot be inferred from information already acquired. This querying strategy, which is often termed ``disagreement-based,'' has been used in a number of stream-based AL algorithms such as $A^2$ (\emph{Agnostic Active}), developed in \cite{balcan2006agnostic}, RobustCAL, studied by the authors of \cite{Hanneke11,Hanneke_book} and \cite{hanneke:12b}, or the general agnostic AL algorithm of \cite{dasgupta2007general}. A computationally efficient algorithm for disagreement-based AL was presented in \cite{NIPS2015_5939}.
The first formal relationship between {\rm PCS} classification and AL was proposed in \citep{el2012active,Wiener13}, where the aforementioned CSS algorithm was shown to be equivalent to the well-known CAL AL algorithm of \cite{CohAtlLad94}, in the sense that a fast coverage rate for CSS was proven to be equivalent to exponential label complexity speedup for CAL. This result applies to the realizable setting only. Our first contribution is a similar equivalence relation between pointwise-competitive selective classification and AL, which applies to the more challenging agnostic case and smoothly interpolates the realizable and agnostic settings. Our second and main contribution is to show a complete equivalence between (i) selective classification with a fast $R^*$ rejection rate, (ii) AL with $R^*$ exponential speedup (represented by {{\textrm{Active-ILESS}}}), and (iii) the existence of an $f^*$ with a bounded disagreement coefficient. This is illustrated in Figure~\ref{figure1}, where the blue arrows indicate the equivalence relationships we prove in this paper, and the red arrow indicates a previously known result (from \cite{hsu:thesis, Hanneke07}), which can also be deduced from the other arrows. \begin{figure} \caption{Main results.} \label{figure1} \end{figure} \section{Definitions} \label{sec:definitions} Consider a domain $\mathcal{X}$, and a binary label set $\mathcal{Y} = \{\pm 1\}$. A learning problem is specified via a hypothesis class $\mathcal{F}$ and an unknown probability distribution $\mathcal{P_{X,Y}}$. Given a sequence of labeled training examples $S_{m}=((x_{1},y_{1}),(x_{2},y_{2}),...,(x_{m},y_{m}))$, such that $\forall i,(x_{i},y_{i})\in \mathcal{X}\times \mathcal{Y}$, the empirical error of a hypothesis $f$ over $S_m$ is $\hat{R}(f,S_{m}) \triangleq \frac{1}{m}\sum_{i=1}^m \ell(f(x_i), y_i)$, where $\ell\colon \mathcal{Y} \times \mathcal{Y} \to \mathbb{R}^+$ is a loss function.
In this paper we will mainly focus on the zero-one loss function, $\ell_{01}(y,y') \triangleq \mathbbm{1}\{y \neq y'\}$. The true (zero-one) error of $f$ is $R(f) \triangleq \mathbb{E}_{\mathcal{P}}\left[\ell_{01} (f(x),y) \right]$. An empirical risk minimizer hypothesis (henceforth an ERM) is \begin{equation} \hat{f}(S_{m}) \triangleq \mathop{\rm argmin}_{f \in {\cal F}} \hat{R}(f,S_{m}) , \end{equation} and a true risk minimizer is $f^* \triangleq \mathop{\rm argmin}_{f \in {\cal F}} R(f)$.\footnote{We assume that $f^{*}$ exists, and that it need not be unique, in which case $f^{*}$ refers to any one of the minimizers.} We acquire the following definitions from \cite{WienerE14}. For any hypothesis class ${\cal F}$, hypothesis $f \in {\cal F}$, distribution $\mathcal{P_{X,Y}}$, sample $S_m$, and real number $r>0$, define the true and empirical \emph{low-error sets}, \begin{equation} \label{eq:V} {\cal V}(f,r) \triangleq \left\{f' \in {\cal F} : R(f') \leq R(f) + r \right\} \end{equation} and \begin{equation} \label{eq:Vhat} \hat{{\cal V}}(f,r) \triangleq \left\{f' \in {\cal F} : \hat{R}(f',S_{m}) \leq \hat{R}(f,S_{m}) + r \right\}. \end{equation} Let $G \subseteq {\cal F}$. The \emph{disagreement set} \cite{Hanneke07} and \emph{agreement set} \cite{ElYaniv10} w.r.t. $G$ are defined, respectively, as \begin{equation} \label{eq:dis} DIS(G) \triangleq \left\{x \in {\cal X} : \exists f_1,f_2 \in G \quad \text{s.t.} \quad f_1(x) \neq f_2(x)\right\} \end{equation} \begin{equation} \label{eq:agr} \text{and} \ \ AGR(G) \triangleq \left\{x \in {\cal X} : \forall f_1,f_2 \in G \quad \text{s.t.} \quad f_1(x) = f_2(x)\right\}. \end{equation} In \emph{selective classification} \cite{ElYaniv10}, the learning algorithm receives $S_m$ and is required to output a \emph{selective classifier}, defined to be a pair $(f,g)$, where $f \in {\cal F}$ is a classifier, and $g: {\cal X} \to \{0,1\}$ is a \emph{selection function}, serving as a qualifier for $f$ as follows. 
For any $x \in {\cal X}$, $(f,g)(x) = f(x)$ iff $g(x) =1$. Otherwise, the classifier outputs ``I don't know''. For any selective classifier $(f,g)$ we define its coverage to be $$ \Phi(f,g) \triangleq \Pr_{X \sim \mathcal{P_{X}}}(g(X)=1), $$ and its complement, $1- \Phi$, is called the \textbf{abstain rate}. For any $f \in {\cal F}$ and $r > 0$, define the set $B(f,r)$ of all hypotheses that reside within a ball of radius $r$ around $f$, $$ B(f,r) \triangleq \left\{f' \in {\cal F} : \Pr_{X \sim \mathcal{P_{X}}}\left\{f'(X) \neq f(X)\right\} \leq r \right\}. $$ For any $G \subseteq {\cal F}$, and distribution $\mathcal{P_X}$, we denote by $\Delta G$ the volume of the disagreement set of $G$ (see (\ref{eq:dis})), $\Delta G \triangleq \Pr\left\{DIS(G)\right\}$. \begin{defi} [Disagreement Coefficient] \label{disagreementCoefficient} Let $r_0 \geq 0$. Then, Hanneke's \emph{disagreement coefficient} \cite{Hanneke07} of a classifier $f \in {\cal F}$ with respect to the target distribution $\mathcal{P_X}$ is \begin{equation} \label{eq:disagreementCoefficient1} \theta_{f}(r_0)\triangleq\sup_{r>r_0}\frac{\Delta B(f,r)}{r}, \end{equation} and the general \emph{disagreement coefficient} of the entire hypothesis class ${\cal F}$ is \begin{equation} \label{eq:disagreementCoefficient} \theta(r_0) \triangleq \sup_{f \in {\cal F}}\theta_{f}(r_0). \end{equation} \end{defi} Notice that this definition of the disagreement coefficient is independent of $\mathcal{P_{Y|X}}$. Another commonly used definition of the disagreement coefficient does depend on a true risk minimizer $f^*$, as follows: \begin{equation} \label{eq:disagreementCoefficient2} \theta'(r_0) = \sup_{r>r_0}\frac{\Delta B(f^*,r)}{r}. \end{equation} Clearly, it always holds that $\theta' \leq \theta$.
The independence of $\theta$ of unknown quantities such as the underlying distribution (and $f^*$), however, is a convenient property that sometimes allows for a direct estimation of $\theta$, which only depends on the marginal distribution, $\mathcal{P_X}$. This is, for example, the case in active learning, where labels are expensive but information about the marginal distribution (provided by unlabeled examples) is cheap. Note also that the above definition of $\theta'$ implicitly assumes a unique $f^*$. Nevertheless, the definition can be extended to cases where $f^*$ is not unique, in which case the infimum over all $f^*$ can be considered (the analysis can be extended accordingly using limits). For more on the disagreement coefficient, and examples of probabilities distributions and hypothesis classes for which it is bounded, see \cite{Hanneke_book}. \section{Convergence Bounds and LESS} We use a uniform convergence bound from \cite{dasgupta2007general,BousquetBL03}. Define convergence slacks $\sigma_{R-\hat{R}}(m,\delta,d,R,\hat{R})$ and $\sigma_{\hat{R}-R}(m,\delta,d,R,\hat{R})$, given in terms of the training sample, $S_{m}$, its size, $m$, the confidence parameter, $\delta$, and the VC-dimension $d$ of the class ${\cal F}$. 
For any $f \in {\cal F}$, \begin{equation} \label{eq:slackOne} \sigma_{R-\hat{R}}(m,\delta,d,R,\hat{R}) \triangleq \min \{\underbrace{\frac{4d \ln( \frac{16me}{d\delta} )}{m} + \sqrt{\frac{4d \ln( \frac{16me}{d\delta} )}{m} \cdot \hat{R}}}_{\hat{\sigma}_{R-\hat{R}}(m,\delta,d,\hat{R})} ,\underbrace{ \sqrt{\frac{4d \ln( \frac{16me}{d\delta} )}{m} \cdot R}}_{\bar{\sigma}_{R-\hat{R}}(m,\delta,d,R)} \} \end{equation} and \begin{equation} \sigma_{\hat{R}-R}(m,\delta,d,R,\hat{R}) \triangleq \min \{\underbrace{ \frac{4d \ln( \frac{16me}{d\delta} )}{m} + \sqrt{\frac{4d \ln( \frac{16me}{d\delta} )}{m} \cdot R}}_{\bar{\sigma}_{\hat{R}-R}(m,\delta,d,R)},\underbrace{ \sqrt{\frac{4d \ln( \frac{16me}{d\delta} )}{m} \cdot \hat{R}}}_{\hat{\sigma}_{\hat{R}-R}(m,\delta,d,\hat{R})} \}. \end{equation} To simplify the analysis, we further decompose the above slack terms to their empirical and non-empirical components. For~(\ref{eq:slackOne}), we thus have, respectively, \begin{equation} \hat{\sigma}_{R-\hat{R}}(m,\delta,d,\hat{R}) \triangleq \frac{4d \ln( \frac{16me}{d\delta} )}{m} + \sqrt{\frac{4d \ln( \frac{16me}{d\delta} )}{m} \cdot \hat{R}} \end{equation} and \begin{equation} \hat{\sigma}_{\hat{R}-R}(m,\delta,d,\hat{R}) \triangleq \sqrt{\frac{4d \ln( \frac{16me}{d\delta} )}{m} \cdot \hat{R}} . \end{equation} Similarly, the non-empirical part in these minimums are denoted by $\bar{\sigma}_{R-\hat{R}}$ and $\bar{\sigma}_{\hat{R}-R}$. With this notation, we can write, for example, $\sigma_{R-\hat{R}} = \min \{ \hat{\sigma}_{R-\hat{R}} ,\bar{\sigma}_{R-\hat{R}} \}$. Our Lemma \ref{lemma:agnostic} is taken from \cite[Lemma 1]{dasgupta2007general}, which is based on \cite[Theorem 7]{BousquetBL03} \footnote{In the original lemma from \cite{dasgupta2007general}, there appears $S(\mathcal{H},n)$, the growth function. We plug in Sauer's Lemma, $S(\mathcal{H},n)\leq (\frac{em}{d})^d$, into Lemma 1 from \cite{dasgupta2007general} to get our lemma.}. 
\begin{lemma} [\cite{dasgupta2007general}] \label{lemma:agnostic} Let $\mathcal{F}$ be a hypothesis class with VC-dimension $d$. For any $0<\delta < 1$, with probability of at least $1-\delta$ over the choice of $S_m$ from $\mathcal{P}^m$, any hypothesis $f \in {\cal F}$ satisfies \begin{equation} R(f) \leq \hat{R}(f) + \sigma_{R-\hat{R}}\left(m,\delta,d,R(f),\hat{R}(f)\right) \label{eq:bound1} \end{equation} \begin{equation} \hat{R}(f) \leq R(f) + \sigma_{\hat{R}-R}\left(m,\delta,d,R(f),\hat{R}(f)\right). \label{eq:bound2} \end{equation} \end{lemma} Strategy~\ref{alg:SemanticSort} is the $\rm LESS$ algorithm of \cite{WienerE14}. $\rm LESS$ learns w.h.p. a pointwise-competitive selective classifier, $(f,g)$, where $f \in \mathcal{F}$ and $g: {\cal X} \to \{0,1\}$ is its selection function which determines whether to abstain or to classify. A \emph{pointwise-competitive selective classifier} must satisfy the following condition: for each $x$ with $g(x)=1$, it must hold that $f(x) =f^*(x)$ for all $f^* \in {\cal F}$. \begin{remark} The original definition of pointwise-competitiveness from \cite{WienerE14} requires a single $f^*$. We widen the definition to cases for which there are more than one $f^*$, and require that a pointwise-competitive selective classifier will be equal to \emph{all} $f^*$, wherever $g=1$. This extrapolation seems a bit strict. However, even if the requirement would have been relaxed to ``any $f^*$'', any pointwise-competitive selective classifier would still have been forced to identify with all $f^*$, as it is impossible to differentiate whether a set of functions are all $f^*$, or one is better than the rest. \end{remark} The main idea behind $\rm LESS$ is that, w.h.p. 
all $f^{*}$ lie within a ball around an ERM hypothesis with error radius of $2\sigma(m,\delta / 4,d)$, where \begin{equation} \label{eq:sigma} \sigma(m,\delta,d) \triangleq 2\sqrt{\frac{2d\left(\ln\frac{2me}{d}\right)+\ln{\frac{2}{\delta}}}{m}} \end{equation} is the slack term of a certain uniform convergence bound. Therefore, if all the functions in that ball agree over the labeling of any instance $x$, we know with high probability that all $f^{*}$ label $x$ the same way as the ERM. This property ensures that $\rm LESS$ is pointwise-competitive w.h.p. \begin{strategy} \caption{Agnostic low-error selective strategy (\rm LESS)} \label{alg:one} \footnotesize { \begin{algorithmic}[1] \REQUIRE Sample set of size $m$, $S_m$,\\ Confidence level $\delta$\\ Hypothesis class $\mathcal{F}$ with VC dimension $d$\\ \ENSURE A selective classifier $(f,g)$ \STATE Set $\hat{f} = ERM({\cal F},S_m)$, i.e., $\hat{f}$ is any empirical risk minimizer from ${\cal F}$ \STATE Set $G = \hat{{\cal V}}\left(\hat{f}, 2\sigma(m,\delta / 4,d) \right)$ \STATE Construct $g$ such that $g(x) = 1 \Longleftrightarrow x \in \left\{{\cal X} \setminus DIS\left(G\right)\right\}$ \STATE $f = \hat{f}$ \end{algorithmic} } \label{alg:SemanticSort} \end{strategy} \section{ILESS} \label{sec:iLESS} \begin{strategy} \caption{Improved Low-Error Selective Strategy ({\rm{ILESS}})} \label{alg:iLESS} \footnotesize { \begin{algorithmic}[1] \REQUIRE Sample set of size $m$, $S_m$,\\ Confidence level $\delta$\\ Hypothesis class $\mathcal{F}$ with VC dimension $d$\\ \ENSURE A selective classifier $(h,g)$ \STATE Set $\hat{f} = ERM({\cal F},S_m)$, i.e., $\hat{f}$ is any empirical risk minimizer from ${\cal F}$ \STATE Set $\sigma_{{\rm{ILESS}}}= \hat{\sigma}_{R-\hat{R}} \left(m,\delta,d,\hat{R}(\hat{f},S_m) \right) + \bar{\sigma}_{\hat{R}-R} \left(m,\delta,d,\hat{R}(\hat{f},S_m)+\hat{\sigma}_{R-\hat{R}}(m,\delta,d,\hat{R}(\hat{f},S_m)) \right)$ \STATE Set $G = \hat{{\cal V}}\left(\hat{f},
\sigma_{{\rm{ILESS}}} \right)$ \STATE Construct $g$ such that $g(x) = 1 \Longleftrightarrow x \in \left\{{\cal X} \setminus DIS\left(G\right)\right\}$ \STATE $h = \hat{f}$ \end{algorithmic} } \label{alg:LESS-sigma} \end{strategy} In this section we introduce an improved version of $\rm LESS$, called ${\rm{ILESS}}$, which utilizes a radius of the form ${\rm polylog}(m,1/\delta ,d)\cdot(\frac{1}{m}+\sqrt{\frac{R(f^{*})}{m}})$. Noting that the radius, $2\sigma(m,\delta / 4,d)$, used by $\rm LESS$ to define $G = \hat{{\cal V}}$, is of the form ${\rm polylog}(m,1/\delta ,d) / \sqrt{m}$, we observe that in cases where $R(f^{*}) \approx \frac{C}{m}$, this new radius behaves as $\frac{{\rm polylog}(m,1/\delta ,d)}{m}$. We later show that this radius allows ${\rm{ILESS}}$ to achieve a faster rejection decay rate than the one achieved by $\rm LESS$. Consider the pseudo-code of ${\rm{ILESS}}$ given in Strategy~\ref{alg:iLESS}. We now analyze ${\rm{ILESS}}$, and begin by showing in Lemma \ref{thm:f*in_LESS} that ${\rm{ILESS}}$ is pointwise-competitive w.h.p., i.e., for any $x$ for which $g(x) = 1$, $f(x) = f^*(x)$ for all $f^*$. The calculation of $g$ appears to be very problematic, as for a specific $x$, a unanimous decision over an infinite number of functions must be ensured. This problem was shown to be reducible to finding an ERM under one constraint (\cite[Lemma 6.1]{el2011agnostic} a.k.a. the disbelief principle). This remains a difficult problem, albeit one that can be approximated with heuristics. \begin{defi} \label{E} Let $\mathcal{F}$ be a hypothesis class with a finite VC dimension $d$, and let $\mathcal{P_{X,Y}}$ be an unknown probability distribution. Given a sample set $S_m$, drawn from $\mathcal{P_{X,Y}}$, we denote by $\cal E$ the event where both inequalities (\ref{eq:bound1}) and (\ref{eq:bound2}) of Lemma~\ref{lemma:agnostic} simultaneously hold. We know from the lemma that $\cal E$ occurs with probability of at least $1-\delta$.
\end{defi} \begin{lemma}[${\rm{ILESS}}$ is pointwise-competitive] \label{thm:f*in_LESS} Given that event $\cal E$ occurred (see Definition \ref{E}), for all $f^* \in \mathcal{F}$, $f^*$ resides within $G$ (from Strategy~\ref{alg:LESS-sigma}), and therefore, ${\rm{ILESS}}$ is pointwise-competitive w.h.p. \end{lemma} \begin{proof} From (\ref{eq:bound2}) it follows that, \begin{eqnarray} \label{u1} \hat{R}(f^{*},S_m) &\leq& R(f^{*})+\sigma_{\hat{R}-R}(m,\delta,d,R(f^{*}),\hat{R}(f^{*},S_m)) \nonumber \\ &\leq& R(f^{*})+\bar{\sigma}_{\hat{R}-R}(m,\delta,d,R(f^{*})). \end{eqnarray} Additionally, by the definition of $f^{*}$, we know that it has the lowest true error, and using Inequality~(\ref{eq:bound1}) from Lemma~\ref{lemma:agnostic} we obtain, \begin{eqnarray} R(f^{*}) &\leq& R(\hat{f}) \nonumber \\ &\leq& \hat{R}(\hat{f},S_m) +\sigma_{R-\hat{R}}(m,\delta,d,R(\hat{f}),\hat{R}(\hat{f},S_m)) \nonumber \\ &\leq&\hat{R}(\hat{f},S_m) +\hat{\sigma}_{R-\hat{R}}(m,\delta,d,\hat{R}(\hat{f},S_m)). \label{u2} \end{eqnarray} Finally, by applying (\ref{u2}) in (\ref{u1}), we have, \begin{multline} \hat{R}(f^{*},S_m) \leq \hat{R}(\hat{f},S_m) +\hat{\sigma}_{R-\hat{R}}(m,\delta,d,\hat{R}(\hat{f},S_m)) \\+\bar{\sigma}_{\hat{R}-R}\left(m,\delta,d,\hat{R}(\hat{f},S_m) +\hat{\sigma}_{R-\hat{R}}(m,\delta,d,\hat{R}(\hat{f},S_m))\right) \nonumber , \end{multline} which means that $ f^{*}\in G$. \end{proof} Lemma~\ref{lemma:radius} below bounds the radius $\sigma_{{\rm{ILESS}}}$ of ${\rm{ILESS}}$.
The lemma utilizes the notation $$ A \triangleq 4d \ln( \frac{16me}{d\delta} ), $$ with which, by the definition of $\sigma_{{\rm{ILESS}}}$ (see Strategy~\ref{alg:iLESS}), we have, \begin{eqnarray} \sigma_{{\rm{ILESS}}} &=& \hat{\sigma}_{R-\hat{R}}(m,\delta,d,\hat{R}(\hat{f},S_m)) +\bar{\sigma}_{\hat{R}-R}\left(m,\delta,d,\hat{R}(\hat{f},S_m) +\hat{\sigma}_{R-\hat{R}}(m,\delta,d,\hat{R}(\hat{f},S_m))\right) \nonumber \\ &=& \frac{A}{m} + \sqrt{\frac{A}{m} \cdot \hat{R}(\hat{f},S_m)} + \frac{A}{m} + \sqrt{\frac{A}{m} \cdot [\hat{R}(\hat{f},S_m) + \frac{A}{m} + \sqrt{\frac{A}{m} \cdot \hat{R}(\hat{f},S_m)} ]} . \nonumber \\ \label{t5} \end{eqnarray} \begin{lemma} \label{lemma:radius} Given that event $\cal E$ (see Definition \ref{E}) occurred, the radius of ${\rm{ILESS}}$ satisfies \begin{eqnarray} \sigma_{{\rm{ILESS}}}\leq 6\frac{A}{m} + 3\sqrt{\frac{A}{m} \cdot R(f^{*})}=O(\frac{A}{m}+ \sqrt{\frac{A}{m} \cdot R(f^{*})} ), \end{eqnarray} where \begin{math} A \triangleq 4d \ln( \frac{16me}{d\delta} ). \end{math} \end{lemma} \begin{proof} Under our assumption, inequalities (\ref{eq:bound1}) and (\ref{eq:bound2}) hold for every $f \in {\cal F}$. We thus have \begin{equation} \hat{R}(\hat{f},S_m) \leq \hat{R}(f^*,S_m) \leq R(f^{*})+ \frac{A}{m} + \sqrt{\frac{A}{m} \cdot R(f^{*})}. \label{t6} \end{equation} Replacing the three occurrences of $\hat{R}(\hat{f},S_m)$ in~(\ref{t5}) with the R.H.S.
of (\ref{t6}), and using the basic inequalities $\sqrt{A+B} \leq \sqrt{A} + \sqrt{B}$ and $\sqrt{AB} \leq A/2 + B/2$, we get, \begin{eqnarray} \sigma_{{\rm{ILESS}}} &\leq& \frac{A}{m} + \sqrt{\frac{A}{m} \cdot\left(R(f^{*})+ \frac{A}{m} + \sqrt{\frac{A}{m} \cdot R(f^{*})}\right)} + \frac{A}{m} + \nonumber \\ &&+ \sqrt{\frac{A}{m} \cdot \left[{R(f^{*})+ \frac{A}{m} + \sqrt{\frac{A}{m} \cdot R(f^{*})}} + \frac{A}{m} + \sqrt{\frac{A}{m} \cdot \left(R(f^{*})+ \frac{A}{m} + \sqrt{\frac{A}{m} \cdot R(f^{*})}\right)} \right]} \nonumber \\ &\leq& \frac{A}{m} + \sqrt{\frac{A}{m} \cdot\left(R(f^{*})+ \frac{A}{m} + \frac{A}{2m} + \frac{1}{2}R(f^{*})\right)} + \frac{A}{m} + \nonumber \\ &&+ \sqrt{\frac{A}{m} \cdot \left[R(f^{*})+ \frac{A}{m} + \frac{A}{2m} + \frac{1}{2}R(f^{*}) + \frac{A}{m} + \sqrt{\frac{A}{m} \cdot \left(R(f^{*})+ \frac{A}{m} + \frac{A}{2m} + \frac{1}{2}R(f^{*})\right)} \right] } \nonumber \\ &\leq& \frac{2A}{m} + \frac{3A}{2m} + \frac{3}{2}\sqrt{\frac{A}{m} \cdot R(f^{*})}+ \sqrt{\frac{A}{m} \cdot \left[ \frac{5A}{2m} + \frac{3}{2}R(f^{*}) + \sqrt{\frac{A}{m} \cdot \left(\frac{3A}{2m} + \frac{3}{2}R(f^{*})\right)} \right] } \nonumber \\ &\leq& \frac{7A}{2m} + \frac{3}{2}\sqrt{\frac{A}{m} \cdot R(f^{*})}+ \sqrt{\frac{A}{m} \cdot \left[ \frac{5A}{2m} + \frac{3}{2}R(f^{*}) + \frac{3A}{2m} +\sqrt{\frac{A}{m} \cdot \frac{3}{2}R(f^{*})} \right] } \nonumber \\ &\leq& \frac{7A}{2m} + \frac{3}{2}\sqrt{\frac{A}{m} \cdot R(f^{*})}+ \sqrt{\frac{A}{m} \cdot \left[ \frac{5A}{2m} + \frac{3}{2}R(f^{*}) + \frac{3A}{2m} +\frac{3A}{4m} + \frac{3}{4}R(f^{*}) \right]} \nonumber \\ &\leq& \frac{7A}{2m} + \frac{3}{2}\sqrt{\frac{A}{m} \cdot R(f^{*})}+ \sqrt{\frac{19}{4}}\frac{A}{m}+ \sqrt{\frac{A}{m} \cdot\frac{9}{4}R(f^{*})} \nonumber \\ &\leq& 6\frac{A}{m} + 3\sqrt{\frac{A}{m} \cdot R(f^{*})} \label{t7}. \end{eqnarray} \end{proof} In comparison, the radius of $\rm LESS$ is of order $O(\sqrt{\frac{A}{m}})$, which can be significantly larger when $R(f^{*})$ is small. 
This potential radius advantage translates to a potential coverage advantage of ${\rm{ILESS}}$, as stated in the following theorem. \begin{theorem} \label{thm:LessRejection} Let $\mathcal{F}$ be a hypothesis class with a finite VC dimension $d$, and let $\mathcal{P_{X,Y}}$ be an unknown probability distribution. Given that event $\cal E$ (see Definition \ref{E}) occurred, for all $f^*$, the abstain rate is bounded by $$ 1-\Phi({\rm{ILESS}}) \leq \theta_{f^*}(R_{0}) \cdot R_{0}, $$ where $$ R_{0} \triangleq 2\cdot R(f^{*}) + 11 \cdot \frac{A}{m} + 6 \cdot \sqrt{\frac{A}{m} \cdot R(f^{*})}. $$ This immediately implies (by definition) that $$ 1-\Phi({\rm{ILESS}}) \leq \theta(R_{0}) \cdot R_{0}. $$ \end{theorem} \begin{remark} Note that $R_0=O(R(f^{*}) + \frac{A}{m} )$ due to $\sqrt{\frac{A}{m} \cdot R(f^{*})}\leq\frac{1}{2}(\frac{A}{m}+R(f^{*}))$. \end{remark} \begin{proof} We start by showing that $G$, defined in Strategy \ref{alg:iLESS}, resides within a ball around any specific $f^{*}$. To do so, we need to bound the true error of all functions in $G$. \begin{eqnarray} f \in G &\Rightarrow& \hat{R}(f,S_m)\leq\hat{R}(\hat{f},S_m) + \sigma_{{\rm{ILESS}}} \label{e18}\\ &\Rightarrow& \hat{R}(f,S_m) \leq R(f^{*})+ \frac{A}{m} + \sqrt{\frac{A}{m} \cdot R(f^{*})} + 6\frac{A}{m} + 3\sqrt{\frac{A}{m} \cdot R(f^{*})} \label{e19}\\ &\Rightarrow& \hat{R}(f,S_m) \leq R(f^{*})+7\cdot \frac{A}{m}+4\cdot \sqrt{\frac{A}{m} \cdot R(f^{*})} \label{e20}, \end{eqnarray} where inequality (\ref{e18}) is by the definition of $G$, and inequality~(\ref{e19}) follows from (\ref{t6}) and (\ref{t7}) (under event ${\cal E}$).
We then have, \begin{eqnarray} R(f) &\leq& \hat{R}(f,S_m) + \hat{\sigma}_{R-\hat{R}}(m,\delta,d,\hat{R}) \label{e21} \\ &\leq& \hat{R}(f,S_m) + \frac{A}{m} + \sqrt{\frac{A}{m} \cdot \hat{R}(f,S_m)} \label{e22}\\ &\leq& R(f^{*})+8\cdot \frac{A}{m}+4\cdot \sqrt{\frac{A}{m} \cdot R(f^{*})}+ \sqrt{\frac{A}{m}\cdot \left[ R(f^{*})+7\cdot \frac{A}{m}+4\cdot \sqrt{\frac{A}{m} \cdot R(f^{*})} \right]} \label{e23}\\ &\leq& R(f^{*})+8\cdot \frac{A}{m}+4\cdot \sqrt{\frac{A}{m} \cdot R(f^{*})}+ \sqrt{\frac{A}{m}\cdot \left[ 3R(f^{*})+9\cdot \frac{A}{m} \right]} \label{e24}\\ &\leq& R(f^{*})+11\cdot \frac{A}{m}+6\cdot \sqrt{\frac{A}{m} \cdot R(f^{*})}, \label{e25} \end{eqnarray} where inequality~(\ref{e21}) is (\ref{eq:bound1}) (which holds given $\cal E$), inequality~(\ref{e22}) follows directly from the definition of $\hat{\sigma}_{R-\hat{R}}$, inequality~(\ref{e23}) is obtained using (\ref{e20}), inequality (\ref{e24}) follows from $\sqrt{AB} \leq A/2 + B/2$, and~(\ref{e25}) from $\sqrt{A+B} \leq \sqrt{A} + \sqrt{B}$. Using (\ref{e25}), for all $f \in G$, and any $f^*$ we have, \begin{eqnarray} \mathbb{P}r_{X \sim \mathcal{P_{X}}}\left\{f(X) \neq f^{*}(X)\right\} &=& \mathbb{P}r_{X,Y \sim \mathcal{P_{X,Y}}}\left\{f(X) \neq f^{*}(X) \wedge f^{*}(X)=Y \right\} + \mathbb{P}r_{X,Y \sim \mathcal{P_{X,Y}}}\left\{f(X) \neq f^{*}(X) \wedge f^{*}(X)\neq Y \right\} \nonumber \\ &\leq& \mathbb{P}r_{X,Y \sim \mathcal{P_{X,Y}}}\left\{f(X) \neq f^{*}(X) \wedge f^{*}(X)=Y \right\} + R(f^{*}) \nonumber\\ &\leq& \mathbb{P}r_{X,Y \sim \mathcal{P_{X,Y}}}\left\{f(X) \neq Y \right\} + R(f^{*}) \nonumber \\ &=& R(f) + R(f^{*}) \nonumber\\ &\leq& 2\cdot R(f^{*}) + 11 \cdot \frac{A}{m} + 6 \cdot \sqrt{\frac{A}{m} \cdot R(f^{*})}. \label{eq30} \end{eqnarray} It follows that $$ f \in B(f^{*},2\cdot R(f^{*}) + 11 \cdot \frac{A}{m} + 6 \cdot \sqrt{\frac{A}{m} \cdot R(f^{*})}) = B(f^{*}, R_0), $$ and, in particular, $$ G\subseteq B(f^{*},R_{0}), $$ so $$ \Delta G \leq \Delta B(f^{*},R_{0}). 
$$ The abstain rate of ${\rm{ILESS}}$ equals $\Delta G$. We can now use the disagreement coefficient to bound the abstain rate from above, \begin{eqnarray} \Delta G \leq \Delta B(f^{*},R_{0}) = \frac{\Delta B(f^{*},R_{0})}{R_{0}}\cdot R_{0} \leq \theta(R_{0}) \cdot R_{0}, \end{eqnarray} which concludes the proof. \end{proof} According to Theorem \ref{thm:LessRejection}, assuming the disagreement coefficient is $\theta(r) = O({\rm polylog}(1/r))$ for $r \geq R(f^*)$, the rejection mass of ${\rm{ILESS}}$, defined as the probability that the classifier trained by ${\rm{ILESS}}$ will output ``I don't know'', is bounded w.h.p. by \begin{equation} {\rm polylog}_1(\frac{1}{R(f^*)+1/m}) \cdot R(f^*) + \frac{{\rm polylog}_2(m,d,1/\delta)}{m}. \label{fast_rejection} \end{equation} In many cases, the disagreement coefficient, $\theta(r)$, is bounded by a constant, or by $O({\rm polylog}(1/r))$ for all $r>0$ (see \cite{Hanneke_book}). For example, it was shown in \cite{JMLR:v16:wiener15a} that for linear separators under mixture of Gaussians, and for axis-aligned rectangles under product densities over $\mathbb{R}^k$, $\theta(r)$ is bounded by $O({\rm polylog}(1/r))$ for all $r>0$. For such cases, we know that (\ref{fast_rejection}) always holds, regardless of the size of $R(f^*)$. The disagreement coefficient is only dependent on the marginal $\mathcal{P_{X}}$, the hypothesis class $\mathcal{F}$, and the identity of the true risk minimizers, $f^*$ (which is not necessarily unique). This fact motivates the following definition of a rejection rate of a selective learning algorithm, which is only dependent on $\mathcal{P_{X}}$,$\mathcal{F}$ and $f^*$.
\begin{defi}[Fast $R^*$ Rejection Rate] \label{fast $R^*$ rejection rate} Given $\mathcal{P_{X}}$,$\mathcal{F}$ and $f^*$, if for any $\mathcal{P_{Y|X}}$, for which $f^*$ is a true risk minimizer, the rejection mass of a selective classifier learning algorithm is bounded by probability of at least $1-\delta$ by (\ref{fast_rejection}), we say that the algorithm achieves a \textbf{fast $R^*$ rejection rate}, with ${\rm polylog}_1$ and ${\rm polylog}_2$ as its parameters. \end{defi} Clearly, by Theorem \ref{thm:LessRejection}, if $\theta(r)=O({\rm polylog}(1/r))$ for all $r>0$, then {{\rm{ILESS}}} has a fast $R^*$ rejection rate. In the next section, we will show the other direction; that is, if there is a {\rm PCS } learning algorithm that has a fast $R^*$ rejection rate, then $\theta(r)=O({\rm polylog}(1/r))$ for all $r>0$. As long as the number of training examples that ${\rm{ILESS}}$ receives is not ``too large'' relative to $1/R(f^*)$, i.e., $m \ll \frac{1}{R(f^{*})}$, the rejection mass of ${\rm{ILESS}}$ is $ O(\frac{{\rm polylog}(m,d,1/\delta)}{m}). $ When $m$ is large, and $R(f^{*})$ becomes more dominant than $\frac{1}{m}$, our coverage bound is dominated by $R(f^{*})$. This should not surprise us, as ${\rm{ILESS}}$ achieves \emph{pointwise-competitiveness} w.h.p., and any strategy that achieves pointwise-competitiveness cannot ensure a better rejection mass than $R(f^{*})$ without making more assumptions about the error or the distribution. This can be seen in the following example, in which $\theta(r)\leq 1$ for all $r>0$, but the rejection mass of any pointwise-competitive strategy is always at least $R(f^{*})$. \begin{example} \label{example:1} Given any $0<\epsilon<0.5$, let ${\cal X} = [0,1]$, and ${\cal F} = \{f_1,f_2\}$ where \begin{displaymath} f_1(x)= \begin{cases} 1, & x<\epsilon \\ 0, & \text{otherwise} \end{cases}, f_2(x)= \begin{cases} 1, & x>1-\epsilon \\ 0, & \text{otherwise}. 
\end{cases} \end{displaymath} \end{example} Let $\mathcal{P_{X}}$ be the uniform distribution over $[0,1]$. Assume that $Y$ will always be zero. $f_1$ and $f_2$ are both $f^*$. Every pointwise-competitive classifier will have to output $g(x)=0$ for every $x$ in the disagreement set of $f_1$ and $f_2$. $R(f^*)=\epsilon$, and the rejection mass is $2\epsilon (= 2R(f^*))$. \section{From Selective Classification to the Disagreement Coefficient} We now turn to show a reduction from selective classification to the disagreement coefficient. \begin{theorem} \label{thm:PointwiseSelectiveToCoeff} Let $\mathcal{F}$ be a hypothesis class with a finite VC dimension $d$, and let $\mathcal{P_{X,Y}}$ be an unknown distribution. Let {\rm PCS} be an algorithm that returns a pointwise-competitive selective classifier w.h.p. If there exists an $m_{max}$ s.t. for every $m \leq m_{max}$, with probability of at least $1-\delta$, the abstain rate $1 - \Phi$ of ${\rm PCS}(S_m,\delta, \mathcal{F}, d)$ is bounded above as follows: \begin{equation} \label{eq:probEventGeneral} 1- \Phi(\rm PCS) \leq \frac{{\rm polylog}(m,d,1/\delta)}{m}. \end{equation} Then for every $f^*$ (every true risk minimizer), for every $r \geq 1/m_{max}$, $$ \theta_{f^*}(r) \leq 8({\rm polylog}(1/r,d,1/r)+3). $$ \end{theorem} \begin{proof} For any $m \in \left \{ 2,3,...,m_{max} \right \}$, denote by ${\cal S}_m$ a random training sample drawn from $\mathcal{P_{X,Y}}$. Let $Z$ be a random variable representing a single random unlabeled example sampled from $\mathcal{P_{X}}$, and let $f^*$ be a specific true risk minimizer. Given $z \in DIS\left( B(f^*,\frac{1}{m})\right)$, as used in \cite[Lemma 47]{Hanneke11}, we know that there exists a function $h_z \in {\cal F}$ s.t. $h_z(z) \neq f^*(z)$ and $\Pr(h_z(X)\neq f^*(X)) \leq \frac{1}{m}$.
We denote by $\mathcal{P_{X,Y}}_z$ a new probability distribution that is identical to $\mathcal{P_{X,Y}}$ over all $x \in{\cal X}$ with the exception of $\{ x : \ \ h_z(x) \neq f^*(x)\}$, over which it is defined to be $Y \triangleq h_z(x)$. It is easy to see that $h_z$ is an $f^*$ for such a distribution. Denote by $\emph{e}_1$ the probability event where (\ref{eq:probEventGeneral}) holds (for a specific $m \leq m_{max}$). Denote by $\emph{e}_2$ the event where {\rm PCS} has succeeded in returning a pointwise-competitive selective classifier $(f_{s_m},g_{s_m})$ under $S_m$. Define $S'_m$ to be a modified $S_m$. For every $x$ s.t. $h_z(x) \neq f^*(x)$, $y$ changes to be $y=h_z(x)$. $S_m'$ is a random training sample drawn from $\mathcal{P_{X,Y}}_z$. Denote by $\emph{e}_{3z}$ the event where {\rm PCS} has succeeded in returning a pointwise-competitive selective classifier $(f_{s'_m},g_{s'_m})$ under $S'_m$. $h_z$ is only defined for cases in which $z \in DIS\left( B(f^*,\frac{1}{m})\right)$, and thus we define that $\emph{e}_{3z}$ will also include cases for which $z \notin DIS\left( B(f^*,\frac{1}{m})\right)$. Under our assumptions, $\mathbb{P}r(\emph{e}_1),\mathbb{P}r(\emph{e}_2) \geq 1-\delta$. For every $z \in DIS (B(f^*,\frac{1}{m}))$, $\mathbb{P}r(\emph{e}_{3z}|z) \geq 1-\delta$, and for every $z \notin DIS (B(f^*,\frac{1}{m}))$, $\mathbb{P}r(\emph{e}_{3z}|z)=1$, which implies that $\mathbb{P}r(\emph{e}_{3z}) \geq 1-\delta$. We denote by $h_z(S_m) = f^*(S_m)$ the event where $h_z(x) = f^*(x)$ for all $x \in S_m$. The explanations for the following equations follow. 
\begin{eqnarray} &&\mathbb{P}r\{Z \in DIS \left(B(f^*,\frac{1}{m})\right)\wedge h_z(S_m) = f^*(S_m) \} \label{aa1} \\ &=& \mathbb{P}r\{ Z \in DIS \left(B(f^*,\frac{1}{m})\right) \wedge h_z(S_m) = f^*(S_m) \wedge \emph{e}_1 \wedge \emph{e}_2 \wedge \emph{e}_{3z}\} \label{aa2}\\ && + \mathbb{P}r\{ Z \in DIS \left(B(f^*,\frac{1}{m})\right) \wedge h_z(S_m) = f^*(S_m) \ | \ \neg(\emph{e}_1 \wedge \emph{e}_2 \wedge \emph{e}_{3z}) \}\cdot \mathbb{P}r(\neg(\emph{e}_1 \wedge \emph{e}_2 \wedge \emph{e}_{3z})) \nonumber \\ &\leq& \mathbb{P}r\{ Z \in DIS \left(B(f^*,\frac{1}{m})\right) \wedge h_z(S_m) = f^*(S_m) \wedge \emph{e}_1 \wedge \emph{e}_2 \wedge \emph{e}_{3z}\} + 3\delta \label{aa3} \\ &\leq& \mathbb{P}r\{ g_{s_m}(Z) =0 \wedge \emph{e}_1 \wedge \emph{e}_2 \wedge \emph{e}_{3z}\} + 3\delta \label{aa4} \\ &\leq& \mathbb{P}r\{ g_{s_m}(Z) =0 \wedge \emph{e}_1 \} + 3\delta \\ &\leq& \mathbb{P}r\{ g_{s_m}(Z) =0 \ | \ \emph{e}_1 \} + 3\delta \\ &\leq& \frac{{\rm polylog}(m,d,1/\delta)}{m} + 3\delta. \label{aa5} \end{eqnarray} In (\ref{aa1}), it is convenient to view the random experiment as if we draw $Z$ first, and then $S_m$. If $Z \in DIS (B(f^*,\frac{1}{m}))$, then consider $h_z$ to be any function that holds $h_z(Z) \neq f^*(Z)$ and $Pr(h_z(X)\neq f^*(X)) \leq \frac{1}{m}$. If $Z \notin DIS (B(f^*,\frac{1}{m}))$, then the event described in (\ref{aa1}) does not occur, and $h_z$ is undefined. In (\ref{aa2}), we use conditional probability, and in (\ref{aa3}) we apply the union bound. Inequality (\ref{aa4}) is justified as follows. If $h_z(S_m) = f^*(S_m)$, then the algorithm received the same input under $\mathcal{P_{X,Y}}_z$ and $\mathcal{P_{X,Y}}$. 
Given that $\emph{e}_{2}$ and $\emph{e}_{3z}$ occurred, we know that the algorithm had successfully output a pointwise-competitive selective classifier for both probabilities, which means that whenever $f^*$ and $h_z$ disagree, $g_{s_m}$ has to output zero; otherwise, it will not be pointwise-competitive for one of the distributions. By the definition of $h_z$, $h_z(Z) \neq f^*(Z)$, which explains the inequality. (\ref{aa5}) is driven from the definition of $\emph{e}_1$. Taking $\delta=\frac{1}{m}$, we get, \begin{eqnarray} \mathbb{P}r\{Z \in DIS \left(B(f^*,\frac{1}{m})\right)\wedge h_z(S_m) = f^*(S_m) \} &\leq& \frac{{\rm polylog}(m,d,m)+3}{m}. \label{aa7} \end{eqnarray} The following inequalities are derived using elementary conditional probability. In Equation (\ref{aa6}) we use an argument taken from the proof of \cite[Lemma 47]{Hanneke11}. $h_z \in \left( B(f^*,\frac{1}{m})\right)$ and thus the probability that $f^*$ and $h_z$ will have the same labels over a sample of size $m$ is at least $(1-\frac{1}{m})^m$. \begin{eqnarray} &&\mathbb{P}r\{Z \in DIS \left(B(f^*,\frac{1}{m})\right)\wedge h_z(S_m) = f^*(S_m) \} \nonumber\\ &=& \mathbb{P}r\{ h_z(S_m) = f^*(S_m) \ | \ Z \in DIS\left( B(f^*,\frac{1}{m})\right) \} \cdot\mathbb{P}r\{Z \in DIS\left( B(f^*,\frac{1}{m})\right)\} \nonumber \\ &\geq& (1-\frac{1}{m})^m \cdot\mathbb{P}r\{Z \in DIS\left( B(f^*,\frac{1}{m})\right)\} \label{aa6} \\ &\geq& \frac{1}{4} \cdot \Delta B(f^*,\frac{1}{m}) .\label{aa8} \end{eqnarray} Combining (\ref{aa7}) and (\ref{aa8}), we get that for every $m \in \left \{ 2,3,...,m_{max} \right \}$, \begin{eqnarray} \frac{\Delta B(f^*,1/m)}{1/m} &\leq& 4({\rm polylog}(m,d,m)+3). \label{aa} \end{eqnarray} The following inequalities follow from (\ref{aa}), and from the fact that $\Delta B(f^*,x)$ and ${\rm polylog}_1(x)$ are non-decreasing. 
For any $r$ in $[\frac{1}{m_{max}} ,\frac{1}{2}]$, \begin{eqnarray*} \frac{\Delta B(f^*,r)}{r} &\leq& \frac{\Delta B(f^*,\frac{1}{\left \lfloor 1/r \right \rfloor})}{\frac{1}{\left \lfloor 1/r \right \rfloor}} \cdot \frac{1}{r \cdot \left \lfloor 1/r \right \rfloor} \\ &\leq& 4 ( {\rm polylog}(\lfloor 1/r \rfloor,d,\lfloor 1/r \rfloor)+3 ) \cdot \frac{1}{r \cdot (1/r-1)} \\ &\leq& 4 ( {\rm polylog}(\lfloor 1/r \rfloor,d,\lfloor 1/r \rfloor)+3 ) \cdot \frac{1}{1-r} \\ &\leq& 8 ( {\rm polylog}(1/r ,d,1/r)+3 ) \end{eqnarray*} and for $r$ in $[\frac{1}{2},1]$, \begin{eqnarray} \frac{\Delta B(f^*,r)}{r} &\leq& \frac{1}{1/2} = 2, \end{eqnarray} which concludes the proof. \end{proof} \begin{corollary} \label{thm:iLessToCoeff} Let $\mathcal{F}$ be a hypothesis class with a finite VC dimension $d$, and let $\mathcal{P_{X,Y}}$ be an unknown distribution. If there exists an $m_{max}$ s.t. for every $m \leq m_{max}$, with probability of at least $1-\delta$, the abstain rate $1 - \Phi$ of ${{\rm{ILESS}}}(S_m,\delta, \mathcal{F}, d)$ is bounded above as follows: \begin{equation} \label{eq:probEvent} 1- \Phi({\rm{ILESS}}) \leq \frac{{\rm polylog}(m,d,1/\delta)}{m}. \end{equation} Then for every $f^*$ (every true risk minimizer), for every $r \geq 1/m_{max}$, $$ \theta_{f^*}(r) \leq 8({\rm polylog}(1/r,d,1/r)+3). $$ \end{corollary} \begin{proof} This is a direct result of Theorem \ref{thm:PointwiseSelectiveToCoeff}, and of the fact that {{\rm{ILESS}}} is PCS. \end{proof} Given $\mathcal{P_{X}}$,$\mathcal{F}$ and $f^*$, if any {\rm PCS} has a fast $R^*$ rejection rate, we can apply Theorem \ref{thm:PointwiseSelectiveToCoeff} with a deterministic $\mathcal{P_{Y|X}}$ distribution for which $Y=f^*(X)$, and get that $R(f^*)=0$. Thus, by definition, \begin{equation} 1- \Phi({\rm PCS}) \leq {\rm polylog}_1(\frac{1}{R(f^*)+1/m}) \cdot 0 + \frac{{\rm polylog}_2(m,d,1/\delta)}{m}.
\end{equation} We can now apply Theorem \ref{thm:PointwiseSelectiveToCoeff} with $m_{max}=\infty$, and get that the disagreement coefficient is bounded by ${\rm polylog}(1/r)$ for all $r>0$. This completes a two-sided equivalence between {\rm PCS} with a fast $R^*$ rejection rate and a bounded disagreement coefficient for all $r>0$. \section{Active-ILESS} \label{sec:Active-iLESS} \begin{strategy} \caption{Agnostic low-error active learning strategy ({\textrm{Active-ILESS}})} \label{alg:Active-iLESS} \footnotesize { \begin{algorithmic}[1] \REQUIRE $\epsilon \text{ and/or } m$ depending on the desired termination condition (error or labeling budget, respectively)\\ Confidence level $\delta$\\ Hypothesis class $\mathcal{F}$ with VC dimension $d$\\ An unlabeled input sequence sampled i.i.d.\ from $\mathcal{P_{X,Y}}$: $x_{1}, x_{2},x_{3},\ldots$ \ENSURE A classifier $\hat{f}$. \textbf{Initialize:} Set $\hat{S} = \emptyset $, $G_{0} = {\cal F}$, $t = 1$. \textbf{Perform for each example $x_{t}$ received:} \STATE if $x_{t}\in AGR(G_{t-1})$: don't request label for $x_{t}$ and set $y_{t}=f(x_{t})$ using any $f\in G_{t-1}$ otherwise: request label $y_t$. \STATE Set $\hat{S} = \hat{S} \cup \{(x_{t},y_{t})\}$. \STATE Set $\hat{f} = \hat{f}(\hat{S})$ \STATE if $\log_2 (t) \in \mathbb{N}$: \begin{itemize} \item Set $\sigma_{Active}= \hat{\sigma}_{R-\hat{R}} \left(\frac{t}{2},\frac{\delta}{2t},d,\hat{R}(\hat{f},\hat{S}) \right) + \bar{\sigma}_{\hat{R}-R} \left(\frac{t}{2},\frac{\delta}{2t},d,\hat{R}(\hat{f},\hat{S})+\hat{\sigma}_{R-\hat{R}}(\frac{t}{2},\frac{\delta}{2t},d,\hat{R}(\hat{f},\hat{S})) \right)$ \item If $\epsilon$ was given as input and $\sigma_{Active}<\epsilon$, terminate and return $\hat{f}$ \item Set $G_{t} = \hat{{\cal V}}\left(\hat{f},\sigma_{Active}\right)$ \item Set $\hat{S} = \emptyset $.
\end{itemize} otherwise: \begin{itemize} \item $G_{t} = G_{t-1}$ \end{itemize} \STATE If $m$ was given as input and $t=m$, terminate and return $\hat{f}$ \STATE Set $t = t + 1$ \end{algorithmic} } \end{strategy} In this section we introduce, in Strategy \ref{alg:Active-iLESS}, an agnostic active learning algorithm called ${\textrm{Active-ILESS}}$. ${\textrm{Active-ILESS}}$ is very similar to Agnostic CAL \cite{hsu:thesis}, Algorithm 4.2 on page 36, and $A^2$ \cite{balcan2006agnostic}. Much like Agnostic CAL, ${\textrm{Active-ILESS}}$ creates artificial labels (step 1). The two algorithms differ mainly in that ${\textrm{Active-ILESS}}$ works in batches (inside each batch, the decision whether to query an example is made instantly and not at the end of the batch). This allows ${\textrm{Active-ILESS}}$ to be a bit more conservative with its deltas. Moreover, while Agnostic CAL requires calculation of an ERM with many constraints (defined by the function LEARN in Hsu's thesis), ${\textrm{Active-ILESS}}$ requires a calculation of the ERM with only one constraint, as seen from the disbelief principle \cite{el2011agnostic}, already discussed in Section \ref{sec:iLESS}. Although {\textrm{Active-ILESS}} is not novel in and of itself, we use its similarity to Agnostic CAL to demonstrate a deep connection between active learning and selective classification. In Section~\ref{sec:BatchiIless} we use ${\textrm{Active-ILESS}}$ to show an equivalence between active learning (represented by ${\textrm{Active-ILESS}}$) and selective classification (represented by a variant of ${\rm{ILESS}}$, ``{\textrm{Batch-ILESS}}''). The introduction of these new variants facilitates a straightforward proof of the equivalence relationship. This equivalence implies a novel relationship between selective and active classification in the agnostic setting. We begin by analyzing ${\textrm{Active-ILESS}}$ and showing that much like ${\rm{ILESS}}$, $f^{*} \in G_t$ in each iteration $t$.
The low-error set $G$, maintained by ${\rm{ILESS}}$, contains all the hypotheses that have an empirical error smaller than $\hat{R}(\hat{f})+ \sigma_{{\rm{ILESS}}}$. In Lemma \ref{thm:f*in_LESS} we showed that this condition implies that $f^{*}$ resides within the low-error set $G$ of ${\rm{ILESS}}$. A proof that $f^{*} \in G_{t}$, after each iteration of ${\textrm{Active-ILESS}}$, cannot follow the same argument due to the fact that ${\textrm{Active-ILESS}}$, shown in Strategy \ref{alg:Active-iLESS}, labels by itself each example whose label is not requested from the teacher, and obviously, since we consider an agnostic setting, these self-labels can differ from the true labels. ${\textrm{Active-ILESS}}$, as seen in Strategy \ref{alg:Active-iLESS}, receives as a termination condition either $\epsilon>0$ and/or $m$, and terminates when the radius of its low-error set, $G_t$, is smaller than $\epsilon$, or when it has processed $m$ examples. ${\textrm{Active-ILESS}}$ changes its low-error set, $G_{t}$, only for $t$ that are natural powers of $2$. For each change, ${\textrm{Active-ILESS}}$ begins to create fake labels for $x_{t}\in AGR(G_{t-1})$ that may or may not be equal to the real label of $x_{t}$ (under the original distribution). In fact, this $G_{t}$ defines a new distribution, $\mathcal{P_{X,Y}}(G_{t})$, and this distribution changes for every $t$ that is a natural power of $2$. With respect to a run of ${\textrm{Active-ILESS}}$, and $t=2^{i},i \in \mathbb{N}$, we denote by $\mathcal{P_{X,Y}}(G_{t})$ the new probability distribution implied by $G_{t}$, and the fake labels created by the algorithm. $R_{\mathcal{P_{X,Y}}(G_{t})}(f)$ will be the true risk under the new distribution, while $R_{\mathcal{P_{X,Y}}}(f)$ is the true risk of $f$ under the original distribution. \begin{defi} \label{K} Let $\mathcal{F}$ be a hypothesis class with a finite VC dimension $d$, and let $\mathcal{P_{X,Y}}$ be an unknown distribution.
Given a run of {{\textrm{Active-ILESS}}}, we denote by $\cal K$ the event where both inequalities (\ref{errorbounds1}) and (\ref{errorbounds2}) hold simultaneously for every $f \in {\cal F}$, for all iterations of {{\textrm{Active-ILESS}}} where $t=2^{i},i \in \mathbb{N}$. $\hat{R}(f) \triangleq \hat{R}(f,\hat{S})$ for $\hat{S}$ before it was initialized: \end{defi} \begin{equation} \label{errorbounds1} R_{\mathcal{P_{X,Y}}(G_{t})}(f) \leq \hat{R}(f) + \sigma_{R-\hat{R}}\left(\frac{t}{2},\frac{\delta}{2t},d,R(f),\hat{R}(f)\right) \end{equation} \begin{equation} \label{errorbounds2} \hat{R}(f) \leq R_{\mathcal{P_{X,Y}}(G_{t})}(f) + \sigma_{\hat{R}-R}\left(\frac{t}{2},\frac{\delta}{2t},d,R(f),\hat{R}(f)\right) \end{equation} \begin{lemma} \label{lemma:errorbounds} $\mathcal{K}$ occurs with probability of at least $1-\delta$. \end{lemma} \begin{proof} $G_{t}$ changes only for iterations of the type $2^i, i \in \mathbb{N}$. We know by Lemma \ref{lemma:agnostic} that the probability that inequalities (\ref{errorbounds1}) and (\ref{errorbounds2}) do not hold is smaller than $\delta/(2t)$. By the union bound, the probability that one of these inequalities does not hold after any iteration is smaller than \begin{displaymath} \sum_{t=2^{i},i \in \mathbb{N}}\frac{\delta}{2t}\leq \delta. \end{displaymath} \end{proof} \begin{lemma} \label{lemma:f_star_best} If $f^{*}$, a true risk minimizer under probability distribution $\mathcal{P_{X,Y}}$, resides within $G_{t}$, then it is also a true risk minimizer under probability distribution $\mathcal{P_{X,Y}}(G_{t})$. \end{lemma} \begin{proof} \begin{displaymath} \mathop{\rm argmin}_{f \in {\cal F}} R_{\mathcal{P_{X,Y}}(G_{t})}(f)=\mathop{\rm argmin}_{f \in {\cal F}} \left( \underbrace{R_{\mathcal{P_{X,Y}}}(f)}_{A}+\underbrace{R_{\mathcal{P_{X,Y}}(G_{t})}(f)-R_{\mathcal{P_{X,Y}}}(f)}_{B} \right). 
\end{displaymath} We know that $f^{*}$ minimizes $A$, and we note that every function that resides within $G_{t}$ minimizes $B$, because every difference in the labeling between $\mathcal{P_{X,Y}}$ and $\mathcal{P_{X,Y}}(G_{t})$ was done according to the label given by the unanimous decision of functions in $G_{t}$. Hence, $f^{*}$ minimizes $A+B$. \end{proof} The proofs of the following four lemmas appear in the appendix. They all show basic good qualities of {{\textrm{Active-ILESS}}}. \begin{lemma} \label{lemma:f*in} Given that event $\cal K$ (see Definition \ref{K}) occurred, each $f^{*}$ of the \textbf{original} distribution $\mathcal{P_{X,Y}}$ resides within $G_{t}$ for all $t$. This implies that $R_{\mathcal{P_{X,Y}}(G_{t})}(f^{*}) \leq R(f^*)$, for all $t$, as every change in the labeling is done according to $f^*$. \end{lemma} \begin{lemma} \label{lemma:epsilon} Given that event $\cal K$ (see Definition \ref{K}) occurred, and under the assumption that ${\textrm{Active-ILESS}}$ terminated with the $\epsilon$ condition, the hypothesis returned by ${\textrm{Active-ILESS}}$, $\hat{f}$, holds: \begin{displaymath} R_{\mathcal{P_{X,Y}}}(\hat{f}) \leq R_{\mathcal{P_{X,Y}}}(f^{*}) + \epsilon. \end{displaymath} \end{lemma} \begin{lemma} \label{lemma:radius_active} Given that event $\cal K$ (see Definition \ref{K}) occurred, the final radius of ${\textrm{Active-ILESS}}$ satisfies \begin{eqnarray} \sigma_{Active}=O(\frac{B}{m}+ \sqrt{\frac{B}{m} \cdot R(f^{*})} ), \end{eqnarray} where \begin{math} B \triangleq 16d \ln( \frac{16m^2e}{d\delta} ). 
\end{math} \end{lemma} \begin{lemma} \label{lemma:max_examples_observed} Given that event $\cal K$ (see Definition \ref{K}) occurred, the total number of examples that ${{\textrm{Active-ILESS}}(\epsilon)}$ processed (without necessarily requesting labels) is $$ O \left( \frac{1}{\epsilon}\ln(\frac{1}{\epsilon})+ \frac{R(f^*)}{\epsilon^2}\ln(\frac{R(f^*)}{\epsilon^2} ) \right), $$ where we hide factors of $d, \ln(1/\delta)$ under the $O$. \end{lemma} \begin{defi} \label{passive example complexity} An active learner that generates a hypothesis whose true error is smaller than $\epsilon$ w.h.p., has \textbf{passive example complexity}, if it observes up to $O \left( \frac{1}{\epsilon}\ln(\frac{1}{\epsilon})+ \frac{R(f^*)}{\epsilon^2}\ln(\frac{R(f^*)}{\epsilon^2} ) \right)$ examples (not necessarily labeled). \end{defi} By Lemmas \ref{lemma:epsilon} and \ref{lemma:max_examples_observed} we know that {{\textrm{Active-ILESS}}} has passive example complexity. The definition of a fast $R^*$ rejection rate for selective classification induces the following related definition for exponential speedup of active learning algorithms. \begin{defi}[$R^*$ Exponential Speedup] \label{$R^*$ exponential speedup} Given $\mathcal{P_{X}}$,$\mathcal{F}$ and $f^*$, we say that an active learner has \textbf{$R^*$ exponential speedup}, with ${\rm polylog}_1$ and ${\rm polylog}_2$ as its parameters, if for every $\mathcal{P_{Y|X}}$ for which $f^*$ is a true risk minimizer, and for every $m>0$, with probability of at least $1-\delta$, the number of labels requested by the active learner after observing $m$ examples is not greater than $$ {\rm polylog}_1(\frac{1}{R(f^*)+1/m}) \cdot R(f^*)m+ {\rm polylog}_2(m,d,1/\delta). $$ \end{defi} In \cite{hsu:thesis}, Hsu introduced the agnostic CAL algorithm and showed (Theorem 4.3, page 41) that if the disagreement coefficient is bounded, then Agnostic CAL has $R^*$ exponential speedup (under our new definition). 
Any active algorithm that has passive example complexity and achieves $R^*$ exponential speedup requires w.h.p. no more than $O\left({\rm polylog}(\frac{R(f^*)}{\epsilon^2}) \frac{R(f^*)^2}{\epsilon^2} + {\rm polylog}(\frac{1}{\epsilon}) \right)$ labels to reach a true error smaller than $\epsilon$. The proof is immediate by considering the cases $\frac{R(f^*)}{\epsilon} \geq 1$ and $\frac{R(f^*)}{\epsilon} < 1$. The leading term of this bound is $\frac{R(f^*)^2}{\epsilon^2}$, which is also the case for $A^2$ \cite{balcan2006agnostic}. \section{A Reduction from Active-iLess to Batch-ILESS} \label{sec:BatchiIless} In Strategy \ref{alg:Batch iLESS} we define a selective classifier, called ${\textrm{Batch-ILESS}}$, which uses ${\textrm{Active-ILESS}}$ as its engine. Given a labeled sample $S_m$, ${\textrm{Batch-ILESS}}$ simulates the active algorithm, by applying it over a uniformly random ordering of $S_m$ in a straightforward manner (i.e., it sequentially introduces to the active algorithm an unlabeled example and reveals the label only if the active algorithm requests it). Upon termination, after the active algorithm has consumed all examples, our batch algorithm receives $\hat{f}$ from the active algorithm and utilizes its last low-error set $G_t$ to define its selection function. Lemma \ref{lemma:f*in} implies that ${\textrm{Batch-ILESS}}$ is pointwise-competitive. We note that Lemma \ref{lemma:radius}, Theorem \ref{thm:LessRejection} and Theorem \ref{thm:iLessToCoeff}, which were proven for {\rm{ILESS}}, can also be proven for {\textrm{Batch-ILESS}}. We chose to prove it for {\rm{ILESS}}, as it is more simple than {\textrm{Batch-ILESS}}, and doesn't require an active algorithm as its engine. We state these ideas formally, and give sketches for their proofs, in the Appendix in Lemma \ref{lemma:radius_batch} and Theorem \ref{thm:LessRejection_batch}. 
\begin{strategy}[] \caption{Batch Improved Low-Error Selective Strategy ({\textrm{Batch-ILESS}})} \footnotesize { \begin{algorithmic}[1] \REQUIRE Sample set of size $m$, $S_m$,\\ Confidence level $\delta$\\ Hypothesis class $\mathcal{F}$ with VC dimension $d$\\ \ENSURE A selective classifier $(h,g)$ \STATE Simulate ${\textrm{Active-ILESS}}$ with a random ordering of $S_m$ as its input stream; let $G_t$ be the low-error set obtained by ${\textrm{Active-ILESS}}$ in its last round, and let $\hat{f}$ be its resulting classifier. \STATE Construct $g$ such that $g(x) = 1 \Longleftrightarrow x \in \left\{{\cal X} \setminus DIS\left(G_t\right)\right\}$ \STATE $h = \hat{f}$ \end{algorithmic} } \label{alg:Batch iLESS} \end{strategy} The following theorem shows a deep connection between the speedup of {{\textrm{Active-ILESS}}} and the rejection mass of {{\textrm{Batch-ILESS}}} for specific $\mathcal{P_{X,Y}}$. An immediate corollary of this theorem is that if {{\textrm{Active-ILESS}}} has $R^*$ exponential speedup (see Definition \ref{$R^*$ exponential speedup}), then {{\textrm{Batch-ILESS}}} has a fast $R^*$ rejection rate (see Definition \ref{fast $R^*$ rejection rate}). \begin{theorem} \label{thm:activeToSelective} Let $\mathcal{F}$ be a hypothesis class with a finite VC dimension $d$, and let $\mathcal{P_{X,Y}}$ be an unknown distribution. If after observing $m$ examples, with probability of at least $1-\delta$, the number of labels requested by ${\textrm{Active-ILESS}}$ is not greater than \begin{equation} {\rm polylog}_1(\frac{1}{R(f^*)+1/m}) \cdot R(f^*)m+ {\rm polylog}_2(m,d,1/\delta), \label{eq:active_speedup} \end{equation} then the rejection mass of {{\textrm{Batch-ILESS}}} is bounded w.h.p. by $$ 8 \cdot {\rm polylog}_1(\frac{1}{R(f^*)+1/m}) \cdot R(f^*) + \frac{2\left( \sqrt{\ln(2/\delta)} + \sqrt{\ln(2/\delta) +2{\rm polylog}_2(2m,2/\delta)}\right)^2 }{m} .
$$ \end{theorem} \begin{proof} Consider an application of ${\textrm{Active-ILESS}}$ with $\delta = \delta_0$ over $m_0 \triangleq 2^{\left \lceil \log_2(m+1) \right \rceil}$ examples. Denote by $X_i$ an indicator random variable for the labeling of its $i$th example, $1 \leq i \leq m_0$. With probability of at least $1-\delta_0$ over the choice of samples from $\mathcal{P_{X,Y}}$, \begin{eqnarray} \sum_{i=1}^{m_0} X_i \leq {\rm polylog}_1(\frac{1}{R(f^*)+1/m_0}) \cdot R(f^*)m_0+ {\rm polylog}_2(m_0,1/\delta_0). \label{h1} \end{eqnarray} We know by the definition of ${\textrm{Active-ILESS}}$ (Strategy \ref{alg:Active-iLESS}), that the last $m_0/2$ examples had the exact same probability, $\Delta G_{m_{0}/2}$, of requiring a label, and that this is exactly the probability that ${\textrm{Batch-ILESS}}$ will decide to abstain after receiving $m$ examples, according to Strategy \ref{alg:Batch iLESS}. We now estimate $\Delta G_{m_{0}/2}$ using the following version of the Chernoff bound given by Canny \cite{JohnCannyNotes}. For the sake of self-containment, Canny's statement and proof of the bound are provided in Lemma \ref{ChernoffBound} in the Appendix. The statement of the lemma is as follows. Let $X_1,X_2,\ldots,X_n$ be independent Bernoulli trials with $\Pr[X_i=1]=p$, let $X\triangleq\sum_{i=1}^{n} X_i$, and $\mu = \mathbb{E} X$. Then, for every $\alpha>0$: $$\Pr \left( X<(1-\alpha)\mu \right) \leq \exp(-\mu \alpha^2 /2).$$ Applying the Chernoff bound with the indicator variables of the last $m_0/2$ examples, we have $X=\sum_{i=m_0/2+1}^{m_0} X_i$, $\mu = p\frac{m_0}{2}$, and set $p \triangleq \Delta G_{m_{0}/2}$. Select $\alpha$ such that $$ \exp(- p\frac{m_{0}}{2} \alpha^2 /2) = \delta_1. $$ Solving for $\alpha$, $$ \alpha =\sqrt{ \frac{4 \ln(1/\delta_1)}{m_0p}}.
$$ We conclude that with probability of at least $1-\delta_1$, \begin{eqnarray} & & X \geq (1-\sqrt{ \frac{4 \ln(1/\delta_1)}{m_0p}}) \cdot p\frac{m_0}{2} \nonumber \\ &\Leftrightarrow& 0 \geq \frac{pm_0}{2} - \sqrt{pm_0 \cdot \ln(1/\delta_1)} -X \label{eq:quadratic}. \end{eqnarray} Solving the quadratic equation~(\ref{eq:quadratic}) for $\sqrt{pm_0}$, we get that \begin{eqnarray} &&\sqrt{pm_0} \leq \sqrt{\ln(1/\delta_1)} + \sqrt{\ln(1/\delta_1) + 2X} \nonumber \\ & \Rightarrow & p \leq \frac{(\sqrt{\ln(1/\delta_1)} + \sqrt{\ln(1/\delta_1) + 2X})^2 }{m_0} \label{h2} . \end{eqnarray} Combining (\ref{h1}) and (\ref{h2}), from the union bound we get that with probability of at least $1-\delta_0 -\delta_1$, \begin{eqnarray*} \Delta G_{m_{0}/2} \leq \frac{\left( \sqrt{\ln(1/\delta_1)} + \sqrt{\ln(1/\delta_1) + 2{\rm polylog}_1(\frac{1}{R(f^*)+1/m_0}) \cdot R(f^*)m_0+2{\rm polylog}_2(m_0,1/\delta_0)} \right)^2 }{m_0}. \end{eqnarray*} If we take $\delta_0=\delta_1=\delta/2$, then, since $m\leq m_0 \leq 2m$, we can use $\sqrt{a+b} \leq \sqrt{a} + \sqrt{b}$ and $(a+b)^2 \leq 2a^2 + 2b^2$, to obtain \begin{eqnarray*} \Delta G_{m_{0}/2} &\leq& \frac{\left( \sqrt{\ln(2/\delta)} + \sqrt{\ln(2/\delta) + 4{\rm polylog}_1(\frac{1}{R(f^*)+1/m}) \cdot R(f^*)m+2{\rm polylog}_2(2m,2/\delta)} \right)^2 }{m} \\ &\leq& \frac{\left( \sqrt{\ln(2/\delta)} + \sqrt{\ln(2/\delta) +2{\rm polylog}_2(2m,2/\delta)} +\sqrt{4{\rm polylog}_1(\frac{1}{R(f^*)+1/m}) \cdot R(f^*)m} \right)^2 }{m}\\ &\leq& \frac{2\left( \sqrt{\ln(2/\delta)} + \sqrt{\ln(2/\delta) +2{\rm polylog}_2(2m,2/\delta)}\right)^2 + 2\left( \sqrt{4{\rm polylog}_1(\frac{1}{R(f^*)+1/m}) \cdot R(f^*)m} \right)^2 }{m}\\ &=& \frac{2\left( \sqrt{\ln(2/\delta)} + \sqrt{\ln(2/\delta) +2{\rm polylog}_2(2m,2/\delta)}\right)^2 }{m} + 8\cdot {\rm polylog}_1(\frac{1}{R(f^*)+1/m}) \cdot R(f^*) \end{eqnarray*} \end{proof} \begin{corollary} \label{thm:ActiveiLessToCoeff} Let $\mathcal{F}$ be a hypothesis class with a finite VC
dimension $d$, and let $\mathcal{P_{X,Y}}$ be an unknown distribution. If after observing $m$ examples, with probability of at least $1-\delta$, the number of labels requested by ${\textrm{Active-ILESS}}$ is not greater than \begin{equation*} {\rm polylog}_1(\frac{1}{R(f^*)+1/m}) \cdot R(f^*)m+ {\rm polylog}_2(m,d,1/\delta), \end{equation*} then for every $r \geq R(f^*)$, $$ \theta_{f^*}(r) \leq 8 \left( 2\left( \sqrt{\ln(2r)} + \sqrt{\ln(2r) +2{\rm polylog}_2(2/r,2/r)}\right)^2 +8\cdot {\rm polylog}_1(1/r)+3 \right) = O({\rm polylog}(1/r)). $$ \end{corollary} \begin{proof} The proof follows from Theorems \ref{thm:activeToSelective} and \ref{thm:PointwiseSelectiveToCoeff}. Applying Theorem \ref{thm:activeToSelective}, we know that for $m \leq 1/R(f^*)$, the rejection mass of {{\textrm{Batch-ILESS}}} is bounded w.h.p. by $$ \frac{2\left( \sqrt{\ln(2/\delta)} + \sqrt{\ln(2/\delta) +2{\rm polylog}_2(2m,2/\delta)}\right)^2 +8\cdot {\rm polylog}_1(\frac{1}{R(f^*)+1/m})}{m}. $$ Applying Theorem \ref{thm:PointwiseSelectiveToCoeff} with $m_{max} = 1/R(f^*)$, we get that for every $r \geq R(f^*)$, \begin{eqnarray*} \theta_{f^*}(r) &\leq& 8 \left(2\left( \sqrt{\ln(2r)} + \sqrt{\ln(2r) +2{\rm polylog}_2(2/r,2/r)}\right)^2 +8 \cdot {\rm polylog}_1(\frac{1}{R(f^*)+r})+3 \right)\\ &\leq& 8 \left( 2\left( \sqrt{\ln(2r)} + \sqrt{\ln(2r) +2{\rm polylog}_2(2/r,2/r)}\right)^2 +8\cdot {\rm polylog}_1(1/r)+3 \right). \end{eqnarray*} Note that Theorem \ref{thm:PointwiseSelectiveToCoeff} does not require $m_{max}$ to be an integer.
\end{proof} \section{From the Disagreement Coefficient to Active Learning} In this section we show that when $\theta'(r)$ is bounded by ${\rm polylog}_1(1/r)$ for all $r>R(f^*)$ for some specific $\mathcal{P_{X,Y}}$, then the label complexity of {{\textrm{Active-ILESS}}} under the same $\mathcal{P_{X,Y}}$ is bounded by \begin{equation} {\rm polylog}_2(\frac{1}{R(f^*)+1/m}) \cdot R(f^*)m+ {\rm polylog}_3(m,d,1/\delta), \label{eq:active_speedup4} \end{equation} where the parameters of ${\rm polylog}_2$ and ${\rm polylog}_3$ are only dependent on ${\rm polylog}_1(1/r)$. Thus, if $\theta'(r)\leq {\rm polylog}_1(1/r)$ for all $r>0$, we get that {{\textrm{Active-ILESS}}} has $R^*$ exponential speedup. This direction has been shown before in \cite{hsu:thesis,Hanneke07} for agnostic CAL and $A^2$. For the sake of self-containment, we show it here for {{\textrm{Active-ILESS}}}. Due to the fact that {{\textrm{Active-ILESS}}} relies on {{\rm{ILESS}}}, which we already have bounds for, the proof is straightforward. As a preparation for the theorem, we present Lemma \ref{lemma:disagreement_monotonicity} (shown before in \cite{Hanneke_book}), in which we introduce a small feature of the disagreement coefficient that will serve us later. \begin{lemma} \label{lemma:disagreement_monotonicity} Let $\mathcal{F}$ be a hypothesis class with a finite VC dimension $d$, and let $\mathcal{P_{X,Y}}$ be an unknown distribution. For every $f \in \mathcal{F}$ and $0<r\leq 1$, $\theta_{f}(r) \cdot r$ is a non-decreasing function. \end{lemma} \begin{proof} Given $0<r_1<r_2$, we will show that $\theta_{f}(r_1) \cdot r_1 \leq \theta_{f}(r_2) \cdot r_2$. Assume by contradiction that $$ \theta_{f}(r_1) \cdot r_1 > \theta_{f}(r_2) \cdot r_2, $$ i.e., $$ \sup_{r>r_1}\frac{\Delta B(f,r)}{r} \cdot r_1 > \sup_{r>r_2}\frac{\Delta B(f,r)}{r} \cdot r_2. $$ This implies that there exists $r_1\leq \hat{r} < r_2$ s.t.
$$ \frac{\Delta B(f,\hat{r})}{\hat{r}} r_1 > \sup_{r>r_2}\frac{\Delta B(f,r)}{r} r_2 \geq \frac{\Delta B(f,r_2)}{r_2} r_2 = \Delta B(f,r_2). $$ This contradicts the known monotonicity of $\Delta B(f,x)$. \end{proof} \begin{theorem} \label{thm:coeffToActiveLearning} Let $\mathcal{F}$ be a hypothesis class with a finite VC dimension $d$, let $\mathcal{P_{X,Y}}$ be an unknown distribution, and let $f^*$ be a true risk minimizer of $\mathcal{P_{X,Y}}$. If for all $r>R(f^*)$, $$ \theta'(r)\leq {\rm polylog}_1(1/r), $$ then the label complexity of ${\textrm{Active-ILESS}} (m,\delta/2)$ is bounded by $$ {\rm polylog}_1 \left(\frac{1}{5 R(f^{*}) + 14 \frac{A}{m}} \right) 2e \cdot mR(f^{*}) + \log_2(2/\delta) + 56 e \cdot \log_2 m \cdot A \cdot {\rm polylog}_1 \left(\frac{1}{5 R(f^{*}) + 14 \frac{A}{m}} \right) , $$ which has the same form as Equation (\ref{eq:active_speedup4}). \end{theorem} \begin{proof} Each run of ${\textrm{Active-ILESS}}(m,\delta/2)$ simulates $\log_2 m$ runs of ${\rm{ILESS}}$. We know by Lemma \ref{lemma:errorbounds} that with probability of at least $1-\delta/2$, inequalities (\ref{errorbounds1}) and (\ref{errorbounds2}) hold for each run. Recall that we denoted by $\cal K$ the event where both inequalities hold throughout all runs of ${\rm{ILESS}}$, which is exactly the definition of event $\cal E$ per run (see Definition \ref{E}). Under event $\cal K$, Lemma \ref{lemma:f*in} implies that all $f^{*}$ of the original distribution $\mathcal{P_{X,Y}}$ reside within $G_{t}$ for all $t$. This also implies that all $f^*$ of the original distribution remain the true risk minimizers under $\mathcal{P_{X,Y}}(G_{t})$, for all $t$, as they always benefit from the creation of the artificial labels. Because the marginal of the distribution does not change during the run of {{\textrm{Active-ILESS}}}, and because event $\cal E$ holds for each iteration of {{\rm{ILESS}}}, we can apply Theorem \ref{thm:LessRejection} for all of the runs of {{\rm{ILESS}}}.
We thus get that for every run of {{\rm{ILESS}}}, the rejection mass is bounded by $$ 1-\Phi({\rm{ILESS}}) \leq \theta(R_{0}) \cdot R_{0}, $$ where $$ R_{0} \triangleq 2\cdot R(f^{*}) + 11 \cdot \frac{A}{m} + 6 \cdot \sqrt{\frac{A}{m} \cdot R(f^{*})}. $$ We denoted by $R(f^{*})$ the true error according to the original distribution, which might be larger than the true error implied by the fake label distributions that the algorithm induces. However, according to Lemma \ref{lemma:disagreement_monotonicity}, enlarging $R_{0}$ can only weaken the bound, and thus, there is no problem doing so. We additionally bound $R_{0}$ using $\sqrt{AB} \leq A/2 + B/2$ to get $$ R_{0} \leq 5\cdot R(f^{*}) + 14 \cdot \frac{A}{m}. $$ Given our bound on the disagreement coefficient, we conclude that $$ 1-\Phi({\rm{ILESS}}) \leq {\rm polylog}_1(\frac{1}{5\cdot R(f^{*}) + 14 \cdot \frac{A}{m}}) \cdot (5\cdot R(f^{*}) + 14 \cdot \frac{A}{m}). $$ Each activation of ${\rm{ILESS}}$ has delta equal to $\frac{\delta}{4t}$, and thus, exactly as in Lemma \ref{lemma:errorbounds}, with probability of at least $1-\delta/2$, they all have a bounded rejection mass simultaneously. We assume that this event occurred. According to the definition of $G_t$ in Strategy \ref{alg:Active-iLESS}, the probability distribution of the artificial labeling done by {{\textrm{Active-ILESS}}} changes only when $t$ is a natural power of 2. Thus, the probability of requesting label $t>2$, denoted by $P_t$, is bounded by \begin{equation} \label{ccc} P_t \leq {\rm polylog}_1 \left(\frac{1}{5\cdot R(f^{*}) + 14 \cdot \frac{A}{T}} \right) \cdot 5R(f^*) + \frac{14A \cdot {\rm polylog}_1 \left(\frac{1}{5\cdot R(f^{*}) + 14 \cdot \frac{A}{T}} \right)}{T}, \end{equation} where $T=2^{\left \lfloor \log_2 (t-1) \right \rfloor -1}$. We now have a series of Poisson trials, $X_1,X_2,\ldots,X_m$, with $\Pr(X_t = 1)=P_t$, and each $X_i$ is an indicator variable for the labeling of the $i$th example.
We use a version of the Chernoff bound \cite{JohnCannyNotes} to bound the label complexity.\footnote{We found this useful bound in \cite{hannekestatistical} (Theorem 5.4).} The statement and a sketch of the proof of this bound are provided in Lemma \ref{ChernoffBound2} in the Appendix. For independent Poisson variables $X_1,X_2,\ldots,X_m$, where $\Pr[X_i=1]=p_i$, $X\triangleq\sum_{i=1}^{m} X_i$, and $\mu = \mathbb{E} X$, for every $\alpha>2 e -1$: $$ \Pr (X>(1+\alpha)\mu) \leq 2^{-\mu \alpha}. $$ To bound $\mu = \mathbb{E} X$ from above, we use inequality (\ref{ccc}) and plug it into the definition of $\mu$. \begin{eqnarray} \mu &=& P_1 + P_2 + \sum_{t=3}^{m} P_t \nonumber \\ &\leq& 2 + \sum_{k=1}^{\log_2 m -1} 2^{k}P_{2^{k+1}} \nonumber \\ &\leq& 2 + m \cdot {\rm polylog}_1 \left(\frac{1}{5 R(f^{*}) + 14 \frac{A}{m}} \right) \cdot R(f^*) + \sum_{k=1}^{\log_2 m -1} 2^{k} \frac{14A \cdot {\rm polylog}_1 \left(\frac{1}{5 R(f^{*}) + 14 \frac{A}{m}} \right)}{2^{k-1}} \nonumber \\ &\leq& 2 + m \cdot {\rm polylog}_1 \left(\frac{1}{5 R(f^{*}) + 14 \frac{A}{m}} \right) \cdot R(f^*) + 28 \log_2 m \cdot A \cdot {\rm polylog}_1 \left(\frac{1}{5 R(f^{*}) + 14 \frac{A}{m}} \right) . \nonumber \\ \label{fg} \end{eqnarray} We need to choose an $\alpha$ that satisfies both $ 2^{-\mu \alpha} \leq \delta / 2, $ and $ \alpha > 2e-1$. Clearly, $\alpha = \frac{\log_2(2/\delta)}{\mu}+2e-1$ suffices. Hence, we get that with probability of at least $1-\delta/2$, \begin{eqnarray*} X &\leq& (1+\frac{\log_2(2/\delta)}{\mu}+2e-1)\mu \\ &=& \log_2(2/\delta)+2e\mu.
\end{eqnarray*} Inequality (\ref{fg}) holds with probability of at least $1-\delta/2$, and using the union bound, we get that with probability of at least $1-\delta$, \begin{eqnarray} X &\leq& \log_2(2/\delta)+2e\left( 2 + m \cdot {\rm polylog}_1 \left(\frac{1}{5 R(f^{*}) + 14 \frac{A}{m}} \right) \cdot R(f^*) + 28 \log_2 m \cdot A \cdot {\rm polylog}_1 \left(\frac{1}{5 R(f^{*}) + 14 \frac{A}{m}} \right) \right) \nonumber \\ &=& {\rm polylog}_1 \left(\frac{1}{5 R(f^{*}) + 14 \frac{A}{m}} \right) 2e \cdot mR(f^{*}) + \log_2(2/\delta) + 56 e \cdot \log_2 m \cdot A \cdot {\rm polylog}_1 \left(\frac{1}{5 R(f^{*}) + 14 \frac{A}{m}} \right) \nonumber \\ \label{bbb} \end{eqnarray} \end{proof} The dominant factor of Equation (\ref{bbb}), if we ignore the logarithmic factors, is $m R(f^*)$. {{\textrm{Active-ILESS}}} has passive example complexity (see Definition \ref{passive example complexity}), which means that the total sample complexity is bounded by $\tilde{O}( \frac{1}{\epsilon}+ \frac{R(f^*)}{\epsilon^2} )$, where $\tilde{O}( \cdot)$ hides logarithmic factors. Plugging the sample complexity into $m$ in (\ref{bbb}), we get that the total label complexity is bounded by $\tilde{O}(\frac{R(f^*)^2}{\epsilon^2})$, in cases for which ${\rm{ILESS}}$ has a fast $R^*$ rejection rate. In \cite[Theorem 3]{kar2006}, Kääriäinen showed that for every active learning algorithm, under a specific (non-trivial) hypothesis class $\cal F$, there exists a deterministic target function $g$, and a marginal distribution $\mathcal{P_{X}}$, s.t. the label complexity is $\tilde{\Omega}(\frac{R(f^*)^2}{\epsilon^2})$ (where $\tilde{\Omega}(\cdot)$ hides logarithmic factors). \section{Concluding Remarks} In this paper we focused on disagreement-based methods. Namely, we always required that $f^*$ remain inside a low-error subset of hypotheses w.h.p., and made decisions based on disagreement considerations. 
We introduced a new selective classification algorithm, called ${\rm{ILESS}}$, whose rejection ``engine'' utilizes sharp generalization bounds (which depend on $R(f^*)$). Our analysis proves that ${\rm{ILESS}}$ has sometimes significantly better rejection guarantees relative to the best known pointwise-competitive selective strategy of \cite{WienerE14}. Moreover, the guarantees we provide for ${\rm{ILESS}}$ do not depend at all on the Bernstein assumption. For the general agnostic setting, we showed an equivalence relation between pointwise-competitive selective classification, active learning, and the disagreement coefficient (see Figure~\ref{figure1}). This equivalence is formulated in terms of a fast $R^*$ rejection rate and $R^*$ exponential speedup (Definitions~\ref{fast $R^*$ rejection rate} and~\ref{$R^*$ exponential speedup}). Theorems \ref{thm:PointwiseSelectiveToCoeff} and \ref{thm:LessRejection} show that selective classification with a fast $R^*$ rejection rate is completely equivalent to having a disagreement coefficient bounded by ${\rm polylog}(1/r)$ for $r>0$. In Section \ref{sec:Active-iLESS}, in Strategy \ref{alg:Active-iLESS}, we define {{\textrm{Active-ILESS}}} using {{\rm{ILESS}}} implicitly as its engine (see State 4 in Strategy \ref{alg:Active-iLESS}). We can replace {{\rm{ILESS}}} with another pointwise-competitive selective algorithm, and thus construct a new active learner, that queries a label whenever the selective classifier abstains, and create a fake label according to the decision of the classifier whenever it decides to predict. Because the selective predictor is pointwise-competitive, we know that the underlying distribution induced by its fake labels is equivalent to a distribution defined by a deterministic labeling according to $f^*$ and the same $\mathcal{P_{X}}$. 
The algorithm will terminate using the exact same termination condition as {{\textrm{Active-ILESS}}} (when $\sigma_{Active}<\epsilon$), and thus the total sample complexity (labeled and unlabeled examples) will remain the same. The change will only be in the labeling criterion. Lemmas \ref{lemma:f_star_best}, \ref{lemma:f*in}, \ref{lemma:epsilon}, \ref{lemma:radius_active}, and \ref{lemma:max_examples_observed} can all be generalized to such an algorithm. Going in the other direction to create a selective classifier from a general active learner is more challenging. However, if the active learner follows the {{\textrm{Active-ILESS}}} paradigm, and in particular, uses a pointwise-competitive selective classifier to decide on label requests, then a new pointwise-competitive selective classifier can be created in the same way that {{\textrm{Batch-ILESS}}} was created, and then we can obtain a restatement of Theorem \ref{thm:activeToSelective} providing a reduction from an $R^*$ exponential speedup of the active algorithm, to a fast $R^*$ rejection rate of the selective classifier. Disagreement-based decision making in active and selective learning leads to ``defensive'' algorithms. For example, in the active learning case, this means that a defensive algorithm will ask for more labels than a more aggressive algorithm. In selective classification, this defensiveness provides the power to be pointwise-competitive, but will entail an increased rejection rate. It would be interesting to consider more aggressive algorithms that could, for example, take into consideration an estimation of $\mathcal{P_{X}}$ in order to ignore examples that cause disagreement only between functions that are very similar to each other (in terms of the probability mass of their difference). Such algorithms can be seen in \cite{Dasgupta05coarse, FreundEtAl97a,gonen2013efficient}, for the realizable and the low error scenarios. 
We believe that there is still work to be done for the agnostic scenario. Many aggressive algorithms could be devised under assumptions about knowledge of $\mathcal{P_{X}}$ (that could be acquired during the run of the algorithm, and is given in the transductive case), or in a Bayesian setting where a prior distribution on ${\cal F}$ exists. When researching this direction, one might also want to define a cost over unlabeled examples, and discuss the trade-off between labeled and unlabeled examples. The main open question inspired by our results would be to identify similar correspondence between aggressive selective classification algorithms and aggressive active learners. Another aspect of selective classification and active learning, which was not addressed in this paper, is differentiating between more and less noisy areas of the distribution. A noisy area could be defined as an area for which even the best classifier in the class could not achieve a low-error. This motivates a new type of labeling for selective prediction, where one can abstain for two reasons: (i) lack of knowledge in a specific region of ${\cal X}$, i.e., not enough examples were observed in that region, and the generalization bounds are not sufficiently tight. (ii) The region was well explored, but even the best classifier performs poorly, and thus the answer is unknown (the region is noisy). In our paper, an active learner will query for both scenarios; however, a more clever active learner might only query examples of the first type, as examples of the second type cannot reduce its error. \section*{Acknowledgments} This research was supported by The Israel Science Foundation (grant No. 1890/14) \appendix \section{} \begin{proof} [of Lemma \ref{lemma:f*in}] We prove the claim by induction over $t$ for which $G_{t}$ is different from $G_{t-1}$. The base case of the induction is clear. 
We now show that functions that are true risk minimizers of $\mathcal{P_{X,Y}}(G_{t-1})$ reside within $G_{t}$. According to Lemma~\ref{lemma:f_star_best}, $f^{*}$ is a true risk minimizer under $\mathcal{P_{X,Y}}(G_{t-1})$ (given the induction hypothesis), and hence will also be within $G_{t}$. We refer by $f^{*}$ to a true risk minimizer according to $\mathcal{P_{X,Y}}(G_{t-1})$. Using inequality (\ref{errorbounds2}) and the definition of $\bar{\sigma}_{\hat{R}-R}$, \begin{eqnarray} \hat{R}(f^{*},\hat{S}) &\leq& R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*})+\sigma_{\hat{R}-R}\left(\frac{t}{2},\frac{\delta}{2t},d,R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*}),\hat{R}(f^{*},\hat{S})\right) \nonumber \\ &\leq& R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*})+\bar{\sigma}_{\hat{R}-R}\left(\frac{t}{2},\frac{\delta}{2t},d,R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*})\right), \nonumber \\ \label{t1} \end{eqnarray} and by inequality (\ref{errorbounds1}) and the definition of $\hat{f}$ we get, \begin{eqnarray} R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*}) &\leq& R_{\mathcal{P_{X,Y}}(G_{t-1})}(\hat{f}) \nonumber \\ &\leq& \hat{R}(\hat{f},\hat{S}) +\sigma_{R-\hat{R}}\left(\frac{t}{2},\frac{\delta}{2t},d,R_{\mathcal{P_{X,Y}}(G_{t-1})}(\hat{f}),\hat{R}(\hat{f},\hat{S})\right) \nonumber \\ &\leq&\hat{R}(\hat{f},\hat{S}) +\hat{\sigma}_{R-\hat{R}}\left(\frac{t}{2},\frac{\delta}{2t},d,\hat{R}(\hat{f},\hat{S})\right). \nonumber \\ \label{t2} \end{eqnarray} Plugging (\ref{t2}) into (\ref{t1}) we get, \begin{eqnarray} \hat{R}(f^{*},\hat{S}) &\leq& \hat{R}(\hat{f},\hat{S}) +\hat{\sigma}_{R-\hat{R}}\left(\frac{t}{2},\frac{\delta}{2t},d,\hat{R}(\hat{f},\hat{S})\right) +\bar{\sigma}_{\hat{R}-R}\left(\frac{t}{2},\frac{\delta}{2t},d,\hat{R}(\hat{f},\hat{S}) +\hat{\sigma}_{R-\hat{R}}(\frac{t}{2},\frac{\delta}{2t},d,\hat{R}(\hat{f},\hat{S}))\right) \nonumber \\ &&\Rightarrow f^{*}\in G_{t}. 
\label{t3} \end{eqnarray} \end{proof} \begin{proof}[of Lemma \ref{lemma:epsilon}] Let $G_{t-1}$ be the final low-error set of ${\textrm{Active-ILESS}}$, and let $\hat{S}$ be the final set of examples. The following inequalities are derived from Lemma~\ref{lemma:errorbounds} and inequalities (\ref{t1}) and (\ref{t2}). \begin{eqnarray*} R_{\mathcal{P_{X,Y}}(G_{t-1})}(\hat{f}) &\leq& \hat{R}(\hat{f},\hat{S}) +\hat{\sigma}_{R-\hat{R}}\left(\frac{t}{2},\frac{\delta}{2t},d,\hat{R}(\hat{f},\hat{S})\right) \nonumber \\ &\leq&\hat{R}(f^{*},\hat{S}) +\hat{\sigma}_{R-\hat{R}}\left(\frac{t}{2},\frac{\delta}{2t},d,\hat{R}(\hat{f},\hat{S})\right) \nonumber \\ &\leq& R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*})+\bar{\sigma}_{\hat{R}-R}\left(\frac{t}{2},\frac{\delta}{2t},d,R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*})\right)+\hat{\sigma}_{R-\hat{R}}\left(\frac{t}{2},\frac{\delta}{2t},d,\hat{R}(\hat{f},\hat{S})\right)\nonumber \\ &\leq& R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*})+\bar{\sigma}_{\hat{R}-R}\left(\frac{t}{2},\frac{\delta}{2t},d,\hat{R}(\hat{f},\hat{S}) +\hat{\sigma}_{R-\hat{R}}\left(\frac{t}{2},\frac{\delta}{2t},d,\hat{R}(\hat{f},\hat{S})\right) \right) +\hat{\sigma}_{R-\hat{R}}\left(\frac{t}{2},\frac{\delta}{2t},d,\hat{R}(\hat{f},\hat{S})\right) \nonumber \\ &\leq& R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*})+ \epsilon. \nonumber \\ \end{eqnarray*} By Lemma~\ref{lemma:f*in} we know that $f^{*}$ resides within $G_{t-1}$, which implies that any change in $\mathcal{P_{X,Y}}(G_{t-1})$ in comparison to $\mathcal{P_{X,Y}}$ reduces the true error of $f^{*}$. This also means that for every $f \in \mathcal{F}$, \begin{displaymath} R_{\mathcal{P_{X,Y}}}(f) - R_{\mathcal{P_{X,Y}}(G_{t-1})}(f) \leq R_{\mathcal{P_{X,Y}}}(f^{*}) - R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*}), \end{displaymath} which results in \begin{displaymath} R_{\mathcal{P_{X,Y}}}(\hat{f}) \leq R_{\mathcal{P_{X,Y}}}(f^{*}) + \epsilon. 
\end{displaymath} \end{proof} \begin{proof}[of Lemma \ref{lemma:radius_active}] The proof is similar to the proof of Lemma \ref{lemma:radius}. We consider the last modification of $G_t$ as a run of {{\rm{ILESS}}}, under $\mathcal{P_{X,Y}}(G_{t-1})$, with $m_0 \triangleq 2^{\lfloor \log_2 m \rfloor -1}$ examples and delta equal to $\frac{\delta}{4m_0}$. Under event $\cal K$, the conditions of Lemma~\ref{lemma:radius} hold, and by Lemma \ref{lemma:f*in}, $R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*}) \leq R(f^*)$. We simply apply Lemma~\ref{lemma:radius} with these parameters to get $A'$ ($A$ in Lemma~\ref{lemma:radius}). $$ A' = 4d \ln( \frac{16m_0 e}{d\delta/4m_0} )= 4d \ln( \frac{64m_0^2 e}{d\delta } ). $$ The fact that $m/4 \leq m_0 \leq m/2$ completes the proof. \end{proof} \begin{proof}[of Lemma \ref{lemma:max_examples_observed}] We know by Lemma \ref{lemma:radius_active} that there exist constants $C_1$,$C_2$ that depend only on $\ln(\frac{1}{\delta})$ and $d$, and are independent of $m$, s.t. $$ \sigma_{Active} \leq C_1\frac{\ln m}{m} + C_2\sqrt{\frac{\ln m}{m}\cdot R(f^*)}. $$ We also know by the definition of {{\textrm{Active-ILESS}}} (Strategy \ref{alg:Active-iLESS}), that it terminates when $\sigma_{Active}$ is smaller than the given $\epsilon$. We will find $m$ large enough s.t. \begin{eqnarray} && C_1\frac{\ln m}{m} \leq \epsilon/2, \label{bb80}\\ && C_2\sqrt{\frac{\ln m}{m}\cdot R(f^*)} \leq \epsilon/2 \label{bb81}. \end{eqnarray} We assume that $\epsilon \leq 1/e$, as it is easy to find a proper $m$ for $\epsilon > 1/e$. Starting with Equation (\ref{bb80}), we want to show that $m=O(\frac{1}{\epsilon}\ln(\frac{1}{\epsilon}))$ satisfies it. Thus, we find $k_1$ s.t.
\begin{eqnarray*} && C_1\frac{\ln (k_1\frac{1}{\epsilon}\ln(\frac{1}{\epsilon}))}{k_1\frac{1}{\epsilon}\ln(\frac{1}{\epsilon})} \leq \frac{\epsilon}{2} \label{bb82}\\ &\Leftrightarrow& \frac{\ln(k_1\frac{1}{\epsilon}\cdot \ln( \frac{1}{\epsilon}))}{\ln(\frac{1}{\epsilon})} \leq \frac{k_1}{2C_1}. \end{eqnarray*} Bounding the left-hand side of the equation for $\epsilon \leq 1/e$ gives us, \begin{eqnarray*} \frac{\ln(k_1\frac{1}{\epsilon}\cdot \ln( \frac{1}{\epsilon}))}{\ln(\frac{1}{\epsilon})} &\leq& \frac{\ln(k_1\frac{1}{\epsilon}\cdot \frac{1}{\epsilon})}{\ln(\frac{1}{\epsilon})} \\ &\leq& 2+\ln k_1. \end{eqnarray*} We need to find $k_1$ that will satisfy \begin{eqnarray*} 2+\ln k_1 \leq \frac{k_1}{2C_1}. \end{eqnarray*} $k_1=16C_1^2$ will work for $C_1 \geq 1$; otherwise, we take $k_1=10$. We use the same procedure to show that $m=O(\frac{R(f^*)}{\epsilon^2}\ln(\frac{R(f^*)}{\epsilon^2} ))$ satisfies Equation (\ref{bb81}). We rewrite the equation in the following way: \begin{eqnarray*} && \frac{\ln m}{m} \leq \frac{\epsilon^2}{4C_2^2 R(f^*)} \triangleq \epsilon_0. \end{eqnarray*} We assume that $\epsilon_0 \leq 1/e$ ($m=4$ holds otherwise) and find $k_2$ s.t. \begin{eqnarray*} && \frac{\ln (k_2\frac{1}{\epsilon_0}\ln(\frac{1}{\epsilon_0}))}{k_2\frac{1}{\epsilon_0}\ln(\frac{1}{\epsilon_0})} \leq \epsilon_0. \end{eqnarray*} As before, we reduce the problem to finding $k_2$ that satisfies \begin{eqnarray*} 2+\ln(k_2) \leq k_2. \end{eqnarray*} $k_2 = 4$ suffices. We thus get that $m=O(\frac{1}{\epsilon_0}\ln(\frac{1}{\epsilon_0} ))=O(\frac{R(f^*)}{\epsilon^2}\ln(\frac{R(f^*)}{\epsilon^2} ))$ satisfies Equation (\ref{bb81}). This implies that there exists a function $m(1/\epsilon, R(f^*)) = O \left(\frac{1}{\epsilon}\ln(\frac{1}{\epsilon})+ \frac{R(f^*)}{\epsilon^2}\ln(\frac{R(f^*)}{\epsilon^2}) \right) $ that bounds the total number of examples processed by {{\textrm{Active-ILESS}}}.
\end{proof} \begin{lemma} \label{ChernoffBound} \cite{JohnCannyNotes} Let $X_1,X_2,...,X_n$ be independent Bernoulli trials with $\Pr[X_i=1]=p$, let $X\triangleq\sum_{i=1}^{n} X_i$, and let $\mu = \mathbb{E} X$. Then, for every $\alpha \geq 0$: $$\Pr (X<(1-\alpha)\mu) \leq \exp(-\mu \alpha^2 /2).$$ \end{lemma} \begin{proof} This proof is taken from the work of John Canny \cite{JohnCannyNotes}. For $t>0$, we have \begin{eqnarray} \Pr (X<(1-\alpha)\mu) = \Pr (\exp(-tX) > \exp(-t(1-\alpha)\mu)). \label{ap1} \end{eqnarray} We use Markov's inequality. For a nonnegative random variable $X$, and $a>0$, \begin{eqnarray*} \Pr (X \geq a) \leq \frac{\mathbb{E}(X)}{a}. \end{eqnarray*} We apply the inequality to the right-hand side of Equation (\ref{ap1}), to get \begin{eqnarray} \Pr (X<(1-\alpha)\mu) \leq \frac{\mathbb{E}(\exp(-tX))}{\exp(-t(1-\alpha)\mu)}. \label{ap2} \end{eqnarray} $X_1,X_2,...,X_n$ are independent and thus $$\mathbb{E}(\exp(-tX)) = \prod_{i=1}^{n} \mathbb{E}(\exp(-tX_i)).$$ For each $X_i$, $$\mathbb{E}(\exp(-tX_i)) = pe^{-t} + (1-p) = 1 - p(1 - e^{-t}).$$ We use the fact that $1-x < \exp(-x)$ for all $x$, with $x=p(1 - e^{-t})$, to get $$\mathbb{E}(\exp(-tX_i)) \leq \exp(-p(1 - e^{-t})), $$ and conclude that \begin{eqnarray} \label{bb60} \mathbb{E}(\exp(-tX)) = \prod_{i=1}^{n} \mathbb{E}\left(\exp(-tX_i)\right) \leq \prod_{i=1}^{n} \exp\left(-p(1 - e^{-t})\right) \nonumber\\ =\exp\left(\sum_{i=1}^{n} p(e^{-t}-1)\right) = \exp\left(\mu (e^{-t}-1)\right). \end{eqnarray} Going back to Equation (\ref{ap2}), we have, \begin{eqnarray} \Pr (X<(1-\alpha)\mu) \leq \frac{\exp \left( \mu (e^{-t}-1)\right)}{\exp\left(-t(1-\alpha)\mu\right)} = \exp\left(\mu (e^{-t}-1+t-t\alpha ) \right). \label{ap3} \end{eqnarray} We choose $t>0$ to make the right-hand side of the equation as small as possible. 
After differentiation, we get that the best $t$ is $t=\ln(\frac{1}{1-\alpha})$, and plugging it into Equation (\ref{ap3}) gives us, \begin{eqnarray} \Pr (X<(1-\alpha)\mu) &\leq& \exp\left(\mu (1-\alpha-1+\ln(\frac{1}{1-\alpha})-\ln(\frac{1}{1-\alpha})\alpha )\right) \nonumber \\ &=& \exp \left( \mu (-\alpha+\ln(\frac{1}{1-\alpha})(1-\alpha) ) \right) \nonumber \\ &=& \left(\frac{e^{-\alpha}}{(1-\alpha)^{1-\alpha}}\right)^\mu. \label{ap4} \end{eqnarray} We now simplify this bound to get the desired result. We know that $(1-\alpha)^{1-\alpha} = e^{(1-\alpha)\ln(1-\alpha)}$, and by Taylor expansion $$ \ln(1-\alpha) = -\alpha - \frac{\alpha^2}{2} - \frac{\alpha^3}{3} - \dots , $$ which multiplied by $(1-\alpha)$, gives us \begin{equation} (1-\alpha)\ln(1-\alpha) = -\alpha + \frac{\alpha^2}{2} + \text{positive terms} > -\alpha + \frac{\alpha^2}{2}. \label{cccc} \end{equation} Plugging (\ref{cccc}) into Equation (\ref{ap4}), we finally get, \begin{eqnarray} \Pr (X<(1-\alpha)\mu) &\leq& \left(\frac{e^{-\alpha}}{(1-\alpha)^{1-\alpha}}\right)^\mu \nonumber \\ &=& \left(\frac{e^{-\alpha}}{e^{(1-\alpha)\ln(1-\alpha)}}\right)^\mu \nonumber \\ &\leq& \left(\frac{e^{-\alpha}}{e^{-\alpha + \frac{\alpha^2}{2}}}\right)^\mu \nonumber \\ &=& e^{-\mu\alpha^{2}/2}. \end{eqnarray} \end{proof} \begin{lemma} \label{ChernoffBound2} \cite{JohnCannyNotes} Let $X_1,X_2,...,X_n$ be independent Poisson trials with $\Pr[X_i=1]=p$, let $X\triangleq\sum_{i=1}^{n} X_i$, and let $\mu = \mathbb{E} X$. Then, for every $\alpha\geq 2e-1$: $$ \Pr (X>(1+\alpha)\mu) \leq 2^{-\mu \alpha}. $$ \end{lemma} \begin{proof sketch} This sketch is taken from the work of John Canny \cite{JohnCannyNotes}. It is almost identical to the proof of Lemma \ref{ChernoffBound}. We start by showing that \begin{eqnarray*} \Pr (X>(1+\alpha)\mu) &\leq& \left(\frac{e^{\alpha}}{(1+\alpha)^{1+\alpha}}\right)^\mu. 
\end{eqnarray*} For every $t>0$, \begin{eqnarray*} \Pr (X>(1+\alpha)\mu) &=& \Pr[\exp(tX)>\exp \left( t(1+\alpha)\mu \right) ]. \end{eqnarray*} As we did in Lemma \ref{ChernoffBound}, we compute the Markov bound, \begin{eqnarray*} \Pr (X>(1+\alpha)\mu) &\leq& \frac{\mathbb{E}(\exp(tX))}{\exp(t(1+\alpha)\mu)}, \end{eqnarray*} and use the fact that $X_i$ are independent, just like in (\ref{bb60}), to get that $$\mathbb{E}(\exp(tX)) \leq \exp \left( \mu (e^{t}-1) \right) .$$ Thus we get that \begin{eqnarray*} \Pr (X>(1+\alpha)\mu) &\leq& \frac{\exp(\mu (e^{t}-1))}{\exp(t(1+\alpha)\mu)}=\exp\left(\mu(e^t-1-t-\alpha t)\right). \end{eqnarray*} By differentiation, we choose $t=\ln(1+\alpha)$ to get \begin{eqnarray*} \Pr (X>(1+\alpha)\mu) &\leq& \left(\frac{e^{\alpha}}{(1+\alpha)^{1+\alpha}}\right)^\mu. \end{eqnarray*} For $\alpha \geq 2e-1$: \begin{eqnarray*} \Pr (X>(1+\alpha)\mu) &\leq& \left(\frac{e^{\alpha}}{(1+\alpha)^{1+\alpha}}\right)^\mu \leq \left(\frac{e^{\alpha}}{(2e)^{1+\alpha}}\right)^\mu \leq \left(\frac{e^{\alpha}}{(2e)^{\alpha}}\right)^\mu = 2^{-\mu\alpha}. \end{eqnarray*} \end{proof sketch} \begin{lemma} \label{lemma:radius_batch} Given that event $\cal K$ (see Definition \ref{K}) occurred, the radius of ${\textrm{Batch-ILESS}}$, as defined in Strategy \ref{alg:Active-iLESS}, stage 4, satisfies \begin{eqnarray} \sigma_{Active}=O\left(\frac{B}{m}+ \sqrt{\frac{B}{m} \cdot R(f^{*})} \right), \end{eqnarray} where $B \triangleq 4d \ln( \frac{8m^2e}{d\delta} ).$ \end{lemma} \begin{proof} {{\textrm{Batch-ILESS}}} simulates a run of {{\textrm{Active-ILESS}}}. Consider a run of {\textrm{Active-ILESS}} with $m_0$ examples and $\delta = \delta_0$. The last iteration in which $G_t$ has changed (relative to $G_{t-1}$) was iteration $2^{\lfloor \log_2 m_0 \rfloor} \triangleq T$. 
$G_T$ is calculated in exactly the same way as {{\rm{ILESS}}} calculates its $G$ under probability distribution $\mathcal{P_{X,Y}}(G_{T-1})$, when it is provided with $T/2$ examples, and $\frac{\delta_0}{2T}$ as its delta. Assuming that event $\cal K$ occurred, we deduce that event $\cal E$ (see Definition \ref{E}) occurred as well. Therefore, Lemma~\ref{lemma:radius} holds for the last iteration of {{\textrm{Batch-ILESS}}}. {{\rm{ILESS}}} operates in this run on $T/2$ labeled examples, and it holds that $m_0/4 \leq T/2 \leq m_0/2$. The delta it uses in this run is $\frac{\delta_0}{2T} > \frac{\delta_0}{m_0}$, so by Lemma \ref{lemma:radius}, we have \begin{equation*} \sigma_{Active} \leq 6\frac{B}{m_0/4} + 3\sqrt{\frac{B}{m_0/4} \cdot R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*})} = 24\frac{B}{m_0} + 6\sqrt{\frac{B}{m_0} \cdot R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*})}. \end{equation*} To finish the proof, we need to show that $R(f^*) \geq R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*})$. From Lemma \ref{lemma:f*in}, we know that when $\cal K$ occurs, any $f^{*}$ of the original distribution $\mathcal{P_{X,Y}}$ resides within $G_{t}$ for all $t$. Thus, the true error of $f^*$ can only decrease under the revised distribution $G_{t-1}(f^{*})$. \end{proof} \begin{theorem} \label{thm:LessRejection_batch} Let $\mathcal{F}$ be a hypothesis class with VC-dimension $d$, and let $\mathcal{P_{X,Y}}$ be an unknown probability distribution. Assume that event $\cal K$ (see Definition \ref{K}) occurred. Then, for all $f^*$, the abstain rate is bounded by $$ 1-\mathbb{P}hi({\textrm{Batch-ILESS}}) \leq \theta_{f^*}(R_{0}) \cdot R_{0}, $$ where $$ R_{0} \triangleq 2\cdot R(f^{*}) + 44 \cdot \frac{B}{m} + 12 \cdot \sqrt{\frac{B}{m} \cdot R(f^{*})}. $$ where $B \triangleq 4d \ln( \frac{8m^2e}{d\delta} ).$ This immediately implies (by definition) that $$ 1-\mathbb{P}hi({\textrm{Batch-ILESS}}) \leq \theta(R_{0}) \cdot R_{0}. 
$$ \end{theorem} \begin{proof sketch} The proof is very similar to the proof of Lemma \ref{lemma:radius_batch}. We observe the last modification of $G_T$, and notice that the change was made according to a run of {{\rm{ILESS}}}, on the implied probability distribution $\mathcal{P_{X,Y}}(G_{T-1})$. Then we simply activate Theorem \ref{thm:LessRejection} with the relevant parameters plugged into it. Note that by Lemma \ref{lemma:f*in}, all $f^*$ of the original distribution reside within $G_t$ for all $t$, and thus, by Lemma \ref{lemma:f_star_best}, they are all true risk minimizers of $\mathcal{P_{X,Y}}(G_{T-1})$. This also implies that $R(f^*) \geq R_{\mathcal{P_{X,Y}}(G_{t-1})}(f^{*})$ and thus can be used to bound Equation (\ref{eq30}) of the original theorem that was proven for {\rm LESS}. $\theta_{f}$ is independent of $\mathcal{P_{Y|X}}$ for all $f$, and thus the change of the labels does not affect it. \end{proof sketch} \appendix \end{document}
\begin{document} \title[ Families of embeddings and of group actions ]{Holomorphic families of non-equivalent embeddings and of holomorphic group actions on affine space} \author{FRANK KUTZSCHEBAUCH} \address{Institute of Mathematics, University of Bern \\ Sidlerstrasse 5, CH-3012 Bern, Switzerland} \email{[email protected]} \author{SAM LODIN} \address{Dept. of Natural Sciences, Engineering and Mathematics, Mid Sweden University \\ SE-851 70 Sundsvall, Sweden} \email{[email protected]} \thanks{The first author supported by Schweizerische Nationalfonds grant 200021-116165/1} \begin{abstract} We construct holomorphic families of proper holomorphic embeddings of $\mathbb{C}^k$ into $\mathbb{C}^n$ ($0<k<n-1$), so that for any two different parameters in the family no holomorphic automorphism of $\mathbb{C}^n$ can map the image of the corresponding two embeddings onto each other. As an application to the study of the group of holomorphic automorphisms of $\mathbb{C}^n$ we derive the existence of families of holomorphic $\mathbb{C}^*$-actions on $\mathbb{C}^n$ ($n\ge 5$) so that different actions in the family are not conjugate. This result is surprising in view of the long standing Holomorphic Linearization Problem, which in particular asked whether there would be more than one conjugacy class of $\mathbb{C}^*$ actions on $\mathbb{C}^n$ (with prescribed linear part at a fixed point). 
\end{abstract} \keywords{Complex Analysis; Proper Holomorphic Embeddings; Equivalent Embeddings; Eisenman Hyperbolicity; Complex Euclidean Spaces, Holomorphic Automorphisms, Group Actions, Oka-principle, Andersen-Lempert-theory} \subjclass[2000]{Primary 32M05, 32H02; Secondary 32Q28, 32Q40, 32Q45.} \maketitle \section{Introduction and statement of the main results.} It is a famous theorem of Remmert that any Stein manifold of dimension $n$ admits a proper holomorphic embedding into affine $N$-space $\mathbb{C}^N$ of sufficiently high dimension $N$~\cite{R}. Concerning this dimension, Eliashberg, Gromov~\cite{EG} and Sch\"urmann~\cite{S} proved that any Stein manifold of dimension $n>1$ can be embedded into $\mathbb{C}^{[3n/2]+1}$. A key ingredient in these results is the homotopy principle for holomorphic sections of elliptic submersions over Stein manifolds \cite{Gro}, \cite{FP2}, \cite{FF2}. These dimensions are the smallest possible due to an example of Forster~\cite{Fs1}. The optimal dimension for embeddings of Stein spaces can be found in Sch\"urmann's paper~\cite{S}. In this paper we do not investigate the question whether a given Stein space can be embedded into $\mathbb{C}^N$ for a given dimension $N$, but rather we investigate in how many ways this can be done in situations where at least one embedding exists. More precisely, we study the number of equivalence classes of proper holomorphic embeddings $\Phi\colon X\hookrightarrow\mathbb{C}^n$ with respect to the following equivalence relation: \begin{definition}\label{def-eq-emb} Two embeddings $\Phi,\Psi\colon X\hookrightarrow\mathbb{C}^n$ are \emph{equivalent} if there exist automorphisms $\varphi\in\operatorname{Aut}(\mathbb{C}^n)$ and $\psi\in\operatorname{Aut}(X)$ such that $\varphi\circ\Phi=\Psi\circ\psi$. 
\end{definition} In the algebraic case the question about the number of classes of equivalent embeddings $\mathbb{C}^k\hookrightarrow\mathbb{C}^n$ is well known and has been studied for a long time. The most famous result, due to Abhyankar and Moh~C^{\infty}te{AM}, states that every polynomial embedding of $\mathbb{C}$ into $\mathbb{C}^2$ is equivalent to the standard embedding. The same is in general true for high codimension, Kaliman~C^{\infty}te{Ka} proved that if $X$ is an affine algebraic variety and $n\geq\max\set{1+2\dim X,\dim TX}$ then all polynomial embeddings of $X$ into $\mathbb{C}^n$ are equivalent (by means of algebraic automorphisms). In the same paper Kaliman also proved that any polynomial embedding of $\mathbb{C}$ into $\mathbb{C}^3$ is holomorphically equivalent to the standard embedding. It is still an open question if this holds algebraically. In the holomorphic case the situation is different. Rosay and Rudin C^{\infty}te{RR2} were the first to construct non-standard embeddings of $\mathbb{C}$ into $\mathbb{C}^n$, $n>2$, thus showing that the number of equivalence classes is at least two. Forstneri\v c, Globevnik and Rosay~C^{\infty}te{FGR} showed that the result of Rosay and Rudin also holds for $n=2$. More generally, Forstneri\v c~C^{\infty}te{F} showed that the number of equivalence classes of embeddings $\mathbb{C}^k$ into $\mathbb{C}^n$ is at least two for any $0<k<n$. Later, Derksen and the first author~C^{\infty}te{DK} proved that there are uncountably many non-equivalent embeddings of $\mathbb{C}$ into $\mathbb{C}^n$ for $n>1$. Their result heavily uses the fact that the holomorphic automorphism group of $\mathbb{C}$ is a Lie group, i.e., it is very small in comparison to the automorphism group of complex Euclidean spaces in dimensions greater than $1$. 
Combining the ideas of that paper with the cancellation property for Eisenman hyperbolic spaces Borell and the first author then proved (see C^{\infty}te{BK}) that the number of equivalence classes of proper holomorphic embeddings of $\mathbb{C}^k$ into $\mathbb{C}^n$ is uncountable for any $0<k<n$. The last two above mentioned results are proven by using the Cantor diagonal process and it remained still an unsolved challenging problem whether non-equivalent embeddings could occur in continuous or even holomorphic families. Our first main result gives an affirmative answer. \begin{theorem}\label{main} Let $X$ be a complex space, which can be embedded into $\mathbb{C}^n$ and such that the group of holomorphic automorphisms $\operatorname{Aut}_{\rm{hol}}(X)$ is a Lie group. Then there exist, for $k=n-1-\dim X$, a family of holomorphic embeddings of $X$ into $\mathbb{C}^n$ parametrized by $\mathbb{C}^k$, such that for different parameters $w_1\neq w_2\in \mathbb{C}^k$ the embeddings $\varphi_{w_1},\varphi_{w_2}:X \hookrightarrow \mathbb{C}^n$ are non-equivalent. \end{theorem} \begin{notation} Observe that for $k=0$, $\dim X=n-1$ the conclusion of the theorem is empty. In this situation it is still known that there are uncountably many equivalence classes of embeddings by the above mentioned results from C^{\infty}te{BK}. \end{notation} We would like to emphasize that there is another (weaker) definition of equivalence --- called $\operatorname{Aut}(\mathbb{C}^n)$-equivalence --- which is used by several authors, e.g., Buzzard, Forstneri\v c, Globevnik and Varolin. In these papers uncountability of certain equivalence classes of embeddings in this weaker sense is proved. Our main result is much stronger than these results. 
In our definition, two embeddings $\mathbb{P}hi,\mathbb{P}si\colon X\hookrightarrow\mathbb{C}^n$ are equivalent if their {\bf images} coincide modulo $\operatorname{Aut}(\mathbb{C}^n)$, i.e., if there is an automorphism $\varphi\in\operatorname{Aut}(\mathbb{C}^n)$ such that the images of $\varphiC^{\infty}rc\mathbb{P}hi$ and $\mathbb{P}si$ coincide. In such a situation, the map $\mathbb{P}si^{-1}C^{\infty}rc\varphiC^{\infty}rc\mathbb{P}hi$ is well defined and it is an automorphism of $X$. The weaker notion mentioned above demand that $\varphiC^{\infty}rc\mathbb{P}hi$ and $\mathbb{P}si$ are equal as maps, i.e., it demands that $\mathbb{P}si^{-1}C^{\infty}rc\varphiC^{\infty}rc\mathbb{P}hi$ is the identity on $X$. Our application to group actions would not work for the weaker definition. Using the cancellation property for Eisenman hyperbolic spaces we can cross our situation with some affine space and we are able to conclude \begin{theorem} \label{main1}(see Corollary \ref{main corollary}) There exist, for $k=n-l-1$, a family of holomorphic embeddings of $\mathbb{C}^l$ into $\mathbb{C}^n$ parametrized by $\mathbb{C}^k$, such that for different parameters $w_1\neq w_2\in \mathbb{C}^k$ the embeddings $\primesi_{w_1},\primesi_{w_2}:\mathbb{C}^l \hookrightarrow \mathbb{C}^{n}$ are non-equivalent. \end{theorem} We also give an application of Theorem \ref{main} to actions of compact (or equivalently complex reductive, see C^{\infty}te{Ku}) groups on $\mathbb{C}^n$. It was a long standing problem whether all holomorphic actions of such groups on affine space are linear after a change of variables (see for example the overview article C^{\infty}te{Hu}). The first counterexamples to that (Holomorphic Linearization) problem were constructed by Derksen and the first author in C^{\infty}te{DK1}. 
In the present paper we show that the method from there is holomorphic in a parameter and therefore applied to our parametrized situation leads to \begin{theorem} \label{action1} For any $n\ge 5$ there is a holomorphic family of $\mathbb{C}^*$-actions on $\mathbb{C}^n$ parametrized by $\mathbb{C}^{n-4}$ $$\mathbb{C}^{n-4} \times \mathbb{C}^* \times \mathbb{C}^n \to \mathbb{C}^n \quad(w, \theta, z) \mapsto \theta_w (z)$$ so that for different parameters $w_1\neq w_2\in \mathbb{C}^{n-4}$ there is no equivariant isomorphism between the actions $\theta_{w_1}$ and $\theta_{w_2}$. \end{theorem} The linearization problem for holomorphic $\mathbb{C}^*$-actions on $\mathbb{C}^n$ is thus solved to the positive for $n=2$ by Suzuki C^{\infty}te{Su} and still open for $n=3$. For $n=4$ there are uncountably many actions (non-linearizable ones among them) C^{\infty}te{DK} and for $n\ge 5$ our result implies that there are families. Moreover there are families including a linear action as a single member of the family as our last main result shows \begin{theorem} \label{action2} For any $n\ge 5$ there is a holomorphic family of $\mathbb{C}^*$-actions on $\mathbb{C}^n$ parametrized by $\mathbb{C}$ $$\mathbb{C} \times \mathbb{C}^* \times \mathbb{C}^n \to \mathbb{C}^n\quad (w, \theta, z) \mapsto \theta_w (z)$$ so that for different parameters $w_1\neq w_2\in \mathbb{C}$ there is no equivariant isomorphism between the actions $\theta_{w_1}$ and $\theta_{w_2}$. Moreover the action $\theta_0$ is linear. \end{theorem} The paper is organized as follows. In section \ref{technik} we give all technical preparations for our (quite complicated) construction. The proofs will be given in the appendix. Section \ref{hauptsatz} contains the proof of Theorem \ref{main}. The next section \ref{Eisenman} contains an addition to Theorem \ref{main} which allows to deduce Theorem \ref{main1}. 
Section \ref{wirkung} contains the application to group actions, in particular the proofs of Theorems \ref{action1} and \ref{action2}. Some concluding remarks are contained in section \ref{concluding}. The results of the present paper have been partially announced in \cite{K1}. At that time the technical details had been extremely complicated and lengthy. Over the last years they have become much shorter and much more elegant so that the authors finally decided to publish the present complete version. Part of the work was done during a stay of the first author at the Mittag-Leffler-Institute during the special program in Complex Analysis of Several Variables 2008. We would like to thank the Institute for hospitality and excellent working conditions. \section{Technical preparations}\label{technik} In this section we state the main lemmas needed to prove the main theorem. The proofs of these lemmas can be found in section \ref{prooflemmas}. For the benefit of the reader we give a list of notations mostly adhered to in this paper. By an automorphism of $\mathbb{C}^n$ depending on a parameter $w\in \mathbb{C}^k$ we mean an element of $\mathrm{Aut}^{k}_{hol}(\mathbb{C}^{n}):= \{ \psi \in \mathrm{Aut}_{hol}(\mathbb{C}^{k+n})\quad : \quad \psi (w, z)= (w, \psi_1 (w,z))\} $, and approximations are understood to be uniform on compacts. We will throughout the paper call a holomorphic map $\eta : \mathbb{C}^k \to \mathbb{C}^n$ a \emph{parametrized point} $\eta (w)$ in $\mathbb{C}^n$. \begin{itemize} \item $\mathbb{B}_i$ is the open unit ball of $\mathbb{C}^i$. The closed ball is denoted $\overline{\mathbb{B}}_i$. \item $X$ a complex space of dimension $\dim X$. \item $\varphi_0=\iota :X\hookrightarrow \mathbb{C}^n$, where $\iota$ is the inclusion map. \item $\phi_0:\mathbb{C}^k \times X \to \mathbb{C}^k\times \mathbb{C}^n$ is given by $(w,x)\mapsto (w,\varphi_0(x))$ for $w\in \mathbb{C}^k$. \item $\alpha_n\in \mathrm{Aut}^k_{hol}(\mathbb{C}^n)$. 
\item $A_n=\alpha_nC^{\infty}rc \alpha_{n-1}C^{\infty}rc \ldots C^{\infty}rc \alpha_1$. \item $\primehi_n=A_nC^{\infty}rc \primehi_0=\alpha_nC^{\infty}rc \alpha_{n-1}C^{\infty}rc \ldots C^{\infty}rc \alpha_1C^{\infty}rc \primehi_0:\mathbb{C}^k \times X \to \mathbb{C}^k\times \mathbb{C}^n$. \item $\primei_2$ the projection of $\mathbb{C}^k\times \mathbb{C}^n$ onto $\mathbb{C}^n$. \item $\primehi =\lim_{n\to \infty} A_nC^{\infty}rc \primehi_0(w,x)$. \item $\varphi_n=\primei_2(A_nC^{\infty}rc \primehi_0)=\primei_2(\primehi_n)$. \item $\varphi =\primei_2(AC^{\infty}rc \primehi_0)$ (or $\varphi_w$ if $w$ is a fixed parameter value). \item $P_2:\mathbb{C}^k\times X\to X$ is given by $P_2(w,x)=x$. \item $\xi_i(w)\in \mathbb{C}^n$ interpolation points (osculation points) which vary with respect to $w\in \mathbb{C}^k$. \item $\eta_i$ the points of $X$ corresponding to the points $\xi_i(w)$ (preimage points of $\xi_i(w)$). \item $\mu$ induction variable. For every $\mu$ we define $\epsilon_{\mu},R_{\mu}>0$ and finite subsets $\cup_{j=1}^{k(\mu )}\{a^{\mu}_j\}$ of $\primeartial (\mu +1)\mathbb{B}_n$ and $\cup_{j=1}^{k(\mu )}\{x^{\mu}_j\}$ of $X$ respectively. \end{itemize} \subsection{Growth restrictions for holomorphic maps} In the construction of our families of non-equivalent embeddings we will use techniques of growth restrictions on entire maps from $\mathbb{C}^n$ to $\mathbb{C}^n$. These growth restrictions are governed by the following lemma, Lem\-ma 4.3 in C^{\infty}te{RR1} which we present with a simple additional conclusion, namely that one can avoid the nowhere dense set $Q$. The additional conclusion is obvious from the proof. 
\begin{lemma}\label{lemma4.3copy} Given real numbers $0<a_1<a_2$, $0<r_1<r_2$ and $c>0$, let $\Gamma$ be the class of holomorphic mappings $$f=(f_1,\ldots ,f_k):a_2\mathbb{B}_n\to r_2\mathbb{B}_k$$ such that $$|f(0)|\leq \frac{1}{2}r_1$$ and $$\| \frac{\partial (f_1,\ldots ,f_k)}{\partial (z_1,\ldots ,z_k)}\| \geq c$$ at some point of $a_1\overline{\mathbb{B}}_n$. Let $Q\subset \partial (r_1\mathbb{B}_k )$ be a set such that $\partial (r_1\mathbb{B}_k ) \setminus Q$ is dense in $\partial (r_1\mathbb{B}_k).$ Then there is a finite set $E=E(a_1,a_2,r_1,r_2,c)\subset \partial (r_1\mathbb{B}_k )\setminus Q$ with the property that, if $f\in \Gamma$ and $f(a_1\mathbb{B}_n)$ intersects $\partial (r_1\mathbb{B}_k )$ then $f(a_2\mathbb{B}_n)$ intersects $E$. \end{lemma} The following technical detail is well known; we include it for completeness. It is Lemma 5.4 in \cite{F}. It will be used frequently in the proof of Theorem \ref{main}. \begin{lemma}\label{FGRlemma2copy} Let $K$ be a polynomially convex set in $\mathbb{C}^n$ and let $X$ be a closed analytic subvariety of $\mathbb{C}^n$. Moreover let $X_0$ be a compact holomorphically convex subset of $X$, such that $K \cap X_0$ is contained in the (relative) interior of $X_0$. Then the set $K\cup X_0$ is polynomially convex. \end{lemma} We need to construct parametrized points with a certain property. \begin{proposition}\label{pointprop} Given natural numbers $k>0$ and $n\geq 2$ there is a number $m\in \mathbb{N}$ such that there are $m$, pairwise different, parametrized points $\xi_1,\xi_2, \ldots ,\xi_m:\mathbb{C}^k \to \mathbb{C}^n$ parametrized by a parameter $w\in \mathbb{C}^k$ with the following property: For $w_1\neq w_2$ there is no affine automorphism $\alpha \in \mathrm{Aff}(\mathbb{C}^n)$ which maps the set of points $\{ \xi_1(w_1),\xi_2(w_1),\ldots ,\xi_m(w_1)\}$ onto the set of points $\{ \xi_1(w_2),\xi_2(w_2),\ldots ,\xi_m(w_2)\}$. 
\end{proposition} It is not difficult to see that the holomorphic (even the algebraic) automorphism group of $\mathbb{C}^n$ ($n\geq 2$) acts transitively on finite subsets of $\mathbb{C}^n$ with fixed cardinality, i.e. one can move $k$ distinct points $z_1,z_2,\ldots ,z_k\in \mathbb{C}^n$ by an automorphism into some standard position, for example to the points $(1,0,\ldots , 0)$,$(2,0,\ldots , 0)$,$\ldots$,$(k,0,\ldots , 0)$, (usually if the dimension is clear we will write $(k,0)$). For this apply a generic linear change of coordinates so that afterwards all coordinates of the points $ z_1,z_2,\ldots ,z_k$ become different and then apply appropriate shears. The question whether the holomorphic automorphism group of $\mathbb{C}^n$ acts transitively on countable discrete subsets of $\mathbb{C}^n$ was answered to the negative by Rosay and Rudin in C^{\infty}te{RR1}. They called the countable discrete subsets in the $\mathrm{Aut}_{hol}(\mathbb{C}^n )$-orbit the "standard" countable discrete subset $e_1\mathbb{N} =\{ (1,0),(2,0),\ldots ,(k,0),\ldots \}$ tame sets and proved the existence of non tame sets. For our construction of families of embeddings we have to move finitely many points holomorphically depending on a parameter to some prescribed position by an automorphism which also depends holomorphically on the parameter. \begin{definition} Let $N$ be a natural number and $\zeta_1,\zeta_2,\ldots ,\zeta_N:\mathbb{C}^k\to \mathbb{C}^n$ be holomorphic maps such that for each fixed parameter $w\in \mathbb{C}^k$ the $N$ points $\zeta_1(w),\zeta_2(w),\ldots ,\zeta_N(w)$ in $\mathbb{C}^n$ are different. 
We call them \textbf{simultaneously standardizable}, if there exists an automorphism $\primesi \in \mathrm{Aut}^k_{hol}(\mathbb{C}^n)$ holomorphically depending on the parameter $w$ with $$\primesi (w,\zeta_i(w))=(w,(i,0)) \quad \textrm{for all } i=1,2,\ldots ,N \quad \textrm{and for all } w\in \mathbb{C}^k .$$ \end{definition} At the moment we are not able to decide under which condition on the parameter space any collection of parametrized points is simultaneously standardizable. We have the following partial result which is sufficient for our purposes. \begin{proposition}\label{standp} Let $\zeta_1,\zeta_2,\ldots ,\zeta_N:\mathbb{C}^k\to \mathbb{C}^n$, $n\geq 2$, be holomorphic maps such that the points $\zeta_1(w),\zeta_2(w),\ldots ,\zeta_N(w)$ in $\mathbb{C}^n$ are different for each $w\in \mathbb{C}^k$ and suppose $k<n-1$. Then the parametrized points $\zeta_1,\zeta_2,\ldots ,\zeta_N$ are simultaneously standardizable. \end{proposition} \subsection{Interpolation lemma} The following lemma is a key ingredient in the proof of our main theorem. It can be used to prove interpolation results for parametrized embeddings as this is well-known in the non parametrized version. For the non-parametrized case we refer to the papers C^{\infty}te{G}, C^{\infty}te{K}, C^{\infty}te{FIKP}, C^{\infty}te{F}. Another approach to interpolation is used in C^{\infty}te{Pr1}. We do not prove parametrized interpolation theorems for countable sets in this paper, we just use the lemma as a tool in the proof of our main Theorem \ref{main}. Therefore before we formulate the lemma we need to introduce some notation: Let $\primehi : \mathbb{C}^k\times X\hookrightarrow \mathbb{C}^k \times \mathbb{C}^n$ be a (parametrized) embedding of a complex space $X$ into $\mathbb{C}^n$, i.e., an embedding of the form $\primehi (w, x) = (w, \tilde \primehi (w, x))$. $X_R$ is a holomorphically convex compact subset of $X$. $\overline \mathcal{D}elta$ is a ball (of any radius) in $\mathbb{C}^k$. 
$\overline \mathbb{B}$ is a ball (of any radius) in $\mathbb{C}^n$. We assume that $\primehi^{-1}_w(\overline \mathbb{B} )\subset X_R$ for all $w\in \overline \mathcal{D}elta$, which implies that $K=(\overline \mathcal{D}elta \times \overline \mathbb{B} )\cup \primehi (\overline \mathcal{D}elta \times \overline X_R)$ is a polynomially convex subset of $\mathbb{C}^k\times \mathbb{C}^n$ (Lemma \ref{FGRlemma2copy} above). Furthermore we assume the dimension condition \begin{equation}\label{dim} \dim X +k <n . \end{equation} \begin{lemma}\label{FLYTTLEMMAT} Let $b_1(w),b_2(w),\ldots , b_N(w)$ be $N$ parametrized points contained in $K=( \overline \mathcal{D}elta \times \overline \mathbb{B} )\cup \primehi ( \overline \mathcal{D}elta \times \overline X_{R})$. Assume that $(w,p(w))$ and $(w,q(w))$ are parametrized points in $\mathbb{C}^k \times \mathbb{C}^n\setminus K $ and let $s$ be some positive integer. Then for each $\epsilon >0$ there exists an automorphism $\alpha \in \rm{Aut}^k_{hol}(\mathbb{C}^n)$ such that $|\alpha (w,z)-(w,z)|\leq \epsilon$ for every $(w,z)\in K$, $\alpha (w,b_i(w) )=(w,b_i(w))$ of order $s$ for every $w\in \mathbb{C}^k$ and $\alpha (w,p(w))=(w,q(w))$ for every $w\in \mathbb{C}^k$. \end{lemma} \subsection{Osculation lemma} The conclusion of the growth restrictions will be that only affine automorphisms could map the images of different embeddings from the family onto each other. To exclude these affine automorphisms we will have a finite number of points "marked" in each embedding of our family. The marking is in such a way that by affine automorphisms the sets of marked points have to be mapped onto each other. The marking will be achieved by letting the embeddings osculate of higher order exactly at these points. \begin{definition} Let $l\geq 2$ be a natural number. We say that a submanifold $M$ of $\mathbb{C}^n$ osculates of order $l$ at $x\in M$ if $M$ has contact order $l$ with the tangent space $T_xM\subset \mathbb{C}^n$ in $x \in M$. 
\end{definition} In local coordinates osculating can be interpreted as follows: Let $\zeta :U(\subset \mathbb{C}^m)\to M$ be a holomorphic coordinate system for the $m$ dimensional manifold $M$ at $x$, $\zeta (0)=x$. Then $M$ osculates of order $l$ at $x$ if and only if $\frac{\primeartial}{\primeartial w^{\alpha}}|_{w=0}\zeta \in T_xM$ for every multiindex $\alpha =(\alpha_1,\alpha_2,\ldots ,\alpha_m)$ with $2\leq |\alpha |\leq l$. The property to osculate is preserved by affine coordinate changes on $\mathbb{C}^n$, i.e. if $\primesi :\mathbb{C}^n \to \mathbb{C}^n$ is an affine automorphism of $\mathbb{C}^n$ then the submanifold $M\subset \mathbb{C}^n$ osculates of order $l$ at $x\in M$ if and only if the submanifold $\primesi (M)$ osculates of order $l$ at $\primesi (x)\in \primesi (M)$. \begin{notation}\label{oscul} (1) The property not to osculate of order $l$ at any point is generic for $l\ge 2$ if not $\dim M =1$ and $n=2$. In the later case it is generic for $l\ge3$. (2) In the proof of Theorem \ref{main} the role of $M$ will be played by the smooth part $X \setminus Sing (X)$ of the space we want to embed. \end{notation} By a manifold $M_{\mathbb{C}^k}$ in $\mathbb{C}^n$ parametrized by $w\in \mathbb{C}^k$ we mean the image (in $\mathbb{C}^k \times \mathbb{C}^n$) of a proper holomorphic embedding $\mathbb{P}hi : \mathbb{C}^k \times M \to \mathbb{C}^k\times \mathbb{C}^n$ which is of form $(w,m) \mapsto (w, \primehi (w, m))$. By $M(w)$ we denote the image $\mathbb{P}hi (\{w\}\times M) \subset \mathbb{C}^n$. \begin{lemma}\label{KYSSLEMMAT} Let $M_{\mathbb{C}^k}$ be a manifold in $\mathbb{C}^n$ parametrized by $w\in \mathbb{C}^k$ and assume that $m=\dim M <n$. Let $\xi_1(w),\ldots, \xi_t(w)$ be simultaneously standardizable parametrized points such that $\xi_i(w)\in M(w)$ for $i=1,\ldots , t$. 
Then for every natural number $l \ge 2$ there exists an automorphism $\kappa \in \mathrm{Aut}_{hol}^k(\mathbb{C}^n)$ such that $\kappa (M(w))$ osculates of order $l$ in $\xi_i(w)$ for $i=1,\ldots, t$ and all $w\in \mathbb{C}^k$. \end{lemma} \subsection{Non-osculation lemma} To keep the osculation of order $l$ in $\xi_i(w)$ and make sure that the embeddings do not osculate of order $l$ at any other point, we use the following lemma. \begin{lemma}\label{AKlemma} Let $M_{\mathbb{C}^k}$ be a manifold in $\mathbb{C}^n$ para\-metrized by $w\in \mathbb{C}^k$ and denote $m = \dim M$. Also let \begin{enumerate}[a)] \item $K_M$ be a compact subset of $\mathbb{C}^k\times M_{\mathbb{C}^k}$. \item $K$ be a compact subset of $\mathbb{C}^k\times \mathbb{C}^n$. \item $a_1,a_2,\ldots ,a_r$ be finitely many points in $K_M$. \item $b_1(w),b_2(w),\ldots ,b_q(w)$ be finitely many parametrized points in $\mathbb{C}^k\times M(w)\setminus K_M$ (these are the points where we want to keep the osculation order $l$). \item $l\geq 2$ be a natural number. If $m=1$ and $n=2$ let $l\geq 3$. \item $\epsilon >0$ be a real number. \end{enumerate} Then there exists an automorphism $\psi \in \mathrm{Aut}_{hol}^k(\mathbb{C}^n)$ such that \begin{enumerate}[1.] \item $\psi (a_i)=a_i$ for every $i=1,2,\ldots ,r$. \item $\psi_w(z)=z+O(|z-b_i(w)|^{l+1})$ as $z\to b_i(w)$ for every $i=1,2,\ldots ,q$ and every $w\in \mathbb{C}^k$. \item $|\psi_w(z)-z|+|\psi_w^{-1}(z)-z|<\epsilon$ for every $(w,z)\in K$. \item There is no point $x\in K_M$ such that $\psi (M(w))$ osculates of order $l$ in $\psi (x)$. \end{enumerate} \end{lemma} \section{Proof of main theorem.} \label{hauptsatz} To be precise, let us define the notion used in the formulation of Theorem \ref{main}. \begin{definition} Let $X$, $Y$ be complex spaces, $Z$ a complex (resp. topological) space. A holomorphic (resp. continuous) map $$\Phi : Z \times X \to Y$$ is called a {\sl holomorphic (resp.
continuous) family of holomorphic embeddings of $X$ into $Y$ parametrized by (a parameter) in $Z$} if for each point $z$ in the parameter space $Z$ the map $\mathbb{P}hi_z : X\to Y, \ \ x\mapsto \mathbb{P}hi(z,x)$ is a proper holomorphic embedding. \end{definition} In the proof we are working with families of embeddings of some complex space $X$ into $\mathbb{C}^n$ parametrized by $\mathbb{C}^k$ which come from the following construction. \begin{notation} If a holomorphic map $$\primehi : \mathbb{C}^k \times X \to \mathbb{C}^k \times \mathbb{C}^n\label{??.1}$$ of the form \begin{equation} \primehi (w, x) = (w, \tilde \primehi (w, x)), \ \ w\in \mathbb{C}^k,\ x\in X \label{wei} \end{equation} is a proper holomorphic embedding of $\mathbb{C}^k\times X$ into $\mathbb{C}^k \times \mathbb{C}^n$ then the map $$\tilde \primehi : \mathbb{C}^k \times X \to \mathbb{C}^n$$ (where $\tilde \primehi$ is defined by (\ref{wei})) is a family of holomorphic embeddings of $X$ into $\mathbb{C}^n$ parametrized by $\mathbb{C}^k$. Note that the contrary does not hold, i.e., if $$\tilde \primehi : \mathbb{C}^k \times X \to \mathbb{C}^n$$ is a family of holomorphic embeddings of $X$ into $\mathbb{C}^n$ parametrized by $\mathbb{C}^k$, the corresponding map $$ \primehi : \mathbb{C}^k \times X \to \mathbb{C}^k \times \mathbb{C}^n$$ defined by $$\primehi (w, x) = (w, \tilde \primehi (w, x)), \ \ w\in \mathbb{C}^k,\ x\in X$$ may fail to be an embedding (see the example below). Also note the following fact which we will use in our construction : \noindent If $\alpha \in \operatorname{Aut}_{hol} (\mathbb{C}^k\times \mathbb{C}^n)$ is of the form $\alpha (w,z) = (w, \tilde \alpha (w,z))$, i.e. 
$\alpha \in \operatorname{Aut}_{hol}^k (\mathbb{C}^n)$, then $\alpha C^{\infty}rc\ \primehi$ is again an embedding of the form (\ref{wei}), hence its "second coordinate" $\primei_2 C^{\infty}rc (\alpha C^{\infty}rc \primehi): \mathbb{C}^k \times X \to \mathbb{C}^n$ is a family of holomorphic embeddings (where $\primei_2 : \mathbb{C}^k \times \mathbb{C}^n \to \mathbb{C}^n$ is defined by $(w, v) \mapsto v, \ \ w\in \mathbb{C}^k, v \in \mathbb{C}^n$). \label{param} \end{notation} \begin{example} If $$\tilde \primehi : \mathbb{C}^k \times X \to \mathbb{C}^n$$ is a holomorphic family of holomorphic embeddings of $X$ into $\mathbb{C}^n$ parametrized by $\mathbb{C}^k$ then it is straightforward to prove that the map $$\primehi : \mathbb{C}^k \times X \to \mathbb{C}^k \times \mathbb{C}^n$$ defined by $$\primehi (w, x) = (w, \tilde \primehi (w, x)), \ \ w\in \mathbb{C}^k,\ x\in X$$ is holomorphic, injective and immersive. On the other hand properness may fail, as the following example shows: \noindent We are going to define a holomorphic family of embeddings of $\mathbb{C}$ into $\mathbb{C}^2$ parametrized by $\mathbb{C}$. Define $f : \mathbb{C} \times \mathbb{C} \to \mathbb{C}^2$ by $f(y, x)= (x + y\cdot x^2, y\cdot x)$. For each fixed point $y$ in the parameter space $\mathbb{C}$ we are given a proper, injective, immersive, holomorphic map from $\mathbb{C}$ into $\mathbb{C}^2$ (for $y\ne 0$ the second coordinate by itself gives already such an embedding, and for $y=0$ the first coordinate is such an imbedding). \primear On the other hand the map $\mathbb{C} \times \mathbb{C} \to \mathbb{C}\times \mathbb{C}^2$ defined by $(y, x)\mapsto (y, x + y\cdot x^2, y\cdot x)$ is not proper. 
Indeed, the sequence $(x_n, y_n)$ defined by $x_n = n$ and $y_n = \frac{1-n}{n^2}$ leaves any compact subset of the definition space but is mapped onto the sequence $(\frac{1-n}{n^2}, 1, \frac{1-n}{n})$ which converges to the point $(0,1, -1)$ in the target space (which is not in the image; in fact the image is not closed, it is the hypersurface $\{(a,b,c)\in \mathbb{C}^3 : a \cdot b =c (c+1) \}$ except the line $\{ c=-1, a=0 \}$). \end{example} We would like to emphasize that we will prove a slightly stronger statement than just holomorphic families of embeddings. Our families are always such that the map $\Phi : \mathbb{C}^k \times X \to \mathbb{C}^k \times \mathbb{C}^n$ is a proper holomorphic embedding which we will denote by using the symbol $\hookrightarrow$, i.e. $\Phi : \mathbb{C}^k \times X \hookrightarrow \mathbb{C}^k \times \mathbb{C}^n$ for our families. This subtle point plays a role in the last section, since if $\Phi (\mathbb{C}^k\times X)$ is not a closed submanifold in $\mathbb{C}^k\times \mathbb{C}^n$ the construction of pseudo-affine modification does not work. \begin{notation} In the proof of Theorem \ref{main} we use the property that the group of holomorphic automorphisms of $X$ can be exhausted by a sequence of compact subsets (in c.-o. topology). A Lie group (with possibly countably many components) can be exhausted by a sequence of compact subsets. On the other hand, if a locally compact topological group acts effectively on a manifold it is a Lie group~\cite{BM}. Since the group of holomorphic automorphisms of a Stein space $X$ acts effectively on the smooth part $\tilde X = X\setminus\operatorname{Sing}(X)$, this implies that $\operatorname{Aut}(X)$ is a Lie group if and only if it can be exhausted by compacts. \end{notation} \begin{proof} (of Theorem \ref{main}) By assumption the complex space $X$ embeds into $\mathbb{C}^n$, say $\varphi_0 : X\hookrightarrow \mathbb{C}^n$.
We start with the trivial family of embeddings of $X$ into $\mathbb{C}^n$, $\primehi_0 :\mathbb{C}^k \times X\to \mathbb{C}^k\times \mathbb{C}^n$ given by $(w,x)\mapsto (w,\varphi_0(x))$. We will construct automorphisms $\alpha_n\in \mathrm{Aut}^k_{hol}(\mathbb{C}^n)$ recursively. Let $A_n= \alpha_nC^{\infty}rc\alpha_{n-1}C^{\infty}rc \ldots C^{\infty}rc \alpha_1$. We further arrange $\alpha_n\in \mathrm{Aut}^k_{hol}(\mathbb{C}^n)$ inductively such that $$A(w,z)=\lim_{n\to \infty}A_n(w,z)=(w,\lim_{n\to \infty}\tilde A_n(w,z))$$ exist uniformly on compacts for some open neighborhood $\Omega \subset \mathbb{C}^k \times \mathbb{C}^n$ containing $\mathbb{C}^k\times X$ and such that the mapping $A:\Omega \to \mathbb{C}^k\times \mathbb{C}^n$ given by $(w,z)\mapsto (w,\tilde A_n(w,z))$ defines a biholomorphic mapping on $\mathbb{C}^k\times \mathbb{C}^n$. The existence of the limit follows from Proposition 4.1 and 4.2 in C^{\infty}te{F}. Now let $\primehi_n=\alpha_nC^{\infty}rc\alpha_{n-1}C^{\infty}rc \ldots C^{\infty}rc \alpha_1C^{\infty}rc \primehi_0:\mathbb{C}^k\times X\to \mathbb{C}^k\times \mathbb{C}^n$. Also define $\primei_2$ as the projection of $\mathbb{C}^k\times \mathbb{C}^n$ to $\mathbb{C}^n$. The family of holomorphic embeddings will then be given by the second coordinate of $AC^{\infty}rc \primehi_0=\lim_{n\to \infty}\primehi_n$, i.e. $\varphi =\primei_2(AC^{\infty}rc \primehi_0)$. It follows that $\primehi=\lim_{n\to \infty}A_nC^{\infty}rc \primehi_0=AC^{\infty}rc \primehi_0$ is a proper holomorphic embedding of $\mathbb{C}^k \times X$ into $\mathbb{C}^k\times \mathbb{C}^n$, which gives that $\varphi =\primei_2(AC^{\infty}rc \primehi_0)$ is a family of holomorphic embeddings of $X$ into $\mathbb{C}^n$ parametrized by $\mathbb{C}^k$. In order to make the embeddings $\primehi_w$ different for different parameters, we will choose them such that no affine automorphism, can map the image of one of these embeddings onto another. 
At the same time we make sure, using growth conditions, that the only way to map the image of one embedding onto another is by an affine mapping. This ensures that the embeddings will be non-equivalent for different parameters $w$. So the construction in short is: \begin{enumerate}[a)] \item Choose sufficiently many points $\xi_1(w), \ldots ,\xi_m(w)$ in correct positions such that no affine automorphism can map the points $\{ \xi_i(w_1)\}$ into the points $\{ \xi_i(w_2)\}$ for $w_1\neq w_2$. These points will be chosen differently for each parameter (however holomorphically depending on the parameter). \item Embed the space $X$ through these points. \item In order to single out these points we make sure that our embeddings osculate at these points of a certain order $l$, and osculates of order less than $l$ at all other points. \item Divide the rest of $\mathbb{C}^n$ into concentric shells with increasing radii. In each shell we choose inductively points through which we later will embed the space $X$. These points and their preimages in $X$ are inductively chosen in such a way that we get some growth conditions on the embedding. \item Embed $X$ inductively through all the points with careful chosen preimage points. In each step of the inductive process $X$ will be embedded through the (finitely many) points contained in one shell, keeping the points from previous shells contained in the image of $X$. \item We then show, using the growth conditions introduced in d), that for two embeddings to be equivalent for different parameters they have to differ by an affine automorphism. \item The condition in a) shows that there is no such affine automorphism. Consequently the embeddings are different for all parameters. 
\end{enumerate} The first automorphism $\alpha_1$ will ensure that for different parameters $w_1\neq w_2\in \mathbb{C}^k$ there is no affine automorphism $\beta \in \textrm{Aff}(\mathbb{C}^n)$ mapping the image of $\varphi_{w_1}(X)$ onto the image of $\varphi_{w_2}(X)$. For this, first use Proposition \ref{pointprop} to get $m$ points $\xi_1,\ldots ,\xi_m:\mathbb{C}^k \to \mathbb{C}^n$ parametrized by the parameter $w\in \mathbb{C}^k$. Then choose $m$ points $\eta_1,\ldots ,\eta_m$ in the smooth part $\tilde X$ of $X$, or more exactly of $\phi_0 (X)\subset \mathbb{C}^n$. After that use Lemma \ref{FLYTTLEMMAT} to find an automorphism $\gamma \in \mathrm{Aut}^k_{hol}(\mathbb{C}^n)$ such that $\gamma (w,\eta_i)=(w,\xi_i(w))$ for every $w\in \mathbb{C}^k$. Fix a natural number $l\ge 2$ such that not osculating of order $l$, at any point, is a generic property for a submanifold of dimension $\dim X$ in $\mathbb{C}^n$ (see Remark \ref{oscul}). Using Lemma \ref{KYSSLEMMAT} and Lemma \ref{AKlemma} we get an automorphism $\delta \in \mathrm{Aut}^k_{hol}(\mathbb{C}^n)$ with $\delta (w,\xi_i(w))=(w,\xi_i(w))$ for every $w\in \mathbb{C}^k$ that prescribes the higher derivatives of $\delta$ in the $\mathbb{C}^n$-direction such that for all $w\in \mathbb{C}^k$ the subvariety $\delta (\gamma (w,\varphi_0(X)))$ of $\mathbb{C}^n$ will be tangent of order $l$ at the points $\xi_i(w)$. The automorphism $\alpha_1\in \mathrm{Aut}^k_{hol}(\mathbb{C}^n)$ is now given by $\alpha_1=\delta \circ \gamma$. We also note that with $\alpha_1$ we have the osculation order in the points we want, so in the future we only have to make sure we do not destroy the osculation order in $\xi_i(w)$ but destroy it everywhere else.
We now choose an exhaustion of the space $T=\operatorname{Aut} (X)$ with compact sets $T_i$ such that $$T=\cup_{i=1}^{\infty}T_i\textrm{ and }T_i\subset \overset{C^{\infty}rc}T_{i+1}.$$ Further let $\rho :X\to \mathbb{R}^{\geq 0}$ be a continuous exhaustion function of $X$, so $X_r:=\rho^{-1}([0,r])$ is a compact subset of $X$ for every $r\geq 0$. ($X_r=\{ x\in X:\rho (x)<r \} \subset \subset X$ for every $r$, for example $\rho (x)=\| \iota (x)\|^2$ will work.) Denote the unit ball in $\mathbb{C}^n$ by $\mathbb B_n=\{ z\in \mathbb{C}^n:\| z\| <1\}$ and $\mathbb{B}_k =\{ w\in \mathbb{C}^k:\| w\| <1\}$. Choose a sequence of relatively open neighborhoods $U_i$, $i=1,2,3,\ldots$ of the set $\eta =\cup_{i=1}^m\{ \eta_i\}$ in $X$ with $\cap_{i=1}^{\infty}U_i=\eta$ and $\overline U_{i+1}\subset \overset{C^{\infty}rc}U_i$. Remember that the points $\eta_i$ in $X$ are the preimages of the points in $\mathbb{C}^n$ at which the varieties $\varphi (w,X)$ have osculation of order $l$, i.e. the points $\xi_i(w)$, for every $w\in \mathbb{C}^k$. Now we inductively, for $\mu =1,2,3,\ldots$, define real numbers $\epsilon_{\mu}, R_{\mu}>0$, finite subsets $\cup_{j=1}^{k(\mu )}\{ a^{\mu}_j\}$ of $\primeartial (\mu +1)\mathbb{B}_n$ and finite subsets $\cup_{j=1}^{k(\mu )}\{ x^{\mu}_j\}$ of $X$ with the same cardinality $k(\mu )$, and automorphisms $\alpha_{\mu}\in \mathrm{Aut}^k_{hol}(\mathbb{C}^n)$ of $\mathbb{C}^n$ parametrized by $w\in \mathbb{C}^k$. When choosing the subsets $\cup_{j=1}^{k(\mu )}\{ a^{\mu}_j\}$ and $\cup_{j=1}^{k(\mu )}\{ x^{\mu}_j\}$ it is important to remember that since we are going to embed the point $x^{\mu}_j$ of $X$ through $a^{\mu}_j$, we have to choose the points $a^{\mu}_j$ such that $\cup_{j=1}^{k(\mu )}\{ a^{\mu}_j\} \cap \primei_2(\primehi_{\mu}(\mathbb{C}^k\times X))=\emptyset$. 
This is possible by Lemma \ref{lemma4.3copy} due to the dimension of $X$, since $\primei_2(\primehi_{\mu}(\mathbb{C}^k\times X))\cap \primeartial (\mu +1)\mathbb{B}_n$ has measure zero in $\primeartial (\mu +1)\mathbb{B}_n$. Start by letting $\epsilon_1=1, R_1=1, k(1)=m$ and $\alpha_1$ be as constructed earlier. For $\mu \geq 2$ we construct these entities such that the following properties are satisfied: \begin{enumerate}[$1_{\mu}$.] \item $0<\epsilon_{\mu}<\frac{\epsilon_{\mu -1}}{3}$ \item If $F:\mathbb{B}_n\to (\mu +2)\mathbb{B}_n\setminus \cup_{j=1}^{k(\mu )}\{ a^{\mu}_j\}$ is a holomorphic mapping with $\| F(0)\| \leq \frac{\mu +1}2$ and $| JF(0)|\geq 1$ then $F((1-\frac{\epsilon_{\mu}}{2})\mathbb{B}_n)\subset (\mu +1)\mathbb{B}_n$. \item $\primehi_{\mu}(w,x^{\mu}_j)=\alpha_{\mu}C^{\infty}rc \primehi_{\mu -1}(w,x^{\mu}_j)=(w,a^{\mu}_j)$ and $\rho (x^{\mu}_j)>\max \{ \rho (t(x)):t\in T_{\mu}, x\in P_2(\primehi_{\mu -1}^{-1}(\mu \overline \mathbb{B}_k\times \mu \overline \mathbb{B}_n))\}$ where $P_2:\mathbb{C}^k \times X\to X$ is given by $P_2(w,x)=x$. \item $\| \alpha_{\mu}C^{\infty}rc \primehi_{\mu -1}(w,x)-\primehi_{\mu -1}(w,x)\| =\| \primehi_{\mu }(w,x)-\primehi_{\mu -1}(w,x)\| \leq \epsilon_{\mu}$ for every $x\in X_{R_{\mu -1}}$ and for every $w\in (\mu -1)\overline \mathbb{B}_k$. \item $\| \alpha_{\mu} (w,z)-(w,z)\| \leq \epsilon_{\mu}$ for $(w,z)\in \mu \overline \mathbb{B}_k \times \mu \overline \mathbb{B}$ \begin{notation} Motivated by $4_{\mu}$ and $5_{\mu}$ we define the compact set $$K_{\mu}=(\mu \overline \mathbb{B}_k \times \mu \overline \mathbb{B}_n)\cup \primehi_{\mu -1}( (\mu -1) \overline \mathbb{B}_k \times \overline X_{R_{\mu -1}}).$$ \end{notation} \item $\alpha_{\mu}C^{\infty}rc \primehi_{\mu -1}(w,x^l_j)=(w,a_j^l)$ for every $w\in \mathbb{C}^k$ and for every $l<\mu$, $j=1,2,\ldots ,k(l)$. \item $\alpha_{\mu}(w,z)=(w,z)+O(|z-\xi_i(w)|^{l+1})$ as $z\to \xi_i(w)$. 
\item For fixed $w\in \mu \overline \mathbb{B}_k$ the submanifold $\pi_2(\phi_{\mu}(w,\tilde X))$ of $\mathbb{C}^n$ does not osculate of order $l$ in any point $\pi_2(\phi_{\mu}(w,x))$ with $x\in (X_{R_{\mu -1}}\cap \tilde X )\setminus U_{\mu}$. Here $\tilde X$ is the union of all components of $X$ which are smooth and of maximal dimension. \item $\| \phi_{\mu}(w,x)\| \geq \mu +1$ for every $x\in X\setminus X_{R_{\mu }}$ and for every $w\in \mu \overline \mathbb{B}_k$. \item $R_{\mu }>R_{\mu -1}+1$. \end{enumerate} We will now confirm that such a construction is possible. For step 2 of the induction we choose, in the following order, \begin{enumerate}[(1)] \item $\epsilon_2<\frac{\epsilon_1}{3}$. \item $\cup_{j=1}^{k(2)}\{ a^2_j\}$ a finite subset of $\partial (3\mathbb{B}_n)\subset \mathbb{C}^n$ which does not intersect the image $\pi_2(\phi_1 (\mathbb{C}^k\times X))$ and satisfies $2_2$. This is possible by Lemma \ref{lemma4.3copy}, namely $\dim X+k<n$ makes it possible to choose the points $\{ a^2_j\}$ outside $\pi_2(\phi_1 (\mathbb{C}^k\times X))$ (which has measure zero by Sard's theorem). \item $\cup_{j=1}^{k(2)}\{ x^2_j\}$ a finite subset of $X$ such that $\rho (x_j^2)>\max \{ \rho (t(x)): t\in T_2, x\in P_2(\phi_1^{-1}(2\overline \mathbb{B}_k \times 2\overline \mathbb{B}_n))\}$. Property $3_2$ will then be fulfilled. We shall also choose $x_j^2$ such that $(w,x_j^2)\notin \hat K_2$ for every $j$, where $K_2=(2\overline \mathbb{B}_k \times 2\overline \mathbb{B}_n)\cup \phi_1( 1\overline \mathbb{B}_k \times \overline X_{R_1})$ is a compact subset of $\mathbb{C}^k \times \mathbb{C}^n$. As $\phi_1$ is a proper holomorphic embedding, Lemma \ref{FGRlemma2copy} gives that $\hat K_2$ is contained in $ (2\overline \mathbb{B}_k \times 2\overline \mathbb{B}_n)\cup \phi_1( 1\overline \mathbb{B}_k \times \overline X )$, in particular the points $(w,a_j^2)$ will not be contained in the set $\hat K_2$.
\end{enumerate} By our dimension assumptions ($k<n-1$) the parametrized points $\{ (w,a^2_j),(w,x^2_j)\}$ are simultaneously standardizable (Corollary \ref{standp}). We will now use Lemma \ref{FLYTTLEMMAT} $k(2)$ times to find an automorphism $\alpha_2'\in \mathrm{Aut}_{hol}^k(\mathbb{C}^n)$ holomorphically depending on $w\in \mathbb{C}^k$ such that $4_2,5_2,6_2$ and $7_2$ are satisfied with $\epsilon_2/2$ instead of $\epsilon_2$ (as we will combine $\alpha_2'$ with another automorphism $\alpha_2''$). Using Lemma \ref{AKlemma} we find an automorphism $\alpha_2''\in \mathrm{Aut}_{hol}^k(\mathbb{C}^n)$ not moving any point of $\alpha_2'(K_2)$ more than $\epsilon_2/2$, matching the identity up to order $l$ at the points $(w,\xi_i(w))$ for every $w\in \mathbb{C}^k$ and fixing the points $(w,a_j^2)$, $j=1,\ldots ,k(2)$, for every $w\in \mathbb{C}^k$. In addition, for $w\in 2\overline \mathbb{B}_k$, the submanifold $\alpha_2''\circ \alpha_2' \circ \phi_1(w,\tilde X)$ of $\mathbb{C}^n$ does not osculate of order $l$ in any of the points $P_2(\phi_1 (w,x))$ with $x\in (X_{R_1}\cap \tilde X)\setminus U_1$. The composition $\alpha_2''\circ \alpha_2'$ will then satisfy $4_2,5_2,6_2,7_2$ and $8_2$. Finally choose $R_2$ so large that $9_2$ and $10_2$ are satisfied. The induction for step $s$, $s>2$, goes exactly as in step 2. At all steps $s$, we have to make sure that the property not to osculate of order $l$ for $\phi_{s-1}\circ \phi_0(\tilde X)$ is preserved in every point of the image of $(X_{R_{s-2}}\cap \tilde X)\setminus U_{s-2}$. We therefore have to choose $\epsilon_s \leq \epsilon_{s-1}$ so small that every perturbation of $\phi_{s-1}\circ \phi_0:\tilde X\hookrightarrow \mathbb{C}^n$ less than $3\epsilon_s$ on the compact set $(X_{R_{s-2}}\cap \tilde X)\setminus U_{s-2}$ does not destroy that property.
Because of $5_{\mu}$ and the fact that $\epsilon_{\mu}<\frac 1{\mu}$ Proposition 4.1 and 4.2 from C^{\infty}te{F} gives that $A=\lim_{\mu \to \infty}A_{\mu}(w,z)=(w,\lim_{\mu \to \infty}\tilde A_{\mu}(w,z))$ exists uniformly on compacts on $\Omega =\cup_{\mu =1}^{\infty}A_{\mu}^{-1}(\mu \mathbb{B}_k \times \mu \mathbb{B}_n)$ and defines a biholomorphic mapping from $\Omega$ onto $\mathbb{C}^k\times \mathbb{C}^n$. By $4_{\mu }$ the set $\mathbb{C}^k \times X$ is contained in $\Omega$. Since $A_{\mu}\in \mathrm{Aut}_{hol}^k(\mathbb{C}^n)$ for each fixed $w_0\in \mathbb{C}^k$ the map $A_{w_0}:\Omega_{w_0}\to \mathbb{C}^n$, given by $(w,z)\mapsto \tilde A(w,z)$, is a biholomorphic mapping from $\Omega_{w_0}=\{ z\in \mathbb{C}^n: (w_0,z)\in \Omega \}$ onto $\mathbb{C}^n$, and $\Omega_{w_0}$ contains $X$. Therefore for all $w_0\in \mathbb{C}^k$ the map $\primehi_{w_0}$ defined by $x\mapsto \primehi (w_0,x)$ is a proper holomorphic embedding of $X$. We will now confirm that the constructed embedding satisfies the theorem. Property $3_{\mu}$ and $6_{\mu}$ gives for every $n\in \mathbb{N}$ that $\primehi (w,x_j^n)=(w,a^n_j)$ for every $w\in \mathbb{C}^k$ and $j=1,2,\ldots ,k(\mu )$. Define $\epsilon =\sum_{i=2}^{\infty}\epsilon_i$, condition $1_{\mu}$ gives that $\epsilon <\frac 12$. Now suppose that there is a non-degenerate holomorphic mapping $F:\mathbb{C}^n\to \mathbb{C}^n$ and that there are two values $w_1\neq w_2\in \mathbb{C}^k$ of the parameter space such that $F^{-1}(\mathbb{C}^n\setminus \primehi_{w_2}(X)) = \mathbb{C}^n\setminus \primehi_{w_1}(X)$ and that $\primehi^{-1}_{w_2}C^{\infty}rc FC^{\infty}rc \primehi_{w_1}=t$ for some $t\in T$, i.e. some element of the family of automorphisms of $X$. In particular this will hold if for $w_1\neq w_2$ the embeddings $\primehi_{w_1}$ and $\primehi_{w_2}$ are equivalent. By moving the origin by an arbitrary small translation, we can assume that $JF(0)\neq 0$. 
Let $\beta =\primerod_{i=2}^{\infty}(1-\frac{\epsilon_i}{2})>0$ and let $\nu_0$ be a number so large that for every $\nu \geq \nu_0$ we have that $t\in T_{\nu}$, $JF(0)>\frac 1{(\nu \beta )^n}$, $F(0)\in \frac{\nu +2}{2}\mathbb{B}$ and $w_1,w_2\in \nu \mathbb{B}_k$. For a given $\nu \geq \nu_0$ choose a natural number $k$ such that $F(\nu \beta \mathbb{B}_n )\subset (k+2)\mathbb{B}_n$ and $k>\nu +2$. Define $F_j(z):\mathbb{B}_n \to (k+2)\mathbb{B}_n$ by $F_j(z)=F(z\cdot \nu \primerod_{l=j+1}^k(1-\frac{\epsilon_l}{2}))$ for $j=1,2,\ldots ,k$. For some fix $j\in \{ \nu +1,\ldots ,k\}$ we have that $F_j(\mathbb{B}_n )=F(\nu \primerod_{l=j+1}^k(1-\frac{\epsilon_l}{2})\mathbb{B}_n )\subset F(\nu \mathbb{B}_n )$, which by Lemma \ref{LEMMAB} implies that $F_j(\mathbb{B}_n )$ do not contain any point $a^j_l\in \primeartial (j+1)\mathbb{B}_n$, $l=1,2,\ldots ,k(j)$ and $j\geq \nu +1$. In addition we have $$| JF_j(0)|=|\nu^n\primerod_{l=j+1}^k(1-\frac{\epsilon_l}{2})^nJF(0)|>\nu^n\beta^nJF(0)>1$$ and $F_j(0)=F(0)\in \frac{j+1}2\mathbb{B}_n$. Property $2_k$ now gives that $F_k((1-\frac{\epsilon_k}{2})\mathbb{B}_n )=F_{k-1}(\mathbb{B}_n )\subset (k+1)\mathbb{B}_n$. Induction from $k$ down to $\nu +1$ gives that $F_{j-1}(\mathbb{B}_n )\subset (j+1)\mathbb{B}_n$ so, for $j=\nu +1$ we have $F_{\nu}(\mathbb{B}_n )\subset (\nu +2) \mathbb{B}_n$ and therefore $F(\beta \nu \mathbb{B}_n )\subset (\nu +2)\mathbb{B}_n$ for every $\nu \geq \nu_0$. This growth condition implies that $F$ is an affine mapping, and the fact that $F$ is non-degenerate means that $F$ is an affine automorphism. Remember that $(\varphi_1)_w (\tilde X)$ osculates of order $l$ at $\xi_i(w)$, for $w\in \mathbb{C}^k$, and this is preserved by $7_{\mu}$ over the induction. Hence, we see that $\varphi_w (\tilde X)$ osculates of order $l$ at $\varphi (x_i)=\xi_i(w)$, $i=1,2,\ldots ,m $. From property $8_{\mu}$ it follows that $\varphi_w (\tilde X)$ do not osculate of order $l$ in any other point. 
Due to this, since the affine automorphism $F$ maps $\varphi_{w_1}(\tilde X)$ to $\varphi_{w_2}(\tilde X)$, it also maps the set $\{ \xi_i(w_1) \}_{i=1}^m$ to the set $\{ \xi_i(w_2) \}_{i=1}^m$. By the choice of $\{ \xi_i(w)\}$ there is no such automorphism, see Proposition \ref{pointprop}. \end{proof} \begin{lemma}\label{LEMMAA} In the notation of the proof of Theorem \ref{main} the following holds: \begin{multline*} \varphi^{-1}((\nu -1)\overline \mathbb{B}_k \times (\nu -1)\mathbb{B}_n)\subset P_2((A_{\nu} \circ \phi_0)^{-1}((\nu -1)\overline \mathbb{B}_k \times \nu \mathbb{B}_n))=\\ =P_2(\phi_{\nu}^{-1}((\nu -1)\overline \mathbb{B}_k \times \nu \mathbb{B}_n)) \end{multline*} \end{lemma} \begin{proof} Let $w\in (\nu -1)\overline \mathbb{B}_k$ be a fixed point and consider some $x\in \varphi_w^{-1}((\nu -1)\overline \mathbb{B}_n)$, where $\varphi_w^{-1}$ is the restriction of $\varphi^{-1}$ to $\{ w\} \times \mathbb{C}^n$. This implies that $\varphi_w(x)\in (\nu -1)\overline \mathbb{B}_n$. Now choose $k_0>\nu$ and $0<\delta < 1-2\epsilon$ such that \begin{equation}\label{A*} \phi_{k,w}(x)\in (\nu -1+\delta )\overline \mathbb{B}_n \textrm{ for every } k\geq k_0 . \end{equation} Property $5_{\nu +1}$ gives that $\| \alpha_{\nu +1}(w,z)-(w,z)\| \leq \epsilon_{\nu +1}$ for $(w,z)\in (\nu +1)\overline \mathbb{B}_k \times (\nu +1)\overline \mathbb{B}_n$ and Rouché's theorem that $\alpha_{\nu +1,w}(\nu \mathbb{B}_n)\supset (\nu -2\epsilon_{\nu})\mathbb{B}_n$, see Remark \ref{roch}, or in other words $\nu \mathbb{B}_n \supset (\alpha_{\nu +1,w})^{-1}((\nu -2\epsilon_{\nu})\mathbb{B}_n )$ so \begin{multline*} (\phi_{\nu})_w^{-1}(\nu \mathbb{B}_n )=(A_{\nu}\circ \phi_0)_w^{-1}(\nu \mathbb{B}_n )\supset (A_{\nu +1}\circ \phi_0)_w^{-1}((\nu -2\epsilon_{\nu})\mathbb{B}_n )=\\ =(\phi_{\nu +1})^{-1}_w((\nu -2\epsilon_{\nu})\mathbb{B}_n ).
\end{multline*} Induction using $5_{\nu +2},\ldots ,5_k$ gives \begin{multline}\label{B*} (A_{\nu}\circ \phi_0)_w^{-1}(\nu \mathbb{B}_n )\supset (A_k\circ \phi_0)_w^{-1}((\nu -2\sum_{l=\nu}^{k-1}\epsilon_l)\mathbb{B}_n )\supset \\ \supset (A_k\circ \phi_0)_w^{-1}((\nu -2\epsilon )\mathbb{B}_n ) . \end{multline} By our choice of $\delta$ we have $\nu -2\epsilon >\nu -1+\delta$, so \eqref{A*} and \eqref{B*} imply that $\phi_{\nu ,w}(x)\in \nu \mathbb{B}_n$. \end{proof} \begin{notation}\label{roch} To see that $\alpha_{\nu +1,w}(\nu \mathbb{B}_n )\supset (\nu -2\epsilon_{\nu})\mathbb{B}_n$ holds, we consider the following situation: $\| \alpha_q(w,z)-(w,z)\| \leq \epsilon_q$ for $(w,z)\in q\overline \mathbb{B}_k \times q\overline \mathbb{B}_n$. \begin{multline*} \| \alpha_q(w,z)-(w,z)\| =\| \alpha_q(w,z)-(w,p)-((w,z)-(w,p))\| \leq \\ \epsilon_q <\| (w,z)-(w,p)\| \textrm{ for } (w,z)\in q\overline \mathbb{B}_k \times \partial (q\overline \mathbb{B}_n ) \textrm{ and } p\in (q-2\epsilon_q)\overline \mathbb{B}_n . \end{multline*} Since $(w,z)-(w,p)$ has a root, by Rouché's theorem $\alpha_q(w,z)-(w,p)$ has a root too. So for every $p\in (q-2\epsilon_q)\overline \mathbb{B}_n$ we always have a solution to the equation $\alpha_q(w,z)=(w,p)$ for some $z\in q\overline \mathbb{B}_n$, therefore we draw the conclusion that $\alpha_{\nu +1,w}(\nu \mathbb{B}_n )\supset (\nu -2\epsilon_{\nu})\mathbb{B}_n$. \end{notation} \begin{lemma}\label{LEMMAB} In the notation of the proof of Theorem \ref{main} the following holds: for every $j\geq \nu+1$ we have that $ F(\nu \mathbb{B}_n)\cap \cup_{l=1}^{k(j)}\{ a^j_l\} =\emptyset$. \end{lemma} \begin{proof} Suppose, to reach a contradiction, that there exists $z\in \nu \mathbb{B}_n$ such that $F(z)=a^j_l$ for some $j\geq \nu +1$ and some $l$ between $1$ and $k(j)$. Since $F^{-1}(\mathbb{C}^n\setminus \varphi_{w_2}(X))= \mathbb{C}^n\setminus \varphi_{w_1}(X)$, we have that $z\in \varphi_{w_1}(X)$.
Let $x=\varphi^{-1}_{w_1}(z)\in \varphi^{-1}_{w_1}(\nu \mathbb{B}_n )$, which gives $FC^{\infty}rc \varphi_{w_1}(x)=a^j_l=\varphi_{w_2}(x^j_l(w_2))$. Thus $t(x)=\varphi^{-1}_{w_2}C^{\infty}rc FC^{\infty}rc \varphi_{w_1}(x)=x^j_l(w_2)$. Using Lemma \ref{LEMMAA} we conclude that $x\in P_2(\primehi^{-1}_{\nu +1,w_1}((\nu +1)\overline \mathbb{B}_n))$ and with $t\in T_{\nu}$ and $w_1\in \nu \mathbb{B}_k$ it follows that $$\rho (t(x))\leq \max_{\primehi_{\nu +1} ^{-1}(\nu +1)\overline \mathbb{B}_n ,t\in T_{\nu},w\in \nu \mathbb{B}_k}\rho(t(y)). $$ Since $j\geq \nu +1$ we have $$\rho (t(x))\leq \max_{\primehi_{j,w}^{-1}j\overline \mathbb{B}_n ,t\in T_{j-1},w\in (j-1) \mathbb{B}_k}\rho(t(y)), $$ (for $j>\nu +1$ we have that $\primehi_j$ maps the inverse image $\primehi^{-1}_{j-1}((j-1)\mathbb{B}_n )$ into $(1+ \epsilon_{j})(j-1)\mathbb{B}_n \subset j\mathbb{B}_n $). However condition $3_j$ gives $$\rho (x^j_l(w))>\max_{y\in \primehi^{-1}_{j-1,w}(j\overline \mathbb{B}_k \times j\overline \mathbb{B}_n), t\in T_j}\rho (t(y)).$$ Therefore $\rho(x_l^j(w_2))>\rho (t(x))$, which contradicts $t(x)=x_l^j(w_2)$. \end{proof} \section{Eisenman hyperbolicity of the embeddings} \label{Eisenman} Let $M$ be a complex manifold of dimension $n$. We denote the holomorphic tangent bundle of $M$ by $TM$ and the holomorphic tangent space at $p\in M$ by $T_pM$. The $k$-th exterior power of $T_pM$ and $TM$ will be denoted by $\bigwedge^kT_pM$ and $\bigwedge^kTM$. Let also $D^k_pM$ and $D^kM$ denote the set of decomposable elements in $\bigwedge^kT_pM$ and $\bigwedge^kTM$. Recall that the Eisenman $n$-norm for a $u\in D^n_pM$ is defined as C^{\infty}te{E}, C^{\infty}te{GW} $$E_n^M(p,u)= \inf \{ \| v\|^2:v\in D_0^n\mathbb{B}_n, \exists F\in\mathcal{O} (\mathbb{B}_n ,M),F(0)=p, F_*v=u\} .$$ A complex manifold is called $n$-Eisenman hyperbolic if $E_n^M(p,u) >0$ for all $p\in M$ and all non-zero $u\in D^n_pM$. Compare with C^{\infty}te{BF}. We use the notation from the proof of Theorem \ref{main}. 
\begin{theorem}[Addition to Theorem \ref{main}]\label{mainadd} For all $w\in \mathbb{C}^k$ the complement $\mathbb{C}^n \setminus \varphi_w(X)$ of the embedding $\varphi_w(X)$ is Eisenman $n$-hyperbolic. \end{theorem} \begin{proof} Suppose there exists a point $p\in \mathbb{C}^n\setminus \varphi_w (X)=M$ such that $ E_n^M(p,u)=0$ for the (unique up to a constant) non-zero $u\in D^n_pM$. This means that \begin{equation}\label{eishyp} \inf_f \frac{1}{| Jf(0)|^2}\left( \frac{i}{2}\right)^n dz\wedge d\bar z=0 \end{equation} for some point $p$ where $f\in \mathcal{O} (\mathbb{B}_n ,M)$ such that $f(0)=p$ and $f_*(T_0\mathbb{B}_n)=v$. Let $\nu \in \mathbb{N}$ be a fixed number such that $p\in \frac{ \nu + 2}{2}\mathbb{B}_n$. By \eqref{eishyp} there is $F:\mathbb{B}_n\to M$ such that $F(0)=p$ and $|JF(0)|$ is arbitrarily large, for example \begin{equation}\label{eishyp2} |JF(0)|>\max (\frac{1}{\beta^n} ,(\nu +2)^n\beta^n) . \end{equation} There is an $\alpha \in \mathbb{C}$, $0<|\alpha |<1$ so that $|JF(0)|>\frac 1{\alpha^n\beta^n}$. Since $F(\alpha \overline{\mathbb{B}}_n)$ is compact we find $k\in \mathbb{N}$ such that $F(\alpha \mathbb{B}_n)\subset (k+2)\mathbb{B}_n$. Define $F_j(z)=F(\alpha \prod_{l=j+1}^k(1-\frac{\epsilon_l}2)z)$. It holds: $|JF_j(0)|\geq \alpha^n\beta^n|JF(0)|>1$ for every $j$, also for $\nu +1 \leq j \leq k$ we have $F_j(0)=p\in \frac{ \nu + 2}{2}\mathbb{B}_n$ and $F_j(\mathbb{B}_n)$ obviously does not meet the points $a^j_l$, $l=1,2,\ldots ,k(j)$ (for $j$ large enough as in the proof of the main theorem). We conclude inductively by property $2_j$ that $F_{j-1}(\mathbb{B}_n)\subset (j+1)\mathbb{B}_n$ for $\nu +1 \leq j\leq k$. This means in particular $F_{\nu}(\mathbb{B}_n)\subset (\nu +2)\mathbb{B}_n$ which implies $$|JF_{\nu}(0)|\leq (\nu +2)^n$$ and therefore $$|JF(0)|\leq (\nu +2)^n\alpha^n\beta^n .$$ This contradicts \eqref{eishyp2}, thus $\mathbb{C}^n\setminus \varphi_w (X)$ is Eisenman $n$-hyperbolic.
\end{proof} That Eisenman hyperbolic manifolds have a cancellation property was used in \cite{Z}, Theorem 1.10 (for a simple proof see for example \cite{Bo}). \begin{proposition}\label{cancel} Let $Y$ and $Z$ be $n$-Eisenman hyperbolic manifolds. Then any biholomorphic map $\Psi =(\psi_1,\psi_2 ):Y\times \mathbb{C}^l \to Z\times \mathbb{C}^l$ is of the form $\Psi (y,z)=(\psi_1(y),\psi_2(y,z))$, where $\psi_1:Y\to Z$ is biholomorphic. \end{proposition} With Proposition \ref{cancel} and Theorem \ref{mainadd} we get \begin{theorem}\label{cross} Let $X$ be a complex space which can be embedded in $\mathbb{C}^n$ and such that the group of holomorphic automorphisms $\operatorname{Aut}_{\rm{hol}}(X)$ is a Lie group. Then there exists, for $k=n-1-\dim X$, a family of holomorphic embeddings of $X\times \mathbb{C}^l$ into $\mathbb{C}^n\times \mathbb{C}^l$ parameterized by $\mathbb{C}^k$, such that for different parameters $w_1\neq w_2\in \mathbb{C}^k$ the embeddings $\psi_{w_1},\psi_{w_2}:X\times \mathbb{C}^l \hookrightarrow \mathbb{C}^{n+l}$ are non-equivalent (up to automorphisms). \end{theorem} \begin{proof} Take $\varphi$ from Theorem \ref{main} and consider $$\Psi :\mathbb{C}^k\times X\times \mathbb{C}^l \to \mathbb{C}^k\times \mathbb{C}^n\times \mathbb{C}^l$$ defined by $$(w,x,y)\mapsto (w,\varphi (w,x),y)=(w,\psi(w,x,y)) .$$ Assume that the embeddings $\psi_{w_1}$ and $\psi_{w_2}$, where $w_1\neq w_2$, are equivalent. This means that there exists an automorphism $\alpha \in \operatorname{Aut}_{\rm{hol}}(\mathbb{C}^{n+l})$ such that $\alpha(\varphi_{w_1} (X)\times \mathbb{C}^l)=\varphi_{w_2} (X)\times \mathbb{C}^l$ and therefore the same for the complements $\alpha((\mathbb{C}^n\setminus \varphi_{w_1} (X))\times \mathbb{C}^l)=(\mathbb{C}^n\setminus \varphi_{w_2} (X))\times \mathbb{C}^l$. 
Now by Proposition \ref{cancel} there exists $\alpha_1 \in \operatorname{Aut}_{\rm{hol}}(\mathbb{C}^n)$ such that $\alpha_1(\mathbb{C}^n\setminus \varphi_{w_1} (X))=\mathbb{C}^n\setminus \varphi_{w_2} (X)$. Thus $\alpha_1(\varphi_{w_1} (X))=\varphi_{w_2} (X)$, which contradicts the choice of $\varphi$. \end{proof} A special case which is worth stating separately is $X=\mathbb{C}$. \begin{corollary} \label{main corollary} There exists, for $k=n-l-1$, a family of holomorphic embeddings of $\mathbb{C}^l$ into $\mathbb{C}^n$ parameterized by $\mathbb{C}^k$, such that for different parameters $w_1\neq w_2\in \mathbb{C}^k$ the embeddings $\psi_{w_1},\psi_{w_2}:\mathbb{C}^l \hookrightarrow \mathbb{C}^{n}$ are non-equivalent. \end{corollary} We end this section with a little trick showing that one can even have families of pairwise non-equivalent embeddings containing the standard embedding as a member of the family. Note that the embeddings constructed in the proof of Theorem \ref{main} do not contain the standard embedding since the complement of each embedding is $n$-Eisenman hyperbolic. \begin{proposition} \label{standard} For each $0<l<n-1$ there is a holomorphic family of holomorphic embeddings of $\mathbb{C}^l$ into $\mathbb{C}^n$ parameterized by $\mathbb{C}$, such that for different parameters $w_1\neq w_2\in \mathbb{C}$ the embeddings $\psi_{w_1},\psi_{w_2}:\mathbb{C}^l \hookrightarrow \mathbb{C}^{n}$ are non-equivalent. Moreover the embedding $\psi_0$ is equivalent to the standard embedding. \end{proposition} \begin{proof} Take a family $\Psi_0 : \mathbb{C} \times \mathbb{C} \to \mathbb{C} \times \mathbb{C}^{n-l+1}$, $\Psi_0 (w, z) = (w, \phi (w,z))$, as constructed in Theorem \ref{main} and cross it with $\mathbb{C}^{l-1}$ as in Theorem \ref{cross} to get a family $\Psi_1 : \mathbb{C} \times \mathbb{C}^l \to \mathbb{C} \times \mathbb{C}^n$, $\Psi_1 (w, x) = (w, \phi_1 (w,x))$. 
By using a translation we can assume that $\phi_1 (w, 0) = 0 \ \forall w\in \mathbb{C}$. Now define the family $\Psi : \mathbb{C} \times \mathbb{C}^l \to \mathbb{C} \times \mathbb{C}^n$ by $$ \Psi (w, x) = \left(w, \frac{1}{w} \phi_1 (w, w x)\right) =: (w, \psi (w,x))$$ for $w\ne 0$ and by its obvious limit $ x \mapsto \phi_1^\prime (0,0) x$ for $w=0$. Thus for $w=0$ we have the standard embedding in the family. All other members $\psi_w$ of the family are by definition equivalent to the embedding $\phi_w$ and therefore pairwise non-equivalent. No member in the family except $\psi_0$ is equivalent to the standard embedding since otherwise there would exist a holomorphic map of rank $n-l+1$ into the complement of $\phi_{w} (\mathbb{C})$ which contradicts the Eisenman $(n-l+1)$-hyperbolicity (Theorem \ref{mainadd}). \end{proof} \section{Families of holomorphic $\mathbb{C}^*$-actions on affine space}\label{wirkung} In this section we employ the method from \cite{DK1} and \cite{DK} to construct (non-linearizable) $\mathbb{C}^*$-actions on affine spaces out of embeddings $\mathbb{C}^l\hookrightarrow \mathbb{C}^n$. We will not give all proofs in detail. The important point we want to check here is that if the embeddings are holomorphically parametrized, then the resulting $\mathbb{C}^*$-actions depend holomorphically on the parameter. 
Let's go through the method: For an embedding $\varphi: \mathbb{C}^l \to \mathbb{C}^n$ take generators of the ideal $I_{\varphi (\mathbb{C}^l)} < \mathcal{O} (\mathbb{C}^n)$ of the image manifold, say $f_1, \ldots, f_N \in \mathcal{O} ({\mathbb{C}^n)}$ (in this case $N= n-l$ would be sufficient, since $\mathbb{C}^l$ is always a complete intersection in $\mathbb{C}^n$ by results of Forster and Ramspott C^{\infty}te{FoRa}, but this is not important for the construction) and consider the manifold \begin{multline*} M:= \{ (z_1, \ldots, z_n, u_1,\ldots u_N, v) \in \mathbb{C}^{n+N+1} : \\ f_i (z_1, \ldots, z_n) = u_i \ v \quad \forall \ i=1, \ldots, N \} \end{multline*} which in C^{\infty}te{DK1} is called Rees space. This notion was introduced there by the authors since they were not aware of the fact that this is a well-known construction, called affine modification, going back to Oscar Zariski. Geometrically the manifold $M$ results from $\mathbb{C}^{n+1}_{z, v}$ by blowing up along the center $\mathcal{C} = \varphi (\mathbb{C}^l) \times 0_v$ and deleting the proper transform of the divisor $\mathcal{D} = \{ v = 0\}$. Since our center is not algebraic but analytic, the process usually is called pseudo-affine modification. Lets denote the constructed manifold $M$ by $Mod (\mathbb{C}^{n+1}, \mathcal{D}, \mathcal{C}) = Mod( \mathbb{C}^{n+1}_{z, v}, \{v = 0\}, \varphi (\mathbb{C}^l)\times \{v=0\})$. It's clear from the geometric description that the resulting manifold does not depend on the choice of generators for the ideal $I_\mathcal{C}$ of the center. The important fact about the above modifications is that \noindent $Mod( \mathbb{C}^{n+1}_{z, v}, \{v = 0\}, \varphi (\mathbb{C}^l)\times \{v=0\}) \times \mathbb{C}^l$ is biholomorphic to $\mathbb{C}^{n+l+1}$ $\cong Mod (\mathbb{C}^{n+l+1}_{z, u, v}, \{ v=0\}, \varphi(\mathbb{C}^l) \times 0_u \times 0_v)$. 
The later biholomorphism comes from the fact that there is an automorphism of $\mathbb{C}^{n+l+1}$ leaving the divisor $\{ v= 0\}$ invariant and straightening the center $\varphi(\mathbb{C}^l) \times 0_v$ inside the divisor (see Lemma 2.5. in C^{\infty}te{DK1}). Lets check that this important fact depends holomorphically on the parameter. \begin{lemma}\label{straight} Let $\mathbb{P}hi_1 : \mathbb{C}^k \times X \hookrightarrow \mathbb{C}^k \times \mathbb{C}^n$, $\mathbb{P}hi_1 (w, x) = (w, \varphi_1 (w, x))$ and $\mathbb{P}hi_2 : \mathbb{C}^k \times X \hookrightarrow \mathbb{C}^k \times \mathbb{C}^m$, $\mathbb{P}hi_2 (w, x) = (w, \varphi_2 (w, x))$ be two holomorphic families of proper holomorphic embeddings of a complex space $X$ into $\mathbb{C}^n$ resp. $\mathbb{C}^m$ parametrized by $\mathbb{C}^k$. Then there is an automorphism $\alpha$ of $\mathbb{C}^{n+m}$ parametrized by $\mathbb{C}^k$, i.e. $\alpha \in \operatorname{Aut}_{hol} (\mathbb{C}^k_w \times \mathbb{C}^{n+m}_z)$ with $\alpha (w, z) = (w, \tilde \alpha (w, z))$, such that $\alpha C^{\infty}rc (\mathbb{P}hi_1 \times 0_m) = 0_n \times \mathbb{P}hi_2$. \end{lemma} \begin{proof} By an application of Theorem B the holomorphic map $\varphi_1 : \mathbb{C}^k \times X $ to $ \mathbb{C}^n$ extends to a holomorphic map $\mu_1$ from $\mathbb{C}^k \times \mathbb{C}^m \supset \mathbb{P}hi_2 (\mathbb{C}^k \times X)$ to $\mathbb{C}^n$ (so $\mu_1C^{\infty}rc \varphi_2 = \varphi_1$). Likewise there is a holomorphic map $\mu_2 : \mathbb{C}^k\times \mathbb{C}^n \to \mathbb{C}^m$ with $\mu_2 C^{\infty}rc \varphi_1 = \varphi_2$. Define the parametrized automorphisms $\alpha_1, \alpha_2$ of $\mathbb{C}^k\times \mathbb{C}^n\times \mathbb{C}^m$ by $\alpha_1 (w, z, y) = (w, z, y+ \mu_2 (w, z))$ and $\alpha_2 (w, z, y) = (w, z + \mu_1 (w, y), y)$. Now $\alpha = \alpha_2^{-1} C^{\infty}rc \alpha_1$ is the desired automorphism. 
\end{proof} \begin{lemma} \label{fam} Let $\mathbb{P}hi : \mathbb{C}^k \times \mathbb{C}^l \hookrightarrow \mathbb{C}^k \times \mathbb{C}^n$ $\mathbb{P}hi (w, \theta) = (w, \varphi (w, \theta))$ be a holomorphic family of proper holomorphic embeddings of $\mathbb{C}^l$ into $\mathbb{C}^n$ parametrized by $\mathbb{C}^k$. Then $Mod( \mathbb{C}^{k+n+1}_{w, z, v}, \{ v=0\}, \mathbb{P}hi (\mathbb{C}^k\times \mathbb{C}^l )\times \{ v=0 \}) \times \mathbb{C}^l \cong \mathbb{C}^{k+n+l+1}$. Moreover there is a biholomorphism such that the restriction to each fixed parameter $w\in \mathbb{C}^k$ is a biholomorphism from $ Mod( \mathbb{C}^{n+1}_{ z, v}, \{v=0\}, \mathbb{P}hi(\{w\}\times \mathbb{C}^l)\times \{v=0\}) \times \mathbb{C}^l \cong \mathbb{C}^{n+l+1}$. \end{lemma} \begin{proof} Apply Lemma \ref{straight} to the families $\mathbb{P}hi_1 = \mathbb{P}hi$ and $\mathbb{P}hi_2$ the trivial family $\mathbb{P}hi_2 : \mathbb{C}^k \times \mathbb{C}^l \hookrightarrow \mathbb{C}^k\times \mathbb{C}^l$ $\mathbb{P}hi_2 (w, \theta) = (w, \theta)$. Let $\alpha \in \operatorname{Aut}_{hol} (\mathbb{C}^k \times \mathbb{C}^n \times \mathbb{C}^l)$ be the resulting parametrized automorphism which we extend to $\mathbb{C}^{k+n+l+1}$ by letting it act trivial on the last coordinate $v$. Then by definition $Mod( \mathbb{C}^{k+n+1}_{w, z, v}, \{ v=0\}, \mathbb{P}hi (\mathbb{C}^k\times \mathbb{C}^l )\times \{ v=0 \}) \times \mathbb{C}^l = Mod( \mathbb{C}^{k+n+l+1}_{w, z,\theta, v}, \{ v=0\}, \mathbb{P}hi (\mathbb{C}^k\times \mathbb{C}^l )\times \{ v=0 \} \times 0_l)$ and applying (the extended) $\alpha$ we get that the later is biholomorphic to $Mod( \mathbb{C}^{k+n+l+1}_{w, z,\theta, v}, \{ v=0\}, \mathbb{C}^k_w \times 0_n \times \mathbb{C}^l_\theta \times \{ v=0 \} )$. The last manifold is obviously biholomorphic to $\mathbb{C}^{k+n+l+1}$ since blowing up along a straight center and deleting the proper transform of a straight divisor does not change the affine space. 
The above constructed biholomorphism restricts to each fixed parameter as desired since $\alpha$ is a parametrized automorphism. This can be also seen by writing down concrete formulas for the modifications using generators $f_1 (w,z), \ldots, f_N(w, z)$ of the ideal $I_{\primehi(\mathbb{C}^k\times\mathbb{C}^l)}$ in $\mathcal{O} (\mathbb{C}^{k+n})$ and remarking that for each fixed $w \in \mathbb{C}^k$ the functions $f_1 (w,\cdot), \ldots, f_N(w, \cdot )$ generate the ideal $I_{\mathbb{P}hi_w (\mathbb{C}^l)}$. \end{proof} Now we describe the group actions: \noindent Let $f_1 (w,z), \ldots, f_N(w, z)$ be generators of the ideal $I_{\primehi(\mathbb{C}^k\times\mathbb{C}^l)}$ in $\mathcal{O} (\mathbb{C}^{k+n})$ and consider $Mod( \mathbb{C}^{k+n+1}_{w, z, v}, \{ v=0\}, \mathbb{P}hi (\mathbb{C}^k\times \mathbb{C}^l )\times \{ v=0 \}) \times \mathbb{C}^l \cong \mathbb{C}^{k+n+l+1}$ as the affine manifold given by equations: \begin{multline*} \{ (w, z, v, u) \in \mathbb{C}^k\times \mathbb{C}^n\times \mathbb{C}\times \mathbb{C}^N : f_i (w, z) = u_i \ v \quad \forall \ i=1, \ldots, N \} \times \mathbb{C}^l_x \end{multline*} On it we consider the action of $\mathbb{C}^*_\nu$ given by the restriction of the following linear action on the ambient space: \begin{multline} \mathbb{C}^* \times \mathbb{C}^k \times \mathbb{C}^n \times \mathbb{C} \times \mathbb{C}^N \times \mathbb{C}^l \to \mathbb{C}^k \times \mathbb{C}^n \times \mathbb{C} \times \mathbb{C}^N \times \mathbb{C}^l \\(\nu, (w, z, v, u, x)) \mapsto (w, z, \nu^2 v, \nu^{-2} u_1, \ldots ,\nu^{-2} u_N, \nu x_1, \ldots, \nu x_l) \end{multline} This gives by Lemma \ref{fam} a holomorphic family of $\mathbb{C}^*$-actions on $\mathbb{C}^{n+l+1}$ parametrized by $\mathbb{C}^k$, i.e., an action $\mathbb{C}^* \times \mathbb{C}^k \times \mathbb{C}^{n+l+1} \to \mathbb{C}^k \times \mathbb{C}^{n+l+1}$ of the form $(\nu (w, z)) \mapsto (w, \nu (w, z))$. 
Calculating (as in C^{\infty}te{DK}) the Luna-stratification of the categorical quotient $\mathbb{C}^{n+l+1}/ \hspace{-3 pt} / \mathbb{C}^*$ for the $\mathbb{C}^*$-action for fixed $w$, in particular the inclusion of the fixed point stratum in the $\mathbb{Z}/2\mathbb{Z}$-isotropy stratum one sees that this inclusion is biholomorphic to $\mathbb{P}hi_w (\mathbb{C}^l) \subset \mathbb{C}^n$. Thus if for different parameters $w_1 \ne w_2$ there were an equivariant automorphism $\alpha \in \operatorname{Aut}_{hol} (\mathbb{C}^n)$ the induced isomorphism of the categorical quotients would map the Luna-stratifications onto each other. Therefore the restriction of that induced isomorphism to the $\mathbb{Z}/2\mathbb{Z}$-isotropy stratum would give an automorphism $\beta$ of $\mathbb{C}^n$ with $\beta (\mathbb{P}hi_{w_1} (\mathbb{C}^l)) = \mathbb{P}hi_{w_2} (\mathbb{C}^l)$. This shows that pairwise non-equivalent embeddings lead to non-equivalent $\mathbb{C}^*$-actions. Combining this with Theorem \ref{main} (embeddings of $\mathbb{C}$ into $\mathbb{C}^n$ parametrized by $\mathbb{C}^{n-2}$ for $n\ge 3$) we have proved Theorem \ref{action1} from the introduction. In the same way Theorem \ref{action2} from the introduction follows from Proposition \ref{standard}. It's an easy exercise that a straight embedding leads to a linear action. \section{Concluding remarks}\label{concluding} Carefully examining the proof of Theorem \ref{main} and the proofs of the technical results from section \ref{technik} one sees that there is no place where we use the fact that the parameter space is affine space $\mathbb{C}^k$. What we use of the parameter space is a graduation (in the proof of the parametrized Anders\'en-Lempert-theorem) so say an affine algebraic variety would do the job. Most important is the dimension condition $\dim X + \dim (\rm {parameter space}) < n$ (here dimension is always dimension of the smooth part). 
So in fact we construct families parametrized by any space of the right dimension. The authors wonder whether there is any nice structure on the set of all equivalence classes of proper holomorphic embeddings say of $\mathbb{C}^l$ into $\mathbb{C}^n$ and how "big" is this set? Our construction of embeddings used two techniques, the growth restrictions which worked well for embedding manifolds with a "small" automorphism group, namely a Lie group, and the Eisenman hyperbolicity for crossing the situation with affine space. Combining this we got families of embeddings of affine spaces. What about the number of equivalence classes of proper holomorphic embeddings of other manifolds with infinite-dimensional automorphism groups, e.g. manifolds with the density property, into affine spaces? A concrete question in this direction would be: {\em How many embeddings of a Danielevski surface $f(X) = u v$ into affine spaces do there exist?} It's known that there exist at least two algebraic embeddings of the Danielevski surface $p(y) = u^n v$ (degree of $p$ is at least 2) into $\mathbb{C}^3$ which are algebraically non-equivalent, i.e. there is no algebraic automorphism of $\mathbb{C}^3$ mapping one image onto the other C^{\infty}te{FrMo}. In the same paper Freudenburg and Moser show that the constructed embeddings are holomorphically isomorphic using the linearization results of Heinzner and the first author C^{\infty}te{HK}. On the other hand there is a non-standard holomorphic embedding of the Danielevski surface into $\mathbb{C}^3$, which follows from the ideas of Rosay and Rudin C^{\infty}te{RR1}: \begin{proposition} Any algebraic subvariety $A$ in $\mathbb{C}^n$ ($n\ge2$) admits another holomorphic embedding into $\mathbb{C}^n$ not isomorphic to the inclusion. \end{proposition} \begin{proof} The restriction of a generic projection onto a hyperplane to $A$ is a proper map. 
Thus by the results in \cite{RR1} any discrete sequence of points in $A$ is tame (in fact very tame). Now there is a holomorphic embedding $\varphi$ of $A$ into $\mathbb{C}^n$ (constructed by applying a sequence of automorphisms to the inclusion) such that $\varphi (A)$ contains a non-tame set $F$ (details as in \cite{FGR}). The existence of a holomorphic automorphism mapping $A$ onto $\varphi (A)$ would contradict the non-tameness of $F$. \end{proof} It would be interesting to know under which conditions parametrized points (by any parameter space and in any category: continuous, holomorphic, differentiable, algebraic) are simultaneously standardizable. \section{APPENDIX: proofs of technical preparations}\label{prooflemmas} In this section we give the proofs of the lemmas etc.\ used in the proof of the main theorem. \subsection{A parametrized version of the Andersén-Lempert theorem} The main technique we use to construct families of embeddings is (compositions of) automorphisms of $\mathbb{C}^n$. The ground-breaking papers of Anders\' en and Lempert (\cite{A}, \cite{AL}) established remarkable properties of the automorphism group of $\mathbb{C}^n$ $(n \ge 2 )$ which imply, in particular, that any local holomorphic phase flow on a Runge domain $\Omega$ in $\mathbb{C}^n$ can be approximated by global holomorphic automorphisms of $\mathbb{C}^n$ (for an exact statement see Theorem 2.1 in \cite{FR}). We will give here a parametrized version of the so-called Andersén-Lempert theorem and in addition we consider the following two geometric structures: that of vector fields vanishing on the first $N$ standard points in $\mathbb{C}^n$, and that of vector fields vanishing on the first coordinate axis. Since the parametric version is an easy consequence of the non-parametric version and the fixing of the first $N$ standard points is a special case of Theorem 6 in \cite{KaKu} we just give a small indication of the proof. 
\begin{theorem}[Andersén-Lempert-theorem with parameter and fixing finitely many points]\label{alwpfc} Let $\Omega$ be an open set in $\mathbb{C}^k\times \mathbb{C}^n$ ($n\geq 2$) and let $(w,z_j)=(w,j,0,\ldots ,0)\in \Omega$, $j=1,\ldots ,N$. For every $t\in [ 0,1]$ let $\mathbb{P}hi_t$ be a biholomorphic map from $\Omega$ into $\mathbb{C}^k\times \mathbb{C}^n$, which is of the form $$\mathbb{P}hi_t(w,z)=(w,\varphi_t(w,z)), \quad z\in \mathbb{C}^n,w\in \mathbb{C}^k$$ such that $\mathbb{P}hi_t(w,z_j)=(w,z_j)\ \forall w\in \mathbb{C}^k$ (resp. $\mathbb{P}hi_t(w,z_1,0,\ldots, 0)=(w,z_1,0, \ldots, 0)\ \forall z_1 \in \mathbb{C} \ \forall w\in \mathbb{C}^k$) and such that it is of class $C^2$ in $(t,z,w)\in [0,1]\times \Omega$. Assume that each domain $\mathbb{P}hi_t(\Omega )$ is Runge in $\mathbb{C}^k\times \mathbb{C}^n$. If $\mathbb{P}hi_0$ can be approximated on $\Omega $ by holomorphic automorphisms of $\mathbb{C}^n$ depending on the parameter $w\in \mathbb{C}^k$, fixing $(w,z_j)$ for every $w\in \mathbb{C}^k$ (resp. fixing $(w,z_1,0, \ldots, 0)\ \forall z_1 \in \mathbb{C} \ \forall w\in \mathbb{C}^k$) then for every $t\in [0,1]$ the map $\mathbb{P}hi_t$ can be approximated on $\Omega$ by holomorphic automorphisms $\alpha$ of $\mathbb{C}^n$ depending on the parameter $w\in \mathbb{C}^k$ such that $\alpha (w,z_j)=(w,z_j) \ \forall w\in \mathbb{C}^k$ (resp. $\alpha (w,z_1,0,\ldots, 0)=(w,z_1,0, \ldots, 0)\ \forall z_1 \in \mathbb{C} \ \forall w\in \mathbb{C}^k$). \end{theorem} To indicate the proof we just remark that the above theorem follows by standard techniques from the following version of the \noindent {\bf Anders\'en-Lempert-observation:} \textit{ Every polynomial vector field on $\mathbb{C}^{k+n}$ ($n\geq 2$) of the form \begin{multline*} X=p_1(w_1,\ldots ,w_k,z_1,z_2,\ldots ,z_n)\frac{\primeartial}{\primeartial z_1}+ \ldots +\\ + p_n(w_1,\ldots ,w_k,z_1,z_2,\ldots ,z_n)\frac{\primeartial}{\primeartial z_n}. 
\end{multline*} vanishing at the first $N$ standard points, i.e. with $$p_i ( w,z_j) = 0 \ \forall i = 1, \ldots , n \ \forall j= 1, \ldots , N ,$$ is a finite Lie combination of completely integrable polynomial vector fields of the above form vanishing at the first $N$ standard points. The same holds if we consider polynomial vector fields vanishing on the first coordinate line instead.} To prove this observation we develop $X$ by powers of $w$ $$ X = \sum_\alpha w^\alpha X_\alpha $$ and remark that the polynomial vector fields $X_\alpha$ on $\mathbb{C}^n$ vanish at the first $N$ standard points (resp. on the first coordinate line). By Theorem 6 in C^{\infty}te{KaKu} (the union of the first $N$ standard points is an algebraic subset of $\mathbb{C}^n$ of codimension at least 2) (resp. by Theorem 5.1 in C^{\infty}te{V}) they can be written as a finite Lie combination of globally integrable polynomial fields on $\mathbb{C}^n$ vanishing on the first $N$ standard points (resp. on the first coordinate line) say $\theta^i_\alpha$ $i= 1, \ldots, N(\alpha)$. The same Lie combination with $\theta^1_\alpha$ replaced by $w^\alpha \theta^1_\alpha$ (which is still globally integrable on $\mathbb{C}^{n+k}$, on each orbit the factor $w^\alpha$ is a constant) yields $w^\alpha X_\alpha$ as a Lie combination of globally integrable fields. Summing up over the multiindex $\alpha$ we get the desired result. \subsection{Families of generic finite sets with respect to affine automorphisms} As already mentioned we will use growth restrictions to prove that the embeddings for different parameters are not equivalent. The conclusion of these growth conditions will be the following: If two different embeddings in our family are equivalent, then their images can be mapped onto each other only by some affine automorphism of $\mathbb{C}^n$. 
Although it is very unlikely that the images of two different and more or less complicated embeddings can be mapped onto each other by affine automorphisms, we must be accurate in excluding this possibility. Here are some technical preparations to this point. If we choose $n+1$ points $x_1,x_2,\ldots ,x_{n+1}$ such that the difference vectors $x_1-x_i$, $i=2,3,\ldots ,n+1$ form a basis of $\mathbb{C}^n$, i.e. $x_1,x_2,\ldots ,x_{n+1}$ do not all lie on some affine hyperplane, then with a little linear algebra we reach the following conclusion: \begin{lemma}\label{pointlemma} For each $m\geq n+2$ there exist $m$ points $x_1,x_2,\ldots ,x_m\in \mathbb{C}^n$ with the following property: No affine automorphism $\alpha \in \rm{Aff}(\mathbb{C}^n )$ of $\mathbb{C}^n$ can map $n+2$ of them into the set $\{ x_1,x_2,\ldots ,x_m\}$. \end{lemma} \begin{notation} Given any open subset $\Omega$ of $\mathbb{C}^n$ the points $x_1,x_2,\ldots ,x_m$ can be chosen to be contained in $\Omega$. \end{notation} Let $\delta_{ij}$ denote the diagonal $$\delta_{ij}=\{ (z_1,z_2,\ldots , z_N)\in (\mathbb{C}^n)^N:z_i=z_j\}$$ and $(\underbrace{\mathbb{C}^n\times \cdots \times \mathbb{C}^n}_N\setminus \bigcup_{1\leq i<j\leq N}\delta_{ij})/S_N$ is the quotient (manifold since we have excluded all diagonals) by the action of the symmetric group $S_N$ in $N$ letters acting by permuting the entries on $N$-tuples of points in $\mathbb{C}^n$. The corresponding map is denoted by $\primei$. \begin{lemma}\label{nnklemma} Let $n\geq 2$ and $k>0$ be natural numbers. Then there exists some $N\in \mathbb{N}$ such there is an injective holomorphic map $$\varphi :\mathbb{C}^k \to \underbrace{\mathbb{C}^n\times \cdots \times \mathbb{C}^n}_N\setminus \bigcup_{1\leq i<j\leq N}\delta_{ij}$$ such that the composition map $$\primei C^{\infty}rc \varphi :\mathbb{C}^k \to (\underbrace{\mathbb{C}^n\times \cdots \times \mathbb{C}^n}_N\setminus \bigcup_{1\leq i<j\leq N}\delta_{ij})/S_N$$ is injective. 
Moreover if $\varphi =(\varphi_1,\varphi_2,\ldots ,\varphi_N)$ then $\mathbb{C}^n\setminus \cup^N_{i=1}\varphi_i(\mathbb{C}^k)$ contains some nonempty open subset. \end{lemma} \begin{proof} The desired number $N$ will be any number such that $n\cdot N\geq k$. Namely we will prove that there is a biholomorphic image $\Omega$ of $(\mathbb{C}^n)^N$ in $(\mathbb{C}^n)^N$ (a so called Fatou-Bieberbach domain) which does not intersect any of the diagonals $\delta_{ij}$ and so that the restriction of the quotient map $\primei :(\mathbb{C}^n)^N \to ((\mathbb{C}^n)^N)/S_N$ onto $\Omega$ is injective, i.e. if the point $(z_1,z_2,\ldots ,z_N)$ is in $\Omega$ then for any permutation $\sigma \in S_N\setminus \textrm{Id}$ the point $(z_{\sigma (1)},z_{\sigma (2)},\ldots , z_{\sigma (N)})$ is not contained in $\Omega$. For this start with countably many pairwise disjoint Fatou-Bieber\-bach domains $\Omega_1,\Omega_2,\ldots ,\Omega_N,\ldots$ in $\mathbb{C}^n$. Such domains exist, see for example C^{\infty}te{EFW}, where countably many pairwise disjoint Fatou-Bieberbach domains are constructed arising as basins of attraction of some automorphism of $\mathbb{C}^n$ having countably many attracting fixed points. Now take $N$ of them and denote by $\primesi_i$ some biholomorphic maps $\primesi_i:\mathbb{C}^n\to \Omega_i\subset \mathbb{C}^n\quad i=1,2,\ldots ,N$. The map $$\primesi :(\mathbb{C}^n)^N\to (\mathbb{C}^n)^N, \primesi (z_1,z_2,\ldots ,z_N)=(\primesi_1(z_1),\primesi_2(z_2), \ldots ,\primesi_N(z_N))$$ is injective, its image is the Fatou-Bieberbach domain $\Omega_1\times \Omega_2\times \cdots \times \Omega_N$ in $(\mathbb{C}^n)^N$, which does not intersect any diagonal since the $\Omega_i$'s are pairwise disjoint and for the same reason $\Omega \cap \sigma (\Omega )=\emptyset$ for any permutation $\sigma \in S_N\setminus \textrm{Id}$. 
The complement $\mathbb{C}^n\setminus \cup_{i=1}^N(\mathbb{C}^k)$ contains the union $\cup_{i=N+1}^{\infty}\Omega_i$ of all remaining Fatou-Bieberbach domains, hence a non-empty subset. For $n\cdot N\geq k$ we can choose some injective holomorphic map $\alpha :\mathbb{C}^k \mapsto (\mathbb{C}^n)^N$ and put $\varphi :=\primesi C^{\infty}rc \alpha$, which is the desired map. \end{proof} Now we are able to prove Proposition \ref{pointprop}. \begin{proof}[Proof of Proposition \ref{pointprop}] By Lemma \ref{nnklemma} we can find $N$ (for $n\cdot N\geq k$) (pairwise different) points $\xi_1,\xi_2, \ldots ,\xi_N$ in $\mathbb{C}^n$ parametrized by $\mathbb{C}^k$ such that for different parameters $w_1\neq w_2\in \mathbb{C}^k$ the set of points $\{ \xi_1(w_1),\xi_2(w_1),\ldots ,\xi_m(w_1)\}$ and $\{ \xi_1(w_2),\xi_2(w_2),\ldots ,\xi_m(w_2)\}$ are different. Choose $M\in \mathbb{N}$ such that $M-N\geq n+2$. By Lemma \ref{pointlemma} we can find $M$ points $x_1,x_2,\ldots ,x_M$ in $\mathbb{C}^n$ such that no affine automorphism except the identity can map $n+2$ of them into the set $\{ x_1,x_2,\ldots ,x_M\}$. Choose the next parametrized points $\xi_{N+1},\xi_{N+2},\ldots ,\xi_{N+M}:\mathbb{C}^k\to \mathbb{C}^n$ to be constant $$\xi_{N+i}(w)=x_i\quad \textrm{ for every } w\in \mathbb{C}^k \quad i=1,2,\ldots ,M.$$ To make the parametrized points $\xi_1,\xi_2,\ldots ,\xi_{M+N}$ pairwise different (for any fixed parameter) we choose the points $x_1,x_2,\ldots ,x_M$ from some open subset in the complement of all images $\xi_j(\mathbb{C}^k)\subset \mathbb{C}^n\quad j=1,2,\ldots ,N$. We claim that $\{ \xi_1(w),\xi_2(w),\ldots ,\xi_{N+M}(w)\}$ satisfy our condition (so $m=N+M$). Indeed suppose that for two different parameters $w_1\neq w_2\in \mathbb{C}^k$ there is an affine automorphism $\alpha \in \mathrm{Aff}(\mathbb{C}^n)$ which map the set of points $\{ \xi_1(w_1),\xi_2(w_1),\ldots ,\xi_{N+M}(w_1)\}$ onto the set of points $\{ \xi_1(w_2),\xi_2(w_2),\ldots ,\xi_{N+M}(w_2)\}$. 
Since $M-N\geq n+2$ at least $n+2$ of the last $M$ points, ($x_1,x_2,\ldots ,x_M$), are mapped by $\alpha$ into $\{ x_1,x_2,\ldots ,x_M\}$, and note that at most $N$ points among them can be mapped onto the first $N$ points! By the choice of $x_1,x_2,\ldots ,x_M$ according to Lemma \ref{pointlemma} this implies that $\alpha$ is the identity map. But this means that the identity maps the last $M$ (constant) points onto themselves, hence the points $\{ \xi_1(w_1),\xi_2(w_1),\ldots ,\xi_N (w_1)\}$ onto the points $\{ \xi_1(w_2),\xi_2(w_2),\ldots ,\xi_N(w_2)\}$, which is impossible since those sets are different by Lemma \ref{nnklemma}. Thus no such affine automorphism $\alpha$ exists. \end{proof} \subsection{Moving finitely many parametrized points.} Recall that we needed the notion of simultaneously standardazable points in the proof of the main theorem. Given $N$ parametrized points $\zeta_i:\mathbb{C}^k\to C^n$. If we can find an automorphism $\primesi \in \operatorname{Aut}_{hol}^k(\mathbb{C}^n)$ such that $$\primesi (w,\zeta_i(w))=(w,(i,0)) \quad \textrm{for all } i=1,2,\ldots ,N \quad \textrm{and for all } w\in \mathbb{C}^k ,$$ we say that the points are simultaneously standardazable. The following theorem is a special case of the Oka-Grauert-Gromov-h principle in complex analysis. Even if our application would fit in the classical context proved by Grauert C^{\infty}te{Gr1}, C^{\infty}te{Gr2}, C^{\infty}te{Gr3} (our transition functions are affine linear, i.e. contained in a complex Lie group) we formulate it in a more general (but not too general in order to avoid the discussion of sprays) way. For reference see C^{\infty}te{FF2} , section 2.3 in C^{\infty}te{Gro} or Theorem 1.4 in C^{\infty}te{FP2}. \begin{theorem}[Oka principle with approximation]\label{oka} Let $X$ be a Stein manifold and let $Z$ be a locally trivial bundle such that the fiber $Z_x$ is isomorphic to $\mathbb{C}^n$. 
If $s : X\to Z$ is a continuous section which is holomorphic in a neighborhood of an $\mathcal{O} (X)$-convex compact subset $K$ then there exists a holomorphic section $\tilde s: X\to Z$ such that $\tilde s$ approximates $s$ uniformly on $K$. \end{theorem} \begin{example} Let $q_1(w),q_2(w),\ldots ,q_n(w):\mathbb{C}^k \to \mathbb{C}$ be holomorphic functions without common zeros. We want to find holomorphic functions $h_1(w),h_2(w),\ldots ,h_n(w) :\mathbb{C}^k \to \mathbb{C}$ such that \begin{equation*} h_1(w)q_1(w)+h_2(w)q_2(w)+\ldots +h_n(w)q_n(w)=1 \end{equation*} for every $w\in \mathbb{C}^k$, i.e. find a point $(h_1(w),h_2(w),\ldots ,h_n(w))$ in the hyperplane given by \begin{equation}\label{corona} x_1q_1(w)+x_2q_2(w)+\ldots +x_nq_n(w)=1. \end{equation} Since $q_1(w),q_2(w),\ldots ,q_n(w)$ do not have common zeros, we can find local solutions: If (e.g.) $q_1(w)\neq 0$ then we can solve the problem in a neighborhood of $w_0$ by setting $h_2(w)=\ldots =h_n(w)=1$ and $$h_1(w)=\frac{1-(h_2(w)q_2(w)+\ldots +h_n(w)q_n(w))}{q_1(w)}.$$ Let $Z$ be the (locally trivial with affine linear transition functions) bundle over $\mathbb{C}^k$ such that the fiber $Z_w$ is the hyperplane \eqref{corona} in $\mathbb{C}^n$. Given a holomorphically convex compact set $J\in \mathbb{C}^k$ and a holomorphic section $s$ of the bundle over a neighborhood of $J$ we can, by standard arguments in obstruction theory (all homotopy groups of the fiber vanish), extend it to a continuous section $s : \mathbb{C}^k\to Z$. Theorem \ref{oka} gives a holomorphic section $\tilde s : \mathbb{C}^k\to Z$ which approximates $s$ uniformly on the compact $J$. 
\end{example} \begin{lemma}\label{te1.1} Given a holomorphic map $\xi =(\xi_1,\ldots ,\xi_n):\mathbb{C}^k\to \mathbb{C}^n$, ($n\geq 2$), always disjoint from the first $N$ standard points, $\xi (w)\notin \cup_{i=1}^N\{ (i,0,\ldots ,0)\},$ and such that the functions $\xi_2,\ldots ,\xi_n \in \mathcal{O} (\mathbb{C}^k)$ have no common zero on $\mathbb{C}^k$. Then there exist $\alpha \in \rm{Aut}^k_{hol}(\mathbb{C}^n)$ fixing the first $N$ standard points, $\alpha (w,i,0,\ldots ,0)=(w,i,0,\ldots ,0)$ with $\alpha (w,\xi (w))=(w,z_0)$, i.e. $\alpha (w,\xi (w))$ is a constant point $(w,z_0)$ for every $w\in \mathbb{C}^k$. Moreover given a ball $J=r\overline \mathbb{B}_k \subset \mathbb{C}^k$ and a number $R>N$ such that for $w\in J$ hold: $| \xi (w)|>R$. Then for any $\epsilon >0$ the automorphism $\alpha$ can be chosen in such a way that $$\max_{w\in J, |z|\leq R}|\alpha (w,z)-(w,z)| <\epsilon .$$ \end{lemma} \begin{proof} The first step consists in an application of Theorem \ref{alwpfc} with fixing the first coordinate line to bring the points $\xi (w)$, $w\in J$, arbitrarily nearby to a constant position. To apply the theorem let $\Omega = \bigcup_{w\in r'\mathbb{B}_k}\{ w\} \times \{ R'\mathbb{B}_n \cup \epsilon_1\mathbb{B}_n (\xi (w))\}$, where $\mathbb{B}_n(\xi (w))$ is the unit ball in $\mathbb{C}^n$ with center in $\xi (w)$, with $r',R'$ slightly bigger than $r,R$ and $\epsilon_1$ sufficiently small so that $\epsilon_1\mathbb{B}_n (\xi (w))$ has empty intersection with the first coordinate line for all $w\in r'\mathbb{B}_k$. Note that $\Omega$ is Runge in $\mathbb{C}^k\times \mathbb{C}^n$. 
Approximating the map $\Phi_t:[ 0,1] \times \Omega \to \mathbb{C}^k\times \mathbb{C}^n$ defined by $\Phi_t(w,z)=(w,z)$ for every $w\in r'\mathbb{B}_k$, $z\in R'\mathbb{B}_n$ and $\Phi_t(w,z)=(w,\xi ((1-t) w)+z-\xi (w))$ for $w\in r'\mathbb{B}_k , z\in \epsilon_1 \mathbb{B}_n (\xi (w))$, gives an $\alpha_1 \in \rm{Aut}^k_{hol}(\mathbb{C}^n)$: \begin{multline*} \alpha_1 (w,(z_1,0,\ldots ,0))=(w,(z_1,0,\ldots ,0)) \ \forall z_1 \in \mathbb{C} \ \forall w\in \mathbb{C}^k\\ |\alpha_1 (w,z)-(w,z)|_{w\in J, z\in R\mathbb{B}_n }<\epsilon \\ |\alpha_1 (w,\xi (w))- (w,\xi (0))| < \epsilon \quad \forall w \in J \end{multline*} where $\epsilon$ is arbitrarily small. Remark that the last $n-1$ coordinate functions of $\alpha_1 (w, \xi (w))$ have no common zero on $\mathbb{C}^k$, since by assumption the same was true for $\xi (w)$ and the first coordinate line is fixed by $\alpha_1$. A second application of Theorem \ref{alwpfc}, again with fixing the first coordinate line, using a $C^2$-path from $\xi (0)$ to the point $(2R,1,0,\ldots ,0)$ not intersecting the first coordinate line and not intersecting $R'\mathbb{B}_n$ shows that we in addition can assume $$|\alpha_1 (w,\xi (w))-(2R,1,0, \ldots ,0)|<\epsilon$$ for $w\in J$. Denote the coordinate functions of $\alpha_1 (w, \xi (w))$ by $(\Check q_1(w),\Check q_2(w),\ldots ,$ $\Check q_n(w))$ and observe that $\Check q_2(w),\ldots , \Check q_n(w)$ have no common zeros for $w\in \mathbb{C}^k$. Now define functions $\hat h_i\in \mathcal{O} (J)$ by $$\hat h_3(w)=\ldots =\hat h_n(w)=1$$ and $$\hat h_2(w)=\frac{1-\Check q_3(w)\hat h_3(w)-\ldots -\Check q_n(w)\hat h_n(w)}{\Check q_2(w)}$$ for $w\in J$. Note that $\hat h_2(w)\approx 1$ for $w\in J$.
By Theorem \ref{oka} (see example thereafter) we have that for every $\epsilon>0$ there exist $h_i\in \mathcal{O} (\mathbb{C}^k)$ with $$\sum_{i=2}^nh_i(w)\Check q_i(w)=1, \forall w\in \mathbb{C}^k$$ and $$\| h_i(w)-\hat h_i(w)\| _J<\epsilon ,$$ which implies $h_i(w)\approx 1$ for $w\in J$ for all $i=2, \ldots ,n$. Define an automorphism $\alpha_2\in \rm{Aut}^k_{hol}(\mathbb{C}^n)$ by \begin{multline*} (w,z)\mapsto \\ (w, z_1+(2R-\Check q_1(w))[z_2h_2(w)+z_3h_3(w)+\ldots +z_nh_n(w)],z_2,z_3,\ldots , z_n). \end{multline*} It holds: $$\alpha_2(w,\Check q(w))=(w,2R,\Check q_2(w), \ldots ,\Check q_n(w))$$ and $$\alpha_2(w, (i,0,\ldots ,0))=(w, (i,0,\ldots ,0)).$$ The next step is to construct an automorphism $\alpha_3\in \rm{Aut}^k_{hol}(\mathbb{C}^n)$ that moves $\alpha_2(w,\Check q(w))$ to $(w,2R,0,\ldots ,0)$. So define a polynomial on $\mathbb{C}$ $$Q(t)=\left( (t-1)(t-2)\cdots (t-N)\frac 1{(2R-1)(2R-2)\cdots (2R-N)} \right)^H,$$ where $H$ is so large that $|t|<R$ implies $|Q(t)|< \epsilon$, and define the automorphism $\alpha_3$ by \begin{multline*} (w,z)\mapsto \\ (w,z_1,z_2-Q(z_1)\Check q_2(w), z_3-Q(z_1)\Check q_3(w),\ldots ,z_n-Q(z_1)\Check q_n(w)). \end{multline*} From this we get $$\alpha_3 (w,i,0,\ldots ,0)=(w,i,0,\ldots ,0)$$ for $i=1,\ldots ,N,$ $$\alpha_3\circ \alpha_2(w,\Check q(w))=(w,2R,0,\ldots ,0)$$ and it is easy to check that $$\max_{w\in J, |z|\leq R} |\alpha_3 \circ \alpha_2\circ \alpha_1(w,z)-(w,z)|$$ is arbitrarily small. The composition $\alpha =\alpha_3 \circ \alpha_2 \circ \alpha_1$ is our desired automorphism. \end{proof} \begin{notation} The reason that we bring the points $\{ \xi (w), w\in J \}$ first near to $(2R,1,0,\ldots ,0)$ (instead of $(2R,0,\ldots ,0)$ directly) and make them afterwards constant at the point $(2R,0,\ldots ,0)$ is the following: Our method could lead to a big movement of $R\mathbb{B}_n$, as we see in the following example.
\end{notation} \begin{example}\label{bigmotion} Let $K=K_1 \times \bar \Delta^n$ where $\bar \Delta^n$ is the closure of the unit polydisc in $\mathbb{C}^n$ and $K_1$ some compact set in the parameter space $\mathbb{C}^k$. We will consider the following perturbation of the first $N$ standard points: We suppose that the first $N-1$ points remain at their standard positions, $$\zeta_i(w)=(i,0,\ldots ,0)\quad i=1,2,\ldots ,N-1$$ and the $N$-th point is moved by some very small amount from the standard position $$\zeta_N(w)=(2R-\epsilon ,\epsilon^2,\ldots ,\epsilon^2).$$ According to the proof of Lemma \ref{te1.1} we have to find holomorphic functions $h_2(w),h_3(w), \ldots ,h_n(w)$ with $\sum_{i=2}^nh_i(w)\epsilon^2=-2R+\epsilon +2R=\epsilon$, i.e.\ $\sum_{i=2}^nh_i(w)=1/\epsilon$. Therefore the automorphism $\alpha_2\in \mathrm{Aut}^k_{hol}(\mathbb{C}^n)$ defined by $$(w,z_1,z_2,\ldots ,z_n)\mapsto (w,z_1+z_2h_2(w)+\ldots +z_nh_n(w),z_2,z_3,\ldots ,z_n)$$ moves for instance all points of the form $(0,z_1,1,\ldots ,1)\in K_1 \times \bar \Delta ^n$ by the vector $(0,1/\epsilon ,0, \ldots ,0)$, i.e.\ $(0,z_1,1,\ldots ,1)\mapsto (0,z_1+1/\epsilon, 1,\ldots ,1)$, which has length going to infinity when our perturbation of the $N$-th point is going to zero. \end{example} From Lemma \ref{te1.1} we get Proposition \ref{standp}. \begin{proof}[Proof of Proposition \ref{standp}] Proceed by induction over the number $N$ of points: For $N=1$ the (parametrized translation) automorphism defined by $(w,z)\mapsto (w,z-\zeta_1(w)+1)$ solves the problem in general, i.e.\ without any assumption on the dimension $k$ of the parameter space. Suppose the problem is solved for $N-1$ parametrized points.
To solve it for $N$ points take by induction assumption an automorphism $\alpha_1 \in \mathrm{Aut}^k_{hol}(\mathbb{C}^n)$ which moves the first $N-1$ points to their standard places $$\alpha_1(w,\zeta_i(w))=(w,(i,0))\textrm{ for all } i=1,2,\ldots ,N-1 \textrm{ and for all } w\in \mathbb{C}^k.$$ It is not difficult to find an automorphism $\alpha_2\in \mathrm{Aut}^k_{hol}(\mathbb{C}^n)$ which fixes the first $N-1$ standard points, $\alpha_2(w,(i,0))=(w,(i,0))$ for all $i=1,2,\ldots ,N-1$ and for all $w\in \mathbb{C}^k$, such that the submanifold of $\mathbb{C}^{k+n}$ described by the (moved) last parametrized point $$U:=\{ (w,\alpha_2 \circ \alpha_1\circ \zeta_N(w)):w\in \mathbb{C}^k\} \subset \mathbb{C}^{k+n}$$ is transversal to the (parametrized) $z_1$-axis $$V:=\{ (w,(z_1,z_2,\ldots ,z_n))\in \mathbb{C}^{k+n}:z_2=z_3=\ldots =z_n=0\} .$$ Because of the dimension assumption this means that the two submanifolds $U$ and $V$ do not meet ($\dim U +\dim V=k+(k+1)<n+k=\dim \mathbb{C}^{k+n}$). In other words we are in the position of Lemma \ref{te1.1} and find an automorphism $\alpha_3\in \mathrm{Aut}^k_{hol}(\mathbb{C}^n)$ fixing the first $N-1$ standard points and moving $(w,\alpha_2 \circ \alpha_1\circ \zeta_N(w))$ to its standard place. The composition $\alpha_3 \circ \alpha_2 \circ \alpha_1$ is the desired automorphism moving all $N$ parametrized points into their standard positions. \end{proof} We are now set to prove the interpolation lemma, Lemma \ref{FLYTTLEMMAT}. \begin{proof}[Proof of Lemma \ref{FLYTTLEMMAT}] Since by the dimension assumption $\dim X + k <n$ the points $b_i(w)$ are simultaneously standardizable (see Corollary \ref{standp}), we can find $\alpha_1\in \rm{Aut}^k_{hol}(\mathbb{C}^n)$ such that $\alpha_1(w,b_i(w))=(w,(i,0,\ldots ,0))$ for every $w\in \mathbb{C}^k$.
Choose $R>0$ such that $\pi_2(\alpha_1(K))\subset R\mathbb{B}_n$ and choose $M$ such that $\alpha_1^{-1}(R\mathbb{B}_n )\subset M\mathbb{B}_n$. Let $C$ be a positive real constant such that the point $\tilde p(w)=Cp(w)$ is outside of the ball of radius $M$ for every $w\in \overline \Delta$. By transversality and our dimension condition \eqref{dim} we can assume that an arbitrary small perturbation $\gamma_p$ of the path $p(w)+t(\tilde p(w)-p(w))$, $t\in [0,1]$, does not intersect $\pi_2(\phi ( \overline \Delta \times \overline X_{R}))$ for any $w\in \overline \Delta$. Construct $\tilde q(w)$ and a path $\gamma_q$, in the same way with the additional demand that the path for $q$ does not intersect the path for $p$. By the Andersén-Lempert-theorem with parameters and fixing standard points (see Theorem \ref{alwpfc}) applied to the set $\alpha_1(K)$ union with a neighborhood of the paths $\alpha_1(\gamma_p)$ and $\alpha_1 (\gamma_q)$ there exists an $\alpha_2\in$ $ \rm{Aut}^k_{hol}(\mathbb{C}^n)$ such that $\alpha_2(w,z)$ is close to the identity for $(w,z)\in K$ and $|\pi_2(\alpha_2\circ \alpha_1(w,q(w)))|>R$ for every $w\in \overline \Delta$. Furthermore we have $\alpha_2\circ \alpha_1(w,b_i(w))=(w,(i,0,\ldots ,0))$ for every $w\in \mathbb{C}^k$. Moreover $\alpha_2\circ \alpha_1(K)\subset R\mathbb{B}_n$. To be able to apply Lemma \ref{te1.1} we perturb $\alpha_2\circ \alpha_1(w,p(w))$ slightly to make sure that the last $n-1$ coordinate functions have no common zero on $\mathbb{C}^k$, at the same time fixing the points $(w,(i,0,\ldots ,0))$, $i=1,\ldots ,N$. This is possible by transversality and our dimension assumption \eqref{dim}, i.e.\ $\dim X +k<n$.
Now an application of Lemma \ref{te1.1} gives an automorphism $\alpha_3 $ arbitrarily close to identity on $\overline \Delta \times R \mathbb{B}_n$ fixing the first $N$ standard points, such that $\alpha_3 \circ \alpha_2\circ \alpha_1(w,p(w))=(w,2R,1,0,\ldots ,0)$ for every $w\in \mathbb{C}^k$. By another application of the same lemma, we can in addition assume that $\alpha_3 \circ \alpha_2\circ \alpha_1(w,q(w))=(w,2R,2,0,\ldots ,0)$ for every $w\in \mathbb{C}^k$. Finally we take a polynomial $\tilde P(z_1)$ such that $\tilde P(2R)=1$, $| \tilde P(z_1)|< \epsilon $ for $z_1\in (R+1)\overline \mathbb{B}_1$ and $\tilde P(z_1)=0$ to order $l$, for $z_1\in \{ 1,\ldots ,N\}$. Then define the automorphism $$\alpha_4(w,z)=(w,z_1,z_2+ \tilde P(z_1),z_3, \ldots, z_n).$$ Consequently $$\alpha_4 (w,i,0,\ldots ,0)=(w,i,0,\ldots ,0)$$ to prescribed order for $i=1,\ldots ,N$ and $\alpha_4 \circ \alpha_3 \circ \alpha_2 \circ \alpha_1(w,p(w))=\alpha_3 \circ \alpha_2 \circ \alpha_1(w,q(w))$. In total, the automorphism $$\alpha =\alpha_1^{-1}\circ \alpha_2^{-1}\circ \alpha_3^{-1}\circ \alpha_4 \circ \alpha_3 \circ \alpha_2 \circ \alpha_1,$$ will have the properties stated in the lemma. \end{proof} \subsection{Proof of osculation lemma} Remember that we will mark a finite number of points. The points will be used to exclude affine automorphisms in the main theorem. Before we come to this point, a standard jet interpolation lemma in a parametrized form is established. This form is quite easy, since we do assume that the linear part of each prescribed jet is the identity. For general linear parts one would need (in order to get the linear part from shears) to write a holomorphic (depending on the parameter in $ \mathbb{C}^k$) invertible matrix as a product of holomorphic elementary matrices. This is the so-called Vaserstein problem posed by Gromov in \cite{Gro}.
Although it was recently solved by Ivarsson and the first author \cite{IK}, \cite{IK1}, we will restrict ourselves to the present simple version of our lemma since it is fully sufficient for the purpose of the present paper. \begin{lemma}\label{prekiss} Let $\xi_i=(i,i,\ldots ,i)\in \mathbb{C}^n$ for $i=0,1,2,\ldots ,N$ and let $P_i:\mathbb{C}^k\times \mathbb{C}^n\to \mathbb{C}^n$ be polynomial maps of degree $s$ such that $$P_i(w,z)=\xi_i+(z-\xi_i )+O(|z-\xi_i|^2)$$ for $z\to \xi_i$ and all $w\in\mathbb{C}^k$. Then there exists $\kappa \in \rm{Aut}_{hol}^k(\mathbb{C}^n)$ such that $$\pi_2(\kappa (w,z)-(w,P_i(w,z)))=O(|z-\xi_i|^{s+1})$$ for $z\to \xi_i$ with $i=0,1,2,\ldots ,N$. \end{lemma} \begin{notation} The reason that we have chosen $\xi_i=(i,i,\ldots ,i)\in \mathbb{C}^n$ for $i=0,1,2,\ldots , N$ is to ensure that our points have pairwise different projections along all coordinate directions. This is suitable for the use of shears.\end{notation} \begin{proof} The proof goes exactly as in the non-parametric case (see Step 2.10 in Forstneric \cite{F}) by induction over the number of points and the order of the jets. The beginning step of the induction (first order) is empty in our case. To realize the homogeneous part $P^j$ of order $j \ge 2$ of a polynomial map $P$ by a composition of overshears on $\mathbb{C}^n$ depending holomorphically on the parameter $w$ we need to establish the existence of finitely many linear functionals $\lambda_i$ together with vectors $v_i$ having the properties $\lambda_i (v_i) = 0$ and $|v_i| = 1$ such that \begin{equation}\label{vsum1} \begin{split} P^j (w,z) &=\sum_ic_i(w)(\lambda_i (z-\xi_1))^jv_i + \\ &+\sum_k d_k(w)(\lambda_k (z-\xi_1))^{j-1}\langle z-\xi_1,v_k\rangle v_k \end{split} \end{equation} with holomorphic functions $c_i,d_i \in \mathcal{O} (\mathbb{C}^k)$. This follows from the purely algebraic fact, Lemma \ref{basis} below.
\end{proof} We denote the complex vector space $\mathbb{C}^n$ by $V$, $S^k (V^*)$ denotes the vector space of homogeneous polynomials of degree $k$ on $V$ and $r_{k,n} = {n+k-1 \choose n-1}$ its dimension. \begin{lemma} There exist $r_{k,n}$ linear forms $\lambda_1, \lambda_2, \ldots, \lambda_{r_{k,n}} \in (\mathbb{C}^n)^*$ such that the homogeneous polynomials $(\lambda_i)^k$ of degree $k$, $i=1, 2, \ldots, r_{k,n}$, form a basis of $S^k (V^*)$. Moreover the $\lambda_i$ can be chosen from any nonempty open subset $\Omega$ of $(\mathbb{C}^n)^*$. \label{l1} \end{lemma} \begin{proof} Take any nonzero element $\lambda_0 \in \Omega \subset V^*$. The map $V^* \to S^k (V^*)$ defined by $\lambda \mapsto \lambda^k$ is $\rm Gl (V)$-equivariant and since $S^k (V^*)$ is an irreducible $\rm Gl (V)$-module the linear span of the $\rm Gl (V)$-orbit through $\lambda_0$ $${\rm span} \{ (g\cdot \lambda_0)^k, \quad g\in \rm Gl (V)\}$$ is the whole module $S^k (V^*)$. The same holds for any open part of the orbit, i.e. $${\rm span} \{ (g\cdot \lambda_0)^k,\quad g\in U\} = S^k (V^*)$$ for any open subset $U$ of $\rm Gl (V)$, since if the left hand side would be contained in some proper linear subspace $W \subset S^k (V^*)$ then by the identity theorem for holomorphic mappings the whole orbit would be contained in $W$ contradicting the irreducibility of $S^k (V^*)$. We can therefore find $r_{k,n}$ group elements $g_1, g_2, \ldots, g_{r_{k,n}}$ $\in \rm Gl (V)$ contained in some open neighborhood $U$ of the identity (with $U\cdot \lambda_0 \subset \Omega$) such that the homogeneous polynomials $(g_j \cdot \lambda_0)^k$, $j=1, 2, \ldots, r_{k,n}$, form a basis of $S^k (V^*)$.
\end{proof} \begin{lemma} There exist $n \cdot {n+k-1 \choose n-1}- {n+k-2 \choose n-1}$ linear forms $\lambda_i \in (\mathbb{C}^n)^*$ and vectors $v_i\in \mathbb{C}^n$ with $\lambda_i (v_i)=0$ and $\Vert v_i\Vert =1 $, $i=1, 2, \ldots, n \cdot {n+k-1 \choose n-1}- {n+k-2 \choose n-1}$, together with ${n+k-2 \choose n-1}$ linear forms $\tilde\lambda_j \in (\mathbb{C}^n)^*$ and vectors $w_j \in \mathbb{C}^n$ with $\tilde\lambda_j (w_j)=0$ and $\Vert w_j\Vert =1 $, $j=1, 2, \ldots, {n+k-2 \choose n-1}$, such that the homogeneous polynomial maps $$z\mapsto (\lambda_i (z))^k v_i, \quad i=1, 2, \ldots, n \cdot {n+k-1 \choose n-1}- {n+k-2 \choose n-1}$$ of degree $k$ together with the homogeneous polynomial maps $$ z\mapsto (\tilde\lambda_j (z))^{k-1} \langle z, w_j\rangle w_j, \quad j=1, 2, \ldots, {n+k-2 \choose n-1}$$ of degree $k$ form a basis of the vector space $V_k \cong S^k ((\mathbb{C}^n)^*) \otimes \mathbb{C}^n$ of homogeneous polynomial maps of degree $k$. Moreover if $v_0 \in \mathbb{C}^n$ and a non-zero functional $\lambda_0 \in (\mathbb{C}^n)^*$ with $\lambda_0 (v_0)=0$ and $\Vert v_0\Vert =1 $ and a number $\epsilon >0$ are given, then the vectors $v_i, w_j$ together with the functionals $\lambda_i,\tilde \lambda_j$ can be chosen with $\Vert v_0 -v_i\Vert <\epsilon$, $\Vert v_0 -w_j\Vert <\epsilon$ and $\Vert \lambda_0 - \lambda_i \Vert <\epsilon$, $\Vert \lambda_0 -\tilde \lambda_j \Vert <\epsilon$. \label{basis} \end{lemma} \begin{proof} Set $V=\mathbb{C}^n$.
The vector space $V_k \cong S^k (V^* ) \otimes V$ is as $\rm Gl (V)$-module isomorphic to the direct sum of two irreducible representations $W_1 \oplus W_2$, where $W_1$ is isomorphic to $S^{k-1} (V^* )$ and $W_2$ is isomorphic to the kernel of the $\rm Gl (V)$-equivariant map $$\psi : S^k (V^*) \otimes V \to S^{k-1} (V^*), \quad \quad X\mapsto \operatorname{div} X .$$ We will provide some ``section'' of $\psi$: \noindent By Lemma \ref{l1} there exist $\tilde\lambda_j \in V^*$, $j=1, 2, \ldots, {n+k-2 \choose n-1}$ ($\epsilon$-near to $\lambda_0$ if desired), such that the homogeneous polynomials $\tilde\lambda_j^{k-1}$ form a basis of $S^{k-1} (V^*)$. Choose vectors $w_j$ with $\tilde\lambda_j (w_j) = 0$ and $\Vert w_j \Vert =1$ (and $\epsilon$-near to $v_0$ if desired). For a homogeneous polynomial $p(z)$ of degree $k-1$ we write it in the basis $p(z) = \sum_{j=1}^{n+k-2 \choose n-1} d_j (\tilde\lambda_j(z))^{k-1}$ and define the section $s(p)$ by $$s(p) (z) = \sum_{j=1}^ {n+k-2 \choose n-1} d_j (\tilde\lambda_j(z))^{k-1} \langle z, w_j\rangle w_j.$$ An easy calculation shows $\psi (s(p)) = \operatorname{div} s(p) = p$. Thus the homogeneous polynomial maps $ z \mapsto (\tilde\lambda_j(z))^{k-1} \langle z, w_j\rangle w_j$, $j= 1, 2, \ldots,{n+k-2 \choose n-1}$, form a basis of some linear subspace of $S^k (V^*) \otimes V$ complementary to the kernel of $\psi$. Now take our nonzero linear functional $\lambda_0 \in V^*$ and some vector $v_0 \in V$ with $\lambda_0 (v_0) = 0$ and $\Vert v_0 \Vert = 1$. Since $\ker \psi \cong W_2$ is an irreducible $\rm Gl (V)$-module the linear span of any $\rm Gl (V)$-orbit through a nonzero point in $W_2$ is the whole vector space $W_2 \cong \ker \psi$.
Since $\lambda_0 (v_0) =0$ the element $\lambda_0^k \otimes v_0$ is such a point and like in the proof of Lemma \ref{l1} we find group elements $g_1, g_2, \ldots , g_{ n \cdot {n+k-1 \choose n-1}- {n+k-2 \choose n-1}}\in \rm Gl (V)$ contained in any given nonempty open neighborhood of the identity element such that the homogeneous polynomial maps $g_j \cdot (\lambda_0^k \otimes v_0) = (g_j \cdot \lambda_0)^k \otimes g_j\cdot v_0$ form a basis of $\ker \psi$. Defining $\lambda_i = g_i \cdot \lambda_0$ (remember $g \cdot \lambda_0 (v) := \lambda_0 ( g^{-1} v)$) and $\tilde v_i = g_i \cdot v_0, v_i= \frac { \tilde v_i}{\vert \tilde v_i \vert}$ (instead of normalizing we could have chosen the $g_j$ from the unitary group since by the identity principle in complex analysis any irreducible $\rm Gl_n$-representation is $\rm{U}_n$-invariant) we get a basis $\lambda_i^k \otimes v_i$, $i=1, 2, \ldots, n \cdot {n+k-1 \choose n-1}- {n+k-2 \choose n-1}$, of $\ker \psi$. Together with the above constructed basis of the complementary subspace it forms a basis of the vector space $V_k \cong S^k ((\mathbb{C}^n)^*) \otimes \mathbb{C}^n$ of homogeneous polynomial maps of degree $k$. \end{proof} Using these prerequisites we give the proof of Lemma \ref{KYSSLEMMAT}. \begin{proof}[Proof of Lemma \ref{KYSSLEMMAT}] Since the points $\xi_1(w),\ldots ,\xi_t(w)$ are simultaneously standardizable we can assume that $\xi_i(w)=(i,i,\ldots ,i)$ for $1\leq i \leq t$. Now we want to apply Lemma \ref{prekiss} to make $\kappa (M(w))$ osculating of order $l$. This means we have to ensure that there is for each $i$ a holomorphically depending on $w\in \mathbb{C}^k$ polynomial map $P_i(w,z)$ with \begin{equation}\label{jetform} P_i(w,z)=\xi_i+(z-\xi_i )+O(|z-\xi_i|^2). \end{equation} We then get osculation of order $l$ at $\xi_i(w)$ after applying an automorphism with this prescribed jet at $\xi_i(w)$.
For a given point $x$ in a submanifold $M$ of $\mathbb{C}^n$ the set $P(x,M)$ of $l$-jets of the form \eqref{jetform} ensuring the osculation up to order $l$ is biholomorphic to a vector space and the change of variables in the jet-bundle is affine-linear. This means we have to find a holomorphic section in a locally trivial fibration over $\mathbb{C}^k$ of the form $$\begin{matrix} \bigcup_{w\in \mathbb{C}^k}P(\xi_i(w),M(w))\\ \downarrow \\ \mathbb{C}^k \end{matrix}$$ where the fibers are biholomorphic to a vector space $\mathbb{C}^N$ and the structure group is $\mathrm{Aff}(\mathbb{C}^N)$, the group of affine linear automorphisms of $\mathbb{C}^N$. Since $\mathbb{C}^N$ is contractible a continuous section always exists and the Oka-Grauert principle (Theorem \ref{oka}) implies the existence of a holomorphic section. \end{proof} \subsection{The proof of Lemma \ref{AKlemma}} To prove Lemma \ref{AKlemma} we use the following sublemma: \begin{lemma}\label{sublemma} For every point $p=(w_0,\hat p)\in K_M=K_1\times K_2$ there is an open neighborhood $V_p\times U_p\ni p$ in $K_1\times M$ and a family of automorphisms $\psi_t$ of $\mathbb{C}^k\times \mathbb{C}^n$ parametrized by $\mathbb{C}^{N}$, where $N=N(m,n,s)=\left( \binom{m+s}{m}-(m+1)\right)(n-m)$, such that \begin{enumerate}[1.] \item $\psi_0=\rm{Id}$. \item Every $\psi_t$ satisfies 1. and 2. in Lemma \ref{AKlemma}. \item There exists an open neighborhood $T$ of $0$ in $\mathbb{C}^N$ such that \begin{multline*} \Sigma =\{ t\in T: \textrm{ There exists } p'\in V_p\times U_p \textrm{ such that }\psi_t(w,M(w))\\ \textrm{ osculates of order $l$ in } \psi_t(p') \} \end{multline*} is a set of Lebesgue measure zero. \end{enumerate} \end{lemma} \begin{proof} If $M(w)$ does not osculate of order $l$ in $p$ let $\psi_t=\rm{Id}$ for every $t\in \mathbb{C}^N$. Now suppose that $M(w)$ osculates of order $l$ in $p$.
Without loss of generality assume that $p=(w_0,0)$ and that the tangent plane is given by $T_{(w_0,0)}M(w)=\{ (z_1,\ldots ,z_m,0,\ldots ,0)\}$. Let $\pi_1:\mathbb{C}^n \to \mathbb{C}^m$ denote the projection to the $m$ first coordinates of $\mathbb{C}^n$. After a linear change of variables we can assume that $\pi_{1}(b_i(w))\neq 0\in \mathbb{C}^m$ for $1\leq i\leq q$ and for $w\in \tilde V_p$ where $\tilde V_p$ is some open neighborhood in $K_1$. To control if $\psi (M(w))$ osculates of order $l$ in some point $\psi (p')$, $p'\in \mathbb{C}^k \times M(w)$ for a given automorphism $\psi \in \rm{Aut}^k(\mathbb{C}^n)$, consider the map $F^{\psi}:\mathbb{C}^k\times M(w)\to \mathbb{C}^N$ where the coordinate functions $F^{\psi}_{\alpha ,u}$, enumerated by pairs $(\alpha ,u)$, where $\alpha$ is a multi index $\alpha =(\alpha_1,\alpha_2,\ldots ,\alpha_m)$ with $2\leq |\alpha |\leq l$ and $u\in \mathbb{N}$ satisfies $m+1\leq u \leq n$, are given by \begin{equation*} F^{\psi}_{\alpha ,u}(w,\zeta )=\det \begin{pmatrix} \frac{\partial}{\partial \zeta_1}(\psi )_1(w,\zeta ) & \dots & \frac{\partial}{\partial \zeta_1}(\psi )_m(w,\zeta ) &\frac{\partial}{\partial \zeta_1}(\psi )_u(w,\zeta ) \\ \vdots & \ddots & \vdots & \vdots \\ \frac{\partial}{\partial \zeta_m}(\psi )_1(w,\zeta ) & \dots & \frac{\partial}{\partial \zeta_m}(\psi )_m(w,\zeta ) &\frac{\partial}{\partial \zeta_m}(\psi )_u(w,\zeta ) \\ \frac{\partial}{\partial \zeta^{\alpha}}(\psi )_1(w,\zeta ) & \dots & \frac{\partial}{\partial \zeta^{\alpha}}(\psi )_m(w,\zeta ) &\frac{\partial}{\partial \zeta^{\alpha}}(\psi )_u(w,\zeta ) \end{pmatrix} \end{equation*} Here $(\psi )_i$ denotes the $i$-th $z$-coordinate function of the map $\psi :\mathbb{C}^k\times \mathbb{C}^n \to \mathbb{C}^k\times \mathbb{C}^n$ and $(\zeta_i)_{1\leq i\leq m}$ is some fixed system of local
coordinates of $M(w)$ near $p$. Then $\psi (M(w))$ osculates of order $l$ in some point $(w,\zeta )$ if and only if $F^{\psi}(w,\zeta )=0$. If we restrict our attention to a small enough neighborhood $\tilde U_p\subset K_2$, containing $0$, we can use $z_1,\ldots ,z_m$ as local coordinates on $M(w_0)$ near $p$, in fact we will use the coordinates $(w,z_1,\ldots ,z_m)$ in the restriction to $\tilde V_p\times \tilde U_p$. To construct the family of automorphisms $\psi_t$ of $\mathbb{C}^k\times \mathbb{C}^n$ we do the following: For every pair $(\alpha ,u)$ we choose a holomorphic function $h_{\alpha , u}$ on $\mathbb{C}^k\times \mathbb{C}^m$ such that \begin{enumerate}[1.] \item $(w,h_{\alpha ,u}(w,z))=(w,z^{\alpha})$ of order at least $l+1$ in $(w_0,0)$. \item $(w,h_{\alpha ,u}(w,\pi_1(b_i(w))))=(w,0)$ of order at least $l+1$ for $1\leq i\leq q$ and for all $w\in \mathbb{C}^k$. \item $(w,h_{\alpha ,u}(w,\pi_1(a_i)))=(w,0)$ for $1\leq i\leq r$ and for all $w\in \mathbb{C}^k$. \end{enumerate} Now define the map $\psi :\mathbb{C}^N\times \mathbb{C}^k \times \mathbb{C}^n \to \mathbb{C}^k \times \mathbb{C}^n$ by $$\psi(t,w,z)=(w,z+\sum_{(\alpha ,u)}t_{(\alpha ,u)}h_{\alpha ,u}(w,z_1 ,\ldots ,z_m)e_u),$$ where $e_u$ is the $u$-th unit vector, $m+1\leq u \leq n$. This construction gives that for every $t\in \mathbb{C}^N$ the map $\psi_t=\psi (t,\cdot )$ is a parametrized automorphism of $\mathbb{C}^n$ and because of conditions 2. and 3. $\psi$ fulfills conditions 1. and 2. of Lemma \ref{AKlemma}. Furthermore $\psi_0=\rm Id$. The only thing left to check is that $\psi$ fulfills condition 3.
Using the fact that $h_{\alpha ,u}(w,z)=z^{\alpha}$ we get that \begin{multline*} \frac{\partial}{\partial t_{(\alpha ,u)}}F^{\psi_t}_{\alpha ,u}|_{\substack{z=0\\ w=w_0}}=-\frac{\partial}{\partial t_{(\alpha ,u)}}\sum t_{(\alpha ,u)}\frac{\partial}{\partial z^{\alpha}}h_{\alpha ,u}(w,z)|_{\substack{z=0\\ w=w_0}}=\\ =\frac{\partial}{\partial t_{(\alpha ,u)}}\sum t_{(\alpha ,u)}\frac{\partial z^{\alpha}}{\partial z^{\alpha}}|_{\substack{z=0\\ w=w_0}}=\alpha !. \end{multline*} Moreover the derivative with respect to $t$ depending on other pairs $(\alpha ',u')$ will vanish \begin{multline*}\frac{\partial}{\partial t_{(\alpha ' ,u')}}F^{\psi_t}_{\alpha ,u}|_{\substack{z=0\\ w=w_0}}=-\frac{\partial}{\partial t_{(\alpha ',u')}}\sum t_{(\alpha ,u)}\frac{\partial}{\partial z^{\alpha}}h_{\alpha ,u}(w,z)|_{\substack{z=0\\ w=w_0}}=\\ =\frac{\partial}{\partial t_{(\alpha ',u')}}\sum t_{(\alpha ,u)}\frac{\partial z^{\alpha}}{\partial z^{\alpha}}|_{\substack{z=0\\ w=w_0}}=0, \end{multline*} whenever $u\neq u'$ or whenever $u=u', |\alpha '|\leq |\alpha |$ and $\alpha '\neq \alpha$. This implies that the map $\Phi :\mathbb{C}^N\times \mathbb{C}^k\times M\to \mathbb{C}^N$ defined by $\Phi (t,w,z)=F^{\psi_t}(w,z)$ has maximal rank near $(0,w_0,0)=(0,p)$. Thus there exists an open neighborhood $\Omega_p$ of the form $\Omega_p=T\times V_p\times U_p$ of $(0,p)$ in $\mathbb{C}^N\times \mathbb{C}^k\times M$ such that $\Phi |_{\Omega_p}$ is transversal to $0\in \mathbb{C}^N$. This implies that for almost all $t\in T$ the map $F^{\psi_t}:\mathbb{C}^k\times M(w)\to \mathbb{C}^N$ is transversal to $0$. Since $m<N$ this means that for almost all $t\in T$ the image $F^{\psi_t}(V_p\times U_p)$ does not meet $0$, i.e.\ $\psi_t((w,M))$ does not osculate of order $l$ for any $p'\in V_p\times U_p$.
\end{proof} \begin{proof}[Proof of Lemma \ref{AKlemma}] Choose finitely many open subsets $V_i\times U_i$ of $\mathbb{C}^k\times M$ together with families $\psi^i:T_i \times \mathbb{C}^k\times \mathbb{C}^n \to \mathbb{C}^k\times \mathbb{C}^n$ of automorphisms, $i=1,2,\ldots ,r$, as in Lemma \ref{sublemma} and choose compact subsets $K_i\subset V_i\times U_i$ which cover $K_M$. Since $\psi^1_0$ is the identity, for $t$ sufficiently small the automorphism $\psi^1_t$ moves no point of $K_M$ more than $\frac{\epsilon}{r}$. So we find $t_1\in T_1$ such that $|\pi_2 (\psi^1_{t_1}(w,z))-z|<\frac{\epsilon}{r}$ for every $(w,z)\in K$ and the submanifold $\psi^1_{t_1}(\mathbb{C}^k\times M)$ does not osculate of order $l$ at any point of $\psi^1_{t_1}(K_1)$. Observe that the property of not osculating of order $l$ at some point is preserved under small perturbations, i.e.\ for each compact subset $L$ of a submanifold $M$ of $\mathbb{C}^n$ which does not osculate of order $l$ at any point of $L$ there exists some $\epsilon >0$ such that for each automorphism $\psi$ of $\mathbb{C}^n$ the property $|\psi (z)-z|<\epsilon$ for every $z\in L$ implies that $\psi (M)$ remains non-osculating of order $l$ at any point of $\psi (L)$ (for holomorphic maps small perturbations in values imply small perturbations in derivatives). Hence we find a sufficiently small $t_2\in T_2$ such that first $|\pi_2 (\psi^2_{t_2}(w,z))-z|<\frac{\epsilon}{r}$ for every $(w,z)\in \psi^1_{t_1}(K)$, second the submanifold $\psi^2_{t_2}\circ \psi^1_{t_1}(\mathbb{C}^k\times M)$ does not osculate of order $l$ at any point of $ \psi^2_{t_2}\circ \psi^1_{t_1}(K_2)$ and third $\psi^2_{t_2}\circ \psi^1_{t_1}(\mathbb{C}^k\times M)$ remains non-osculating of order $l$ at any point of $ \psi^2_{t_2}\circ \psi^1_{t_1}(K_1)$.
Proceeding by induction we find an automorphism $\psi :=\psi^r_{t_r}\circ \psi^{r-1}_{t_{r-1}}\circ \cdots \circ \psi^1_{t_1}$ moving no point of $K$ more than $\epsilon$ and such that $\psi (\mathbb{C}^k\times M)$ does not osculate of order $l$ at any point of $\psi \left( \cup_{i=1}^rK_i\right) \supset \psi (K_M)$. Since all automorphisms $\psi^i_t$ satisfy properties 1. and 2., $\psi$ satisfies them as well. \end{proof} \end{document}
\begin{document} \title{Fr\"ohlich-coupled qubits interacting with fermionic baths} \author{Erik Aurell} \email{[email protected]} \affiliation{KTH – Royal Institute of Technology, AlbaNova University Center, SE-106 91 Stockholm, Sweden} \affiliation{Faculty of Physics, Astronomy and Applied Computer Science, Jagiellonian University, 30-348 Krak\'ow, Poland} \author{Jan Tuziemski} \email{[email protected]} \altaffiliation[On leave from ]{Department of Applied Physics and Mathematics, Gdansk University of Technology} \affiliation{Department of Physics, Stockholm University, AlbaNova University Center, Stockholm SE-106 91 Sweden} \affiliation{Nordita, Royal Institute of Technology and Stockholm University,Roslagstullsbacken 23, SE-106 91 Stockholm, Sweden} \date{\today} \begin{abstract} We consider a quantum system such as a qubit, interacting with a bath of fermions as in the Fr\"ohlich polaron model. The interaction Hamiltonian is thus linear in the system variable, and quadratic in the fermions. Using the recently developed extension of Feynman-Vernon theory to non-harmonic baths we evaluate the quadratic and quartic terms in the influence action. We find that for this model the quartic term vanishes by symmetry arguments. Although the influence of the bath on the system is of the same form as from bosonic harmonic oscillators up to effects of sixth order in the system-bath interaction, the temperature dependence is nevertheless rather different, unless rather contrived models are considered.
\end{abstract} \pacs{03.65.Yz,05.70.Ln,05.40.-a} \keywords{Stochastic thermodynamics, quantum power operators, quantum heat switches} \title{Fr\"ohlich-coupled qubits interacting with fermionic baths} \section{Introduction} \label{sec:intro} The theory of open quantum systems has attracted increased attention in recent years, motivated by advances in quantum information theory~\cite{Wilde-book} and emerging quantum technologies~\cite{DevoretWallrafMartinis,WendinShumeiko}. For these to become practically useful in a broad range of applications a main roadblock to overcome is the strong tendency of large quantum systems to turn classical due to interactions with the rest of the world~\cite{HarocheRaimond1996,Zurek2003,Schlosshauer-book}. Open quantum systems encompass the various concepts and analytic and numerical techniques that have been developed to describe and estimate the development of a quantum system interacting with an environment~\cite{Weiss-book,open}. A special place in open quantum system theory belongs to problems where a general system (the system of interest) interacts linearly with one or several baths of harmonic oscillators. One reason is that resistive elements in a small electrical circuit can be modeled as many LC elements in parallel, of which each one obeys the equation of a harmonic oscillator. At very low temperature as in quantum technology applications, these harmonic oscillators should be quantised~\cite{Devoret1995}. A related reason is the number of physical environments (phonons, photons) that can also be directly described this way. In the Lagrangian formulation of quantum mechanics~\cite{FeynmanHibbs} the development of a wave function (unitary operator $U$) is described by a path integral, while the development of a density matrix (quantum operation $U\cdot U^{\dagger}$) is described by two path integrals, one (forward path) for $U$ and one (backward path) for $U^{\dagger}$.
A third reason why harmonic oscillator baths are interesting is that the paths of such baths can be integrated out, yielding the famous Feynman-Vernon theory~\cite{Feynman1963}. The only trace of the bath (or baths) is then the Feynman-Vernon action, quadratic terms in the forward and backward paths. Nevertheless, most physical environments do only approximately or not at all consist of degrees of freedom that can be described as bosonic harmonic oscillators. Conduction band electrons in normal metals are for instance obviously fermions. Even if these fermions by themselves are free (and hence can be treated as fermionic harmonic oscillators), in the open quantum system context it is their interaction with the system of interest that counts. If that system is a quantum variable such as a qubit, the simplest interaction that can be considered is quadratic in the fermionic variables and linear in the system of interest. Such a term in an interaction Hamiltonian is $X a b$, where $X$ is the quantum variable of the system of interest and $a$ and $b$ are creation or destruction operators of the fermions. Interaction Hamiltonians of this type appear in the Fr\"ohlich polaron model of the motion of a conduction electron in an ionic crystal~\cite{Froelich54,DevreeseAleksandrov2009}. In Feynman's variational treatment, one electron is modelled as a non-relativistic particle interacting with a bath of bosonic harmonic operators which are then integrated out. Here we are interested in the opposite case where one bosonic degree of freedom, \textit{i.e.} the qubit, describes the system of interest, and we want to ``integrate out'' the fermions. One problem with such an approach is that fermionic functional integrals (Grassmann integrals) are mathematically non-trivial objects.
Another is that for the Fr\"ohlich-like coupling both the bath Hamiltonian and the interaction are quadratic in the fermionic degrees of freedom; the result is hence two fermionic functional determinants depending on the forward and backward histories of the system of interest acting as external fields. An approach to similar problems, used for a long time in condensed matter theory, is Keldysh techniques~\cite{KELDYSH1,KELDYSH2}. While essentially equivalent to Feynman-Vernon theory, Keldysh theory was developed for other applications, and encompassed from the start fermionic baths. The kernels of the quadratic terms in Feynman-Vernon theory can thus be identified with pair-wise bath correlation functions, in Keldysh theory referred to as ``dressed non-equilibrium Greens functions''. Here we will instead follow the recently developed extension of the Feynman-Vernon theory to non-harmonic baths~\cite{AurellGoyalKawai2019}. One advantage of this approach is that it gives access also to terms in the Feynman-Vernon influence functional higher than quadratic. Let us remark that from the functional integral point of view it is obvious that such terms must exist: while the bath can always be integrated out in principle, it is only for harmonic (bosonic or fermionic) baths that all the integrals are Gaussian and can be done in closed form. A main result of~\cite{AurellGoyalKawai2019} is that higher-order Feynman-Vernon terms depend on cumulants of bath correlation functions. The first non-standard term in the extended Feynman-Vernon theory for the dynamics of the system hence involves fourth-order cumulants of the correlation functions of the compound bath variables $a b$, \textit{i.e.} eighth-order fermionic correlations. Perhaps surprisingly, we find that for the Fr\"ohlich-coupled system these terms actually cancel in the influence functional. \\\\ The paper is organized as follows.
In Section~\ref{sec:problem} we state the problem and make general remarks on what one can expect of the solution. In Section~\ref{sec:spin-boson} we assume as a concrete example that the variable is a qubit (a two-state system) coupled to the bath as in the spin-boson problem, and state more precisely the system-bath interaction we study in the rest of the paper. In Section~\ref{sec:presentation} we present the structure of the first term of the Feynman-Vernon action and state that the second term in the expansion of the action vanishes in our model. Here we also sketch calculations of bath correlation functions of interest in our theory. In Section~\ref{sec:analysis} the standard (second order) Feynman-Vernon action of the considered model is compared to that of a harmonic bosonic bath. Appendices \ref{sec:non-harmonic} and \ref{sec:generalized-FV} contain summaries of technical details from~\cite{AurellGoyalKawai2019}, included for completeness. Appendix \ref{app:4th} presents the detailed argument that in the model considered the fourth order cumulant vanishes, and therefore there is no fourth order contribution to the generalized Feynman-Vernon action. \section{Statement of the problem} \label{sec:problem} Let us consider a system consisting of one bosonic variable and a bath of free fermions as discussed above. That means a Hamiltonian \begin{equation} \label{eq:bath-free} \hat{H}_{TOT} = \hat{H}_{S} + \hat{H}_{INT} + \hat{H}_{B}, \end{equation} where the first term $\hat{H}_{S}$ is the Hamiltonian of the system. For a bosonic variable the evolution operator corresponding to $\hat{H}_{S}$ can be written as a path integral \begin{equation} U_S=e^{-\frac{i}{\hbar}H_S \tau} = \int {\cal D}X e^{\frac{i}{\hbar}S[X]}, \end{equation} where $S[X]$ is the action of path $X$.
The evolution operator acting on density matrices is similarly a double path integral over a ``forward path'' and a ``backward path'' \begin{equation} \label{eq:quantum-map-pure} U_S\cdot U_S^{\dagger} = \int {\cal D}X {\cal D}Y e^{\frac{i}{\hbar}S[X]-\frac{i}{\hbar}S[Y]} \cdot \end{equation} where the slot marks where the initial density matrix is to be inserted. The free bath Hamiltonian in \eqref{eq:bath-free} is \begin{equation} \label{eq:H_B} \hat{H}_{B} = \sum_k \epsilon_k \hat{c}^{\dagger}_k \hat{c}_k, \end{equation} where $\hat{c}^{\dagger}_k$ ($\hat{c}_k$) is the creation (destruction) operator of fermions, and the interaction Hamiltonian is of the type (below we will use a more specific model) \begin{equation} \hat{H}_{INT} = X \sum_{k,l} g_{kl} \hat{c}^{\dagger}_k \hat{c}_l. \end{equation} Initially the bath and the system are assumed independent, and the bath is in thermal equilibrium at inverse temperature $\beta$. The evolution operator of the system is the quantum map (or quantum operation) given by \begin{equation} \label{eq:quantum-map-dissipative} \Phi \cdot = \hbox{Tr}_B\left[U\left(\rho_B(\beta)\otimes \cdot\right)U^{\dagger}\right] \end{equation} where $\hat{U}=e^{-\frac{i}{\hbar}(\hat{H}_S+\hat{H}_{INT}+\hat{H}_B) \tau}$ is the total evolution operator of the combined system and bath, $\rho_B(\beta)$ is the initial equilibrium density matrix of the bath, and $\cdot$ marks where to insert the initial density matrix of the system. Suppose that the evolution of the bath can also be written as a double path integral.
If so the bath can be integrated out, so that we have \begin{equation} \label{eq:quantum-map-dissipative-2} \Phi \cdot = \int {\cal D}X {\cal D}Y e^{\frac{i}{\hbar}S[X]-\frac{i}{\hbar}S[Y]} {\cal F}[X,Y] \cdot \end{equation} The new term compared to \eqref{eq:quantum-map-pure} is the Feynman-Vernon influence functional, \textit{i.e.} what remains after integrating out the bath paths while the system paths are held fixed. Although important general properties of the influence functional were stated in~\cite{Feynman1963}, in practice this formalism has mostly been used when the baths are free bosons interacting linearly with a system. In that case all the path integrals over the baths are Gaussians, and ${\cal F}$ can be written as $e^{\frac{i}{\hbar}S_i[X,Y]-\frac{1}{\hbar}S_r[X,Y]}$ where $S_i[X,Y]$ and $S_r[X,Y]$ are two explicit quadratic functionals of the forward and backward system paths, usually known as the Feynman-Vernon action. On the other hand, it is not necessary to assume that the bath can be represented as path integrals. As reviewed in~\cite{open} and rederived in \cite{AurellGoyalKawai2019}, the super-operator $\Phi$ in \eqref{eq:quantum-map-dissipative} can be computed perturbatively, and the terms translated back to a double path integral over the system. In this way one can identify the kernels in the actions $S_i[X,Y]$ and $S_r[X,Y]$ as being equilibrium pair correlations in the bath. Importantly this holds for any equilibrium bath. The price to pay if the bath is not harmonic is that there are higher-order terms that are respectively fourth, sixth etc.\ order in the system variables $X$ and $Y$.
\section{A qubit coupled to a fermionic bath as in the spin-boson problem} \label{sec:spin-boson} For concreteness, and since this would be a main application to quantum information science, we now assume that the system of interest is a qubit (a two-state system) governed by a system Hamiltonian \begin{equation} \label{eq:H_S} \hat{H}_{S} = \frac{\varepsilon}{2}\hat{\sigma}_z + \hbar\frac{\Delta}{2}\hat{\sigma}_x . \end{equation} The evolution operator $e^{-\frac{i}{\hbar}H_S \tau}$ can be represented by inserting resolutions of the identity between very small time increments $\delta\tau$. The first term in \eqref{eq:H_S} then only contributes if the state stays the same between two small time increments; that contribution is $e^{\pm \frac{i}{\hbar}\frac{\varepsilon}{2} \delta\tau}$. The parameter $\varepsilon$ is hence the \textit{level splitting}. The second term in \eqref{eq:H_S} on the other hand only contributes if the state changes over a small time increment, and the contribution is $(\pm i\frac{\Delta}{2} \delta\tau)$. The parameter $\Delta/2$, which has dimension of a rate, is hence the \textit{tunnelling element}. The paths in $X$ and $Y$ in the path integral in \eqref{eq:quantum-map-pure} are nothing but a way to represent $e^{-\frac{i}{\hbar}\hat{H}_S \tau}$ and $e^{\frac{i}{\hbar}\hat{H}_S \tau}$, and are hence \textit{piece-wise constant}, equal to $\pm 1$. Before continuing we note a clash of conventions: $X$ and $Y$ are in the literature on open quantum systems used to refer to the history of a system variable which is integrated over. In our case these are the histories (forward and backward) of a representation of $\hat{\sigma}_z$, and $\hat{X}$ is also used for the system part of the interaction Hamiltonian. This is the convention we follow.
In the quantum information literature $\hat{X}$ and $\hat{Y}$ instead refer to the operators $\hat{\sigma}_x$ and $\hat{\sigma}_y$, while the operator $\hat{\sigma}_z$ is written $\hat{Z}$. We do not follow this convention. Now, it is convenient to include the contributions from the level splitting in the actions in \eqref{eq:quantum-map-pure}, and the contributions from the tunnelling elements in the path measures ${\cal D}X$ and ${\cal D}Y$. If so ${\cal D}X$ and ${\cal D}Y$ are nothing but the path probabilities of (classical) Poisson point processes, except that the jump rates are purely imaginary. That is, we can interpret $X$ as $s_i,n,t_1,\ldots,t_n$ where $s_i$ is the initial state (up or down), $n$ is the number of jumps and $t_1<t_2<\ldots$ are the jump times. The purely imaginary path measures are then \begin{equation} \label{eq:DX} \int {\cal D}X \left(\cdot\right) = \sum_n \prod_{s=1}^n (\pm i\frac{\Delta}{2}) \int dt_s \left(\cdot\right). \end{equation} The advantage of the above is that it can accommodate also a coupling to a bath when that coupling is proportional to $\sigma_z$. When the bath is composed of bosonic harmonic oscillators this is the \textit{spin-boson problem}; the above path integral was developed by Leggett and collaborators for that problem in \cite{Leggett87}. For our problem we will consider the interaction Hamiltonian \begin{eqnarray} \label{eq:H-INT-Schrodinger-rep} \hat{H}_{INT} &=& \sum_{\substack{k,l}}g_{kl}\hat{X}\left( \hat{c}_k + \hat{c}_k^{\dagger} \right) \left( \hat{c}_l + \hat{c}_l^{\dagger}\right), \end{eqnarray} where $\hat{X}$ ($\hat{\sigma}_z$) is the system part of the interaction, $\hat{c}^{\dagger}_k,\hat{c}^{\dagger}_l$ ($\hat{c}_k,\hat{c}_l$) are the creation (destruction) operators of two fermions, and $g_{kl}$ is a coupling constant. Due to the anti-commutation rules for fermions we can set $g_{kl} = -g_{lk}$.
For the following sections it is convenient to introduce an interaction representation based on \eqref{eq:H_B} and \eqref{eq:H_S}. Bath destruction operators transform as \begin{equation} \hat{c}_i(t) = e^{\frac{i}{\hbar}\hat{H}_B t} \hat{c}_i e^{-\frac{i}{\hbar}\hat{H}_B t} = \hat{c}_i e^{-i\omega_i t}, \end{equation} where $\omega_i \equiv \epsilon_i/\hbar$, and bath creation operators as $\hat{c}^{\dagger}_i(t) = \hat{c}^{\dagger}_i e^{i\omega_i t}$. The explicit form of the transformed system operator $\hat{X}$ in \eqref{eq:H-INT-Schrodinger-rep} is not relevant for further considerations. In this representation the interaction Hamiltonian is \begin{eqnarray} \label{eq:H-INT-interaction-rep} \hat{H}_{INT}(t) &=& \sum_{k,l} g_{kl}X(t)\left( \hat{c}_k e^{-i\omega_k t}+ \hat{c}_k^\dagger e^{i\omega_kt} \right)\nonumber \\ && \left( \hat{c}_l e^{-i\omega_l t}+ \hat{c}_l^\dagger e^{i\omega_l t}\right). \end{eqnarray} We also assume that the bath is initially in a thermal state where $\rho = e^{-\beta \sum_k E_k \hat{c}^\dagger_k \hat{c}_k}$ with $\beta \equiv 1/(k_B T)$. \section{The generalized Feynman-Vernon action terms} \label{sec:presentation} In most cases discussed in the literature the Feynman-Vernon action is of the second order in the system paths. This occurs e.g. for a system interacting linearly with a bath of free bosons. However, for other types of baths and couplings higher order terms in the action appear. A systematic way of dealing with such situations was formulated in \cite{AurellGoyalKawai2019} and for the convenience of the reader is summarized in Appendices \ref{sec:non-harmonic} and \ref{sec:generalized-FV}. In that approach the total Feynman-Vernon action is expressed as a sum of different-order terms (i.e. involving different numbers of system paths).
In Appendix \ref{sec:generalized-FV} we show that the expression for the usual quadratic Feynman-Vernon action can be rewritten such that \begin{eqnarray} S^{(2)}=-\frac{\hbar}{2}\int_{t_0}^{t}\dd{t_1} \int_{t_0}^{t}\dd{t_2} C(t_1,t_2) \mathcal{J}\left[Y_{t_2},Y_{t_1},X_{t_1},X_{t_2}\right], \nonumber \label{eq:quadraticFV} \end{eqnarray} where $\mathcal{J}\left[\ldots\right]$ is a quadratic functional over paths of the system, whose explicit form is given by Eq. (\ref{eq:Adquadratic}), $X_s$ and $Y_s$ are the forward and backward paths of the system evaluated at time $s$, and $C(t_1,t_2)$ is the bath correlation function. For the problem considered here it reads \begin{eqnarray} \label{eq:2times} C(t_1,t_2) &=& \\ &&-\sum_{k,l} g^2_{kl}\langle \hat{Q}_k(t_1) \hat{Q}_k(t_2) \rangle \langle \hat{Q}_l(t_1) \hat{Q}_l(t_2) \rangle, \nonumber \end{eqnarray} where $\hat{Q}_k(t_i) \equiv \hat{c}_k e^{-i\omega_k t_i}+ \hat{c}_k^\dagger e^{i\omega_k t_i} $ and \begin{eqnarray} &&\left \langle \hat{Q}_k(t_1) \hat{Q}_k(t_2) \right \rangle \equiv \\ \nonumber &&\cos \omega_k \left(t_1-t_2\right) -i \sin\omega_k \left(t_1-t_2\right) \tanh {\frac{\beta E_k}{2}}, \end{eqnarray} is the thermal expectation value of fermionic operators. The derivation of the above result relies on two simple facts. The first is that, in general, consecutive action terms depend on the following bath correlation functions \begin{equation} \begin{aligned} & C(t_1,\ldots,t_{2n}) = \sum_{k_1, \ldots, k_{2n}, l_1, \ldots, l_{2n}} g_{k_1 l_1} \ldots g_{k_{2n} l_{2n}} \times\\ &Tr(\hat{Q}_{k_1}(t_1)\hat{Q}_{l_1}(t_1) \dots \hat{Q}_{k_{2n}}(t_{2n})\hat{Q}_{l_{2n}}(t_{2n}) \rho ). \end{aligned} \end{equation} The above is non-zero only if the number of all fermion indices $k$'s and $l$'s is even.
For the quadratic action term we find that the only non-zero contribution is \begin{equation} \begin{aligned} &Tr(\hat{Q}_{k}(t_1)\hat{Q}_{l}(t_1)\hat{Q}_{k}(t_2)\hat{Q}_{l}(t_2)\rho ) = \\ &-Tr(\hat{Q}_k(t_1)\hat{Q}_k(t_2) \hat{Q}_l(t_1)\hat{Q}_l(t_2) \rho ), \end{aligned} \end{equation} from which Eq. (\ref{eq:2times}) immediately follows. The third order term is automatically zero as it contains an odd number of indices $k$ and $l$. The fourth order term reads \begin{eqnarray} \label{eq:operatorFV4thMT} &&S^{(4)}=\frac{(-i)^{4}}{4 !\hbar^4} \int_{t_{i}}^{t_{f}} \mathrm{d} t_{1} \int_{t_{i}}^{t_{f}} \mathrm{d} t_{2} \int_{t_{i}}^{t_{f}} \mathrm{d} t_{3} \int_{t_{i}}^{t_{f}} \mathrm{d} t_{4} \sum_{d_{1}, d_{2}, d_{3},d_{4}} \\ \nonumber &&G_{4}^{d_{1}, d_{2} ; d_{3}, d_{4}}\left(t_{1}, t_{2}, t_{3}, t_4\right) \mathcal{X}_{\mathrm{s}}^{d_{1}}\left(t_{1}\right) \mathcal{X}_{\mathrm{s}}^{d_{2}}\left(t_{2}\right) \mathcal{X}_{\mathrm{s}}^{d_{3}}\left(t_{3}\right) \mathcal{X}_{\mathrm{s}}^{d_{4}}\left(t_{4}\right), \end{eqnarray} where \begin{equation} \label{eq:4thcummulant} \begin{aligned} &G_{4}^{d_{1}, d_{2}, d_{3}, d_{4}}\left(t_{1}, t_{2}, t_{3}, t_{4}\right) =\\&C^{d_{1}, d_{2}, d_{3}, d_{4}}\left(t_{1}, t_{2}, t_{3}, t_{4}\right)-C^{d_{1}, d_{2}}\left(t_{1}, t_{2}\right) C^{d_{3}, d_{4}}\left(t_{3}, t_{4}\right) \\ &-C^{d_{1}, d_{3}}\left(t_{1}, t_{3}\right) C^{d_{2}, d_{4}}\left(t_{2}, t_{4}\right)-C^{d_{1}, d_{4}}\left(t_{1}, t_{4}\right) C^{d_{2}, d_{3}}\left(t_{2}, t_{3}\right), \end{aligned} \end{equation} is the fourth order super-operator cumulant involving super-operator correlation functions of the bath \begin{eqnarray} \begin{aligned} &C^{d_{1}, \cdots, d_{2n}}\left(t_{1}, \cdots, t_{2n}\right) = \\& \sum_{k_1, \ldots, k_{2n}, l_1, \ldots, l_{2n}} g_{k_1 l_1} \ldots g_{k_{2n} l_{2n}} \times \\
&Tr\left[\overleftarrow{\mathcal{T}} (\hat{Q}^{d_1}_{k_1}(t_1)\hat{Q}^{d_2}_{l_1}(t_1) \dots \hat{Q}^{d_{2n-1}}_{k_{2n}}(t_{2n})\hat{Q}^{d_{2n}}_{l_{2n}}(t_{2n}) ) \rho\right]. \end{aligned} \end{eqnarray} Indices $d_{i}=\pm$ indicate on which side of the environment density matrix an operator acts (left or right for $+$, $-$ respectively). From now on $d_{1}, \cdots, d_{4}$ will be fixed and the dependence on them will be dropped. We show that, due to cancellations, there is no fourth order contribution to the Feynman-Vernon action. Here we present the main steps of the argument; the details can be found in Appendix \ref{app:4th}. First of all one needs to calculate the fourth order correlation function, which for our model reads \begin{eqnarray} \label{eq:4timecorfunMT} C(t_1,t_2,t_3,t_4) = \sum_{k_1,l_1, \ldots k_4,l_4}g_{k_1l_1} \ldots g_{k_4l_4} \times \\ Tr \left[\hat{Q}_{k_1}(t_1)\hat{Q}_{l_1}(t_1)\ldots \hat{Q}_{k_4}(t_4)\hat{Q}_{l_4}(t_4) \rho \right]. \nonumber \\ \end{eqnarray} Note that integration in Eq. (\ref{eq:operatorFV4thMT}) is performed with respect to un-ordered times. To avoid confusion, the time-ordered times will be referred to as $s_i$. The non-zero terms in the sum Eq. (\ref{eq:4timecorfunMT}) are those in which a given index e.g. $k_i$ appears an even number of times. Therefore we can distinguish the following cases:\\ 1. Pair-wise groupings. Here a given index appears only twice. In Appendix \ref{app:4th} we show that it is sufficient to consider forming pairs out of $k_i$ and $l_j$ indices respectively. An example of such a grouping is $k_1=k_2=k,\; k_3=k_4=k'$ and a similar pairing for $l_j$. This term reads \begin{eqnarray} g^2_{kl}g^2_{k'l'}Tr&&\left[\hat{Q}_{k}(s_1)\hat{Q}_{k}(s_2)\hat{Q}_{k'}(s_3)\hat{Q}_{k'}(s_4)\right. \\ \nonumber &&\times \left.
\hat{Q}_{l}(s_1)\hat{Q}_{l}(s_2)\hat{Q}_{l'}(s_3)\hat{Q}_{l'}(s_4) \right] = \\ g^2_{kl}g^2_{k'l'}Tr&&\left[ [12]_{k}[34]_{k'} [12]_{l}[34]_{l'} \right], \end{eqnarray} where the expression was written using time ordered times $s_i$ and we introduced the new notation $[ij]_k \equiv \hat{Q}_{k}(s_i)\hat{Q}_{k}(s_j)$, which will be helpful in further considerations. If the super-operator indices $d_{1}, \cdots, d_{4}$ are fixed we can relate time-ordered times to unconstrained times as $s_1=t_x,\; s_2=t_y, \; s_3=t_z, \; s_4=t_w$. Now we consider a new time ordering (depending on $d_1, \ldots, d_4$) of operators $\hat{Q}_{m}(s_i)$, where $m \in \{k,k',l,l' \}$ \begin{eqnarray} S_{d,m}(a,b) = T_{d,m}(x,y) = &&\overrightarrow{\mathcal{T}} \prod_{i\in (x,y); d_i=-} Q_m(t_i) \times \\ &&\overleftarrow{\mathcal{T}}\prod_{j\in (x,y); d_j=+} Q_m(t_j), \end{eqnarray} if the order of the operators is the same as $(x, y)$, and \begin{eqnarray} S_{d,m}(a,b) = -T_{d,m}(x,y), \end{eqnarray} for the opposite order. In this way we can rewrite the term $[12]_{k}[34]_{k'}$ as $S(1234 \rightarrow xyzw)S_{d,k}(x,y)S_{d,k}(w,z)$, where $S(p \rightarrow q)$ is a permutation sign. Subsequently, in Appendix \ref{app:4th} we show that pair-wise grouping terms can be rewritten as a sum over all such permutations. We examine properties of those permutations under change of time variables and exchange of indices and show which terms cancel. The final contribution from the pair-wise groupings is found to be \begin{eqnarray} \label{eq:counterIresMT} \sum_{k,l,k',l'}g_{kl}^2 g_{k'l'}^2 &&\left( \left \langle 12 \right \rangle_{k} \left \langle 34 \right \rangle_{k'} \left \langle 12 \right \rangle_{l} \left \langle 34 \right \rangle_{l'} \right.
+ \nonumber \\ && \left \langle 13 \right \rangle_{k} \left \langle 24 \right \rangle_{k'} \left \langle 13 \right \rangle_{l} \left \langle 24 \right \rangle_{l'} + \nonumber \\ && \left. \left \langle 14 \right \rangle_{k} \left \langle 23 \right \rangle_{k'} \left \langle 14 \right \rangle_{l} \left \langle 23 \right \rangle_{l'} \right), \end{eqnarray} where $\left \langle ab \right \rangle_{m}\equiv \left \langle \hat{Q}_m(t_a) \hat{Q}_m(t_b) \right \rangle $. A more detailed discussion can be found in Appendix \ref{sub:subI}. \\ 2. Four $k$ grouping, pair-wise $l$ grouping. An example of such a term (in the operator notation) is \begin{equation} \sum_{k,l,l'}g_{kl}^2 g_{kl'}^2 Tr \left[[1234]_k \left([12]_{l}[34]_{l'}-[13]_{l}[24]_{l'}+[14]_{l}[23]_{l'}\right)\right]. \end{equation} The $k$-th part of this expression can be evaluated using Wick's theorem. Then one applies essentially the same arguments as those mentioned in the previous case, as the reasoning does not rely on the particular arrangement of $k,l$ indices. Thus the final contribution of those terms is found to be \begin{eqnarray} \label{eq:caseIIresMT1} \sum_{k,l,l'}g_{kl}^2 g_{kl'}^2&&\left(\left \langle 12 \right \rangle_{k} \left \langle 34 \right \rangle_{k} \left \langle 12 \right \rangle_{l} \left \langle 34 \right \rangle_{l'} \right. + \nonumber \\ &&\left \langle 13 \right \rangle_{k} \left \langle 24 \right \rangle_{k} \left \langle 13 \right \rangle_{l} \left \langle 24 \right \rangle_{l'} +\nonumber \\ &&\left. \left \langle 14 \right \rangle_{k} \left \langle 23 \right \rangle_{k} \left \langle 14 \right \rangle_{l} \left \langle 23 \right \rangle_{l'} \right). \end{eqnarray} \\ 3. Four $k$,$l$ grouping.
In the operator notation we can write \begin{eqnarray} \sum_{kl}g^4_{kl}Tr \left[[1234]_k [1234]_l\right]. \end{eqnarray} We use Wick's theorem and arguments from the previous cases to show that these terms equal \begin{eqnarray} \label{eq:caseIIIresMT1} \sum_{k,l}g_{kl}^4&&\left(\left \langle 12 \right \rangle_{k} \left \langle 34 \right \rangle_{k} \left \langle 12 \right \rangle_{l} \left \langle 34 \right \rangle_{l} \right. + \nonumber \\ &&\left \langle 13 \right \rangle_{k} \left \langle 24 \right \rangle_{k} \left \langle 13 \right \rangle_{l} \left \langle 24 \right \rangle_{l} + \nonumber \\ &&\left. \left \langle 14 \right \rangle_{k} \left \langle 23 \right \rangle_{k} \left \langle 14 \right \rangle_{l} \left \langle 23 \right \rangle_{l} \right). \end{eqnarray} Finally we need to subtract from the above results the counter-terms from Eq. (\ref{eq:4thcummulant}), i.e. the products of two-time correlation functions. Direct calculation shows (see Appendix \ref{app:4th}) that there is a total cancellation of those terms. As a result, for the considered model there is no contribution from the fourth order term. \section{Physical analysis of the quadratic action terms} \label{sec:analysis} In this section we analyze the quadratic term of the action. Our aim is to compare it to the action for a harmonic bosonic bath that is linearly coupled to a system. The easiest way of doing this is by rewriting the Feynman-Vernon action with the help of the imaginary and real parts of the kernel, $k^I(t)$ and $k^R(t)$ respectively. The general expression reads \begin{eqnarray} S^{(2)} =-\frac{\hbar}{2}\int_{t_0}^{t}\dd{t} \int_{t_0}^{t}\dd{s} &&\left(X_t-Y_t \right) k_{R}(t-s)\left(X_s+Y_s \right) +\\ &&\left(X_t-Y_t \right) k_{I}(t-s)\left(X_s-Y_s \right), \end{eqnarray} where $X_t,Y_t$ correspond to the forward and backward paths of a system operator.
For the fermionic bath considered here the kernels $k^I(t)$ and $k^R(t)$ are respectively: \begin{equation} \begin{aligned} &k^I_F(t_1-t_2)= \\ &2i \sum_{k,l} g^2_{kl} \left[\sin \left[\omega_k \left( t_1-t_2\right)\right] \cos \left[\omega_l \left( t_1-t_2\right)\right] \tanh \frac{\beta E_k}{2} \right. \\ & \left.+\cos \left[ \omega_k \left( t_1-t_2\right)\right] \sin \left[ \omega_l \left( t_1-t_2\right)\right] \tanh \frac{\beta E_l}{2} \right] \\ &k^R_F(t_1-t_2)= \\ -&2 \sum_{k,l} g^2_{kl} \left[\cos \left[ \omega_k \left( t_1-t_2\right)\right] \cos \left[ \omega_l \left( t_1-t_2\right)\right] \right. \\ & \left.-\sin \left[ \omega_k \left( t_1-t_2\right)\right] \sin \left[\omega_l \left( t_1-t_2\right)\right] \tanh \frac{\beta E_k}{2} \tanh \frac{\beta E_l}{2} \right], \end{aligned} \end{equation} whereas for bosonic baths coupled linearly to the system (see e.g. \cite{open}): \begin{equation} \begin{aligned} &k^I_{B}(t_1-t_2)=i\sum_{k} \frac{c^2_{k}}{2 m_{k} \omega_{k}} \sin \omega_{k}\left(t_1-t_2\right)\\ &k^R_{B}(t_1-t_2)=\sum_{k} \frac{c^2_{k} }{2 m_{k} \omega_{k}} \operatorname{coth}\left(\frac{\beta \omega_{k} }{2}\right) \cos \omega_{k}\left(t_1-t_2\right). \end{aligned} \end{equation} Let us now discuss differences and similarities between those expressions. Imaginary kernels modify the action and hence describe dissipation. For harmonic bosonic baths the imaginary kernel is temperature independent, which is not the case for the model considered here. However, we can consider two temperature regimes with a simpler behavior.
In the low temperature regime ($\beta \gg 1$) the fermionic kernel resembles the bosonic one, $k^I(s-u)\approx 2 i \sum_{k,l} g^2_{kl} \sin\left[\left(\omega_k+\omega_l\right)\left(s-u\right)\right]$, with the frequency of a bosonic mode replaced by the sum of frequencies of the interacting fermions. In the opposite regime, i.e. high temperatures ($\beta \ll 1$), the dissipation kernel vanishes. On the other hand, the real kernel introduces noise and is responsible for the decoherence process. In the bosonic case the decoherence strength increases with temperature. For the fermionic model in the low temperature limit ($\beta \gg 1$) the real kernel $k^R(s-u)\approx -2 \sum_{k,l} g^2_{kl} \cos\left[\left(\omega_k+\omega_l\right)\left(s-u\right)\right] $ is similar to the bosonic one. However, the magnitude of the fermionic kernel does not grow with temperature: The high temperature limit ($\beta \ll 1$) of the real kernel reads $k^R(s-u)\approx -2\sum_{k,l} g^2_{kl} \cos \left[ \omega_k \left( s-u\right)\right] \cos \left[ \omega_l \left( s-u\right) \right] $. As we can see, at low temperatures the fermionic bath behaves similarly to the bosonic one and the differences between them are most important in the high temperature regime. The formalism described here is general and can be applied also to bosonic systems coupled quadratically to a free bosonic bath. In such a case fermionic operators $\hat{c}_k,\hat{c}^{\dagger}_k$ are replaced with their bosonic counterparts $\hat{a}_k,\hat{a}^{\dagger}_k$ that obey the canonical commutation relations $ \left[\hat{a}_k,\hat{a}^{\dagger}_l \right] =\delta_{k,l}$ (other commutators vanish). Apart from this change the form of the interaction Hamiltonian Eq. (\ref{eq:H-INT-Schrodinger-rep}) and the free bath Hamiltonian Eq. (\ref{eq:H_B}) remain the same.
Therefore, the structure of the results is similar to the discussed fermionic case, and the differences stem from the different (commutation) relations for the bosonic operators. In particular we find that the second order action for the bi-linear coupling to the bosonic bath is \begin{equation} \begin{aligned} &k^I_{BB}(t_1-t_2)= \\ &2i \sum_{k,l} g^2_{kl} \left[\sin \left[\omega_k \left( t_1-t_2\right)\right] \cos \left[\omega_l \left( t_1-t_2\right)\right] \coth \frac{\beta E_k}{2} \right. \\ & \left.+\cos \left[ \omega_k \left( t_1-t_2\right)\right] \sin \left[ \omega_l \left( t_1-t_2\right)\right] \coth \frac{\beta E_l}{2} \right] \\ &k^R_{BB}(t_1-t_2)= \\ &2 \sum_{k,l} g^2_{kl} \left[\cos \left[ \omega_k \left( t_1-t_2\right)\right] \cos \left[ \omega_l \left( t_1-t_2\right)\right] \times \right. \\ & \left. \coth \frac{\beta E_k}{2} \coth \frac{\beta E_l}{2} -\sin \left[ \omega_k \left( t_1-t_2\right)\right] \sin \left[\omega_l \left( t_1-t_2\right)\right] \right]. \end{aligned} \end{equation} From the above one can see that the following substitution allows one to recover results for the bosonic bi-linear bath from the fermionic one \begin{eqnarray} &&k^I_{BB}(t_1-t_2) = \coth \frac{\beta E_k}{2} \coth \frac{\beta E_l}{2} k^R_{F}(t_1-t_2), \end{eqnarray} and the same relation holds for the real part of the kernel $k^R_{BB}(t_1-t_2)$. \section{Discussion} \label{sec:discussion} In this paper we addressed the model of a quantum variable such as a qubit interacting with a fermionic bath. The coupling between the qubit and the bath is quadratic in fermionic operators, and the bath is initially in a thermal state.
To investigate this system we employed the extension of the Feynman-Vernon influence functional technique that allows one to systematically study higher order contributions to the Feynman-Vernon action that arise from the system-bath interaction being non-linear with respect to bath operators. We explicitly computed the second order contribution to the Feynman-Vernon action. While this is the standard term having the same functional form also in the case of bosonic harmonic baths, the dependence on temperature will in general be different for a fermionic bath with two-fermion coupling. We identified one regime where nevertheless the fermionic environment mimics a bosonic one. Finally, we showed (details in the appendix) that the fourth order terms in the generalized Feynman-Vernon influence action vanish for the model considered. The first non-zero corrections to Feynman-Vernon or Keldysh theory are hence of sixth order in the system-bath interaction coefficient. \appendix \section{Non-harmonic baths and cluster expansions} \label{sec:non-harmonic} Here we briefly sketch how the cumulant expansion can be used to express the influence of the bath on the system. Firstly we summarize the necessary notation from~\cite{AurellGoyalKawai2019}. That paper employs the super-operator approach to find the dynamics of the system interacting with the environment: A map governing the evolution of system operators is obtained by tracing out bath degrees of freedom from the formal solution of the full (i.e. including system and bath) Liouville--von Neumann equation.
A crucial step in performing the trace is the evaluation of multi-time super-operator correlation functions (correlation functions with indices) in the bath, which are defined in terms of ordinary bath correlations by \begin{eqnarray}\label{eq:multi-time-correlation-reorder} C^{d_1,\cdots,d_n}(t_1,\cdots,t_n) &=& \tr\big[ \overrightarrow{\mathcal{T}}_\textsc{b} \left (\prod_{d_i = "<"} \mathcal{Q}^{d_i}_\textsc{b} (t_i) \right ) \\ \nonumber && \qquad \overleftarrow{\mathcal{T}}_\textsc{b} \left (\prod_{d_i = ">"} \mathcal{Q}^{d_i}_\textsc{b} (t_i) \right ) \rho_B(t_0) \big], \end{eqnarray} where $\mathcal{Q}_\textsc{b} (t_i)$ are time evolved bath operators from the interaction part of the Hamiltonian (in the interaction picture) and $\rho_B(t_0)$ is the initial state of the bath. As the starting point was the Liouville–von Neumann equation, one needs to include indices $d_1,\cdots,d_n$ to time-order the operators in two groups, one $(d_i = "<")$ acting from the right on the bath density matrix in ascending time order, and the other ($d_i = ">"$) acting from the left in descending time order. Let us recall that, for the ordinary operator correlation functions, successive orders of cumulants (cluster expansion) are defined inductively as \begin{eqnarray}\label{eq:cumulants-123} G_1(t_1) &=& C(t_1) \nonumber \\ G_2(t_1,t_2) &=& C(t_1,t_2) - G_1(t_1) G_1(t_2) \nonumber \\ G_3(t_1,t_2,t_3) &=& C(t_1,t_2,t_3) - G_1(t_1) G_1(t_2) G_1(t_3) - G_1(t_1) G_2(t_2,t_3) \nonumber \\ &&\quad - G_1(t_2) G_2(t_1,t_3) - G_1(t_3) G_2(t_1,t_2) \nonumber \\ &\vdots& \nonumber \end{eqnarray} The only difference between standard and super-operator correlation functions is that the latter need to be time ordered as determined by the indices $d_1,\ldots,d_{N}$. 
Once this is done one can write a general cumulant expansion as \begin{widetext} \begin{eqnarray} C^{d_1, \cdots d_{N}}(t_1, \cdots, t_{N}) &=& \sum_{\text{(all possible groupings)}} \prod_{\text{(groups of one time)}} G_1 (t) \prod_{\text{(groups of two times)}} G_2 (t,t') \cdots \label{eq:cumulant-expansion} \end{eqnarray} where $N$ can be even or odd, and where the times on the right-hand side are inserted after the re-ordering. All odd order cumulants vanish for a bath where the Hamiltonian is an even function (as in our case) and the second order cumulant is the same as the second order correlation function. The first non-trivial cumulant is then \begin{eqnarray}\label{eq:cumulants-4} G_4^{d_1,d_2,d_3,d_4}(t_1,t_2,t_3,t_4) &=& C^{d_1,d_2,d_3,d_4}(t_1,t_2,t_3,t_4) - C^{d_1,d_2}(t_1,t_2) C^{d_3,d_4}(t_3,t_4) - C^{d_1,d_3}(t_1,t_3) C^{d_2,d_4}(t_2,t_4) \nonumber \\ &&\quad - C^{d_1,d_4}(t_1,t_4) C^{d_2,d_3}(t_2,t_3) \end{eqnarray} where we have retained the super-operator notation on the right-hand side. \end{widetext} All cumulants beyond $G_2$ vanish for correlation functions of (classical) Gaussian processes~\cite{VANKAMPEN}. This also holds for operator correlation functions of harmonic bosonic baths, because in the path integral language these are all determined by Gaussian integrals. Alternatively, all higher-order operator correlation functions in a bath of free bosons are, by Wick's theorem, given by combinations of pairwise operator correlation functions, which give the same expressions as the cumulants used here. For free fermions all higher-order correlation functions are also given in terms of pairwise combinations of pairwise correlation functions, but with signs, and therefore different from the cumulants used here. 
\section{Generalized Feynman-Vernon actions} \langlebel{sec:generalized-FV} This Appendix summarizes the derivation of the generalized Feynman-Vernon action from~\cite{AurellGoyalKawai2019} and relates it to the cluster expansion. The multi-time super-operator function in \eqref{eq:multi-time-correlation-reorder} multiplies super-operator representation of the system operator. The connection to the path integral formulation is established in the following way: For indices $d_i="<"$ the super-operators correspond to forward paths $X_i$, whereas for indices $d_i=">"$ to a backward paths $Y_i$ (with a negative sign). A general bath correlation function is represented as in Eq. (\ref{eq:cumulant-expansion}) then a relevant series summation is performed. As a result, one obtains a reduced system propagator of the form (\ref{eq:quantum-map-dissipative-2}), where contributions to the generalized Feynman-Vernon action $S^{(n,m)}$ contain $n$ number of $X$ and $m$ number $Y$ as \begin{equation}gin{widetext} \begin{equation}gin{eqnarray}\langlebel{eq:cumulants-5} S^{(n,m)}&=& \frac{(-i)^{n} (i)^{m}}{\hbar^{n+m}}\int_{t_0}^{t}\dd{s_1} {\bf p}hi(s_1) \int_{t_0}^{s_1} \dd{s_2}{\bf p}hi(s_2)\cdots \int_{t_0}^{t}\dd{u_1}{\bf p}hi(u_1) \int_{t_0}^{u_1} \dd{u_2}{\bf p}hi(u_2)\cdots X_{s_1}X_{s_2}\cdots X_{s_n} \nonumber \\ &&{\bf q}uad \cdot Y_{u_1} Y_{u_2}\cdots Y_{u_m} G_{n+m}(u_m,\ldots,u_1,s_1,\ldots,s_n). \end{eqnarray} The last term in the above expression is the cumulant of the operator correlation function with an appropriate time ordering (first times for the backward path in reverse chronological order, then times for the forward path in chronological order). The term corresponding to $m+n=2$ is the standard quadratic Feynman-Vernon action as given by Eq. (\ref{eq:quadraticFV}). 
Renaming the variables so that times are always ordered $s>u$ and rewriting the resulting expression in terms of sum and difference of system paths $\chi_s=X_s+Y_s$ and $\xi_s=X_s-Y_s$ gives \begin{equation}gin{eqnarray} \langlebel{eq:second-cumulant} \sum_{n+m=2}S^{(n,m)} &=& -\frac{1}{2} \int_{t_0}^{t} \dd{s} \xi_s {\bf p}hi(s) \int_{t_0}^{s} \dd{u}{\bf p}hi(u) \left(\chi_u A +\xi_u B \right) \end{eqnarray} where $A,B$ are difference and sum of bath correlation functions at different times \begin{equation}gin{eqnarray} \langlebel{eq:second-cumulant-2} A &=& C(s,u) - C(u,s) \\ B &=& C(s,u) +C(u,s). \end{eqnarray} We want to simplify the above expression with regard to the correlation function and shift all time re-orderings to the system operators. Therefore we rewrite it as \begin{equation}gin{eqnarray} \langlebel{eq:Adquadratic} \int^{t}_{t_i} dt_1 \int^{t}_{t_i} dt_2 C(t_1,t_2) \left[ \Theta(t_2-t_1) X_{t_1} X_{t_2} + \Theta(t_1-t_2) Y_{t_1} Y_{t_2} - Y_{t_1} X_{t_2} \right], \end{eqnarray} where $\Theta(t-t')$ is the Heaviside step function. The next term of the action is a sum of all contributions of total order three, however in our case it vanishes and will be not discussed here. 
\section{Calculation of the 4th order cumulant} \label{app:4th} In order to show that in the considered model the fourth-order cumulant vanishes we will exploit several properties of the super-operator expression for the fourth order Feynman-Vernon action, which reads \begin{equation} \label{eq:operatorFV4th} \frac{(-i)^{4}}{4 !\hbar^4} \int_{t_{i}}^{t_{f}} \mathrm{d} t_{1} \int_{t_{i}}^{t_{f}} \mathrm{d} t_{2} \int_{t_{i}}^{t_{f}} \mathrm{d} t_{3} \int_{t_{i}}^{t_{f}} \mathrm{d} t_{4} \sum_{d_{1}, d_{2}, d_{3},d_{4}} G_{4}^{d_{1}, d_{2}, d_{3}, d_{4}}\left(t_{1}, t_{2}, t_{3}, t_4\right) \mathcal{X}_{\mathrm{s}}^{d_{1}}\left(t_{1}\right) \mathcal{X}_{\mathrm{s}}^{d_{2}}\left(t_{2}\right) \mathcal{X}_{\mathrm{s}}^{d_{3}}\left(t_{3}\right) \mathcal{X}_{\mathrm{s}}^{d_{4}}\left(t_{4}\right), \end{equation} where \begin{equation} \begin{aligned} G_{4}^{d_{1}, d_{2}, d_{3}, d_{4}}\left(t_{1}, t_{2}, t_{3}, t_{4}\right) &=C^{d_{1}, d_{2}, d_{3}, d_{4}}\left(t_{1}, t_{2}, t_{3}, t_{4}\right)-C^{d_{1}, d_{2}}\left(t_{1}, t_{2}\right) C^{d_{3}, d_{4}}\left(t_{3}, t_{4}\right) \\ &-C^{d_{1}, d_{3}}\left(t_{1}, t_{3}\right) C^{d_{2}, d_{4}}\left(t_{2}, t_{4}\right)-C^{d_{1}, d_{4}}\left(t_{1}, t_{4}\right) C^{d_{2}, d_{3}}\left(t_{2}, t_{3}\right), \end{aligned} \end{equation} is the fourth order super-operator cumulant involving super-operator correlation functions of the bath \begin{eqnarray} \begin{aligned} C^{d_{1}, \cdots, d_{n}}\left(t_{1}, \cdots, t_{n}\right) &= \sum_{k_1, \ldots, k_{2n}, l_1, \ldots, l_{2n}} g_{k_1 l_1} \ldots g_{k_{2n} l_{2n}} Tr\left[\overleftarrow{\mathcal{T}} (\hat{Q}_{k_1}(t_1)\hat{Q}_{l_1}(t_1) \dots \hat{Q}_{k_{2n}}(t_{2n})\hat{Q}_{l_{2n}}(t_{2n}) ) \rho\right]. \end{aligned} \end{eqnarray} For now $d_{1}, d_{2}, d_{3}, d_{4}$ will be fixed and the dependence on them will be dropped. 
The key step in providing an expression for the cumulant is the calculation of the fourth order correlation function, which for our model reads \begin{eqnarray} \label{eq:4timecorfun} C(t_1,t_2,t_3,t_4) = \sum_{k_1,l_1,k_2,l_2,k_3,l_3,k_4,l_4}g_{k_1l_1}g_{k_2l_2}g_{k_3l_3}g_{k_4l_4}Tr \left[\hat{Q}_{k_1}(t_1)\hat{Q}_{l_1}(t_1)\hat{Q}_{k_2}(t_2)\hat{Q}_{l_2}(t_2)\hat{Q}_{k_3}(t_3)\hat{Q}_{l_3}(t_3)\hat{Q}_{k_4}(t_4)\hat{Q}_{l_4}(t_4) \rho \right]. \nonumber \\ \end{eqnarray} In the above there will be four fermionic operators with indices $k_i$ as well as four with indices $l_j$. The non-zero contributions to the action come from pairing of indices: Expressions with an odd number of indices $k_i, l_i$ vanish. In order to provide the final result in the simplest form we deal with the different possible pairings of the fermion operators case by case. We also note that the time integrals over $t_1, t_2, t_3, t_4$ in Eq. (\ref{eq:operatorFV4th}) are unconstrained, although they can also be written as time-ordered integrals. In order to avoid confusion with the notation, time-ordered times will be denoted as $s_1, s_2, s_3, s_4$. \subsection{Case I: Pair-wise groupings} \label{sub:subI} Here we treat the terms in which, if $k_i=k_j$ or $k_i=l_j$ for some indices $i$ and $j$, then they are different from all the other $k$ and $l$ (e.g. $k_1=k_2=k$, $k_3=k_4=k'$). Furthermore, we can divide those terms into two categories: "separated-pairing" and "mixed-pairing" terms. For "separated-pairing" terms $k$ indices are paired among each other and the same holds for $l$. In "mixed-pairing" terms $k$'s might be paired with $l$'s. Subsequently, we observe that "mixed-pairing" terms can be brought into "separated-pairing" form with the help of a permutation $k_j \leftrightarrow l_j$ (note that such permutations do not change the overall sign of terms). 
As a result, it is sufficient to consider "separated-pairing" terms with an additional factor $4$ and in such a way all the terms are accounted for. Now we use the fact that all fermionic operators with $k$ indices can be moved to the left and, if time ordering is preserved, the sign of this expression remains the same. The possible pairings for $k$'s are \begin{eqnarray} &&\hat{Q}_k(s_1)\hat{Q}_k(s_2) \hat{Q}_{k'}(s_3) \hat{Q}_{k'}(s_4) \equiv [12]_{k}[34]_{k'} \\ &&\hat{Q}_k(s_1)\hat{Q}_{k'}(s_2) \hat{Q}_{k}(s_3) \hat{Q}_{k'}(s_4) = - \hat{Q}_k(s_1)\hat{Q}_{k}(s_3) \hat{Q}_{k'}(s_2) \hat{Q}_{k'}(s_4) \equiv - [13]_{k}[24]_{k'} \\ &&\hat{Q}_k(s_1)\hat{Q}_{k'}(s_2) \hat{Q}_{k'}(s_3) \hat{Q}_{k}(s_4) = \hat{Q}_k(s_1) \hat{Q}_{k}(s_4) \hat{Q}_{k'}(s_2) \hat{Q}_{k'}(s_3) \equiv [14]_{k}[23]_{k'}, \end{eqnarray} where we introduced a symbolic notation: The first and second square brackets group operators with $k$ and $k'$ respectively, and the numbers inside the brackets denote the indices of the re-ordered times $s_i$. To get the total operator acting on $\rho_B$ in Eq. (\ref{eq:4timecorfun}) one needs to multiply the $k,k'$ operators with the $l, l'$ operators and include the appropriate combination of coupling constants. In fact there are just two possible pre-factors: $g_{kl}^2 g_{k'l'}^2$ for terms of the form $[ab]_{k}[cd]_{k'}[ab]_{l}[cd]_{l'}$ and $g_{kl} g_{kl'} g_{k'l} g_{k'l'}$ for all others. 
It will prove convenient to write the resulting expression in the following form \begin{equation}gin{eqnarray} && \left(g_{kl}^2 g_{k'l'}^2 -g_{kl} g_{kl'} g_{k'l} g_{k'l'}\right) \left( [12]_{k}[34]_{k'} [12]_{l}[34]_{l'} + [13]_{k}[24]_{k'} [13]_{l}[24]_{l'} + [14]_{k}[23]_{k'} [14]_{l}[23]_{l'} \right) + \langlebel{eq:same-same} \\ &&\left(g_{kl}^2 g_{k'l'}^2 + g_{kl} g_{kl'} g_{k'l} g_{k'l'} \right)\left([12]_{k}[34]_{k'}-[13]_{k}[24]_{k'}+[14]_{k}[23]_{k'}\right)\left([12]_{l}[34]_{l'}-[13]_{l}[24]_{l'}+[14]_{l}[23]_{l'}\right), \langlebel{eq:all-all} \end{eqnarray} The above expressions are written using re-ordered times $s_1,s_2,s_3,s_4$. Assuming that superoperator indices $d_1,d_2,d_3,d_4$ are fixed we relate re-ordered times to unconstrained times in the following way \begin{equation}gin{equation} s_1 = t_x \;\;\; s_2 = t_y \;\;\; s_3 = t_z\;\;\;s_4 = t_w, \end{equation} where indices ${x,y,z,w}$ are related to values in${1,2,3,4}$ through a permutation \begin{equation}gin{equation} (x,y,z,w)=P_{d,t}(1,2,3,4), \end{equation} where indices $d,t$ indicate dependence of the permutation on the superoperator ordering $d_i$ and unconstrained times $t_j$. Then we have that e.g. $[12]_{k}= \hat{Q}_k(s_1) \hat{Q}_k(s_2)= \hat{Q}_k(t_a) \hat{Q}_k(t_b)$. We now show that Eq. (\ref{eq:all-all}) vanish. Consider a new $d$-dependent time ordering of operators $\hat{Q}_m(t)$, where $m \in \left\{k,k',l,l \right\}$ \begin{equation}gin{eqnarray} &&S_{d,m}(x,y) = T_{d,m}(x,y) = \overrightarrow{\mathcal{T}} {\bf p}rod_{i\in (x,y); d_i=-} \hat{Q}_m(t_i) \overleftarrow{\mathcal{T}}{\bf p}rod_{j\in (x,y); d_i=+} \hat{Q}_m(t_j), \end{eqnarray} if the order of the operators is the same as $(x,y)$, and \begin{equation}gin{eqnarray} &&S_{d,m}(x,y) = - T_{d,m}(x,y), \end{eqnarray} if the order of the operators is the opposite of $(x,y)$. Additionally we will need the sign of a permutation $S(m \rightarrow n)$, then we can rewrite the first bracket in Eq. 
(\ref{eq:all-all}) as \begin{equation}gin{eqnarray} &&[12]_{k}[34]_{k'}-[13]_{k}[24]_{k'}+[14]_{k}[23]_{k'} = S(1234 \rightarrow xyzw) S_{d,k}(a,b) S_{d,k'}(c,d) + S(1234 \rightarrow xzyw) S_{d,k}(a,c) S_{d,k'}(b,d) + \nonumber \\ &&S(1234 \rightarrow xwyz) S_{d,k}(a,d) S_{d,k'}(b,c). \end{eqnarray} The above expression is a sum that goes over 3 permutations of $(xywz)$ where the first is the identity, $S(abcd \rightarrow abcd) = 1$. It can be extended to the sum over all 24 permutations of $(abcd)$ \begin{equation}gin{eqnarray} [12]_{k}[34]_{k'}-[13]_{k}[24]_{k'}+[14]_{k}[23]_{k'} = \frac{1}{8} \left[ \sum_{xyzw} S(1234 \rightarrow xyzw) S_{d,k}(x,y) S_{d,k'}(z,w) \right]. \end{eqnarray} The same argument applies to the $l,l'$ terms in the second bracket of Eq. (\ref{eq:all-all}) so we can rewrite both brackets as \begin{equation}gin{eqnarray} \frac{1}{64} \left[ \sum_{xyzw} S(1234 \rightarrow xyzw) S_{d,k}(x,y)S_{d,k'}(z,w) \right]\left[ \sum_{x'y'z'w'} S(1234 \rightarrow x'y'z'w') S_{d,k}(x',y') S_{d,k'}(z',w') \right]. \end{eqnarray} Let us consider product of two terms from the two sums \begin{equation}gin{eqnarray} \frac{1}{64} S(1234 \rightarrow xyzw) S_{d,k}(x,y) S_{d,k'}(z,w)S(1234 \rightarrow x'y'z'w') S_{d,k}(x',y')S_{d,k'}(z',w'). \end{eqnarray} We need to consider the following cases: \\ 1. The same pairing i.e. \begin{equation}gin{itemize} \item $xy=x'y'$ and $zw=z'w'$ \item $xy=z'w'$ and $zw=x'y'$ \end{itemize} Those terms are of the same structure as the ones in Eq. (\ref{eq:same-same}). \\ 2. Different pairing e.g. $\frac{1}{64} S(1234 \rightarrow xyzw) S_{d,k}(x,y) S_{d,k'}(z,w) S(1234 \rightarrow xzyw) S_{d,l}(x,z) S_{d,l'}(y,w)$. The overall expression is summed over indices $d_i$ and integrated over times $t_j$. Consider therefore the following change of variables \begin{equation}gin{eqnarray} t'_x=t_y \; \; \; d'_x = d_y \; \; \; t'_y=t_x \; \; \; d'_y = d_x, \end{eqnarray} with rest of them unchanged. 
Now we will analyze how such a change affects the sign of the considered term (it is useful to bring in the dependence of $t$ and $t'$). We have \begin{equation}gin{eqnarray} &&S(1234 \rightarrow xyzw) \;\;\; \text{unchanged}, \nonumber \\ &&S_{d,k}(x,y;t) \rightarrow S_{d',k}(x,y;t') \;\;\; \text{unchanged}, \nonumber \\ &&S_{d,k}(z,w;t) \rightarrow S_{d',k}(z,w;t') \;\;\; \text{unchanged}, \nonumber \\ &&S(1234 \rightarrow xzyw) \;\;\; \text{unchanged}, \nonumber \\ &&S_{d,k}(x,z;t) \rightarrow S_{d',k}(x,z;t') \;\;\; \text{changed}, \nonumber \\ &&S_{d,k}(y,w;t) = S_{d',k'}(y,w;t') \;\;\; \text{changed}. \nonumber \end{eqnarray} We compare the above to the effect of permuting indices $x \leftrightarrow y$. \begin{equation}gin{eqnarray} &&S(1234 \rightarrow xyzw) \rightarrow S(1234 \rightarrow yxzw) = - S(1234 \rightarrow xyzw), \nonumber \\ &&S_{d,k}(x,y;t) \rightarrow S_{d,k}(y,x;t) = - S_{d,k}(x,y;t), \\ &&S_{d,k}(z,w;t) \;\;\; \text{unchanged}, \nonumber \\ &&S(1234 \rightarrow xzyw) \rightarrow S(1234 \rightarrow yzxw) =-S(1234 \rightarrow xzyw) \;\;\; \text{unchanged}, \nonumber \\ &&S_{d,k}(x,z;t) \rightarrow S_{d,k}(y,z;t) \;\;\; \text{changed}, \nonumber \\ &&S_{d,k}(y,w;t) = S_{d,k'}(x,w;t) \;\;\; \text{changed}. \nonumber \end{eqnarray} From definition of $S_{d,k}(x,z;t)$ it follows that \begin{equation}gin{eqnarray} S_{d',k}(x,z;t') = S_{d,k}(y,z;t) \;\;\; \text{and} \;\;\; S_{d',k}(y,w;t') = S_{d,k}(x,w;t), \end{eqnarray} and all the above same relations hold for $k',l,l'$. Therefore we found that all terms where the pairing is not the same cancel pairwise. As a result the only non-zero term, from Eq. (\ref{eq:same-same}) and (\ref{eq:all-all}) is \begin{equation}gin{equation} g_{kl}^2 g_{k'l'}^2 \left( [12]_{k}[34]_{k'} [12]_{l}[34]_{l'} + [13]_{k}[24]_{k'} [13]_{l}[24]_{l'} + [14]_{k}[23]_{k'} [14]_{l}[23]_{l'} \right). 
\end{equation} Performing the trace yields the following result \begin{equation}gin{equation} \langlebel{eq:counterIres} \sum_{k,l,k',l'}g_{kl}^2 g_{k'l'}^2 \left( \left \langlengle 12 \right \ranglengle_{k} \left \langlengle 34 \right \ranglengle_{k'} \left \langlengle 12 \right \ranglengle_{l} \left \langlengle 34 \right \ranglengle_{l'} + \left \langlengle 13 \right \ranglengle_{k} \left \langlengle 24 \right \ranglengle_{k'} \left \langlengle 13 \right \ranglengle_{l} \left \langlengle 24 \right \ranglengle_{l'} + \left \langlengle 14 \right \ranglengle_{k} \left \langlengle 23 \right \ranglengle_{k'} \left \langlengle 14 \right \ranglengle_{l} \left \langlengle 23 \right \ranglengle_{l'} \right), \end{equation} where $\left \langlengle ab \right \ranglengle_{k} \left \langlengle ab \right \ranglengle_{m}\equiv \left \langlengle \hat{Q}_m(t_a) \hat{Q}_m(t_b) \right \ranglengle $ is thermal expectation value and we restored the sum over bath degrees of freedom. \subsection{Case II: Four $k$ grouping, pairwise $l$ grouping } \langlebel{sub:subII} An example of such indices arrangement is $k_1=k_2=k_3=k_4=k \; l_1=l_2=l \; l_3=l_4=l'$. 
Using the operator notation introduced in the previous case we find that the relevant expression is \begin{equation}gin{equation} g_{kl}^2 g_{kl'}^2[1234]_k \left([12]_{l}[34]_{l'}-[13]_{l}[24]_{l'}+[14]_{l}[23]_{l'}\right) \end{equation} This can be evaluated into \begin{equation}gin{eqnarray} g_{kl}^2 g_{kl'}^2\left(\left \langlengle 12 \right \ranglengle_{k} \left \langlengle 34 \right \ranglengle_{k} -\left \langlengle 13 \right \ranglengle_{k} \left \langlengle 24 \right \ranglengle_{k} +\left \langlengle 14 \right \ranglengle_{k} \left \langlengle 23 \right \ranglengle_{k} \right)\left(\left \langlengle 12 \right \ranglengle_{k} \left \langlengle 34 \right \ranglengle_{l'} -\left \langlengle 13 \right \ranglengle_{k} \left \langlengle 24 \right \ranglengle_{l'} +\left \langlengle 14 \right \ranglengle_{k} \left \langlengle 23 \right \ranglengle_{l'} \right), \end{eqnarray} where $\left \langlengle ab \right \ranglengle_{k}$ is thermal expectation value. The above expression can be simplified using exactly the same discussion as in the previous Subsection. The reason for this is that it does not involve indices $k$'s and $l$'s but only time orderings and signs of permutations. Therefore we find that the final expression is \begin{equation}gin{eqnarray} \langlebel{eq:caseIIres} \sum_{k,l,l'}g_{kl}^2 g_{kl'}^2\left(\left \langlengle 12 \right \ranglengle_{k} \left \langlengle 34 \right \ranglengle_{k} \left \langlengle 12 \right \ranglengle_{l} \left \langlengle 34 \right \ranglengle_{l'} +\left \langlengle 13 \right \ranglengle_{k} \left \langlengle 24 \right \ranglengle_{k} \left \langlengle 13 \right \ranglengle_{l} \left \langlengle 24 \right \ranglengle_{l'} +\left \langlengle 14 \right \ranglengle_{k} \left \langlengle 23 \right \ranglengle_{k} \left \langlengle 14 \right \ranglengle_{l} \left \langlengle 23 \right \ranglengle_{l'} \right), \end{eqnarray} where we restored the sum over bath degrees of freedom. 
\subsection{Case III: Four $k,l$ grouping} \langlebel{sub:subIII} In the operator notation we can write \begin{equation}gin{eqnarray} g^4_{kl}[1234]_k [1234]_l, \end{eqnarray} using Wicks theorem this evaluates into \begin{equation}gin{equation} g^4_{kl}\left( \left \langlengle 12 \right \ranglengle_{k} \left \langlengle 34 \right \ranglengle_{k} -\left \langlengle 13 \right \ranglengle_{k} \left \langlengle 24 \right \ranglengle_{k} +\left \langlengle 14 \right \ranglengle_{k} \left \langlengle 23 \right \ranglengle_{k} \right)\left( \left \langlengle 12 \right \ranglengle_{l} \left \langlengle 34 \right \ranglengle_{l} -\left \langlengle 13 \right \ranglengle_{l} \left \langlengle 24 \right \ranglengle_{l} +\left \langlengle 14 \right \ranglengle_{l} \left \langlengle 23 \right \ranglengle_{l} \right). \end{equation} Here again we can apply reasoning from Subsection (\ref{sub:subI}), so finally we have \begin{equation}gin{eqnarray} \langlebel{eq:caseIIIres} \sum_{k,l}g_{kl}^4\left(\left \langlengle 12 \right \ranglengle_{k} \left \langlengle 34 \right \ranglengle_{k} \left \langlengle 12 \right \ranglengle_{l} \left \langlengle 34 \right \ranglengle_{l} +\left \langlengle 13 \right \ranglengle_{k} \left \langlengle 24 \right \ranglengle_{k} \left \langlengle 13 \right \ranglengle_{l} \left \langlengle 24 \right \ranglengle_{l} +\left \langlengle 14 \right \ranglengle_{k} \left \langlengle 23 \right \ranglengle_{k} \left \langlengle 14 \right \ranglengle_{l} \left \langlengle 23 \right \ranglengle_{l} \right), \end{eqnarray} \subsection{Counter terms and the final result} Subsections \ref{sub:subI}, \ref{sub:subII}, \ref{sub:subIII} were devoted to calculation of correlation function involving four times. In order to obtain final expression for the fourth order cumulant one needs to subtract from those results products of two times correlation functions. 
We find \begin{equation}gin{eqnarray} &&C(t_1,t_2)C(t_3,t_4) + C(t_1,t_3)C(t_2,t_4) + C(t_1,t_4)C(t_2,t_3) = \nonumber \\ \langlebel{eq:counterI} &&\sum_{k,k',l,l'} \left( \left \langlengle 12 \right \ranglengle_{k} \left \langlengle 34 \right \ranglengle_{k'} \left \langlengle 12 \right \ranglengle_{l} \left \langlengle 34 \right \ranglengle_{l'} + \left \langlengle 13 \right \ranglengle_{k} \left \langlengle 24 \right \ranglengle_{k'} \left \langlengle 13 \right \ranglengle_{l} \left \langlengle 24 \right \ranglengle_{l'} + \left \langlengle 14 \right \ranglengle_{k} \left \langlengle 23 \right \ranglengle_{k'} \left \langlengle 14 \right \ranglengle_{l} \left \langlengle 23 \right \ranglengle_{l'} \right) \\ \langlebel{eq:counterII} &&\sum_{k,l,l'} \left(\left \langlengle 12 \right \ranglengle_{k} \left \langlengle 34 \right \ranglengle_{k} \left \langlengle 12 \right \ranglengle_{l} \left \langlengle 34 \right \ranglengle_{l'} +\left \langlengle 13 \right \ranglengle_{k} \left \langlengle 24 \right \ranglengle_{k} \left \langlengle 13 \right \ranglengle_{l} \left \langlengle 24 \right \ranglengle_{l'} +\left \langlengle 14 \right \ranglengle_{k} \left \langlengle 23 \right \ranglengle_{k} \left \langlengle 14 \right \ranglengle_{l} \left \langlengle 23 \right \ranglengle_{l'} \right) \\ \langlebel{eq:counterIII} &&\sum_{k,l,} g_{kl}^42\left(\left \langlengle 12 \right \ranglengle_{k} \left \langlengle 34 \right \ranglengle_{k} \left \langlengle 12 \right \ranglengle_{l} \left \langlengle 34 \right \ranglengle_{l} +\left \langlengle 13 \right \ranglengle_{k} \left \langlengle 24 \right \ranglengle_{k} \left \langlengle 13 \right \ranglengle_{l} \left \langlengle 24 \right \ranglengle_{l} +\left \langlengle 14 \right \ranglengle_{k} \left \langlengle 23 \right \ranglengle_{k} \left \langlengle 14 \right \ranglengle_{l} \left \langlengle 23 \right \ranglengle_{l} \right). 
\end{eqnarray} Comparing the above to the results of Subsections \ref{sub:subI}, \ref{sub:subII}, \ref{sub:subIII} we find that line (\ref{eq:counterI}) equals Eq. (\ref{eq:counterIres}), line (\ref{eq:counterII}) equals Eq. (\ref{eq:caseIIres}) and line (\ref{eq:counterIII}) equals Eq. (\ref{eq:caseIIIres}). As a result, in our model the fourth order cumulant vanishes. \end{widetext} \end{document}
\begin{document} \title{Generalized Hyperbolicity and Shadowing in $L^p$ spaces} \author{Emma D'Aniello \\ \and Udayan B. Darji \\ \and Martina Maiuriello \\} \newcommand{\Addresses}{{ \footnotesize E.~D'Aniello, \\ \textsc{Dipartimento di Matematica e Fisica,\\ Universit\`a degli Studi della Campania ``Luigi Vanvitelli",\\ Viale Lincoln n. 5, 81100 Caserta, ITALIA} \\ \textit{E-mail address: \em [email protected]} U.B.~Darji,\\ \textsc{Department of Mathematics,\\ University of Louisville,\\ Louisville, KY 40292, USA}\\ \textit{E-mail address: \em [email protected]} M.~Maiuriello,\\ \textsc{Dipartimento di Matematica e Fisica,\\ Universit\`a degli Studi della Campania ``Luigi Vanvitelli",\\ Viale Lincoln n. 5, 81100 Caserta, ITALIA}\\ \textit{E-mail address: \em [email protected]} }} \maketitle \begin{abstract} It is rather well-known that hyperbolic operators have the shadowing property. In the setting of finite dimensional Banach spaces, having the shadowing property is equivalent to being hyperbolic. In 2018, Bernardes et al. constructed an operator with the shadowing property which is not hyperbolic, settling an open question. In the process, they introduced a class of operators which has come to be known as generalized hyperbolic operators. This class of operators seems to be an important bridge between hyperbolicity and the shadowing property. In this article, we show that for a large natural class of operators on $L^p(X)$ the notion of generalized hyperbolicity and the shadowing property coincide. We do this by giving sufficient and necessary conditions for a certain class of operators to have the shadowing property. We also introduce computational tools which allow construction of operators with and without the shadowing property. Utilizing these tools, we show how some natural probability distributions, such as the Laplace distribution and the Cauchy distribution, lead to operators with and without the shadowing property on $L^p(X)$. 
\end{abstract} \let\thefootnote\relax\footnote{\date{\today} \\ 2010 {\em Mathematics Subject Classification:} Primary: 37B65, 47B33 Secondary: 37D05, 47A16.\\ {\em Keywords:} Shadowing Property, Hyperbolicity, Generalized Hyperbolicity, Composition Operators, Dissipative Systems.} \tableofcontents \section{Introduction} Linear dynamics is a relatively recent area of mathematics which lies at the intersection of operator theory and dynamical systems. During the last two decades, a flurry of intriguing results have been obtained in this area concerning dynamical properties such as transitivity, mixing, Li-Yorke, Devaney and distributional chaos, invariant measures, ergodicity and frequent hypercyclicity. We refer the reader to books \cite{BernardesMessaoudiETDS2020} and \cite{GrosseErdmannMaguillot2011} for general information on the topic. Hyperbolic dynamics on manifolds is an important part of smooth dynamical systems. Indeed, some of the important questions in hyperbolic dynamics concern relationships between hyperbolicity, the shadowing property, expansivity and structural stability. It is rather well-known that hyperbolicity implies the shadowing property, expansivity and structural stability. Classical results of Smale \cite{Smale1967} and Walters \cite{Walters1978} show that the shadowing property and expansivity imply spectral decomposition and structural stability. Abdenur and Diaz \cite{AbdenurDiaz2007} showed that, for generic $C^1$ homeomorphisms on closed manifolds, the shadowing property implies hyperbolicity in certain important contexts. Pilyugin and Tikhomirov \cite{PilyuginTikhomirov2010} showed that the Lipschitz shadowing property and structural stability are equivalent for $C^1$ homeomorphisms of closed smooth manifolds. This is just a glimpse of important works in this field. 
Although hyperbolic dynamics of linear operators is rather recent, there are some classical results from the 1960's where relationships between expansivity and the spectrum of an operator were obtained. In particular, Eisenberg and Hedlund \cite{EisenbergHedlundPJM1970, HedlundPJM1971} showed that an invertible operator $T$ is uniformly expansive if and only if $\sigma_a (T)$, the approximate spectrum of $T$, does not intersect the unit circle $\mathbb{T}$. In 2000, Mazur \cite{MazurFDE2000} showed that an invertible normal operator on a Hilbert space has the shadowing property if and only if it is hyperbolic. A detailed study of hyperbolicity, expansivity, the shadowing property and the spectrum of an operator on a Banach space was initiated by Bernardes et al. in 2018 \cite{BernardesCiriloDarjiMessaoudiPujalsJMAA2018}. Among many results obtained there, an important question was settled, namely that there are operators with the shadowing property which are not hyperbolic. This result was proved by constructing a class of operators which have a weaker splitting than the usual splitting of hyperbolic operators. Cirilo et al. in a subsequent work named them generalized hyperbolic operators. This class of operators seems to be the correct bridge between hyperbolicity and the shadowing property, as is evident from the results in \cite{BernardesMessaoudiETDS2020}, \cite{BernadesMessaouidPAMS2020} and \cite{CiriloGollobitPujals2020}. That generalized hyperbolic operators have the shadowing property was shown in [Theorem A, \cite{BernardesCiriloDarjiMessaoudiPujalsJMAA2018}]. Bernardes and Messaoudi [Theorem 18, \cite{BernardesMessaoudiETDS2020}] gave a characterization of weighted shifts which have the shadowing property. From this characterization one obtains that for the class of weighted shifts on $\ell^p(\mathbb{Z})$, generalized hyperbolicity is equivalent to the shadowing property. Before describing the results in this article, we mention some seminal recent results. 
Bernardes and Messaoudi \cite{BernardesMessaoudiETDS2020} showed that a linear operator on a Banach space is hyperbolic if and only if it is expansive and has the shadowing property. In \cite{BernadesMessaouidPAMS2020} they also showed that all generalized hyperbolic operators are structurally stable. We also point out that general properties of generalized hyperbolic operators with applications are carried out in \cite{CiriloGollobitPujals2020}. In a very different direction from linear dynamics, relationship between hyperbolicity, expansivity and the shadowing property in the setting of noncompact spaces was carried out in \cite{LeeNguyenYang2018}. In this article we explore the boundary between generalized hyperbolicity and the shadowing property. In particular, we show that for a large natural class of operators on $L^p(X)$ the notions of generalized hyperbolicity and the shadowing property coincide. More specifically, we start with a $\sigma$-finite dissipative measure space $(X, {\mathcal B}, \mu)$ and a nonsingular, invertible, bimeasurable transformation $f:X \rightarrow X$. We consider the composition operator $T_f:L^p(X) \rightarrow L^p(X)$ given by $T_f(\varphi) = \varphi \circ f$. If the Radon-Nikodym derivative of $\mu(f)$ with respect to $\mu$ is bounded below away from zero, then $T_f$ is a bounded operator. We assume such is the case for the Radon-Nikodym derivative of $\mu(f)$ and $\mu(f^{-1})$ with respect to $\mu$. Moreover, we assume that our measurable transformation satisfies the bounded distortion condition. Among this class of operators, we give necessary and sufficient conditions for an operator to have the shadowing property: Theorems SS, SN. Using the obtained characterization of the shadowing property, we conclude as a corollary that, in this particular class of operators, the shadowing property and generalized hyperbolicity coincide. 
In Theorem RN, we give computationally useful conditions which easily allow construction of operators with and without shadowing property. In particular, we show how some natural probability distributions, such as the Laplace distribution and the Cauchy distribution, lead to operators with and without the shadowing property on $L^p(X)$. At this point we like to point out that a systematic study of composition operators in the setting of linear dynamics was initiated in \cite{BayartDarjiPiresJMAA2018} and \cite{BernardesDarjiPiresMM2020}. In \cite{BayartDarjiPiresJMAA2018}, necessary and sufficient conditions were given for a composition operator to be topologically transitive and mixing. Necessary and sufficient conditions for an operator to be Li-Yorke chaotic were given in \cite{BernardesDarjiPiresMM2020}. The motivation for the study of composition operators is to have a concrete but large class of operators which can be utilized as examples and counterexamples in linear dynamics. These types of operators include weighted shifts but the class is much larger than that. For example, it includes operators induced by measures on odometers \cite{BDDPOdometers}. The paper is organized as follows. In Section~2, we give definitions and background results. In Section~3, we state our main results. In Section~4, we construct concrete examples. Section~5 consists of proofs, and Section~6 of open problems. \section{Definitions and Background Results} Given a Banach space $X$, by $S_X$ we denote the {\em unit sphere} of $X$, that is $S_X=\{x \in X\, : \, \Vert x \Vert =1 \}$. 
If $T$ is a bounded operator on a Banach space $X$, then $\sigma(T), \sigma_p(T), \sigma_a(T)$ and $\sigma_r(T)$ denote, respectively, the {\em spectrum}, the {\em point spectrum}, the {\em approximate point spectrum} and the {\em residual spectrum} of $T$, while $r(T)$ denotes the {\em spectral radius} of $T$ and it satisfies the {\em spectral radius formula} $r(T)=\lim_{n \rightarrow \infty} \Vert T^n \Vert ^{\frac{1}{n}}$. In the sequel, as usual, ${\mathbb N}$ denotes the set of all positive integers and ${\mathbb N}_0={\mathbb N}\cup \{0\} .$ Moreover, ${\mathbb D}$ and ${\mathbb T}$ denote the open unit disk and the unit circle in the complex plane ${\mathbb C}$, respectively. \subsection{Weighted Shifts} Due to the importance of {\em weighted shifts} in the area of linear dynamics and operator theory, the study of their dynamical behavior has received special attention in recent years. We recall some preliminary definitions and results. \begin{defn} Let $A= {\mathbb Z}$ or $A = {\mathbb N}$. Let $X=\ell^p(A)$, $1 \leq p < \infty$ or $X=c_0(A).$ Let $w=\{w_n\}_{n \in A}$ be a bounded sequence of scalars, called {\em weight sequence}. Then, the {\em weighted backward shift $B_w$ on $X$} is defined by \[B_w( \{x_n\}_{n \in A}) =\{w_{n+1}x_{n+1}\}_{n \in A}.\] If $A= {\mathbb Z}$, the shift is called {\em bilateral}. If $A = {\mathbb N}$, then the shift is {\em unilateral}. A unilateral weighted backward shift is not invertible. On the other hand, a bilateral $B_w$ is invertible if and only if $\inf_{n \in \mathbb Z} \vert w_n \vert >0$. \end{defn} \subsection{Expansivity} Expansivity is an important concept in hyperbolic dynamics. In the context of linear dynamics, various notions of expansivity have simpler formulations. We use them as defined below. We refer the reader to \cite{BernardesCiriloDarjiMessaoudiPujalsJMAA2018} for a discussion of how they are obtained from the original definitions in the general setting. 
\begin{defn} An invertible operator $T$ on a Banach space $X$ is said to be {\em expansive} if for each $x \in S_X$ there exists $n \in \mathbb Z$ such that $\Vert T^n x\Vert \geq 2$. \end{defn} \begin{defn} An invertible operator $T$ on a Banach space $X$ is said to be {\em uniformly expansive} if there exists $n \in \mathbb N$ such that \[z \in S_X \Longrightarrow \Vert T^nz \Vert \geq 2 \text{ or } \Vert T^{-n}z \Vert \geq 2.\] \end{defn} We point out that in the previous definitions, the number 2 can be replaced by any number $c>1$. In \cite{BernardesCiriloDarjiMessaoudiPujalsJMAA2018}, the authors characterize various types of expansivity for invertible operators on Banach spaces (\cite{BernardesCiriloDarjiMessaoudiPujalsJMAA2018}: Proposition 19) and, in particular, they also obtain a complete characterization of the notions of expansivity for weighted shifts (\cite{BernardesCiriloDarjiMessaoudiPujalsJMAA2018}: Theorem E). \subsection{Shadowing} As for expansivity, the concept of shadowing has a simplified formulation in the setting of linear dynamics. We use this formulation in our work. \begin{defn} Let $T:X \rightarrow X$ be an operator on a Banach space $X$. A sequence $\{x_n\}_{n \in \mathbb Z}$ in $X$ is called a {\em $\delta$-pseudotrajectory} of $T$, where $\delta >0$, if \[ \Vert Tx_n - x_{n+1} \Vert \leq \delta, \text{ for all $n \in \mathbb Z$.}\] \end{defn} The basic property of an operator related to the notion of a pseudotrajectory is {\em the shadowing property}: \begin{defn} Let $T:X \rightarrow X$ be an invertible operator on a Banach space $X$. 
Then $T$ is said to have the {\em shadowing property} if for every $\epsilon >0$ there exists $\delta >0$ such that every $\delta $-pseudotrajectory $\{x_n\}_{n \in \mathbb Z}$ of $T$ is $\epsilon $-shadowed by a real trajectory of $T$, that is, there exists $x \in X$ such that \[ \Vert T^n x-x_n \Vert < \epsilon, \text{ for all $n \in \mathbb Z$.}\] \end{defn} We can define the notion of {\em positive shadowing} for an operator $T$ by replacing the set $\mathbb Z$ by $\mathbb N$ in the above definition. In such ``positive" case, $T$ does not need to be invertible. The following is an equivalent formulation of shadowing in the context of linear dynamics which one normally uses. \begin{lem} [\cite{Pilyugin1999}] \label{LEM1} An invertible operator $T$ on a Banach space $X$ has the shadowing property if and only if there is a constant $K>0$ such that, for every bounded sequence $\{z_n\}_{n \in \mathbb Z}$ in $X$, there is a sequence $\{y_n\}_{n \in \mathbb Z}$ in $X$ such that \[ \sup_{n \in \mathbb Z} \Vert y_n \Vert \leq K \sup_{n \in \mathbb Z} \Vert z_n \Vert \hspace{0.3 cm}\text{ and } \hspace{0.3 cm} y_{n+1}=Ty_n + z_n, \text{ for all $n \in \mathbb Z$.}\] \end{lem} In \cite{BernardesMessaoudiETDS2020}, Bernardes and Messaoudi establish the following characterization of shadowing for bilateral weighted backward shifts. \begin{thm} [\cite{BernardesMessaoudiETDS2020}: Theorem 18] \label{theoSHADBW} Let $X=\ell^p({\mathbb Z})$ $(1 \leq p < \infty)$ or $X=c_0({\mathbb Z})$ and consider a bounded weight sequence $w=\{w_n\}_{n \in \mathbb Z}$ with $\inf _{n \in \mathbb Z} \vert w_n \vert >0$. 
Then, the bilateral weighted backward shift $B_w:X \longrightarrow X$ has the shadowing property if and only if one of the following conditions holds: \begin{itemize} \item[a)]{$\lim_{n \rightarrow \infty}( \sup _{k \in {\mathbb Z}} \vert w_k \cdots w_{k+n}\vert ^{\frac{1}{n}} )<1;$} \item[b)]{$\lim_{n \rightarrow \infty}( \inf_{k \in {\mathbb Z}} \vert w_{k} \cdots w_{k+n}\vert^{\frac{1}{n}} )>1;$} \item[c)]{$\lim_{n \rightarrow \infty}(\sup _{k \in {\mathbb N}} \vert w_{-k} \cdots w_{-k-n}\vert^{\frac{1}{n}} )<1$ and \\ $\lim_{n \rightarrow \infty}(\inf_{k \in {\mathbb N}} \vert w_{k} \cdots w_{k+n}\vert^{\frac{1}{n}} )>1.$ } \end{itemize} \end{thm} \subsection{Hyperbolicity and Generalized Hyperbolicity} A fundamental notion in linear dynamics is that of {\em hyperbolicity}. \begin{defn} An invertible operator $T$ is said to be {\em hyperbolic} if $\sigma (T) \cap {\mathbb T}= \emptyset.$ \end{defn} It is known \cite{EisenbergHedlundPJM1970,HedlundPJM1971} that $T$ is uniformly expansive if and only if $\sigma_a(T)\cap {\mathbb T}=\emptyset$. Hence, every invertible hyperbolic operator is uniformly expansive and the converse, in general, is not true \cite{BernardesCiriloDarjiMessaoudiPujalsJMAA2018, EisenbergHedlundPJM1970}. There is an equivalent useful formulation of hyperbolic operator which does not use the spectrum of the operator. It is classical that $T$ is hyperbolic if and only if there is a splitting $X = X_s \oplus X_u$, $T = T_s \oplus T_u$ (the {\em hyperbolic splitting} of $T$), where $X_s$ and $X_u$ are closed $T$-invariant subspaces of $X$ (the {\em stable} and the {\em unstable subspaces} for $T$), $T_s = T_{|_{X_s}}$ is a {\em proper contraction} (i.e., $\|T_s\| < 1$), $T_u = T_{|_{X_u}}$ is invertible and it is a {\em proper dilation} (i.e., $\|T_u^{-1}\| < 1$). 
The above reformulation of hyperbolicity and the negative solution of the problem of whether every operator with the shadowing property is hyperbolic (\cite{BernardesCiriloDarjiMessaoudiPujalsJMAA2018}: Theorem~B) led to the following notion of generalized hyperbolicity \cite{BernardesCiriloDarjiMessaoudiPujalsJMAA2018, BernardesMessaoudiETDS2020, CiriloGollobitPujals2020}. \begin{defn} [\cite{CiriloGollobitPujals2020}: Definition 1] \label{DEFGH} Let $T$ be an invertible operator on a Banach space $X$. If $X=M\oplus N$, where $M$ and $N$ are closed subspaces of $X$ with $T(M)\subset M$ and $T^{-1}(N) \subset N$, and $T_{|_M}$ and $T^{-1}_{|_N}$ are proper contractions, then $T$ is said to be {\em generalized hyperbolic}. \end{defn} The following corollary ties this new concept to the shadowing property. \begin{cor}[\cite{BernardesCiriloDarjiMessaoudiPujalsJMAA2018}: Corollary 8] \label{Cor1} Every invertible generalized hyperbolic operator $T$ on a Banach space $X$ has the shadowing property. \end{cor} It has long been known that hyperbolicity and the shadowing property are equivalent in special cases such as finite-dimensional Banach spaces and normal operators on Hilbert spaces \cite{MazurFDE2000, OmbachUIAM1994}. Recently, Bernardes and Messaoudi \cite{BernardesMessaoudiETDS2020} gave the precise conditions under which they are equivalent. \begin{thm}[\cite{BernardesMessaoudiETDS2020}: Theorem 1] For any invertible operator $T$ on a Banach space $X$, the following are equivalent: \begin{enumerate} \item{$T$ is hyperbolic;} \item{$T$ is expansive and has the shadowing property.} \end{enumerate} \end{thm} Returning to invertible bilateral weighted shifts, we summarize the known results in the following characterizations of hyperbolicity and generalized hyperbolicity. 
\begin{thm} Let $X=\ell^p({\mathbb Z})$ $(1 \leq p < \infty )$ or $X=c_0({\mathbb Z}),$ and consider a weight sequence $w=\{w_n\}_{n \in \mathbb Z}$ with $\inf _{n \in {\mathbb Z}} \vert w_n \vert >0.$ Then, \begin{enumerate} \item $B_w$ is hyperbolic if and only if a) or b) of Theorem~\ref{theoSHADBW} are satisfied. \item $B_w$ is generalized hyperbolic if and only if it has the shadowing property. \end{enumerate} \end{thm} \begin{proof} Statement (1) is rather well-known. For example, see Remark 35 in \cite{BernardesCiriloDarjiMessaoudiPujalsJMAA2018}. For Statement (2) we have already discussed above that generalized hyperbolic operators have the shadowing property. If a bilateral weighted backward shift has the shadowing property, then, using Theorem~\ref{theoSHADBW}, it can be easily shown that $B_w$ has a splitting as in Definition~\ref{DEFGH}, namely, we let \begin{align*} M &= \{ \{x_n\}_{n \in \mathbb{Z}} \in \ell^p({\mathbb Z}): x_n = 0 \ \ \forall n \ge 0\} \\ N &= \{ \{x_n\}_{n \in \mathbb{Z}} \in \ell^p({\mathbb Z}): x_n = 0 \ \ \forall n <0\}. \end{align*} \end{proof} We put these concepts in the following diagram to have a clear picture of the relationships between them. \[ \begin{tikzcd} T \text{ hyperbolic} \arrow[d, Rightarrow] \arrow[r, Rightarrow] & T \text{ generalized hyperbolic} \arrow[d, Rightarrow] \\ T \text{ unif. expansive} \arrow[d, Rightarrow] & T \text{ shadowing} \\ T \text{ expansive} \\ \end{tikzcd} \] \subsection{Composition Operators} Our goal is to investigate the notions of generalized hyperbolicity and the shadowing property in the context of composition operators on $L^p$-spaces. We use the basic set up from \cite{BayartDarjiPiresJMAA2018, BernardesDarjiPiresMM2020}. 
\begin{defn}\label{compodyn} A {\em composition dynamical system} is a quintuple $(X,{\mathcal B},\mu, f, T_f)$ where \begin{enumerate} \item $(X,{\mathcal B},\mu)$ is a $\sigma$-finite measure space, \item $f : X \to X$ is an injective {\em bimeasurable transformation}, i.e., $f(B) \in {\mathcal B}$ and $f^{-1}(B) \in {\mathcal B}$ for every $B \in {\mathcal B}$, \item there is $c > 0$ such that \begin{equation}\label{condition} \mu(f^{-1}(B)) \leq c \mu(B) \ \textrm{ for every } B \in {\mathcal B}, \tag{$\star$} \end{equation} \item $T_f: L^p(X) \rightarrow L^p(X) $, $1 \le p <\infty$, is the {\em composition operator} induced by $f$, i.e., \[T_f : \varphi \mapsto \varphi \circ f.\] \end{enumerate} \end{defn} It is well-known that (\ref{condition}) guarantees that $T_f$ is a bounded linear operator. Moreover, if $f$ is surjective and $f^{-1}$ satisfies (\ref{condition}), then $T_{f^{-1}}$ is a well-defined bounded linear operator and $T^{-1}_f = T_{f^{-1}}$. We refer the reader to the book \cite{SinghManhas1993} for a detailed exposition on composition operators. \subsection{Dissipative Systems and Bounded Distortion} Characterizing the shadowing property and generalized hyperbolicity for composition operators seems complicated. We are able to give an explicit characterization in the setting of a dissipative measure space. Even in this setting we need an additional condition. Below we give relevant definitions and recall how dissipative systems naturally arise from Hopf decomposition of general measurable systems. Throughout this paper all measure spaces are $\sigma$-finite. \begin{defn}\label{nullNS} A measurable transformation $f: X \rightarrow X$ on the measure space $(X, {\mathcal B}, \mu)$ is called {\em nonsingular} if, for any $B \in \mathcal B$, $\mu(f^{-1}(B))=0$ if and only if $\mu(B)=0$. \end{defn} We point out here that, if $f$ and $f^{-1}$ satisfy (\ref{condition}), then $f$ is nonsingular. Now we recall the Hopf Decomposition Theorem. 
\begin{thm}[Hopf, \cite{AaronsonMSM1997,Krengel1985}] Let $(X, {\mathcal B}, \mu)$ be a measure space and $f: X \rightarrow X$ be a nonsingular transformation. Then, $X$ is the union of two disjoint invariant sets ${\mathcal C}(f)$ and ${\mathcal D}(f)$, called the conservative and dissipative parts of $f$, respectively, satisfying the following conditions. \begin{enumerate} \item For all $B \subseteq {\mathcal C}(f)$ with $\mu(B) >0$, there is $n >0$ such that $\mu(B \cap f^{-n}( B)) >0$. \item ${\mathcal D}(f)$ is the pairwise disjoint union of $\{f^n(W)\}_{n \in \mathbb{Z}}$ for some $W \in {\mathcal B}$ i.e., ${\mathcal D}(f)= \dot{\cup}_{k=-\infty}^{+ \infty} f^k (W)$. \end{enumerate} \end{thm} In above, the set $W$ is called a {\em wandering set of $f$}, i.e., $\{f^n(W)\}_{n \in \mathbb{Z}}$ are pairwise disjoint. In general, $\mu(W)$ does not have to be finite. Based on Hopf Decomposition Theorem, we use the following definition of dissipative suitable for our purpose. \begin{defn} \label{dissipcompodyn} A measurable dynamical system $(X,{\mathcal B},\mu, f)$ is called a {\em dissipative system} if $X = \dot {\cup} _{k=-\infty}^{+ \infty} f^k (W)$ for some $W \in {\mathcal B}$ with $0 < \mu (W) < \infty$. We will often say that the system is {\em generated by $W$.} \end{defn} We now introduce a special type of dissipative system involving the notion of bounded distortion. It occurs naturally in various places, e.g., see \cite{VianaOliveira2016}. In the sequel, we let ${\mathcal B}(W) =\{ B \cap W, B \in {\mathcal B} \}.$ \begin{defn} \label{defnBD} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system generated by $W$. We say that $f$ is of {\em bounded distortion on $W$} if there exists $K>0$ such that \begin{equation}\label{conditionbd} \dfrac{1}{K} \mu(f^k(W))\mu(B) \leq \mu(f^k (B))\mu (W) \leq K \mu(f^k(W))\mu(B), \tag{$\Diamond$} \end{equation} for all $k \in \mathbb Z$ and $B \in {\mathcal B}(W)$. 
In the above case, we will say that $(X,{\mathcal B},\mu, f)$ is a {\em dissipative system of bounded distortion.} \end{defn} \begin{prop} \label{diststar} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system of bounded distortion generated by $W$. Then, the following are true. \begin{enumerate} \item There is a constant $H>0$ such that, for all $B \in {\mathcal B}(W)$ with $\mu(B)> 0$ and all $s, t \in \mathbb{Z}$, we have \begin{equation} \label{generalbd} \dfrac{1}{H} \dfrac{\mu(f^{t+s}(W))}{\mu(f^s(W))} \leq \dfrac{\mu(f^{t+s} (B))}{\mu (f^s(B))} \leq H \dfrac{\mu(f^{t+s}(W))}{\mu(f^s(W))}. \tag{$\Diamond \Diamond$} \end{equation} \item If $\sup \left \{\frac{\mu(f^{k-1}(W))}{\mu(f^{k}(W))}, \frac{\mu(f^{k+1}(W))}{\mu(f^{k}(W))}: k \in {\mathbb{Z}} \right \}$ is finite, then $f$ and $f^{-1}$ satisfy Condition (\ref{condition}). \end{enumerate} \end{prop} \begin{proof} To prove the first part, we note that by Condition (\ref{conditionbd}) we have that $\mu(B) =0$ if and only if $\mu(f^k(B)) =0$ for all $k \in \mathbb{Z}$. Hence, Condition (\ref{generalbd}) is well-defined for all $s, t \in \mathbb{Z}$. Let $K$ be the constant associated with the fact that $f$ is of bounded distortion on $W$. Then, for each $s,t \in \mathbb{Z}$ \begin{eqnarray*} \dfrac{\mu(f^{t+s}(W))}{\mu(f^s(W))} & = & \dfrac{\mu(f^{t+s}(W))}{\mu(W)} \dfrac{\mu (W)}{\mu(f^s(W))} \\ & \leq & K^2 \dfrac{\mu(f^{t+s}(B))}{\mu(B)} \dfrac{\mu (B)}{\mu(f^s(B))} \\ & = & K^2 \dfrac{\mu(f^{t+s}(B))}{\mu(f^s(B))},\\ \end{eqnarray*} and, analogously on the other side, we have that $\dfrac{\mu(f^{t+s}(W))}{\mu(f^s(W))} \geq \dfrac{1}{K^2} \dfrac{\mu(f^{t+s}(B))}{\mu(f^s(B))}.$ Setting $H=K^2$ completes the proof of the first part. For the second part, we will show that $f$ satisfies Condition (\ref{condition}). The proof for $f^{-1}$ is analogous. Let $M=\sup \left \{\frac{\mu(f^{k-1}(W))}{\mu(f^{k}(W))}, \frac{\mu(f^{k+1}(W))}{\mu(f^{k}(W))}: k \in {\mathbb{Z}} \right \}$. 
Let $A \in {\mathcal B}$ and set $A_k = A \cap f^k(W)$. By the countable additivity property of measures, it suffices to show Condition (\ref{condition}) for $A_k$. If $\mu (A_k)=0$, then applying (\ref{conditionbd}) to $B = f^{-k}(A_k) \subseteq W$ and $k$, we have that $\mu(f^{-k}(A_k)) =0$. Hence $\mu(f^l (f^{-k}(A_k))) =0$ for all $l \in \mathbb{Z}$ and, in particular, $\mu (f^{-1}(A_k))= 0$. For $\mu (A_k) > 0$, applying the right side of Condition ({\ref{generalbd}}) to $B= f^{-k}(A_k) \subseteq W$, $s=k$ and $t=-1$, we obtain that \[\dfrac{\mu(f^{-1} (A_k))}{\mu(A_k)} \leq H \dfrac{\mu(f^{-1+k}(W))}{\mu(f^k(W))} \leq H M.\] Letting $c = HM$, we have that \[\mu(f^{-1} (A_k)) \leq c \mu(A_k), \ \ \forall k \in \mathbb{Z}, \] verifying Condition (\ref{condition}). \end{proof} For the following, $\dfrac{d\mu(f^{k})}{d\mu}$ denotes the Radon-Nikodym derivative of $\mu(f^{k})$ with respect to $\mu$. \begin{prop}[Bounded RN Condition] \label{PROPBRN} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system generated by $W$. Let ${\rho}_{k} = \dfrac{d\mu(f^{k})}{d\mu}$, $m_{k} = \underset{ x \in W}{\mathrm{ess\,inf}} \ {\rho}_{k} (x) $, and $M_{k} = \underset{ x \in W}{\mathrm{ess\,sup}} \ {\rho}_{k} (x)$. If $\left \{\frac{M_k}{m_k} \right \} _{k \in \mathbb{Z}}$ is bounded, then $f$ is of bounded distortion on $W$. \end{prop} \begin{proof} Let $K$ be a bound on $\left \{\frac{M_k}{m_k} \right \} _{k \in \mathbb{Z}}$. We prove that Condition $(\Diamond)$ holds. If $B \in {\mathcal B}(W)$ with $\mu(B) =0$, then Condition $(\Diamond)$ clearly holds as all Radon-Nikodym derivatives are bounded above. Hence, let us consider the case $\mu(B) >0$. For every $k \in {\mathbb{Z}}$ and $B \in {\mathcal B}(W)$, \[\mu(f^{k}(B)) = \int_{B} {\rho}_{k} d \mu \leq \int_{B} M_k d \mu = M_k \mu(B) \] and \[\mu(f^{k}(W)) = \int_{W} {\rho}_{k} d \mu \geq \int_{W} m_k d \mu = m_k \mu(W). 
\] Dividing the two inequalities we get \[\frac{\mu(f^{k}(B))}{\mu(f^{k}(W))} \leq \frac{M_k}{m_k} \frac{\mu(B)}{\mu(W)} \le K \frac{\mu(B)}{\mu(W)} \] and, on the other side, \[\frac{\mu(f^{k}(B))}{\mu(f^{k}(W))} \geq \frac{m_k}{M_k} \frac{\mu(B)}{\mu(W)} \ge \frac{1}{K} \frac{\mu(B)}{\mu(W)}. \] Putting them together, we have that \[\frac{1}{K} \frac{\mu(f^k(W))}{\mu(W)} \leq \frac{\mu(f^{k}(B))}{\mu(B)} \leq K \frac{\mu(f^{k}(W))}{\mu(W)},\] i.e., Condition $(\Diamond)$ holds. \end{proof} \section{Main Results and Examples} \subsection{Shadowing Results} Throughout this subsection, we assume that $T_f$ is a well-defined invertible operator, i.e., functions $f$ and $f^{-1}$ satisfy (\ref{condition}). The following three conditions are essential in the characterization of the shadowing property. As the formulas are long, we give them names to avoid writing them repeatedly. \begin{defn} Let $(X,{\mathcal B},\mu, f)$ be a measurable system. We say that {\em Conditions $\hc{}$, $\hd{}$ and $\gh{}$ hold}, respectively, when the following are true: \begin{equation}\label{hc} \uplim_{n \rightarrow \infty} \sup _{k \in {\mathbb Z}} {\left (\frac{\mu(f^{k}(W))}{\mu(f^{k+n}(W))}\right )}^{\frac{1}{n}} <1 \tag*{${\hc{}}$} \end{equation} \begin{equation}\label{hd} \lowlim_{n \rightarrow \infty} \inf _{k \in {\mathbb Z}} {\left (\frac{\mu(f^{k}(W))}{\mu(f^{k+n}(W))} \right)}^{\frac{1}{n}} >1 \tag*{$\hd{}$} \end{equation} \begin{equation}{\label{gh} \uplim_{n \rightarrow \infty} \sup _{k \in -{\mathbb N}_{0}} { \left (\frac{\mu(f^{k-n}(W))}{\mu(f^{k}(W))} \right )}^{\frac{1}{n}} < 1 \ \ \& \ \ \lowlim_{n \rightarrow \infty} \inf_{k \in {\mathbb N_{0}} } {\left (\frac{\mu(f^{k}(W))}{\mu(f^{k+n}(W))}\right )}^{\frac{1}{n}} >1 \tag*{$\gh{}$}} \end{equation} \end{defn} We begin with sufficient conditions on the measurable system $(X,{\mathcal B},\mu, f)$ which guarantee the shadowing property of $T_f$. 
\begin{manualtheorem}{SS}[Shadowing Sufficiency]\label{thmSS} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system of bounded distortion generated by $W$. Then the following hold. \begin{enumerate} \item If Condition~$\hc{}$ is satisfied, then $T_f$ is a contraction. \item If Condition~$\hd{}$ is satisfied, then $T_f$ is a dilation. \item If Condition~$\gh{}$ is satisfied, then $T_f$ is a generalized hyperbolic operator. \end{enumerate} Hence, $T_f$ has the shadowing property in all three cases. \end{manualtheorem} Below we prove the necessary condition for shadowing. \begin{manualtheorem}{SN}[Shadowing Necessity]\label{thmSN} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system of bounded distortion generated by $W$. If the composition operator $T_f$ has the shadowing property, then one of Conditions~$\hc{}$, $\hd{}$ or $\gh{}$ holds. \end{manualtheorem} Putting Theorems~\ref{thmSS} and~\ref{thmSN} together, we have the following characterization of shadowing. \begin{manualcor}{SC}[Shadowing Characterization]\label{CorSC} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system of bounded distortion generated by $W$. Then the following are equivalent. \begin{enumerate} \item The composition operator $T_f$ has the shadowing property. \item One of Conditions~$\hc{}$, $\hd{}$ or $\gh{}$ holds. \end{enumerate} \end{manualcor} \begin{manualcor}{GH}[Generalized Hyperbolic Characterization]\label{CorGH} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system of bounded distortion. Then, the following are equivalent. \begin{enumerate} \item{The composition operator $T_f$ is generalized hyperbolic.} \item{The composition operator $T_f$ has the shadowing property.} \end{enumerate} \end{manualcor} \begin{proof} We recall that every generalized hyperbolic operator has the shadowing property \cite{BernardesCiriloDarjiMessaoudiPujalsJMAA2018, CiriloGollobitPujals2020}. Hence, (1) implies (2). 
That (2) implies (1) follows from applying Theorem~\ref{thmSN} first and then Theorem~\ref{thmSS}. \end{proof} The following reformulation of Corollary~\ref{CorSC} will be a useful tool for giving explicit examples of composition operators with various properties. \begin{manualtheorem}{RN}\label{thmRN} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system generated by $W$, ${\rho}_{k} = \dfrac{d\mu(f^{k})}{d\mu}$, $m_{k} = \underset{ x \in W}{\mathrm{ess\,inf}} \ {\rho}_{k} (x) $ and $M_{k} = \underset{ x \in W}{\mathrm{ess\,sup}} \ {\rho}_{k} (x) $. Furthermore, assume that $\left \{\frac{M_k}{m_k} \right \} _{k \in \mathbb{Z}}$ is bounded. Then, the following are equivalent. \begin{enumerate} \item The composition operator $T_f$ has the shadowing property. \item One of the following properties holds. \end{enumerate} \begin{equation}\label{contractionRN} \uplim_{n \rightarrow \infty} \sup _{k \in {\mathbb Z}} {\left (\frac{M_k}{m_{k+n}}\right )}^{\frac{1}{n}} <1 \tag{${\mathcal {RNC} }$} \end{equation} \begin{equation}\label{dialationRN} \lowlim_{n \rightarrow \infty} \inf _{k \in {\mathbb Z}} {\left (\frac{M_k}{m_{k+n}} \right)}^{\frac{1}{n}} >1 \tag{${\mathcal {RND} }$} \end{equation} \begin{equation}\label{GHRN} \uplim_{n \rightarrow \infty} \sup _{k \in -{\mathbb N}_{0}} { \left (\frac{M_{k-n}}{m_{k}} \right )}^{\frac{1}{n}} <1 \ \ \& \ \ \lowlim_{n \rightarrow \infty} \inf_{k \in {\mathbb N}_{0} } {\left (\frac{M_k}{m_{k+n}}\right )}^{\frac{1}{n}} >1 \tag{${\mathcal {RNGH}}$} \end{equation} Moreover, Conditions~\ref{contractionRN}, \ref{dialationRN}, \ref{GHRN} imply that $T_f$ is a contraction, a dilation, a generalized hyperbolic operator, respectively. \end{manualtheorem} \begin{rmk}\label{rmkthmrn} It will follow from the proof of Theorem~\ref{thmRN} that, in Condition~\ref{contractionRN}, Condition~\ref{dialationRN}, and Condition~\ref{GHRN}, one can exchange $M$ for $m$ and the theorem still holds. 
\end{rmk} \subsection{Shadowing Examples} Next, we show how composition operators with various properties can be constructed with ease using standard measures and probability distributions on ${\mathbb R}$. For the next four examples, we will be working with $X = {\mathbb R}$, ${\cal B}$ the collection of Borel subsets of ${\mathbb R}$, and $f(x) = x+1$. Note that, independent of the measure $\mu$ we choose on ${\mathbb R}$, we get a dissipative system generated by $W = [0,1)$. Moreover, all of our $\mu$ will be given by a density, i.e., \[\mu(B) = \int _B h d\lambda,\] where $\lambda$ is the Lebesgue measure on ${\mathbb R}$ and $h$ is some non-negative Lebesgue integrable function. As \[ \frac{d (\mu f^i)}{d\lambda} = \frac{d (\mu f^i)}{d\mu} \cdot \frac{d\mu }{d\lambda} \] and \[ \frac{d (\mu f^i)}{d\lambda} (x) = h(x+i), \ \ \ \ \ \ \ \ \ \ \ \ \frac{d\mu }{d\lambda} (x) = h(x), \] we have that \[ \frac{d (\mu f^i)}{d\mu} (x) = \frac{h(x+i)}{h(x)}. \] \begin{exmp}[Contraction $T_f$ ] \label{exmpcontraction} Let $\mu$ be the measure whose density is $h(x) = e^ x$. Then, $\frac{d (\mu f^i)}{d\mu} (x) = \frac{h(x+i)}{h(x)} = e^i$. Applying Theorem~\ref{thmRN}, we obtain that \[ \uplim_{n \rightarrow \infty} \sup _{k \in {\mathbb Z}} {\left (\frac{M_k}{m_{k+n}}\right )}^{\frac{1}{n}} = \uplim_{n \rightarrow \infty} \sup _{k \in {\mathbb Z}} {\left (\frac{e^k}{e^{k+n}}\right )}^{\frac{1}{n}} =\frac{1}{e},\] implying that $T_f$ is a contraction. \end{exmp} \begin{exmp} [Dilation $T_f$ ]\label{exmpdilation} An analogous calculation to the above shows that, if we let $\mu$ be a measure whose density is $e^{-x}$, then $T_f$ is a dilation. \end{exmp} \begin{exmp}[Generalized Hyperbolic $T_f$] \label{Lap} In this example, we use Laplace distribution. 
Recall that the Laplace distribution is defined by the following probability density function \[h(x, b, \lambda)=\frac{1}{2b} e^{-\frac{\vert x- \lambda\vert}{b}},\] where $\lambda \in \mathbb{R}$ and $b>0$ are two parameters. For the sake of simplicity, we use the standard Laplace distribution, i.e., $\lambda=0$ and $b=1$. Hence, we let $\mu$ be the probability measure whose density is given by \[h(x)=\frac{1}{2} e^{-\vert x\vert}.\] Using the fact that $\frac{d (\mu f^i)}{d\mu} (x) = \frac{h(x+i)}{h(x)} = \frac{e^{-|x+i|}}{e^{-|x|} }$, we have \[ M_i = e^{-i} \ \ \ \ m_i = e^{-i} \ \ \ \ \forall i \ge 0\] \[ M_i = e^{2+i} \ \ \ \ m_i = e^{i} \ \ \ \ \forall i < 0.\] For $n \in {\mathbb N}$, we have that \[ \frac{M_k}{m_{k+n}} = e^n \ \mbox { for } k \ge 0 \ \ \ \ {and} \ \ \ \ \frac{M_{k-n}}{m_{k}} = e^{2-n} \ \mbox { for } k \leq 0. \] Using the above estimates, it is readily verified that Condition~\ref{GHRN} holds and, hence, $T_f$ is generalized hyperbolic. \end{exmp} Our next example shows that our techniques can also be used to show that certain operators $T_f$ do not have the shadowing property. \begin{exmp}[Non-Shadowing $T_f$] The standard Cauchy distribution is a continuous distribution on $\mathbb{R}$, defined by the following probability density function \[ h(x)= \frac{1}{\pi(1+x^2)}. \] As earlier, let $\mu$ be the probability measure whose density is $h$. We will show that none of Condition~\ref{contractionRN}, Condition~\ref{dialationRN}, nor Condition~\ref{GHRN} is satisfied, yielding that $T_f$ does not have the shadowing property. Indeed, we have that \[\frac{d (\mu f^i)}{d\mu} (x) = \frac{h(x+i)}{h(x)} = \frac{1+x^2}{1+(x+i)^2}.\] This time calculating the exact values of $M_i$ and $m_i$ is a bit complicated. However, we will find appropriate bounds on $M_i$'s and $m_i$'s and this will suffice. 
Note that, for $ 0 \le x \le 1$ and $i \ge 0$, we have \[ \frac{1}{1+(i+1)^2} \le \frac{1+x^2}{1+(x+i)^2}\le \frac{2}{1+i^2}, \] implying \[ M_i \le \frac{2}{ 1+i^2} \ \ \ \ \ \ \ \ m_i \ge \frac{1}{ 1+ (i+1)^2} \ \ \ \ \forall i \ge 0.\] Similarly, for $ 0 \le x \le 1$ and $i \leq 0$, we have \[ \frac{1}{1+i^2} \le \frac{1+x^2}{1+(x+i)^2}\le \frac{2}{1+(i+1)^2}, \] implying \[ M_i \ge \frac{1}{ 1+i^2} \ \ \ \ \ \ \ \ m_i \le \frac{2}{ 1+ (i+1)^2} \ \ \ \ \forall i \leq 0.\] As Condition~\ref{contractionRN} implies the left half of Condition~\ref{GHRN} and Condition~\ref{dialationRN} implies the right half of Condition~\ref{GHRN}, it suffices to prove that both limits fail in Condition~\ref{GHRN}, in order to conclude that $T_f$ does not have the shadowing property. Observe that, for $n \in {\mathbb N}$, we have \[ \frac{M_k}{m_{k+n}} \le \frac{2[1+ (k+n+1)^2]}{1+k^2} \le 2 [1+ (k+n+1)^2], \ \ \ \ k \ge 0 \] and \[ \frac{M_{k-n}}{m_{k}} \ge \frac{1+(k+1)^2}{2[1+ (k-n)^2]} \ge \frac{1}{2[1+ (k-n)^2]}, \ \ \ \ k \leq 0. \] Hence, \[ \ \ \lowlim_{n \rightarrow \infty} \inf_{k \in {\mathbb N}_{0} } {\left ( \frac{M_k}{m_{k+n}} \right )}^{\frac{1}{n}} \le \lowlim_{n \rightarrow \infty} \inf_{k \in {\mathbb N}_{0} } {\left\{ 2 [1+ (k+n+1)^2 ]\right \}}^{\frac{1}{n}} =1,\] and \[ \uplim_{n \rightarrow \infty} \sup _{k \in -{\mathbb N}_{0}} { \left (\frac{M_{k-n}}{m_{k}} \right )}^{\frac{1}{n}} \ge \uplim_{n \rightarrow \infty} \sup _{k \in -{\mathbb N}_{0}} { \left \{\frac{1}{ 2[1+ (k-n)^2]} \right \}}^{\frac{1}{n}} =1, \] verifying that both parts of Condition~\ref{GHRN} fail and completing the proof. \end{exmp} We end this subsection of examples by commenting that our methods are flexible enough to handle a large class of examples. For example, if we want to work in higher dimensions, we may take $X= {\mathbb{R}}^{2}$, $f(x,y)= (x,y) + (1,0)$ and $W=[0,1) \times {\mathbb{R}}$. 
Then, taking different types of 2-dimensional joint density functions, we can obtain $T_f$ with various properties as in our 1-dimensional examples. \section{Shadowing Proofs} Throughout this section, $(X,{\mathcal B},\mu, f)$ is a dissipative system and $T_f$ is the associated invertible composition operator on $L^p(X)$. \subsection{Proof of Theorem~\ref{thmSS}} We prove a series of propositions which lead to the proof of Theorem~\ref{thmSS}. We first introduce some notation and terminology to facilitate our proofs. \begin{defn} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system generated by $W$ and $\varphi \in L^p(X)$. Then, $\varphi = \varphi_+ + \varphi_-$, where \[\varphi_+(x)= \left\{ \begin{array}{ll} 0 & \mbox{ if } x \in \cup_{k=0}^{\infty}f^{k}(W)\\ \varphi(x) & \mbox{ otherwise, } \\ \end{array} \right.\] and, similarly, $\varphi_ -$ is zero on $\cup_{k=1}^{\infty}f^{-k}(W)$ and $\varphi$ elsewhere. Let $L_+ = \{\varphi_+: \varphi \in L^p(X)\}$ and $L_-= \{\varphi_-: \varphi \in L^p(X)\}$. We note that $L^p(X) = L_+ \oplus L_-$ and $T_f(L_+) \subseteq L_+$ and $T_f^{-1}(L_-) \subseteq L_-$. \end{defn} \begin{defn} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system generated by $W$ and $K, t>0$. 
Let $\uc{(K, t)}$ and $\ud{(K, t)}$ be the set of all $\varphi \in L^p(X)$ which satisfy the following conditions, respectively: \begin{equation}\label{uc} \sup _{k \in {\mathbb Z}} {\left (\frac{\int_X \vert \varphi \vert ^p \circ f^{-k} d\mu}{\int_X \vert \varphi \vert ^p \circ f^{-(k+n)} d\mu}\right )} \leq K t^n \ \ \ \ \ \forall n \in \mathbb{N} \tag*{$\uc{}$} \end{equation} \begin{equation}\label{ud} \inf _{k \in {\mathbb Z}} {\left (\frac{\int_X \vert \varphi \vert ^p \circ f^{-k} d\mu}{\int_X \vert \varphi \vert ^p \circ f^{-(k+n)} d\mu} \right)} \geq K t^n \ \ \ \ \ \forall n \in \mathbb{N} \tag*{$\ud{}$} \end{equation} We let $\ughp{(K,t)}$ and $\ughm{(K,t)}$ consist of those $\varphi$ in $L_+$ and $L_-$, respectively, which satisfy the following conditions: \begin{equation*}\label{ughp} \sup _{k \in -{\mathbb N}_{0}} \left ( \frac{\int_X \vert \varphi \vert ^p \circ f^{-(k-n)} d\mu}{\int_X \vert \varphi \vert ^p \circ f^{-k} d\mu} \right ) \leq K t^n \ \ \ \ \ \forall n \in \mathbb{N} \tag*{$\ughp{}$} \end{equation*} \begin{equation*}\label{ughm} \inf_{k \in {\mathbb N}_{0} } {\left (\frac{\int_X \vert \varphi \vert ^p \circ f^{-k} d\mu}{\int_X \vert \varphi \vert ^p \circ f^{-(k+n)} d\mu}\right )} \geq K \frac{1}{t^n} \ \ \ \ \ \forall n \in \mathbb{N} \tag*{$\ughm{}$}.\\ \end{equation*} \end{defn} The next simple fact follows from the definitions of $\uplim$ and $\lowlim$. \begin{prop}\label{propbasic} Let $\{a_n\}_{n \in \mathbb{N}}$ be a sequence of non-negative real numbers and $t >0$. Then, the following hold. \begin{enumerate} \item {If $\uplim _{n \rightarrow \infty} a_n ^{\frac{1}{n}}<t,$ then there exists $K>0$ such that $a_n \leq Kt^n$ for every $n \in \mathbb{N}$.} \item {If $\lowlim _{n \rightarrow \infty} a_n ^{\frac{1}{n}}>t,$ then there exists $K>0$ such that $a_n \geq Kt^n$ for every $n \in \mathbb{N}$.} \end{enumerate} \end{prop} \begin{prop} \label{proprcW} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system generated by $W$. 
Then \begin{enumerate} \item $\hc{}$ holds $\Leftrightarrow$ $\chi_W \in \uc{(K, t)}$ for some $K >0$ and $t<1$. \item $\hd{}$ holds $\Leftrightarrow$ $\chi_W \in \ud{(K,t)}$ for some $K >0$ and $t>1$. \item $\gh{}$ holds $\Leftrightarrow$ there exist $K >0$ and $t<1$ such that $\chi_W \in \ughm{(K,t)}$ and $\chi_{f^{-1}(W)} \in \ughp{(K,t)}$. \end{enumerate} \end{prop} \begin{proof} We first prove (1). ($\Rightarrow$) Suppose that $\hc{}$ holds. Applying Proposition \ref{propbasic} to the sequence $a_n=\sup _{k \in \mathbb{Z}} \frac{\mu(f^k(W))}{\mu (f^{k+n}(W))}$ and by the fact that $\mu(f^{s}(B))=\int_X \vert \chi_{B} \vert ^p \circ f^{-s} d\mu$, for $s \in \mathbb{Z}$, $B \in {\mathcal B}$, we have that $\chi _W \in \uc{(K, t)}$ for some $K >0$ and $t<1$. ($\Leftarrow$) Let $K >0$ and $t<1$ be such that $\chi _W \in \uc{(K, t)}$, that is, \[ \sup _{k \in {\mathbb Z}} {\left (\frac{\int_X \vert \chi _W\vert ^p \circ f^{-k} d\mu}{\int_X \vert \chi _W\vert ^p \circ f^{-(k+n)} d\mu}\right )} \leq K t^n. \] Then, \begin{eqnarray*} \uplim _{n \rightarrow \infty} \sup _{k \in \mathbb{Z}} \left(\frac{\mu(f^k(W))}{\mu (f^{k+n}(W))}\right)^{\frac{1}{n}} &=& \uplim _{n \rightarrow \infty}\sup _{k \in {\mathbb Z}} {\left (\frac{\int_X \vert \chi _W\vert ^p \circ f^{-k} d\mu}{\int_X \vert \chi _W\vert ^p \circ f^{-(k+n)} d\mu}\right )}^{\frac{1}{n}} \\ & \leq & \uplim _{n \rightarrow \infty} K^{\frac{1}{n}}t \\ & = & t <1, \end{eqnarray*} i.e. condition $\hc{}$ holds. (2) This proof is analogous to the proof of (1). (3) ($\Rightarrow$) Suppose that $\gh{}$ holds. We note that in $\gh{}$ we can replace $W$ by $f^{-1}(W)$ and the condition still holds. We use this in the first part of $\gh{}$. 
Applying Proposition \ref{propbasic} as before, we obtain $K >0$ and $0 < t <1$ such that, for every $n \in \mathbb{N}$, \[\sup_{k \in - {\mathbb N}_{0}} \frac{\mu(f^{k-n}(f^{-1}(W)))}{\mu (f^{k}(f^{-1}(W)))} \leq K t^{n}\] and \[\inf _{k \in {\mathbb N}_{0}} \frac{\mu(f^{k}(W))}{\mu (f^{k+n}(W))} \geq K {\frac{1}{t^n}}.\] As \[\inf _{k \in {\mathbb N}_{0}}\frac{\int_X \vert \chi _W\vert ^p \circ f^{-k} d\mu}{\int_X \vert \chi _W\vert ^p \circ f^{-(k+n)} d\mu} = \inf _{k \in {\mathbb N}_{0}} \frac{\mu(f^{k}(W))}{\mu (f^{k+n}(W))} \ge K {\frac{1}{t^n}},\] we have that $\chi_W \in \ughm{(K,t)}$. Similarly, as \begin{eqnarray*} \sup_{k \in - {\mathbb N}_{0}}\frac{\int_X \vert \chi _{f^{-1}(W)}\vert ^p \circ f^{-(k-n)} d\mu}{\int_X \vert \chi _{f^{-1}(W)} \vert ^p \circ f^{-k} d\mu} = \sup_{k \in - {\mathbb N}_{0}} \frac{\mu(f^{k-n}(f^{-1}(W)))}{\mu (f^{k}(f^{-1}(W)))} \leq K t^{n} , \end{eqnarray*} we have that $\chi_{f^{-1}(W)} \in \ughp{(K,t)}$. \\ ($\Leftarrow$) This proof is straightforward and follows as in the proof of (1). \end{proof} The next proposition follows from the definitions. \begin{prop}\label{prop:compfj} The following are true. \begin{itemize} \item Let ${\mathcal U} (K,t) \in \{\uc{(K,t)}, \ud{(K,t)}\}$. If $\varphi \in {\mathcal U}(K,t)$ then $\varphi \circ f^j \in {\mathcal U}(K,t)$, $j \in \mathbb{Z}$. \item If $\varphi \in \ughp{(K,t)}$ then $\varphi \circ f^j \in\ughp{(K,t)}$, $j \ge 0$. \item If $\varphi \in \ughm{(K,t)}$ then $\varphi \circ f^j \in\ughm{(K,t)}$, $j \le 0$. \end{itemize} \end{prop} For the sake of notational convenience, for the next two propositions, we let ${\mathcal U} (K,t) \in \{\uc{(K,t)}, \ud{(K,t)}, \ughp{(K, t)},\ughm{(K, t)}\}$. The first one simply follows from the definitions. \begin{prop}\label{prop:scalar} If $\varphi \in {\mathcal U}(K,t)$ and $a \in \mathbb{R} \setminus \{0\}$, then $a \cdot \varphi \in {\mathcal U}(K,t)$. 
\end{prop} \begin{prop} \label{finitesum} If $\varphi _1, \varphi _2 \in {\mathcal U}(K,t)$ with disjoint supports, then $\varphi _1 + \varphi _2 \in {\mathcal U}(K,t)$. \end{prop} \begin{proof} We do the proof for ${\mathcal U}(K,t) = \uc{(K,t)}$. The proofs for the rest are analogous. Let $\varphi_1$ and $\varphi_2$ be elements of $\uc{(K,t)}$, that is, \[\sup _{k \in {\mathbb Z}} {\left (\frac{\int_X \vert \varphi_i \vert ^p \circ f^{-k} d\mu}{\int_X \vert \varphi_i \vert ^p \circ f^{-(k+n)} d\mu}\right )} \leq Kt^n,\] for $i=1,2$, with disjoint supports. Then, for each $k \in {\mathbb Z}$ and $i=1,2$, \begin{equation} \int_X \vert \varphi_i \vert ^p \circ f^{-k} d\mu \leq Kt^{n} \int_X \vert \varphi_i \vert ^p \circ f^{-(k+n)} d\mu.\tag*{$(\bullet)$} \end{equation} As $\varphi_1$ and $\varphi_2$ have disjoint supports, we have that, for $m \in \mathbb Z$, \[\int_X \vert \varphi_1 + \varphi_2 \vert ^p \circ f^{-m} d\mu = \int_X \vert \varphi_1 \vert ^p \circ f^{-m} d\mu + \int_X \vert \varphi_2 \vert ^p \circ f^{-m} d\mu.\] Now, by adding term by term in the inequalities $(\bullet)$, we obtain that, for each $k \in {\mathbb Z}$, \[\int_X \vert \varphi_1 + \varphi_2 \vert ^p \circ f^{-k} d\mu \leq Kt^{n} \int_X \vert \varphi_1 + \varphi_2 \vert ^p \circ f^{-(k+n)} d\mu.\] Therefore, \[ \sup_{k \in {\mathbb Z}}{\left (\frac{\int_X \vert \varphi_1 + \varphi_2 \vert ^p \circ f^{-k} d\mu}{\int_X \vert \varphi_1 + \varphi_2 \vert ^p \circ f^{-(k+n)} d\mu}\right)} \leq Kt^n.\] Hence, it follows that $\varphi_1 + \varphi_2 \in \uc{(K,t)}$. \end{proof} \begin{prop} \label{prop:subW} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system of bounded distortion, generated by $W$. Let $H$ be the bounded distortion constant from Proposition~\ref{diststar} and $j \in \mathbb{Z}$. \begin{enumerate} \item Let ${\mathcal U} (K,t) \in \{\uc{(K,t)}, \ud{(K,t)}\}$. 
If $\chi_{f^j(W)}$ is in ${\mathcal U}{(K,t)}$, then $\chi_{f^j(B)}$ is in ${\mathcal U}{(HK,t)}$, for all $B \subseteq W$ with $\mu(B) >0$. \item If $\chi_{f^j(W)}$ is in $ \ughm{(K,t)}$ for $j \ge 0$, then $\chi_{f^j(B)}$ is in $\ughm{(HK,t)}$ for all $B \subseteq W $with $\mu(B) >0$. \item If $\chi_{f^j(W)}$ is in $ \ughp{(K,t)}$ for $j <0$, then $\chi_{f^j(B)}$ is in $\ughp{(HK,t)}$ for all $B \subseteq W $with $\mu(B) >0$. \end{enumerate} \end{prop} \begin{proof} We do the proof for $\uc{(K,t)}$. Proofs for the rest are analogous. Assume the hypotheses, i.e., let $\chi_{f^j(W)} \in \uc{(K,t)}$ and $B \subseteq W$ with $\mu(B) >0$. By Proposition~\ref{diststar}, there exists a constant $0<H < + \infty$ such that, for every $s,l \in \mathbb Z$, \[ \dfrac{1}{H} \dfrac{\mu(f^{l+s}(W))}{\mu(f^s(W))} \leq \dfrac{\mu(f^{l+s} (B))}{\mu (f^s(B))} \leq H \dfrac{\mu(f^{l+s}(W))}{\mu(f^s(W))}.\] Hence, in particular, for fixed $n \in \mathbb{N}$, \[ \sup _{k \in {\mathbb Z}} {\left( \frac{\mu(f^{k+j}(B))}{\mu(f^{k+j+n}(B))}\right)} \leq H \sup _{k \in {\mathbb Z}} {\left(\frac{\mu(f^{k+j}(W))}{\mu(f^{k+j+n}(W))}\right)}.\] Then, \begin{eqnarray*} \sup _{k \in {\mathbb Z}} {\left (\frac{\int_X \vert \chi_{f^j(B)} \vert ^p \circ f^{-k} d\mu}{\int_X \vert \chi_{f^j(B)} \vert ^p \circ f^{-(k+n)} d\mu}\right )} & = & \sup _{k \in {\mathbb Z}} {\left( \frac{\mu(f^{k+j}(B))}{\mu(f^{k+j+n}(B))}\right)} \\ & \leq & H \sup _{k \in {\mathbb Z}} {\left(\frac{\mu(f^{k+j}(W))}{\mu(f^{k+j+n}(W))}\right)} \\ & = & H \sup_{k \in \mathbb{Z}} \left(\frac{\int_X \vert \chi_{f^j(W)} \vert ^p \circ f^{-k} d\mu}{\int_X \vert \chi_{f^j(W)}\vert ^p \circ f^{-(k+n)} d\mu}\right) \\ & \leq & HKt^n, \end{eqnarray*} that is, $\chi_{f^j(B)}\in \uc{(HK,t)}$. \end{proof} The next proposition easily follows from the well-known fact that the set of simple functions is dense in $L^p(X)$. \begin{prop}\label{prop:simplefun} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system generated by $W$. 
Then, \begin{enumerate} \item $ \left \{ \sum_{i=0}^n a_i \chi_{B_i}: B_i \subseteq f^{j_{i}}(W),j_i \in \mathbb{Z}, \mu(B_i)>0, B_{i} \cap B_{i'} = \emptyset, i \neq i' \right \}$ is dense in $L^p(X)$. \item $ \left \{ \sum_{i=0}^n a_i \chi_{B_i}: B_i \subseteq f^{j_{i}}(W),j_i <0, \mu(B_i)>0, B_{i} \cap B_{i'} = \emptyset, i \neq i' \right \}$ is dense in $L_+$. \item $ \left \{ \sum_{i=0}^n a_i \chi_{B_i}: B_i \subseteq f^{j_{i}}(W),j_i \ge 0, \mu(B_i)>0, B_{i} \cap B_{i'} = \emptyset, i \neq i' \right \}$ is dense in $L_-$. \end{enumerate} \end{prop} \begin{prop}\label{prop:DenseUs} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system of bounded distortion, generated by $W$. \begin{enumerate} \item Let ${\mathcal U} (K,t) \in \{\uc{(K,t)}, \ud{(K,t)}\}$. If $\chi_W \in {\mathcal U}(K,t)$, then $ {\mathcal U}(HK,t) = L^p(X)$. \item If $\chi_W \in \ughm{(K,t)}$, then $\ughm{(HK,t)}=L_-$. \item If $\chi_{f^{-1}(W)} \in \ughp{(K,t)}$, then $\ughp{(HK,t)}=L_+$. \end{enumerate} \end{prop} \begin{proof} (1) First, by Proposition \ref{prop:subW}, for every $B \subseteq W$ with $\mu(B) >0$, $\chi_B$ is in ${\mathcal U}(HK, t)$. Then, by Proposition \ref{prop:compfj}, for every $i \in {\mathbb Z}$, for every $B \subseteq f^{i}(W)$ with $\mu(B) >0$, $\chi_{B}$ is in ${\mathcal U}(HK,t)$. Now, by Proposition \ref{prop:scalar} and Proposition \ref{finitesum}, \[ \left \{ \sum_{i=0}^n a_i \chi_{B_i}: B_i \subseteq f^{j_i}(W), j_i \in \mathbb{Z}, \mu(B_i)>0, B_{i} \cap B_{i'} = \emptyset, i \neq i' \right \} \subseteq {\mathcal U}(HK,t).\] Hence, by applying Proposition \ref{prop:simplefun}, and passing through limit, the conclusion follows. The proofs of (2) and (3) are similar. We only show (2). First, by Proposition \ref{prop:subW}, for every $B \subseteq W$ with $\mu(B) >0$, $\chi_B$ is in $\ughm(HK, t)$. Then, by Proposition \ref{prop:compfj}, for every $i \geq 0$, for every $B \subseteq f^{i}(W)$ with $\mu(B) >0$, $\chi_{B}$ is in $\ughm(HK,t)$. 
Now, by Proposition \ref{prop:scalar} and Proposition \ref{finitesum}, \[ \left \{ \sum_{i=0}^n a_i \chi_{B_i}: B_i \subseteq f^{j_i}(W), j_i \geq 0, \mu(B_i)>0, B_{i} \cap B_{i'} = \emptyset, i \neq i' \right \} \subseteq \ughm(HK,t).\] Hence, by applying Proposition \ref{prop:simplefun} and passing through limit, the conclusion follows. \end{proof} {\em Proof of Theorem~\ref{thmSS}.} \begin{proof} We first prove (1). As Condition~$\hc{}$ is satisfied, by Proposition \ref{proprcW}, there exist $0<t <1$ and $K>0$ such that $\chi_{W} \in \uc{(K,t)}$. By Proposition \ref{prop:DenseUs}, $\uc{(HK,t)} = L^p(X)$, i.e, for all $\varphi \in L^p(X)$, \begin{equation*} \sup _{k \in {\mathbb Z}} {\left (\frac{\int_X \vert \varphi \vert ^p \circ f^{-k} d\mu}{\int_X \vert \varphi \vert ^p \circ f^{-(k+n)} d\mu}\right )} \leq HKt^n . \end{equation*} Plugging $k =-n$ in above, we have that, for all $n \in \mathbb{N}$, \[ {\frac{\int_X \vert \varphi \vert ^p \circ f^{n} d\mu}{\int_X \vert \varphi \vert ^p d\mu}} \leq HK t^n. \] As \[\int_X \vert \varphi \vert ^p \circ f^{n} d\mu= \Vert T_{f}^{n}(\varphi)\Vert_{p}^{p},\] we have that \[\frac{\Vert T_{f}^{n}(\varphi) \Vert_{p}^{p}}{\Vert \varphi \Vert_{p}^{p}} \le HKt^n.\] Hence, \[\lim_{n \rightarrow \infty} \Vert T_{f}^{n} \Vert ^\frac{1}{n}\leq t < 1.\] As the spectral radius of $T_{f} $ is $\lim_{n \rightarrow \infty} \Vert T_{f}^{n} \Vert ^\frac{1}{n}$, we have that $T_{f}$ is a contraction. Proof of (2) is analogous to the above case. Finally, let us prove (3). Assume that $\gh{}$ holds. By Proposition \ref{proprcW}, there exist $0<t <1$ and $K >0$ such that $\chi_{W} \in \ughm{(K,t)}$ and $\chi_{f^{-1}(W)} \in \ughp{(K,t)}$. Then, by Proposition \ref{prop:DenseUs}, $\ughm{(HK,t)} = L_{-}$ and\\ $\ughp{(HK,t)}= L_{+}$. 
Hence, for all $\varphi \in L_{+}$, \[\sup _{k \in -{{\mathbb N}}_{0}} \left ( \frac{\int_X \vert \varphi \vert ^p \circ f^{-(k-n)} d\mu}{\int_X \vert \varphi \vert ^p \circ f^{-k} d\mu} \right ) \leq HK t^n.\] Plugging $k =0$ in above, we have that, for all $n \in \mathbb{N}$, \[ {\frac{\int_X \vert \varphi \vert ^p \circ f^{n} d\mu}{\int_X \vert \varphi \vert ^p d\mu}} \leq HK t^n,\] implying that, for all $\varphi \in L_{+}$, \[\frac{\Vert T_{f}^{n}(\varphi) \Vert_{p}^{p}}{\Vert \varphi \Vert_{p}^{p}} \le HKt^n.\] Hence, the spectral radius of the restriction of $T_{f}$ to $L_{+}$, ${T_f}_{|_{L_+}}$, is less than or equal to $t <1$, therefore ${T_f}_{|_{L_+}}$ is a contraction. Similarly, as $\ughm{(HK,t)} = L_{-}$, we have that for all $\varphi \in L_{-}$ \[ \inf_{k \in {\mathbb N}_{0} } {\left (\frac{\int_X \vert \varphi \vert ^p \circ f^{-k} d\mu}{\int_X \vert \varphi \vert ^p \circ f^{-(k+n)} d\mu}\right )} \geq HK \frac{1}{{t}^n}.\] Therefore, \[ \sup_{k \in {\mathbb N}_{0} } {\left (\frac{\int_X \vert \varphi \vert ^p \circ f^{-(k+n)} d\mu}{\int_X \vert \varphi \vert ^p \circ f^{-k} d\mu}\right )} \leq \frac{1}{HK} {t}^n.\] Plugging $k =0$ in above, we have that, for all $n \in \mathbb{N}$, \[ \sup_{k \in {\mathbb N}_0 } {\left (\frac{\int_X \vert \varphi \vert ^p \circ f^{-n} d\mu}{\int_X \vert \varphi \vert ^p d\mu}\right )} \leq \frac{1}{HK} {t}^n.\] As \[\int_X \vert \varphi \vert ^p \circ f^{-n} d\mu= \Vert {({T_{f}}^{-1})}^{n}(\varphi)\Vert_{p}^{p},\] we have that, for all $\varphi \in L_{-}$, \[\frac{\Vert {({T_{f}}^{-1})}^{n}(\varphi) \Vert_{p}^{p}}{\Vert \varphi \Vert_{p}^{p}} \le HK{t}^n.\] Hence, the spectral radius of the restriction of ${T_{f}}^{-1}$ to $L_{-}$, ${T_f}^{-1}_{|_{L_-}}$, is less than or equal to $t <1$, therefore ${T_{f}}^{-1}_{|_{L_-}}$ is a contraction. 
Thus, we have shown that $L^p(X) = L_+ \oplus L_-$, $T_f(L_+) \subseteq L_+$ and $T_f^{-1}(L_-) \subseteq L_-$, ${T_f}_{|_{L_+}}$ and ${T_f}_{|_{L_-}}^{-1}$ are contractions, i.e., $T_f$ is generalized hyperbolic. \end{proof} \subsection{Proof of Theorem~\ref{thmSN}} In general, a factor of a map with shadowing property does not have the shadowing property. A condition which guarantees this in the dynamics of compact metric spaces was given by Good and Meddaugh (\cite{GoodMeddaughIM2020}: Theorem 23). Below, we give a condition in the setting of linear dynamics which guarantees that factors of maps with the shadowing property have the shadowing property. Using this result and the characterization of the shadowing property for weighted backward shifts, we arrive at the proof of the main theorem in this subsection. We begin by recalling the definition of a factor in linear dynamics. \begin{defn}\label{defSC} Let $(X,S)$ and $(Y,T)$ be two linear dynamical systems. We say that {\em $T$ is a factor of $S$} if there exists a {\em factor map $\Pi$}, i.e., a linear, continuous, onto map $\Pi : X \rightarrow Y$ such that $\Pi \circ S=T \circ \Pi$. Moreover, we say that {\em $\Pi$ admits a bounded selector} if the following condition holds. \[\exists L >0 \text{ s.t., } \forall y \in Y, \exists x \in {\Pi}^{-1}(y) \text{ with }\Vert x \Vert \leq L \Vert y \Vert. \] \end{defn} \begin{lem} \label{shadowfactor} Let $(X,S)$ and $(Y,T)$ be linear dynamical systems with a factor map $\Pi: X \rightarrow Y$ that admits a bounded selector. If $S$ has the shadowing property, then so does $T$. \end{lem} \begin{proof} Let $L$ be a constant which witnesses that $\Pi$ has a bounded selector. We use the formulation of the shadowing property stated in Lemma \ref{LEM1}. Let $K$ be a constant associated with the fact that $S$ has the shadowing property. Let $\{y_n\}_{n \in \mathbb Z}$ be a bounded sequence in $Y$, with $\sup_{n \in {\mathbb Z}} \Vert y_{n} \Vert = M$. 
By hypothesis, we can take, for each $n \in {\mathbb Z}$, $x_{n} \in X$ such that $\Pi(x_{n})= y_{n}$, with $\Vert x_{n} \Vert \leq L \Vert y_{n} \Vert$. Then, $\{x_n\}_{n \in \mathbb Z}$ is a sequence in $X$ with $\| x_n\| \le LM $, $n \in \mathbb{Z}$. As $S$ has the shadowing property, there is a sequence $\{s_n\}_{n \in \mathbb Z}$ in $X$ such that \[ \sup_{n \in \mathbb Z} \Vert s_n \Vert \leq K \sup_{n \in \mathbb Z} \Vert x_n \Vert \hspace{0.3 cm}\text{ and } \hspace{0.3 cm} s_{n+1}=S(s_n) + x_n, \text{ for all $n \in \mathbb Z$.}\] Setting $t_n = \Pi (s_n)$, we have that \[\sup_{n \in \mathbb Z} \Vert t_n \Vert \leq \Vert \Pi \Vert \sup_{n \in \mathbb Z} \Vert s_{n} \Vert \leq \Vert \Pi \Vert K\sup_{n \in \mathbb Z} \Vert x_n \Vert \leq \Vert \Pi \Vert K L \Vert y_{n} \Vert.\] Moreover, applying $\Pi$ to the equation \[s_{n+1}=S(s_n) + x_n, \] we obtain that \[ t_{n+1} = T(t_{n}) + y_{n}.\] Hence, we have proved that there exists a constant $C$, namely $C= \Vert \Pi \Vert KL,$ such that, for every bounded sequence $\{y_n\}_{n \in \mathbb Z}$ in $Y$, there is a sequence $\{t_n\}_{n \in \mathbb Z}$ in $Y$ such that \[ \sup_{n \in \mathbb Z} \Vert t_n \Vert \leq C \sup_{n \in \mathbb Z} \Vert y_n \Vert \hspace{0.3 cm}\text{ and } \hspace{0.3 cm} t_{n+1}=T(t_n) + y_n, \text{ for all $n \in \mathbb Z$},\] yielding that $T$ has the shadowing property. \end{proof} \begin{lem} \label{factorBw} Let $(X,{\mathcal B},\mu, f)$ be a dissipative system of bounded distortion generated by $W$. Consider the weighted backward shift $B_w$ on $\ell^p(\mathbb{Z})$ with weights \[w_{k} = \left( \frac{\mu(f^{k-1}(W))}{\mu(f^{k}(W))}\right)^{\frac{1}{p}} .\] Then, $B_w$ is a factor of the map $T_f$ by a factor map $\Pi$ admitting a bounded selector. \end{lem} \begin{proof} As $T_f$ is an invertible composition operator, we have that $f$ and $f^{-1}$ satisfy Condition~(\ref{condition}). 
Therefore, $0 < \inf _{n \in {\mathbb Z}} |w_n| \le \sup _{n \in {\mathbb Z}} |w_n| < \infty $, implying that $B_w$ is an invertible operator. We need to find a bounded linear surjective map $\Pi: L^p(X) \rightarrow \ell ^p({\mathbb Z})$ admitting a bounded selector such that the diagram in Figure \ref{Figsemiconj2} commutes. \begin{figure} \caption{Factor map from $(L^p(X), T_f)$ to $(\ell^p({\mathbb Z}), B_w)$.} \label{Figsemiconj2} \end{figure} Let $\varphi \in L^p(X)$. Define $\Pi(\varphi) = {\bf x} = \{x_{k}\}_{k \in {\mathbb Z}}$, where \[x_{k} = \dfrac{\mu(f^{k}(W))^{\frac{1}{p}}}{\mu(W)} \int_{W} \varphi \circ f^{k} d \mu.\] It is clear that $\Pi$ is linear. Now we show that $\Pi \circ T_{f} = B_{w} \circ \Pi$, that is, the diagram in Figure \ref{Figsemiconj2} commutes. Indeed, for any $\varphi \in L^{p}(X)$, for any ${k \in \mathbb Z}$, \begin{align*} {\left((\Pi \circ T_{f})(\varphi)\right)}_{k} & ={\left(\Pi(\varphi \circ f)\right)}_{k} = \dfrac{\mu(f^{k}(W))^{\frac{1}{p}}}{\mu(W)} \int_{W} \left(\varphi \circ f \right) \circ f^{k} d \mu\\ & = \dfrac{\mu(f^{k}(W))^{\frac{1}{p}}}{\mu(W)} \int_{W} \varphi \circ f^{k+1} d \mu \end{align*} and, on the other hand, it is also the case that \begin{align*} {\left (B_{w} \circ \Pi\right)(\varphi)}_{k} & = w_{k+1} {\left( \Pi(\varphi)\right)}_{k+1} = \left( \frac{\mu(f^{k}(W))}{\mu(f^{k+1}(W))}\right)^{\frac{1}{p}}\frac{\mu(f^{k+1}(W))^{\frac{1}{p}}}{\mu(W)} \int_{W} \varphi \circ f^{k+1} d \mu \\ & = \dfrac{\mu(f^{k}(W))^{\frac{1}{p}}}{\mu(W)} \int_{W} \varphi \circ f^{k+1} d \mu. \end{align*} We now show that $\Pi$ is a bounded operator with $\|\Pi\|_p \le H^{\frac{1}{p}} $, where $H$ is the bounded distortion constant in $(\Diamond \Diamond)$. In the proof, we will use the following version of Jensen's Inequality: \[\left( \int _B g d\mu \right)^p \leq \mu(B)^{p-1} \int_B \vert g \vert ^p d\mu,\] as well as the fact that if $\nu \ll \mu$, then \[\left \| \left. 
\frac{d \nu}{d \mu} \right |_{W} \right \|_{\infty} \le \sup_{ \substack{B \subseteq W \\ \mu (B) \neq 0} }\frac{ \nu(B)}{ \mu (B)}.\] Let $\varphi \in L^{p}(X).$ Then, \begin{align*} {\Vert \Pi(\varphi) \Vert}_{p}^{p} & = {\Vert {\bf x} \Vert}_{p}^{p} = \sum_{k \in {\mathbb Z}} {\vert x_{k} \vert}^{p} = \sum_{k \in {\mathbb Z}} {\left(\frac{\mu(f^{k}(W))^{\frac{1}{p}}}{\mu(W)} \right)}^{p} {\left | \int_{W} \varphi \circ f^{k} d \mu \right |}^{p} \\ & \leq \sum_{k \in {\mathbb Z}} \frac{ \mu(f^{k}(W))}{\mu(W)^p} \mu(W)^{p-1} \int_W \vert \varphi \vert ^p \circ f^k d\mu \\ & = \sum_{k \in {\mathbb Z}} \frac{ \mu(f^{k}(W))}{\mu(W)} \int_{f^k(W)} \vert \varphi \vert^p d\mu f^{-k}\\ & = \sum_{k \in {\mathbb Z}} \frac{ \mu(f^{k}(W))}{\mu(W)} \int_{f^k(W)} \vert \varphi \vert^p \frac{d\mu f^{-k}}{d\mu} d\mu \\ & \leq \sum_{k \in {\mathbb Z}} \frac{ \mu(f^{k}(W))}{\mu(W)} \left. \left\|\frac{d\mu f^{-k}}{d\mu}\right |_{f^k(W)}\right \|_{\infty} \int_{f^k(W)} \vert \varphi \vert^p d\mu \\ & \leq \sum_{k \in {\mathbb Z}} \frac{ \mu(f^{k}(W))}{\mu(W)} \sup_{\substack{f^k(B), \{\mathcal B}\subseteq W}}\left(\frac{\mu( f^{-k}(f^k(B)))}{\mu(f^k(B))}\right) \int_{f^k(W)} \vert \varphi \vert^p d\mu \\ & = \sum_{k \in {\mathbb Z}} \frac{ \mu(f^{k}(W))}{\mu(W)} \sup_{\substack{f^k(B), \{\mathcal B}\subseteq W}}\left( \frac{\mu(B)}{\mu(f^k(B))}\right) \int_{f^k(W)} \vert \varphi \vert^p d\mu \\ & \leq \sum_{k \in {\mathbb Z}} \frac{ \mu(f^{k}(W))}{\mu(W)} H \frac{\mu(W)}{\mu(f^k(W))} \int_{f^k(W)} \vert \varphi \vert^p d\mu\\ &= H \sum_{k \in {\mathbb Z}} \int_{f^k(W)} \vert \varphi \vert^p d\mu =H \Vert \varphi \Vert_p^p. \end{align*} Hence, we have shown that \[{\Vert \Pi(\varphi) \Vert}_{p} \leq H^{\frac{1}{p}} {\Vert \varphi \Vert}_{p},\] proving the continuity of $\Pi$. We now prove that $\Pi$ admits a bounded selector with $L = 1$. Let ${\bf x}= \{x_k\}_{k \in {\mathbb Z}} \in \ell ^p (\mathbb{Z}) $. 
We need to find $\varphi \in L^p(X)$ such that $\Pi(\varphi)={\bf x} $ with $\|\varphi\|_p \le \|{\bf x}\|_p$. We let \[\varphi = \sum_{k \in {\mathbb Z}} \frac{x_k}{\mu(f^{k}(W))^{\frac{1}{p}}} \chi_{f^{k}(W)}.\] It is easy to verify that ${\|\varphi \|}_p = \|{\bf x} \|_p$. Moreover, \begin{align*} {\left(\Pi({\varphi})\right)}_{k} & =\frac{\mu(f^{k}(W))^{\frac{1}{p}}}{\mu(W)} \int_{W} \varphi \circ f^{k} d \mu \\ & = \frac{\mu(f^{k}(W))^{\frac{1}{p}}}{\mu(W)} \int_{W} \sum_{n \in {\mathbb Z}} \frac{x_n}{\mu(f^{n}(W))^{\frac{1}{p}}} \chi_{f^{n}(W)} \circ f^{k} d \mu\\ & = \frac{\mu(f^{k}(W))^{\frac{1}{p}}}{\mu(W)} \sum_{n \in {\mathbb Z}} \int_{W} \frac{x_n}{\mu(f^{n}(W))^{\frac{1}{p}}} \chi_{f^{n}(W)} \circ f^{k} d \mu\\ & = \frac{\mu(f^{k}(W))^{\frac{1}{p}}}{\mu(W)} \frac{x_{k}}{\mu(f^{k}(W))^{\frac{1}{p}}} \int_{W} \chi_{f^{k}(W)} \circ f^{k} d \mu\\ & = \frac{x_{k}}{\mu(W)} \mu(W)= x_k. \end{align*} Hence, we have shown that $\Pi$ is a bounded, linear, surjective map admitting a bounded selector and such that $\Pi \circ T_f = B_w \circ \Pi$, completing the proof. \end{proof} {\em Proof of Theorem~\ref{thmSN}.} Assume the hypotheses. By Lemma~\ref{factorBw}, we have that $B_w: \ell^p({\mathbb Z}) \rightarrow \ell^p({\mathbb Z})$ is a factor of $T_f$ with \[w_{k} = \left( \frac{\mu(f^{k-1}(W))}{\mu(f^{k}(W))}\right)^{\frac{1}{p}}.\] Moreover, the factor map $\Pi$ which exhibits this admits a bounded selector. As $T_f$ has the shadowing property, by Lemma~\ref{shadowfactor}, we have that $B_w$ also has the shadowing property. By Theorem~\ref{theoSHADBW}, we have that Condition a), b) or c) of that theorem is satisfied. 
Now using the fact that \[w_k \ldots w_{k+n} = w_k \cdot \left( \frac{\mu(f^{k}(W))}{\mu(f^{k+1}(W)) }\right)^{\frac{1}{p}}\ldots \left( \frac{\mu(f^{k+n-1}(W))}{\mu(f^{k+n}(W))}\right)^{\frac{1}{p}} =w_k \cdot \left(\frac{\mu(f^{k}(W))}{\mu(f^{k+n}(W)) }\right)^{\frac{1}{p}}, \] and that $ 0< \inf |w_k| \le \sup |w_k| < \infty$, it is easy to check that Condition a) implies Condition~\ref{hc}, Condition b) implies Condition~\ref{hd} and Condition c) implies Condition~\ref{gh}. Indeed, for instance, if Condition a) holds, then \[\lim_{n \rightarrow \infty}( \sup _{k \in {\mathbb Z}} \vert w_k \cdots w_{k+n}\vert ^{\frac{1}{n}} )<1,\] or, equivalently, \[\lim_{n \rightarrow \infty} \sup _{k \in {\mathbb Z}} \left \vert \left( \frac{\mu(f^{k}(W))}{\mu(f^{k+n}(W))}\right) ^{\frac{1}{p}}\right \vert ^{\frac{1}{n}} =\lim_{n \rightarrow \infty} \sup _{k \in {\mathbb Z}} \left \vert w_k \cdot \left(\frac{\mu(f^{k}(W))}{\mu(f^{k+n}(W))}\right)^{\frac{1}{p}}\right \vert ^{\frac{1}{n}} < 1, \] implying that \[\lim_{n \rightarrow \infty} \sup _{k \in {\mathbb Z}} \left \vert \left( \frac{\mu(f^{k}(W))}{\mu(f^{k+n}(W))}\right) \right \vert ^{\frac{1}{n}} < 1,\] and yielding that Condition~\ref{hc} holds. Analogous arguments show the other two implications. \subsection{Proof of Theorem~\ref{thmRN}} We will show in detail that Condition~\ref{hc} holds if and only if Condition~\ref{contractionRN} holds. Analogous arguments will show that Condition~\ref{hd} and Condition~\ref{gh} hold if and only if Condition~\ref{dialationRN} and Condition~\ref{GHRN} hold, respectively. By the definition of Radon-Nikodym derivative, we have that, for all $i \in \mathbb{Z}$, \[ m_i \cdot \mu (W) \le \mu (f^i(W)) \le M_i \cdot \mu (W),\] implying \[ \frac{m_k}{M_{k+n}} \le \frac{\mu(f^{k}(W))}{\mu(f^{k+n}(W))} \le \frac{M_k}{m_{k+n}}.\] Recall the hypothesis that $\frac{M_i}{m_i} < K$ for all $i \in {\mathbb Z}$. 
Hence, we have that $\frac{M_k}{m_{k+n}} \le K^2 \cdot \frac{m_k}{M_{k+n}}$, and putting it all together, \[ \frac{m_k}{M_{k+n}} \le \frac{\mu(f^{k}(W))}{\mu(f^{k+n}(W))} \le \frac{M_k}{m_{k+n}} \le K^2 \cdot \frac{m_k}{M_{k+n}}. \] Now $\lim \limits_{n \rightarrow \infty} (K^2) ^{\frac{1}{n}} = 1$. This implies that \[\uplim_{n \rightarrow \infty} \sup _{k \in {\mathbb Z}} \left ( \frac{m_k}{M_{k+n}} \right ) ^{\frac{1}{n}} = \uplim_{n \rightarrow \infty} \sup _{k \in {\mathbb Z}} \left ( \frac{\mu(f^{k}(W))}{\mu(f^{k+n}(W))} \right ) ^{\frac{1}{n}} = \uplim_{n \rightarrow \infty} \sup _{k \in {\mathbb Z}} \left ( \frac{M_k}{m_{k+n}} \right ) ^{\frac{1}{n}}, \] completing the proof. \qedsymbol \section{Open Questions} We now make some final remarks and state some open questions. The following question addresses whether the condition of bounded distortion is necessary. \begin{prob} Suppose we have a dissipative system but we drop the hypothesis of bounded distortion. \begin{enumerate} \item Is it still the case that an operator is generalized hyperbolic if and only if it has the shadowing property? \item Does the characterization provided for the shadowing property still hold? \end{enumerate} \end{prob} Next we inquire what happens in the direction orthogonal to dissipative systems. \begin{prob} Suppose we work with purely conservative systems instead of dissipative systems. \begin{enumerate} \item Is it still the case that an operator is generalized hyperbolic if and only if it has the shadowing property? \item Is there a natural characterization for the shadowing property? \item In particular, what happens if we consider linear operator induced by odometers as in \cite{BDDPOdometers}? \end{enumerate} \end{prob} Finally, more generally, we have the following question. \begin{prob} In the arbitrary setting of linear dynamics, does shadowing imply generalized hyperbolicity? \end{prob} \Addresses \end{document}
\begin{document} \allowdisplaybreaks \begin{titlepage} \title{\textbf{Choice by Rejection}\thanks{We are extremely grateful to Sean Horan for his detailed comments. This paper has benefited from discussions with Arunava Sen, Debasis Mishra, Rohan Dutta, Saptarshi Mukherjee and Abhinash Borah. We would also like to thank seminar participants at ACM EC 2021, DSE Winter school 2020 and ISI-ISER Young Economists Workshop 2020.}} \author{Bhavook Bhardwaj \thanks{Indian Statistical Institute, New Delhi ({\tt [email protected]}).} \and Kriti Manocha \thanks{Indian Statistical Institute, New Delhi ({\tt [email protected]}).}} \maketitle \begin{abstract} We propose a boundedly rational model of choice where agents eliminate dominated alternatives using a transitive rationale before making a choice using a complete rationale. This model is related to the seminal two-stage model of \cite{manzini2007sequentially}, the Rational Shortlist Method (RSM). We analyze the model through \textit{reversals} in choice and provide its behavioral characterization. The procedure satisfies a weaker version of the \textit{Weak Axiom of Revealed Preference} (WARP) allowing for at most two reversals in choice in terms of set inclusion for any pair of alternatives. We show that the underlying rationales can be identified from the observable reversals in the choice. We also characterize a variant of this model in which both the rationales are transitive. \noindent Keywords: Bounded Rationality, Two-stage Choice, Revealed Preference, Choice Reversals \noindent JEL Classification number: D01, D91 \\ \end{abstract} \thispagestyle{empty} \end{titlepage} \section{Introduction} In the last two decades, various two-stage choice procedures have been proposed to rationalize systematic violations of the standard notion of rationality. 
In this paper we consider a new two-stage procedure of decision making in which a decision maker (DM) first shortlists a set of alternatives by \textit{rejecting} the set of \textit{minimal} alternatives with respect to the (first) rationale\footnote{We define rationale as a binary asymmetric relation}. By minimal, we mean an alternative which is dominated by some other alternative and does not dominate any other alternative. In the second stage, she chooses the maximum\footnote{Maximum alternative is the one which is not dominated by any other alternative with respect to the relation involved} alternative from the shortlisted set with respect to the (second) rationale. Rejecting the ``worst" alternatives before choosing is a natural way of making choices. Stochastic models of \cite{tversky1972choice}, \cite{dutta2020gradual} and deterministic models of \cite{apesteguia2013choice} and \cite{masatlioglu2007theory} discuss procedures of eliminating alternatives before making the choice. Such choice behaviors are often observed in real life too. \textbf{Example 1.} The editor of an Economics journal receives paper submissions and has the option of desk-rejecting before sending them to reviewers. Due to a large number of submissions, her rejection is based on the abstracts and she wishes to shortlist all \textit{reasonable} papers for a detailed review by the referees. It is natural to assume that her ranking over the papers might be incomplete. In order not to reject a possibly good quality paper, she chooses to eliminate only the set of \textit{minimal} papers before forwarding them to reviewers. \textbf{Example 2.} Economics department of a university is hiring for a faculty position. The selection committee has four applicants $x,y,z \text{ and } w$ to choose from. The applications are shortlisted for the interview on the basis of published work. The publications may not be comparable across sub-fields. 
Those applicants with no publication or with publication in \textit{lower valued} journals are \textit{rejected}. In the second round, the best candidate is chosen using an overall ranking (considering teaching experience, interview, conference presentations, etc.). While considering $x$ and $y$, $x$ is selected as a better candidate. When $z$ is also considered, the committee goes for $y$. If all the four candidates are compared, $x$ is selected. \begin{figure}[h] \centering \includegraphics[scale=0.28]{example.png} \end{figure} There is a large literature on two-stage choice procedures, well known as \textit{shortlisting procedures} (\cite{tyson2013behavioral}). These procedures rationalize boundedly rational behavior in different environments. The Rational Shortlist Method (RSM) (\cite{manzini2007sequentially}), Categorize then Choose (CTC) (\cite{manzini2012categorize}) and many related shortlisting procedures satisfy a weaker form of the \textit{Weak axiom of revealed preference} (WARP)\footnote{\cite{samuelson1938note} showed that WARP alone behaviorally characterizes a choice function that is generated by maximizing an underlying preference relation. It requires that for a pair of alternatives $x,y$, if $x$ is chosen in the presence of $y$, then $y$ cannot be chosen in the presence of $x$.}. It requires that if an alternative $x$ is chosen in binary comparison with $y$, as well as in a set $S$ containing both $x$ and $y$, then $y$ should not be chosen in any ``intermediate'' set $S'$ between $\{x,y\}$ and $S$. Effectively, it allows for at most one reversal in terms of set inclusion for any pair of alternatives. Clearly, these models cannot explain scenarios like the ones described above where there is a reversal in choice from $x$ to $y$ and then $x$ again. We can rationalize this behavior using our model.
For instance, publications of $y$ are in a different sub-field than those of $x,z \text{ and } w$ which are comparable with $z$ being the best and $w$ being the worst. Therefore, when only $x$ and $y$ are considered, both are shortlisted. If $z$ is also considered, $x$ is rejected on the basis of lower valued publications. If all the four are considered, $w$ being lowest ranked on the basis of publications, gets rejected. The overall ranking of the candidates, $x \succ y \succ z \succ w$, then rationalizes the final choices. Our model thus allows for a \textit{double reversal} in terms of set inclusion for a pair of alternatives. In this paper, we formalize and analyze the model described above, called \textbf{Choice by Rejection (CBR)} . First, we axiomatically characterize the model where the first rationale is transitive and the second rationale is complete. Our analysis makes use of two types of choice reversals which we term as \textit{weak} and \textit{strong} reversals. The main axiom in our characterization restricts certain choice reversals. Second, we show that first and second rationales which represent the data can be identified from the reversals. We use a \textit{small menu} property displayed by the choice function in identifying the class of \textit{CBR-representable} rationales. This property allows us to focus only on menus of pairs and triples. Third, we characterize a variant of CBR where the second rationale is restricted to be a linear order. Finally, we provide some results that relate CBR with existing shortlisting procedures like the RSM. \subsection{Related Literature} \label{lit} The notion of ``eliminating" and choosing has been discussed in the literature. \cite{tversky1972choice} proposed a stochastic model where choice is analyzed as a probabilistic process of successive eliminations. 
In deterministic setting, \cite{apesteguia2013choice} proposed a procedure that involves sequential pairwise elimination of ``disliked" alternatives until only one alternative remains. \cite{masatlioglu2007theory} introduced a model of elimination wherein the DM eliminates those alternatives which are dominated by some ``comparable" alternative. Those alternatives that cannot be eliminated by any of its comparables end up being chosen. Observe that the DM ends up choosing the \textit{maximal} set of comparable alternatives. On the other hand, in the two-stage models like RSM (\cite{manzini2007sequentially}), the \textit{maximal} set is shortlisted in the first stage. This can be understood as rejecting those alternatives which are dominated by some alternative. However, this entails large dependence on the first rationale since the second rationale is only used to choose one alternative among the small set of shortlisted maximal alternatives. This paper proposes a weaker form of domination in shortlisting where the second rationale has more deciding power. Note that choosing the maximal alternatives is equivalent to successively rejecting the set of \textit{minimal} alternatives i.e. those alternatives which are dominated by some alternative and do not dominate any other alternative. Successive elimination by the DM however can increase the cognitive load of shortlisting. It has been shown that often individuals deploy heuristics while making complex choices (\cite{gigerenzer1999fast}). We look at a simple heuristic instead in which DM rejects ``disliked" alternatives just once\footnote{If the ``rejection" is successive, in the limit, CBR is equivalent to the T$_1$SM model of \cite{matsuki2018choice}, a variant of RSM where the first rationale is transitive.}. A recent paper that is related to our model is by \cite{RePEc:ash:wpaper:29}. In their choice procedure, the DM shortlists alternatives by rejecting the worst alternative using a preference order. 
A detailed comparison with their work is done in section \ref{sec7}. The literature on boundedly rational choice procedures involves weakening of the standard notion of rationality i.e. WARP. One of the most well-known weakening of WARP is Weak-WARP (WWARP), first introduced in \cite{manzini2007sequentially} \footnote{Some of the models which directly use WWARP to characterize their models are \cite{manzini2007sequentially}, \cite{manzini2012categorize}, \cite{LOMBARDI200958}, \cite{cherepanov2013rationalization}, \cite{ehlers2008weakened} }. We introduce a novel weakening of WWARP called R-WARP* which relates the two conditions using choice reversals. In terms of the number of reversals, it is well known that WWARP allows for at most one reversal between a pair of alternatives. R-WARP* extends this to at most two reversals which we call a \textit{double reversal}. Such behavior has been observed in different experimental settings (see \cite{manzini2010revealed}, \cite{teppan2009minimization}). The literature has attributed a single reversal to two well known effects called the \textit{compromise effect} and the \textit{attraction effect} which we will discuss later in the paper. Similarly, \textit{two-compromise effect} and \textit{two-decoy effect} are observed as double reversal in choices (see \cite{tserenjigmid2019choosing}). Our paper gives a choice theoretic understanding of these effects. To analyze our model, we follow a technique similar to the one discussed in \cite{horan2016simple}. This involves viewing violations of rationality as choice reversals between pairs of alternatives. Our analysis relies primarily on two types of reversals permitted in this model which we term \textit{weak} and \textit{strong reversals} (discussed later). The layout of the paper is as follows: Section \ref{sec2} discusses the model. Section \ref{sec4} provides axiomatic foundations of our model. Section \ref{sec5} discusses a variant of the model, \textit{Transitive}-CBR. 
Section \ref{axiomsdiscussion} discusses choice reversals and their behavioral interpretations. Section \ref{Iden} provides results on identification of the model. Section \ref{sec7} provides some results relating our model to the literature and Section \ref{sec8} concludes. \section{Preliminaries} \label{sec2} Let $X$ be a finite set of alternatives and $\mathcal{P}(X)$ be the set of all non-empty subsets of $X$. The function $C:\mathcal{P}(X) \rightarrow X$ is a choice function that gives for any menu $S$\footnote{$S \subseteq X$, $S \neq \emptyset$}, a unique alternative from $S$, i.e. $C(S) \in S$ and $|C(S)|=1$. Let $(R,P)$ denote a pair of rationales\footnote{$R \subset X \times X$ and $P \subset X \times X$} where $R$ is transitive and $P$ is complete. We define the set of \textit{minimal} alternatives with respect to $R$ from a menu $S$ as $$ \min(S,R)=\{x \in S \ :\ \exists \ z \in S \ \text{s.t.} \ zRx \ \text{and} \ \nexists \ z' \in S \ \text{s.t.} \ xRz'\}$$ Thus, an alternative is not minimal in a menu $S$ if and only if either (i) it is ``isolated'' (not related to any other alternative with respect to $R$); or (ii) there is at least one alternative which is ``dominated'' by it with respect to $R$. The idea of shortlisting in this paper relies on a one-shot elimination of minimal alternatives before making the final choice as against shortlisting by selection of \textit{maximal}\footnote{Formally, the set of \textit{maximal} alternatives of a choice problem is defined as $ \max(S,R)=\{y \in S | \ \nexists x \in S \ \text{s.t.} \ xRy \}$} alternatives. In our choice procedure, the DM first eliminates minimal alternatives using a selection criterion $R$ (first stage shortlisting) and then makes a unique choice from $S\setminus \min(S,R)$ by choosing the maximal alternative of the rationale $P$.
\begin{figure}[h] \centering \includegraphics[scale=0.8]{procedure.png} \end{figure} \begin{defn} A choice function $C$ is \textbf{Choice by Rejection (CBR)} representable whenever there exists a pair $(R,P)$ of asymmetric rationales with $R$ transitive (possibly incomplete) and $P$ complete such that $$C(S)=\max(S \setminus \min(S,R),P)$$ \end{defn} Note that for any rationale $R$ on a set $S$, $ \max(S,R) \subseteq S \setminus \min(S,R)$. It indicates that for a given selection criterion, the number of alternatives shortlisted in the first stage in CBR is at least as large as the number of alternatives shortlisted in RSM.\footnote{A choice function $C$ is RSM representable if it can be rationalized by an ordered pair of rationales $(P_1,P_2)$ such that $C(S) = \max (\max (S,P_1), P_2)$.}\\ \section{Behavioral Characterization} \label{sec4} \subsection{Strong and Weak reversals} Observable choice reversals provide a succinct framework for analysis of boundedly rational models of choice. The characterization of the RSM model and the Transitive Shortlist Method (TSM)\footnote{TSM is a special case of the RSM model where both the rationales are transitive.} by \cite{horan2016simple} is an important one in this regard. His characterization uses an interesting and easy-to-check consistency condition which is expressed using different types of choice reversals. In a similar manner, we categorize inconsistencies in choices in terms of choice reversals. We define two mutually exclusive reversals that help analyze our model and provide the basis for our characterization. Consider three alternatives $x,y,z$ and a menu $S$ such that $ \{x,y\} \subseteq S$ and $z \notin S$.
We say that the choice function $C$ displays an $(xy)$ reversal \textit{due} to $z$ if we observe the following choices $$C(xy)=C(S)=x, \ \ C(S\cup \{z\})=y$$ Such $(xy)$ reversals can be categorized as \textit{weak} or \textit{strong} depending on whether reversal is due to an alternative which is either pairwise \textit{dominated} or \textit{dominates} $x$. We call the first one a \textbf{weak} $(xy)$ reversal where $x \succ_c z$. This reversal is a weak reversal (due to $z$) in the sense that the introduction of an apparently ``weak" alternative ($z$) shifts the choice from $x$ to $y$. The second type of reversal is called a \textbf{strong} $(xy)$ reversal where $z \succ_c x$. This reversal is a strong reversal (due to $z$) as the introduction of an apparently ``strong" alternative shifts the choice from $x$ to $y$. By definition, if $(xy)$ has a weak(strong) reversal due to $z$, then $(xy)$ cannot have a strong(weak) reversal due to $z$.\footnote {\cite{horan2016simple} describes Weak and Direct reversals in a similar spirit. A choice function $C$ displays a Weak $(xy)$ reversal on $B \supset \{x,y\}$ if $C(xy)= x$ and $C(B) \neq C(B\setminus\{y\})$. $C$ displays a direct $(xy)$ reversal on $B \subseteq X \setminus\{x\}$ if $C(B)= y$ and $C(B \cup \{x\}) \notin \{x,y\}$.} We say that there is a reversal \textit{in the presence of} $x$ if it is already present in a menu on which a reversal happens. Formally, $C(S) = y,\ C(S \cup z) =w$ for some $S \ni x$. As it turns out, these reversals can provide us information about the first stage rationale. Intuitively, a reversal can occur when an alternative is in the minimal set for a given menu and upon addition of another alternative, it is ``pulled" out of the minimal set. Alternatively, an alternative can be ``pushed" into the minimal set upon addition of a new alternative. 
In order to capture all the information revealed by reversals, we define a relation $\succ_R$ on $X$ such that $x\succ_R y$ if and only if there is a: \begin{itemize} \setlength\itemsep{0.006cm} \item \textbf{weak} $(xy)$ reversal due to $w$ for some $w \in X$; or \item \textbf{weak} $(wx)$ reversal due to $y$ for some $w \in X$; or \item \textbf{strong} $(yw)$ reversal due to $x$ for some $w \in X$ \end{itemize} It can be noted that by our definition of weak and strong reversals, $x\succ_R y$ would imply $x \succ_c y$, hence this relation is asymmetric. Also, $\succ_R$ may not be complete. The following example illustrates instances of weak and strong reversals and the resulting $\succ_R$. \textbf{Example 3:} Let $X = \{x,y,z, w\}$ and the choice function is as follows: {\small \begin{center} \begin{tabular}{|c|c|c|c|c|c|} \hline {\textbf{S}} & {\textbf{C(S)}} & {\textbf{S}} & {\textbf{C(S)}} & {\textbf{S}} & {\textbf{C(S)}} \\ \hline $\{x,y\}$ & $x$ & $\{x,y,z\}$ & $y$ & $\{x,y,z,w\}$ & $x$ \\ $\{x,z\}$ & $z$ & $\{x,y,w\}$ & $x$ & & \\ $\{x,w\}$ & $x$ & $\{x,z,w\}$ & $x$ & &\\ $\{y,z\}$ & $y$ & $\{y,z,w\}$ & $y$ & & \\ $\{y,w\}$ & $y$ & & & & \\ $\{z,w\}$ & $z$ & & & & \\ \hline \end{tabular} \end{center} } It can be seen that in the choice function above, we have a (i) \textbf{strong} $(xy)$ reversal due to $z$, and (ii) \textbf{weak} $(zx)$ reversal due to $w$. This generates the following $\succ_R$: \begin{figure}[h] \centering \includegraphics[scale=0.35]{example2.png} \caption{Dashed arrow indicates $\succ_c$ and solid arrow indicates $\succ_R$} \label{psi} \end{figure} An implication of CBR is that these reversals imply reversals on \textit{small} menus --- menus of size 2 and 3 --- as well, a result which we will prove later. This permits us to define $\succ_R$ solely based on choices from small menus. This is discussed in detail in Section \ref{Iden}. Now, we are equipped to introduce the behavioral axioms.
\subsection{Axioms} We now provide conditions on the choices of the DM which guarantee that these choices are the result of the DM choosing according to CBR. Our model is characterized by four behavioral properties (axioms) stated below. The first axiom is called \textit{Never Chosen}. It is a mild consistency condition first introduced in \cite{RePEc:ash:wpaper:29} and is related to the \textit{Always Chosen} property discussed in \cite{manzini2007sequentially}.\footnote{\textit{Always Chosen} is an intuitive property which says that if an alternative is chosen in pairwise comparisons with all alternatives of a menu, then it must be chosen from that menu.} It says that for any menu, if an alternative is never chosen in a pairwise comparison with alternatives of that menu, then that alternative cannot be chosen in that menu. Formally, we define it as\\ \textbf{(A1)} \textbf{Never Chosen (NC)}: For all $S \in \mathcal{P}(X)$ and any $x \in S$, \begin{center} $\forall \ y \in S \setminus \{x\}, \ C(xy)\neq x \implies C(S) \neq x$ \end{center} Our second axiom is a novel weakening of the weak contraction consistency (WCC) axiom introduced in \cite{ehlers2008weakened}.\footnote{WCC states that if $C(S)=x$, then $C(S \setminus \{y\})=x$ for some $y \in S \setminus \{x\}$.} \\ \textbf{(A2)} \textbf{Weak Contraction Consistency$^*$ (WCC$^*$)}: For any menu $S \supset \{x,y\}$ $$\text{If }C(S) \in \{x,y\}\text{, then there exists}\ z \in S \setminus \{x,y\} \text{ such that } C(S\setminus \{z\}) \in \{x,y\}$$ Intuitively, WCC$^*$ ensures a ``path'' of choices from $\{x,y\}$ to the menu $S$ where either $x$ or $y$ is chosen. An interesting implication is that if there is a reversal from $x$ to $y$, there exists at least one intermediate set where addition of an alternative leads to the switch (see Figure \ref{WCC}).
\begin{figure}[h] \centering \includegraphics[scale=0.45]{wcc.png} \caption{Existence of a path from $\{x,y\}$ to $S$ with choices belonging to $\{x,y\}$} \label{WCC} \end{figure} In the spirit of the well-known \textit{No Binary Cycles} (NBC) condition in the literature that restricts the $\succ_c$ relation to be transitive, our next axiom prohibits cycles between only the pairs of alternatives related via $\succ_R$. Therefore, this condition can be viewed as a weaker form of NBC.\\ \textbf{(A3)} \textbf{No binary cycles$^*$ (NBC$^*$)}: For all $x_1, \ldots, x_n \in X$, $$x_1 \succ_R x_2, \ x_2 \succ_R x_3, \ldots , \ x_{n-1} \succ_R x_n \implies x_1 \succ_c x_n $$ Our last axiom is the classic congruence condition required in any shortlisting procedure as discussed in \cite{tyson2013behavioral}. Intuitively, it requires that if an alternative $x$ is chosen in the presence of another alternative $y$ where $y$ is not ``dominated'', then $y$ cannot be chosen in the presence of $x$ whenever $x$ is not ``dominated''. This domination can be captured using the $\tc$ relation (we denote the transitive closure of $\succ_R$ by $\tc$). We call this condition Reject-WARP (R-WARP).\\ \textbf{(A4)} \textbf{Reject-WARP (R-WARP)}: For any alternatives $x$ and $y$ and menus $S$ and $S'$ such that $\{x,y\} \subseteq S, S'$ \begin{center} $\text{If } y \notin \min(S, \tc),\ \text{and} \ C(S)=x ,\ \text{then} \ x \notin \min(S', \tc) \implies C(S') \neq y$ \end{center} Our model can be behaviorally characterized using the above discussed axioms. The main result of our paper is as follows. \begin{theorem} \label{thm1} A choice function $C$ is CBR representable if and only if it satisfies (A1)-(A4). \end{theorem} \paragraph{Outline of the proof:} WCC$^*$ and R-WARP imply that any choice reversal will be associated with an alternative $z$ such that the reversal is due to $z$. Hence, any reversal will be either a weak or a strong reversal.
The axioms imply an exclusivity property which restricts the choice function such that if it displays a weak (strong) reversal for a pair of alternatives, then it cannot display a strong (weak) reversal. NC and NBC$^*$ further impose a restriction on $\succ_R$ when the choice function displays a strong reversal for a pair of alternatives. A small menu property helps us view all the reversals displayed by the choice functions on small menus, i.e. menus of size 2 and 3. This enables us to construct rationales for representation of the choice data. \section{Transitive-CBR} \label{sec5} In this section, we discuss a variant of our model in which we restrict the second rationale to be a preference order. We call this variant \textit{Transitive-CBR}. This model is related to \cite{RePEc:ash:wpaper:29} as it relaxes completeness of the first rationale from their model. It can be seen as a natural generalization of their dual-self model. It may be argued that the \textit{`should'}-self interpretation of the first rationale can display instances of indecisiveness which is precisely reflected by dropping their assumption of completeness. We can characterize this model by generalizing R-WARP to \textit{R-SARP} which is defined as: \\ \textbf{(A4')} \textit{R-SARP}: For all $S_{1}, \ldots, S_{n} \in \mathcal{P}(X)$ and distinct $x_{1}, \ldots, x_{n} \in X:$ $$ \text{If} \ x_{i+1} \notin \min(S_i, \tc), \ C(S_{i})=x_{i} \text{ \ for \ } i=1, \ldots, n-1, \text { then \ }$$ $$x_{1} \notin \min(S_n, \tc) \implies C\left(S_{n}\right) \neq x_{n} $$ It turns out that a characterization of \textit{Transitive-CBR} requires no more than this generalization of R-WARP to any arbitrary chain of alternatives.
The characterization is then given by the following result. \begin{theorem} A choice function $C$ is Transitive-CBR representable if and only if it satisfies (A1)-(A3) and (A4'). \end{theorem} The proof can be found in the Appendix. \section{Discussion on Choice Reversals}\label{axiomsdiscussion} Rational choice theory does not allow for reversals, i.e. the choice of an alternative $x$ when $y$ is available in a menu and the choice of $y$ when $x$ is available in a different menu. The literature is replete with empirical evidence displaying such reversals. Two prominent behavioral explanations of such reversals have been the \textit{compromise effect} and the \textit{attraction effect} which is also popularly known as the \textit{decoy effect}. The compromise effect, first discussed in \cite{simonson1989choice}, says that individuals avoid ``extreme'' alternatives and ``compromise'' for non-extreme alternatives. The idea is that addition of an alternative to a menu makes the previously chosen alternative appear ``extreme''. Hence the choice shifts to an alternative which was not previously chosen, causing a reversal. The attraction effect, first discussed in \cite{huber1982adding}, on the other hand says that the addition of an alternative to a menu acts as a ``decoy'' for an alternative that was previously not chosen. For alternatives $x$, $y$ and $z$, both the effects would be reflected behaviorally as $$C(\{x,y\})= x \ \ \ \text{and} \ \ \ C(\{x,y,z\}) = y$$ with $z$ acting as an alternative that makes $x$ appear ``extreme'' in the compromise effect and $z$ acting as a ``decoy'' for $y$ in the decoy effect. We extend the idea above to what we call a \textit{single reversal}. Denote by $\succ_c$ the pairwise relation such that $x \succ_c y $ if and only if $C(\{x,y\})=x$ (we will abuse notation and use $(xy)$ and $\{x,y\}$ interchangeably).
We now define a \textit{single reversal} as \begin{defn}{$(xy)$ \textit{single reversal}:} If $x \succ_c y$ and there exists $ S \supset \{x,y\}$ such that $C(S)=y$ then for $S' \supset S \supset \{x,y\}$, $C(S') \neq x$ \end{defn} \begin{figure}[h] \centering \includegraphics[scale=0.6]{wwarp.png} \caption{$(xy)$ single reversal} \label{WWARP} \end{figure} The above definition permits at most one reversal with respect to a pair $(xy)$ in terms of set inclusion. It is easy to see that if a choice function satisfies WARP, then for a pair of alternatives $(xy)$, $x \succ_c y$ would imply that $y$ can never be chosen from any menu that contains $x$. Expressed in terms of reversals, WARP allows for no reversal in choices between $x$ and $y$ along any sequence of sets (containing $x$ and $y$) ordered by set inclusion, whereas WWARP allows for only a \textit{single reversal} in choices. A natural implication of the compromise effect and the decoy effect is what \cite{tserenjigmid2019choosing} calls the \textit{two-compromise effect} and the \textit{two-decoy effect}. In the case of the two-compromise effect, the argument is that the addition of a fourth alternative $w$ to a menu would make $x$ no longer appear an ``extreme'' alternative and the choice would revert to $x$. In the case of the two-decoy effect, $w$ would act as a ``decoy'' for $x$, nullifying the decoy effect of $z$ for $y$.
Again, both the effects would be reflected behaviorally as $$C(\{x,y\})= x \ \ \ \text{and} \ \ \ C(\{x,y,z\}) = y \ \ \ \text{and} \ \ \ C(\{x,y,z, w\}) = x$$ In a similar manner as a \textit{single reversal}, we extend the above idea to what we call a \textit{double reversal} defined as \begin{defn}{ $(xy)$ \textit{double reversal}:} If $x \succ_c y$ and there exists $S' \supset S \supset \{x,y\}$ such that $C(S)=y, \ C(S')=x$ then for $S'' \supset S' \supset \{x,y\} $, $C(S'') \neq y$ \end{defn} \begin{figure}[h] \centering \includegraphics[scale=0.6]{rwwarp.png} \caption{$(xy)$ double reversal} \label{R-WARP} \end{figure} There is experimental evidence of double reversals (see \cite{tserenjigmid2019choosing}, \cite{manzini2010revealed}, \cite{teppan2009minimization}). We can see from the example in the introduction that CBR allows for a double reversal and this is what differentiates CBR from other shortlisting models in the literature.\footnote{To the best of our knowledge, no shortlisting procedure discussed in the literature allows for \textit{double reversals}.} Choice reversals provide a framework to relate our axioms to some well-known axioms in the literature. An interesting implication of R-WARP and WCC$^*$ is that for any pair $(xy)$, there can be no more than two reversals. So for a $(xy)$ reversal from $S$ to $S'$, we can identify a menu $T$ and alternative $z$, such that $S \subseteq T \subset S'$, $C(T)=x$ and $C(T \cup \{z\})=y$, and choice is $x$ for all sets in a ``path'' between $S$ and $T$, and choice is $y$ in a ``path'' between $T \cup \{z\}$ and $S'$. Similarly, for a \textit{double reversal}, we can identify two menus where addition of an alternative leads to a reversal in the ``path''. Thus, an $(xy)$ double reversal in the choice is associated with two alternatives $z_1$ and $z_2$ due to which the reversal takes place. The above axioms imply a weaker version of WWARP which we call R-WARP*.
This condition restricts the number of reversals in any pair to at most two. \begin{defn}{\textbf{R-WARP*}:} For all menus $S,S', S''$ such that $\{x,y\} \subset S' \subset S \subset S''$ \begin{center} $C(S) = C(\{x,y\}) = x$ and $C(S') = y$ implies $C(S'') \neq y$ \end{center} \end{defn} The above-discussed restriction can be summarized by the following result. \begin{lemma}\label{rwwarp} If $C$ satisfies R-WARP and WCC$^*$, then it satisfies R-WARP*. \end{lemma} Another interesting implication of the axioms above is a condition which imposes clear limitations on the possibility of certain simultaneous weak and strong reversals. For a given weak reversal it precludes certain strong reversals and vice versa. This is captured in a property which we call \textit{Exclusivity}.\footnote{This is closely related to the Exclusivity condition of \cite{horan2016simple}.} It allows for only one type of reversal between a pair due to any alternative. \begin{defn} {\textit{Exclusivity}}: For any pair of alternatives $(xy)$, either: \begin{itemize} \item $C$ displays no \textbf{weak} $(xy)$ reversal; or \item $C$ displays no \textbf{strong} $(xy)$ reversal \end{itemize} \end{defn} For any pair of alternatives, this condition precludes choice behavior which exhibits both types of reversals, strong and weak. Put differently, the possibility of strong reversals for a given pair of alternatives is ruled out by observing a single weak reversal for that pair (and vice versa). A corollary of the above is the following result, which we use in the proof of Theorem \ref{thm1}. \begin{lemma}\label{corr} If $C$ satisfies (A1)-(A4), then $C$ satisfies \textit{Exclusivity}. \end{lemma} As we show in the Appendix, it is an implication of Lemma \ref{exclusivity}. \section{Identification} \label{sec6} \label{Iden} There can be multiple representations $(R,P)$ which rationalize a choice function $C$.
In this section, we present two results related to identification in the CBR model. Firstly, we define revealed rationales $R^c$ and $P^c$ using the reversals in the choice data. According to our definition, the revealed rationales reflect only those features which are common to every CBR-representation. We then use these rationales to give bounds on both the rationales in the representation. We identify the minimal representation for which the first rationale $R$ is the intersection of first rationales of all the possible CBR representations of $C$. To give the upper bound on the first rationale, we define a revealed rationale which cannot have an intersection with first rationale of any representation. Identification uses a ``small menu property'' of the reversals. All the proofs of this section are relegated to the Appendix. \subsection{Small menu property}\label{smp} It can be shown that any weak reversal in the choice function will be seen in choices from pairs to triples. There will be no binary cycles in the alternatives \textit{involved}\footnote{$x$ is involved in a reversal if either there is a $(xy)$ or $(yx)$ reversal for some $y$ or there is a $(yz)$ reversal due to $x$} in the reversal. Any strong reversal can be seen in either a pair to a triple with a cycle in the pairwise relation, or in a triple to a quadruple with no cycle. 
We define this property as follows: \begin{defn} A choice function $C$ satisfies the \textit{Small Menu Property} (SMP) if the following holds: \begin{itemize} \item If there is a \textbf{weak} $(xy)$ reversal due to $z$, then $x \succ_c y \succ_c z$ and $C(xyz)=y$ \item If there is a \textbf{strong} $(xy)$ reversal due to $z$, then either the cycle $x \succ_c y \succ_c z \succ_c x$ exists and $C(xyz)=y$, or $z \succ_c x \succ_c y$ and $C(xyz)=z$ and for some $w$, $C(xyw)=x$ and $C(xyzw)=y$ \end{itemize} \end{defn} \begin{lemma} \label{SMP} If $C$ is CBR representable, then $C$ satisfies SMP. \end{lemma} This property enables us to provide an alternative formulation of the $\succ_R$ relation in terms of choices from pairs and triples. \begin{defn} For any $x,y \in X$, $x\succ_R y$ if and only if: \begin{itemize} \setlength\itemsep{0.001cm} \item [(i)] $x \succ_c y \succ_c z$ and $C(xyz)=y$ for some $z \in X$; or \item [(ii)] $z \succ_c x \succ_c y$ and $C(xyz)=x$ for some $z \in X$; or \item [(iii)] $y \succ_c z \succ_c x \succ_c y$ and $C(xyz)=z$ for some $z \in X$; or \item [(iv)] $x \succ_c y \succ_c z$, $x \succ_c z \succ_c w$, $C(xwz)=z$ and $C(xyz)=x$ for some $z,w \in X$ \end{itemize} \end{defn} \begin{figure}[h] \centering \includegraphics[scale=0.25]{reversals_new.png} \caption{Cases when $x\succ_R y$. Arrows depict pairwise choices. Colored alternatives are the choices in triples} \label{Reversals} \end{figure} Lemma \ref{SMP} helps us pin down behavior by observing choices over small menus. Any two CBR representable choice functions that agree on \textit{small menus}, i.e. pairs and triples, also agree on larger menus. It is summarized in the result below.
\begin{lemma}\label{Idencor} If $C$ and $\bar{C}$ are CBR representable, then $C(\cdot) = \bar{C}(\cdot)$ if and only if $C(S) = \bar{C}(S)$ for all $ S \subseteq X$ such that $|S| \leq 3$. \end{lemma} \subsection{Class of representations} We now give a minimal representation of a choice function $C$. A minimal representation has the minimal number of pairwise relations required in the first rationale to rationalize $C$. Formally, it is the intersection of the first rationales of all possible representations. We begin by defining a ``revealed'' rationale $R^c$. It captures all the information regarding identification that choice reveals about the first rationale. Define \begin{center} $x{R^c}y \iff x\tc y$ \end{center} As the first rationale is transitive, $R^c$ captures the smallest relation that is required for the representation. Given the first rationale, the second rationale captures those relations which are needed to make the choice from the shortlisted set. An alternative $y$ is shortlisted in a set $S$ if $y \notin \min(S,R^c) $. If $C(S)=x$, then we need $xPy$ for $x$ to be chosen. Hence, we define \begin{center} $P^c \equiv \hat{P}_{R_c}$ where $x \hat{P}_{R_c}y$ holds if for some $S \subseteq X$, $C(S)=x$ and $y \notin \min(S,R^c) $ \end{center} Our next result characterizes the entire class of minimal representations in terms of the revealed rationales. \begin{theorem} \label{Iden1} If $C$ is CBR representable and $(R^*,P^*)$ is a minimal representation of $C$, then: \begin{itemize} \item[(i)] $R^*=R^c$ \item[(ii)] $P^c \subseteq P^* $ where $P^*$ is a complete rationale \end{itemize} \end{theorem} Now, we discuss the upper bound on the first rationale recovered from any representation of the model. For this, we identify the pairs that cannot be related in any representation.
\begin{defn} Given a choice function $C$ define $\hat{Q}$ as $x\hat{Q}y$ if and only if: \begin{itemize} \setlength\itemsep{0.001cm} \item there is a \textbf{strong} $(xw)$ reversal on set $S$ and $y \in S$ for some $w \in S \setminus \{x\}$ \item there is a \textbf{strong} $(yw)$ reversal on set $S$ and $x \in S$ for some $w \in S \setminus \{x\}$ \item there is a \textbf{weak} $(wx)$ reversal and $x,y,w \in S$, $C(S)=w$ for some $w \in S \setminus \{x\}$ \end{itemize} \end{defn} The largest possible $R$ that can be a part of the representation $(R,P)$ will be the largest transitive relation that is a subset of $\succ_c \setminus \hat{Q} \supset R^c$. We define such a largest transitive relation as $\bar{R}$. Note that this largest relation need not be unique. For the second rationale, analogous to $\hat{P}_{R^c}$, we define $x \hat{P}_{R}y$ if for some $S \subseteq X$, $C(S)=x$ and $y \notin \min(S,R) $. Those pairwise relations which are not covered by the first rationale are added to the second rationale. The following result provides the class of identified rationales. \begin{theorem} \label{Iden2} If $C$ is CBR representable, then $(R,P)$ represents $C$ if and only if: \begin{itemize} \item[(i)] $R$ is a transitive rationale such that $R^c \subseteq R \subseteq \bar{R}$ \item[(ii)] $P$ is a complete rationale such that $P \supseteq \hat{P}_R \cup (\succ_c \setminus R)$ \end{itemize} \end{theorem} \section{Comparison with Related Models} \label{sec7} The violation of rationality (WARP) is attributed to violation of either of the following two consistency conditions: \textit{Always chosen}\footnote{If $x$ is chosen in pairs, then it must be chosen from the union of those pairs} or \textit{No Binary Cycles}\footnote{The relation derived from pairwise choices cannot have a cycle}. Various boundedly rational models explain violation of rationality using violation of either of these conditions.
\cite{manzini2007sequentially} show that RSM is able to accommodate the violation of \textit{No Binary Cycles}. However, a violation of \textit{Always Chosen} cannot be explained by RSM. The ego preserving heuristic (EPH) choice function of \cite{RePEc:ash:wpaper:29} on the other hand is able to accommodate the violations of \textit{Always Chosen} but unable to explain the violation of \textit{No Binary Cycles}. CBR, however, is able to explain both the violations. We now compare some related models with CBR and show that if a choice function is CBR representable, it is equivalent to the related model whenever no strong/weak reversal exists. \paragraph{(I) Rational Shortlist method:} RSM is not a special case of our model. \cite{manzini2007sequentially} characterize it by two axioms: Expansion (EXP) \footnote{For all $S,S' \supset \{x,y\}$, $C(S)= C(S') = x $ implies $C(S \cup S') = x$} and WWARP. Our model may violate WWARP. However, as shown earlier, it satisfies a weaker version of this axiom (R-WARP*) which allows for at most two reversals. Also, CBR may violate EXP as a weak $(xy)$ reversal due to $z$ implies $C(xyz)=y, \ C(xy)=x=C(xz), \ C(yz)=y$ which violates \textit{always chosen}. The reversals discussed in this paper establish a relation between our model and RSM. \begin{proposition}\label{RSM} If a choice function $C$ is CBR representable, then $C$ is an RSM if and only if $C$ displays no weak reversals \end{proposition} Proof of this result can be found in Appendix \ref{proofsec7}. \paragraph{(II) Transitive Shortlist method:} The transitive shortlist method (TSM) is a variant of the RSM where both the rationales are transitive (possibly incomplete). \cite{horan2016simple} analyzes this choice procedure in terms of two choice reversals: direct and weak$\st$ \footnote{Weak reversal of TSM. $\star$ added to avoid confusion with weak reversal of this paper} reversal.
A direct $\langle x , y \rangle$ reversal on $B \subset X \setminus \{x\}$ is defined as \[ C(B) = y \ \ \text{and} \ \ C(B \cup \{x\}) = z \notin \{x,y\} \] A weak $\langle x , y \rangle$ reversal on $B \supset \{x,y\}$ is defined as \[ C(xy) = x \ \ \text{and} \ \ C(B \setminus \{y\}) \neq C(B) \] TSM satisfies the Exclusivity condition which says that for a pair $x,y$, either there is no direct $\langle x , y \rangle$ reversal on $B \subset X \setminus \{x\}$, or there is no weak$\st$ $\langle x , y \rangle$ reversal. CBR violates this axiom when there is a double reversal. This can be seen in Example \ref{psi}. There is a direct $\langle z , x \rangle$ reversal on $\{x,z\}$ and a weak$\st$ $\langle z , x \rangle$ reversal on $\{y,z,w\}$. Another property satisfied by TSM is EXP (hence \textit{always chosen}), which CBR need not satisfy. Thus, TSM is also not a special case of CBR. Note that since TSM satisfies WWARP, in the case of a direct $\langle x,y \rangle $ reversal it must be that $C(yz)=y$. Hence, whenever there is a strong or a weak reversal, we have a direct reversal. Conversely, as TSM also satisfies \textit{always chosen}, a choice function cannot display a weak reversal. Therefore, any such reversal must be a strong reversal. \begin{comment} In the case of a weak reversal, depending on whether $C(B)=y$ or $C(B)=z$, it can be shown to correspond to a $(xy)$ strong reversal or $(xz)/(zx)$ strong reversal. The following example illustrates the link between these reversals and a strong reversal.
\textbf{Example 3.} Let $X = \{x,y,z\}$ and consider two choice functions $C$ and $C'$ as follows: \begin{center} \begin{tabular}{|c|c|c|} \hline {\textbf{S}} & {\textbf{C(S)}} & {\textbf{C'(S)}} \\ \hline $\{x,y\}$ & $x$ & $x$ \\ $\{x,z\}$ & $z$ & $z$ \\ $\{y,z\}$ & $y$ & $y$ \\ $\{x,y,z\}$ & $z$ & $x$ \\ \hline \end{tabular} \end{center} The choice function $C$ displays a direct $\langle x , y \rangle$ reversal on $\{y,z\}$ and $C'$ displays a weak $\langle x , y \rangle$ reversal on $\{x,y,z\}$. The direct $\langle x , y \rangle$ reversal corresponds to a strong $(xz)$ reversal due to $y$, whereas the weak $\langle x , y \rangle$ reversal corresponds to a strong $(zx)$ reversal due to $y$. Both the choice functions have a TSM as well as a CBR representation. $C$ can be represented by $R=P_1= \{(x,y)\}$ and $P = P_2 = \{(y,z), (z,x), (y,x)\}$ whereas $C'$ can be represented by $R=P_1= \{(y,z)\}$ and $P = P_2 = \{(z,x), (x,y), (z,y)\}$. \end{comment} As in the case of RSM, our model relates to TSM in the following way: \begin{proposition}\label{TSM} If a choice function $C$ is T-CBR representable, then $C$ is a TSM if and only if $C$ displays no weak reversals \end{proposition} \paragraph{(III) Ego-Preserving Heuristic :} \cite{RePEc:ash:wpaper:29} propose a two-stage choice model wherein both the rationales are linear orders. DM first eliminates the worst alternative with respect to the first order and then chooses the maximal alternative with respect to the second order. In terms of reversals, our model is related to the Ego-Preserving Heuristic (EPH). EPH is characterized by NC, NBC and a weaker form of WARP (WARP-EP). Their model cannot accommodate violation of \textit{no binary cycle}, but allows for violation of always chosen and hence can permit weak reversals. It turns out that their model does not allow for strong reversals as they lead to a violation of WARP-EP.
\begin{comment} We define ``Condition A" which is implied by WARP-EP if choice function is CBR representable. \begin{defn} A choice function satisfies \textbf{Condition A} iff a $(xy)$ reversal on set $S$ implies a $(zy)$ reversal for all $z \in S\setminus \{y\}$ \mbox{\rm E}nd{defn} \begin{proposition}\label{EPH} If Choice function $C$ is CBR representable, then $C$ is EPH if and only if $C$ displays no strong reversals and satisfies Condition A \mbox{\rm E}nd{proposition} \mbox{\rm E}nd{comment} \begin{comment} This section provides a connection between axioms introduced in related models and reversal in this paper. It can be noted that if a choice procedure satisfies \textit{always chosen}, then it does not accomodate weak reversals. Moreover, if a procedure satisfies \textit{no binary cycle}, it does not accomodate strong reversals. \mbox{\rm E}nd{comment} \section{Final remarks} \label{sec8} In this paper we introduced a new two-stage choice procedure that departs from the idea of shortlisting by maximization. We axiomatically characterized this procedure using intuitive behavioral properties. Bounds on the first and the second rationales were provided to identify the representations for a given choice function. We also compared this procedure with the Rational Shortlist Method (RSM) of \cite{manzini2007sequentially}. The main contribution of our model is its ability to explain double reversals observed experimentally that the existing models are unable to do. In addition to that, our choice procedure also provides an alternative explanation for single reversals discussed in the literature. The first rationale in our model can be interpreted in several ways. One such interpretation is when alternatives have multiple attributes. DM shortlist those alternatives which are either non-comparable with respect to any attribute, or dominate some alternative with respect to at least one attribute. 
To illustrate, consider $X= \{x,y,z\}$ and two attributes $R_1 = \{(x,y)\}$ and $R_2 = \{(y,z)\}$. Shortlisting using these two attributes is equivalent to shortlisting by a single transitive rationale $R= \{(x,y), (y,z), (x,z)\}$. Another interpretation is related to ``social influence''. DM is socially influenced by certain reference groups that she relates to: people that she finds similar to herself in a given situation. Pairwise choices of this group are observed, which in aggregate are transitive. Over these choices, she uses her preference to make the final choice. Given the interpretations above, it is natural to assume that the first rationale need not be complete. Our model is also a natural way of choosing in several contexts. One such setting is online dating. Users on a popular dating app, Tinder, are on average presented with 140 partner options a day (\cite{doi:10.1177/1948550619866189}). Large number of partner options sets off a rejection mindset: people become increasingly likely to reject potential partners before choosing. It may be an interesting future topic to study a possible extension of this model in a stochastic setup. One can think of a collection of rationales and a probability distribution over them that one uses to \textit{reject} ``worse" alternatives before making a final choice. \pagebreak \appendix \section{Appendix} \subsection{Proof of Theorem 1} First we prove the necessity of the axioms \begin{proposition}\label{necessity} If $C$ is CBR representable, then it satisfies (A1)-(A4) \mbox{\rm E}nd{proposition} \begin{proof} Let $(R,P)$ be a representation of the choice function $C$ where $R$ is a partial order and $P$ is a complete rationale. We will use the following three observations and a lemma to prove necessity: \begin{enumerate} \item $x R y$ implies $x \succ_c y$. 
Also, $\succ_c \ \subseteq R\cup P$ \item If $x \notin \min(S,R)$, then either $x$ is \textit{isolated} in $S$ with respect of $R$ ($(x,a) , (a,x) \notin R$ for all $a \in S$) or there exists a $b \in S$ such that $xRb$ holds \item If $x \in \min(S_{i},R)$ for all $i \in [n]$, then $x \in \min(\bigcup_{i} S_{i},R)$ \mbox{\rm E}nd{enumerate} Now we use the above observations to prove an intermediate result. \begin{lemma} \label{Rev} If $C$ is CBR representable, then the following is true: \begin{itemize} \item If there is a weak $(xy)$ reversal due to $z$, then $xRy$, $yPx$ and $yRz$ \item If there is a strong $(xy)$ reversal due to $z$ , then $\neg xRy$, $xPy$, $zRx$ and $yPz$ \mbox{\rm E}nd{itemize} \mbox{\rm E}nd{lemma} \begin{proof} Let there be a weak $(xy)$ reversal due to some $z$. By observation (1), $ \neg yRx$ and $ x \succ_c y$ implies that $xRy$ or $xPy$. Suppose $xPy$ holds. Since $C(S)=x$ and $C(S \cup \{z\})=y$, it must be that $x \in \min(S \cup \{z\},R)$ and $x \notin \min (S,R)$. Therefore, we must have $zRx$, contradicting $x \succ_c z$. Thus $xRy$ holds and $x \notin \min(S \cup \{z\},R)$ implying $yPx$. For $C(S)= x$, it must be that $y \in \min(S,R)$ and for $C(S \cup \{z\}) =y$, $\ yRz$ must be true. \\ Now, let us consider the case of a strong $(xy)$ reversal due to $z$. If $xRy$ holds, then by the argument above, $yPx$ and $yRz$ holds. By transitivity of $R$, $xRz$ holds which contradicts $z \succ_c x$. Therefore $xPy$ holds and $x$ and $y$ are not related with respect to $R$. For $C(S \cup \{z\})=y$, it must be that $x \in \min(S\cup \{z\},R) $ and therefore for $C(S)=x$, it must be isolated in $S$ with respect to $R$. By an analogous argument in the case above, $zRx$ and $yPz$ hold. \mbox{\rm E}nd{proof} We can see that the following result immediately follows from the lemma above. 
\begin{corollary} \label{Cor} If $C$ is CBR representable, then $x \ \succ_R \ y \implies xRy$ \mbox{\rm E}nd{corollary} Now we establish necessity of the axioms \begin{itemize} \item[(i)]\textit{NC}: For any $S$ with $C(S)=x$, it must be that $x \notin \min(S,R)$. Therefore, either $xRz$ holds for some $z \in S$ or $x$ is isolated in $S$ with respect to $R$. If $xRz$ holds, then we know that $C(xz)=x$. If $x$ is isolated in $S$ with respect to $R$, then we must have at least one $z \in S$ such that $z \notin \min(S,R)$. Therefore we must have $xPz$. Since $x$ and $z$ are unrelated in $S$, we get $C(xz)= x$. \item[(ii)] \textit{WCC$^*$} : \\ Let $S=\{x,y,x_{1},x_{2},....,x_{n}\}$ and $C(xy)=x$. Define a general set $S_{i}$ which has alternative $x_{i}$ missing from set $S$ i.e. $$S_{i}=S\setminus\{x_{i}\}$$ Assume for contradiction that $C(S_{i}) \notin \{x,y\}$ for all $i \in \{1,2,...,n\}$. Hence, $C(S_{i})$ is one of the $x_{j}$ where $i \neq j$. We denote by $c_{i}$ as the choice in set $S_{i}$. Consider the first case where $C(S)=x$.\\ If $xRy$ then $x \notin$ $\min(S_{i},R)$ for all $i$. For $c_{i}$ to be chosen in $S_{i}$, $c_{i}Px$ must hold for all $i$. Note that for $C(S)=x$, it must be that $c_{i} \in \min(S,R)$ for all $i$, which is possible when $c_{i}$ is isolated in $S_i$ with respect to $R$ and $x_{i}Rc_{i}$ for all $i$. But, for every $i$, there exists a $j \neq i$ such that $c_{i}=x_{j}$, implying that there exists at least one $c_{i} \notin$ $\min(S,R)$ which is a contradiction.\\ Now, let $\neg xRy$ and thus $xPy$ hold. As $x \notin$ $\min(S,R)$, by observation (3), $x \in$ $\min(S_{i},R)$ in at most one $S_{i}$. If $ x \notin \min(S_{i},R)$ for all $i$, then argument becomes similar to the case above where $xRy$ holds. Assume $x \in$ $\min(S_{n},R)$ ($i=n$ W.L.O.G). 
For $c_{i}$ to be chosen in $S_{i}$ ($i \neq n$), $c_{i}Px$ holds and for $x$ to be chosen in $S$, $c_{i} \in$ $\min(S,R)$ for all $i \neq n$, for which $c_{i}$ is isolated in $S_i$ and $x_{i}Rc_{i}$ for all $i$. This restricts $c_{i}= x_{n}$ for all $i \neq n$. Also, given $x\in \min(S_{n},R)$, for $x\notin \min(S,R)$, we need $xRx_{n}$. As $c_{i}= x_{n}$, there exists a $z \in S \setminus \{x\}$ such that $x_{n}Rz$ holds which implies $x_{n} \notin \min(S,R)$, again a contradiction. Let us now consider the case $C(S)=y$. Now we have $(xy)$ reversal. Suppose $xRy$ is true. $yPx$ holds as $x \notin \min(S,R)$ and there exists a $x_{k} \in S$ such that $yRx_{k}$ holds. As $y \notin \min(S_k,R) \text{ for all } k \neq i$, choice of $c_{i}$ in $S_{i}$ requires $c_{i}Py$. Further, as $y$ is chosen in $S$, $c_{i} \in \min(S,R)$ for all $i \neq k$. By arguments above, this requires $x_{i}Rc_{i}$ and $c_{i}= x_{k}$ for all $i \neq k$. For $x_{k}$ to be chosen in $S_{i}$, there needs to be an alternative that is dominated by $x_k$ with respect to $R$, which implies $x_{k} \notin \min(S,R)$, a contradiction.\\ Assuming $\neg xRy$, $xPy$ must hold by observation (1). Choice of $y$ in $S$ requires $x \in \min(S,R)$ i.e. there exists a $x_{k}$ (say $x_{n}$) such that $x_{n}Rx$ holds and for no alternative $z$, $xRz$ is true. By observation (3), it must be that $y \in \min(S_{i},R)$ for atmost one $S_{i}$ (say $S_{k}$). Using arguments mentioned above, $c_{i}Py$ and $x_{i}Rc_{i}$ holds for all $i \neq k$ which restricts $c_{i}= x_{k}$ for all $i \neq k$. For $x_{k}$ to be chosen, there exists an alternative below it in $R$, a contradiction. \item[(iii)] \textit{NBC$^*$}:\\ This follows from corollary \ref{Cor} \item[(iv)]\textit{R-WARP}: \\ Consider $\{x,y\} \subseteq S,S' \in \mathcal{P}(X)$ and $y \in X$ such that the following is true: $$y \notin \min(S, \tc),\ C(S)=x ,\ \text{and} \ x \notin \min(S', \tc)$$ Consider the case when $y \ \tc \ z$ for some $z \in S$. 
Then by Corollary \ref{Cor}, $y \notin \min(S,R)$. As $C(S)=x$, $xPy$ holds. Now, if $x \ \tc \ w$ for some $w \in S'$, then $x \notin \min(S',R)$. This implies $C(S') \neq y$. Now suppose $\neg x \ \tc \ w$ for any $w \in S'$. For $C(S')=y$, we need $x \in \min(S',R)$. Suppose that $C(xy)=x$. Using the argument in Proposition \ref{necessity} part $(iii)$, there exists a $z \in S'$ such that there is a strong $(xy)$ reversal due to $z$, as a weak reversal implies $xRy$ (Lemma \ref{Rev}). By definition $z \succ_R x$ holds, which is a contradiction since $x \notin \min(S',\tc)$ requires $x \tc w$ for some $w \in S'$. If $C(xy)=y$, then by a similar argument, there exists a $w \in S$ such that there is a strong/weak $(yx)$ reversal due to $w$. If the reversal is weak, then $y \succ_R x$ holds. For $x \notin \min(S',\tc)$, there is a $w'\in S'$ such that $x \ \tc \ w'$ holds, a contradiction. If the reversal is strong, then $yPx$ holds, again a contradiction. Now consider the case when $y$ is not related to any alternative in $S$ with respect to $\succ_R$. If $x \ \tc \ z$ holds for some $z \in S'$, then the case is similar to the case above when $y \ \tc \ z$ and $\neg x \ \tc \ w$ for any $w \in S'$. Hence, consider the case when $y$ is not related to any alternative in $S'$ with respect to $\succ_R$. W.L.O.G., $C(xy)=x$. We then have a $(xy)$ reversal from $\{x,y\}$ to $S'$. As argued above, there exists a $z\in S$ such that the reversal is due to $z$. If the reversal is a weak reversal, then $x \ \tc \ y$ holds which contradicts that $x$ is isolated in $S'$. If the reversal is a strong reversal, then $z \ \tc \ x$ holds, which again contradicts that $x$ is isolated in $S'$ with respect to $\tc$. \end{itemize} \end{proof} Next, we prove the sufficiency part of the proof.
Before that we prove some lemmas \begin{lemma} \label{SR1} If $C$ satisfies (A1)-(A4), then a strong $(xy)$ reversal implies $\neg x \ \tc \ y$ \mbox{\rm E}nd{lemma} \begin{proof} Given there is a strong $(xy)$ reversal on $S$ due to some $z$ , by the definition of $\succ_R$ , $z \succ_R x$ holds. If possible, $ x \ \tc \ y$. This implies $z \ \tc \ y$. Note that by R-WARP, $y \in \min(S, \tc)$. We now have a $(zy)$ reversal from $\{y,z\}$ to $S \cup \{z\}$. By WCC$^*$, there exists a $x_1 \in S$ such that the reversal is due to $x_1$. If it is a weak $(zy)$ reversal due to $x_1$, then $y \succ_R x_1$ holds, which is a contradiction. Therefore, it must be that we have strong $(zy)$ reversal due to $x_1$. By definition, $x_1 \succ_R z$ holds. This further implies $x_1 \ \tc \ y$. By NBC$^*$, $C(x_1y)=y$. Now, we have a $(x_1y)$ reversal due to some $x_2 \in S$. By similar argument as above, this must be a strong reversal, implying $x_2 \ \tc \ y$. This leads to $(x_2y)$ reversal due to some $x_3 \in S$. Proceeding inductively, this leads to $x_i \ \tc \ y$ for all $x_i \in (S \cup \{z\}) \setminus \{x\}$ as in each step, $x_{i+1} \neq x_k$, $k \le i$ by NBC$^*$. This violates NC as $x_i \succ_c y$ for all $x_i \in S \cup \{z\}$, a contradiction. \mbox{\rm E}nd{proof} \begin{lemma} \label{SR} If $C$ satisfies (A1)-(A4), then a strong $(xy)$ reversal on set $S$ implies $ \neg x \ \tc \ w$ and $ \neg w \ \tc \ x $ for all $w \in S$ \mbox{\rm E}nd{lemma} \begin{proof} Suppose a strong $(xy)$ reversal is observed on set $S$. By lemma \ref{SR1}, $\neg x \ \tc\ \ y$ and by NBC$^*$, $\neg y \ \tc\ \ x$. Hence, $y \notin \min(\{x,y\}, \tc)$. By R-WARP, $x \in \min(S \cup \{z\}, \tc)$ which implies $\neg x \ \tc \ w$ for all $w \in S$. Therefore $ x \in \min(S, \tc)$. If possible for some $w \in S$, $w \ \tc \ x$ holds. Then by WCC$^*$ and lemma \ref{SR1} there is a weak $(wx)$ reversal due to some $w' \in S$. By definition, $x \succ_R w'$, contradicting $x \in \min(S, \tc)$. 
\end{proof} \begin{lemma} \label{exclusivity} If $C$ satisfies (A1)-(A4) and there is a \textbf{weak} $(xy)$ reversal, then there does not exist $ y' \in X$ and a menu $S$ such that there is a: \begin{itemize} \item \textbf{strong} $(xy')$ reversal on $S \ni y$; or \item \textbf{strong} $(yy')$ reversal on $S \ni x$ \end{itemize} \end{lemma} \begin{proof} Let there be a weak $(xy)$ reversal due to some $z$. By definition, $x \succ_R y$ holds. In both the cases, this is a contradiction as this violates Lemma \ref{SR}. \end{proof} Exclusivity mentioned in Section \ref{axiomsdiscussion} is a direct implication of the lemma above. We can see that if a choice function that is CBR representable has no strong reversal, then $C$ has no binary cycle in the pairwise relation (satisfies the \emph{No Binary Cycle} condition). Next we show that our axioms imply the \textit{Small Menu Property} (SMP). \begin{lemma} \label{Red} If $C$ satisfies (A1)-(A4), then it satisfies the ``small menu property'' \end{lemma} \begin{proof} Suppose for some $x,y \in X$ we have a weak $(xy)$ reversal due to some $z$. By definition $x \ \succ_R \ y, y \ \succ_R \ z $ and hence $x \ \tc \ z$. By NBC$^*$, $x \succ_c y \succ_c z $ and, by NC, $C(xyz) \neq z$. Now, by R-WARP, $C(xyz) \neq x$. Therefore we have a weak $(xy)$ reversal from pair to triple. \\ Suppose for some $x,y \in X$ we have a strong $(xy)$ reversal due to some $z$ on some set $S$. By definition $z \succ_R x$. We know that $\neg y \ \tc \ z $ (as $y \ \tc \ z$ would imply $ y \ \tc \ x$, violating NBC$^*$). If $C(xyz)=x$, then we have a $(zx)$ reversal due to $y$. It cannot be a strong reversal as it would imply $y \ \tc \ z$. Therefore, it must be a weak $(zx)$ reversal due to $y$ which implies $x \ \tc \ y$, a contradiction to Lemma \ref{SR1}. If $C(xyz) = y$, then by NC, $C(yz)=y$ and we are done. Now, let us consider $C(xyz) = z$.
Since $C(S \cup \{z\})=y$ and $z \notin \min(S \cup \{z\}, \tc)$, thus by R-WARP, $ y \in \min(\{x,y,z\}, \tc ) $. This implies $z \ \tc \ y$ as $\neg x \ \tc \ y$ by lemma \ref{SR1}. By NBC$^*$, $C(yz)=z$. Since $C(S \cup \{z\})=y$, by WCC$^*$ there exists a $w \in S$ such that the $(zy)$ reversal is due to $w$. This reversal is a weak reversal by lemma \ref{corr} and \ref{SR} and hence $y \ \tc \ w$. This implies $z \ \tc \ w$ (and $C(zw)=z$ by NBC$^*$). Now, let us show that $C(xyw)=x$. By R-WARP, $C(xyw) \neq y$ since $x \notin \min(\{x,y,w\}, \tc)$. If $C(xyw)=w$ then by lemma \ref{corr} and \ref{SR1}, we have a weak $(yw)$ reversal due to $x$ implying $y \ \tc \ x$ , a contradiction. Now, we show $C(xyzw)= y$. By R-WARP, $C(xyzw) \neq z$ as $y \notin \min(xyzw, \tc)$. Also, if $C(xyzw)=x$, then we have a weak $(zx)$ reversal due to $w$. By definition, $x \succ_R w$, violating lemma \ref{SR}. If $C(xyzw)= w$, then by WCC$^*$, the $(yw)$ reversal is either due to $x$ or $z$. It is not due to $x$ by lemma \ref{SR}. If the reversal is due to $z$ then this is a weak reversal, contradicting $C(zy) = z$. \mbox{\rm E}nd{proof} \bigbreak We are now equipped to prove the sufficiency of the axioms. \begin{proposition}\label{prop3} If $C$ satisfies A(1)-A(4), then it is CBR representable \mbox{\rm E}nd{proposition} \begin{proof} First, we define a partial order $R^c$ and a complete rationale $P^c$ on the choice function such that $$ C(S)= \max(S \setminus \min(S,R^c), P^c) $$ Define $R^c \mbox{\rm E}quiv \tc $. To define $P^c$, we first define $P_1$ as \begin{center} $xP_1 y$ if and only if there exists a $S$ such that $C(S)=x$ and $y \notin \min(S, \tc)$ \mbox{\rm E}nd{center} Note that $ \succ_R$ is acyclic by NBC$^*$. Hence, $R^c$ is asymmetric and transitive. $P_1$ is asymmetric by R-WARP. 
Let $P^c$ be an asymmetric and complete rationale such that \[ P^c \equiv P_1 \cup P_2 \] where $P_2 = R^c \setminus (P_1 \cup P_1^{-1})$. Let us now prove that $P^c$ is complete and asymmetric.\\ \textbf{Completeness:} Let us assume that $C(xy)=x$. If $x$ and $y$ are not related with respect to $R^c$, this implies $xP_1y$ since $y \notin \min(xy, R^c)$. Now, if $(xy) \notin P_1 \cup P_1^{-1}$, then it is related with respect to $R^c$. Therefore, $x$ and $y$ are related with respect to $P_2$. \begin{comment} Assume without loss of generality $C(xy)=x$ and $\neg xP^c y$ and $\neg yP^c x$. It can be argued that if there is a $(xy)$ reversal, then either $xP_1y$ or $yP_1x$ holds. This is because WCC$^*$ and R-WARP ensure that any reversal is of the form strong reversal or weak reversal due to some alternative. A weak $(xy)$ reversal on set $S$ due to $z$ implies $x\succ_R y, y \succ_R z$. Thus, in set $S \cup \{z\}$, $C(S \cup \{z\})=y$ and $x \notin \min(S \cup \{z\}, \tc)$, implying $yP_1 x$. Similarly, if there is a strong $(xy)$ reversal on set $S$ due to $z$, then by Lemma \ref{SR1}, $x$ and $y$ are not related with respect to $ \tc$ in $\{x,y\}$. Given $C(xy)=x$, $xP_1 y$ holds. If $x \ \tc \ y$, then there is no $S$ where $C(S)=y$ and $x \in S$ (as that would imply $yP_1x$). This implies $y$ is never chosen in the presence of $x$ and hence $xP^c y$ holds. Note that $yR^c x$ cannot be true as that would imply $C(xy)=y$ by \textit{NBC$^*$}. This implies $y \in \lambda(xy)$, implying $xP^c y$.\\ \end{comment} \textbf{Asymmetry:} If possible, say for some $x,y$, both $xP^c y$ and $yP^c x$ are true. Either $x{P_1} y$ or $y{P_1} x$ is true, otherwise both will be derived through $P_2$ which contradicts the asymmetry of $R^c$. W.L.O.G. suppose $x{P_1} y$ holds. Then $y{P_1}x$ cannot hold by R-WARP. It is easy to see that $y P_2 x$ also does not hold. Now, we show that the above defined $(R^c, P^c)$ rationalize the choices. Consider a set $S$ and $C(S)=x$.
Suppose that $x \in \min(S,R^c)$. Then there exists a $y \in S \setminus \{x\}$ such that $yR^cx$ holds and $ \neg xR^cy'$ for all $ y' \in S \setminus \{x\}$. Note that by WCC$^*$, there exists a sequence of sets ordered by set inclusion from $\{x,y\}$ to $S$ with choices belonging to $\{x,y\}$. R-WARP ensures that there exists a $z \in S$ such that there is a $(yx)$ reversal due to $z$. Lemma \ref{corr} and \ref{SR} implies that it is a weak reversal. By Lemma \ref{Red}, $y \succ_c x \succ_c z$ and $C(xyz)=x$. Thus, we must have $x R^c z$, leading to a contradiction. \\ Now we show that $x = \max(S \setminus \min(S,R^c),P^c)$. Consider any $y$ such that $y \notin \min(S, R^c)$ and $ y P^c x$ holds. We know that by construction of $P^c$, we have $x {P_1}y \ (\implies xP^cy)$ which contradicts the asymmetry of $P^c$. Therefore $xP^cy$ for all $y \notin \min(S,R^c)$. \mbox{\rm E}nd{proof} Proposition \ref{necessity} and Proposition \ref{prop3} complete the proof of Theorem \ref{thm1} \subsection{Proof of Theorem 2} \begin{proposition} \label{prop4} If $C$ is Transitive CBR-representable, then $C$ satisfies A(1), A(2),(A3),(A4') \mbox{\rm E}nd{proposition} \begin{proof} The necessity of \textit{WCC$^*$}, NC and \textit{NBC$^*$} is same as shown in Appendix A.1. Let us now prove the necessity of R-SARP. Suppose for some $S_{1}, \ldots, S_{n} \in \mathcal{P}(X)$ and distinct $x_{1}, \ldots, x_{n} \in X$, we have: $$ x_{i+1} \notin \min(S_i, \tc), \ C(S_{i})=x_{i} \text{ \ for \ } i=1, \ldots, n-1, \text { and } x_{1} \notin \min(S_n, \tc\textit{}) $$ Using the argument in proving the necessity of R-WARP, we must have $x_iPx_{i+1}$ for all $i$. Since $P$ is transitive, we must have $x_1Px_n$. If $C(S_n) =x_n$, by a similar argument, it would imply $x_nPx_1$ a contradiction (since $P$ is asymmetric) \mbox{\rm E}nd{proof} Note that all the lemmas in the section above (Lemmas \ref{SR1} - \ref{Red}) hold true even when R-WARP is replaced by R-SARP. 
With this we prove the following result \begin{proposition} \label{prop5} If $C$ satisfies A(1), A(2),(A3),(A4'), then it is Transitive-CBR representable \mbox{\rm E}nd{proposition} \begin{proof} Define $R^c \mbox{\rm E}quiv \tc$ and $P^c$ as \begin{equation} \label{PcTCBR} P^c \mbox{\rm E}quiv \bar{P_1} \cup \hat{P_2} \mbox{\rm E}nd{equation} where $xP_1 y$ if and only if there exists a $S$ such that $C(S)=x$ and $y \notin \min(S, \tc)$ and $\bar{P_1} = tc(P_1)$. Also, $\hat{P_2} \mbox{\rm E}quiv R^c \setminus(\bar{P_1} \cup \bar{P_1}^{-1})$ $R^c$ is asymmetric and transitive as discussed above. Next we show that $P^c$ is a linear order. \textbf{Completeness:} Let us assume that $C(xy)=x$. If $x$ and $y$ are not related with respect to $R^c$, this implies $xP_1y$. Now, if $(xy) \notin \bar{P_1} \cup \bar{P_1}^{-1}$, then it is related with respect to $R^c$. Therefore, $(xy)$ is related with respect to $\hat{P_2}$ \textbf{Asymmetry:} If possible, say for some $x,y$, both $xP^c y$ and $yP^c x$ is true. Either $x\bar{P_1} y$ or $y\bar{P_1} x$ is true (else both will be derived through $R^c$ which is a contradiction). W.L.O.G suppose $x\bar{P_1} y$ holds. Then $y\bar{P_1}x$ cannot hold by R-SARP. Thus, it must be that $\neg y P^c x$ as it would mean $yR^cx$ and $x,y$ are not related with respect to $\bar{P_1}$. \textbf{Transitivity:} Assume for contradiction that $P^c$ is cyclic. Since $P^c$ is complete, we only need to consider a 3-cycle i.e. for some $x,y $ and $z$, $xP^cyP^czP^cx$. It is easy to see that at least one of the pair must be related in $\bar{P_1}$, thus the following cases are possible: \begin{itemize} \item $ x \bar{P_1}y$ and $y \bar{P_1}z$: This would imply $x \bar{P_1}z$ and $\neg z\bar{P_1}x$ therefore $\neg zP^c x$. 
\item $ x \bar{P_1}y$ and $y \hat{P_2} z$: Since $zP^cx$ is true, following two cases are true: \begin{itemize} \item $z \bar{P_1} x$: This would imply $z \bar{P_1}y$ and $\neg y\bar{P_1}z$ therefore $zP^c y$, a contradiction to the asymmetry of $P_c$. \item $z \hat{P_2}x$ : By \textit{NBC$^*$}, we know that $y R^c x$. By NC and NBC$^*$, $C(xyz) \in \{y,z\}$. As $ y, z \notin \min(xyz, R^c)$, we get $ (yz) \in \bar{P_1} \cup \bar{P_1}^{-1}$, a contradiction. \mbox{\rm E}nd{itemize} \mbox{\rm E}nd{itemize} \begin{comment} Now, let the smallest possible cycle be of length $k>3$ (see figure \ref{fig1}). \begin{figure} \centering \includegraphics[scale=0.2]{Cycle1.png} \caption{Cycle in $P^c$} \label{fig1} \mbox{\rm E}nd{figure} Note that for any two non-adjacent alternatives of the cycle , $x_i,x_j$ ($j \neq i+1$), we must have $(x_i,x_j), (x_j, x_i) \notin P^c$ (otherwise we have a smaller cycle). Consider the triple $x_1P^cx_{2}P^cx_{3}$. Note that we cannot have $x_1\bar{P_1}x_2\bar{P_1}x_3$ as we get $x_1\bar{P_1}x_3$ by the transitivity of $\bar{P_1}$. Thus, following cases are possible: \begin{itemize} \item $x_1\hat{P_2}x_2\hat{P_2}x_3$: In this case $x_1$ and $x_3$ are not related in $P^c$(else, we have a smaller cyle). Therefore, either $x_1P_2x_3$ or $x_3P_2x_1$. Since $C(x_1x_2x_3)=x_1$, we must have $x_1 P_2x_3$ \item $x_1\bar{P_1}x_2\hat{P_2}x_3$: Note that $C(x_1x_2x_3)\neq x_3$. If $x_3R^cx_1$, then if $C(x_1x_2x_3)= x_2$ then $x_2P_1x_3$ holds, a contradiction. If $C(x_1x_2x_3)= x_1$, we have a $(x_3x_1)$ weak reversal due to $x_2$ implying $x_3R^cx_2$, a contradiction. Therefore $x_1R^cx_3$ and $x_1P_2x_3$. \mbox{\rm E}nd{itemize} Therefore, for any triple $x_i,x_{i+1}$ and $x_{i+2}$, we must have $x_iP_2x_{i+2}$ (and $x_iR^cx_{i+2}$). Now, consider the triple $x_1,x_{3}$ and $x_{4}$. By the above argument $x_1P_2x_3$ (and $x_1R^cx_3$). Now, we show that we must have $x_1P_2x_4$ (and $x_1R^cx_4$). 
There are two possible cases: (i) $x_3\bar{P_1}x_4$ and (ii) $x_3 \hat{P_2}x_4$. In case (i) $C(x_1x_3x_4) \neq x_3$. If $x_4R^cx_1$ (and $x_4P_2x_1$), then $C(x_1x_3x_4) = x_4$. Given $x_1 \notin \min(\{x_1,x_3,x_4\}, \succ_R)$, we must have $x_4P_1x_1$, which is a contradiction. Therefore, we must have $x_1R^cx_4$ and $x_1P_2x_4$. In case (ii) it is straightforward to see that $x_4 P_2 x_1$ cannot be true, thus $x_1P_2x_4$ (and $x_1R^cx_4$). Using the same argument we can show that $x_1P_2x_i$ for all $i \neq 2$. If we start from any triple $x_i,x_{i+1}$ and $x_{i+2}$ with $i >2$, using the same argument we get $x_i P_2x_1$ (and $x_iR^cx_1)$, a contradiction. Therefore $P^c$ is acyclic.\\ Let $\bar{P^c}= tc(P^c)$, where $tc$ is the transitive closure. Since $\bar{P^c}$ is a partial order, by Szpilrajn theorem, a completion of $\bar{P^c}$ which is a linear order. We denote it as $\hat{P^c}$. \mbox{\rm E}nd{comment} Now, we show that the above defined $(R^c,P^c)$ rationalize the $C$. Consider a set $S$ and $C(S)=x$. Suppose $x \in \min(S,R^c)$. Then there exists $y \in S \setminus \{x\}$ such that $yR^cx$ holds and $ \neg xR^cy'$ for all $ y' \in S \setminus \{x\}$. Note that by WCC$^*$, there exists a sequence of sets ordered in set inclusion from $\{x,y\}$ to $S$ with choices belonging to $\{x,y\}$. R-SARP ensures that there exists a $z \in S$ that causes the $(yx)$ reversal. As argued above, Lemma \ref{corr} and \ref{SR} imply that the reversal is weak. By Lemma \ref{Red}, $y \succ_c x \succ_c z$ and $C(xyz)=x$. Thus, $x \ \tc \ z$, leading to a contradiction. Now we show that $x = \max(S \setminus \min(S,R^c),P^c)$. Consider any such $y$ that $y \notin \min(S, \tc)$ and $ y P^c x$ holds. We know that by construction of $P^c$, we have $x P_1 y (\implies x P^c y)$ holds which contradicts the asymmetry of $P^c$. Therefore $x P^c y$ for all $y \notin \min(S, \tc)$. \mbox{\rm E}nd{proof} Proposition \ref{prop4} and \ref{prop5} prove the sufficiency. 
\subsection{Proofs of results in Section \ref{axiomsdiscussion}} \label{proofsec4.2} \subsubsection{Proof of Lemma \ref{rwwarp}} Suppose for some $S,S'$ and $S''$ we have $C(S) = C\{x,y\} = x$ and $C(S') = y$. Note that we have a $(xy)$ reversal. By WCC$^*$ there exists a $z \in S$ that causes this reversal. If $C(xz)=x$ then it is a weak $(xy)$ reversal. By definition, $x \ \tc \ y $ and $ y \ \tc \ z$, implying $C(S'') \neq y$ by R-WARP. If $C(xz)=z$, this implies a strong $(xy)$ reversal. Also since $z \ \tc \ x$ there is a weak $(zx)$ reversal due to some $z' \in S$ (by WCC$^*$). Therefore $x \notin \min(S', \tc)$, implying $C(S') \neq y$ by R-WARP. \subsection{Proofs of results in Section \ref{sec6}} \subsubsection{Proof of Lemma \ref{SMP}} Follows from Proposition \ref{prop3} and Lemma \ref{Red}. \subsubsection{Proof of Lemma \ref{Idencor}} Using Lemma \ref{SMP}, we can say that if we have either a weak or a strong reversal, then it will be reflected in the small menu reversals, i.e., sets such that $|S| \le 3$. Now consider two choice functions $C$ and $\bar{C}$ with the same choices in small menus, but a different choice in at least one set $S$ where $|S| > 3$. W.L.O.G., let the choice in that set be $C(S)=x$ and $\bar{C}(S)=y$ where $x \neq y$. Let $C(xy)=x$. Hence, we have a $(xy)$ reversal in the choice function $\bar{C}$. We have argued before that any reversal in a CBR representable choice function can either be a weak or a strong reversal. By Lemma \ref{Red}, this reversal will be reflected in the small menus (hence the relations required will be common to both the representations). If the reversal is weak, then it will be reflected in small menus giving $yR^*z$ and $yP^*x$. Hence, $x$ cannot be chosen in a set containing $y$ and $z$, which is a contradiction as $C(S)=x$. If the reversal is strong, then we have two possible cases: (i) $x \succ_c y \succ_c z \succ_c x$ and $C(xyz)=y$.
Here the $(xy)$ reversal is seen in a small menu giving $xP^*y$ and $\neg xR^*y$, $yP^*z$, $zR^*x$. Note that $C(xyz)=y$ and $C(S)=x$ where $S \supset \{xyz\}$. As $C(xz)=z$, we have a $(zx)$ reversal due to some $w \in S$. Knowing that $zR^*x$ is true, this is a weak reversal, which is seen in small menus implying $xR^*w$ holds. This means $x \notin \min(S, \bar{R})$, contradicting $\bar{C}(S)=y$. (ii) $z \succ_c x \succ_c y$, $z \succ_c y \succ_c w$, $C(xyz)=z$ and $C(yzw)=y$ for some $w \in X$. Note that again we have a weak $(zx)$ reversal due to some $k \in S$, leading to a contradiction as above. \subsubsection{Proof of Theorem \ref{Iden1}} \begin{itemize} \item[(i)] Note that Lemma \ref{Rev} and Proposition \ref{prop3} imply that $R^c \subset R^*$. Also note that the proof of Proposition \ref{prop3} rationalizes the choice function using $R^c$ as the first rationale. This proves that $R^* \subset R^c$. Hence, $R^* = R^c$. \item[(ii)] As the choice procedure chooses the maximal alternative from the set of alternatives which do not belong to $\min(S,R)$, $\hat{P}_{R^c}$ is a subset of any $P$ such that $(R^c,P)$ is a representation. For all the pairs which are not related with respect to $R^c$, both the alternatives are shortlisted. Since $x \succ_c y$, $xPy$ is true, and hence $P^c \subseteq P^*$. \end{itemize} \subsubsection{Proof of Theorem \ref{Iden2}} \begin{itemize} \item[(i)] We first show that $\hat{Q} \cap R =\emptyset$ for any $R$ such that $(R,P)$ represents the choice function. Consider the first case with a strong $(xw)$ reversal on $S \ni y$ due to, say, $z$. By Lemma \ref{Rev} and Proposition \ref{prop3}, we know that $zR^*x$, $xP^*w$ and $wP^*z$ hold. For $w$ to be chosen in $S \cup \{z\}$, it must be that $x \in \min(S\cup \{z\},R)$; therefore we cannot have $xRy$. Similarly, in the second case, we have a strong $(yw)$ reversal on $S \ni x$ due to, say, $z$. This implies that $zR^*y$, $yP^*w$ and $wP^*z$ must hold (Lemma \ref{Rev} and Proposition \ref{prop3}).
For $w$ to be chosen in $S \cup \{z\}$, it must be that $y \in \min(S \cup \{z\},R)$. Now, if $xRy$ holds, then there must exist an $a \in S$ such that $ yRa$ (implying $y \notin \min(S \cup \{z\},R)$) holds, which contradicts the assumption that $C(S \cup \{z\}) =w$. In the final case, we have a weak $(wx)$ reversal and $w$ is chosen in the presence of $x,y$. We know that $yP^*w$ holds. For $w$ to be chosen in the presence of $y$, $y$ should not be shortlisted, which is not possible with $xRy$ by the same argument as in the previous case. \\ We have proved above that $R^c$ is the smallest possible $R$. $\bar{R}$ must be a subset of the pairwise relation $\succ_c \setminus \hat{Q}$. As $R$ is a transitive relation, $\bar{R}$ must be the largest transitive relation which is a subset of $\succ_c \setminus \hat{Q}$. \item[(ii)] The argument is similar to that of $P^*$ in the proof of the theorem above, replacing $R^c$ with $R$. \end{itemize} \subsection{Proof of results in Section \ref{sec7}} \label{proofsec7} Before we begin the proof, we prove some intermediate results. \begin{defn} Negative Expansion (NE): For all $S,S' \supset \{x,y\}$, \begin{center} $C(S)= C(S') = x $ implies $C(S \cup S') \neq y$. \end{center} \end{defn} \begin{lemma} \label{NE} If $C$ is CBR representable, then $C$ satisfies Negative Expansion. \end{lemma} \begin{proof} If possible, suppose the choice function $C$ violates NE. Then there exists $S, S' \supset \{x,y\}$ such that $C(S)=C(S')=x$ and $C(S \cup S')=y$. If $C(xy)=x$, then we have an $(xy)$ reversal. By Proposition \ref{necessity}, $C$ satisfies A(1)-A(4), implying each reversal is either weak or strong. Let $(R,P)$ be the representation of the choice function. A weak $(xy)$ reversal due to $z \in S$ and $z' \in S'$ implies $yPx$, $yRz$ and $yRz'$ (by Lemma \ref{Rev}); therefore it must be that $y \notin \min(S,R)$. This implies $C(S) \neq x$.
Also, if there is a strong $(xy)$ reversal due to some $z \in S \cup S'$ , then lemma \ref{Rev} implies $zRx$. Given $C(S)=x$, we know that $x \notin \min(S, R)$ and there is a $w \in S$ such that $xRw$ holds. As $x \notin \min(S \cup S',R)$ and by lemma \ref{Rev}, we know that $xPy$ holds. This contradicts $C(S \cup S')= y$. \\ Now, if $C(xy)=y$, we have an $(yx)$ double reversal. A weak $(yx)$ reversal (due to some $z \in S$ and $z' \in S'$) implies $xRz$ and $xRz'$ (by lemma \ref{Rev}). Given $xPy$ and $x \notin \min(S \cup S', R)$, we cannot have $C(S \cup S')=y$. If the $(yx)$ reversal is strong, then by lemma \ref{Rev}, $yPx$ must hold. Since $C(S)=C(S')=x$, we must have $y \in \min(S,R)$ and $y \in \min(S',R)$ (thus $y \in \min(S \cup S')$) contradicting $C(S \cup S')=y$ \mbox{\rm E}nd{proof} \bigbreak \begin{lemma}\label{dbl} If $C$ is CBR representable, then a $(xy)$ double reversal due to $z_1,z_2$ is equivalent to a strong $(xy)$ reversal due to $z_1$ and a weak $(z_1x)$ reversal due to $z_2$ \mbox{\rm E}nd{lemma} \begin{proof} Let $C$ be a CBR representable choice function. A $(xy)$ double reversal due to $z_1$, $z_2$ implies $x \succ_c y$ and $\mbox{\rm E}xists \ S,S'$, $\{x,y\} \subset S' \subset S$ such that $C(S')=x,C(S' \cup z_1)=y \ C(S)=y, C(S \cup z_2)=x$ and for all $T,T', T''$, $\{x,y\} \subset T' \subset T \subset T''$, if $C(T) = C\{x,y\} = x$ and $C(T') = y$, then $C(T'') \neq y$. As $C$ satisfies WCC$^*$ and Exclusivity, each reversal is either weak or strong. If first $(xy)$ reversal is weak, then lemma \ref{Rev} implies $xRy$, $yRz_1$ and $yPx$. As $x,y \notin \min(T,R)$ for all $T \supset S$, there can be no double reversal. Thus, first reversal is strong, implying $z_1Rx$, $xPy$ and $yPz_1$. For $x$ to be chosen again in $S \cup \{z_2\} $, it must be that $xRz_2$ and $xPz_1$ hold. 
This implies $z_1 \succ_c x \succ_c z_2$ and $C(xz_1 z_2)=x$ and hence a weak $(z_1 x)$ reversal due to $z_2$ \mbox{\rm E}nd{proof} \subsubsection{Proof of Proposition \ref{RSM}} Let us prove the if part. Consider a CBR representable choice function $C$ which is also RSM representable. If possible, for some $x,y$ we have a weak $(xy)$ reversal due to some $z$. By Lemma \ref{Rev}, we have $x \succ_c y \succ_c z$ and $C(xyz)=y$. However, this violates Expansion as $C(xy)=C(xz)=x$, but $C(xyz)=y$. As RSM satisfies Expansion, this is a contradiction. Now consider $C$ which is CBR representable, with no weak reversals. By proposition \ref{necessity}, if there is any reversal, it has to be strong. If possible, let $C$ violate Expansion, i.e. there exists $S, S'$ such that $C(S)=C(S')=x$, but $C(S \cup S') =y \neq x$. If $\{x,y\} \subset S \cap S'$, this violates NE leading to a contradiction (by lemma \ref{NE}). WLOG, let $y \in S \setminus S'$. If $C(xy)=y$, we have double reversal which is a contradiction by lemma \ref{dbl}. Thus, $C(xy)=x$ implying a $(xy)$ strong reversal due to some $z \in S'$. By lemma \ref{Rev}, $xPy$ and $zRx$ hold and since $C(S')=x$, there exists a $w \in S'$ such that $xRw$ is true. Note that this implies $x \notin \min(S \cup S',R)$ which implies $C(S \cup S') \neq y$. Now, if $C$ violates WWARP, given it satisfies R-WARP, there is a double reversal. But, that is equivalent to a strong and a weak reversal which is a contradiction. Thus, $C$ satisfies Expansion and WWARP, implying RSM representation. \subsubsection{Proof of Proposition \ref{TSM}} Argument is analogous to that of proposition \ref{RSM} \begin{comment} \subsubsection{Proof of Proposition \ref{EPH}} Consider a choice function $C$ that is T-CBR representable. We first show that if $C$ is EPH, then $C$ displays no strong reversal. If possible, say there is a strong $(xy)$ reversal due to some $z$. 
By lemma \ref{SMP}, either (i) $y \succ_c z \succ_c x \succ_c y$ and $C(xyz)=z$; for some $z \in X$ or (ii) $x \succ_c y \succ_c z$, $x \succ_c z \succ_c w$, $C(xwz)=z$ and $C(xyz)=x$ for some $z,w \in X$. (i) is a contradiction to NBC. (ii) contradicts WARP-EP as $x \in \psi(xyz)$ and $z \in \psi(xyzw)$\footnote{$x \in \psi(S)$ if $C(S')=x$ for some $S' \subset S$ }. Now we show that $C$ satisfies Condition A. Suppose there is a $(xy)$ reversal on set $S$. Now $C(S \cup \{z\})=y$, $x \in \psi(S \cup \{z\})$. By WARP-EP, $C(yw)=w$ for all $w \in S\setminus \{x,y\}$. Therefore, there is a $(wy)$ reversal. NC is true by Theorem \ref{thm1}. By lemma \ref{SMP}, there can be no binary cycle as there is no strong reversal. Suppose for some $S,T \supset \{x,y\} $, $C(S)=x, y \in \psi(S)$ and $C(T)=y,x \in \psi(T)$. W.L.O.G $C(xy)=x$. Thus, there is a weak $(xy)$ reversal. By lemma \ref{Rev}, $y \in \min(S, R)$. This implies there exists a $w \in S$ such that $wRy$ holds and for no $w' \in S$, $yRw'$ holds. As $y \in \psi(S)$, there exists a $S' \subset S$, such that $C(S')=y$. By argument above, $y$ is isolated in $S'$ with respect to $R$. As $x$ Now we show that if a choice function is T-CBR representable, displays no strong reversal and satisfies Condition A, it is EPH. We now construct $(R,P)$ such that $R \text{ and } P$ are linear orders. Let $R=\succ_c$ and $P$ be defined as in \ref{PcTCBR}. $\succ_c$ is complete and asymmetric. If there is a cycle in $\succ_c$, we have strong reversal which is a contradiction. Thus, $R$ is a linear order. $P$ is a linear order by arguments above. Now, we show that the above defined $(R,P)$ rationalize the $C$. Consider a set $S$ and $C(S)=x$. Suppose $x = \min(S,R)$. Then there exists $y \in S \setminus \{x\}$ such that $yRx$ holds and $ \neg xRy'$ for all $ y' \in S \setminus \{x\}$. Note that by WCC$^*$, there exists a sequence of sets ordered in set inclusion from $\{x,y\}$ to $S$ with choices belonging to $\{x,y\}$. 
R-SARP ensures that there exists a $z \in S$ that causes the $(yx)$ reversal. As $C$ displays no strong reversal, the reversal is weak. By Lemma \ref{Red}, $y \succ_c x \succ_c z$ and $C(xyz)=x$. Thus, $x \ \tc \ z$, leading to a contradiction. Now we show that $x = \max(S \setminus \min(S,R^c),P^c)$. Consider any $y$ such that $y \neq \min(S, R)$ and $ y P x$ holds. We know that by construction of $P^c$, we have $x P_1 y (\implies x P^c y)$ holds which contradicts the asymmetry of $P^c$. Therefore $x P^c y$ for all $y \notin \min(S, \tc)$. \mbox{\rm E}nd{comment} \subsection{Independence of axioms} By means of simple examples, we demonstrate that the characterization is tight. \\ Example 1. The choice function below satisfies NBC$^*$, WCC* and R-WARP but violates NC: $X = \{x,y,z\}$ {\footnotesize \begin{center} \begin{tabular}{|c|c|c|c|} \hline {\textbf{S}} & {\textbf{C(S)}} & {\textbf{S}} & {\textbf{C(S)}} \\ \hline $\{x,y\}$ & $y$ & $\{x,y,z\}$ & $x$ \\ $\{x,z\}$ & $z$ & &\\ $\{y,z\}$ & $y$ & &\\ \hline \mbox{\rm E}nd{tabular} \mbox{\rm E}nd{center} } Example 2. The choice function below satisfies NBC$^*$, NC and R-WARP but violates WCC*: $X = \{x,y,z, w\}$ {\footnotesize \begin{center} \begin{tabular}{|c|c|c|c|c|c|} \hline {\textbf{S}} & {\textbf{C(S)}} & {\textbf{S}} & {\textbf{C(S)}} & {\textbf{S}} & {\textbf{C(S)}} \\ \hline $\{x,y\}$ & $x$ & $\{x,y,z\}$ & $z$ & $\{x,y,z,w\}$ & y \\ $\{x,z\}$ & $z$ & $\{x,y,w\}$ & $w$ & & \\ $\{x,w\}$ & $w$ & $\{x,z,w\}$ & $w$ & &\\ $\{y,z\}$ & $z$ & $\{y,z,w\}$ & $y$ & & \\ $\{y,w\}$ & $y$ & & & & \\ $\{z,w\}$ & $w$ & & & & \\ \hline \mbox{\rm E}nd{tabular} \mbox{\rm E}nd{center} } Example 3. 
The choice function below satisfies NC, WCC* and R-WARP but violates NBC$^*$: $X = \{x,y,z, w\}$ {\footnotesize \begin{center} \begin{tabular}{|c|c|c|c|c|c|} \hline {\textbf{S}} & {\textbf{C(S)}} & {\textbf{S}} & {\textbf{C(S)}} & {\textbf{S}} & {\textbf{C(S)}} \\ \hline $\{x,y\}$ & $x$ & $\{x,y,z\}$ & $y$ & $\{x,y,z,w\}$ & $y$ \\ $\{x,z\}$ & $z$ & $\{x,y,w\}$ & $y$ & & \\ $\{x,w\}$ & $x$ & $\{x,z,w\}$ & $x$ & &\\ $\{y,z\}$ & $y$ & $\{y,z,w\}$ & $z$ & & \\ $\{y,w\}$ & $y$ & & & & \\ $\{z,w\}$ & $z$ & & & & \\ \hline \mbox{\rm E}nd{tabular} \mbox{\rm E}nd{center} } Example 4. The choice function below satisfies NC, WCC* and NBC$^*$ but violates R-WARP: $X = \{x,y,z, w\}$ {\footnotesize \begin{center} \begin{tabular}{|c|c|c|c|c|c|} \hline {\textbf{S}} & {\textbf{C(S)}} & {\textbf{S}} & {\textbf{C(S)}} & {\textbf{S}} & {\textbf{C(S)}} \\ \hline $\{x,y\}$ & $x$ & $\{x,y,z\}$ & $y$ & $\{x,y,z,w\}$ & $x$ \\ $\{x,z\}$ & $x$ & $\{x,y,w\}$ & $x$ & & \\ $\{x,w\}$ & $x$ & $\{x,z,w\}$ & $x$ & &\\ $\{y,z\}$ & $y$ & $\{y,z,w\}$ & $y$ & & \\ $\{y,w\}$ & $y$ & & & & \\ $\{z,w\}$ & $z$ & & & & \\ \hline \mbox{\rm E}nd{tabular} \mbox{\rm E}nd{center} } \noindent \setlength{\bibsep}{0.2 cm} \mbox{\rm E}nd{document}
\begin{document} \title{A Complete Solution to Optimal Control and Stabilization for Mean-field Systems: Part I, Discrete-time Case} \author{Huanshui~Zhang$^{\ast}$ and~Qingyuan~Qi~\IEEEmembership{} \thanks{This work is supported by the National Science Foundation of China under Grants 61120106011, 61573221, 61633014.} \thanks{H. Zhang and Q. Qi are with the School of Control Science and Engineering, Shandong University, Jinan 250061, P.R. China. H. Zhang is the corresponding author ([email protected]).} } \maketitle \begin{abstract} Different from most of the previous works, this paper provides a thorough solution to the fundamental problems of linear-quadratic (LQ) control and stabilization for discrete-time mean-field systems under basic assumptions. Firstly, the sufficient and necessary condition for the solvability of the mean-field LQ control problem is presented in analytic expression based on the maximum principle developed in this paper, which is compared with the results obtained in the literature where only operator type solvability conditions were given. The optimal controller is given in terms of a coupled Riccati equation which is derived from the solution to the forward and backward stochastic difference equation (FBSDE). Secondly, the sufficient and necessary stabilization conditions are explored. It is shown that, under the exactly observability assumption, the mean-field system is stabilizable in mean square sense if and only if a coupled algebraic Riccati equation (ARE) has a unique solution $P$ and $\bar{P}$ satisfying $P>0$ and $P+\bar{P}>0$. Furthermore, under the exactly detectability assumption, which is a weaker assumption than exactly observability, we show that the mean-field system is stabilizable in mean square sense if and only if the coupled ARE has a unique solution $P$ and $\bar{P}$ satisfying $P\geq 0$ and $P+\bar{P}\geq 0$. The key techniques adopted in this paper are the maximum principle and the solution to the FBSDE obtained in this paper.
The derived results in this paper forms the basis to solve the mean-field control problem for continuous-time systems \cite{con} and other related problems. \end{abstract} \begin{IEEEkeywords} Mean-field LQ control, maximum principle, Riccati equation, optimal controller, stabilizable controller. \end{IEEEkeywords} \IEEEpeerreviewmaketitle \section{Introduction} In this paper, the mean-field linear quadratic optimal control and stabilization problems are considered for discrete-time case. Different from the classical stochastic control problem, mean-field terms appear in system dynamics and cost function, which combines mean-field theory with stochastic control problems. Mean-field stochastic control problem has been a hot research topic since 1950s. System state is described by a controlled mean-field stochastic differential equation (MF-SDE), which was firstly proposed in \cite{kac}, and the initial study of MF-SDEs was given by reference \cite{Mckean}. Since then, many contributions have been made in studying MF-SDEs and related topics by many researchers. See, for example, \cite{dawson}-\cite{gartner} and the references cited therein. The recent development for mean-field control problems can be found in \cite{buck1}, \cite{buck2}, \cite{huangjh}, \cite{yong}, \cite{ni1}, \cite{ni2} and references therein. Reference \cite{yong} dealt with the continuous-time finite horizon mean-field LQ control problem, a sufficient and necessary solvability condition of the problem was presented in terms of operator criteria. By using decoupling technique, the optimal controller was designed via two Riccati equations. Furthermore, the continuous-time mean-field LQ control and stabilization problem for infinite horizon was investigated in \cite{huangjh}, the equivalence of several notions of stability for mean-field system was established. It was shown that the optimal mean-field LQ controller for infinite horizon case can be presented via AREs. 
For the discrete-time mean-field LQ control problem, \cite{ni1} and \cite{ni2} studied the finite horizon case and the infinite horizon case, respectively. In \cite{ni1}, a necessary and sufficient solvability condition for the finite horizon discrete-time mean-field LQ control problem was presented in operator type. Furthermore, under stronger conditions, the explicit optimal controller was derived using the matrix dynamical optimization method, which is in fact a sufficient solvability condition for the discrete-time mean-field LQ control problem \cite{ni1}. Besides, for the infinite time case, the equivalence of $L^{2}$ open-loop stabilizability and $L^{2}$ closed-loop stabilizability was studied. The stabilizing condition was also investigated in \cite{ni2}. However, it should be highlighted that the LQ control and stabilization problems for mean-field systems remain to be further investigated although major progress has been obtained in the above works \cite{ni1}, \cite{huangjh}, \cite{ni2}, \cite{yong} and references therein. The basic reasons are twofold: Firstly, the solvability condition for the LQ control was given in operator type \cite{ni1}, which is difficult to verify in practice; Secondly, the stabilization control problem of the mean-field system has not been essentially solved as only sufficient conditions for stabilization were given in the previous works. In this paper, we aim to provide a complete solution to the problems of optimal LQ control and stabilization for discrete-time mean-field systems. Different from previous works, we will derive the maximum principle (MP) for the discrete-time mean-field LQ control problem, which is new to the best of our knowledge. Then, by solving the coupled state equation (forward) and the costate equation (backward), the optimal LQ controller is obtained from the equilibrium condition naturally, and accordingly the sufficient and necessary solvability condition is explored in explicit expression.
The controller is designed via a coupled Riccati equation which is derived from the solution to the FBSDE, and possesses similarity with the case of standard LQ control. Finally, with convergence analysis on the coupled Riccati equation, the infinite horizon LQ controller and the stabilization condition (sufficient and necessary) are explored by defining the Lyapunov function with the optimal cost function. Two stabilization results are obtained under two different assumptions. One is under the standard assumption of exactly observability: it is shown that the mean-field system is stabilizable in mean square sense if and only if a coupled ARE has a unique solution $P$ and $\bar{P}$ satisfying $P>0$ and $P+\bar{P}>0$. The other one is under a weaker assumption of exactly detectability: it is shown that the mean-field system is stabilizable in mean square sense if and only if the coupled ARE admits a unique solution $P$ and $\bar{P}$ satisfying $P\geq 0$ and $P+\bar{P}\geq 0$. It should be pointed out that the presented results are parallel to the solution of the standard stochastic LQ problem, with similar results such as controller design and stabilization conditions under the same assumptions on system and weighting matrices. In particular, the weighting matrices $R_{k}$ and $R_{k}+\bar{R}_{k}$ are only required to be positive semi-definite for the optimal controller designed in this paper. This is more standard than the previous works \cite{ni1}, where the matrices are assumed to be positive definite. A preliminary version of this paper was submitted as \cite{cdc}, in which the finite horizon optimal control for the mean-field system was considered. In this paper, both the finite horizon control problem and the infinite horizon optimal control and stabilization problems are investigated. The remainder of this paper is organized as follows. Section II presents the maximum principle and the solution to the finite horizon mean-field LQ control problem.
In Section III, the infinite horizon optimal control and stabilization problems are investigated. Numerical examples are given in Section IV to illustrate main results of this paper. Some concluding remarks are given in Section V. Finally, relevant proofs are detailed in Appendices. Throughout this paper, the following notations and definitions are used. \textbf{Notations and definitions}: $I_{n}$ means the unit matrix with rank $n$; Superscript $'$ denotes the transpose of a matrix. Real symmetric matrix $A>0$ (or $\geq 0$) implies that $A$ is strictly positive definite (or positive semi-definite). $\mathcal{R}^{n}$ signifies the $n$-dimensional Euclidean space. $B^{-1}$ is used to indicate the inverse of real matrix $B$. $\{\Omega,\mathcal{F},\mathcal{P},\{\mathcal{F}_{k}\}_{k\geq 0}\}$ represents a complete probability space, with natural filtration $\{\mathcal{F}_{k}\}_{k\geq 0}$ generated by $\{x_{0},w_{0},\cdots,w_{k}\}$ augmented by all the $\mathcal{P}$-null sets. $E[\cdot|\mathcal{F}_{k}]$ means the conditional expectation with respect to $\mathcal{F}_{k}$ and $\mathcal{F}_{-1}$ is understood as $\{\emptyset, \Omega\}$. \begin{definition}\label{def} For random vector $x$, if $E(x'x)=0$, we call it zero random vector, i.e., $x=0$. \end{definition} \section{Finite Horizon Mean-field LQ Control Problem} \subsection{Problem Formulation and Preliminaries } \subsubsection{Problem Formulation} Consider the following discrete-time mean-field system \begin{equation}\label{ps1} \left\{ \begin{array}{ll} x_{k+1}=(A_{k}x_{k}+\bar{A}_{k}Ex_{k}+B_{k}u_{k}+\bar{B}_{k}Eu_{k})\\ ~~~~~~~+(C_{k}x_{k}+\bar{C}_{k}Ex_{k}+D_{k}u_{k}+\bar{D}_{k}Eu_{k})w_{k},\\ x_{0}=\xi,\\ \end{array} \right. \end{equation} where $A_{k},\bar{A}_{k},C_{k},\bar{C}_{k}\in \mathcal{R}^{n\times n}$, and $B_{k},\bar{B}_{k},D_{k},\bar{D}_{k}\in \mathcal{R}^{n\times m}$, all the coefficient matrices are given deterministic. 
$x_{k}\in\mathcal{R}^{n}$ is the state process and $u_{k}\in \mathcal{R}^{m}$ is the control process. The system noise $\{w_{k}\}_{k=0}^{N}$ is scalar valued random white noise with zero mean and variance $\sigma^{2}$. $E$ is the expectation taken over the noise $\{w_{k}\}_{k=0}^{N}$ and initial state $\xi$. Denote $\mathcal{F}_{k}$ as the natural filtration generated by $\{\xi,w_{0},\cdots,w_{k}\}$ augmented by all the $\mathcal{P}$-null sets. By taking expectations on both sides of \eqref{ps1}, we obtain \begin{equation}\label{ps20} Ex_{k+1}=(A_{k}+\bar{A}_{k})Ex_{k}+(B_{k}+\bar{B}_{k})Eu_{k}. \end{equation} The cost function associated with system equation \eqref{ps1} is given by: \begin{align}\label{ps2} J_{N}&=\sum_{k=0}^{N}E\Big[x_{k}'Q_{k}x_{k}+(Ex_{k})'\bar{Q}_{k}Ex_{k}\notag\\ &~~+u_{k}'R_{k}u_{k}+(Eu_{k})'\bar{R}_{k}Eu_{k}\Big]\notag\\ &~~+E(x_{N\hspace{-0.5mm}+\hspace{-0.5mm}1}'P_{N\hspace{-0.5mm}+\hspace{-0.5mm}1}x_{N\hspace{-0.5mm}+\hspace{-0.5mm}1}) \hspace{-1mm}+\hspace{-1mm}(Ex_{N\hspace{-0.5mm}+\hspace{-0.5mm}1})'\bar{P}_{N\hspace{-0.5mm}+\hspace{-0.5mm}1}Ex_{N\hspace{-0.5mm}+\hspace{-0.5mm}1}, \end{align} where $Q_{k},\bar{Q}_{k},R_{k},\bar{R}_{k}$, $P_{N+1},\bar{P}_{N+1}$ are deterministic symmetric matrices with compatible dimensions. The finite horizon mean-field LQ optimal control problem is stated as follows: \begin{problem}\label{prob1} For system \eqref{ps1} associated with cost function \eqref{ps2}, find $\mathcal{F}_{k-1}$-measurable controller $u_{k}$ such that \eqref{ps2} is minimized. \end{problem} To guarantee the solvability of \emph{Problem \ref{prob1}}, the following standard assumption is made as follows. \begin{assumption}\label{ass1} The weighting matrices in \eqref{ps2} satisfy $Q_{k}\geq 0$, $Q_{k}+\bar{Q}_{k}\geq 0$, $R_{k}\geq 0$, $R_{k}+\bar{R}_{k}\geq 0$ for $0\leq k \leq N$ and $P_{N+1}\geq 0$, $P_{N+1}+\bar{P}_{N+1}\geq 0$. 
\end{assumption} \subsubsection{Preliminaries} In order to solve the above problem, a basic result is firstly presented as below. \begin{lemma}\label{lemma01} For any random vector $x\not=0$, i.e., $E(x'x)\neq 0$ as defined in Definition \ref{def}, $E(x'Mx)\geq0$, if and only if~$M\geq 0$, where $M$ is a real symmetric matrix. \end{lemma} \begin{proof} The proof is straightforward and is omitted here. \end{proof} \begin{remark}\label{rem1} From Lemma \ref{lemma01}, immediately we have 1) For any $x$ satisfying $x=Ex\neq 0$, i.e., $x$ is deterministic, $x'Mx\geq 0$ if and only if $M\geq 0$. 2) For any random vector $x$ satisfying $Ex=0$ and $x\neq 0$, $E(x'Mx)\geq 0$ if and only if $M\geq 0$. \end{remark} \begin{remark}\label{rem2} Note that Lemma \ref{lemma01} and Remark \ref{rem1} also hold if ``$\geq$" in the conclusion is replaced by ``$\leq$", ``$<$", ``$>$" or ``$=$". \end{remark} \subsection{Maximum Principle} In this subsection, we will present a general result for the maximum principle of general mean-field stochastic control problem which is the base to solve the problems studied in this paper. Consider the general discrete-time mean-field stochastic systems \begin{equation}\label{ps3} x_{k+1}=f^{k}(x_{k},u_{k},Ex_{k},Eu_{k},w_{k}), \end{equation} where $x_{k}$ and $u_{k}$ are the system state and control input, respectively. $Ex_{k}$, $Eu_{k}$ are expectation of $x_{k}$ and $u_{k}$. Scalar-valued $w_{k}$ is the random white noise with zero mean and variance $\sigma^{2}$. $f^{k}(x_{k},u_{k},Ex_{k},Eu_{k},w_{k})$, in general, is a nonlinear function. The corresponding scalar performance index is given in the general form \begin{equation}\label{ps04} J_{N}\hspace{-1mm}=\hspace{-1mm}E\Big\{\phi(x_{N+1},Ex_{N+1})\hspace{-1mm}+\hspace{-1mm}\sum_{k=0}^{N}L^{k}(x_{k},u_{k},Ex_{k},Eu_{k})\Big\}, \end{equation} where $\phi(x_{N+1},Ex_{N+1})$ is a function of the final time $N+1$, $x_{N+1}$ and $Ex_{N+1}$. 
$L^{k}(x_{k},u_{k},Ex_{k},Eu_{k})$ is a function of $x_{k},Ex_{k},u_{k},Eu_{k}$ at each time $k$. From system \eqref{ps3}, we have that \begin{align}\label{mp0002} Ex_{k+1}&=E[f^{k}(x_{k},u_{k},Ex_{k},Eu_{k},w_{k})]\notag\\ &=g^{k}(x_{k},u_{k},Ex_{k},Eu_{k}), \end{align} where $g^{k}(x_{k},u_{k},Ex_{k},Eu_{k})$ is deterministic function. The general maximum principle (necessary condition) to minimize \eqref{ps04} is given in the following theorem. \begin{theorem}\label{maximum} The necessary conditions for the minimizing $J_{N}$ is given as, \begin{align} 0\hspace{-1mm}=\hspace{-1mm}E\hspace{-1mm}\left\{\hspace{-1mm} (L^k_{u_k})'\hspace{-1mm}+\hspace{-1mm}E(L_{Eu_{k}}^{k})'\hspace{-1mm}+\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{u_{k}}^{k} \hspace{-2mm}\\ g_{u_{k}}^{k} \hspace{-2mm} \\ \end{array} \hspace{-2mm}\right]'\hspace{-1mm}\lambda_{k}\hspace{-1mm}+\hspace{-1mm}E\left\{\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{Eu_{k}}^{k} \hspace{-1mm}\\ g_{Eu_{k}}^{k} \hspace{-1mm} \\ \end{array} \hspace{-2mm}\right]'\hspace{-1mm}\lambda_{k}\right\}\hspace{-1mm}\Bigg| {\mathcal F}_{k-1}\hspace{-1mm}\right\},\label{ps43} \end{align} where costate $\lambda_{k}$ obeys \begin{align} &\lambda_{k-1}\hspace{-1mm}=\hspace{-1mm}E\Big\{\left[\hspace{-2mm} \begin{array}{cc} I_{n}\hspace{-1mm}\\ 0 \hspace{-1mm} \\ \end{array} \hspace{-2mm}\right][L^k_{x_k}\hspace{-1mm}+\hspace{-1mm}E(L_{Ex_{k}}^{k})]'\hspace{-1mm}+\hspace{-1mm}[\tilde{f}^k_{x_k}]'\lambda_{k}\Big|\mathcal{F}_{k-1}\Big\},\label{ps41} \end{align} with final condition \begin{equation}\label{ps42} \lambda_{N}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} (\phi_{x_{N+1}})'+E(\phi_{Ex_{N+1}})'\hspace{-1mm} \\ 0 \hspace{-1mm} \\ \end{array} \hspace{-2mm}\right],\\ \end{equation} where \[f_{x_{k}}^{k}=\frac{\partial f_{k}}{\partial x_{k}},~~f_{u_{k}}^{k}=\frac{\partial f_{k}}{\partial u_{k}},f_{Ex_{k}}^{k}=\frac{\partial f_{k}}{\partial Ex_{k}}, f_{Eu_{k}}^{k}=\frac{\partial 
f_{k}}{\partial Eu_{k}},\] \[g_{x_{k}}^{k}=\frac{\partial g_{k}}{\partial x_{k}},~~g_{u_{k}}^{k}=\frac{\partial g_{k}}{\partial u_{k}},g_{Ex_{k}}^{k}=\frac{\partial g_{k}}{\partial Ex_{k}}, g_{Eu_{k}}^{k}=\frac{\partial g_{k}}{\partial Eu_{k}},\] and \begin{align*} &\phi_{Ex_{N+1}}\hspace{-1.1mm}=\hspace{-1.1mm}\frac{\partial \phi(x_{N+1},\hspace{-0.3mm}Ex_{N+1})}{\partial Ex_{N+1}}, \hspace{-0.5mm} \phi_{x_{N+1}}\hspace{-1.1mm}=\hspace{-1.1mm}\frac{\partial \phi(x_{N+1},\hspace{-0.3mm}Ex_{N+1})}{\partial x_{N+1}},\\ &L_{x_{k}}^{k}\hspace{-0.3mm}=\hspace{-0.3mm}\frac{\partial L_{k}}{\partial x_{k}},L_{u_{k}}^{k}\hspace{-0.3mm}=\hspace{-0.3mm}\frac{\partial L_{k}}{\partial u_{k}},L_{Ex_{k}}^{k}\hspace{-0.3mm}=\hspace{-0.3mm}\frac{\partial L_{k}}{\partial Ex_{k}},L_{Eu_{k}}^{k}\hspace{-0.3mm}=\hspace{-0.3mm}\frac{\partial L_{k}}{\partial Eu_{k}},\\ &\tilde{f}^{k}_{x_{k}}=\left[ \begin{array}{cc} f_{x_{k}}^{k} & f_{Ex_{k}}^{k} \\ g_{x_{k}}^{k} & g_{Ex_{k}}^{k}\\ \end{array} \right],~k=0,\cdots,N. \end{align*} \end{theorem} \begin{proof} See Appendix A. \end{proof} \subsection{Solution to Problem \ref{prob1}} Following Theorem \ref{maximum}, it is easy to obtain the following maximum principle for system \eqref{ps1} associated with the cost function \eqref{ps2}. 
\begin{lemma}\label{lemma1} The necessary condition of minimizing \eqref{ps2} for system \eqref{ps1} can be stated as: \begin{align} 0&=E\Big\{ R_{k}u_{k}\hspace{-1mm}+\hspace{-1mm}\bar{R}_{k}Eu_{k}\hspace{-1mm}+\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} B_{k}+w_{k}D_{k} \hspace{-1mm}\\ 0 \hspace{-1mm} \\ \end{array} \hspace{-2mm}\right]'\lambda_{k}\hspace{-1mm}\notag\\ &+E\Big\{\left[\hspace{-2mm} \begin{array}{cc} \bar{B}_{k}+w_{k}\bar{D}_{k} \hspace{-1mm}\\ B_{k}+\bar{B}_{k} \hspace{-1mm} \\ \end{array} \hspace{-2mm}\right]'\lambda_{k}\Big\}\Big| {\mathcal F}_{k-1}\Big\},\label{th33} \end{align} where costate $\lambda_{k}$ satisfies the following iteration \begin{align} \lambda_{k-1}&=E\Big\{\left[\hspace{-2mm} \begin{array}{cc} Q_{k}x_{k}+\bar{Q}_{k}Ex_{k}\hspace{-1mm}\\ 0 \hspace{-1mm} \\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\notag\\ &+\left[ \begin{array}{cc} \hspace{-1mm} A_{k}+w_{k}C_{k} & \bar{A}_{k}+w_{k}\bar{C}_{k}\hspace{-1mm}\\ \hspace{-1mm} 0 & A_{k}+\bar{A}_{k}\hspace{-1mm} \\ \end{array} \right]'\lambda_{k}\Big|\mathcal{F}_{k-1}\Big\},\label{th32} \end{align} with final condition \begin{equation}\label{th31} \lambda_{N}=\left[ \begin{array}{cc} P_{N+1}& \bar{P}_{N+1}^{(1)}\\ \bar{P}_{N+1}^{(2)} & \bar{P}_{N+1}^{(3)} \\ \end{array} \right]\left[ \begin{array}{cc} x_{N+1}\\ Ex_{N+1}\\ \end{array} \right], \end{equation} where $\bar{P}_{N+1}^{(1)}=\bar{P}_{N+1}$, $\bar{P}_{N+1}^{(2)}=\bar{P}_{N+1}^{(3)}=0$, $P_{N+1}$ and $\bar{P}_{N+1}$ are given by the cost function \eqref{ps2}. \end{lemma} In Lemma \ref{lemma1}, $\lambda_{k}$ ($1\leq k\leq N$) is costate and (\ref{th32}) is costate-state equation. \eqref{th32} and state equation \eqref{ps1} form the FBSDE system. \eqref{th33} is termed as equilibrium equation (condition). The main result of this section is stated as below. 
\begin{theorem}\label{main} Under Assumption \ref{ass1}, \emph{Problem 1} has a unique solution if and only if $\Upsilon_{k}^{(1)}$ and $\Upsilon_{k}^{(2)}$ for $k=0,\cdots,N$, as given below, are all positive definite. In this case, the optimal controller $\{u_{k}\}_{k=0}^{N}$ is given as: \begin{equation}\label{th43}\begin{split} u_{k}&=K_kx_{k}+\bar{K}_k Ex_{k}, \end{split}\end{equation} where \begin{align} K_{k}&=-[\Upsilon_{k}^{(1)}]^{-1}M_{k}^{(1)},\label{kk}\\ \bar{K}_{k}&=-\left\{[\Upsilon_{k}^{(2)}]^{-1}M_{k}^{(2)}-[\Upsilon_{k}^{(1)}]^{-1}M_{k}^{(1)}\right\}, \label{kkbar} \end{align} and $\Upsilon_{k}^{(1)}$, $M_{k}^{(1)}$, $\Upsilon_{k}^{(2)}$, $M_{k}^{(2)}$ are given as \begin{align} \Upsilon_{k}^{(1)}&=R_{k}+B_{k}'P_{k+1}B_{k}+\sigma^{2}D_{k}'P_{k+1}D_{k},\label{upsi1}\\ M_{k}^{(1)}&=B_{k}'P_{k+1}A_{k}+\sigma^{2}D_{k}'P_{k+1}C_{k},\label{h1}\\ \Upsilon_{k}^{(2)}&=R_{k}+\bar{R}_{k}+(B_{k}+\bar{B}_{k})'(P_{k+1}+\bar{P}_{k+1})(B_{k}+\bar{B}_{k})\notag\\ &+\sigma^{2}(D_{k}+\bar{D}_{k})'P_{k+1}(D_{k}+\bar{D}_{k}),\label{upsi2}\\ M_{k}^{(2)}&=(B_{k}+\bar{B}_{k})'(P_{k+1}+\bar{P}_{k+1})(A_{k}\hspace{-1mm}+\hspace{-1mm}\bar{A}_{k})\notag\\ &+\sigma^{2}(D_{k}+\bar{D}_{k})'P_{k+1}(C_{k}+\bar{C}_{k}),\label{h2} \end{align} while $P_{k}$ and $\bar{P}_{k}$ in the above obey the following coupled Riccati equation for $k=0,\cdots,N$. 
\begin{align} P_{k}&=Q_{k}+A_{k}'P_{k+1}A_{k}\hspace{-1mm}+\hspace{-1mm}\sigma^{2}C_{k}'P_{k+1}C_{k}\notag\\ &-[M_{k}^{(1)}]'[\Upsilon_{k}^{(1)}]^{-1}M_{k}^{(1)},\label{th41}\\ \bar{P}_{k}&=\bar{Q}_{k}+A_{k}'P_{k+1}\bar{A}_{k}+\sigma^{2}C_{k}'P_{k+1}\bar{C}_{k}\notag\\ &+\bar{A}_{k}'P_{k+1}A_{k}+\sigma^{2}\bar{C}_{k}'P_{k+1}C_{k}\notag\\ &+\bar{A}_{k}'P_{k+1}\bar{A}_{k}+\sigma^{2}\bar{C}_{k}'P_{k+1}\bar{C}_{k}\notag\\ &+(A_{k}+\bar{A}_{k})'\bar{P}_{k+1}(A_{k}+\bar{A}_{k})\notag\\ &+[M_{k}^{(1)}]'[\Upsilon_{k}^{(1)}]^{-1}M_{k}^{(1)}-[M_{k}^{(2)}]'[\Upsilon_{k}^{(2)}]^{-1}M_{k}^{(2)},\label{th42} \end{align} with final condition $P_{N+1}$ and $\bar{P}_{N+1}$ given by \eqref{ps2}. The associated optimal cost function is given by \begin{equation}\label{jnst} J_{N}^{*}=E(x_{0}'P_{0}x_{0})+(Ex_{0})'\bar{P}_{0}(Ex_{0}). \end{equation} Moreover, the costate $\lambda_{k-1}$ in \eqref{th32} and the state $x_{k},~Ex_{k}$ admit the following relationship, \begin{equation}\label{th4} \lambda_{k-1}\hspace{-1mm}=\left[ \begin{array}{cc} P_{k}&\bar{P}_{k}^{(1)}\\ \bar{P}_{k}^{(2)}&\bar{P}_{k}^{(3)} \\ \end{array} \right]\left[ \begin{array}{cc} x_{k}\\ Ex_{k}\\ \end{array} \right], \end{equation} where $P_k$ obeys Riccati equation \eqref{th41}, $\bar{P}_{k}^{(1)}+\bar{P}_{k}^{(2)}+\bar{P}_{k}^{(3)}=\bar{P}_k$, and $\bar{P}_k$ satisfies Riccati equation \eqref{th42}. \end{theorem} \begin{proof} See Appendix B.\end{proof} \begin{remark} We show that the necessary and sufficient solvability conditions for the mean-field LQ optimal control are that the matrices $\Upsilon_{k}^{(1)} $, $\Upsilon_{k}^{(2)}$ are positive definite which are parallel to the solvability condition of standard LQ control. It should be noted the solvability conditions in previous works \cite{yong} and \cite{ni1} for the mean-field LQ optimal control are given with operator type which is not easy to be verified in practice. 
\end{remark} \begin{remark} It should be noted that the weighting matrices $R_{k}$ and $R_{k}+\bar{R}_{k}$ in cost function \eqref{ps2} are only required to be positive semi-definite in this paper which is more standard than the assumptions made in most of previous works where the matrices are required to be positive definite \cite{ni1}, \cite{yong}. \end{remark} \begin{remark}\label{rem3} The presented results in Theorem \ref{main} contain the standard stochastic LQ control problem as a special case. Actually, when coefficient matrices $\bar{A}_{k}$, $\bar{B}_{k}$, $\bar{C}_{k}$, $\bar{D}_{k}$ in \eqref{ps1} and weighting matrices $\bar{Q}_{k}$, $\bar{R}_{k}$, $\bar{P}_{N+1}$ in \eqref{ps2} are zero for $0\leq k\leq N$, by \eqref{upsi1}-\eqref{h2} and induction method, it is easy to know that $\Upsilon_{k}^{(1)}=\Upsilon_{k}^{(2)}$, $M_{k}^{(1)}=M_{k}^{(2)}$ and thus $\bar{K}_{k}=0$. Furthermore, notice \eqref{pk1}-\eqref{pk3} and \eqref{th42}, we have $\bar{P}_{k}^{(1)}=\bar{P}_{k}^{(2)}=\bar{P}_{k}^{(3)}=\bar{P}_{k}=0$, \eqref{th4} becomes $\lambda_{k-1}\hspace{-1mm}=\left[ \begin{array}{cc} \hspace{-1mm}P_{k}x_{k} \hspace{-1mm}\\ \hspace{-1mm}0\hspace{-1mm}\\ \end{array} \right]$. Refer to reference\cite{xyz}, \cite{det} and \cite{huang}, we know \eqref{th43}, \eqref{jnst} and \eqref{th4} are exactly the solution to standard stochastic LQ control problem. \end{remark} \section{Infinite Horizon Mean-field LQ Control and Stabilization} \subsection{Problem Formulation} In this section, the infinite horizon mean-field stochastic LQ control problem is solved. Besides, the necessary and sufficient stabilization condition for mean-field systems is investigated. To study the stabilization problem for infinite horizon case, we consider the following time invariant system, \begin{equation}\label{ps10} \left\{ \begin{array}{ll} x_{k+1}=(Ax_{k}+\bar{A}Ex_{k}+Bu_{k}+\bar{B}Eu_{k})\\ ~~~~~~~+(Cx_{k}+\bar{C}Ex_{k}+Du_{k}+\bar{D}Eu_{k})w_{k},\\ x_{0}=\xi,\\ \end{array} \right. 
\end{equation} where $A,~\bar{A},~B,~\bar{B},~C,~\bar{C},~D,~\bar{D}$ are all constant coefficient matrices with compatible dimensions. The system noise $w_{k}$ is defined as in \eqref{ps1}. The associated cost function is given by \begin{equation}\label{ps200}\begin{split} J\hspace{-1mm}=\hspace{-1mm}\sum_{k=0}^{\infty}E[x_{k}'Qx_{k}\hspace{-1mm}+\hspace{-1mm} (Ex_{k})'\bar{Q}Ex_{k}\hspace{-1mm}+\hspace{-1mm}u_{k}'Ru_{k}\hspace{-1mm}+\hspace{-1mm}(Eu_{k})'\bar{R}Eu_{k}], \end{split}\end{equation} where $Q$, $\bar{Q}$, $R$, $\bar{R}$ are deterministic symmetric weighting matrices with appropriate dimensions. Throughout this section, the following assumption is made on the weighting matrices in \eqref{ps200}. \begin{assumption}\label{ass2} $R>0$, $R+\bar{R}>0$, and $Q\geq 0$, $Q+\bar{Q}\geq 0$. \end{assumption} \begin{remark} It should be pointed out that Assumption \ref{ass2} is a basic condition in order to investigate the stabilization for stochastic systems, see \cite{huang}, \cite{yongj}, and so forth. \end{remark} The following notions of stability and stabilization are introduced. \begin{definition} System \eqref{ps10} with $u_{k}=0$ is called asymptotically mean square stable if for any initial values $x_{0}$, there holds \begin{equation*} \lim_{k\rightarrow \infty}E(x_{k}'x_{k})=0. \end{equation*} \end{definition} \begin{definition} System \eqref{ps10} is stabilizable in mean square sense if there exists $\mathcal{F}_{k-1}$-measurable linear controller $u_{k}$ in terms of $x_{k}$ and $Ex_{k}$, such that for any random vector $x_{0}$, the closed loop of system \eqref{ps10} is asymptotically mean square stable. \end{definition} Following from references \cite{huang},\cite{zhangw} and \cite{zhangw2}, the definitions of exactly observability and exactly detectability are respectively given in the below. 
\begin{definition}\label{ob1} Consider the following mean-field system \begin{equation}\label{mf} \left\{ \begin{array}{ll} x_{k+1}=(Ax_{k}+\bar{A}Ex_{k})+(Cx_{k}+\bar{C}Ex_{k})w_{k},\\ Y_{k}=\mathcal{Q}^{1/2}\mathbb{X}_{k}. \end{array} \right. \end{equation} where $\mathcal{Q}=\left[ \begin{array}{cc} Q& 0\\ 0 & Q+\bar{Q} \\ \end{array} \right]$ and $\mathbb{X}_{k}=\left[ \begin{array}{cc} \hspace{-1mm} x_{k}-Ex_{k}\hspace{-1mm}\\ \hspace{-1mm} Ex_{k} \hspace{-1mm} \\ \end{array} \right]$. System \eqref{mf} is said to be exactly observable, if for any $N\geq 0$, \begin{equation*} Y_{k}= 0, ~\forall~ 0\leq k\leq N~\Rightarrow~x_{0}=0, \end{equation*} where the meaning of $Y_{k}=0$ and $x_{0}=0$ are given by Definition \ref{def}. For simplicity, we rewrite system \eqref{mf} as $(A,\bar{A},C,\bar{C},\mathcal{Q}^{1/2})$. \end{definition} \begin{definition}\label{det1} System $(A,\bar{A},C,\bar{C},\mathcal{Q}^{1/2})$ in \eqref{mf} is said to be exactly detectable, if for any $N\geq 0$, \begin{equation*} Y_{k}= 0, ~\forall~ 0\leq k\leq N~\Rightarrow~\lim_{k\rightarrow+\infty}E(x_{k}'x_{k})=0. \end{equation*} \end{definition} Now we make the following two assumptions. \begin{assumption}\label{ass3} $(A,\bar{A},C,\bar{C},\mathcal{Q}^{1/2})$ is exactly observable. \end{assumption} \begin{assumption}\label{ass4} $(A,\bar{A},C,\bar{C},\mathcal{Q}^{1/2})$ is exactly detectable. \end{assumption} \begin{remark}\label{rem13} \begin{itemize} \item It is noted that Definition \ref{det1} gives a different definition of `exactly detectability' from the one given in previous work \cite{ni2}. In fact, \cite{ni2} considers the mean-field system with different observation $y_k$, \begin{equation}\label{mf2} \left\{ \begin{array}{ll} x_{k+1}=(Ax_{k}+\bar{A}Ex_{k})\hspace{-1mm}+\hspace{-1mm}(Cx_{k}+\bar{C}Ex_{k})w_{k},\\ y_{k}=Qx_{k}+\bar{Q}Ex_{k}. \end{array} \right. 
\end{equation} As stated in \cite{ni2}, system \eqref{mf2} is `exactly detectable', if for any $N\geq 0$, \begin{equation*} y_{k}= 0, ~\forall~ 0\leq k\leq N~\Rightarrow~\lim_{k\rightarrow +\infty}E(x_{k}'x_{k})=0. \end{equation*} Obviously, it is different from the definition given in this paper. \item It should be highlighted that the exact detectability of system \eqref{mf} made in Assumption \ref{ass4} is weaker than the exact detectability made in \cite{ni2}. In fact, if the system is exactly detectable as assumed in Assumption \ref{ass4} of this paper, then we have that \[Y_{k}=\mathcal{Q}^{1/2}\mathbb{X}_{k}=0\Rightarrow \lim_{k\rightarrow+\infty} E(x_{k}'x_{k})=0.\] Note that $Y_{k}=\mathcal{Q}^{1/2}\mathbb{X}_{k}=0$ implies \begin{equation}\label{equ}\left[ \begin{array}{cc} Q& 0\\ 0 & Q+\bar{Q} \\ \end{array} \right]^{1/2}\left[ \begin{array}{cc} \hspace{-1mm} x_{k}-Ex_{k}\hspace{-1mm}\\ \hspace{-1mm} Ex_{k} \hspace{-1mm} \\ \end{array} \right]=0.\end{equation} Equation \eqref{equ} indicates that \begin{align}\label{mf3}Q(x_{k}-Ex_{k})=0,~\text{and}~(Q+\bar{Q})Ex_{k}=0,\end{align} and thus, $Qx_{k}+\bar{Q}Ex_{k}=0$. Hence, if $(A,\bar{A},C,\bar{C},Q,\bar{Q})$ is `exactly detectable' as defined in \cite{ni2}, then $(A,\bar{A},C,\bar{C},\mathcal{Q}^{1/2})$ would be exactly detectable as defined in Definition \ref{det1}. \end{itemize} \end{remark} \begin{remark}\label{ob} Definition \ref{ob1} and Definition \ref{det1} can be reduced to the standard exact observability and exact detectability for standard stochastic systems, respectively. Actually, with $\bar{A}=0, \bar{C}=0, \bar{Q}=0$ in system \eqref{mf}, Definition \ref{ob1} becomes $Q^{1/2}x_{k}=0\Rightarrow x_{0}=0$, which is exactly the observability definition for standard stochastic linear systems. Similarly, we can show that the exact detectability given in Definition \ref{det1} can also be reduced to the standard exact detectability definition for standard stochastic systems. 
One can refer to reference \cite{abou}, \cite{huang}, \cite{zhw}, and so forth. \end{remark} The problems of infinite horizon LQ control and stabilization for discrete-time mean-field systems are stated as the following. \begin{problem}\label{prob2} Find $\mathcal{F}_{k-1}$ measurable linear controller $u_{k}$ in terms of $x_{k}$ and $Ex_{k}$ to minimize the cost function \eqref{ps200} and stabilize system \eqref{ps10} in the mean square sense. \end{problem} \subsection{Solution to Problem \ref{prob2}} For the convenience of discussion, to make the time horizon $N$ explicit for finite horizon mean-field LQ control problem, we re-denote $\Upsilon_{k}^{(1)}$, $\Upsilon_{k}^{(2)}$, $M_{k}^{(1)}$, $M_{k}^{(2)}$ in \eqref{upsi1}-\eqref{h2} as $\Upsilon_{k}^{(1)}(N)$, $\Upsilon_{k}^{(2)}(N)$, $M_{k}^{(1)}(N)$ and $M_{k}^{(2)}(N)$ respectively. Accordingly, $K_{k}$, $\bar{K}_{k}$, $P_{k}$ and $\bar{P}_{k}$ in \eqref{kk}, \eqref{kkbar}, \eqref{th41} and \eqref{th42} are respectively rewritten as $K_{k}(N)$, $\bar{K}_{k}(N)$, $P_{k}(N)$ and $\bar{P}_{k}(N)$. Moreover, the coefficient matrices $A_{k}, \bar{A}_{k}, B_{k}, \bar{B}_{k}, C_{k}, \bar{C}_{k}, D_{k}, \bar{D}_{k}$ in \eqref{th43}-\eqref{th4} are time invariant as in \eqref{ps10}. The terminal weighting matrix $P_{N+1}$ and $\bar{P}_{N+1}$ in \eqref{ps2} are set to be zero. Before presenting the solution to \emph{Problem \ref{prob2}}, the following lemmas will be given at first. \begin{lemma}\label{111} For any $N\geq 0$, $P_{k}(N)$ and $\bar{P}_{k}(N)$ in \eqref{th41}-\eqref{th42} satisfy $P_{k}(N)\geq 0$ and $P_{k}(N)+\bar{P}_{k}(N)\geq 0$. \end{lemma} \begin{proof} See Appendix C.\end{proof} \begin{lemma}\label{lemma3} With the assumption $R>0$ and $R+\bar{R}>0$, \emph{Problem 1} admits a unique solution. \end{lemma} \begin{proof} From Lemma \ref{111}, we know that $P_{k}(N)\geq0$ and $P_{k}(N)+\bar{P}_{k}(N)\geq 0$. 
Besides, as $R>0$ and $R+\bar{R}>0$, from \eqref{upsi1} and \eqref{upsi2}, we know that $\Upsilon_{k}^{(1)}(N)>0$ and $\Upsilon_{k}^{(2)}(N)>0$ for $0\leq k\leq N$. Apparently from Theorem \ref{main}, we can conclude that \emph{Problem 1} admits a unique solution for any $N>0$. This completes the proof.\end{proof} \begin{lemma}\label{lemma2} Under Assumptions \ref{ass2} and \ref{ass3}, for any $k\geq 0$, there exists a positive integer $N_{0}\geq 0$ such that $P_{k}(N_{0})>0$ and $P_{k}(N_{0})+\bar{P}_{k}(N_{0})>0$. \end{lemma} \begin{proof} See Appendix D.\end{proof} \begin{theorem}\label{theorem2} Under Assumptions \ref{ass2} and \ref{ass3}, if system \eqref{ps10} is stabilizable in the mean square sense, the following assertions hold: 1) For any $k\geq 0$, $P_{k}(N)$ and $\bar{P}_{k}(N)$ are convergent, i.e., $$\lim_{N\rightarrow +\infty}P_{k}(N)=P,~\lim_{N\rightarrow +\infty}\bar{P}_{k}(N)=\bar{P},$$ where $P$ and $\bar{P}$ satisfy the following coupled ARE: \begin{align} P&=Q+A'PA+\sigma^{2}C'PC\hspace{-1mm}-\hspace{-1mm}[M^{(1)}]'[\Upsilon^{(1)}]^{-1}M^{(1)},\label{are1}\\ \bar{P}&=\bar{Q}+A'P\bar{A}+\sigma^{2}C'P\bar{C}+\bar{A}'PA+\sigma^{2}\bar{C}'PC\notag\\ &+\bar{A}'P\bar{A}+\sigma^{2}\bar{C}'P\bar{C}+(A+\bar{A})'\bar{P}(A+\bar{A})\notag\\ &+[M^{(1)}]'[\Upsilon^{(1)}]^{-1}M^{(1)}-[M^{(2)}]'[\Upsilon^{(2)}]^{-1}M^{(2)},\label{are2} \end{align} while \begin{align} \Upsilon^{(1)}&=R+B'PB+\sigma^{2}D'PD\geq R>0,\label{up1}\\ M^{(1)}&=B'PA+\sigma^{2}D'PC,\label{hh1}\\ \Upsilon^{(2)}&=R+\bar{R}+(B+\bar{B})'(P+\bar{P})(B+\bar{B})\notag\\ &~~~~~~+\sigma^{2}(D+\bar{D})'P(D+\bar{D})\geq R+\bar{R}>0,\label{up2}\\ M^{(2)}&=(B+\bar{B})'(P+\bar{P})(A+\bar{A})\notag\\ &~~~~~~+\sigma^{2}(D+\bar{D})'P(C+\bar{C}).\label{hh2} \end{align} 2) $P$ and $P+\bar{P}$ are positive definite. \end{theorem} \begin{proof} See Appendix E.\end{proof} We are now in the position to present the main results of this section. 
Two results are to be given, one is based on the assumption of exactly observability (Assumption \ref{ass3}), and the other is based on a weaker assumption of exactly detectability (Assumption \ref{ass4}). \begin{theorem}\label{succeed} Under Assumption \ref{ass2} and \ref{ass3}, mean-field system \eqref{ps10} is stabilizable in the mean square sense if and only if there exists a unique solution to coupled ARE \eqref{are1}-\eqref{are2} $P$ and $\bar{P}$ satisfying $P>0$ and $P+\bar{P}>0$. In this case, the stabilizable controller is given by \begin{align}\label{control} u_{k}=Kx_{k}+\bar{K}Ex_{k}, \end{align} where \begin{align} K&=-[\Upsilon^{(1)}]^{-1}M^{(1)},\label{K}\\ \bar{K}&=-\{[\Upsilon^{(2)}]^{-1}M^{(2)}-[\Upsilon^{(1)}]^{-1}M^{(1)}\},\label{KK} \end{align} $\Upsilon^{(1)}$, $M^{(1)}$, $\Upsilon^{(2)}$ and $M^{(2)}$ are given by \eqref{up1}-\eqref{hh2}. Moreover, the stabilizable controller $u_{k}$ minimizes the cost function \eqref{ps200}, and the optimal cost function is given by \begin{equation}\label{cost} J^{*}=E(x_{0}'Px_{0})+Ex_{0}'\bar{P}Ex_{0}. \end{equation} \end{theorem} \begin{proof} See Appendix F.\end{proof} \begin{theorem}\label{succeed2} Under Assumption \ref{ass2} and \ref{ass4}, mean-field system \eqref{ps10} is stabilizable in the mean square sense if and only if there exists a unique solution to coupled ARE \eqref{are1}-\eqref{are2} $P$ and $\bar{P}$ satisfying $P\geq 0$ and $P+\bar{P}\geq 0$. In this case, the stabilizable controller is given by \eqref{control}. Moreover, the stabilizable controller $u_{k}$ minimizes the cost function \eqref{ps200}, and the optimal cost function is as \eqref{cost}. 
\end{theorem} \begin{proof} See Appendix G.\end{proof} \begin{remark} Theorem \ref{succeed} and \ref{succeed2} propose a new approach to stochastic control problems based on the maximum principle and solution to FBSDE developed in this paper, and thus essentially solve the optimal control and stabilization for mean-field stochastic systems under more standard assumptions which is compared with previous works \cite{ni1} and \cite{ni2}. \end{remark} \section{Numerical Examples} \subsection{The Finite Horizon Case} Consider system \eqref{ps1} and the cost function \eqref{ps2} with $N=4$ and $\sigma^{2}=1$, we choose the coefficient matrices and weighting matrices in \eqref{ps1} and \eqref{ps2} to be time-invariant for $k=1,2,3$ as: \begin{align*} A_{k}&\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{ccc} 1.1\hspace{-2mm}&\hspace{-2mm} 0.9\hspace{-2mm}&\hspace{-2mm} 0.8\\ 0 \hspace{-2mm} &\hspace{-2mm}0.6 \hspace{-2mm}&\hspace{-2mm} 1.2\\ 0.4 \hspace{-2mm}&\hspace{-2mm}0.9 \hspace{-2mm}&\hspace{-2mm} 1\\ \end{array} \hspace{-2mm}\right],\bar{A}_{k}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{ccc} 0.5\hspace{-1mm}&\hspace{-1mm} 1\hspace{-1mm}&\hspace{-1mm} 0.9\\ 0.8 \hspace{-1mm}&\hspace{-1mm}0.7 \hspace{-1mm}&\hspace{-1mm} 1.2\\ 1.1 \hspace{-1mm}&\hspace{-1mm}2 \hspace{-1mm}&\hspace{-1mm} 1.9\\ \end{array} \hspace{-2mm}\right],B_{k}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{ccc} 2\hspace{-1mm}&\hspace{-1mm} 0.3\\ 1.1 \hspace{-1mm} &\hspace{-1mm}0.6 \\ 0.9 \hspace{-1mm}&\hspace{-1mm}1.3\\ \end{array} \hspace{-2mm}\right],\\ \bar{B}_{k}&\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{ccc} 1.2\hspace{-2mm}&\hspace{-2mm} 0.6\\ 0.9 \hspace{-2mm}&\hspace{-2mm}1 \\ 0 \hspace{-2mm}&\hspace{-2mm}0.8\\ \end{array} \hspace{-2mm}\right],C_{k}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{ccc} 0.8\hspace{-2mm}&\hspace{-2mm}0.9\hspace{-2mm}&\hspace{-2mm} 1.5\\ 1.2 \hspace{-2mm}&\hspace{-2mm}1 
\hspace{-2mm}&\hspace{-2mm} 0.8\\ 0 \hspace{-2mm}&\hspace{-2mm}0.6\hspace{-2mm}&\hspace{-2mm} 0.4\\ \end{array} \hspace{-2mm}\right],\bar{C}_{k}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{ccc} 1\hspace{-2mm}&\hspace{-2mm}0\hspace{-2mm}&\hspace{-2mm}0.3\\ 0.5 \hspace{-2mm}&\hspace{-2mm}0.6 \hspace{-2mm}&\hspace{-2mm} 0.9\\ 0.7 \hspace{-2mm}&\hspace{-2mm}1.2 \hspace{-2mm}&\hspace{-2mm} 0.8\\ \end{array} \hspace{-1mm}\right],\\ D_{k}&=\left[\hspace{-2mm} \begin{array}{ccc} 0.5\hspace{-2mm}&\hspace{-2mm}0.4\\ 2 \hspace{-2mm}&\hspace{-2mm}0.9 \\ 1\hspace{-2mm}&\hspace{-2mm}0\\ \end{array} \right],\bar{D}_{k}=\left[\hspace{-2mm} \begin{array}{ccc} 2\hspace{-2mm}&\hspace{-2mm} 1\\ 0.5 \hspace{-2mm}&\hspace{-2mm}0.8 \\ 0 \hspace{-2mm}&\hspace{-2mm}0.5\\ \end{array} \right],\\ Q_{k}&=diag([0,~2,~1]),\bar{Q}_{k}=diag([1,~-1,~0]),\\ R_{k}&=diag([0,~2]),\bar{R}_{k}=diag([1,~-2]),\\ P_{4}&=diag([1,~2,~0]),\bar{P}_{4}=diag([1,~-1,~1]). \end{align*} It is noted that $R_{k}$ and $R_{k}+\bar{R}_{k}$ are semi-positive definite, while not positive definite for $k=1,2,3$. 
Based on \eqref{th43}-\eqref{th4} of Theorem \ref{main}, the solution to coupled Riccati equation \eqref{th41}-\eqref{th42} can be given as: \begin{align*} P_{3}&\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{ccc} 0.995\hspace{-2mm}&\hspace{-2mm} 0.298\hspace{-2mm}&\hspace{-2mm} -0.115\\ 0.298\hspace{-2mm}&\hspace{-2mm}2.417\hspace{-2mm}& \hspace{-2mm}0.840\\ -0.115\hspace{-2mm}&\hspace{-2mm}0.840\hspace{-2mm}&\hspace{-2mm} 3.360\\ \end{array} \hspace{-2mm}\right]\hspace{-1mm},\bar{P}_{3}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{ccc} 0.667\hspace{-2mm}& \hspace{-2mm}0.074\hspace{-2mm}&\hspace{-2mm} -0.006\hspace{-2mm}\\ 0.074 \hspace{-2mm}&\hspace{-2mm}1.033\hspace{-2mm} &\hspace{-2mm} 0.133\hspace{-2mm}\\ -0.006 \hspace{-2mm} &\hspace{-2mm}0.133\hspace{-2mm} &\hspace{-2mm} -1.319\hspace{-2mm}\\ \end{array} \hspace{-2mm}\right]\hspace{-1mm},\\ P_{2}&\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{ccc} 1.658\hspace{-2mm}&\hspace{-2mm} 0.161\hspace{-2mm}&\hspace{-2mm} 0.024\\ 0.161 \hspace{-2mm}&\hspace{-2mm}2.547 \hspace{-2mm}&\hspace{-2mm} 0.839\\ 0.024 \hspace{-2mm}&\hspace{-2mm}0.839 \hspace{-2mm}&\hspace{-2mm} 3.379\\ \end{array} \hspace{-2mm}\right],\bar{P}_{2}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{ccc} 0.630\hspace{-2mm}&\hspace{-2mm} 0.919\hspace{-2mm}&\hspace{-2mm} -0.457\\ 0.919 \hspace{-2mm}&\hspace{-2mm}5.282\hspace{-2mm} &\hspace{-2mm} 1.439\\ -0.457 \hspace{-2mm} &\hspace{-2mm}1.439\hspace{-2mm} &\hspace{-2mm} -0.520\\ \end{array} \hspace{-2mm}\right],\\ P_{1}&\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{ccc} 1.907\hspace{-2mm}&\hspace{-2mm} 0.315\hspace{-2mm}&\hspace{-2mm}0.268 \\ 0.315\hspace{-2mm}&\hspace{-2mm}2.812\hspace{-2mm}&\hspace{-2mm}1.352\\ 0.268\hspace{-2mm}&\hspace{-2mm}1.352\hspace{-2mm}&\hspace{-2mm} 4.408\\ \end{array} \hspace{-2mm}\right],\bar{P}_{1}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{ccc} 
0.982\hspace{-2mm}&\hspace{-2mm}0.924\hspace{-2mm}&\hspace{-2mm} -1.027\\ 0.924 \hspace{-2mm}&\hspace{-2mm}5.306\hspace{-2mm} &\hspace{-2mm} 0.711\\ -1.027 \hspace{-2mm} &\hspace{-2mm}0.711\hspace{-2mm} & \hspace{-2mm} -1.327\\ \end{array} \hspace{-2mm}\right],\\ P_{0}&\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{ccc} 2.026\hspace{-2mm}&\hspace{-2mm}0.353\hspace{-2mm}&\hspace{-2mm}0.364\\ 0.353\hspace{-2mm}&\hspace{-2mm}2.896\hspace{-2mm}&\hspace{-2mm}1.472\\ 0.364\hspace{-2mm}&\hspace{-2mm}1.472\hspace{-2mm}&\hspace{-2mm} 4.641\\ \end{array} \hspace{-2mm}\right],\bar{P}_{0}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{ccc} 1.217\hspace{-2mm}&\hspace{-2mm} 1.294\hspace{-2mm}&\hspace{-2mm}-1.198\\ 1.294 \hspace{-2mm}&\hspace{-2mm}6.232\hspace{-2mm} &\hspace{-2mm} 0.644\\ -1.198 \hspace{-2mm} &\hspace{-2mm}0.644\hspace{-2mm} & \hspace{-2mm} -1.498\\ \end{array} \hspace{-2mm}\right], \end{align*} and $\Upsilon_{k}^{(1)}$, $\Upsilon_{k}^{(2)}$ for $k=0,1,2,3$ in \eqref{upsi1} and \eqref{upsi2} can be calculated as \begin{align*} \Upsilon_{3}^{(1)}&\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} 14.670& 5.720\\ 5.720 &4.590 \\ \end{array} \hspace{-1mm}\right],\Upsilon_{3}^{(2)}\hspace{-1mm}=\hspace{-1mm}\left[ \begin{array}{cc} 45.040& 22.850\\ 22.850 &16.330\\ \end{array} \hspace{-1mm}\right],\\ \Upsilon_{2}^{(1)}&\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} 29.302& 13.536\\ 13.536 &12.297\\ \end{array} \hspace{-1mm}\right],\Upsilon_{2}^{(2)}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} 73.069&46.750\\ 46.750 &38.789\\ \end{array} \hspace{-1mm}\right],\\ \Upsilon_{1}^{(1)}&\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} 32.593& 14.480\\ 14.480&12.607\\ \end{array} \hspace{-1mm}\right],\Upsilon_{1}^{(2)}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} 113.585& 76.217\\ 76.217 &64.973\\ \end{array} \hspace{-1mm}\right],\\ 
\Upsilon_{0}^{(1)}&\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{ccc} 42.070&19.232\\ 19.232 &15.875 \\ \end{array} \hspace{-1mm}\right],\Upsilon_{0}^{(2)}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{ccc} 130.398& 82.580\\ 82.580 &68.411 \\ \end{array} \hspace{-1mm}\right],\\ \det[\Upsilon_{3}^{(1)}]&=34.617>0,\det[\Upsilon_{3}^{(2)}]=213.381>0,\\ \det[\Upsilon_{2}^{(1)}]&=117.112>0 ,\det[\Upsilon_{2}^{(2)}]=648.698>0,\\ \det[\Upsilon_{1}^{(1)}]&=201.228>0, \det[\Upsilon_{1}^{(2)}]=1570.987>0 ,\\ \det[\Upsilon_{0}^{(1)}]&=297.946>0, \det[\Upsilon_{0}^{(2)}]=2101.236>0. \end{align*} Since $\Upsilon_{k}^{(1)}>0$ and $\Upsilon_{k}^{(2)}>0$, thus by Theorem \ref{main}, the unique optimal controller can be given as: \begin{equation*} u_{k}=K_{k}x_{k}+\bar{K}_{k}Ex_{k}, k=0,1,2,3, \end{equation*} where \begin{align*} K_{3}&=\left[\hspace{-1mm} \begin{array}{ccc} -0.517& -0.483& -0.471 \\ 0.032 &-0.084 & -0.223\\ \end{array} \right],\\ \bar{K}_{3}&=\left[\hspace{-1mm} \begin{array}{ccc} 0.184& 0.328&0.357 \\ -0.522 &-0.819 & -0.920\\ \end{array} \right]\hspace{-1mm},\\ K_{2}&=\left[\hspace{-1mm} \begin{array}{ccc} -0.385& -0.481& -0.410 \\ -0.030 &-0.247& -0.474\\ \end{array} \right],\\ \bar{K}_{2}&=\left[\hspace{-1mm} \begin{array}{ccc} 0.036& 0.327& 0.336 \\ -0.364 &-0.734& -0.828\\ \end{array} \right],\\ K_{1}&=\left[\hspace{-1mm} \begin{array}{ccc} -0.413& -0.476& -0.394 \\ -0.011&-0.256&-0.500\\ \end{array} \right],\\ \bar{K}_{1}&=\left[\hspace{-1mm} \begin{array}{ccc} 0.071& 0.345& 0.305 \\ -0.334 &-0.699 &-0.820\\ \end{array} \right],\\ K_{0}&=\left[ \begin{array}{ccc} -0.411& -0.487& -0.398 \\ 0.001&-0.259&-0.525\\ \end{array} \right],\\ \bar{K}_{0}&=\left[ \begin{array}{ccc} 0.070& 0.339&0.297 \\ -0.358&-0.692&-0.780\\ \end{array} \right]. 
\end{align*} \subsection{The Infinite Horizon Case} Consider system \eqref{ps10} and the cost function \eqref{ps200} with the following coefficient matrices and weighting matrices: \begin{align*} A&=1.1,\bar{A}=0.2,B=0.4,\bar{B}=0.1,C=0.9,\bar{C}=0.5,\\ D&=0.8,\bar{D}=0.2,Q=2,\bar{Q}=1,R=1,\bar{R}=1,\sigma^{2}=1. \end{align*} the initial state $x_{0}\sim N(1,2)$, i.e., $x_{0}$ obeys the normal distribution with mean 1 and covariance 2. Note that $Q=2$, $Q+\bar{Q}=3$, $R=1$, $R+\bar{R}=2$ are all positive, then Assumption 3 and Assumption 4 are satisfied. By using coupled ARE \eqref{are1}-\eqref{are2}, we have $P=5.6191$ and $\bar{P}=5.1652$. From \eqref{up1}-\eqref{hh2}, we can obtain $ \Upsilon^{(1)}=5.4953,M^{(1)}=6.5182, \Upsilon^{(2)}=10.3152$, and $M^{(2)}=14.8765$. Notice that $P>0$ and $P+\bar{P}>0$, according to Theorem \ref{succeed}, there exists a unique optimal controller to stabilize mean-field system \eqref{ps10} as well as minimize cost function \eqref{ps200}, the controller in \eqref{control} is presented as \begin{align*} u_{k}=Kx_{k}+\bar{K}Ex_{k}=-1.1861x_{k}-0.2561Ex_{k},~k\geq 0. \end{align*} Using the designed controller, the simulation of system state is shown in Fig. 1. With the optimal controller, the regulated system state is stabilizable in mean square sense as shown in Fig. 1. \begin{figure} \caption{The mean square stabilization of mean-field system.} \label{fig:1} \end{figure} To explore the effectiveness of the main results presented in this paper, we consider mean-field system \eqref{ps10} and cost function \eqref{ps200} with \begin{align*} A&=2,\bar{A}=0.8,B=0.5,\bar{B}=1,C=1,\bar{C}=1,\\ D&=-0.8,\bar{D}=0.6,Q=1,\bar{Q}=1,R=1,\bar{R}=1,\sigma^{2}=1. \end{align*} The initial state are assumed to be the same as that given above. By solving the coupled ARE \eqref{are1}, it can be found that $P$ has two negative roots as $P=-1.1400$ and $P=-0.2492$. 
Thus, according to Theorem \ref{succeed} and Theorem \ref{succeed2}, we know that system \eqref{ps10} is not stabilizable in mean square sense. Actually, when $P=-1.1400$, it is easily known that equation \eqref{are2} has no real roots for $\bar{P}$. While in the case of $P=-0.2492$, $\bar{P}$ has two real roots which can be solved from \eqref{are2} as $\bar{P}=7.0597$ and $\bar{P}=-0.6476$, respectively. In the latter case, with $P=-0.2492$ and $\bar{P}=7.0597$, we can calculate $K$ and $\bar{K}$ from \eqref{K} and \eqref{KK} as $K=0.0640$, $\bar{K}=1.5939$. Similarly, with $P=-0.2492$ and $\bar{P}=-0.6476$, $K$ and $\bar{K}$ can be computed as $K=0.0640$, $\bar{K}=131.8389$. Accordingly, the controllers are designed as $u_{k}=0.0640x_{k}+1.5939Ex_{k}$, $u_{k}=0.0640x_{k}+131.8389Ex_{k}$, respectively. Simulation results of the corresponding state trajectories with the designed controllers are respectively shown as in Fig. 2 and Fig. 3. As expected, the state trajectories are not convergent. \begin{figure} \caption{Simulation for the state trajectory $E(x_{k} \label{fig:2} \end{figure} \begin{figure} \caption{Simulation for the state trajectory $E(x_{k} \label{fig:3} \end{figure} \section{Conclusion} This paper proposes a new approach to stochastic optimal control with the key tools of maximum principle and solution to FBSDE explored in this paper. Accordingly, with the approach, the optimal control and stabilization problems for discrete-time mean-field systems have been essentially solved. The main results include: 1) The sufficient and necessary solvability condition of finite horizon optimal control problem has been obtained in analytical form via a coupled Riccati equation; 2) The sufficient and necessary conditions for the stabilization of mean-field systems has been obtained. 
It is shown that, under the exact observability assumption, the mean-field system is stabilizable in the mean square sense if and only if a coupled ARE has a unique solution $P$ and $\bar{P}$ satisfying $P>0$ and $P+\bar{P}>0$. Furthermore, under the exact detectability assumption, which is weaker than exact observability, we show that the mean-field system is stabilizable in the mean square sense if and only if the coupled ARE admits a unique solution $P$ and $\bar{P}$ satisfying $P\geq 0$ and $P+\bar{P}\geq 0$. \appendices \section{Proof of Theorem \ref{maximum}} \begin{proof} For the general stochastic mean-field optimal control problem, the control domain for system \eqref{ps3} to minimize \eqref{ps04} is given by \begin{equation*} \mathcal{U}=\left\{u_{k}\in\mathcal{R}^{m}|~u_{k}~\text{is}~\mathcal{F}_{k-1}~\text{measurable},~E|u_{k}|^{2}<\infty \right\}. \end{equation*} We assume the control domain $\mathcal{U}$ to be convex. Any $u_{k}\in \mathcal{U}$ is called an admissible control. Besides, for arbitrary $u_{k},~\delta u_{k}\in \mathcal{U}$ and $\varepsilon\in(0,1)$, we can obtain $u_{k}^{\varepsilon}=u_{k}+\varepsilon \delta u_{k}\in \mathcal{U}$. Let $x_{k}^{\varepsilon}$, $J_{N}^{\varepsilon}$ be the corresponding state and cost function with $u_{k}^{\varepsilon}$, and $x_{k}$, $J_{N}$ represent the corresponding state and cost function with $u_{k}$. We examine the increment in $J_N$ due to an increment in the controller $u_k$. 
Assume that final time $N+1$ is fixed, by using Taylor's expansion and following cost function \eqref{ps04}, the increment $\delta J_{N}=J_{N}^{\varepsilon}-J_{N}$ can be calculated as follows, \begin{align}\label{mp0001} &\delta J_N=E\Big\{\phi_{x_{N+1}} \delta x_{N+1}+\phi_{Ex_{N+1}} \delta Ex_{N+1}\notag\\ &\hspace{-1mm}+\hspace{-1mm}\sum_{k=0}^{N}\hspace{-1mm}\big[L^k_{x_k}\delta x_{k}\hspace{-1mm}+\hspace{-1mm}L^k_{Ex_k}\delta Ex_{k}\hspace{-1mm}+\hspace{-1mm}L^k_{u_k}\varepsilon\delta u_{k}\hspace{-1mm}+\hspace{-1mm}L^k_{Eu_k}\varepsilon\delta Eu_{k}\big]\Big\}\hspace{-1mm}+\hspace{-1mm}O(\varepsilon^{2})\notag\\ &\hspace{-1mm}=\hspace{-1mm}E\big\{[\phi_{x_{N+1}}\hspace{-1mm}+\hspace{-1mm}E(\phi_{Ex_{N+1}})]\delta x_{N+1}\hspace{-1mm}+\hspace{-1mm}\sum_{k=0}^{N}[L^k_{u_k}\hspace{-1mm}+\hspace{-1mm}E(L_{Eu_{k}}^{k})]\varepsilon \delta u_{k}\notag\\ &+\sum_{k=0}^{N}[L^k_{x_k}+E(L^k_{Ex_k})] \delta x_{k}\big\}+O(\varepsilon^{2}). \end{align} where $O(\varepsilon^{2})$ means infinitesimal of the same order with $\varepsilon^{2}$. Another thing to note is the variation of the initial state $\delta x_{0}=\delta Ex_{0}=0$. 
By \eqref{ps1} and \eqref{mp0002}, for $\delta x_{k}=x_{k}^{\varepsilon}-x_{k}$, the following assertion holds, \begin{align}\label{asser} \left[\hspace{-2mm} \begin{array}{cc} \delta x_{k+1} \\ \delta Ex_{k+1} \end{array} \hspace{-3mm}\right]&\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{x_{k}}^{k}\hspace{-2mm} &\hspace{-2mm} f_{Ex_{k}}^{k} \\ g_{x_{k}}^{k} \hspace{-2mm} &\hspace{-2mm} g_{Ex_{k}}^{k}\\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} \delta x_{k} \\ \delta Ex_{k} \end{array} \hspace{-2mm}\right]\hspace{-1mm}+\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{u_{k}}^{k} \hspace{-2mm}&\hspace{-2mm} f_{Eu_{k}}^{k} \\ g_{u_{k}}^{k}\hspace{-2mm}&\hspace{-2mm} g_{Eu_{k}}^{k}\\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} \varepsilon\delta u_{k} \\ \varepsilon\delta Eu_{k} \end{array} \hspace{-2mm}\right], \end{align} Thus the variation of $\delta x_{k+1}$ can be presented as \begin{align}\label{mp0005} &\delta x_{k+1} =f_{x_{k}}^{k}\delta x_{k}+f_{u_{k}}^{k}\varepsilon\delta u_{k}+f_{Ex_{k}}^{k}\delta Ex_{k}+f_{Eu_{k}}^{k}\varepsilon\delta Eu_{k}\notag\\ &=\left[\hspace{-2mm} \begin{array}{cc} f_{x_{k}}^{k} \hspace{-2mm}&\hspace{-2mm} \hspace{-1mm}f_{Ex_{k}}^{k} \\ \end{array} \hspace{-2mm}\right] \hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} \delta x_{k} \\ \delta Ex_{k} \end{array} \hspace{-2mm}\right] +f_{u_{k}}^{k}\varepsilon\delta u_{k}+f_{Eu_{k}}^{k}\varepsilon\delta Eu_{k}\notag \\ &=\left[\hspace{-2mm} \begin{array}{cc} f_{x_{k}}^{k} \hspace{-2mm}&\hspace{-2mm} f_{Ex_{k}}^{k}\\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{x_{k-1}}^{k-1}\hspace{-2mm} &\hspace{-2mm} f_{Ex_{k-1}}^{k-1} \\ g_{x_{k-1}}^{k-1} \hspace{-2mm} &\hspace{-2mm} g_{Ex_{k-1}}^{k-1}\\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} \delta x_{k-1} \\ \delta Ex_{k-1} \end{array} 
\hspace{-2mm}\right]\notag\\ &+\left[\hspace{-2mm} \begin{array}{cc} f_{x_{k}}^{k} \hspace{-3mm}& \hspace{-3mm}f_{Ex_{k}}^{k} \\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{u_{k-1}}^{k-1}\hspace{-3mm} &\hspace{-3mm} f_{Eu_{k-1}}^{k-1} \\ g_{u_{k-1}}^{k-1} \hspace{-3mm}&\hspace{-3mm} g_{Eu_{k-1}}^{k-1}\\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} \varepsilon\delta u_{k-1}\hspace{-1mm} \\ \varepsilon\delta Eu_{k-1}\hspace{-1mm} \end{array} \hspace{-2mm}\right]\hspace{-1mm}+\hspace{-1mm}f_{u_{k}}^{k}\delta u_{k} \hspace{-1mm}+\hspace{-1mm}f_{Eu_{k}}^{k}\delta Eu_{k}\notag \\ &=\tilde{F}_{x}(k,0)\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} \delta x_{0} \\ \delta Ex_{0} \end{array} \hspace{-2mm}\right]\hspace{-1mm}+\hspace{-1mm}\sum_{l=0}^{k}\tilde{F}_{x}(k,l+1)\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{u_{l}}^{l}& f_{Eu_{l}}^{l} \\ g_{u_{l}}^{l} & g_{Eu_{l}}^{l}\\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} \varepsilon\delta u_{l} \\ \varepsilon\delta Eu_{l} \end{array} \hspace{-2mm}\right]\notag\\ &\hspace{-1mm}=\hspace{-1mm}\sum_{l=0}^{k}\hspace{-1mm}\tilde{F}_{x}(k,l\hspace{-1mm}+\hspace{-1mm}1)\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{u_{l}}^{l}\\ g_{u_{l}}^{l} \\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\varepsilon\delta u_{l}\hspace{-1mm}+\hspace{-1mm}\sum_{l=0}^{k}\hspace{-1mm}\tilde{F}_{x}(k,l\hspace{-1mm}+\hspace{-1mm}1)\left[\hspace{-2mm} \begin{array}{cc} f_{Eu_{l}}^{l} \\ g_{Eu_{l}}^{l}\\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\varepsilon\delta Eu_{l}, \end{align} where \begin{equation}\label{fx} \tilde{F}_x(k,l)=\left[\hspace{-2mm} \begin{array}{cc} f_{x_{k}}^{k} \hspace{-2mm}&\hspace{-2mm} f_{Ex_{k}}^{k} \\ \end{array} \hspace{-2mm}\right]\tilde{f}^{k-1}_{x_{k-1}} \cdots \tilde{f}^l_{x_l},l=0,\cdots,k; \end{equation} $\tilde{F}_x(k,k+1)=[I_{n}~0]$, and $\tilde{f}^{l}_{x_{l}}=\left[\hspace{-2mm} 
\begin{array}{cc} f_{x_{l}}^{l} \hspace{-2mm}&\hspace{-2mm} f_{Ex_{l}}^{l} \\ g_{x_{l}}^{l} \hspace{-2mm}&\hspace{-2mm} g_{Ex_{l}}^{l}\\ \end{array} \hspace{-2mm}\right].$ Substituting \eqref{mp0005} into \eqref{mp0001} yields \begin{align}\label{mp02} &\delta J_N\hspace{-1mm}=\hspace{-1mm}E\Big\{[\phi_{x_{N+1}}\hspace{-1mm}+\hspace{-1mm}E(\phi_{Ex_{N+1}})]\sum_{l=0}^{N}\tilde{F}_{x}(N,l\hspace{-1mm}+\hspace{-1mm}1)\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{u_{l}}^{l}\\ g_{u_{l}}^{l}\\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\varepsilon\delta u_{l}\notag\\ &+[\phi_{x_{N+1}}\hspace{-1mm}+\hspace{-1mm}E(\phi_{Ex_{N+1}})]\sum_{l=0}^{N}\tilde{F}_{x}(N,l\hspace{-1mm}+\hspace{-1mm}1)\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{Eu_{l}}^{l} \\ g_{Eu_{l}}^{l}\\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\varepsilon\delta Eu_{l}\notag\\ &+\sum_{k=0}^{N}[L^k_{x_k}\hspace{-1mm}+\hspace{-1mm}E(L^k_{Ex_k})]\sum_{l=0}^{k-1}\tilde{F}_{x}(k\hspace{-1mm}-\hspace{-1mm}1,l\hspace{-1mm}+\hspace{-1mm}1)\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{u_{l}}^{l}\\ g_{u_{l}}^{l}\\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\varepsilon\delta u_{l}\notag\\ &+\sum_{k=0}^{N}[L^k_{x_k}\hspace{-1mm}+\hspace{-1mm}E(L^k_{Ex_k})]\sum_{l=0}^{k-1}\tilde{F}_{x}(k\hspace{-1mm}-\hspace{-1mm}1,l\hspace{-1mm}+\hspace{-1mm}1)\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} f_{Eu_{l}}^{l}\\ g_{Eu_{l}}^{l}\\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\varepsilon\delta Eu_{l}\notag\\ &+\sum_{k=0}^{N}[L^k_{u_k}+E(L_{Eu_{k}}^{k})] \varepsilon\delta u_{k}\Big\}+O(\varepsilon^{2}). 
\end{align}
Note the facts that
\begin{align}
& ~~E\Big\{[L^k_{x_k}\hspace{-1mm}+\hspace{-1mm}E(L^k_{Ex_k})]\sum_{l=0}^{k-1}\tilde{F}_{x}(k\hspace{-1mm}-\hspace{-1mm}1,l\hspace{-1mm}+\hspace{-1mm}1)\hspace{-1mm}\left[\hspace{-2mm}
\begin{array}{cc}
f_{Eu_{l}}^{l}\\
g_{Eu_{l}}^{l}\\
\end{array}
\hspace{-2mm}\right]\hspace{-1mm}\varepsilon\delta Eu_{l} \Big\}\notag\\
&\hspace{-1mm}=\hspace{-1mm} E\hspace{-1mm}\Big\{E\Big\{[L^k_{x_k}\hspace{-1mm}+\hspace{-1mm}E(L^k_{Ex_k})]\sum_{l=0}^{k-1}\tilde{F}_{x}(k\hspace{-1mm}-\hspace{-1mm}1,l\hspace{-1mm}+\hspace{-1mm}1)\hspace{-1mm}\left[\hspace{-2mm}
\begin{array}{cc}
f_{Eu_{l}}^{l}\\
g_{Eu_{l}}^{l}\\
\end{array}
\hspace{-2mm}\right]\hspace{-1mm}\Big\}\varepsilon\delta u_{l} \hspace{-1mm} \Big\},\label{exp}\\
& ~~ E\hspace{-1mm}\Big\{[\phi_{x_{N+1}}\hspace{-1mm}+\hspace{-1mm}E(\phi_{Ex_{N+1}})]\sum_{l=0}^{N}\tilde{F}_{x}(N,l\hspace{-1mm}+\hspace{-1mm}1)\hspace{-1mm}\left[\hspace{-2mm}
\begin{array}{cc}
f_{Eu_{l}}^{l}\\
g_{Eu_{l}}^{l}\\
\end{array}
\hspace{-2mm}\right]\hspace{-1mm}\varepsilon\delta Eu_{l}\Big\}\notag\\
&\hspace{-1mm}= \hspace{-1mm}E\Big\{E\Big\{\hspace{-1mm}[\phi_{x_{N+1}}\hspace{-1mm}+\hspace{-1mm}E(\phi_{Ex_{N+1}})]\sum_{l=0}^{N}\hspace{-1mm}\tilde{F}_{x}(N,l\hspace{-1mm}+\hspace{-1mm}1)\hspace{-1mm}\left[\hspace{-2mm}
\begin{array}{cc}
f_{Eu_{l}}^{l}\\
g_{Eu_{l}}^{l}\\
\end{array}
\hspace{-2mm}\right]\hspace{-1mm}\Big\}\varepsilon\delta u_{l} \hspace{-1mm}\Big\}.\label{exp1}
\end{align}
Also, we have
\begin{align}\label{mp3}
&\sum_{k=0}^{N}[L^k_{x_k}+E(L^k_{Ex_k})]\sum_{l=0}^{k-1} \tilde{F}_{x}(k-\hspace{-1mm}1,l+\hspace{-1mm}1)\hspace{-1mm}\left[\hspace{-2mm}
\begin{array}{cc}
f_{u_{l}}^{l}\\
g_{u_{l}}^{l}\\
\end{array}
\hspace{-2mm}\right]\hspace{-1mm}\varepsilon\delta u_{l}\\
&\hspace{-1mm}=\hspace{-1mm}\sum_{l=0}^{N-1}\left\{\sum_{k=l+1}^{N}[L^k_{x_k}\hspace{-1mm}+\hspace{-1mm}E(L^k_{Ex_k})]\tilde{F}_{x}(k\hspace{-1mm}-\hspace{-1mm}1,l\hspace{-1mm}+\hspace{-1mm}1)\hspace{-1mm}\left[\hspace{-2mm}
\begin{array}{cc} f_{u_{l}}^{l}\\ g_{u_{l}}^{l}\\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\right\}\varepsilon\delta u_{l},\notag\\ &\notag\\ &\sum_{k=0}^{N}E\hspace{-1mm}\left\{[L^k_{x_k}\hspace{-1mm}+\hspace{-1mm}E(L^k_{Ex_k})]\sum_{l=0}^{k-1} \hspace{-1mm} \tilde{F}_{x}(k-1,l+1)\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{Eu_{l}}^{l}\\ g_{Eu_{l}}^{l}\\ \end{array} \hspace{-2mm}\right]\right\}\hspace{-1mm}\varepsilon\delta u_{l}\notag\\ &\hspace{-1.3mm}=\hspace{-1.6mm}\sum_{l=0}^{N\hspace{-0.5mm}-\hspace{-0.5mm}1}\hspace{-1mm}\left\{\hspace{-1mm}E\hspace{-1mm}\left\{ \sum_{k\hspace{-0.3mm}=\hspace{-0.3mm}l\hspace{-0.3mm}+\hspace{-0.3mm}1}^{N}\hspace{-1mm}[L^k_{x_k}\hspace{-1mm}+\hspace{-1mm}E(L^k_{Ex_k})]'\tilde{F}_{x}(k\hspace{-1mm}-\hspace{-1mm}1,l+\hspace{-1mm}1)\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{Eu_{l}}^{l}\\ g_{Eu_{l}}^{l} \\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\right\}\hspace{-1mm}\right\}\hspace{-1mm}\varepsilon\delta u_{l}.\label{mp3000} \end{align} Therefore, \eqref{mp02} becomes \begin{align}\label{mp4} \delta J_N &\hspace{-1mm}=\hspace{-1mm}E\Big\{\mathcal{G}(N+1,N)\varepsilon\delta u_{N}\hspace{-1mm}+\hspace{-1mm}\sum_{l=0}^{N-1}[\mathcal{G}(l\hspace{-1mm}+\hspace{-1mm}1,N)]\varepsilon\delta u_{l}\Big\}\hspace{-1mm}+\hspace{-1mm}O(\varepsilon^{2}), \end{align} where \begin{align} &\mathcal{G}(N+1,N)=[\phi_{x_{N+1}}+E(\phi_{Ex_{N+1}})]f_{u_{N}}^{N}\notag\\ &\hspace{-1mm}+\hspace{-1mm}E\Big\{[\phi_{x_{N+1}}\hspace{-1mm}+\hspace{-1mm}E(\phi_{Ex_{N+1}})]f_{Eu_{N}}^{N}\Big\}\hspace{-1mm}+\hspace{-1mm}[L_{u_{N}}^{N}\hspace{-1mm}+\hspace{-1mm}E(L_{Eu_{N}}^{N})],\label{mp04}\\ &\mathcal{G}(l+1,N)\notag\\ &=[\phi_{x_{N+1}}+E(\phi_{Ex_{N+1}})]\tilde{F}_{x}(N,l+1)\left[\hspace{-2mm} \begin{array}{cc} f_{u_{l}}^{l}\\ g_{u_{l}}^{l}\\ \end{array} \hspace{-2mm}\right]\notag\\ &+E\left\{[\phi_{x_{N+1}}+E(\phi_{Ex_{N+1}})]\tilde{F}_{x}(N,l+1)\left[\hspace{-2mm} \begin{array}{cc} f_{Eu_{l}}^{l}\\ g_{Eu_{l}}^{l} \\ \end{array} 
\hspace{-2mm}\right]\right\}\notag\\ &+\sum_{k=l+1}^{N}[L_{x_{k}}^{k}+E(L_{Ex_{k}}^{k})]\tilde{F}_{x}(k-1,l+1)\left[\hspace{-2mm} \begin{array}{cc} f_{u_{l}}^{l}\\ g_{u_{l}}^{l}\\ \end{array} \hspace{-2mm}\right]\notag\\ &+E\Big\{\sum_{k=l+1}^{N}[L_{x_{k}}^{k}+E(L_{Ex_{k}}^{k})]\tilde{F}_{x}(k-1,l+1)\left[\hspace{-2mm} \begin{array}{cc} f_{Eu_{l}}^{l}\\ g_{Eu_{l}}^{l} \\ \end{array} \hspace{-2mm}\right]\Big\}\notag\\ &+[L_{u_{l}}^{l}+E(L_{Eu_{l}}^{l})].\label{mp004} \end{align} Furthermore, \eqref{mp4} can be rewritten as \begin{align}\label{mp9} \delta J_{N} &=E\Big\{ E \left[{\mathcal G}(N+1,N)\mid {\mathcal F}_{N-1}\right]\varepsilon\delta u_{N}\Big\}\notag\\ &+E\Big\{\sum_{l=0}^{N-1} E \left[{\mathcal G}(l+1, N) \mid {\mathcal F}_{l-1}\right]\varepsilon\delta u_l\Big\}+O(\varepsilon^{2})\notag\\ &+E\left\lbrace \left\lbrace {\mathcal G}(N\hspace{-1mm}+\hspace{-1mm}1,N)-E\left[{\mathcal G}(N\hspace{-1mm}+\hspace{-1mm}1,N)\mid {\mathcal F}_{N-1}\right]\right\rbrace \varepsilon\delta u_{N} \right\rbrace \notag\\ &+E\Big\{\sum_{l=0}^{N-1}\left\lbrace {\mathcal G}(l\hspace{-1mm}+\hspace{-1mm}1, N)\hspace{-1mm}-\hspace{-1mm}E \left[{\mathcal G}(l\hspace{-1mm}+\hspace{-1mm}1, N) \mid {\mathcal F}_{l-1}\right] \right\rbrace \varepsilon\delta u_l\Big\}\notag\\ &=E\Big\{ E \left[{\mathcal G}(N+1,N)\mid {\mathcal F}_{N-1}\right]\varepsilon\delta u_{N}\notag\\ &+\sum_{l=0}^{N-1} E \left[{\mathcal G}(l+1, N) \mid {\mathcal F}_{l-1}\right]\varepsilon\delta u_l\Big\}\hspace{-1mm}+\hspace{-1mm}O(\varepsilon^{2}), \end{align} where the following facts are applied in the last equality, \begin{equation*}\begin{split} E\left\lbrace \left\lbrace {\mathcal G}(N+1,N)\hspace{-1mm}-\hspace{-1mm}E\left[{\mathcal G}(N+1,N)\mid {\mathcal F}_{N-1}\right]\right\rbrace \varepsilon\delta u_{N} \right\rbrace &=0, \notag\\ E\Big\{\sum_{l=0}^{N-1}\left\lbrace {\mathcal G}(l\hspace{-1mm}+\hspace{-1mm}1, N)\hspace{-1mm}-\hspace{-1mm}E \left[{\mathcal G}(l\hspace{-1mm}+\hspace{-1mm}1, N) \mid 
{\mathcal F}_{l-1}\right] \right\rbrace \hspace{-1mm}\varepsilon\delta u_l\hspace{-1mm}\Big\}&=0.
\end{split}\end{equation*}
Since $\delta u_{l}$ is arbitrary for $0\leq l\leq N$, the necessary conditions for the minimum can be given from \eqref{mp9} as
\begin{align}
0&=E \left\lbrace {\mathcal G}(N+1,N)\mid {\mathcal F}_{N-1}\right\rbrace,\label{mp11}\\
0&=E\left\lbrace {\mathcal G}(l+1, N) \mid {\mathcal F}_{l-1} \right\rbrace,~l=0,\cdots,N-1.\label{mp011}
\end{align}
Now we will show that equations \eqref{ps43}-\eqref{ps42} are a restatement of the necessary conditions \eqref{mp11}-\eqref{mp011}. In fact, substituting \eqref{ps42} into \eqref{ps43} and letting $k=N$, we have
\begin{align}\label{yz1}
&E\Big\lbrace (L^{N}_{u_{N}})'\hspace{-1mm}+\hspace{-1mm}E(L_{Eu_{N}}^{N})'\hspace{-1mm}+\hspace{-1mm}(f^{N}_{u_{N}})' [\phi_{x_{N+1}}+E(\phi_{Ex_{N+1}})]'\notag\\
&+E\big\{(f_{Eu_{N}}^{N})'[\phi_{x_{N+1}}\hspace{-1mm}+\hspace{-1mm}E(\phi_{Ex_{N+1}})]'\big\}\Big| {\mathcal F}_{N-1}\Big\rbrace \hspace{-1mm}=\hspace{-1mm}0,
\end{align}
which means that \eqref{yz1} is exactly \eqref{mp11}.
Furthermore, noting \eqref{ps41}, we have that \begin{align}\label{mp14} &\lambda_{k-1}\hspace{-1mm}=\hspace{-1mm}E\Big\{\left[\hspace{-2mm} \begin{array}{cc} I_{n}\\ 0 \\ \end{array} \hspace{-2mm}\right][(L^k_{x_k})'\hspace{-1mm}+\hspace{-1mm}E(L_{Ex_{k}}^{k})]'\hspace{-1mm}+\hspace{-1mm}[\tilde{f}^k_{x_k}]'\lambda_{k}\Big|\mathcal{F}_{k-1}\Big\}\notag\\ &=E\Big\{\hspace{-1.5mm}\left[\hspace{-2mm} \begin{array}{cc} I_{n} \\ 0 \\ \end{array} \hspace{-2mm}\right]\hspace{-1.5mm}[L^k_{x_k}\hspace{-1mm}+\hspace{-1mm}E(L_{Ex_{k}}^{k})]'\hspace{-1mm}+\hspace{-1mm}(\tilde{f}^k_{x_k})'\left[\hspace{-2mm} \begin{array}{cc} I_{n} \\ 0 \\ \end{array} \hspace{-2mm}\right][L^{k+1}_{x_k+1}\hspace{-1mm}+\hspace{-1mm}E(L_{Ex_{k}}^{k+1})]'\notag\\ &~~~+(\tilde{f}^k_{x_k})'(\tilde{f}^k_{x_k})'\lambda_{k+1}\Big|\mathcal{F}_{k-1}\Big\}\notag\\ &=E\Big\{\hspace{-1.5mm}\left[\hspace{-2mm} \begin{array}{cc} I_{n} \\ 0 \\ \end{array} \hspace{-2mm}\right]\hspace{-1.5mm}[L^k_{x_k}\hspace{-1mm}+\hspace{-1mm}E(L_{Ex_{k}}^{k})]'\hspace{-1mm}+\hspace{-1mm}(\tilde{f}^k_{x_k})'\left[\hspace{-2mm} \begin{array}{cc} I_{n} \\ 0 \\ \end{array} \hspace{-2mm}\right][L^k_{x_k+1}\hspace{-1mm}+\hspace{-1mm}E(L_{Ex_{k}}^{k+1})]'\notag\\ &~~+(\tilde{f}^k_{x_k})'(\tilde{f}^{k+1}_{x_k+1})'\left[\hspace{-2mm} \begin{array}{cc} I_{n} \\ 0 \\ \end{array} \hspace{-2mm}\right][L^{k+2}_{x_k+2}+E(L_{Ex_{k+2}}^{k+2})]'+\cdots\notag\\ &~~+(\tilde{f}^k_{x_k})'(\tilde{f}^{k+1}_{x_k+1})'\cdots(\tilde{f}^{N-1}_{x_N-1})'\left[\hspace{-2mm} \begin{array}{cc} I_{n} \\ 0 \\ \end{array} \hspace{-2mm}\right][L^{N}_{x_N}\hspace{-1mm}+\hspace{-1mm}E(L_{Ex_{N}}^{N})]'\notag\\ &~~+(\tilde{f}^k_{x_k})'(\tilde{f}^{k+1}_{x_k+1})'\cdots(\tilde{f}^{N}_{x_N})'\lambda_{N}\Big|\mathcal{F}_{k-1}\Big\}\notag\\ &=E\Big\{\sum_{j=k}^{N}\tilde{F}_{x}'(j-1,k)[L_{x_{j}}^{j}+E(L_{Ex_{j}}^{j})]\notag\\ &~~~+\tilde{F}_{x}'(N,k)[\phi_{x_{N+1}}+E(\phi_{Ex_{N+1}})]'\Big|\mathcal{F}_{k-1}\Big\}. 
\end{align} Substituting \eqref{mp14} into \eqref{ps43}, one has \begin{align}\label{mp15} 0&\hspace{-1mm}=\hspace{-1mm}E\Bigg\{ [L^k_{u_k}+E(L_{Eu_{k}}^{k})]'\hspace{-1mm}\notag\\ &+\hspace{-1mm} \sum_{j=k+1}^{N} \left[\hspace{-2mm} \begin{array}{cc} f_{u_{k}}^{k} \\ g_{u_{k}}^{k} \\ \end{array} \hspace{-2mm}\right]' \left\{\tilde{F}'_{x}(j-1,k+1)[L_{x_{j}}^{j}+E(L_{Ex_{j}}^{j})]'\right\}\notag\\ &+\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{u_{k}}^{k} \\ g_{u_{k}}^{k} \\ \end{array} \hspace{-2mm}\right]' \left\{\tilde{F}_{x}'(N,k+1)[\phi_{x_{N+1}}+E(\phi_{Ex_{N+1}})]'\right\}\notag\\ &\hspace{-1mm}+\hspace{-1mm}E\hspace{-1mm}\bigg\{\hspace{-1mm}\sum_{j=k+1}^{N}\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{Eu_{k}}^{k} \\ g_{Eu_{k}}^{k} \\ \end{array} \hspace{-2mm}\right]'\hspace{-1mm}\left\{\tilde{F}'_{x}(j\hspace{-1mm}-\hspace{-1mm}1,k\hspace{-1mm}+\hspace{-1mm}1)[L_{x_{j}}^{j}\hspace{-1mm}+\hspace{-1mm}E(L_{Ex_{j}}^{j}]'\right\} \bigg\}\notag\\ &\hspace{-1mm}+\hspace{-1mm}E\bigg\{\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} f_{Eu_{k}}^{k} \\ g_{Eu_{k}}^{k} \\ \end{array} \hspace{-2mm}\right]'\hspace{-1mm}\left\{\tilde{F}'_{x}(N,k\hspace{-1mm}+\hspace{-1mm}1)[\phi_{x_{N+1}}\hspace{-1mm}+\hspace{-1mm}E(\phi_{Ex_{N+1}})]'\hspace{-1mm}\right\}\hspace{-1mm} \bigg\}\hspace{-1mm}\Bigg|\mathcal{F}_{k-1} \hspace{-1mm}\Bigg\},\notag\\ & ~~~k=0,\cdots, N, \end{align} which is \eqref{mp011}. It has been proved that \eqref{ps43}-\eqref{ps42} are exactly the necessary conditions for the minimum of $J_{N}$. The proof is complete. \end{proof} \section{Proof of Theorem \ref{main}} \begin{proof} ``Necessity": Under Assumption \ref{ass1}, if \emph{Problem 1} has a unique solution, we will show by induction that $\Upsilon_{k}^{(1)},~\Upsilon_{k}^{(2)}$ are all strictly positive definite and the optimal controller is given by \eqref{th43}. 
Firstly, we denote $J(k)$ as below \begin{align} J(k)&\triangleq \sum_{j=k}^{N}E\Big[x_{j}'Q_{j}x_{j}+(Ex_{j})'\bar{Q}_{j}Ex_{j}\notag\\ &+u_{j}'R_{j}u_{j}+(Eu_{j})'\bar{R}_{j}Eu_{j}\Big]\notag\\ &+E[x_{N+1}'P_{N+1}x_{N\hspace{-0.5mm}+\hspace{-0.5mm}1}] \hspace{-1mm}+\hspace{-1mm}(Ex_{N\hspace{-0.5mm}+\hspace{-0.5mm}1})'\bar{P}_{N\hspace{-0.5mm}+\hspace{-0.5mm}1}Ex_{N\hspace{-0.5mm}+\hspace{-0.5mm}1}.\label{barjk} \end{align} For $k=N$, equation \eqref{barjk} becomes \begin{align} & J(N)=E\Big[x_{N}'Q_{N}x_{N}+(Ex_{N})'\bar{Q}_{N}Ex_{N}\notag\\ &~~+u_{N}'R_{N}u_{N}+(Eu_{N})'\bar{R}_{N}Eu_{N}\Big]\notag\\ &~~+E[x_{N+1}'P_{N+1}x_{N+1}]\hspace{-1mm}+\hspace{-1mm}(Ex_{N+1})'\bar{P}_{N+1}Ex_{N+1}.\label{barjn} \end{align} Using system dynamics \eqref{ps1}, $J(N)$ can be calculated as a quadratic form of $x_{N}$, $Ex_{N}$, $u_{N}$ and $Eu_{N}$. By Assumption \ref{ass1}, we know that the minimum of \eqref{barjn} must satisfy $J^{*}(N)\geq 0$. Let $x_{N}=0$, since it is assumed \emph{Problem \ref{prob1}} admits a unique solution, thus it is clear that $u_{N}=0$ is the optimal controller and optimal cost function is $J^{*}(N)=0$. Hence, $J(N)$ must be strictly positive for any nonzero $u_{N}$, i.e., for $u_{N}\neq 0$, we can obtain \begin{align} J(N)&=E[(u_{N}\hspace{-1mm}-\hspace{-1mm}Eu_{N})'\Upsilon_{N}^{(1)}(u_{N}\hspace{-1mm}-\hspace{-1mm}Eu_{N})]\hspace{-1mm} +\hspace{-1mm}Eu_{N}'\Upsilon_{N}^{(2)}Eu_{N}\notag\\ &>0.\label{barjn1} \end{align} Following Lemma \ref{lemma01}, clearly we have $\Upsilon_{N}^{(1)}>0$ and $\Upsilon_{N}^{(2)}>0$ from \eqref{barjn1}. In fact, in the case $Eu_{N}=0$ and $u_{N}\neq 0$, equation \eqref{barjn1} becomes \begin{equation*} J(N)=E[u_{N}'\Upsilon_{N}^{(1)}u_{N}]>0. \end{equation*} Thus $\Upsilon_{N}^{(1)}>0$ can be obtained by using Lemma \ref{lemma01} and Remark \ref{rem1}. 
On the other hand, if $u_{N}=Eu_{N}\neq 0$, i.e., $u_{N}$ is deterministic controller, then \eqref{barjn1} can be reduced to \begin{equation*} J(N)=u_{N}'\Upsilon_{N}^{(2)}u_{N}>0. \end{equation*} Similarly, it holds from Lemma \ref{lemma01} and Remark \ref{rem1} that $\Upsilon_{N}^{(2)}>0$. Further the optimal controller $u_{N}$ is to be calculated as follows. Using \eqref{ps1} and \eqref{th31}, from \eqref{th33} with $k$ replaced by $N$, we have that \begin{align}\label{nc2} &0\hspace{-1mm}=\hspace{-1mm}E\Big\{ R_{N}u_{N}\hspace{-1mm}+\hspace{-1mm}\bar{R}_{N}Eu_{N}\hspace{-1mm}+\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} B_{N}+w_{N}D_{N} \\ 0 \\ \end{array} \hspace{-1mm}\right]'\lambda_{N}\hspace{-1mm}\notag\\ &~~~~~~~~~+\hspace{-1mm}E\left[\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} \bar{B}_{N}+w_{N}\bar{D}_{N} \\ B_{N}+\bar{B}_{N} \\ \end{array} \hspace{-1mm}\right]'\lambda_{N}\right]\hspace{-1mm}\Big| {\mathcal F}_{N-1}\hspace{-1mm}\Big\}\notag\\ &=E\Big\{R_{N}u_{N}+\bar{R}_{N}Eu_{N}\notag\\ &~~~+(B_{N}+w_{N}D_{N})'(P_{N+1}x_{N+1}+\bar{P}_{N+1}^{(1)}Ex_{N+1})\notag\\ &~~~+E[(\bar{B}_{N}\hspace{-1mm}+\hspace{-1mm}w_{N}\bar{D}_{N})'(P_{N+1}x_{N+1}\hspace{-1mm}+\hspace{-1mm}\bar{P}_{N+1}^{(1)}Ex_{N+1})]\notag\\ &~~~+E[(B_{N}\hspace{-1mm}+\hspace{-1mm}\bar{B}_{N})'(\bar{P}_{N+1}^{(2)}x_{N+1}\hspace{-1mm}+\hspace{-1mm}\bar{P}_{N+1}^{(3)}Ex_{N+1})]\Big|\mathcal{F}_{N-1}\Big\}\notag\\ &=(R_{N}+B_{N}'P_{N+1}B_{N}+\sigma^{2}D_{N}'P_{N+1}D_{N})u_{N}\notag\\ &~~~+\Big[\bar{R}_{N}+B_{N}'P_{N+1}\bar{B}_{N}+\sigma^{2}D_{N}'P_{N+1}\bar{D}_{N}\notag\\ &~~~+B_{N}'\bar{P}_{N+1}^{(1)}(B_{N}+\bar{B}_{N})+\bar{B}_{N}'\bar{P}_{N+1}^{(1)}(B_{N}\hspace{-1mm}+\hspace{-1mm}\bar{B}_{N})\notag\\ &~~~+\bar{B}_{N}'P_{N+1}B_{N}+\sigma^{2}\bar{D}_{N}'P_{N+1}D_{N}\notag\\ &~~~+\bar{B}_{N}'P_{N+1}\bar{B}_{N}+\sigma^{2}\bar{D}_{N}'P_{N+1}\bar{D}_{N}\notag\\ &~~~+(B_{N}+\bar{B}_{N})'(\bar{P}_{N+1}^{(2)}+\bar{P}_{N+1}^{(3)})(B_{N}+\bar{B}_{N})\Big]Eu_{N}\notag\\ 
&~~~+(B_{N}'P_{N+1}A_{N}+\sigma^{2}D_{N}'P_{N+1}C_{N})x_{N}\notag\\
&~~~+\Big[B_{N}'P_{N+1}\bar{A}_{N}+\sigma^{2}D_{N}'P_{N+1}\bar{C}_{N}\notag\\
&~~~+B_{N}'\bar{P}_{N+1}^{(1)}(A_{N}+\bar{A}_{N})+\bar{B}_{N}'\bar{P}_{N+1}^{(1)}(A_{N}\hspace{-1mm}+\hspace{-1mm}\bar{A}_{N})\notag\\
&~~~+\bar{B}_{N}'P_{N+1}A_{N}+\sigma^{2}\bar{D}_{N}'P_{N+1}C_{N}\notag\\
&~~~+\bar{B}_{N}'P_{N+1}\bar{A}_{N}+\sigma^{2}\bar{D}_{N}'P_{N+1}\bar{C}_{N}\notag\\
&~~~+(B_{N}+\bar{B}_{N})'(\bar{P}_{N+1}^{(2)}\hspace{-1mm}+\hspace{-1mm}\bar{P}_{N+1}^{(3)})(A_{N}\hspace{-1mm}+\hspace{-1mm}\bar{A}_{N})\Big]Ex_{N}.
\end{align}
Noting that $\bar{P}_{N+1}^{(1)}+\bar{P}_{N+1}^{(2)}+\bar{P}_{N+1}^{(3)}=\bar{P}_{N+1}$, it follows from \eqref{nc2} that
\begin{align}
0&=\Upsilon_{N}^{(1)}u_{N}+[\Upsilon_{N}^{(2)}-\Upsilon_{N}^{(1)}]Eu_{N}\notag\\
&~~+M_{N}^{(1)}x_{N}+[M_{N}^{(2)}-M_{N}^{(1)}]Ex_{N},\label{nc3}
\end{align}
where $\Upsilon_{N}^{(1)}, \Upsilon_{N}^{(2)}, M_{N}^{(1)},M_{N}^{(2)}$ are given by \eqref{upsi1}-\eqref{h2} for $k=N$. Therefore, taking expectations on both sides of \eqref{nc3}, we have
\begin{equation}\label{nc03}
\Upsilon_{N}^{(2)}Eu_{N}+M_{N}^{(2)}Ex_{N}=0.
\end{equation}
Since $\Upsilon_{N}^{(1)}$ and $\Upsilon_{N}^{(2)}$ have been proved to be strictly positive, $Eu_{N}$ can be presented as
\begin{equation}\label{nc003}
Eu_{N}=-[\Upsilon_{N}^{(2)}]^{-1} M_{N}^{(2)}Ex_{N}.
\end{equation}
By plugging \eqref{nc003} into \eqref{nc3}, the optimal controller $u_{N}$ given by \eqref{th43} with $k=N$ can be verified. Next we will show that $\lambda_{N-1}$ has the form of \eqref{th4} associated with \eqref{th41}-\eqref{th42} for $k=N$.
Notice \eqref{th31} and \eqref{th32}, we have that \begin{align}\label{nc8} \lambda_{N-1}&=E\Big\{\left[\hspace{-1mm} \begin{array}{cc} Q_{N}x_{N}+\bar{Q}_{N}Ex_{N}\\ 0 \\ \end{array} \hspace{-1mm}\right]\hspace{-1mm}\notag\\ &~+\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} A_{N}+w_{N}C_{N}&\bar{A}_{N}+w_{N}\bar{C}_{N}\\ 0 & A_{N}+\bar{A}_{N} \\ \end{array} \hspace{-1mm}\right]'\lambda_{N}\Big|\mathcal{F}_{N-1}\Big\}\notag\\ &=\hspace{-1mm}E\Big\{\left[\hspace{-1mm} \begin{array}{cc} Q_{N}x_{N}+\bar{Q}_{N}Ex_{N}\\ 0 \\ \end{array} \hspace{-1mm}\right]\hspace{-1mm}\notag\\ &~+\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} A_{N}\hspace{-1mm}+\hspace{-1mm}w_{N}C_{N}& \bar{A}_{N}\hspace{-1mm}+\hspace{-1mm}w_{N}\bar{C}_{N}\\ 0 & A_{N}\hspace{-1mm}+\hspace{-1mm}\bar{A}_{N} \\ \end{array} \hspace{-1mm}\right]'\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} P_{N+1}& \bar{P}_{N+1}^{(1)}\\ \bar{P}_{N+1}^{(2)}& \bar{P}_{N+1}^{(3)} \\ \end{array} \hspace{-1mm}\right]\notag\\ &~~~\times\left[ \begin{array}{cc} x_{N+1}\\ Ex_{N+1} \\ \end{array} \right]\hspace{-1mm}\Big|\mathcal{F}_{N-1}\Big\}. 
\end{align} By using the optimal controller \eqref{th43} and the system dynamics \eqref{ps1}, each element of $\lambda_{N-1}$ can be calculated as follows, \begin{align} &E[(A_{N}+w_{N}C_{N})'P_{N+1}x_{N+1}|\mathcal{F}_{N-1}]\notag\\ &=\Big(A_{N}'P_{N+1}A_{N}+\sigma^{2}C_{N}'P_{N+1}C_{N}\notag\\ &~~~+A_{N}'P_{N+1}B_{N}K_{N}+\sigma^{2}C_{N}'P_{N+1}D_{N}K_{N}\Big)x_{N}\notag\\ &~~~+\Big[A_{N}'P_{N+1}\bar{A}_{N}+\sigma^{2}C_{N}'P_{N+1}\bar{C}_{N}\notag\\ &~~~+A_{N}'P_{N+1}B_{N}\bar{K}_{N}+\sigma^{2}C_{N}'P_{N+1}D_{N}\bar{K}_{N}\notag\\ &~~~+A_{N}'P_{N+1}\bar{B}_{N}(K_{N}+\bar{K}_{N})\notag\\ &~~~+\sigma^{2}C_{N}'P_{N+1}\bar{D}_{N}(K_{N}+\bar{K}_{N})\Big ]Ex_{N}, \label{nc9}\\ &E[(A_{N}+w_{N}C_{N})'\bar{P}_{N+1}^{(1)}Ex_{N+1}|\mathcal{F}_{N-1}]\notag\\ &=\Big\{A_{N}'\bar{P}_{N+1}^{(1)}(A_{N}+\bar{A}_{N})\notag\\ &~~~+A_{N}'\bar{P}_{N+1}^{(1)}(B_{N}+\bar{B}_{N})(K_{N}+\bar{K}_{N})\Big\}Ex_{N},\label{nc90}\\ &E\Big\{\big[(\bar{A}_{N}\hspace{-1mm}+\hspace{-1mm}w_{N}\bar{C}_{N})'P_{N+1}\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}^{(2)}\big]x_{N+1}\Big|\mathcal{F}_{N-1}\Big\}\notag\\ &=\Big\{\bar{A}_{N}'P_{N+1}A_{N}+\sigma^{2}\bar{C}_{N}'P_{N+1}C_{N}\notag\\ &~~~+\bar{A}_{N}'P_{N+1}B_{N}K_{N}+\sigma^{2}\bar{C}_{N}'P_{N+1}D_{N}K_{N}\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}^{(2)}A_{N}\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}^{(2)}B_{N}K_{N}\Big\}x_{N}\notag\\ &~~~+\Big\{\bar{A}_{N}'P_{N+1}\bar{A}_{N}+\sigma^{2}\bar{C}_{N}'P_{N+1}\bar{C}_{N}\notag\\ &~~~+\bar{A}_{N}'P_{N+1}B_{N}\bar{K}_{N}+\sigma^{2}\bar{C}_{N}'P_{N+1}D_{N}\bar{K}_{N}\notag\\ &~~~+\bar{A}_{N}'P_{N+1}\bar{B}_{N}(K_{N}+\bar{K}_{N})\notag\\ &~~~+\sigma^{2}\bar{C}_{N}'P_{N+1}\bar{D}_{N}(K_{N}+\bar{K}_{N})\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}^{(2)}B_{N}\bar{K}_{N}\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}^{(2)}\bar{B}_{N}(K_{N}+\bar{K}_{N})\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}^{(2)}\bar{A}_{N}\Big\}Ex_{N},\label{nc10} \end{align} and \begin{align}\label{nc101} 
&E\{[(\bar{A}_{N}\hspace{-1mm}+\hspace{-1mm}w_{N}\bar{C}_{N})'\bar{P}_{N+1}^{(1)}\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}^{(3)}]Ex_{N+1}|\mathcal{F}_{N-1}\}\notag\\ &=\Big\{\bar{A}_{N}'\bar{P}_{N+1}^{(1)}(A_{N}+\bar{A}_{N})\notag\\ &~~~+\bar{A}_{N}'\bar{P}_{N+1}^{(1)}(B_{N}+\bar{B}_{N})(K_{N}+\bar{K}_{N})\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}^{(3)}(B_{N}+\bar{B}_{N})(K_{N}+\bar{K}_{N})\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}^{(3)}(A_{N}+\bar{A}_{N})\Big\}Ex_{N}. \end{align} By plugging \eqref{nc9}-\eqref{nc101} into \eqref{nc8}, we know that $\lambda_{N-1}$ is given as, \begin{equation}\label{nc08} \lambda_{N-1}=\left[ \begin{array}{cc} P_{N}&\bar{P}_{N}^{(1)}\\ \bar{P}_{N}^{(2)} &\bar{P}_{N}^{(3)} \\ \end{array} \right]\left[ \begin{array}{cc} x_{N}\\ Ex_{N} \\ \end{array} \right], \end{equation} where $\bar{P}_{N}^{(1)}$, $\bar{P}_{N}^{(2)}$, $\bar{P}_{N}^{(3)}$ are respectively calculated in the following, \begin{align} \bar{P}_{N}^{(1)}&=\bar{Q}_{N}+A_{N}'P_{N+1}\bar{A}_{N}+\sigma^{2}C_{N}'P_{N+1}\bar{C}_{N}\notag\\ &~~~+A_{N}'P_{N+1}B_{N}\bar{K}_{N}+\sigma^{2}C_{N}'P_{N+1}D_{N}\bar{K}_{N}\notag\\ &~~~+A_{N}'P_{N+1}\bar{B}_{N}(K_{N}+\bar{K}_{N})\notag\\ &~~~+\sigma^{2}C_{N}'P_{N+1}\bar{D}_{N}(K_{N}+\bar{K}_{N})\notag\\ &~~~+A_{N}'\bar{P}_{N+1}^{(1)}(A_{N}+\bar{A}_{N}) \notag\\ &~~~+A_{N}'\bar{P}_{N+1}^{(1)}(B_{N}+\bar{B}_{N})(K_{N}+\bar{K}_{N}),\label{pk1}\\ \bar{P}_{N}^{(2)}&=\bar{A}_{N}'P_{N+1}A_{N}+\sigma^{2}\bar{C}_{N}'P_{N+1}C_{N}\notag\\ &~~~+\bar{A}_{N}'P_{N+1}B_{N}K_{N}+\sigma^{2}\bar{C}_{N}'P_{N+1}D_{N}K_{N}\notag\\ &~~~+(A_{N}\hspace{-1mm}+\hspace{-1mm}\bar{A}_{N})'\bar{P}_{N+1}^{(2)}A_{N}\hspace{-1mm}+\hspace{-1mm} (A_{N}\hspace{-1mm}+\hspace{-1mm}\bar{A}_{N})'\bar{P}_{N+1}^{(2)}B_{N}K_{N},\label{pk2}\\ \bar{P}_{N}^{(3)}&=\bar{A}_{N}'P_{N+1}\bar{A}_{N}+\sigma^{2}\bar{C}_{N}'P_{N+1}\bar{C}_{N}\notag\\ &~~~+\bar{A}_{N}'P_{N+1}B_{N}\bar{K}_{N}+\sigma^{2}\bar{C}_{N}'P_{N+1}D_{N}\bar{K}_{N}\notag\\ 
&~~~+\bar{A}_{N}'P_{N+1}\bar{B}_{N}(K_{N}+\bar{K}_{N})\notag\\ &~~~+\sigma^{2}\bar{C}_{N}'P_{N+1}\bar{D}_{N}(K_{N}+\bar{K}_{N})\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}^{(2)}\bar{A}_{N}\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}^{(2)}B_{N}\bar{K}_{N}\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}^{(2)}\bar{B}_{N}(K_{N}+\bar{K}_{N})\notag\\ &~~~+\bar{A}_{N}'\bar{P}_{N+1}^{(1)}(A_{N}+\bar{A}_{N})\notag\\ &~~~+\bar{A}_{N}'\bar{P}_{N+1}^{(1)}(B_{N}+\bar{B}_{N})(K_{N}+\bar{K}_{N})\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}^{(3)}(B_{N}+\bar{B}_{N})(K_{N}+\bar{K}_{N})\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}^{(3)}(A_{N}+\bar{A}_{N}),\label{pk3} \end{align} with $\bar{P}_{N+1}^{(1)}=\bar{P}_{N+1}$, $\bar{P}_{N+1}^{(2)}=\bar{P}_{N+1}^{(3)}=0$. Similarly, $P_N$ is given as \begin{align}\label{pn} P_{N}&=Q_{N}+A_{N}'P_{N+1}A_{N}+\sigma^{2}C_{N}'P_{N+1}C_{N}\notag\\ &~~~+A_{N}'P_{N+1}B_{N}K_{N}+\sigma^{2}C_{N}'P_{N+1}D_{N}K_{N}\notag\\ &=Q_{N}+A_{N}'P_{N+1}A_{N}+\sigma^{2}C_{N}'P_{N+1}C_{N}\notag\\ &~~~-(A_{N}'P_{N+1}B_{N}+\sigma^{2}C_{N}'P_{N+1}D_{N})[\Upsilon_{N}^{(1)}]^{-1}M_{N}^{(1)}\notag\\ &=Q_{N}+A_{N}'P_{N+1}A_{N}+\sigma^{2}C_{N}'P_{N+1}C_{N}\notag\\ &~~~-[M_{N}^{(1)}]'[\Upsilon_{N}^{(1)}]^{-1}M_{N}^{(1)}, \end{align} which is exactly \eqref{th41} for $k=N$. Now we show $\bar{P}_{N}=\bar{P}_{N}^{(1)}+\bar{P}_{N}^{(2)}+\bar{P}_{N}^{(3)}$ obeys \eqref{th42}. 
In fact, it holds from \eqref{pk1}-\eqref{pn} that \begin{align}\label{barpn} &\bar{P}_{N}=\bar{P}_{N}^{(1)}+\bar{P}_{N}^{(2)}+\bar{P}_{N}^{(3)}\notag\\ &=\bar{Q}_{N}+A_{N}'P_{N+1}\bar{A}_{N}+\sigma^{2}C_{N}'P_{N+1}\bar{C}_{N}\notag\\ &~~~+\bar{A}_{N}'P_{N+1}\bar{A}_{N}+\sigma^{2}\bar{C}_{N}'P_{N+1}\bar{C}_{N}\notag\\ &~~~+\bar{A}_{N}'P_{N+1}A_{N}+\sigma^{2}\bar{C}_{N}'P_{N+1}C_{N}\notag\\ &~~~+\Big[A_{N}'P_{N+1}B_{N}+\sigma^{2}C_{N}'P_{N+1}D_{N}\notag\\ &~~~+\bar{A}_{N}'P_{N+1}B_{N}+\sigma^{2}\bar{C}_{N}'P_{N+1}D_{N}\notag\\ &~~~+A_{N}'P_{N+1}\bar{B}_{N}+\sigma^{2}C_{N}'P_{N+1}\bar{D}_{N}\notag\\ &~~~+\bar{A}_{N}'P_{N+1}\bar{B}_{N}+\sigma^{2}\bar{C}_{N}'P_{N+1}\bar{D}_{N}\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}(A_{N}+\bar{A}_{N})\Big](K_{N}+\bar{K}_{N})\notag\\ &~~~-(A_{N}'P_{N+1}B_{N}+\sigma^{2}C_{N}'P_{N+1}D_{N})K_{N}\notag\\ &=\bar{Q}_{N}+A_{N}'P_{N+1}\bar{A}_{N}+\sigma^{2}C_{N}'P_{N+1}\bar{C}_{N}\notag\\ &~~~+\bar{A}_{N}'P_{N+1}\bar{A}_{N}+\sigma^{2}\bar{C}_{N}'P_{N+1}\bar{C}_{N}\notag\\ &~~~+\bar{A}_{N}'P_{N+1}A_{N}+\sigma^{2}\bar{C}_{N}'P_{N+1}C_{N}\notag\\ &~~~+(A_{N}+\bar{A}_{N})'\bar{P}_{N+1}(A_{N}+\bar{A}_{N})\notag\\ &~~~+[M_{N}^{(1)}]'[\Upsilon_{N}^{(1)}]^{-1}M_{N}^{(1)}-[M_{N}^{(2)}]'[\Upsilon_{N}^{(2)}]^{-1}M_{N}^{(2)}. \end{align} where $\bar{P}_{N+1}^{(1)}+\bar{P}_{N+1}^{(2)}+\bar{P}_{N+1}^{(3)}=\bar{P}_{N+1}$ has been inserted to the second equality of \eqref{barpn}. Thus, \eqref{th4} associated with \eqref{th41}-\eqref{th42} have been verified for $k=N$. Therefore we have shown the necessity for $k=N$ in the above. 
To complete the induction, take $0\leq l\leq N$, for any $k\geq l+1$, we assume that: \begin{itemize} \item $\Upsilon_{k}^{(1)}$ and $\Upsilon_{k}^{(2)}$ in \eqref{upsi1} and \eqref{upsi2} are all strictly positive; \item The costate $\lambda_{k-1}$ is given by \eqref{th4}, $P_{k}$ satisfies \eqref{th41} and $\bar{P}_{k}^{(1)}$, $\bar{P}_{k}^{(2)}$, $\bar{P}_{k}^{(3)}$ satisfy \eqref{pk1}-\eqref{pk3} with $N$ replaced by $k$, respectively. Furthermore, $\bar{P}_{k}^{(1)}+\bar{P}_{k}^{(2)}+\bar{P}_{k}^{(3)}=\bar{P}_{k}$ and $\bar{P}_{k}$ obeys \eqref{th42}; \item The optimal controller $u_{k}$ is as in \eqref{th43}. \end{itemize} We will show the above statements are also true for $k=l$. Firstly, we show $\Upsilon_{l}^{(1)}$ and $\Upsilon_{l}^{(2)}$ are positive definite if {\em Problem 1} has a unique solution. By applying the maximum principle \eqref{th33}-\eqref{th32} and \eqref{ps1}, we can obtain \begin{align*} &~~E\Big\{\left[\hspace{-1mm} \begin{array}{cc} x_{k}\\ Ex_{k}\\ \end{array} \hspace{-1mm}\right]'\lambda_{k-1}-\left[\hspace{-1mm} \begin{array}{cc} x_{k+1}\\ Ex_{k+1}\\ \end{array} \hspace{-1mm}\right]'\lambda_{k}\Big\}\\ &\hspace{-1mm}=\hspace{-1mm} E\Big\{\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} x_{k} \\ Ex_{k} \\ \end{array} \hspace{-1mm}\right]'E\Big\{\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} A_{k}+w_{k}C_{k} & \bar{A}_{k}+w_{k}\bar{C}_{k} \\ 0 & A_{k}+\bar{A}_{k} \\ \end{array} \hspace{-1mm}\right]'\hspace{-1mm}\lambda_{k}\Big|\mathcal{F}_{k-1}\Big\}\\ &+\left[\hspace{-1mm} \begin{array}{cc} x_{k} \\ Ex_{k} \\ \end{array} \hspace{-1mm}\right]'\left[\hspace{-1mm} \begin{array}{cc} Q_{k}x_{k}+\bar{Q}_{k}Ex_{k} \\ 0 \\ \end{array} \hspace{-1mm}\right]\\ &-\left[\hspace{-1mm} \begin{array}{cc} x_{k} \\ Ex_{k} \\ \end{array} \hspace{-1mm}\right]'\left[\hspace{-1mm} \begin{array}{cc} A_{k}+w_{k}C_{k} & \bar{A}_{k}+w_{k}\bar{C}_{k} \\ 0 & A_{k}+\bar{A}_{k} \\ \end{array} \hspace{-1mm}\right]'\lambda_{k}\\ &-\left[\hspace{-1mm} 
\begin{array}{cc} u_{k} \\ Eu_{k} \\ \end{array} \hspace{-1mm}\right]'\left[\hspace{-1mm} \begin{array}{cc} B_{k}+w_{k}D_{k} & \bar{B}_{k}+w_{k}\bar{D}_{k} \\ 0 & B_{k}+\bar{B}_{k} \\ \end{array} \hspace{-1mm}\right]'\lambda_{k} \hspace{-1mm}\Big\}\\ &\hspace{-1mm}=\hspace{-1mm} E\Big\{\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} x_{k} \\ Ex_{k} \\ \end{array} \hspace{-1mm}\right]'E\Big\{\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} A_{k}+w_{k}C_{k} & \bar{A}_{k}+w_{k}\bar{C}_{k} \\ 0 & A_{k}+\bar{A}_{k} \\ \end{array} \hspace{-1mm}\right]'\hspace{-1mm}\lambda_{k}\Big|\mathcal{F}_{k-1}\Big\}\\ &+\left[\hspace{-1mm} \begin{array}{cc} x_{k} \\ Ex_{k} \\ \end{array} \hspace{-1mm}\right]'\left[ \begin{array}{cc} Q_{k}x_{k}+\bar{Q}_{k}Ex_{k} \\ 0 \\ \end{array} \hspace{-1mm}\right]\\ &-\left[ \begin{array}{cc} x_{k} \\ Ex_{k} \\ \end{array} \hspace{-1mm}\right]'\left[\hspace{-1mm} \begin{array}{cc} A_{k}+w_{k}C_{k} & \bar{A}_{k}+w_{k}\bar{C}_{k} \\ 0 & A_{k}+\bar{A}_{k} \\ \end{array} \hspace{-1mm}\right]'\lambda_{k}\\ &\hspace{-1mm}-\hspace{-1mm}u_{k}'\left[ \begin{array}{cc} B_{k}+w_{k}D_{k} \\ 0 \\ \end{array} \hspace{-1mm}\right]'\lambda_{k}\hspace{-1mm}-\hspace{-1mm}u_{k}'E\Big\{\left[\hspace{-1mm} \begin{array}{cc} \bar{B}_{k}+w_{k}\bar{D}_{k}\\ B_{k}+\bar{B}_{k} \\ \end{array} \hspace{-1mm}\right]'\lambda_{k}\Big\}\Big\}\\ &\hspace{-1mm}=\hspace{-1mm}E\hspace{-1mm}\Big\{\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} x_{k} \\ Ex_{k} \\ \end{array} \hspace{-1mm}\right]'\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} Q_{k}x_{k}\hspace{-1mm}+\hspace{-1mm}\bar{Q}_{k}Ex_{k} \\ 0 \\ \end{array} \hspace{-1mm}\right] \hspace{-1mm}\Big\}\hspace{-1mm}+\hspace{-1mm}E(u_{k}'R_{k}u_{k}\hspace{-1mm}+\hspace{-1mm}Eu_{k}'\bar{R}_{k}Eu_{k})\\ &=E(x_{k}'Q_{k}x_{k}\hspace{-1mm}+\hspace{-1mm}Ex_{k}'\bar{Q}_{k}Ex_{k}\hspace{-1mm}+\hspace{-1mm}u_{k}'R_{k}u_{k}\hspace{-1mm}+\hspace{-1mm}Eu_{k}'\bar{R}_{k}Eu_{k}). 
\end{align*} Adding from $k=l+1$ to $k=N$ on both sides of the above equation, we have \begin{align*} &E\Big\{\hspace{-1mm} \left[\hspace{-1mm} \begin{array}{cc} x_{l+1} \\ Ex_{l+1} \\ \end{array} \hspace{-1mm}\right]'\hspace{-1mm}\lambda_{l}\hspace{-1mm} -\hspace{-1mm}x_{N+1}'P_{N+1}x_{N+1}\hspace{-1mm}-\hspace{-1mm}Ex_{N+1}'P_{N+1}Ex_{N+1}\Big\}\\ &\hspace{-1mm}=\hspace{-1mm}\sum_{k=l+1}^{N}\hspace{-1mm}E(x_{k}'Q_{k}x_{k}\hspace{-1mm}+\hspace{-1mm}Ex_{k}'\bar{Q}_{k}Ex_{k}\hspace{-1mm}+\hspace{-1mm}u_{k}'R_{k}u_{k}\hspace{-1mm}+\hspace{-1mm}Eu_{k}'\bar{R}_{k}Eu_{k}). \end{align*} Thus, it follows from \eqref{barjk} that \begin{align}\label{jnnn} & J(l)= E(x_{l}'Q_{l}x_{l}+Ex_{l}'\bar{Q}_{l}Ex_{l}+u_{l}'R_{l}u_{l}+Eu_{l}'\bar{R}_{l}Eu_{l})\notag\\ &+\sum_{k=l+1}^{N}\hspace{-1mm}E(x_{k}'Q_{k}x_{k}\hspace{-1mm}+\hspace{-1mm}Ex_{k}'\bar{Q}_{k}Ex_{k}\hspace{-1mm}+\hspace{-1mm}u_{k}'R_{k}u_{k}\hspace{-1mm}+\hspace{-1mm}Eu_{k}'\bar{R}_{k}Eu_{k})\notag\\ &+E(x_{N+1}'P_{N+1}x_{N+1}\hspace{-1mm}+\hspace{-1mm}Ex_{N+1}'P_{N+1}Ex_{N+1})\notag\\ &=E\Big\{x_{l}'Q_{l}x_{l}+Ex_{l}'\bar{Q}_{l}Ex_{l}+u_{l}'R_{l}u_{l}+Eu_{l}'\bar{R}_{l}Eu_{l}\notag\\ &+\left[\hspace{-1mm} \begin{array}{cc} x_{l+1} \\ Ex_{l+1} \\ \end{array} \hspace{-1mm}\right]'\lambda_{l}\Big\}, \end{align} Note that \eqref{th4} is assumed to be true for $k=l+1$, i.e., \begin{align}\label{ldan} \lambda_{l}&=\left[ \begin{array}{cc} P_{l+1} & \bar{P}_{l+1}^{(1)} \\ \bar{P}_{l+1}^{(2)} & \bar{P}_{l+1}^{(3)} \\ \end{array} \right]\left[ \begin{array}{cc} x_{l+1} \\ Ex_{l+1} \\ \end{array} \right], \end{align} where $P_{l+1}$ follows the iteration \eqref{th41} and $\bar{P}_{l+1}^{(1)}$, $\bar{P}_{l+1}^{(2)}$, $\bar{P}_{l+1}^{(3)}$ is calculated as \eqref{pk1}-\eqref{pk3} with $N$ replaced by $l+1$, respectively, and $\bar{P}_{l+1}^{(1)}+\bar{P}_{l+1}^{(2)}+\bar{P}_{l+1}^{(3)}=\bar{P}_{l+1}$, where $\bar{P}_{l+1}$ is given as \eqref{th42}. 
By substituting \eqref{ldan} into \eqref{jnnn} and using the system dynamics \eqref{ps1}, $J(l)$ can be calculated as \begin{align}\label{jk0} &~~J(l)\notag\\ &=E(x_{l}'Q_{l}x_{l}+Ex_{l}'\bar{Q}_{l}Ex_{l}+u_{l}'R_{l}u_{l}+Eu_{l}'\bar{R}_{l}Eu_{l}\notag\\ &+x_{l+1}'P_{l+1}x_{l+1}+Ex_{l+1}'\bar{P}_{l+1}Ex_{l+1})\notag\\ &=E\Big\{x_{l}'\left(Q_{l}+A_{l}'P_{l+1}A_{l}+\sigma^{2}C_{l}'P_{l+1}C_{l}\right)x_{l}\notag\\ &+Ex_{l}'\Big[\bar{Q}_{l}+A_{l}'P_{l+1}\bar{A}_{l}+\sigma^{2}C_{l}'P_{l+1}\bar{C}_{l}+\bar{A}_{l}'P_{l+1}A_{l}\notag\\ &~~~~+\sigma^{2}\bar{C}_{l}'P_{l+1}C_{l}+\bar{A}_{l}'P_{l+1}\bar{A}_{l}+\sigma^{2}\bar{C}_{l}'P_{l+1}\bar{C}_{l}\notag\\ &~~~~+(A_{l}+\bar{A}_{l})'\bar{P}_{l+1}(A_{l}+\bar{A}_{l})\Big]Ex_{l}\notag\\ &+x_{l}'\left(A_{l}'P_{l+1}B_{l}+\sigma^{2}C_{l}'P_{l+1}D_{l}\right)u_{l}\notag\\ &+u_{l}'\left(B_{l}'P_{l+1}A_{l}+\sigma^{2}D_{l}'P_{l+1}C_{l}\right)x_{l}\notag\\ &+Ex_{l}'\Big[A_{l}'P_{l+1}\bar{B}_{l}+\sigma^{2}C_{l}'P_{l+1}\bar{D}_{l}+\bar{A}_{l}'P_{l+1}B_{l}\notag\\ &~~~~+\sigma^{2}\bar{C}_{l}'P_{l+1}D_{l}+\bar{A}_{l}'P_{l+1}\bar{B}_{l}+\sigma^{2}\bar{C}_{l}'P_{l+1}\bar{D}_{l}\notag\\ &~~~~+(A_{l}+\bar{A}_{l})'\bar{P}_{l+1}(B_{l}+\bar{B}_{l})\Big]Eu_{l}\notag\\ &+Eu_{l}'\Big[B_{l}'P_{l+1}\bar{A}_{l}+\sigma^{2}D_{l}'P_{l+1}\bar{C}_{l}+\bar{B}_{l}'P_{l+1}A_{l}\notag\\ &~~~~+\sigma^{2}\bar{D}_{l}'P_{l+1}C_{l}+\bar{B}_{l}'P_{l+1}\bar{A}_{l}+\sigma^{2}\bar{D}_{l}'P_{l+1}\bar{C}_{l}\notag\\ &~~~~+(B_{l}+\bar{B}_{l})'\bar{P}_{l+1}(A_{l}+\bar{A}_{l})\Big]Ex_{l}\notag\\ &+u_{l}'\left(R_{l}+B_{l}'P_{l+1}B_{l}+\sigma^{2}D_{l}'P_{l+1}D_{l}\right)u_{l}\notag\\ &+Eu_{l}'\Big[B_{l}'P_{l+1}\bar{B}_{l}+\sigma^{2}D_{l}'P_{l+1}\bar{D}_{l}+\bar{B}_{l}'P_{l+1}B_{l}\notag\\ &~~~~+\sigma^{2}\bar{D}_{l}'P_{l+1}D_{l}+\bar{B}_{l}'P_{l+1}\bar{B}_{l}+\sigma^{2}\bar{D}_{l}'P_{l+1}\bar{D}_{l}\notag\\ &~~~~+\bar{R}_{l}+(B_{l}+\bar{B}_{l})'\bar{P}_{l+1}(B_{l}+\bar{B}_{l})\Big]Eu_{l}\Big\}\notag\\ &=E(x_{l}'P_{l}x_{l}+Ex_{l}'\bar{P}_{l}Ex_{l})\notag\\ 
&+E\Big\{[u_{l}-Eu_{l}-K_{l}(x_{l}-Ex_{l})]'\Upsilon_{l}^{(1)}\notag\\ &~~~~~\times[u_{l}-Eu_{l}-K_{l}(x_{l}-Ex_{l})]\Big\}\notag\\ &+[Eu_{l}\hspace{-1mm}-\hspace{-1mm}(K_{l}\hspace{-1mm}+\hspace{-1mm}\bar{K}_{l})Ex_{l}]' \Upsilon_{l}^{(2)} [Eu_{l}\hspace{-1mm}-\hspace{-1mm}(K_{l}\hspace{-1mm}+\hspace{-1mm}\bar{K}_{l})Ex_{l}], \end{align} where $\Upsilon_{l}^{(1)}$ and $\Upsilon_{l}^{(2)}$ are respectively given by \eqref{upsi1} and \eqref{upsi2} for $k=l$. Equation \eqref{barjk} indicates that $x_{l}$ is the initial state in minimizing $J(l)$. Now we show $\Upsilon_{l}^{(1)}>0$ and $\Upsilon_{l}^{(2)}>0$. We choose $x_{l}=0$, then \eqref{jk0} becomes \begin{align}\label{zhang1} J(l)\hspace{-1mm}=\hspace{-1mm}E\left\{(u_{l}\hspace{-1mm}-\hspace{-1mm}Eu_{l})' \Upsilon_{l}^{(1)}(u_{l}\hspace{-1mm}-\hspace{-1mm}Eu_{l})\hspace{-1mm}+\hspace{-1mm}Eu_{l}'\Upsilon_{l}^{(2)}Eu_{l}\right\}. \end{align} It follows from Assumption \ref{ass1} that the minimum of $J(l)$ satisfies $J^{*}(l)\geq 0$. By \eqref{zhang1}, it is obvious that $u_{l}=0$ is the optimal controller and the associated optimal cost function $J^{*}(l)=0$. The uniqueness of the optimal control implies that for any $u_{l}\neq 0$, $J(l)$ must be strictly positive. Thus, following the discussion of \eqref{barjn1} for $J(N)$, we have $\Upsilon_{l}^{(1)}>0$ and $\Upsilon_{l}^{(2)}>0$. Since $\Upsilon_{l}^{(1)}>0$ and $\Upsilon_{l}^{(2)}>0$, the optimal controller can be given from \eqref{nc2}-\eqref{nc3} as \eqref{th43} for $k=l$, and the optimal cost function is given as \eqref{jnst} for $k=l$. Now we will show that \eqref{th4} associated with \eqref{th41}-\eqref{th42} are true for $k=l$. Since \eqref{th4} is assumed to be true for $k=l+1$, i.e., $\lambda_{l}$ is given by \eqref{ldan}. 
By substituting \eqref{ldan} into \eqref{th32} for $k=l$, and applying the same lines for \eqref{nc8}-\eqref{barpn}, it is easy to verify that \eqref{th4} is true with $P_{l}$ satisfying \eqref{th41} and $\bar{P}_{l}^{(1)}$, $\bar{P}_{l}^{(2)}$, $\bar{P}_{l}^{(3)}$ given as \eqref{pk1}-\eqref{pk3} with $N$ replaced by $l$, furthermore $\bar{P}_{l}^{(1)}+\bar{P}_{l}^{(2)}+\bar{P}_{l}^{(3)}=\bar{P}_{l}$, and $\bar{P}_{l}$ obeys \eqref{th42} for $k=l$. Therefore, the proof of necessity is complete by induction. ``Sufficiency'': Under Assumption \ref{ass1}, suppose $\Upsilon_{k}^{(1)}$ and $\Upsilon_{k}^{(2)}$, $k=0,\cdots,N$ are strictly positive definite, we will show that \emph{Problem 1} is uniquely solvable. $V_{N}(k,x_{k})$ is denoted as \begin{equation}\label{vnn}\begin{split} V_{N}(k,x_{k})\triangleq E(x_{k}'P_{k}x_{k})+Ex_{k}'\bar{P}_{k}Ex_{k}, \end{split}\end{equation} where $P_k$ and $\bar{P}_k$ satisfy \eqref{th41} and \eqref{th42} respectively. It follows that \begin{align}\label{vn} &~~V_{N}(k,x_{k})-V_{N}(k+1,x_{k+1})\notag\\ &=E\Big\{x_{k}'P_{k}x_{k}+Ex_{k}'\bar{P}_{k}Ex_{k}\notag\\ &-x_{k}'\left(A_{k}'P_{k+1}A_{k}+\sigma^{2}C_{k}'P_{k+1}C_{k}\right)x_{k}\notag\\ &-Ex_{k}'\Big[A_{k}'P_{k+1}\bar{A}_{k}+\sigma^{2}C_{k}'P_{k+1}\bar{C}_{k}\notag\\ &~~~~+\bar{A}_{k}'P_{k+1}A_{k}+\sigma^{2}\bar{C}_{k}'P_{k+1}C_{k}\notag\\ &~~~~+\bar{A}_{k}'P_{k+1}\bar{A}_{k}+\sigma^{2}\bar{C}_{k}'P_{k+1}\bar{C}_{k}\notag\\ &~~~~+(A_{k}+\bar{A}_{k})'\bar{P}_{k+1}(A_{k}+\bar{A}_{k})\Big]Ex_{k}\notag\\ &-x_{k}'\left(A_{k}'P_{k+1}B_{k}+\sigma^{2}C_{k}'P_{k+1}D_{k}\right)u_{k}\notag\\ &-u_{k}'\left(B_{k}'P_{k+1}A_{k}+\sigma^{2}D_{k}'P_{k+1}C_{k}\right)x_{k}\notag\\ &-Ex_{k}'\Big[A_{k}'P_{k+1}\bar{B}_{k}+\sigma^{2}C_{k}'P_{k+1}\bar{D}_{k}+\bar{A}_{k}'P_{k+1}B_{k}\notag\\ &~~~~+\sigma^{2}\bar{C}_{k}'P_{k+1}D_{k}+\bar{A}_{k}'P_{k+1}\bar{B}_{k}+\sigma^{2}\bar{C}_{k}'P_{k+1}\bar{D}_{k}\notag\\ &~~~~+(A_{k}+\bar{A}_{k})'\bar{P}_{k+1}(B_{k}+\bar{B}_{k})\Big]Eu_{k}\notag\\ 
&-Eu_{k}'\Big[B_{k}'P_{k+1}\bar{A}_{k}+\sigma^{2}D_{k}'P_{k+1}\bar{C}_{k}+\bar{B}_{k}'P_{k+1}A_{k}\notag\\ &~~~~+\sigma^{2}\bar{D}_{k}'P_{k+1}C_{k}+\bar{B}_{k}'P_{k+1}\bar{A}_{k}+\sigma^{2}\bar{D}_{k}'P_{k+1}\bar{C}_{k}\notag\\ &~~~~+(B_{k}+\bar{B}_{k})'\bar{P}_{k+1}(A_{k}+\bar{A}_{k})\Big]Ex_{k}\notag\\ &-u_{k}'\left(B_{k}'P_{k+1}B_{k}+\sigma^{2}D_{k}'P_{k+1}D_{k}\right)u_{k}\notag\\ &-Eu_{k}'\Big[B_{k}'P_{k+1}\bar{B}_{k}+\sigma^{2}D_{k}'P_{k+1}\bar{D}_{k}+\bar{B}_{k}'P_{k+1}B_{k}\notag\\ &~~~~+\sigma^{2}\bar{D}_{k}'P_{k+1}D_{k}+\bar{B}_{k}'P_{k+1}\bar{B}_{k}+\sigma^{2}\bar{D}_{k}'P_{k+1}\bar{D}_{k}\notag\\ &~~~~+(B_{k}+\bar{B}_{k})'\bar{P}_{k+1}(B_{k}+\bar{B}_{k})\Big]Eu_{k}\Big\}\notag\\ &=E\Big\{x_{k}'\{Q_{k}-[M_{k}^{(1)}]'[\Upsilon_{k}^{(1)}]^{-1}M_{k}^{(1)}\}x_{k}\notag\\ &+Ex_{k}'\Big\{\bar{Q}_{k}+[M_{k}^{(1)}]'[\Upsilon_{k}^{(1)}]^{-1}M_{k}^{(1)}\notag\\ &~~~~~-[M_{k}^{(2)}]'[\Upsilon_{k}^{(2)}]^{-1}M_{k}^{(2)}\Big\}Ex_{k}\notag\\ &-x_{k}'[M_{k}^{(1)}]'u_{k}-u_{k}'M_{k}^{(1)}x_{k}+u_{k}'R_{k}u_{k}+Eu_{k}'\bar{R}_{k}Eu_{k}\notag\\ &-Ex_{k}'[M_{k}^{(2)}-M_{k}^{(1)}]'Eu_{k}-Eu_{k}'[M_{k}^{(2)}-M_{k}^{(1)}]Ex_{k}\notag\\ &-u_{k}'\Upsilon_{k}^{(1)}u_{k}-Eu_{k}'[\Upsilon_{k}^{(2)}-\Upsilon_{k}^{(1)}]Eu_{k}\Big\}\notag\\ &=E\{x_{k}'Qx_{k}+Ex_{k}'\bar{Q}Ex_{k}+u_{k}'Ru_{k}+Eu_{k}'\bar{R}Eu_{k}\}\notag\\ &-E\Big\{[u_{k}-Eu_{k}-K_{k}(x_{k}-Ex_{k})]'\Upsilon_{k}^{(1)}\notag\\ &~~~~~\times[u_{k}-Eu_{k}-K_{k}(x_{k}-Ex_{k})]\Big\}\notag\\ &-[Eu_{k}\hspace{-1.2mm}-\hspace{-1.2mm}(K_{k}\hspace{-1.2mm}+\hspace{-1mm}\bar{K}_{k})Ex_{k}]'\Upsilon_{k}^{(\hspace{-.3mm}2\hspace{-.3mm})} [Eu_{k}\hspace{-1.2mm}-\hspace{-1.2mm}(K_{k}\hspace{-1.2mm}+\hspace{-1.2mm}\bar{K}_{k})Ex_{k}], \end{align} where $K_{k}$ and $\bar{K}_{k}$ are respectively as in \eqref{kk} and \eqref{kkbar}. 
Adding from $k=0$ to $k=N$ on both sides of \eqref{vn}, the cost function \eqref{ps2} can be rewritten as \begin{align}\label{jna} J_{N} &=\sum_{k=0}^{N}E\Big\{\Big[u_{k}-Eu_{k}-K_{k}(x_{k}-Ex_{k})\Big]'\Upsilon_{k}^{(1)}(N)\notag\\ &~~~~~~~~~~~\times\Big[u_{k}-Eu_{k}-K_{k}(x_{k}-Ex_{k})\Big]\Big\}\notag\\ &+\sum_{k=0}^{N}\left[Eu_{k}-(K_{k}+\bar{K}_{k})Ex_{k}\right]'\Upsilon_{k}^{(2)}(N)\notag\\ &~~~~~~~~~~~\times \left[Eu_{k}-(K_{k}+\bar{K}_{k})Ex_{k}\right]\notag\\ &+E(x_{0}'P_{0}x_{0})+Ex_{0}'\bar{P}_{0}Ex_{0}. \end{align} Notice $\Upsilon_{k}^{(1)}>0$ and $\Upsilon_{k}^{(2)}>0$, we have $$J_N\geq E(x_{0}'P_{0}x_{0})+Ex_{0}'\bar{P}_{0}Ex_{0},$$ thus the minimum of $J_{N}$ is given by \eqref{jnst}, i.e., \begin{align*} J^{*}_{N}=E(x_{0}'P_{0}x_{0})+Ex_{0}'\bar{P}_{0}Ex_{0}. \end{align*} In this case the controller will satisfy that \begin{align} u_{k}-Eu_{k}-K_{k}(x_{k}-Ex_{k})&=0,\label{bbb1} \\ Eu_{k}-(K_{k}+\bar{K}_{k})Ex_{k}&=0.\label{bbb2} \end{align} Hence, the optimal controller can be uniquely obtained from \eqref{bbb1}-\eqref{bbb2} as \eqref{th43}. In conclusion, \emph{Problem 1} admits a unique solution. The proof is complete. \end{proof} \section{Proof of Lemma \ref{111}} \begin{proof} Since $K_{k}(N)=-[\Upsilon_{k}^{(1)}(N)]^{-1}M_{k}^{(1)}(N)$, then it holds from \eqref{th41} that \begin{equation*}\begin{split} &~~~[M_{k}^{(1)}(N)]'[\Upsilon_{k}^{(1)}(N)]^{-1}M_{k}^{(1)}(N)\\ &=-[M_{k}^{(1)}(N)]'K_{k}(N)-K_{k}(N)'M_{k}^{(1)}(N)\\ &~~~~-K_{k}(N)'\Upsilon_{k}^{(1)}(N)K_{k}(N). 
\end{split}\end{equation*} Thus, $P_{k}(N)$ in \eqref{th41} can be calculated as \begin{align}\label{pnn1} &~~~P_{k}(N)\notag\\ &=\hspace{-1mm}Q\hspace{-1mm}+\hspace{-1mm}A'P_{k+1}(N)A\hspace{-1mm}+\hspace{-1mm}\sigma^{2}C'P_{k+1}(N)C\hspace{-1mm}+ \hspace{-1mm}[M_{k}^{(1)}(N)]'K_{k}(\hspace{-0.5mm}N\hspace{-0.5mm})\notag\\ &+K_{k}'(N)M_{k}^{(1)}(N)+K_{k}'(N)\Upsilon_{k}^{(1)}(N)K_{k}(N)\notag\\ &=Q+K_{k}'(N)RK_{k}(N)\notag\\ &+[A+BK_{k}(N)]'P_{k+1}(N)[A+BK_{k}(N)]\notag\\ &+\sigma^{2}[C+DK_{k}(N)]'P_{k+1}(N)[C+DK_{k}(N)]. \end{align} Notice from Assumption \ref{ass1} that $Q\geq 0$ and $P_{N+1}(N)=P_{N+1}=0$, \eqref{pnn1} indicates that $P_{N}(N)\geq 0$. Using induction, assume $P_{k}(N)\geq 0$ for $l+1\leq k\leq N$, by \eqref{pnn1}, immediately we can obtain $P_{l}(N)\geq 0$. Therefore, for any $0\leq k\leq N$, $P_{k}(N)\geq 0$. Moreover, using a similar derivation to \eqref{pnn1}, from \eqref{upsi1}-\eqref{h2} we have that \begin{align*} &~~~[M_{k}^{(2)}(N)]'[\Upsilon_{k}^{(2)}(N)]^{-1}M_{k}^{(2)}(N)\\ &=\hspace{-1mm}-\hspace{-0.8mm}[M_{k}^{(2)}(N)]'[K_{k}(N)\hspace{-1mm}+\hspace{-1mm}\bar{K}_{k}(N)]\hspace{-1mm}-\hspace{-1mm}[K_{k}(N)\hspace{-1mm}+\hspace{-1mm}\bar{K}_{k}(N)]'M_{k}^{(2)}\\ &~~~-[K_{k}(N)+\bar{K}_{k}(N)]'\Upsilon_{k}^{(2)}(N)[K_{k}(N)+\bar{K}_{k}(N)]. 
\end{align*} Thus, $P_{k}(N)+\bar{P}_{k}(N)$ can be calculated as \begin{align}\label{pnn2} &~~~P_{k}(N)+\bar{P}_{k}(N)\notag\\ &=Q+\bar{Q}+(A+\bar{A})'[P_{k+1}(N)+\bar{P}_{k+1}(N)](A+\bar{A})\notag\\ &\hspace{-1mm}+\sigma^{2}(C+\bar{C})'P_{k+1}(N)(C+\bar{C})\notag\\ &\hspace{-1mm}-[M_{k}^{(2)}(N)]'\Upsilon_{k}^{(2)}(N)[M_{k}^{(2)}(N)]\notag\\ &\hspace{-1mm}=\hspace{-1mm}Q+\bar{Q}+[K_{k}(N)\hspace{-1mm}+\hspace{-1mm}\bar{K}_{k}(N)]' (R\hspace{-1mm}+\hspace{-1mm}\bar{R})[K_{k}(N)\hspace{-1mm}+\hspace{-1mm}\bar{K}_{k}(N)]\notag\\ &\hspace{-1mm}+\hspace{-1mm}\big\{A\hspace{-1mm}+\hspace{-1mm}\bar{A}\hspace{-1mm} +\hspace{-1mm}(B\hspace{-1mm}+\hspace{-1mm}\bar{B})[K_{k}(N)\hspace{-1mm}+\hspace{-1mm}\bar{K}_{k}(N)]\big\}' [P_{k+1}(N)\hspace{-1mm}+\hspace{-1mm}\bar{P}_{k+1}(\hspace{-0.5mm}N\hspace{-0.5mm})]\notag\\ &\hspace{-1mm}\times\hspace{-1mm}\big\{A\hspace{-1mm}+\hspace{-1mm}\bar{A}\hspace{-1mm} +\hspace{-1mm}(B\hspace{-1mm}+\hspace{-1mm}\bar{B})[K_{k}(N)\hspace{-1mm}+\hspace{-1mm}\bar{K}_{k}(N)]\big\}\notag\\ &\hspace{-1mm}+\sigma^{2}\{C+\bar{C}+(D+\bar{D})[K_{k}(N)+\bar{K}_{k}(N)]\}'P_{k+1}(N)\notag\\ &\hspace{-1mm}\times \{C+\bar{C}+(D+\bar{D})[K_{k}(N)+\bar{K}_{k}(N)]\}. \end{align} Since $Q+\bar{Q}\geq 0$ as in Assumption \ref{ass1}, and $P_{N+1}=\bar{P}_{N+1}=0$, then $P_{N+1}(N)+\bar{P}_{N+1}(N)=P_{N+1}+\bar{P}_{N+1}= 0$. Furthermore, using induction as above, we conclude that $P_{k}(N)+\bar{P}_{k}(N)\geq 0$ for any $0\leq k\leq N$. The proof is complete. \end{proof} \section{Proof of Lemma \ref{lemma2}} \begin{proof} It follows from Lemma \ref{111} that $P_{k}(N)\geq 0$ and $P_{k}(N)+\bar{P}_{k}(N)\geq 0$ for all $N\geq 0$. Via a time-shift, we can obtain $P_{k}(N)=P_{0}(N-k)$. Therefore, what we need to show is that there exists $\bar{N}_{0}>0$ such that $P_{0}(\bar{N}_{0})>0$ and $P_{0}(\bar{N}_{0})+\bar{P}_{0}(\bar{N}_{0})>0$. 
Suppose this is not true, i.e., for arbitrary $N>0$, $P_{0}(N)$ and $P_{0}(N)+\bar{P}_{0}(N)$ are both strictly semi-definite positive. Now we construct two sets as follows, \begin{align} X_{N}^{(1)}&\triangleq \Big\{x^{(1)}:x^{(1)}\neq0, E\{[x^{(1)}]'P_{0}(N)x^{(1)}\}=0,\notag\\ &~~~~~~Ex^{(1)}=0\Big\},\label{zz1}\\ X_{N}^{(2)}&\triangleq \Big\{x^{(2)}: x^{(2)}\neq 0,[x^{(2)}]'[P_{0}(N)+\bar{P}_{0}(N)]x^{(2)}=0,\notag\\ &~~~~~~x^{(2)}=Ex^{(2)} ~\text{is deterministic}\Big\}.\label{zz2} \end{align} From Lemma \ref{lemma01} and Remark \ref{rem1}, we know that $X_{N}^{(1)}$ and $X_{N}^{(2)}$ are not empty. Recall from Theorem \ref{main}, to minimize the cost function \eqref{ps2} with the weighting matrices, coefficient matrices being time-invariant and final condition $P_{N+1}(N)=\bar{P}_{N+1}(N)=0$, the optimal controller is given by \eqref{th43}, and the optimal cost function is presented as \eqref{jnst}, i.e., \begin{align}\label{opti} &~~~J_{N}^{*}\notag\\ &=\min\{\sum_{k=0}^{N}E[x_{k}'Qx_{k}\hspace{-1mm}+\hspace{-1mm}Ex_{k}'\bar{Q}Ex_{k}\hspace{-1mm}+\hspace{-1mm}u_{k}'Ru_{k}\hspace{-1mm}+\hspace{-1mm}Eu_{k}'\bar{R}Eu_{k}]\}\notag\\ &=\sum_{k=0}^{N}E[x_{k}^{*'}Qx_{k}^{*}+Ex_{k}^{*'}\bar{Q}Ex_{k}^{*}+u_{k}^{*'}Ru_{k}^{*}+Eu_{k}^{*'}\bar{R}Eu_{k}^{*}]\notag\\ &=E[x_{0}'P_{0}(N)x_{0}]+(Ex_{0})'\bar{P}_{0}(N)(Ex_{0})\notag\\ &=E[(x_{0}-Ex_{0})'P_{0}(N)(x_{0}-Ex_{0})]\notag\\ &~~~+(Ex_{0})'[P_{0}(N)+\bar{P}_{0}(N)](Ex_{0}). \end{align} In the above equation, $x_{k}^{*}$ and $u_{k}^{*}$ represent the optimal state trajectory and the optimal controller, respectively. Since $J_{N}\leq J_{N+1}$, then for any initial state $x_{0}$, we have $J_{N}^{*}\leq J_{N+1}^{*}$, it holds from \eqref{opti} that \begin{align}\label{opti2} &~~~E[(x_{0}-Ex_{0})'P_{0}(N)(x_{0}-Ex_{0})]\notag\\ &~~~+(Ex_{0})'[P_{0}(N)+\bar{P}_{0}(N)](Ex_{0})\notag\\ &\leq E[(x_{0}-Ex_{0})'P_{0}(N+1)(x_{0}-Ex_{0})]\notag\\ &~~~+(Ex_{0})'[P_{0}(N+1)+\bar{P}_{0}(N+1)](Ex_{0}). 
\end{align} For any initial state $x_{0}\neq 0$ with $Ex_{0}=0$, \eqref{opti2} can be reduced to \begin{equation*} E[x_{0}'P_{0}(N)x_{0}]\leq E[x_{0}'P_{0}(N+1)x_{0}], \end{equation*} i.e., $E\{x_{0}'[P_{0}(N)-P_{0}(N+1)]x_{0}\}\leq 0$. By Lemma \ref{lemma01} and Remark \ref{rem1}, therefore we can obtain \begin{align}\label{pi1} P_{0}(N)\leq P_{0}(N+1), \end{align} which implies that $P_{0}(N)$ increases with respect to $N$. On the other hand, for arbitrary initial state $x_{0}\neq 0$ with $x_0=Ex_{0}$, i.e., $x_{0}\in \mathcal{R}^{n}$ is arbitrary deterministic, equation \eqref{opti2} indicates that \begin{equation*} x_{0}'[P_{0}(N)+\bar{P}_{0}(N)]x_{0}\hspace{-1mm}\leq\hspace{-1mm} x_{0}'[P_{0}(N+1)+\bar{P}_{0}(N+1)]x_{0}. \end{equation*} Note that $x_{0}$ is arbitrary, then using Remark \ref{rem1}, we have \begin{align}\label{pi2} P_{0}(N)+\bar{P}_{0}(N)\leq P_{0}(N+1)+\bar{P}_{0}(N+1), \end{align} which implies that $P_{0}(N)+\bar{P}_{0}(N)$ increases with respect to $N$, too. Furthermore, the monotonic increase of $P_{0}(N)$ and $P_{0}(N)+\bar{P}_{0}(N)$ indicates that \begin{itemize} \item If $E\{[x^{(1)}]'P_{0}(N+1)x^{(1)}\}=0$ holds, then we can conclude $E\{[x^{(1)}]'P_{0}(N)x^{(1)}\}=0$; \item If $[x^{(2)}]'[P_{0}(N+1)+\bar{P}_{0}(N+1)]x^{(2)}=0$, then we can obtain $[x^{(2)}]'[P_{0}(N)+\bar{P}_{0}(N)]x^{(2)}=0$. \end{itemize} i.e., $X_{N+1}^{(1)} \subset X_{N}^{(1)}$ and $X_{N+1}^{(2)} \subset X_{N}^{(2)}$. As $\{X_{N}^{(1)}\}$ and $\{X_{N}^{(2)}\}$ are both non-empty finite dimensional sets, thus \[1\leq \cdots \leq dim(X_{2}^{(1)}) \leq dim(X_{1}^{(1)}) \leq dim(X_{0}^{(1)})\leq n,\] and \[1\leq \cdots \leq dim(X_{2}^{(2)}) \leq dim(X_{1}^{(2)}) \leq dim(X_{0}^{(2)})\leq n,\] where $dim$ means the dimension of the set. 
Hence, there exists positive integer $N_{1}$, such that for any $N>N_{1}$, we can obtain \begin{align*} dim(X_{N}^{(1)})=dim(X_{N_{1}}^{(1)}),~dim(X_{N}^{(2)})=dim(X_{N_{1}}^{(2)}), \end{align*} which leads to $X_{N}^{(1)}=X_{N_{1}}^{(1)}$, and $X_{N}^{(2)}=X_{N_{1}}^{(2)}$, i.e., \begin{align*} \bigcap_{N\geq 0}X_{N}^{(1)}=X_{N_{1}}^{(1)}\neq 0,~\bigcap_{N\geq 0}X_{N}^{(2)}=X_{N_{1}}^{(2)}\neq 0. \end{align*} Therefore, there exists nonzero $x^{(1)}\in X_{N_{1}}^{(1)}$ and $x^{(2)}\in X_{N_{1}}^{(2)}$ satisfying \begin{align} E\{[x^{(1)}]'P_{0}(N)x^{(1)}\}&=0,\label{x1}\\ [x^{(2)}]'[P_{0}(N)+\bar{P}_{0}(N)]x^{(2)}&=0.\label{x2} \end{align} 1) Let the initial state of system \eqref{ps10} be $x_0=x^{(1)}$, where $x^{(1)}$ is as defined in \eqref{zz1}, then from \eqref{opti} and using \eqref{x1}, the optimal value of the cost function can be calculated as \begin{align}\label{jnstar} J_N^{*}&=\sum_{k=0}^{N}E[x_{k}^{*'}Qx_{k}^{*}\hspace{-1mm}+\hspace{-1mm}Ex_{k}^{*'}\bar{Q}Ex_{k}^{*}\hspace{-1mm}+\hspace{-1mm}u_{k}^{*'}Ru_{k}^{*}\hspace{-1mm}+\hspace{-1mm}Eu_{k}^{*'}\bar{R}Eu_{k}^{*}]\notag\\ &=E\{[x^{(1)}]'P_{0}(N)x^{(1)}\}=0, \end{align} where $Ex^{(1)}=0$ has been used in the last equality. Notice that $R>0$, $R+\bar{R}>0$, $Q\geq 0$ and $Q+\bar{Q}\geq 0$, from \eqref{jnstar}, we obtain that \[u_{k}^{*}=0,~Eu_{k}^{*}=0,~0\leq k\leq N,\] and \begin{align*} 0&=E[x_{k}^{*'}Qx_{k}^{*}+Ex_{k}^{*'}\bar{Q}Ex_{k}^{*}],~~0\leq k\leq N,\\ &=E[(x_{k}^{*}-Ex_{k}^{*})'Q(x_{k}^{*}-Ex_{k}^{*})+Ex_{k}^{*'}(Q+\bar{Q})Ex_{k}^{*}], \end{align*} i.e., $Q^{1/2}(x_{k}^{*}-Ex_{k}^{*})=0$, and $(Q+\bar{Q})^{1/2}Ex_{k}^{*}=0$. 
By Assumption \ref{ass3}, $(A,\bar{A},C,\bar{C},\mathcal{Q}^{1/2})$ is exactly observable, i.e., $\left[ \begin{array}{cc} Q& 0\\ 0 & Q+\bar{Q} \\ \end{array} \right]^{1/2}\left[ \begin{array}{cc} \hspace{-1mm} x_{k}-Ex_{k}\hspace{-1mm}\\ \hspace{-1mm} Ex_{k} \hspace{-1mm} \\ \end{array} \right]=0\Rightarrow x_{0}=0$, then we have $x^{(1)}=x_0=Ex_0=0$, which is a contradiction with $x^{(1)}\neq 0$. Thus, there exists $\bar{N}_{0}>0$, such that $P_{0}(\bar{N}_{0})>0$. 2) Let the initial state of system \eqref{ps10} be $x_{0}=x^{(2)}$, where $x^{(2)}$ is given by \eqref{zz2}, then by using \eqref{opti} and \eqref{x2}, the minimum of the cost function can be rewritten as \begin{equation*}\begin{split} J_{N}^{*}&=\sum_{k=0}^{N}E[x_{k}^{*'}Qx_{k}^{*}\hspace{-1mm}+\hspace{-1mm}Ex_{k}^{*'}\bar{Q}Ex_{k}^{*}\hspace{-1mm}+\hspace{-1mm} u_{k}^{*'}Ru_{k}^{*}\hspace{-1mm}+\hspace{-1mm}Eu_{k}^{*'}\bar{R}Eu_{k}^{*}]\\ &=[x^{(2)}]'[P_{0}(N)+\bar{P}_{0}(N)]x^{(2)}=0. \end{split}\end{equation*} Using a similar method to that in 1), by Assumption \ref{ass3}, we can conclude that $x^{(2)}=x_{0}=Ex_{0}=0$, which is a contradiction with $x^{(2)}\neq 0$. In conclusion, there exists $\bar{N}_{0}>0$ such that $P_{0}(\bar{N}_{0})>0$ and $P_{0}(\bar{N}_{0})+\bar{P}_{0}(\bar{N}_{0})>0$. Via a time-shift, hence we have, for any $k\geq 0$, there exists a positive integer $N_{0}\geq 0$ such that $P_{k}(N_{0})>0$ and $P_{k}(N_{0})+\bar{P}_{k}(N_{0})>0$. The proof is complete. \end{proof} \section{Proof of Theorem \ref{theorem2}} \begin{proof} 1) Firstly, from the proof of Lemma \ref{lemma2}, we know that $P_{0}(N)$ and $P_{0}(N)+\bar{P}_{0}(N)$ are monotonically increasing, i.e., for any $N>0$, \begin{align*} P_{0}(N)&\leq P_{0}(N+1),\\ P_{0}(N)+\bar{P}_{0}(N)&\leq P_{0}(N+1)+\bar{P}_{0}(N+1). \end{align*} Next we will show that $P_{0}(N)$ and $P_{0}(N)+\bar{P}_{0}(N)$ are bounded. 
Since system \eqref{ps10} is stabilizable in the mean square sense, there exists $u_{k}$ of the form \begin{equation}\label{uk} u_{k}=Lx_{k}+\bar{L}Ex_{k}, \end{equation} with constant matrices $L$ and $\bar{L}$ such that the closed-loop system \eqref{ps10} satisfies \begin{equation}\label{asy} \lim_{k\rightarrow +\infty}E(x_{k}'x_{k})=0. \end{equation} As $(Ex_{k})'Ex_{k}+E(x_{k}-Ex_{k})'(x_{k}-Ex_{k})=E(x_{k}'x_{k})$, equation \eqref{asy} implies $\lim_{k\rightarrow +\infty}(Ex_{k})'Ex_{k}=0$. Substituting \eqref{uk} into \eqref{ps10}, we can obtain \begin{align} &x_{k+1}=[(A+w_{k}C)+(B+w_{k}D)L]x_{k}\label{xkexk}\\ &~~\hspace{-1mm}+\hspace{-1mm}[(B\hspace{-1mm}+\hspace{-1mm}w_{k}D)\bar{L}\hspace{-1mm}+\hspace{-1mm} (\bar{A}\hspace{-1mm}+\hspace{-1mm}w_{k}\bar{C})\hspace{-1mm}+\hspace{-1mm}(\bar{B}\hspace{-1mm}+\hspace{-1mm}w_{k}\bar{D}) (L\hspace{-1mm}+\hspace{-1mm}\bar{L})]Ex_{k},\notag\\ & Ex_{k+1}=[(A+\bar{A})+(B+\bar{B})(L+\bar{L})]Ex_{k}.\label{exk} \end{align} Denote $ X_{k}\triangleq \left[ \begin{array}{cc} x_{k} \\ Ex_{k} \\ \end{array} \right],$ and $ \mathcal{X}_{k}\triangleq E[X_{k}X_{k}']. 
$ Following from \eqref{xkexk} and \eqref{exk}, it holds \begin{align}\label{xk1} X_{k+1}=\mathcal{A}X_{k}, \end{align} where $\mathcal{A}=\left[ \begin{array}{cc} A_{11} & A_{12} \\ 0 &A_{22} \\ \end{array} \right]$, $A_{11}=(A+w_{k}C)+(B+w_{k}D)L$, $A_{12}=(B\hspace{-1mm}+\hspace{-1mm}w_{k}D)\bar{L}\hspace{-1mm}+\hspace{-1mm} (\bar{A}\hspace{-1mm}+\hspace{-1mm}w_{k}\bar{C})\hspace{-1mm}+\hspace{-1mm}(\bar{B}\hspace{-1mm}+\hspace{-1mm}w_{k}\bar{D}) (L\hspace{-1mm}+\hspace{-1mm}\bar{L}),$ and $A_{22}=(A+\bar{A})+(B+\bar{B})(L+\bar{L}).$ The mean square stabilization of $\lim_{k\rightarrow+\infty}E(x_{k}'x_{k})\hspace{-1mm}=\hspace{-1mm}0$ implies $\lim_{k\rightarrow+\infty}\mathcal{X}_{k}\hspace{-1mm}=\hspace{-1mm}0$, thus, it follows from \cite{rami} that \begin{equation*} \sum_{k=0}^{\infty}E(x_{k}'x_{k})< +\infty,~\text{and}~ \sum_{k=0}^{\infty}(Ex_{k})'(Ex_{k})< +\infty. \end{equation*} Therefore, there exists constant $c$ such that \begin{align}\label{cnc} \sum_{k=0}^{\infty}E(x_{k}'x_{k})\leq cE(x_{0}'x_{0}). 
\end{align} Since $Q\geq 0$, $Q+\bar{Q}\geq 0$, $R>0$ and $R+\bar{R}>0$, thus there exists constant $\lambda$ such that $\left[\hspace{-1mm} \begin{array}{cc} Q & 0 \\ 0 & Q+\bar{Q} \\ \end{array} \hspace{-1mm}\right]\leq \lambda I$ and $\left[\hspace{-1mm} \begin{array}{cc} L'RL & 0 \\ 0 & (L+\bar{L})'(R+\bar{R})(L+\bar{L}) \\ \end{array} \hspace{-1mm}\right]\leq \lambda I$, using \eqref{uk} and \eqref{cnc}, we obtain that \begin{align}\label{jjj} J&=\sum_{k=0}^{\infty}E[x_{k}'Qx_{k}+u_{k}'Ru_{k}+Ex_{k}'\bar{Q}Ex_{k}+Eu_{k}'\bar{R}Eu_{k}]\notag\\ &=\sum_{k=0}^{\infty}E\Big\{x_{k}'(Q+L'RL)x_{k}+Ex_{k}'\big[\bar{Q}+L'R\bar{L}+\bar{L}'RL\notag\\ &+\bar{L}'R\bar{L}+(L+\bar{L})'\bar{R}(L+\bar{L})\big]Ex_{k}\Big\}\notag\\ &=\sum_{k=0}^{\infty}E\Big\{ \left[\hspace{-1mm} \begin{array}{cc} x_{k}\hspace{-1mm}-\hspace{-1mm}Ex_{k} \\ Ex_{k} \\ \end{array} \hspace{-1mm}\right]'\left[\hspace{-1mm} \begin{array}{cc} Q & 0 \\ 0 & Q+\bar{Q} \\ \end{array} \hspace{-1mm}\right]\left[\hspace{-1mm} \begin{array}{cc} x_{k}\hspace{-1mm}-\hspace{-1mm}Ex_{k} \\ Ex_{k} \\ \end{array} \hspace{-1mm}\right]\Big\}\notag\\ &+\sum_{k=0}^{\infty}E\Big\{\left[\hspace{-1mm} \begin{array}{cc} x_{k}\hspace{-1mm}-\hspace{-1mm}Ex_{k} \\ Ex_{k} \\ \end{array} \hspace{-1mm}\right]'\left[ \begin{array}{cc} L'RL & 0 \\ 0 & (L\hspace{-1mm}+\hspace{-1mm}\bar{L})'(R\hspace{-1mm}+\hspace{-1mm}\bar{R})(L\hspace{-1mm}+\hspace{-1mm}\bar{L}) \\ \end{array} \hspace{-1mm}\right]\notag\\ &~~~~~~~\times\left[\hspace{-1mm} \begin{array}{cc} x_{k}\hspace{-1mm}-\hspace{-1mm}Ex_{k} \\ Ex_{k} \\ \end{array} \hspace{-1mm}\right] \Big\}\notag\\ &\leq 2\lambda\sum_{k=0}^{\infty}E[(Ex_{k})'Ex_{k}+(x_{k}-Ex_{k})'(x_{k}-Ex_{k})]\notag\\ &=2\lambda\sum_{k=0}^{\infty}E(x_{k}'x_{k})\leq 2\lambda cE(x_{0}'x_{0}). 
\end{align} On the other hand, by \eqref{jnst}, notice the fact that $$ E[x_{0}'P_{0}(N)x_{0}]+(Ex_{0})'\bar{P}_{0}(N)(Ex_{0})=J_{N}^{*}\leq J, $$ thus, \eqref{jjj} yields \begin{equation}\label{x0} E[x_{0}'P_{0}(\hspace{-0.5mm}N\hspace{-0.5mm})x_{0}]\hspace{-1mm}+\hspace{-1mm}(Ex_{0})'\bar{P}_{0}(\hspace{-0.5mm}N\hspace{-0.5mm})(Ex_{0})\hspace{-1mm} \leq \hspace{-1mm} 2\lambda cE(x_{0}'x_{0}). \end{equation} Now we let the state initial value be random vector with zero mean, i.e., $Ex_{0}=0$, it follows from \eqref{x0} that \begin{equation*} E[x_{0}'P_{0}(N)x_{0}]\leq 2\lambda cE(x_{0}'x_{0}). \end{equation*} Since $x_0$ is arbitrary with $Ex_0=0$, by Lemma \ref{lemma01} and Remark \ref{rem1}, we have \begin{align*} P_{0}(N)\leq 2\lambda cI. \end{align*} Similarly, let the state initial value be arbitrary deterministic i.e., $x_{0}=Ex_{0}$, \eqref{x0} yields that \begin{equation*}\begin{split} x_{0}'[P_{0}(N)+\bar{P}_{0}(N)]x_{0}=J_{N}^{*}\leq J\leq 2\lambda cx_{0}'x_{0}, \end{split}\end{equation*} which implies \begin{equation*} P_{0}(N)+\bar{P}_{0}(N)\leq 2\lambda cI. \end{equation*} Therefore, both $P_{0}(N)$ and $P_{0}(N)+\bar{P}_{0}(N)$ are bounded. Recall that $P_{0}(N)$ and $P_{0}(N)+\bar{P}_{0}(N)$ are monotonically increasing, we conclude that $P_{0}(N)$ and $P_{0}(N)+\bar{P}_{0}(N)$ are convergent, i.e., there exists $P$ and $\bar{P}$ such that \begin{equation*} \lim_{N\rightarrow +\infty}P_{k}(N)=\lim_{N\rightarrow +\infty}P_{0}(N-k)=P, \end{equation*} \begin{equation*} \lim_{N\rightarrow +\infty}\bar{P}_{k}(N)=\lim_{N\rightarrow +\infty}\bar{P}_{0}(N-k)=\bar{P}. 
\end{equation*} Furthermore, in view of \eqref{upsi1}-\eqref{h2}, we know that $\Upsilon_{k}^{(1)}(N)$, $M_{k}^{(1)}(N)$, $\Upsilon_{k}^{(2)}(N)$ and $M_{k}^{(2)}(N)$ are convergent, i.e., \begin{align} \lim_{N\rightarrow+ \infty}\Upsilon_{k}^{(1)}(N)&=\Upsilon^{(1)}\geq R>0,\label{u1}\\ \lim_{N\rightarrow +\infty}M_{k}^{(1)}(N)&=M^{(1)},\label{hhh1}\\ \lim_{N\rightarrow +\infty}\Upsilon_{k}^{(2)}(N)&=\Upsilon^{(2)}\geq R+\bar{R}>0,\label{u2}\\ \lim_{N\rightarrow +\infty}M_{k}^{(2)}(N)&=M^{(2)},\label{hhh2} \end{align} where $\Upsilon^{(1)},M^{(1)},\Upsilon^{(2)},M^{(2)}$ are given by \eqref{up1}-\eqref{hh2}. Taking the limit on both sides of \eqref{th41} and \eqref{th42}, we know that $P$ and $\bar{P}$ satisfy the coupled ARE \eqref{are1}-\eqref{are2}. 2) From Lemma \ref{lemma2}, for any $k\geq 0$, there exists $N_{0}>0$ such that, $P_{k}(N_{0})>0$ and $P_{k}(N_{0})+\bar{P}_{k}(N_{0})>0$, hence we have \begin{align*} &P=\lim_{N\rightarrow +\infty}P_{k}(N)\geq P_{k}(N_{0})>0,\\ &P\hspace{-1mm}+\hspace{-1mm}\bar{P}\hspace{-1mm}=\hspace{-1mm}\lim_{N\rightarrow +\infty}[P_{k}(N)\hspace{-1mm}+\hspace{-1mm}\bar{P}_{k}(N)]\geq P_{k}(N_{0})\hspace{-1mm}+\hspace{-1mm}\bar{P}_{k}(N_{0})>0. \end{align*} This ends the proof. \end{proof} \section{Proof of Theorem \ref{succeed}} \begin{proof} \emph{``Sufficiency''}: Under Assumptions \ref{ass2} and \ref{ass3}, we suppose that $P$ and $\bar{P}$ are the solution of \eqref{are1}-\eqref{are2} satisfying $P>0$ and $P+\bar{P}>0$, we will show \eqref{control} stabilizes \eqref{ps10} in the mean square sense. Similar to \eqref{vnn}, we define the Lyapunov function candidate $V(k,x_{k})$ as \begin{align}\label{lya} V(k,x_{k})&\triangleq E(x_{k}'Px_{k})+Ex_{k}'\bar{P}Ex_{k}. \end{align} Apparently we have \begin{align}\label{vnk} V(k,x_{k})&=E[(x_{k}\hspace{-1mm}-\hspace{-1mm}Ex_{k})'P(x_{k}\hspace{-1mm}-\hspace{-1mm}Ex_{k})\hspace{-1mm}+ \hspace{-1mm}Ex_{k}'(P\hspace{-1mm}+\hspace{-1mm}\bar{P})Ex_{k}]\notag\\ &\geq 0. 
\end{align} We claim that $V(k,x_{k})$ monotonically decreases. Actually, following the derivation of \eqref{vn}, we have \begin{align}\label{lya1} &~~V(k,x_{k})-V(k+1,x_{k+1})\notag\\ &=E[x_{k}'Qx_{k}+Ex_{k}'\bar{Q}Ex_{k}+u_{k}'Ru_{k}+Eu_{k}'\bar{R}Eu_{k}]\notag\\ &~-E\{[u_{k}-Eu_{k}-K(x_{k}-Ex_{k})]'\Upsilon^{(1)}\notag\\ &~~~~~~~~~~~~~~\times[u_{k}-Eu_{k}-K(x_{k}-Ex_{k})]\}\notag\\ &-[Eu_{k}-(K+\bar{K})Ex_{k}]'\Upsilon^{(2)} [Eu_{k}-(K+\bar{K})Ex_{k}]\notag\\ &=E[x_{k}'Qx_{k}+Ex_{k}'\bar{Q}Ex_{k}+u_{k}'Ru_{k}+Eu_{k}'\bar{R}Eu_{k}]\notag\\ &\geq 0,~~k\geq 0, \end{align} where $u_{k}=Kx_{k}+\bar{K}Ex_{k}$ is used in the last identity. The last inequality implies that $V(k,x_{k})$ decreases with respect to $k$, also from \eqref{vnk} we know that $V(k,x_{k})\geq 0$, thus $V(k,x_{k})$ is convergent. Let $l$ be any positive integer, by adding from $k=l$ to $k=l+N$ on both sides of \eqref{lya1}, we obtain that \begin{align}\label{lmi1} &\sum_{k=l}^{l+N}E[x_{k}'Qx_{k}\hspace{-1mm}+\hspace{-1mm}Ex_{k}'\bar{Q}Ex_{k}\hspace{-1mm}+\hspace{-1mm}u_{k}'Ru_{k}\hspace{-1mm}+\hspace{-1mm} Eu_{k}'\bar{R}Eu_{k}]\notag\\ &=[V(l,x_{l})-V(l+N+1,x_{l+N+1})]. \end{align} Since $V(k,x_{k})$ is convergent, then by taking limitation of $l$ on both sides of \eqref{lmi1}, it holds \begin{align}\label{lmi} &\lim_{l\rightarrow+\infty}\sum_{k=l}^{l+N}E[x_{k}'Qx_{k}\hspace{-1mm}+\hspace{-1mm}Ex_{k}'\bar{Q}Ex_{k}\hspace{-1mm}+\hspace{-1mm}u_{k}'Ru_{k}\hspace{-1mm}+\hspace{-1mm} Eu_{k}'\bar{R}Eu_{k}]\notag\\ &=\lim_{l\rightarrow+\infty}[V(l,x_{l})-V(l+N+1,x_{l+N+1})]=0. 
\end{align} Recall from \eqref{opti} that \begin{align} &~~~~J_{N}=\sum_{k=0}^{N}E[x_{k}'Qx_{k}\hspace{-1mm}+\hspace{-1mm}Ex_{k}'\bar{Q}Ex_{k}\hspace{-1mm}+\hspace{-1mm}u_{k}'Ru_{k}\hspace{-1mm}+\hspace{-1mm} Eu_{k}'\bar{R}Eu_{k}]\notag\\ &\geq J_{N}^{*}=E[x_{0}'P_{0}(N)x_{0}]+Ex_{0}'\bar{P}_{0}(N)Ex_{0}.\label{jn} \end{align} Thus, taking limitation on both sides of \eqref{jn}, via a time-shift of $l$ and using \eqref{lmi}, it yields that \begin{align}\label{ly1} &0\hspace{-1mm}=\hspace{-1mm}\lim_{l\rightarrow+\infty}\sum_{k=l}^{l+N}E[x_{k}'Qx_{k}\hspace{-1mm}+\hspace{-1mm}Ex_{k}'\bar{Q}Ex_{k}\hspace{-1mm}+\hspace{-1mm}u_{k}'Ru_{k}\hspace{-1mm}+\hspace{-1mm} Eu_{k}'\bar{R}Eu_{k}]\notag\\ &\geq\lim_{l\rightarrow+\infty} E\left[x_{l}'P_{l}(l+N)x_{l}+Ex_{l}'\bar{P}_{l}(l+N)Ex_{l}\right]\notag\\ &=\lim_{l\rightarrow+\infty} E\Big\{(x_{l}-Ex_{l})'P_{l}(l+N)(x_{l}-Ex_{l})\notag\\ &~~~~~+Ex_{l}'[P_{l}(l+N)+\bar{P}_{l}(l+N)]Ex_{l}\Big\}\notag\\ &=\lim_{l\rightarrow+\infty} E\Big\{(x_{l}-Ex_{l})'P_{0}(N)(x_{l}-Ex_{l})\notag\\ &~~~~~+Ex_{l}'[P_{0}(N)+\bar{P}_{0}(N)]Ex_{l}\Big\}\geq 0. \end{align} Hence, it follows from \eqref{ly1} that \begin{align} \lim_{l\rightarrow+\infty}E[(x_{l}-Ex_{l})'P_{0}(N)(x_{l}-Ex_{l})]&=0,\label{conc1}\\ \lim_{l\rightarrow+\infty}Ex_{l}'[P_{0}(N)+\bar{P}_{0}(N)]Ex_{l}&=0.\label{conc2} \end{align} By Lemma \ref{lemma2}, we know that there exists $N_{0}\geq 0$ such that $P_{0}(N)>0$ and $P_{0}(N)+\bar{P}_{0}(N)>0$ for any $N>N_{0}$. Thus from \eqref{conc1} and \eqref{conc2}, we have \begin{align}\label{geq} \lim_{l\rightarrow+\infty}\hspace{-2mm}E[(x_{l}\hspace{-1mm}-\hspace{-1mm}Ex_{l})'(x_{l}\hspace{-1mm}-\hspace{-1mm}Ex_{l})]\hspace{-1mm}=\hspace{-1mm}0, \lim_{l\rightarrow+\infty}\hspace{-2mm}Ex_{l}'Ex_{l}\hspace{-1mm}=\hspace{-1mm}0, \end{align} which indicates that $\lim_{l\rightarrow+\infty}E(x_{l}'x_{l})=0.$ In conclusion, \eqref{control} stabilizes \eqref{ps10} in the mean square sense. 
Next we will show that controller \eqref{control} minimizes the cost function \eqref{ps200}. For \eqref{lya1}, adding from $k=0$ to $k=N$, we have \begin{align}\label{kon} &\sum_{k=0}^{N}E[x_{k}'Qx_{k}\hspace{-1mm}+\hspace{-1mm}Ex_{k}'\bar{Q}Ex_{k}\hspace{-1mm}+\hspace{-1mm}u_{k}'Ru_{k}\hspace{-1mm}+\hspace{-1mm} Eu_{k}'\bar{R}Eu_{k}]\notag\\ &=V(0,x_{0})-V(N+1,x_{N+1})\notag\\ &+\sum_{k=0}^{N}E\Big\{[u_{k}-Eu_{k}-K(x_{k}-Ex_{k})]'\Upsilon^{(1)}\notag\\ &~~~~~~~~~~~~~~\times[u_{k}-Eu_{k}-K(x_{k}-Ex_{k})]\Big\}\notag\\ &\hspace{-1mm}+\hspace{-1mm}\sum_{k=0}^{N}\hspace{-0.3mm}[Eu_{k}\hspace{-1mm}-\hspace{-1mm}(K\hspace{-1mm}+\hspace{-1mm}\bar{K})Ex_{k}]'\Upsilon^{(\hspace{-0.3mm}2\hspace{-0.3mm})} [Eu_{k}\hspace{-1mm}-\hspace{-1mm}(K\hspace{-1mm}+\hspace{-1mm}\bar{K})Ex_{k}]. \end{align} Moreover, following from \eqref{lya} and \eqref{geq}, we have that \begin{align*} 0&\hspace{-1mm}\leq \hspace{-1mm}\lim_{k\rightarrow+\infty} V(k,x_{k})\hspace{-1mm}=\hspace{-1mm}\lim_{k\rightarrow+\infty} E\{x_{k}'Px_{k}+Ex_{k}'\bar{P}Ex_{k}\}=0. \end{align*} Thus, taking limitation of $N\rightarrow+\infty$ on both sides of \eqref{kon} and noting \eqref{ps200}, we have \begin{align}\label{kon1} &J=\sum_{k=0}^{\infty}E[x_{k}'Qx_{k}\hspace{-1mm}+\hspace{-1mm}Ex_{k}'\bar{Q}Ex_{k}\hspace{-1mm}+\hspace{-1mm}u_{k}'Ru_{k}\hspace{-1mm}+\hspace{-1mm} Eu_{k}'\bar{R}Eu_{k}]\notag\\ &=E[(x_{0}-Ex_{0})'P(x_{0}-Ex_{0})]+Ex_{0}'(P+\bar{P})Ex_{0}\notag\\ &+\sum_{k=0}^{\infty}E\Big\{[u_{k}-Eu_{k}-K(x_{k}-Ex_{k})]'\Upsilon^{(1)}\notag\\ &~~~~~~~~~~~~~~\times[u_{k}-Eu_{k}-K(x_{k}-Ex_{k})]\Big\}\notag\\ &\hspace{-1mm}+\hspace{-1mm}\sum_{k=0}^{\infty}[Eu_{k}\hspace{-1mm}-\hspace{-1mm}(K\hspace{-1mm}+\hspace{-1mm}\bar{K})Ex_{k}]' \Upsilon^{(\hspace{-0.4mm}2\hspace{-0.4mm})} [Eu_{k}\hspace{-1mm}-\hspace{-1mm}(K\hspace{-1mm}+\hspace{-1mm}\bar{K})Ex_{k}]. 
\end{align} Note that $\Upsilon^{(1)}>0$ and $\Upsilon^{(2)}>0$; thus, following the discussion in the sufficiency proof of Theorem \ref{main}, the cost function \eqref{ps200} can be minimized by controller \eqref{control}. Furthermore, directly from \eqref{kon1}, the optimal cost function can be given as \eqref{cost}. \emph{``Necessity"}: Under Assumptions \ref{ass2} and \ref{ass3}, if \eqref{ps10} is stabilizable in the mean square sense, we will show that the coupled ARE \eqref{are1}-\eqref{are2} has a unique solution $P$ and $P+\bar{P}$ satisfying $P>0$ and $P+\bar{P}>0$. The existence of the solution to \eqref{are1}-\eqref{are2} satisfying $P>0$ and $P+\bar{P}>0$ has been verified in Theorem \ref{theorem2}. The uniqueness of the solution remains to be shown. Let $S$ and $\bar{S}$ be another solution of \eqref{are1}-\eqref{are2} satisfying $S>0$ and $S+\bar{S}>0$, i.e., \begin{align} S&=Q+A'SA+\sigma^{2}C'SC-\hspace{-1mm}[T^{(1)}]'[\Delta^{(1)}]^{-1}T^{(1)},\label{are3}\\ \bar{S}&=\bar{Q}+A'S\bar{A}+\sigma^{2}C'S\bar{C}+\bar{A}'SA+\sigma^{2}\bar{C}'SC\notag\\ &~~+\bar{A}'S\bar{A}+\sigma^{2}\bar{C}'S\bar{C}+(A+\bar{A})'\bar{S}(A+\bar{A})\notag\\ &~~+[T^{(1)}]'[\Delta^{(1)}]^{-1}T^{(1)}-[T^{(2)}]'[\Delta^{(2)}]^{-1}T^{(2)},\label{are4} \end{align} where \begin{align*} \Delta^{(1)}&=R+B'SB+\sigma^{2}D'SD,\\ T^{(1)}&=B'SA+\sigma^{2}D'SC,\\ \Delta^{(2)}&=R+\bar{R}+(B+\bar{B})'(S+\bar{S})(B+\bar{B})\\ &~~~~~+\sigma^{2}(D+\bar{D})'S(D+\bar{D}),\\ T^{(2)}&=(B+\bar{B})'(S+\bar{S})(A+\bar{A})\\ &~~~~~+\sigma^{2}(D+\bar{D})'S(C+\bar{C}). \end{align*} Notice that the optimal cost function has been proved to be \eqref{cost}, i.e., \begin{align}\label{cost2} J^{*}&=E(x_{0}'Px_{0})+Ex_{0}'\bar{P}Ex_{0}\notag\\ &=E(x_{0}'Sx_{0})+Ex_{0}'\bar{S}Ex_{0}. 
\end{align} For any initial state $x_{0}$ satisfying $x_{0}\neq 0$ and $Ex_{0}=0$, equation \eqref{cost2} implies that \begin{equation*} E[x_{0}'(P-S)x_{0}]=0. \end{equation*} By Lemma \ref{lemma01} and Remark \ref{rem1}, we can conclude that $P=S$. Moreover, if $x_{0}=Ex_{0}$ is an arbitrary deterministic initial state, it follows from \eqref{cost2} that \begin{equation*} x_{0}'(P+\bar{P}-S-\bar{S})x_{0}=0, \end{equation*} which indicates $P+\bar{P}=S+\bar{S}$. Hence we have $S=P$ and $\bar{S}=\bar{P}$, i.e., the uniqueness has been proven. The proof is complete. \end{proof} \section{Proof of Theorem \ref{succeed2}} \begin{proof} ``Necessity:" Under Assumptions \ref{ass2} and \ref{ass4}, suppose mean-field system \eqref{ps10} is stabilizable in the mean square sense; we will show that the coupled ARE \eqref{are1}-\eqref{are2} has a unique solution $P$ and $\bar{P}$ with $P\geq 0$ and $P+\bar{P}\geq 0$. Actually, from \eqref{opti}-\eqref{pi2} in the proof of Lemma \ref{lemma2}, we know that $P_{0}(N)$ and $P_{0}(N)+\bar{P}_{0}(N)$ are monotonically increasing, then following the lines of \eqref{uk}-\eqref{x0}, the boundedness of $P_{0}(N)$ and $P_{0}(N)+\bar{P}_{0}(N)$ can be obtained. Hence, $P_{0}(N)$ and $P_{0}(N)+\bar{P}_{0}(N)$ are convergent. Then there exist $P$ and $\bar{P}$ such that \begin{align*} \lim_{N\rightarrow+\infty}P_{k}(N)=\lim_{N\rightarrow+\infty}P_{0}(N-k)&=P,\\ \lim_{N\rightarrow+\infty}\bar{P}_{k}(N)=\lim_{N\rightarrow+\infty}\bar{P}_{0}(N-k)&=\bar{P}. \end{align*} From Lemma \ref{111}, we know that $P_{k}(N)\geq 0$ and $P_{k}(N)+\bar{P}_{k}(N)\geq 0$, thus we have $P\geq 0$ and $P+\bar{P}\geq 0$. Furthermore, in view of \eqref{upsi1}-\eqref{h2}, $\Upsilon^{(1)},\Upsilon^{(2)},M^{(1)},M^{(2)}$ in \eqref{up1}-\eqref{hh2} can be obtained. Taking the limit on both sides of \eqref{th41} and \eqref{th42}, we know that $P$ and $\bar{P}$ satisfy the coupled ARE \eqref{are1} and \eqref{are2}. 
Under Assumption \ref{ass2}, Lemma \ref{lemma3} yields that \emph{Problem 1} has a unique solution, then following the steps of \eqref{are3}-\eqref{cost2} in Theorem \ref{succeed}, the uniqueness of $P$ and $\bar{P}$ can be obtained. Finally, taking limitation on both sides of \eqref{th43} and \eqref{jnst}, the unique optimal controller can be given as \eqref{control}, and optimal cost function is presented by \eqref{cost}. The necessity proof is complete. ``Sufficiency:" Under Assumption \ref{ass2} and \ref{ass4}, if $P$ and $\bar{P}$ are the unique solution to \eqref{are1}-\eqref{are2} satisfying $P\geq 0$ and $P+\bar{P}\geq 0$, we will show that \eqref{control} stabilizes system \eqref{ps10} in mean square sense. Following from \eqref{pnn1}-\eqref{pnn2}, the coupled ARE \eqref{are1}-\eqref{are2} can be rewritten as follows: \begin{align} P&=Q+K'RK+(A+BK)'P(A+BK)\notag\\ &+\sigma^{2}(C+DK)'P(C+DK),\label{ly01}\\ P+\bar{P}&=Q+\bar{Q}+(K+\bar{K})'(R+\bar{R})(K+\bar{K})\notag\\ &+[A+\bar{A}+(B+\bar{B})(K+\bar{K})]'(P+\bar{P})\notag\\ &\times[A+\bar{A}+(B+\bar{B})(K+\bar{K})]\notag\\ &+\sigma^{2}[C+\bar{C}+(D+\bar{D})(K+\bar{K})]'P\notag\\ &\times[C+\bar{C}+(D+\bar{D})(K+\bar{K})],\label{ly2} \end{align} in which $K$ and $\bar{K}$ are respectively given as \eqref{K} and \eqref{KK}. Recalling that the Lyapunov function candidate is denoted as in \eqref{lya} and using optimal controller \eqref{control}, we rewrite \eqref{lya1} as \begin{align}\label{lya2} &V(k,x_{k})-V(k+1,x_{k+1})\notag\\ &=E\{x_{k}'(Q+K'RK)x_{k}+Ex_{k}'[\bar{Q}+\bar{K}'RK+K'R\bar{K}\notag\\ &+\bar{K}'R\bar{K}+(K+\bar{K})'\bar{R}(K+\bar{K})]Ex_{k}\}\notag\\ &=E\{(x_{k}-Ex_{k})'(Q+K'RK)(x_{k}-Ex_{k})+Ex_{k}'[Q\notag\\ &+\bar{Q}+(K+\bar{K})'(R+\bar{R})(K+\bar{K})]Ex_{k}\}\notag\\ &=E(\mathbb{X}_{k}'\tilde{\mathcal{Q}}\mathbb{X}_{k})\geq 0. 
\end{align} where $\tilde{\mathcal{Q}}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} Q\hspace{-1mm}+\hspace{-1mm}K'RK& 0\\ 0 & Q\hspace{-1mm}+\hspace{-1mm}\bar{Q}\hspace{-1mm}+\hspace{-1mm} (K\hspace{-1mm}+\hspace{-1mm}\bar{K})'(R\hspace{-1mm}+\hspace{-1mm}\bar{R})(K\hspace{-1mm}+\hspace{-1mm}\bar{K}) \\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}\geq\hspace{-1mm} 0$, and $\mathbb{X}_{k}=\left[ \begin{array}{cc} \hspace{-1mm} x_{k}-Ex_{k}\hspace{-1mm}\\ \hspace{-1mm} Ex_{k} \hspace{-1mm} \\ \end{array} \right]$. Taking summation on both sides of \eqref{lya2} from $0$ to $N$ for any $N>0$, we have that \begin{align}\label{lya3} &~~\sum_{k=0}^{N}E(\mathbb{X}_{k}'\tilde{\mathcal{Q}}\mathbb{X}_{k})=V(0,x_{0})-V(N+1,x_{N+1})\notag\\ &=E(x_{0}'Px_{0})+(Ex_{0})'\bar{P}Ex_{0}\notag\\ &-[E(x_{N+1}'Px_{N+1})+(Ex_{N+1})'\bar{P}Ex_{N+1}]\notag\\ &=E(\mathbb{X}_{0}'\mathbb{P}\mathbb{X}_{0})-E(\mathbb{X}_{N+1}'\mathbb{P}\mathbb{X}_{N+1}), \end{align} in which $\mathbb{P}=\left[ \begin{array}{cc} P& 0\\ 0 & P+\bar{P} \\ \end{array} \right]$. Using the symbols denoted above, mean-field system \eqref{ps10} with controller \eqref{control} can be rewritten as \begin{align}\label{fb} \mathbb{X}_{k+1}&=\tilde{\mathbb{A}}\mathbb{X}_{k}+\tilde{\mathbb{C}}\mathbb{X}_{k}w_{k}, \end{align} where $\tilde{\mathbb{A}}=\left[\hspace{-2mm} \begin{array}{cc} A\hspace{-1mm}+\hspace{-1mm}BK& 0\\ 0 & A\hspace{-1mm}+\hspace{-1mm}\bar{A}\hspace{-1mm}+\hspace{-1mm}(B\hspace{-1mm}+\hspace{-1mm}\bar{B}) (K\hspace{-1mm}+\hspace{-1mm}\bar{K}) \\ \end{array} \hspace{-2mm}\right]$ and $\tilde{\mathbb{C}}=\left[\hspace{-2mm} \begin{array}{cc} C\hspace{-1mm}+\hspace{-1mm}DK& C\hspace{-1mm}+\hspace{-1mm}\bar{C}\hspace{-1mm}+\hspace{-1mm}(D\hspace{-1mm}+\hspace{-1mm}\bar{D}) (K\hspace{-1mm}+\hspace{-1mm}\bar{K})\\ 0 & 0\\ \end{array} \hspace{-2mm}\right]$. 
Thus, the stabilization of system \eqref{ps10} with controller \eqref{control} is equivalent to the stability of system \eqref{fb}, i.e., $(\tilde{\mathbb{A}},\tilde{\mathbb{C}})$ for short. Following the proof of \emph{Theorem 4} and \emph{Proposition 1} in \cite{zhangw}, we know that the exactly detectability of system \eqref{mf}, i.e., $(A,\bar{A},C,\bar{C},\mathcal{Q}^{1/2})$, implies that the following system is exactly detectable \begin{equation}\label{mf01} \left\{ \begin{array}{ll} \mathbb{X}_{k+1}=\tilde{\mathbb{A}}\mathbb{X}_{k}+\tilde{\mathbb{C}}\mathbb{X}_{k}w_{k},\\ \tilde{Y}_{k}=\tilde{\mathcal{Q}}^{1/2}\mathbb{X}_{k}. \end{array} \right. \end{equation} i.e., for any $N\geq 0$, \begin{equation*} \tilde{Y}_{k}= 0, ~\forall~ 0\leq k\leq N~\Rightarrow~\lim_{k\rightarrow+\infty}E(\mathbb{X}_{k}'\mathbb{X}_{k})=0. \end{equation*} Now we will show that the initial state $\mathbb{X}_{0}$ is an unobservable state of system \eqref{mf01}, i.e., $(\tilde{\mathbb{A}},\tilde{\mathbb{C}},\tilde{\mathcal{Q}}^{1/2})$ for simplicity, if and only if $\mathbb{X}_{0}$ satisfies $E(\mathbb{X}_{0}'\mathbb{P}\mathbb{X}_{0})=0$. In fact, if $\mathbb{X}_{0}$ satisfies $E(\mathbb{X}_{0}'\mathbb{P}\mathbb{X}_{0})=0$, from \eqref{lya3} we have \begin{align}\label{lll} 0\leq \sum_{k=0}^{N}E(\mathbb{X}_{k}'\tilde{\mathcal{Q}}\mathbb{X}_{k})=-E(\mathbb{X}_{N+1}'\mathbb{P}\mathbb{X}_{N+1})\leq 0, \end{align} i.e., $\sum_{k=0}^{N}E(\mathbb{X}_{k}'\tilde{\mathcal{Q}}\mathbb{X}_{k})=0$. Thus, we can obtain \begin{equation*} \sum_{k=0}^{N}E(Y_{k}'Y_{k})=\sum_{k=0}^{N}E(\mathbb{X}_{k}'\tilde{\mathcal{Q}}\mathbb{X}_{k})=0, \end{equation*} which means for any $k\geq 0$, $\tilde{Y}_{k}=\tilde{\mathcal{Q}}^{1/2}\mathbb{X}_{k}=0$. Hence, $\mathbb{X}_{0}$ is an unobservable state of system $(\tilde{\mathbb{A}},\tilde{\mathbb{C}},\tilde{\mathcal{Q}}^{1/2})$. 
On the contrary, if we choose $\mathbb{X}_{0}$ as an unobservable state of $(\tilde{\mathbb{A}},\tilde{\mathbb{C}},\tilde{\mathcal{Q}}^{1/2})$, i.e., $\tilde{Y}_{k}=\tilde{\mathcal{Q}}^{1/2}\mathbb{X}_{k}\equiv 0$, $k\geq 0$. Noting that $(\tilde{\mathbb{A}},\tilde{\mathbb{C}},\tilde{\mathcal{Q}}^{1/2})$ is exactly detectable, it holds $\lim_{N\rightarrow +\infty}E(\mathbb{X}_{N+1}'\mathbb{P}\mathbb{X}_{N+1})=0$. Thus, from \eqref{lya3} we can obtain that \begin{equation}\label{130} E(\mathbb{X}_{0}'\mathbb{P}\mathbb{X}_{0})\hspace{-1mm}=\hspace{-1mm} \sum_{k=0}^{\infty}E(\mathbb{X}_{k}'\tilde{\mathcal{Q}}\mathbb{X}_{k})\hspace{-1mm}= \hspace{-1mm}\sum_{k=0}^{\infty}E(\tilde{Y}_{k}'\tilde{Y}_{k})\hspace{-1mm}=\hspace{-1mm}0. \end{equation} Therefore, we have shown that $\mathbb{X}_{0}$ is an unobservable state if and only if $\mathbb{X}_{0}$ satisfies $E(\mathbb{X}_{0}'\mathbb{P}\mathbb{X}_{0})=0$. Next we will show system \eqref{ps10} is stabilizable in mean square sense in two different cases. 1) $\mathbb{P}>0$, i.e., $P>0$ and $P+\bar{P}>0$. In this case, $E(\mathbb{X}_{0}'\mathbb{P}\mathbb{X}_{0})=0$ implies that $\mathbb{X}_{0}=0$, i.e., $x_{0}=Ex_{0}=0$. Following the discussions as above we know that system $(\tilde{\mathbb{A}},\tilde{\mathbb{C}},\tilde{\mathcal{Q}}^{1/2})$ is exactly observable. Thus it follows from Theorem \ref{succeed} that mean-field system \eqref{ps10} is stabilizable in mean square sense. 2) $\mathbb{P}\geq 0$. 
Firstly, it is noticed from \eqref{ly01} and \eqref{ly2} that $\mathbb{P}$ satisfies the following Lyapunov equation: \begin{equation}\label{ly4} \mathbb{P}=\tilde{\mathcal{Q}}+\tilde{\mathbb{A}}'\mathbb{P}\tilde{\mathbb{A}}+\sigma^{2}[\tilde{\mathbb{C}}^{(1)}]'\mathbb{P}\tilde{\mathbb{C}}^{(1)} +\sigma^{2}[\tilde{\mathbb{C}}^{(2)}]'\mathbb{P}\tilde{\mathbb{C}}^{(2)}, \end{equation} where $\tilde{\mathbb{C}}^{(1)}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} C\hspace{-1mm}+\hspace{-1mm}DK\hspace{-2mm}&\hspace{-2mm} 0\\ 0 \hspace{-2mm}&\hspace{-2mm} 0\\ \end{array} \hspace{-2mm}\right]$, $\tilde{\mathbb{C}}^{(2)}\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} 0\hspace{-2mm}&\hspace{-2mm} C\hspace{-1mm}+\hspace{-1mm}\bar{C}\hspace{-1mm}+\hspace{-1mm}(D\hspace{-1mm}+\hspace{-1mm}\bar{D}) (K\hspace{-1mm}+\hspace{-1mm}\bar{K})\\ 0 \hspace{-2mm}&\hspace{-2mm} 0\\ \end{array} \hspace{-2mm}\right]$ and $\tilde{\mathbb{C}}^{(1)}+\tilde{\mathbb{C}}^{(2)}=\tilde{\mathbb{C}}$. Since $\mathbb{P}\geq 0$, thus there exists orthogonal matrix $U$ with $U'=U^{-1}$ such that \begin{align}\label{upu} U'\mathbb{P}U=\left[ \begin{array}{cc} 0& 0\\ 0 & \mathbb{P}_{2} \\ \end{array} \right], \mathbb{P}_{2}>0. \end{align} Obviously from \eqref{ly4} we can obtain that \begin{align}\label{ly5} U'\mathbb{P}U&=U'\tilde{\mathcal{Q}}U+U'\tilde{\mathbb{A}}'U\cdot U'\mathbb{P}U\cdot U'\tilde{\mathbb{A}}U\notag\\ &+\sigma^{2}U'[\tilde{\mathbb{C}}^{(1)}]'U\cdot U'\mathbb{P}U\cdot U'\tilde{\mathbb{C}}^{(1)}U\notag\\ &+\sigma^{2}U'[\tilde{\mathbb{C}}^{(2)}]'U\cdot U'\mathbb{P}U\cdot U'\tilde{\mathbb{C}}^{(2)}U. 
\end{align} Assume $U'\tilde{\mathbb{A}}U=\left[\hspace{-2mm} \begin{array}{cc} \tilde{\mathbb{A}}_{11}\hspace{-2mm}&\hspace{-2mm} \tilde{\mathbb{A}}_{12}\\ \tilde{\mathbb{A}}_{21}\hspace{-2mm}&\hspace{-2mm} \tilde{\mathbb{A}}_{22} \\ \end{array} \hspace{-2mm}\right]$, $U'\tilde{\mathcal{Q}}U=\left[\hspace{-2mm} \begin{array}{cc} \tilde{\mathcal{Q}}_{1}\hspace{-2mm}&\hspace{-2mm} \tilde{\mathcal{Q}}_{12}\\ \tilde{\mathcal{Q}}_{21}\hspace{-2mm}&\hspace{-2mm} \tilde{\mathcal{Q}}_{2} \\ \end{array} \hspace{-2mm}\right]$, $U'\tilde{\mathbb{C}}^{(1)}U=\left[\hspace{-2mm} \begin{array}{cc} \tilde{\mathbb{C}}_{11}^{(1)}\hspace{-2mm}&\hspace{-2mm} \tilde{\mathbb{C}}_{12}^{(1)}\\ \tilde{\mathbb{C}}_{21}^{(1)}\hspace{-2mm}&\hspace{-2mm} \tilde{\mathbb{C}}_{22}^{(1)} \\ \end{array} \hspace{-2mm}\right]$ and $U'\tilde{\mathbb{C}}^{(2)}U=\left[\hspace{-2mm} \begin{array}{cc} \tilde{\mathbb{C}}_{11}^{(2)}\hspace{-2mm}&\hspace{-2mm} \tilde{\mathbb{C}}_{12}^{(2)}\\ \tilde{\mathbb{C}}_{21}^{(2)}\hspace{-2mm}&\hspace{-2mm} \tilde{\mathbb{C}}_{22}^{(2)} \\ \end{array} \hspace{-2mm}\right]$, we have that \begin{align*}U'\tilde{\mathbb{A}}'U\hspace{-1mm}\cdot\hspace{-1mm} U'\mathbb{P}U\hspace{-1mm}\cdot\hspace{-1mm} U'\tilde{\mathbb{A}}U&=\left[ \begin{array}{cc} \tilde{\mathbb{A}}_{21}'\mathbb{P}_{2}\tilde{\mathbb{A}}_{21}& \tilde{\mathbb{A}}_{21}'\mathbb{P}_{2}\tilde{\mathbb{A}}_{22}\\ \tilde{\mathbb{A}}_{22}'\mathbb{P}_{2}\tilde{\mathbb{A}}_{21}& \tilde{\mathbb{A}}_{22}'\mathbb{P}_{2}\tilde{\mathbb{A}}_{22} \\ \end{array} \right],\\ U'\{\tilde{\mathbb{C}}^{(1)}\}'U\hspace{-1mm}\cdot\hspace{-1mm} U'\mathbb{P}U\hspace{-1mm}\cdot\hspace{-1mm} U'\tilde{\mathbb{C}}^{(1)}U&\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} \{\tilde{\mathbb{C}}_{21}^{(1)}\}'\mathbb{P}_{2}\tilde{\mathbb{C}}_{21}^{(1)}\hspace{-3mm}&\hspace{-3mm} \{\tilde{\mathbb{C}}_{21}^{(1)}\}'\mathbb{P}_{2}\tilde{\mathbb{C}}_{22}^{(1)}\hspace{-1mm}\\ 
\{\tilde{\mathbb{C}}_{22}^{(1)}\}'\mathbb{P}_{2}\tilde{\mathbb{C}}_{21}^{(1)}\hspace{-3mm}&\hspace{-3mm} \{\tilde{\mathbb{C}}_{22}^{(1)}\}'\mathbb{P}_{2}\tilde{\mathbb{C}}_{22}^{(1)}\hspace{-1mm} \end{array} \hspace{-2mm}\right]\\ U'\{\tilde{\mathbb{C}}^{(2)}\}'U\hspace{-1mm}\cdot\hspace{-1mm} U'\mathbb{P}U\hspace{-1mm}\cdot\hspace{-1mm} U'\tilde{\mathbb{C}}^{(2)}U&\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} \{\tilde{\mathbb{C}}_{21}^{(2)}\}'\mathbb{P}_{2}\tilde{\mathbb{C}}_{21}^{(2)}\hspace{-3mm}&\hspace{-3mm} \{\tilde{\mathbb{C}}_{21}^{(2)}\}'\mathbb{P}_{2}\tilde{\mathbb{C}}_{22}^{(2)}\hspace{-1mm}\\ \{\tilde{\mathbb{C}}_{22}^{(2)}\}'\mathbb{P}_{2}\tilde{\mathbb{C}}_{21}^{(2)}\hspace{-3mm}&\hspace{-3mm} \{\tilde{\mathbb{C}}_{22}^{(2)}\}'\mathbb{P}_{2}\tilde{\mathbb{C}}_{22}^{(2)}\hspace{-1mm} \end{array} \hspace{-2mm}\right]\end{align*} Thus, by comparing each block element on both sides of \eqref{ly5} and noting $\mathbb{P}_{2}>0$, we have that $\tilde{\mathbb{A}}_{21}=0$, $\tilde{\mathbb{C}}_{21}^{(1)}=\tilde{\mathbb{C}}_{21}^{(2)}=0$ and $\tilde{\mathcal{Q}}_{1}=\tilde{\mathcal{Q}}_{12}=\tilde{\mathcal{Q}}_{21}=0$, i.e., \begin{align}\label{ly7} U'\tilde{\mathbb{A}}U\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} \tilde{\mathbb{A}}_{11}\hspace{-2mm}&\hspace{-2mm} \tilde{\mathbb{A}}_{12}\\ 0\hspace{-2mm}&\hspace{-2mm} \tilde{\mathbb{A}}_{22} \\ \end{array} \hspace{-2mm}\right]\hspace{-1mm}, U'\tilde{\mathbb{C}}U\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} \tilde{\mathbb{C}}_{11}\hspace{-2mm}&\hspace{-2mm} \tilde{\mathbb{C}}_{12}\\ 0\hspace{-2mm}&\hspace{-2mm} \tilde{\mathbb{C}}_{22} \\ \end{array} \hspace{-2mm}\right]\hspace{-1mm},U'\tilde{\mathcal{Q}}U\hspace{-1mm}=\hspace{-1mm}\left[\hspace{-2mm} \begin{array}{cc} 0\hspace{-2mm}&\hspace{-2mm} 0\\ 0 \hspace{-2mm}&\hspace{-2mm} \tilde{\mathcal{Q}}_{2} \\ \end{array} \hspace{-2mm}\right], \end{align} where $\tilde{\mathcal{Q}}_{2}\geq 0$, $ 
\tilde{\mathbb{C}}_{11}= \tilde{\mathbb{C}}_{11}^{(1)}+ \tilde{\mathbb{C}}_{11}^{(2)}$, $ \tilde{\mathbb{C}}_{12}= \tilde{\mathbb{C}}_{12}^{(1)}+ \tilde{\mathbb{C}}_{12}^{(2)}$ and $ \tilde{\mathbb{C}}_{22}= \tilde{\mathbb{C}}_{22}^{(1)}+ \tilde{\mathbb{C}}_{22}^{(2)}$. Substituting \eqref{upu} and \eqref{ly7} into \eqref{ly5} yields that \begin{equation}\label{ly6} \mathbb{P}_{2}=\tilde{\mathcal{Q}}_{2}+\tilde{\mathbb{A}}_{22}' \mathbb{P}_{2}\tilde{\mathbb{A}}_{22}+\sigma^{2}\{\tilde{\mathbb{C}}_{22}^{(1)}\}' \mathbb{P}_{2}\tilde{\mathbb{C}}_{22}^{(1)}+ \sigma^{2}\{\tilde{\mathbb{C}}_{22}^{(2)}\}' \mathbb{P}_{2}\tilde{\mathbb{C}}_{22}^{(2)}. \end{equation} Define $U'\mathbb{X}_{k}=\bar{\mathbb{X}}_{k}=\left[\hspace{-2mm} \begin{array}{cc} \bar{\mathbb{X}}_{k}^{(1)}\hspace{-1mm}\\ \bar{\mathbb{X}}_{k}^{(2)} \hspace{-1mm} \\ \end{array} \hspace{-2mm}\right]$, where the dimension of $\bar{\mathbb{X}}_{k}^{(2)} $ is the same as the rank of $\mathbb{P}_{2}$. Thus, from \eqref{fb} we have \begin{align*} U'\mathbb{X}_{k+1}&=U'\tilde{\mathbb{A}}UU'\mathbb{X}_{k}+U'\tilde{\mathbb{C}}UU'\mathbb{X}_{k}w_{k},\end{align*} i.e., \begin{align} \bar{\mathbb{X}}_{k+1}^{(\hspace{-0.3mm}1\hspace{-0.3mm})}&=\tilde{\mathbb{A}}_{11}\bar{\mathbb{X}}_{k}^{(\hspace{-0.3mm}1\hspace{-0.3mm})} \hspace{-1mm}+\hspace{-1mm}\tilde{\mathbb{A}}_{12}\bar{\mathbb{X}}_{k}^{(\hspace{-0.3mm}2\hspace{-0.3mm})} \hspace{-1mm}+\hspace{-1mm}(\tilde{\mathbb{C}}_{11}\bar{\mathbb{X}}_{k}^{(\hspace{-0.3mm}1\hspace{-0.3mm})}\hspace{-1mm}+\hspace{-1mm}\tilde{\mathbb{C}}_{12}\bar{\mathbb{X}}_{k}^{(\hspace{-0.3mm}2\hspace{-0.3mm})})w_{k},\label{lly1}\\ \bar{\mathbb{X}}_{k+1}^{(2)}&=\tilde{\mathbb{A}}_{22}\bar{\mathbb{X}}_{k}^{(2)}+\tilde{\mathbb{C}}_{22}\bar{\mathbb{X}}_{k}^{(2)}w_{k}.\label{lly2} \end{align} Next we will show the stability of $(\tilde{\mathbb{A}}_{22},\tilde{\mathbb{C}}_{22})$. 
Actually, recall from \eqref{lya3} and \eqref{ly7}, we have that \begin{align}\label{lya30} &~~\sum_{k=0}^{N}E[(\bar{\mathbb{X}}_{k}^{(2)})'\tilde{\mathcal{Q}}_{2}\bar{\mathbb{X}}_{k}^{(2)}]=\sum_{k=0}^{N}E(\mathbb{X}_{k}'\tilde{\mathcal{Q}}\mathbb{X}_{k})\notag\\ &=E(\mathbb{X}_{0}'\mathbb{P}\mathbb{X}_{0})-E(\mathbb{X}_{N+1}'\mathbb{P}\mathbb{X}_{N+1})\notag\\ &=E[(\bar{\mathbb{X}}_{0}^{(2)})'\mathbb{P}_{2}\bar{\mathbb{X}}_{0}^{(2)}]-E[(\bar{\mathbb{X}}_{N+1}^{(2)})'\mathbb{P}_{2}\bar{\mathbb{X}}_{N+1}^{(2)}]. \end{align} Similar to the discussions from \eqref{lll} to \eqref{130}, we conclude $\bar{\mathbb{X}}_{0}^{(2)}$ is an unobservable state of $(\tilde{\mathbb{A}}_{22},\tilde{\mathbb{C}}_{22},\tilde{\mathcal{Q}}_{2}^{1/2})$ if and only if $\bar{\mathbb{X}}_{0}^{(2)}$ obeys $E[(\bar{\mathbb{X}}_{0}^{(2)})'\mathbb{P}_{2}\bar{\mathbb{X}}_{0}^{(2)}]=0$. Since $\mathbb{P}_{2}>0$, thus $(\tilde{\mathbb{A}}_{22},\tilde{\mathbb{C}}_{22},\tilde{\mathcal{Q}}_{2}^{1/2})$ is exactly observable as discussed in 1). Therefore, following from Theorem \ref{succeed}, we know that \begin{equation}\label{l2l} \lim_{k\rightarrow +\infty}E(\bar{\mathbb{X}}_{k}^{(2)})'\bar{\mathbb{X}}_{k}^{(2)}=0, \end{equation} i.e., $(\tilde{\mathbb{A}}_{22},\tilde{\mathbb{C}}_{22})$ is stable in mean square sense. Thirdly, the stability of $(\tilde{\mathbb{A}}_{11},\tilde{\mathbb{C}}_{11})$ will be shown as below. We might as well choose $\bar{\mathbb{X}}_{0}^{(2)}=0$, then from \eqref{lly2} we have $\bar{\mathbb{X}}_{k}^{(2)}=0$ for any $k\geq 0$. In this case, \eqref{lly1} becomes \begin{equation}\label{zz} \mathbb{Z}_{k+1}=\tilde{\mathbb{A}}_{11}\mathbb{Z}_{k}+\tilde{\mathbb{C}}_{11}\mathbb{Z}_{k}w_{k}, \end{equation} where $\mathbb{Z}_{k}$ is the value of $\bar{\mathbb{X}}_{k}^{(1)}$ with $\bar{\mathbb{X}}_{k}^{(2)}=0$. 
Thus, for arbitrary initial state $\mathbb{Z}_{0}=\bar{\mathbb{X}}_{0}^{(1)}$, we have \begin{equation}\label{lly3} E[\tilde{Y}_{k}'\tilde{Y}_{k}]=E[\mathbb{X}_{k}'\tilde{\mathcal{Q}}\mathbb{X}_{k}]=E[(\bar{\mathbb{X}}_{k}^{(2)})'\tilde{\mathcal{Q}}_{2}\bar{\mathbb{X}}_{k}^{(2)}]\equiv 0. \end{equation} From the exactly detectability of $(\tilde{\mathbb{A}},\tilde{\mathbb{C}},\tilde{\mathcal{Q}}^{1/2})$, it holds \begin{align}\label{xxx} \lim_{k\rightarrow +\infty}\hspace{-2mm}E(\bar{\mathbb{X}}_{k}'\bar{\mathbb{X}}_{k})\hspace{-1mm}=\hspace{-1mm}\lim_{k\rightarrow +\infty}\hspace{-2mm}E(\bar{\mathbb{X}}_{k}'U'U\bar{\mathbb{X}}_{k})\hspace{-1mm}=\hspace{-1mm}\lim_{k\rightarrow +\infty}\hspace{-2mm}E(\mathbb{X}_{k}'\mathbb{X}_{k})\hspace{-1mm}=\hspace{-1mm}0. \end{align} Therefore, in the case of $\bar{\mathbb{X}}_{0}^{(2)}=0$, \eqref{xxx} indicates that \begin{align}\label{l1l} &~~ \lim_{k\rightarrow +\infty}\hspace{-2mm}E(\mathbb{Z}_{k}'\mathbb{Z}_{k})\hspace{-1mm}=\hspace{-1mm} \lim_{k\rightarrow +\infty}\hspace{-2mm} E[(\bar{\mathbb{X}}_{k}^{(1)})'\bar{\mathbb{X}}_{k}^{(1)}]\\ &\hspace{-1mm}=\hspace{-2mm}\lim_{k\rightarrow +\infty}\hspace{-2mm} \{E[(\bar{\mathbb{X}}_{k}^{(\hspace{-0.3mm}1\hspace{-0.3mm})})'\bar{\mathbb{X}}_{k}^{(\hspace{-0.3mm}1\hspace{-0.3mm})}] \hspace{-1mm}+\hspace{-1mm}E[(\bar{\mathbb{X}}_{k}^{(\hspace{-0.3mm}2\hspace{-0.3mm})})'\bar{\mathbb{X}}_{k}^{(\hspace{-0.3mm}2\hspace{-0.3mm})}]\} \hspace{-1mm}=\hspace{-2mm}\lim_{k\rightarrow +\infty}\hspace{-2mm}E(\bar{\mathbb{X}}_{k}'\bar{\mathbb{X}}_{k})\hspace{-1mm}=\hspace{-1mm}0.\notag \end{align} i.e., $(\tilde{\mathbb{A}}_{11},\tilde{\mathbb{C}}_{11})$ is mean square stable. Finally we will show that system \eqref{ps10} is stabilizable in mean square sense. 
In fact, we denote $\tilde{\mathcal{A}}=\left[\hspace{-2mm} \begin{array}{cc} \tilde{\mathbb{A}}_{11}\hspace{-2mm}&\hspace{-2mm} 0\\ 0\hspace{-2mm}& \hspace{-2mm}\tilde{\mathbb{A}}_{22} \\ \end{array} \hspace{-2mm}\right]$, $\tilde{\mathcal{C}}=\left[\hspace{-2mm} \begin{array}{cc} \tilde{\mathbb{C}}_{11}\hspace{-2mm}&\hspace{-2mm} 0\\ 0\hspace{-2mm}&\hspace{-2mm} \tilde{\mathbb{C}}_{22} \\ \end{array} \hspace{-2mm}\right]$. Hence, \eqref{lly1}-\eqref{lly2} can be reformulated as \begin{align}\label{133} \bar{\mathbb{X}}_{k+1}\hspace{-1mm}=\hspace{-1mm}\{\tilde{\mathcal{A}}\bar{\mathbb{X}}_{k}+\left[\hspace{-1mm} \begin{array}{cc} \tilde{\mathbb{A}}_{12}\\ 0\\ \end{array} \hspace{-1mm}\right]\mathbb{U}_{k}\}\hspace{-1mm}+\hspace{-1mm}\{\tilde{\mathcal{C}}\bar{\mathbb{X}}_{k}\hspace{-1mm}+\hspace{-1mm}\left[\hspace{-1mm} \begin{array}{cc} \tilde{\mathbb{C}}_{12}\\ 0\\ \end{array} \hspace{-1mm}\right]\mathbb{U}_{k}\}w_{k}, \end{align} where $\mathbb{U}_{k}$ is the solution to equation \eqref{lly2} with initial condition $\mathbb{U}_{0}=\bar{\mathbb{X}}_{0}^{(2)}$. The stability of $(\tilde{\mathbb{A}}_{11},\tilde{\mathbb{C}}_{11})$ and $(\tilde{\mathbb{A}}_{22},\tilde{\mathbb{C}}_{22})$ as proved above indicates that $(\tilde{\mathcal{A}},\tilde{\mathcal{C}})$ is stable in mean square sense. Obviously from \eqref{l2l} it holds $\lim_{k\rightarrow +\infty}E(\mathbb{U}_{k}'\mathbb{U}_{k})=0$ and $\sum_{k=0}^{\infty}E(\mathbb{U}_{k}'\mathbb{U}_{k})<+\infty$. By using \emph{Proposition 2.8} and \emph{Remark 2.9} in \cite{abb}, we know that there exists a constant $c_{0}$ such that \begin{align}\label{xu} \sum_{k=0}^{\infty}E(\bar{\mathbb{X}}_{k}'\bar{\mathbb{X}}_{k})<c_{0}\sum_{k=0}^{\infty}E(\mathbb{U}_{k}'\mathbb{U}_{k})<+\infty. \end{align} Hence, $\lim_{k\rightarrow +\infty}E(\bar{\mathbb{X}}_{k}'\bar{\mathbb{X}}_{k})=0$ can be obtained from \eqref{xu}. 
Furthermore, it is noted from \eqref{xxx} that \begin{align*} &\lim_{k\rightarrow +\infty}\hspace{-2mm}E(x_{k}'x_{k})\hspace{-1mm}=\hspace{-1mm}\lim_{k\rightarrow +\infty}\hspace{-2mm}[(x_{k}\hspace{-1mm}-\hspace{-1mm}Ex_{k})'(x_{k}\hspace{-1mm}-\hspace{-1mm}Ex_{k})\hspace{-1mm}+\hspace{-1mm}Ex_{k}'Ex_{k}]\notag\\ &=\lim_{k\rightarrow +\infty}\hspace{-2mm}E(\mathbb{X}_{k}'\mathbb{X}_{k})\hspace{-1mm}=\hspace{-1mm}\lim_{k\rightarrow +\infty}\hspace{-2mm}E(\bar{\mathbb{X}}_{k}'\bar{\mathbb{X}}_{k})\hspace{-1mm}=\hspace{-1mm}0. \end{align*} Note that system $(\tilde{\mathbb{A}},\tilde{\mathbb{C}})$ given in \eqref{fb} is exactly mean-field system \eqref{ps10} with controller \eqref{control}. In conclusion, mean-field system \eqref{ps10} can be stabilizable in the mean square sense. The proof is complete. \end{proof} \ifCLASSOPTIONcaptionsoff \fi \end{document}
\begin{document} \title{A Family of Almost Invertible Infinite Matrices} \author{Daniel P. Bossaller} \address{Department of Mathematics and Computer Science, John Carroll University, University Heights, OH, 44118} \email{[email protected]} \maketitle \begin{abstract} An algebraic analogue of the family of Fredholm operators is introduced for the family of row and column finite matrices, dubbed ``Fredholm matrices." In addition, a measure is introduced which indicates how far a Fredholm matrix is from an invertible matrix. It is further shown that this measure respects multiplication, is invariant under perturbation by a matrix from $M_\infty(K)$, and is invariant under conjugation by an invertible row and column finite matrix. \end{abstract} \section{Introduction} In the study of rings and algebras, the family of invertible elements is of keen interest, mainly because the presence (or lack) of units is the primary dividing line between rings and fields. As such, many articles have been written studying this family of elements and various generalizations of invertibility. In many cases these articles have opened up new lines of inquiry in ring theory. For example, in \cite{onesidedinverses}, Jacobson studied elements which were one-sided invertible, that is, there exist elements $x$ and $y$ such that $xy = 1$ but $yx \neq 1$. From this work grew the study of directly finite algebras (where every element is left and right invertible) and one of the most well-known open problems in non-commutative ring theory, the Kaplansky Direct Finiteness Conjecture: Every group algebra $KG$ is directly finite, \cite{kapfield}. Another example of the primacy of invertibility and its generalizations can be found in the theory of von Neumann regular rings. These are rings where every element $x$ satisfies the following weak invertibility property: there is an element $y$ such that $xyx = x$. This is a broader set of objects than the one-sided invertible elements. 
In fact, one can see that any one-sided invertible element is von Neumann regular. However, the converse is not true; any idempotent is von Neumann regular but not necessarily one-sided invertible, for example. See \cite{goodearl} for a wide survey on this topic. Notions of invertibility related to von Neumann regularity include strong regularity (also called local invertibility by the author in \cite{BossallerLopez2}), $\pi$-regularity, Drazin inverses, and Moore-Penrose inverses. See \cite{generalizedinverses} for a treatment of the latter two topics and many other invertibility-adjacent topics. This note introduces a family of nearly invertible infinite matrices which is very closely tied to the one-sided invertibility of Jacobson. These ``algebraically Fredholm" matrices are inspired by their Fredholm operator counterparts in the theory of $C^*$-algebras. These are the bounded linear operators $T$ such that $T + K$ is invertible for some compact operator $K$. For an introduction to and explanation of the importance of Fredholm operators in functional analysis, see \cite{schechter}; many of the results of the present note are inspired by Schechter's treatment of this topic. In the purely algebraic case, i.e., removing the notion of convergence, the analogue of the set of compact linear operators on a Hilbert space $\mathcal H$, $\mathcal K(\mathcal H)$, is the set of infinite matrices indexed by $\mathbb{Z}^+ \times \mathbb{Z}^+$ over the field $\mathbb{C}$ with only finitely many nonzero entries, which will be denoted by $M_\infty(\mathbb{C})$. It can be shown that $\mathcal K(\mathcal H)$ is the completion, in the appropriate norm, of $M_\infty(\mathbb{C})$. In a similar way, the algebraic analogue of the bounded operators are the row and column finite matrices, $\rcfm \mathbb{C}$. These are the infinite matrices over $\mathbb{C}$ where each row and column has only finitely many nonzero entries. 
It is straightforward to see that $M_\infty(\mathbb{C})$ is an ideal of $\rcfm \mathbb{C}$. Furthermore, as shown in \cite{regmul}, $\rcfm \mathbb{C}$ is the largest unital $\mathbb{C}$-algebra which contains $M_\infty(\mathbb{C})$ as an essential ideal, that is, any nonzero ideal $J$ has non-trivial intersection with $M_\infty(\mathbb{C})$. We define the quotient algebra \[Q(\mathbb{C}) = \rcfm \mathbb{C}/M_\infty(\mathbb{C}).\] The results of this article do not require the fact that $\mathbb{C}$ is algebraically closed. Due to this, we assume that the entries of our matrices come from some field $K$ of characteristic zero. \section{Algebraic Fredholm Theory} In many articles concerning the classification of extensions of $C^*$-algebras, for instance \cite{paschkesalinas}, the Fredholm index of an operator is used to parameterize the various extensions of an algebra $A$ by $B$. The goal of this article is to introduce an algebraic version of this index by examining matrices $A \in \rcfm K$ whose image under the canonical surjection $\pi\colon \rcfm K \rightarrow \rcfm K / M_\infty(K)$ is invertible. First, let us establish the following, easily verified facts about $M_\infty(K)$. Suppose that $A \in \rcfm K$; we adopt the notation that $\mathcal L_A$ and $\mathcal R_A$ denote the linear transformations (on a countably infinite dimensional, $K$-vector space $V$), $\mathcal L_A(x) = Ax$ and $\mathcal R_A(y) = yA$ for a column vector $x$ and row vector $y$. \begin{lemma} Let $A$ be any infinite matrix. \begin{enumerate} \item $A \in M_\infty(K)$ if, and only if, $\mathcal L_A$ and $\mathcal R_A$ are linear transformations of $V$ and $\dim(\im \mathcal L_A) < \infty$ and $\dim(\im \mathcal R_A) < \infty$. \item $M_\infty(K)$ is the unique minimal ideal of $\rcfm K$. \end{enumerate} \end{lemma} \begin{proof} The first is established through straightforward calculation; it then follows that $M_\infty(K)$ is an ideal of $\rcfm K$. 
To show minimality of $M_\infty(K)$, suppose there were an ideal $\{0\} \neq I \subsetneq M_\infty(K)$. Then there must be some nonzero element $a \in I$ such that $\rcfm K a \rcfm K \neq \{0\}$ for otherwise, $\mathcal L_a(x) = 0$ and $\mathcal R_a(x) = 0$ for all $x \in \rcfm K$, which implies that $a = 0$ since $I_{\infty} \in \rcfm K$. Write $a$ in terms of the matrix units of $M_\infty(K)$: \[a = k_{ij} e_{ij} + \sum_{mn} k_{mn} e_{mn}\] such that $m \neq i$ or $n \neq j$. Then any matrix unit $e_{kl}$ may be written as $e_{kl} = k_{ij}^{-1}e_{ki}ae_{jl}$. Thus the two-sided ideal generated by $a$ must equal $M_\infty(K)$. Now suppose that there were another minimal ideal $J$ of $\rcfm K$. Let $a \in J$. By construction, there is at least one $e_{ii} \in M_\infty(K)$ such that $e_{ii}a \neq 0$, thus $e_{ii}a \in M_\infty(K) \cap J$, which is an ideal of $\rcfm K$ contained in $M_\infty(K)$. This is a contradiction of the minimality of $M_\infty(K)$ unless $J = M_\infty(K)$. \end{proof} \begin{defn} A matrix $A \in \rcfm K$ is called \textbf{algebraically Fredholm} if the image of $A$ under the natural surjection $\pi\colon \rcfm K \rightarrow Q(K)$ is invertible in $Q(K)$. In the remainder of this article, we will refer to such matrices simply as ``Fredholm matrices." \end{defn} In other words, $A$ is Fredholm if and only if there exist $A_1, A_2 \in \rcfm K$ and $S_1, S_2 \in M_\infty(K)$ such that $AA_1 = I_\infty - S_1$ and $A_2 A = I_\infty - S_2$. A consequence of this characterization: since $A_1$ and $A_2$ differ only by some element $R \in M_\infty(K)$, we can select some $A_0$ which functions as both a left and a right Fredholm inverse. That is, there exist $A_0$, $S_1$ and $S_2$ such that $A A_0 = I_\infty + S_1$ and $A_0 A = I_\infty + S_2$. Note that this also implies that Fredholm inverses are unique up to perturbation by some element of $M_\infty(K)$; when we say ``the" Fredholm inverse of a matrix, it is understood within this context. 
\begin{prop} The family of Fredholm matrices is closed under multiplication. \end{prop} \begin{proof} Say that $A$ and $B$ are matrices such that $\overline A$ and $\overline B$ are invertible in $Q(K)$. Suppose $A$ has Fredholm inverse $A_0$ and $B$ has Fredholm inverse $B_0$. Then consider $\overline{AB} = \bar{A} \bar{B}$ which is clearly invertible in $Q(K)$ with Fredholm inverse $\bar{B_0}\bar{A_0}$. \end{proof} In the theory of Banach algebras, Fredholm operators are defined as those operators $T$ with closed range such that $\dim(\ker(T))$ and $\dim(\ker(T'))$ are finite (where $T'$ is the Hilbert space adjoint of $T$). The following result establishes that if a matrix is Fredholm, then $\dim(\ker(T))$ and $\dim(V/TV)$ are finite. Furthermore, using this fact we will introduce the ``index" of a matrix which will function as a measurement for how far a given Fredholm matrix is from being invertible. In this article, we will use the notation $\im(A)$, $\ker(A)$, and $\coker(A)$ for the image, kernel, and cokernel of the linear transformation $\mathcal L_{A}$. \begin{lemma}\label{fredholm dimension} Let $A \in \rcfm K$ be a Fredholm matrix. Then $\ker(A)$ and $\coker(A)$ are finite dimensional subspaces of $V$. \end{lemma} \begin{proof}If $A$ is Fredholm, then there exists $A_0\in \rcfm K$ and $R, S \in M_\infty(K)$ be matrices such that $A_0A = I_\infty - R$ and $AA_0 = I_\infty - S$. To show that $\ker(A)$ is finite dimensional, suppose that there is an infinite, linearly independent set of elements in $\ker(A)$, $\{b_i \; | \; i \in \mathbb{Z}^+\}$. Then $Ab_i = 0$ for each $i \in \mathbb{Z}^+$. Construct a matrix $B = (b_1 \; |\; b_2\; |\; \cdots\; )$. By construction $AB = 0$. Then \[0 = A_0(AB) = (A_0A)B = B + RB.\] Thus $B = -RB$; this is a contradiction since $RB$ is a matrix with finitely many nonzero rows, but $B$, being constructed from an infinite linearly independent set of vectors, must have infinitely many nonzero rows. 
Thus for a Fredholm matrix $A$, $\ker(A)$ must be finite dimensional. For the other claim, first note that $\im(AA') \subseteq \im(A)$; thus $\im(I_\infty - S) \subseteq \im(A)$ which means that $\coker(A) \subseteq \coker(I_\infty - S)$. Thus \[\dim(\coker(A)) \leq \dim(\coker(I_\infty - S)).\] We claim that $\dim(\coker(I_\infty - S))$ is finite-dimensional. Note that \[\coker(I_\infty - S) = V / \im(I_\infty - S) = \{v \in V \; | \; v = S v\} \subseteq \im(S).\] Since the dimension of $\im(S)$ is finite, the dimension of the cokernel must be finite also, which proves the claim. \end{proof} \begin{rmk} In the study of Fredholm operators in functional analysis, the corresponding result to the previous is a bijection. However, it is an open question as to whether this result can be extended similarly. The key problem is the existence of row and column finite matrices which are not invertible in $\rcfm K$ but are invertible in $\cfm K$. An example of one such pair of matrices is the following: \[P = \begin{pmatrix} 1 &-1 &0 &\cdots\\ 0 &1 &-1 &\cdots\\ 0 &0 &1 &\cdots\\ \vdots &\vdots &\vdots &\ddots \end{pmatrix} \text{ and } P^{-1} = \begin{pmatrix} 1 &1 &1 &\cdots\\ 0 &1 &1 &\cdots\\ 0 &0 &1 &\cdots\\ \vdots &\vdots &\vdots &\ddots \end{pmatrix}.\] In the theory of Banach spaces, one may appeal to the Bounded Inverse Theorem which guarantees that a bounded operator $T: X \rightarrow Y$ which has $\im(T) = Y$ and $\ker(T) = \{0\}$ has an inverse $T^{-1}$ which is also bounded. The proof of the analytic analogue involves restricting the Fredholm operator $T$ to a operator which is guaranteed to have a bounded inverse. \end{rmk} \begin{ex} Many of the intuitions developed in a linear algebra course break down in the face of infinite matrices. The dimensions of the kernel and cokernel of a linear transformation is precisely one of those cases. 
Recall the rank-nullity theorem; given an endomorphism $f$ of an $n$-dimensional $K$-vector space $V$, $\dim(\im(f)) + \dim(\ker(f)) = n$. The dimension of the cokernel $V/f(V)$ equals the dimension of the kernel since \[\dim(\coker(f)) = n - \dim(\im(f)) = \dim(\ker(f)).\] To see how this intuition fails in the case of infinite matrices, consider the matrix $S_{-1} = \sum_{i = 1}^\infty e_{i,i+1}$ where $e_{ij}$ is the matrix unit with $1$ in the $(i,j)^\text{th}$ entry and zeroes elsewhere. The matrix $S_{-1}$ can be visualized as the infinite matrix which has $1$'s along the first super-diagonal. It's clear that $S_{-1}$ is Fredholm with inverse $S_1 = \sum_{j = 1}^\infty e_{j+1,j}$. The kernel of $\mathcal L_{S_{-1}}$ is the following subspace of $V$, $\{(k, 0, 0, \ldots)^T \; | \; k \in K\}$, which has dimension 1. However the cokernel $V/\im(S_1) = \{0\}$, which has dimension 0. We'll return to this matrix and other similar matrices after the following definition. \end{ex} Since we can think of Fredholm matrices as those which are ``almost invertible," we propose the following as a measurement of how close a given Fredholm matrix is to being invertible \begin{defn} Let $A$ be a Fredholm matrix, we define the {\bf index} of $A$ to be \[\ind(A) = \dim(\ker(A)) - \dim(\coker(A)).\] \end{defn} \begin{ex}\label{Si generators} Define the following set of matrices. For an integer $i$, $S_i$ will denote the matrix \[S_i = \sum_{j = 1}^\infty e_{i+j,j} \text{ for all } j \text{ such that } j>-i.\] These are the matrices which have $1$'s along the $i^\text{th}$ sub-diagonal and zeroes elsewhere; in the case where $i < 0$, then the matrices have $1$'s along the $-i^\text{th}$ super-diagonal. Because these matrices amount to shift operators on the infinite-dimensional vector space $V$, it is straightforward to calculate kernels and cokernels of the matrices and see that \[\ind(S_i) = -i\] \end{ex} We now establish the following facts about the index. 
\begin{prop}\label{fredholm properties} Let $A$ and $B$ be Fredholm matrices and let $T \in M_\infty(K)$, and let $A_0$, $R$, and $S$ be as in the proof of Lemma \ref{fredholm dimension}. \begin{enumerate} \item $\ind(AB) = \ind(A) + \ind(B)$. \item $\ind(A_0) = - \ind(A)$. \item $A+T$ is Fredholm, and $\ind(A + T) = \ind(A)$. \end{enumerate} \end{prop} \begin{proof} To prove (1), we divide $V$ up into four subspaces $V_1$, $V_2$, $V_3$, and $V_4$. \[\begin{array}{r c l} V_1 &= &\ker(A) \cap \im(B),\\ \im(B) &= &V_1 \oplus V_2,\\ \ker(A) &= &V_1 \oplus V_3 \text{, and from here we get}\\ V &= &\im(B) \oplus V_3 \oplus V_4. \end{array}\] Note that by Lemma \ref{fredholm dimension}, $V_1$, $V_3$, and $V_4$ are finite dimensional subspaces of $V$ since they are subspaces of $\ker(A)$ or $\coker(B)$ which are finite dimensional. Let $d_i = \dim(V_i)$ for $i \in \{1, 3, 4\}$. In additon we can find two more subspaces $W, X \subseteq V$ by writing $\ker(AB) = \ker(B) \oplus W$ and $\im(A) = \im(AB) \oplus X$. Since $W \subseteq \ker(AB)$ and $X \subseteq \coker(AB)$, both $W$ and $X$ are finite dimensional. Note that $W$ is the subspace of all vectors $v$ such that $v \in \im(B)$ but $v \in \ker(A)$, so $\dim(W) = d_1$. Also note that $\im(A) = \mathcal{L}_A(V) = \mathcal{L}_A(\im(B) \oplus V_3 \oplus V_4) = \im(AB) \oplus \mathcal{L}_A(V_4)$. Since $\ker(A) = V_1 \oplus V_3$, $\mathcal{L}_A$ must be a one-to-one linear transformation from $V_4$ to $W$ which implies that $V_4$ and $X$ must have the same dimension, $\dim(X) = d_4$. Collecting our work from the previous paragraphs, we have that \[\begin{array}{r c l} \dim(\ker(AB) &= &\dim(\ker(B)) + d_1\\ \dim(\coker(AB)) &= &\dim(\coker(A)) + d_4\\ \dim(\ker(A)) &= &d_1 + d_3\\ \dim(\coker(B) &= &d_3 + d_4 \end{array}.\] So we calculate $\ind(AB) = \dim(\ker(B)) + d_1 - \dim(\coker(A)) - d_4$. 
On the other hand, $\ind(A) + \ind(B) = \dim(\ker(A)) - \dim(\coker(A)) + \dim(\ker(B)) - \dim(\coker(B)) = d_1 + d_3 - \dim(\coker(A)) + \dim(\ker(B)) - d_3 - d_4$, which gives the desired equality. The proof of (2) follows from the fact that $\ker(I_\infty - R) = \{v \in V \; | \; v - R v = 0\} = \{v \in V \; | \; v = R v\} = \coker(I_\infty - R)$. Because those subspaces have finite dimension, we calculate \[0 = \ind(I_\infty - R) = \ind(A_0 A) = \ind(A_0) + \ind(A).\] To show that (3) holds, define $R' = (R -A_0T)$ and $S' = (S - T A_0)$, and note \[\begin{array}{r c l} A_0(A + T) &= &I_\infty - R + A_0 T = I_\infty - R'\\ (A+T)A_0 &= &I_\infty - S + TA_0 = I_\infty - S' \end{array}.\] Thus $A+T$ is Fredholm (with the same inverse to boot). Finally, \[\ind(A_0) + \ind(A+T) = \ind(A_0(A+T)) = \ind(I_\infty - R') = 0.\] Since $\ind(A_0) = - \ind(A)$, we have that $\ind(A) = \ind(A+T)$. \end{proof}
Jacobson's article also included the first of the following embedding of $\mathcal T$ into the $K$-algebra of row and column finite matrices $\rcfm K$. \begin{ex} (\cite{onesidedinverses}, Equation 6) Recall the definition of $S_i$ from Example \ref{Si generators}: \[S_i = \sum_{j = 1}^\infty e_{i+j,j} \text{ for all } j \text{ such that } j>-i.\] One can see that there is an isomorphism $\Phi: \mathcal T \rightarrow \langle S_{-1}, S_1 \rangle \subseteq \rcfm K$ such that $\Phi(x) = S_{-1}$ and $\Phi(y) = S_1$. For ease of reference, we will call this embedding the ``Jacobson embedding" of $\mathcal T$. However, the Jacobson embedding of $\mathcal T$ is far from the only embedding of $\mathcal T$ into $\rcfm K$. Two more are given by $\Psi, \Xi: \mathcal T \rightarrow \rcfm K$, where \[\Psi(x) = T_{-1} = \sum_{i=1}^\infty \frac{1}{i+1}e_{i,i+1} \text{ and } \Psi(y) = T_{1} = \sum_{j =1}^\infty (j+1) e_{j+1,j}\] and \[\Xi(x) = S_{-2} \text{ and } \Xi(y) = S_2.\] \end{ex} From Theorem 4 of \cite{onesidedinverses} and the construction of the three homorphisms in the previous example, we have the following fact: \begin{prop} Each of the three homomorphisms $\Phi$, $\Psi$, and $\Xi$ from the previous example are injective maps embedding $\mathcal T$ into $\rcfm K$. \end{prop} We close this article with an example which shows how the index may be used to classify and distinguish between these three embeddings. \begin{ex} In \cite{Bossaller1}, the author defines two embeddings $E_1$ and $E_2$ of $\mathcal T$ into $\rcfm K$ as {\bf equivalent} if there is an invertible $U \in \rcfm K$ which conjugates $E_1$ into $E_2$. With this notion of equivalence in mind, one can see that the embeddings determined $\Phi$ and $\Psi$ share an equivalence class. The matrix \[U = \text{Diag}(1, 2, 6, \ldots, n!, \ldots)\] is an invertible row and column finite matrix such that $US_i U^{-1} = T_i$ for $i \in \{1,2\}$. 
Because the $S_i$ and $T_j$ are generators for $\Phi(\mathcal T)$ and $\Psi( \mathcal T)$, respectively, $U$ must conjugate the first into the second, thus the embeddings share the same equivalence class. Note however that $\Phi(\mathcal T)$ and $\Xi(\mathcal T)$ cannot share the same equivalence class. For if there were an invertible row and column finite matrix $V$ which conjugates $S_1$ into $S_2$ and $S_{-1}$ into $S_{-2}$, Corollary \ref{conjugation} would then force $S_1$ and $S_2$ to have the same index, which is not true by Example \ref{Si generators}. \end{ex} \end{document}
\begin{document} \title{Estimating the probability law of the codelength as a function of the approximation error in image compression} \author{Fran\c{c}ois Malgouyres\footnotemark[2]} \maketitle \renewcommand{\arabic{footnote}}{\fnsymbol{footnote}} \footnotetext[2]{LAGA/L2TI, Universit\'e Paris 13, 99 avenue Jean-Batiste Cl\'ement, 93430 Villetaneuse, France. \\ [email protected]\\ http://www.math.univ-paris13.fr/$\sim$malgouy/ } \renewcommand{\arabic{footnote}}{\arabic{footnote}} \begin{abstract} After a recollection on compression through a projection onto a polyhedral set (which generalizes the compression by coordinates quantization), we express, in this framework, the probability that an image is coded with $K$ coefficients as an explicit function of the approximation error. \end{abstract} \section{Introduction}\label{intro-sec} In the past twenty years, many image processing tasks have been addressed with two distinct mathematical tools : Image decomposition in a basis and optimization. The first mathematical approach proved very useful and is supported by solid theoretical foundations which guarantee its efficiency as long as the basis is adapted to the information contained in images. Modeling the image content by appropriate function spaces (of infinite dimension), mathematical theorems tell us how the coordinates of an image in a given basis behave. As instances, it is possible to characterize Besov spaces (see \cite{Meyerbook1}) and the space of bounded variation (it is ``almost characterized'' in \cite{CohenDahmenDaubechiesDevore}) with wavelet coefficients. As a consequence of these characterizations, one can obtain performance estimate for practical algorithms (see Th 9.6, pp. 386, in \cite{mallatbook} and \cite{cdpx,CohenDaubechiesGuleryusOrchard} for more complex analyses). Image compression and restoration are the typical applications where such analyses are meaningful. 
The optimization methods which have been considered to solve those practical problems also proved very efficient (see \cite{rof}, for a very famous example). However, the theory is not able to assess how well they perform, given an image model. Interestingly, most of the community who was primarily involved in the image decomposition approaches is now focusing on optimization models (see, for instance, the work on Basis Pursuit \cite{ChenDonoho} or compressed sensing \cite{DonohoCompSens}). The main reason for that is probably that optimization provides a more general framework (\cite{cdll,MalgouyresIeee02,MalgouyresCompression}). The framework which seems to allow both a good flexibility for practical applications (see \cite{ChenDonoho} and other papers on Basis Pursuit) and good properties for theoretical analysis is the projection onto polyhedron or polytopes. For theoretical studies, it shares simple geometrical properties with the usual image decomposition models (see \cite{geomoptim}). This might allow the derivation of approximation results. The aim of this paper is to state a rigorous\footnote{The theorem concerning compression in \cite{geomoptim} is false. The situation turns out to be more complex than we thought at the time it was written.} theorem which relates, asymptotically as the precision grows, the approximation error and the number of coefficients which are coded (which we abusively call codelength, for simplicity). More precisely, when the initial datum is assumed random in a convex set, we give the probability that the datum is coded by $K$ coefficients, as a function of the approximation error (see theorem \ref{LeTHM} for details). This result is given in a framework which generalizes the usual coding of the quantized coefficients (``non-linear approximation''), as usually performed by compression standards (for instance, JPEG and JPEG2000). 
\section{Recollection on variational compression}\label{hypf-sec} Here and all along the paper $N$ is a non-negative integer, $I=\{1,\ldots, N\}$ and $\BB = (\psi_i)_{i\in I}$ is a basis of $\RR^N$. We will also denote for $\tau > 0$ (all along the paper $\tau$ denotes a non-negative real number) and for all $k\in \ZZ$, $\tau_k = \tau (k-\frac{1}{2})$. For any $(k_i)_{i \in I} \in \ZZ^N$, we denote \begin{equation}\label{constraint} \CC\left( (k_i)_{i \in I} \right) = \left\{\sum_{i\in I} u_i \psi_i, \forall i\in I, \tau_{k_i}\leq u_i \leq \tau_{k_i+1}\right\}. \end{equation} We then consider the optimization problem \[(\tilde P)\left( (k_i)_{i \in I} \right) : \left\{\begin{array}{l} \mbox{minimize } f(v) \\ \mbox{under the constraint } v\in \CC\left( (k_i)_{i \in I} \right), \end{array}\right. \] where $f$ is a norm, is continuously differentiable away from $0$ and its level sets are strictly convex. In order to state Theorem \ref{LeTHM}, we also need $f$ to be {\em curved}. This means that the inverse of the homeomorphism\footnote{We prove in \cite{geomoptim} that, under the above hypotheses, $h$ actually is an homeomorphism.} $h$ below is Lipschitz. \[\begin{array}{rcl} h:\{u\in\RR^N,\ f(u)=1\} & \rightarrow & \{g\in\RR^N, \ \|g\|_2=1\} \\ u & \mapsto & \frac{\nabla f(u)}{\|\nabla f(u) \|_2}. \end{array} \] (The notation $\|.\|_2$ refers to the euclidean norm in $\RR^N$.) We denote, for any $(k_i)_{i \in I} \in \ZZ^N$, \[\tilde J \left( (k_i)_{i \in I} \right) = \{i \in I, u^*_i = \tau_{k_i} \mbox{ or }u^*_i = \tau_{k_i+1} \}, \] where $u^* = \sum_{i\in I} u^*_i \psi_i $ is the solution to $(\tilde P)\left( (k_i)_{i \in I} \right)$. The interest for these optimization problems comes from the fact that, as explained in \cite{MalgouyresCompression}, we can recover $(k_i)_{i \in I}$ from the knowledge of $(\tilde J , (u^*_i)_{j\in \tilde J} )$ (where $\tilde J = \tilde J \left( (k_i)_{i \in I} \right)$). The problem $(P)$ can therefore be used for compression. 
Given a datum $u=\sum_{i\in I} u_i \psi_i\in\RR^N$, we consider the unique $ (k_i(u))_{i \in I}\in\ZZ^N$ such that (for instance) \begin{equation}\label{approx} \forall i\in I, \tau_{k_i(u)} \leq u_i <\tau_{k_i(u)+1}. \end{equation} The information $(\tilde J , (u^*_i)_{j\in \tilde J} )$, where $\tilde J = \tilde J \left( (k_i(u))_{i \in I} \right)$, is then used to encode $u$. In the remainder, we denote the set of indexes that need to be coded to describe $u$ by $\tilde J(u) = \tilde J\left((k_i(u))_{i \in I}\right)$. Notice that we can also show (see \cite{MalgouyresCompression}) that the coding performed by the standard image processing compression algorithms (JPEG and JPEG2000) corresponds to the above model when, for instance, \[f(\sum_{i\in I} u_i \psi_i) = \sum_{i\in I} |u_i|^2. \] \section{The estimate}\label{estim-sec} The theorem takes the form : \begin{theorem}\label{LeTHM} Let $\tau'>0$ and $U$ be a random variable whose low is uniform in $\LS{\tau'}$, for a norm $f_d$. Assume $f$ satisfies the hypotheses given in Section \ref{hypf-sec}. For any norm $\|.\|$ and any $K\in\{1,\ldots N\}$ there exists $D_K$ such that for all $\varepsilon >0$, there exists $T>0$ such that for all $\tau<T$ \[\PROBA{\# \tilde J\left(U \right) = K} \leq D_K E^{\frac{N-K}{N+1}} + \varepsilon, \] where $E$ is the approximation error\footnote{When computing the approximation error, we consider the center of $\CC\left((k_i)_{i\in I}\right)$ has been chosen to represent all the elements such that $ (k_i)_{i \in I} = (k_i(u))_{i \in I} $. } : \[ E = \EXPECT{\|U-\tau \sum_{i\in I} k_i(U) \psi_i \| }. \] Moreover, if $f(\sum_{i\in I} u_i \psi_i) = \sum_{i\in I } |u_i|^2$, we also have\footnote{This assumption is very pesimistic. For instance, the lower bound seems to hold for almost every basis $\BB$ of $\RR^N$, when $f$ is fixed. 
We have not worked the details of the proof of such a statement out though.} \[\PROBA{\# \tilde J\left( U \right) =K} \geq D_K E^{\frac{N-K}{N+1}} - \varepsilon. \] \end{theorem} When compared to the kind of results evoked in Section \ref{intro-sec}, the above theorem original is in several ways : First, it concerns variational models which are more general than the model in which the results of Section \ref{intro-sec} are usually stated. This is probably the main interest of the current result. For instance, by following reasonings similar to those which led to Theorem \ref{LeTHM}, it is probably possible to obtain approximation results with redundant transforms. Secondly, it expresses the distribution of the number of coefficients as a function of the approximation error, while former results do the opposite. Typically, they bound the approximation error (quantified by the $L^2$ norm) by a function of the number of coefficients that are coded. The comparative advantages and drawbacks of the two kind of statements is not very clear. In the framework of Theorem \ref{LeTHM}, the larger $D_K$ (for $K$ small), the better the model compresses the data. However, it is clear that, as the approximation error goes to $0$, we have more and more chances to obtain a code of size $N$. With this regard, the constant $D_{K-1}$ seems to play a particular role since it dominates (asymptotically as $\tau$ goes to $0$) the probability not to obtain a code of length $N$. Thirdly, it is stated in finite dimension and, as a consequence, it does not impose apriori links between the data distribution (the function $f_d$) and the model (the function $f$ and the basis $\BB$). The ability of the model to represent the data is always defined. For instance, this allows the comparison of two bad models (which is not possible in infinite dimension). The analog of Theorem \ref{LeTHM} in infinite dimension might be interesting, though. 
\section{Proof of Theorem \ref{LeTHM}} \subsection{First properties and recollection} \subsubsection{Rewriting $(\tilde P)$} For any $u\in \RR^N$, $(P)(u)$ denotes the optimization problem \[(P)\left( u \right) : \left\{\begin{array}{l} \mbox{minimize } f(v-u) \\ \mbox{under the constraint } v\in \CC\left( 0\right), \end{array}\right. \] where $0$ denotes the origin in $\ZZ^N$ and $\CC(.)$ is defined by \eqref{constraint}. We then denote, for any $u = \sum_{i\in I}u_i \psi_i \in \CC\left( 0\right) $, \[J(u) = \{i \in I, u_i = \frac{\tau}{2} \mbox{ or } u_i = - \frac{\tau}{2} \}. \] With this notation, the set of active constraints of the solution $u^*$ to $(P)\left( u \right)$ is simply $J(u^*)$. \begin{e-proposition} For any $(k_i)_{i \in I} \in \ZZ^N$ \[\tilde J \left( (k_i)_{i \in I} \right) = J(u^*), \] where $u^*$ is the solution to $(P)\left(\tau \sum_{i \in I}k_i \psi_i \right)$. \end{e-proposition} \par\noindent{\it Proof}. \ignorespaces Denoting $\tilde u^*$ the solution of $(\tilde P)\left( (k_i)_{i \in I} \right)$ and $u^*$ the solution to $(P)\left( \tau \sum_{i \in I}k_i \psi_i \right)$, we have \begin{equation}\label{equivP} \tilde u^* = u^* + \tau \sum_{i \in I}k_i \psi_i. \end{equation} This can be seen from the fact that $(P)\left( \sum_{i \in I}k_i \psi_i \right)$ is exactly $(\tilde P)\left( (k_i)_{i \in I} \right)$, modulo a "global translation" by $\tau \sum_{i \in I}k_i \psi_i$. (The rigorous proof of \eqref{equivP} can easily be established using Kuhn-Tucker conditions, see \cite{Rockafellar}, Th 28.3, pp. 281.) The proposition is then obtained by identifying the coordinates of $\tilde u^*$ and $u^*$ in the basis $\BB$. $\Box$\newline\newline \subsubsection{On projection onto polytopes}\label{proj-sec} We can now adapt the definitions and notations of \cite{geomoptim} to the problems $(P)(.)$. Beside Proposition \ref{ssat-prop1}, all the results stated in this section are proved in \cite{geomoptim}. 
We consider a norm $f_d$ (which will be used later on to define the data distribution law) and define for any $C\subset \RR^N$ and any $A\subset \RR$
\] \end{e-proposition} \begin{theorem}\label{thm1} For any $u^* \in \partial \CC(0)$ and any $\tau' >0$, \[\SSAT{\overline{u^*}}{]0,\tau']} = \left\{v + \lambda(u-u^*), \mbox{for } v\in \overline{u^*}, \lambda \in ]0,\tau'] \mbox{ and } u \in \SSAT{u^*}{1} \right\} \] \end{theorem} We also have (see \cite{geomoptim}) \begin{e-proposition}\label{ssat-prop0} If $f$ satisfies the hypotheses given in Section \ref{hypf-sec}, for any $u^*\in\partial \CC(0)$, $\SSAT{u^*}{1}$ is a non-empty, compact Lipschitz manifold of dimension $\#J(u^*)-1$. \end{e-proposition} Another useful result for the purpose of this paper is the following. \begin{e-proposition}\label{ssat-prop1} If $f$ satisfies the hypotheses given in Section \ref{hypf-sec}, for any $u^*\in\partial \CC(0)$ and any $\tau'>0$, $\SSAT{u^*}{]0,\tau']}$ is a non-empty, bounded Lipschitz manifold of dimension $\#J(u^*)$. \end{e-proposition} \par\noindent{\it Proof}. \ignorespaces In order to prove the proposition, we consider $u^*=\sum_{i\in I} u^*_i \psi_i \in \partial\CC(0)$ and $u^c = \sum_{i\in J(u^*)} u^*_i \psi_i$. We are going to prove the proposition in the particular case where $u^c=u^*$. Proposition \ref{ssat-transl} and \ref{ssat-rescale} permit indeed to generalize the latter result obtained to any $\SSAT{u^*}{]0,\tau']}$, for $u^*\in \overline{u^c}$. (They indeed guarantee that $\SSAT{u^*}{]0,\tau']}$ is obtained by translating $\SSAT{u^c}{]0,\tau']}$.) In order to prove that $\SSAT{u^c}{]0,\tau']}$ is a bounded Lipschitz manifold of dimension $\#J(u^*)$, we prove that the mapping $h'$ defined below is a Lipschitz homeomorphism. \begin{equation}\label{hprime} \begin{array}{rrcl} h' : & \SSAT{u^c}{1}\times ]0,\tau'] & \longrightarrow & \SSAT{u^c}{]0,\tau']} \\ & (u,\lambda) & \longmapsto & u^c + \lambda (u-u^c). \end{array} \end{equation} The conclusion then directly follows from Proposition \ref{ssat-prop0}. 
Notice first that we can deduce from Proposition \ref{ssat-rescale}, that $h'$ is properly defined. Let us prove that $h'$ is invertible. For this purpose, we consider $\lambda_1$ and $\lambda_2$ in $]0,\tau']$ and $u_1$ and $u_2$ in $\SSAT{u^c}{1}$ such that \begin{equation}\label{tmp98656} u^c+\lambda_1 (u_1-u^c) = u^c+\lambda_2 (u_2-u^c). \end{equation} We have \begin{eqnarray*} \lambda_1 & = & f_d(\lambda_1 (u_1-u^c)) \\ & = & f_d(\lambda_2 (u_2-u^c)) \\ & = & \lambda_2. \end{eqnarray*} Using \eqref{tmp98656}, we also obtain $u_1=u_2$ and $h'$ is invertible. Finally, $h'$ is Lipschitz since, for any $\lambda_1$ and $\lambda_2$ in $]0,\tau']$ and any $u_1$ and $u_2$ in $ \SSAT{u^c}{1}$, \begin{eqnarray*} \|\lambda_1 (u_1-u^c) - \lambda_2 (u_2-u^c) \|_2 & = & \|\lambda_1 (u_1-u_2) + (\lambda_1-\lambda_2) (u_2-u^c) \|_2, \\ & \leq & \tau' \|u_1-u_2\|_2 + C |\lambda_1-\lambda_2|, \end{eqnarray*} where $C$ is such that for all $u\in \SSAT{u^c}{1}$, \[\|u-u^c\|_2 \leq C. \] (Remember $\SSAT{u^c}{1}$ is compact, see Proposition \ref{ssat-prop0}.) $\Box$\newline\newline \subsection{The estimate} We denote the discrete grid by \[\DD = \{\tau \sum_{i\in I} k_i \psi_i, (k_i)_{i\in I} \in \ZZ^N\}, \] and, for $u^*\in\partial \CC(0)$ and $(k_j)_{j\in J(u^*)}\in \ZZ^{J(u^*)}$, \[\Dom{ (k_j)_{j\in J(u^*)} }= \{\tau \sum_{j\in J(u^*)} k_j \psi_j +\tau \sum_{i\not\in J(u^*)} k_i \psi_i , \mbox{ where } (k_i)_{i\not\in J(u^*)} \in \ZZ^{I\setminus J(u^*)}\}. \] The set $\Dom{ (k_j)_{j\in J(u^*)} }$ is a slice in $\DD$. \begin{e-proposition}\label{major1} Let $\tau'>0$, $u^*\in\partial \CC(0)$ and $(k_j)_{j\in J(u^*)}\in \ZZ^{J(u^*)}$, \[\#\left( \SSAT{\overline{u^*}}{]0,\tau']} \cap \Dom{ (k_j)_{j\in J(u^*)} }\right) \leq 1. \] \end{e-proposition} \par\noindent{\it Proof}. 
\ignorespaces Taking the notations of the proposition and assuming $ \SSAT{\overline{u^*}}{]0,\tau']} \cap \Dom{ (k_j)_{j\in J(u^*)}} \neq \emptyset$, we consider $(k_i^1)_{i\in I}$ and $(k_i^2)_{i\in I}$ such that \[\tau \sum_{i\in I} k_i^1 \psi_i \in \SSAT{\overline{u^*}}{]0,\tau']} \cap \Dom{ (k_j)_{j\in J(u^*)} } \] and \[\tau \sum_{i\in I} k_i^2 \psi_i \in \SSAT{\overline{u^*}}{]0,\tau']} \cap \Dom{ (k_j)_{j\in J(u^*)} }. \] Theorem \ref{thm1} guarantees there exist $v_1$ and $v_2$ in $\overline{u^*}$, $\lambda_1$ and $\lambda_2$ in $]0,\tau']$ and $u_1$ and $u_2$ in $\SSAT{u^*}{1}$ such that \[\tau \sum_{i\in I} k_i^1 \psi_i = v_1+\lambda_1(u_1-u^* ) \] and \[\tau \sum_{i\in I} k_i^2 \psi_i = v_2+\lambda_2(u_2-u^* ). \] So \[v_1+\lambda_1(u_1-u^* ) = v_2+\lambda_2(u_2-u^* ) + \tau \sum_{i\not\in J(u^*)} ( k_i^1 - k_i^2 )\psi_i. \] Using \eqref{ubarre}, we know there exists $(\beta_i^1)_{i\not\in J(u^*)}$ and $(\beta_i^2)_{i\not\in J(u^*)}$ such that \[\forall i\not\in J(u^*), -\frac{1}{2}< \beta_i^1 <\frac{1}{2} \mbox{ and } -\frac{1}{2}< \beta_i^2 <\frac{1}{2}, \] \[v_1 = u^c + \tau \sum_{i\not\in J(u^*)} \beta_i^1 \psi_i \] and \[v_2 = u^c + \tau \sum_{i\not\in J(u^*)} \beta_i^2 \psi_i, \] with $u^c = \sum_{j\in J(u^*)} u^*_j \psi_j$, where $u^* = \sum_{i\in I} u^*_i \psi_i$. So, letting for all $i\not\in J(u^*)$, $\alpha_i =k_i^1 - k_i^2 + \beta_i^2 - \beta_i^1$, we finally have \begin{equation}\label{tmp678} \lambda_1(u_1-u^* ) = \lambda_2(u_2-u^* ) + \tau \sum_{i\not\in J(u^*)} \alpha_i \psi_i. \end{equation} Let us assume \begin{equation}\label{hyp} \max_{i\not\in J(u^*)} |\alpha_i| >0, \end{equation} and consider $0<\lambda \leq 1$ such that \begin{equation}\label{lambda} \lambda < \frac{1}{2 \max_{i\not\in J(u^*)} |\alpha_i| }. 
\end{equation} We have, using \eqref{tmp678}, \begin{eqnarray*} u^c + \lambda\lambda_1 [(u_1-u^*+u^c)-u^c ] & = & u^c + \lambda\lambda_1 (u_1-u^* ) \\ & = & u^c + \lambda \tau \sum_{i\not\in J(u^*)} \alpha_i \psi_i + \lambda \lambda_2(u_2-u^* ) \\ & = & v + \lambda \lambda_2 [(u_2-u^*+v)-v ], \end{eqnarray*} where $v= u^c + \lambda\tau \sum_{i\not\in J(u^*)} \alpha_i \psi_i$. Moreover, using \eqref{ubarre} and \eqref{lambda}, we know that $v\in \overline{u^c}$. Using Proposition \ref{ssat-transl}, we know that \[u_1-u^*+u^c \in \SSAT{u^c}{1} \mbox{ and } u_2-u^*+v \in \SSAT{v}{1}. \] Finally, applying Theorem \ref{thm1}, we obtain \[u^c + \lambda\lambda_1 (u_1-u^* ) \in \SSAT{u^c}{]0,\tau']}\cap \SSAT{v}{]0,\tau']}. \] Since the solution to $(P)(u^c + \lambda\lambda_1 (u_1-u^* ))$ is unique, we necessarily have $u^c=v$ and therefore $\max_{i\not\in J(u^*)} |\alpha_i| =0$. This contradicts \eqref{hyp} and guarantees that \[\max_{i\not\in J(u^*)} |\alpha_i| =0. \] Using the definition of $\alpha_i$, we obtain, for all $i\not\in J(u^*)$, \[|k_i^1 - k_i^2| = |\beta_i^1 - \beta_i^2| <1. \] This implies $k_i^1 = k_i^2$, for all $i\in I$. $\Box$\newline\newline Let us denote, for $u^*\in\partial \CC(0)$, the projection onto $Span\left( \psi_j, j\in J(u^*)\right)$ by \[\begin{array}{rrcl} p : & \RR^N & \longrightarrow & Span\left( \psi_j, j\in J(u^*)\right)\\ & \sum_{i\in I} \alpha_i \psi_i & \longmapsto & \sum_{j\in J(u^*) } \alpha_j \psi_j. \end{array} \] It is not difficult to see that, for any $\tau'>0$, $u^*\in\partial \CC(0)$ and $(k_j)_{j\in J(u^*)}\in \ZZ^{J(u^*)}$, \begin{equation}\label{bbbb} \#\left( \SSAT{\overline{u^*}}{]0,\tau']} \cap \Dom{ (k_j)_{j\in J(u^*)} }\right) = 1 \Longrightarrow \tau \sum_{j\in J(u^*)} k_j\psi_j\in p\left( \SSAT{\overline{u^*}}{]0,\tau']} \right). \end{equation} \begin{remark} Notice that the converse implication does not hold in general. 
It is indeed possible to build counterexamples where $\SSAT{\overline{u^*}}{]0,\tau']} $ passes between the points of the discrete grid $\DD$. However, it is not difficult to see that, if $\tau \sum_{j\in J(u^*)} k_j\psi_j\in p\left( \SSAT{\overline{u^*}}{]0,\tau']} \right)$ and $\SSAT{\overline{u^*}}{]0,\tau']} \cap \Dom{ (k_j)_{j\in J(u^*)} }= \emptyset$, we can build $(k_i)_{i\not\in J(u^*)} \in \ZZ^{I\setminus J(u^*)}$ such that \[\tau \sum_{j\in J(u^*)} k_j\psi_j +\tau \sum_{i\not\in J(u^*)} (k_i+\frac{1}{2}) \psi_i \in \SSAT{u^c}{]0,\tau']}, \] where \[u^c = \sum_{j\in J(u^*)} u^*_j \psi_j \] ($u^*_j$ being the coordinates of $u^*$). This means that the set $\SSAT{u^c}{]0,\tau']}$, which is a manifold of dimension $\# J(u^c)$ living in $\RR^N$, intersects a discrete grid. This is obviously a very rare event. Typically, adding to the basis $\BB$ some kind of randomness (for instance adding a very small Gaussian noise to every $\psi_i$) would make it an event of probability $0$. Notice, in this regard, that when $f(\sum_{i\in I} u_i \psi_i) = \sum_{i \in I} |u_i|^2$, we trivially have the equivalence in \eqref{bbbb}. \end{remark} A simple consequence of \eqref{bbbb} is that \begin{equation}\label{borne-1} \#\left( \SSAT{\overline{u^*}}{]0,\tau']} \cap \DD\right) \leq \#\left(p\left( \SSAT{\overline{u^*}}{]0,\tau']} \right) \cap \left\{\tau \sum_{j\in J(u^*)} k_j\psi_j, (k_j)_{j\in J(u^*)}\in \ZZ^{J(u^*)}\right\} \right). \end{equation} Notice finally that, for $u^*=\sum_{i\in I} u^*_i\psi_i \in\partial \CC(0)$, Proposition \ref{ssat-transl} and Equation \eqref{ubarre} guarantee that \[p\left(\SSAT{u^c}{1}\right) = p\left(\SSAT{u^*}{1}\right), \] for $u^c = \sum_{j\in J(u^*)} u^*_j \psi_j$. 
We therefore have, using also Theorem \ref{thm1}, Proposition \ref{ssat-rescale} and Equation \eqref{ubarre}, \begin{eqnarray*} p\left( \SSAT{\overline{u^*}}{]0,\tau']} \right) & = &\{ p(v) +\lambda (p(u) - p(u^*)), \mbox{ for } v\in \overline{u^*}, \lambda\in ]0,\tau'] \mbox{ and } u\in \SSAT{u^*}{1} \}, \\ & = &\{ u^c +\lambda (p(u) - u^c), \mbox{ for } \lambda\in ]0,\tau'] \mbox{ and } u\in \SSAT{u^c}{1}\}, \\ & = & p\left( \SSAT{u^c}{]0,\tau']} \right). \end{eqnarray*} Finally, \begin{equation}\label{borne0} \#\left( \SSAT{\overline{u^*}}{]0,\tau']} \cap \DD\right) \leq \#\left(p\left( \SSAT{u^c}{]0,\tau']} \right) \cap \left\{\tau \sum_{j\in J(u^c)} k_j\psi_j, (k_j)_{j\in J(u^c)}\in \ZZ^{J(u^c)}\right\} \right). \end{equation} \begin{e-proposition}\label{mesurable} If $f$ satisfies the hypotheses given in Section \ref{hypf-sec} then, for any $u^* = \sum_{i\in I} u^*_i \psi_i \in\partial \CC(0)$, $p\left(\SSAT{u^c}{]0,\tau']}\right)$ (where $u^c = \sum_{j\in J(u^*)} u^*_j \psi_j$) is a non-empty, bounded Lipschitz manifold of dimension $\#J(u^*)$. \end{e-proposition} \par\noindent{\it Proof}. \ignorespaces Thanks to Proposition \ref{ssat-prop1}, it suffices to establish that the restriction of $p$: \[\begin{array}{rrcl} p' : & \SSAT{u^c}{]0,\tau']} & \longrightarrow & p\left(\SSAT{u^c}{]0,\tau']}\right)\\ & u & \longmapsto & p(u). \end{array} \] is a Lipschitz homeomorphism. This latter result is immediate once we have established that $p'$ is invertible. This proof is similar to the one of Proposition \ref{major1}. Taking the notations of the proposition, we assume that there exist $u_1$ and $u_2$ in $ \SSAT{u^c}{]0,\tau']}$ and $(\alpha_i)_{i\not\in J(u^*)}\in \RR^{I\setminus J(u^*)}$ satisfying \[u_1 = u_2 + \tau \sum_{i\not\in J(u^*)} \alpha_i \psi_i. 
\] If we assume $\max_{i\not\in J(u^*)} |\alpha_i|\neq 0$, we have for $0<\lambda < \min(1,\frac{1}{2 \max_{i\not\in J(u^*)} |\alpha_i|})$, \begin{eqnarray*} u^c+\lambda(u_1-u^c) & = & u^c + \tau \sum_{i\not\in J(u^*)} \lambda \alpha_i \psi_i + \lambda (u_2-u^c) \\ & = & v + \lambda \left(u_2 + \tau \sum_{i\not\in J(u^*)} \lambda \alpha_i \psi_i -v\right) \end{eqnarray*} for $v = u^c + \tau \sum_{i\not\in J(u^*)} \lambda \alpha_i \psi_i $. Since $v\in \overline{u^c}$ (see \eqref{ubarre}), Proposition \ref{ssat-transl} guarantees that $u_2 + \tau \sum_{i\not\in J(u^*)} \lambda \alpha_i \psi_i= u_2+v-u^c\in \SSAT{v}{]0,\tau']} $. As a consequence, applying Proposition \ref{ssat-rescale}, we know that \[u^c+\lambda (u_1-u^c) \in \SSAT{u^c}{\lambda} \cap \SSAT{v}{]0,+\infty[}. \] Since $(P)(u^c+\lambda (u_1-u^c))$ has a unique solution, we obtain a contradiction and can conclude that $\max_{i\not\in J(u^*)} |\alpha_i|= 0$. As a consequence, $p'$ is invertible. It is then obviously a Lipschitz homeomorphism. $\Box$\newline\newline Proposition \ref{mesurable} guarantees that $p\left(\SSAT{u^c}{]0,\tau']}\right)$ is Lebesgue measurable in $\RR^{\#J(u^*)}$. Moreover, its Lebesgue measure in $\RR^{\#J(u^*)}$ (denoted $\LEB{\#J(u^*)}{p\left(\SSAT{u^c}{]0,\tau']}\right)}$) is finite and strictly positive: \[0< \LEB{\#J(u^*)}{p\left(\SSAT{u^c}{]0,\tau']}\right)} <\infty. \] Another consequence takes the form of the following proposition. \begin{e-proposition}\label{estime1} Let $\tau'>0$ and $u^*\in\partial \CC(0)$. Then \[\lim_{\tau\rightarrow 0} \tau^K \#\left( \SSAT{\overline{u^*}}{]0,\tau']} \cap \DD\right) \leq \LEB{K}{p\left(\SSAT{u^c}{]0,\tau']}\right)} \] where $K=\#J(u^*)$. Moreover, if the equality holds in \eqref{borne-1} (or equivalently: the equality holds in \eqref{borne0}) \[\lim_{\tau\rightarrow 0} \tau^K \#\left( \SSAT{\overline{u^*}}{]0,\tau']} \cap \DD\right) = \LEB{K}{p\left(\SSAT{u^c}{]0,\tau']}\right)}. 
\] \end{e-proposition} \par\noindent{\it Proof}. \ignorespaces In order to prove the proposition, we are going to prove that, denoting $K=\#J(u^c)$, \begin{equation}\label{09u82t} \lim_{\tau\rightarrow 0} \tau^{K} \#\left(p\left( \SSAT{u^c}{]0,\tau']} \right) \cap \left\{\tau \sum_{j\in J(u^c)} k_j\psi_j, (k_j)_{j\in J(u^c)}\in \ZZ^{J(u^c)}\right\} \right) = \LEB{K}{p\left(\SSAT{u^c}{]0,\tau']}\right)} \end{equation} The conclusion follows from \eqref{borne0}. Let us first remark that, unlike $\SSAT{u^c}{]0,\tau']}$, the set \[A = p\left(\SSAT{u^c}{]0,\tau']}\right) - u^c \] does not depend on $\tau$. This is due to Proposition 9\footnote{The definition of $\SSAT{C}{A}$ given in the current paper does not allow the rewriting of the proposition 9 of \cite{geomoptim}. This is why we have not adapted it in Section \ref{proj-sec}.}, in \cite{geomoptim}. Notice also that, because of Proposition \ref{mesurable}, both $A$ and $p\left(\SSAT{u^c}{]0,\tau']}\right)$ are Lebesgue measurable (in $\RR^K$) and that \[\LEB{K}{A} = \LEB{K}{p\left(\SSAT{u^c}{]0,\tau']}\right)}. \] In order to prove the upper bound in \eqref{09u82t}, we consider the sequence of functions, defined over $\RR^K$ \[f_n(u) = \max\left(0 , 1-n \ \inf_{v\in A} \|u-v\|_2 \right). \] This is a sequence of functions which are both Lebesgue and Riemann integrable and the sequence converges in $L^1(\RR^K)$ to $\one_{A}$ (the indicator function of the set $A$). So, for any $\varepsilon>0$, there exists $n\in \NN$ such that \[\int f_n \leq \int \one_A + \varepsilon. \] Moreover, we have, for all $u\in\RR^K$ and all $n\in\NN$, \[\one_{A}(u) \leq f_n(u). 
\] So, denoting $V_{\tau}= \left\{\tau \sum_{j\in J(u^c)} k_j\psi_j - u^c, (k_j)_{j\in J(u^c)}\in \ZZ^{J(u^c)}\right\}$, \begin{eqnarray*} \lim_{\tau\rightarrow 0}\tau^{K} \#\left(p\left( \SSAT{u^c}{]0,\tau']} \right) \cap \left\{\tau \sum_{j\in J(u^c)} k_j\psi_j, (k_j)_{j\in J(u^c)}\in \ZZ^{J(u^c)}\right\} \right) & = & \lim_{\tau\rightarrow 0}\tau^{K} \sum_{v\in V_{\tau} }\one_{A}(v) \\ & \leq & \lim_{\tau\rightarrow 0}\tau^{K} \sum_{v\in V_{\tau} } f_n(v) \\ & \leq & \int f_n \\ & \leq & \int \one_A + \varepsilon \\ & \leq & \LEB{K}{p\left(\SSAT{u^c}{]0,\tau']}\right)} + \varepsilon. \end{eqnarray*} So, \[\lim_{\tau\rightarrow 0}\tau^{K} \#\left(p\left( \SSAT{u^c}{]0,\tau']} \right) \cap \left\{\tau \sum_{j\in J(u^c)} k_j\psi_j, (k_j)_{j\in J(u^c)}\in \ZZ^{J(u^c)}\right\} \right) \leq \LEB{K}{p\left(\SSAT{u^c}{]0,\tau']}\right)} \] The lower bound in \eqref{09u82t} is obtained in a similar way, by considering an approximation of $\one_{A}$ by a function smaller than $\one_{A}$ which is Riemann integrable. (For instance: $f_n(u) = 1 - \max\left(0 , 1-n \ \inf_{v\not\in A} \|u-v\|_2 \right)$.) $\Box$\newline\newline From now on, we will denote for all $K\in\{1,\ldots, N\}$ \[C_K = \left\{\tau\sum_{j\in J} u_j \psi_j, \mbox{ where } J\subset I, \#J=K \mbox{ and } \forall j\in J, u_j =-\frac{1}{2} \mbox{ or } u_j =\frac{1}{2} \right\} \] The set $C_K$ contains all the ``centers'' of the equivalence classes of codimension $K$. Similarly, we denote \[\Cl{K} = \left\{u^* \in \partial \CC(0), \#J(u^*) = K \right\}. \] We obviously have, for all $K\in\{1,\ldots, N\}$, \[\Cl{K} = \cup_{u^c\in C_K} \overline{u^c}. 
\] Since, for all $K\in\{1,\ldots, N\}$, $C_K$ is finite, it is clear from Proposition \ref{estime1} that, for any $\tau'>0$, \[\lim_{\tau\rightarrow 0} \tau^K \#\left( \SSAT{\Cl{K}}{]0,\tau']} \cap \DD\right) \leq \sum_{u^c \in C_K} \LEB{K}{p\left(\SSAT{u^c}{]0,\tau']}\right)} < +\infty \] Moreover, we have an equality between the above two terms, as soon as the equality holds in \eqref{borne-1}. We can finally express the following estimate. \begin{e-proposition}\label{finalprop} Let $\tau'>0$ \[\lim_{\tau\rightarrow 0} \tau^K \#\left( \SSAT{\Cl{K}}{]0,\infty[} \cap \LS{\tau'}\cap \DD\right) \leq \sum_{u^c \in C_K} \LEB{K}{p\left(\SSAT{u^c}{]0,\tau']}\right)} \] where $K=\#J(u^*)$. Moreover, if the equality holds in \eqref{borne-1} for all $u^c\in C_K$ (or equivalently : the equality holds in \eqref{borne0}) \[\lim_{\tau\rightarrow 0} \tau^K \#\left( \SSAT{\Cl{K}}{]0,\infty[}\cap \LS{\tau'} \cap \DD\right) = \sum_{u^c \in C_K}\LEB{K}{p\left(\SSAT{u^c}{]0,\tau']}\right)}. \] \end{e-proposition} \par\noindent{\it Proof}. \ignorespaces We consider \[M = \sup_{\{u=\sum_{i\in I} u_i \psi_i, \forall i\in I, |u_i|\leq \frac{1}{2}\}} f_d(u) \] We have, for all $u^*\in\partial \CC(0)$, \begin{equation}\label{M} f_d(u^*) \leq M \tau. \end{equation} We therefore have for all $u\in \LS{\tau'}$ and for $u^*$ the solution to $(P)(u)$, \begin{eqnarray*} f_d(u-u^*) & \leq & f_d(u) + f_d(u^*) \\ & \leq & \tau' + M\tau. \end{eqnarray*} So \[\SSAT{\Cl{K}}{]0,\infty[}\cap \LS{\tau'} \subset \SSAT{\Cl{K}}{]0,\tau'+M \tau]}. \] Moreover, it is not difficult to see that (remember $h'$ defined by \eqref{hprime} is an homeomorphism) \[\lim_{\tau\rightarrow 0} \sum_{u^c \in C_K} \LEB{K}{p\left( \SSAT{u^c}{]0,\tau'+M \tau]}\right)} = \sum_{u^c \in C_K} \LEB{K}{p\left( \SSAT{u^c}{]0,\tau']}\right)}. 
\] We can therefore deduce (from Proposition \ref{estime1}) that \[\lim_{\tau\rightarrow 0} \tau^K \#\left( \SSAT{\Cl{K}}{]0,\infty[} \cap \LS{\tau'}\cap \DD\right) \leq \sum_{u^c \in C_K} \LEB{K}{p\left(\SSAT{u^c}{]0,\tau']}\right)} \] In order to prove the last statement of the proposition, we consider $u^*\in \partial \CC(0)$ and $u\in\SSAT{u^*}{]0,\tau'-M\tau]}$, we know that \begin{eqnarray*} f_d(u) & \leq & f_d(u-u^*) + f_d(u^*) \\ & \leq & (\tau'- M\tau)+ M\tau = \tau'. \end{eqnarray*} So \[ \SSAT{\Cl{K}}{]0,\tau'-M \tau]} \subset\SSAT{\Cl{K}}{]0,\infty[}\cap \LS{\tau'}. \] Since (again) \[\lim_{\tau\rightarrow 0} \sum_{u^c \in C_K} \LEB{K}{p\left( \SSAT{u^c}{]0,\tau'-M \tau]}\right)} = \sum_{u^c \in C_K} \LEB{K}{p\left( \SSAT{u^c}{]0,\tau']}\right)}, \] we know that the second statement of the proposition holds. $\Box$\newline\newline Another immediate result is useful to state the final theorem. Notice first that we have, for any $(k_i)_{i\in I} \in \ZZ^N$ and any norm $\|.\|$, \[\int_{v\in \CC((k_i)_{i\in I})} \|v-\tau \sum_{i\in I} k_i \psi_i \| dv = C \tau^{N+1}, \] where \[C = \int_{\{v = \sum_{i\in I} v_i \psi_i, \forall i\in I, |v_i|\leq \frac{1}{2}\}} \|v\| dv \] only depends on the particular norm $\|.\|$ and the basis $(\psi_i)_{i\in I}$. So, denoting $U$ a random variable whose law is uniform in $\LS{\tau'}$ and $(k_i(U))_{i\in I}$ the discrete point defined by \eqref{approx}, we have \begin{equation}\label{errorespect} \lim_{\tau\rightarrow 0} \frac{\EXPECT{\|U-\tau \sum_{i\in I} k_i(U) \psi_i \| } } {\tau^{N+1}} = C. \end{equation} This follows from the fact that the number of points $(k_i)_{i\in I}$ such that $\CC((k_i)_{i\in I})$ intersects both $\LS{\tau'}$ and its complement in $\RR^N$ becomes negligible with regard to the number of points $(k_i)_{i\in I}$ such that $\CC((k_i)_{i\in I})$ is included in $\LS{\tau'}$, when $\tau$ goes to $0$. We can now state the final result. 
\begin{theorem} Let $\tau'>0$ and $U$ be a random variable whose law is uniform in $\LS{\tau'}$, for a norm $f_d$. For any norm $\|.\|$, any $K\in\{1,\ldots, N\}$ and any $\varepsilon >0$, there exists $T>0$ such that for all $\tau<T$ \[\PROBA{\# \tilde J\left(U \right) =K} \leq D_K E^{\frac{N-K}{N+1}} + \varepsilon, \] where $E$ is the approximation error\footnote{When computing the approximation error, we consider that the center of $\CC\left((k_i)_{i\in I}\right)$ has been chosen to represent all the elements coded by $(\tilde P)\left( (k_i)_{i \in I} \right)$. }: \[ E = \EXPECT{\|U-\tau \sum_{i\in I} k_i(U) \psi_i \| }. \] Moreover, if the equality holds in \eqref{borne-1} (or equivalently: the equality holds in \eqref{borne0}) for all $u^c\in C_K$, then we also have \[\PROBA{\# \tilde J\left( U \right) =K} \geq D_K E^{\frac{N-K}{N+1}} - \varepsilon. \] The constant $D_K$ is given by \[D_K=\frac{A_K}{BC^{\frac{N-K}{N+1}}}, \] with \[A_K= \sum_{u^c \in C_K} \LEB{K}{p\left(\SSAT{u^c}{]0,\tau']}\right)}, \] \[B= \frac{ \LEB{N}{\LS{\tau'}} } {\LEB{N}{\{v = \sum_{i\in I} v_i \psi_i, \forall i \in I, |v_i|\leq \frac{1}{2}\}} } \] and \[C = \int_{\{v = \sum_{i\in I} v_i \psi_i, \forall i \in I, |v_i|\leq \frac{1}{2}\}} \|v\| dv. \] \end{theorem} \par\noindent{\it Proof}. \ignorespaces Remark first that, for any $(k_i)_{i\in I}\in \ZZ^N$, the probability that \[\tau_{k_i} \leq U_i \leq \tau_{k_i+1}, \] when $U=\sum_{i\in I} U_i \psi_i$ follows a uniform law in $\LS{\tau'}$, is \[\frac{\LEB{N}{\CC((k_i)_{i\in I}) \cap \LS{\tau'} }}{\LEB{N}{\LS{\tau'}}}. \] Therefore, taking the notations of the theorem \[\PROBA{\# \tilde J(U) = K } = \sum_{(k_i)_{i\in I}\in \ZZ^N} \one_{\tau\sum_{i\in I} k_i \psi_i \in \SSAT{\Cl{K}}{[0,+\infty[} } \frac{\LEB{N}{\CC((k_i)_{i\in I}) \cap \LS{\tau'} }}{\LEB{N}{\LS{\tau'}}}. 
\] If $(k_i)_{i\in I}$ is such that $\LEB{N}{\CC((k_i)_{i\in I}) \cap \LS{\tau'} } \neq 0$, there exists $v\in \CC(0)$ such that $v + \tau \sum_{i\in I} k_i \psi_i \in \LS{\tau'}$. So, we have \begin{eqnarray*} f_d(\tau \sum_{i\in I} k_i \psi_i) & \leq & \tau' + f_d(v) \\ & \leq & \tau' + M\tau, \end{eqnarray*} where $M$ is given by \eqref{M}. We therefore have \[\PROBA{\# \tilde J(U) = K } \leq \frac{\LEB{N}{\CC(0)}}{\LEB{N}{\LS{\tau'}}} \# \left( \SSAT{\Cl{K}}{ ]0,+\infty [ } \cap \LS{\tau'+M\tau} \cap \DD \right). \] The lower bound is obtained with a similar estimation and we obtain \[\PROBA{\# \tilde J(U) = K } \geq \frac{\LEB{N}{\CC(0)}}{\LEB{N}{\LS{\tau'}}} \# \left( \SSAT{\Cl{K}}{]0,+\infty[} \cap \LS{\tau'-M\tau} \cap \DD \right). \] Notice finally that \[\lim_{\tau\rightarrow 0} \frac{\# \left( \SSAT{\Cl{K}}{]0,+\infty[} \cap \LS{\tau'} \cap \DD \right)}{\# \left( \SSAT{\Cl{K}}{]0,+\infty[} \cap \LS{\tau'\pm M\tau} \cap \DD \right)} =1. \] The proof is now a straightforward consequence of Proposition \ref{finalprop} and \eqref{errorespect}. More precisely, taking the notations of the theorem and $\varepsilon>0$, we know that there exists $T>0$ such that, for all $\tau<T$, \[ \tau^K \#\left( \SSAT{\Cl{K}}{]0,\infty[} \cap \LS{\tau'+M\tau}\cap \DD\right) \leq A_K + \varepsilon, \] and \[\frac{E^{\frac{1}{N+1}} } {C^{\frac{1}{N+1}}} \geq \tau - \varepsilon. \] So \begin{eqnarray*} \PROBA{\# \tilde J\left(U \right) =K} & \leq & \frac{\tau^N}{B} \frac{A_K+\varepsilon}{\tau^K} \\ & \leq &\frac{A_K+\varepsilon}{B}\left( \left(\frac{E}{C} \right)^{\frac{1}{N+1}} + \varepsilon \right)^{N-K} \\ & \leq & \frac{A_K}{B C^{\frac{N-K}{N+1}}} E^{\frac{N-K}{N+1}} + o(1), \end{eqnarray*} where $o(1)$ is a function of $\varepsilon$ which goes to $0$, when $\varepsilon$ goes to $0$. The first inequality of the theorem follows. The proof of the second inequality of the theorem is similar to the one above. $\Box$\newline\newline \markboth{}{} \end{document}
\begin{document} \title{External Time-Varying Fields and Electron Coherence} \author{Jen-Tsung Hsiang} \email[Jen-Tsung Hsiang: ]{[email protected]} \author{L. H. Ford} \email[L. H. Ford: ]{[email protected]} \affiliation{Institute of Cosmology, Department of Physics and Astronomy\\ Tufts University\\ Medford, Massachusetts, 02155} \date{\today} \begin{abstract} The effect of time-varying electromagnetic fields on electron coherence is investigated. A sinusoidal electromagnetic field produces a time-varying Aharonov-Bohm phase. In a measurement of the interference pattern which averages over this phase, the effect is a loss of contrast. This is effectively a form of decoherence. We calculate the magnitude of this effect for various electromagnetic field configurations. The result seems to be sufficiently large to be observable. \end{abstract} \pacs{03.75.-b,03.65.Yz,41.75.Fr} \maketitle The well-known Aharonov-Bohm phase~\cite{AB1959} arises when coherent electrons traverse two distinct paths in the presence of an electromagnetic field. Let the two paths in spacetime be denoted by $C_1$ and $C_2$. The phase difference due to the electromagnetic field, the Aharonov-Bohm phase, is the line integral of the vector potential around the closed spacetime path $\partial\Omega=C_1-C_2$: \begin{equation} \vartheta=-e\oint_{\partial\Omega} dx_{\mu}A^{\mu}(x)\,. \label{E:phase_difference} \end{equation} By Stokes' theorem, it can also be expressed as a surface integral of the field strength tensor over a two-dimensional surface $\Omega$ bounded by $\partial\Omega$: \begin{equation} \vartheta= -\frac{1}{2} e\int_{\Omega} d\sigma_{\mu \nu}\, F^{\mu \nu}(x)\,. \label{E:phase_difference2} \end{equation} This leads to the remarkable result that the electron interference pattern is sensitive to shifts in the field strength in regions from which the electrons are excluded. 
The reality of the Aharonov-Bohm effect has been confirmed by numerous experiments, beginning with the work of Chambers~\cite{rC1960} and continuing with that of Tonomura and coworkers~\cite{aT1982} using electron holography. If the electromagnetic field undergoes fluctuations on a time scale shorter than the integration time of the experiment, then the effect is a loss of contrast in the interference pattern. The role of a fluctuating Aharonov-Bohm phase in decoherence has been discussed by several authors~\cite{SAI1990,lF1993,lF1995,lF1997,BP2001,BP2000,MPV2003}. The amplitude of the interference oscillations is reduced by a factor of \begin{equation} \Upsilon = \left\langle {\rm e}^{i\vartheta}\right\rangle \,, \label{eq:upsilon} \end{equation} where the angular brackets can denote either an ensemble or a time average. In the case of Gaussian or quantum fluctuations with $\langle \vartheta \rangle=0$, this factor becomes \begin{equation} \Upsilon = {\rm e}^{-\frac{1}{2}\langle \vartheta^2 \rangle} \, . \label{eq:Gaussian_fluct} \end{equation} This form also holds in the case of thermal fluctuations~\cite{BP2001}. In our treatment, we assume an approximation in which the electrons move on classical trajectories. More generally, the electrons are in wavepacket states. However, under many circumstances, the sizes of the wavepackets can be small compared to the path separation, so the classical path approximation is good. Wavepacket sizes which have been realized in experiments~\cite{NH93} can be less than $1\, {\rm \mu m}$, which is one to two orders of magnitude smaller than the other length scales characterizing the paths. A more detailed discussion of the effects of finite wavepacket size was given in Ref.~\cite{lF1997}. The purpose of the present paper is to discuss a particularly simple version of this type of decoherence produced by a classical, sinusoidal electromagnetic field. 
If the period of oscillation of the field is short compared to the time scale over which the interference pattern can be measured, then a time average must be taken in Eq.~(\ref{eq:upsilon}), with a resulting loss of contrast. We consider the case of a linearly polarized, monochromatic electromagnetic wave of frequency $\omega$ which propagates in a direction perpendicular to the plane containing the electron beams. Let the wave be polarized in the $z$-direction and propagate in the $y$-direction, with the plane of the electron paths being the $x$-$z$ plane. For a path confined to this plane, we have \begin{equation} \frac{1}{2} \,d\sigma_{\mu \nu}\, F^{\mu \nu} = dt\, dx \, F^{tx} + dt\, dz \, F^{tz} + dx\, dz \, F^{xz} \,. \end{equation} In the present case, where $E^x = B^y =0$, Eq.~(\ref{E:phase_difference2}) becomes \begin{equation} \vartheta= e \int dt\,dz \,E^z \,. \label{E:phase_difference3} \end{equation} Let the $z$-component of the electric field take the form \begin{equation} E^z(x^{\mu})=\mathbb{E}(x,y,z)\cos(k\,y-\omega\,t), \label{eq:Efield} \end{equation} where the real modulated amplitude $\mathbb{E}(x,y,z)$ is assumed to be a slowly varying function of $y$, compared with the sinusoidal oscillation. We can write \begin{equation}\label{E:vartheta} \vartheta(t_0) = e\!\int_{\Omega}dt\,dz\, \mathbb{E}(x,y,z)\cos(k\,y-\omega\,t-\omega\,t_0)\, , \end{equation} where $t_0$ is the electron emission time. More precisely, it is the time at which the center of a localized wavepacket is emitted. If the measuring process takes a sufficiently long time compared with the electron flight time, we will observe a result which is averaged over $t_0$. Therefore, let $t_0$ be a random variable and take the time average over that variable. That is, for a function $f$ of a random time variable $\xi$, the time average is defined by \begin{equation} \bigl<f(\xi)\bigr>\equiv\lim_{\Xi\rightarrow\infty} \frac{1}{2\,\Xi}\int_{-\Xi}^{+\Xi}d\xi\,f(\xi). 
\end{equation} However, before taking the time average, we will rewrite Eq.~\eqref{E:vartheta} as \begin{equation}\label{E:vartheta_1} \vartheta=\mathbb{A}\cos(\omega\,t_0)+\mathbb{B}\sin(\omega\,t_0), \end{equation} where \begin{align} &\mathbb{A}=e\!\int_{\Omega}dt\,dz\,\mathbb{E}(x,y,z)\cos(k\,y-\omega\,t),\\ &\mathbb{B}=e\!\int_{\Omega}dt\,dz\,\mathbb{E}(x,y,z)\sin(k\,y-\omega\,t), \end{align} and we have the average of the time-varying phase factor given by, \begin{align} \Upsilon = \bigl<e^{i\vartheta}\bigr> &=\lim_{\Xi\rightarrow\infty}\frac{1}{2\,\Xi}\! \int_{-\Xi}^{+\Xi}\!dt_0\;e^{i\,\bigl[\mathbb{A}\cos(\omega\,t_0)+ \mathbb{B}\sin(\omega\,t_0)\bigr]},\notag\\ &=J_0\bigl(\left|\,\mathbb{C}\,\right|\bigr), \label{eq:Upsilon} \end{align} where $J_0$ is a Bessel function and \begin{align}\label{E:const_C} \mathbb{C} &=\mathbb{A}+i\,\mathbb{B}\notag\\ &=e\!\int_{\Omega}dt\,dz\,\mathbb{E}(x,y,z)\,e^{i(k\,y-\omega\,t)}. \end{align} Note that in the limit that $|\mathbb{C}| \ll 1$, we can Taylor expand the Bessel function $J_0$ and write \begin{equation} \Upsilon \approx 1 - \frac{1}{4} |\mathbb{C}|^2 + \frac{1}{64} |\mathbb{C}|^4 + \cdots \,. \end{equation} This agrees through order $|\mathbb{C}|^2$ with the result that would be obtained from Eq.~(\ref{eq:Gaussian_fluct}) for Gaussian fluctuations, as $\langle \vartheta^2 \rangle =\frac{1}{2} |\mathbb{C}|^2$. As the strength of the applied field increases, the contrast factor $\Upsilon$ will monotonically decrease until the first zero of $J_0$ at $|\mathbb{C}| = 2.405$ is reached. Beyond that point, the contrast will begin to increase and then undergo damped oscillations. This behavior is quite different from that produced by Gaussian fluctuations, Eq.~(\ref{eq:Gaussian_fluct}). Now we study the possible effect on the electron interference if we shine a non-localized beam over the electron paths. 
Because the plane wave extends to infinity in the transverse direction, it is inevitable that the electron will have direct interaction with the electromagnetic fields. However, it will be shown later that the direct interaction with the electromagnetic fields is extremely small, so it can be ignored. Some years ago, Dawson and Fried~\cite{DF1967} discussed the effect of a laser beam on coherent electrons. However, these authors were concerned with a change in phase, rather than the loss of contrast with which we are concerned. \begin{figure} \caption{The two possible electron paths, $C_1$ and $C_2$, are illustrated. The electrons start at point $A$ and end at point $D$ after traversing a path which is approximated by three straight line segments. Here $\Theta$ is the time required for the first and last segments, and $T$ is the time required for the middle segment.} \label{Fi:fig_1} \end{figure} Assume that the transverse plane wave of amplitude $\mathcal{E}_0$ propagates along the $y$ axis and is polarized in the $z$ direction. The electron paths lie on the $y=0$ plane and are illustrated in Fig.~\ref{Fi:fig_1}. The quantity $\mathbb{C}$ is then given by \begin{equation} \mathbb{C}=4\,e\,\mathcal{E}_0\biggl(\frac{2c}{\omega^2\Theta}\biggr) \sin\biggl[\frac{\omega\,\Theta}{2}\biggr]\sin\biggl[\frac{1}{2}\, \omega\left(T+\Theta\right)\biggr]. \end{equation} Here $2c$ is the maximum separation between the electron paths. Experimentally attainable separations are of the order of $100\,\mu\mathrm{m}$~\cite{Hasselbach}. 
The quantity $\left|\,\mathbb{C}\,\right|^2$ is written as \begin{align}\label{E:C2} \left|\,\mathbb{C}\,\right|^2 &=16\,e^2\mathcal{E}_0^2\biggl(\frac{2c}{\omega^2\Theta}\biggr)^2\sin^2 \biggl[\frac{\omega\,\Theta}{2}\biggr]\sin^2\biggl[\frac{1}{2}\, \omega\left(T+\Theta\right)\biggr]\notag\\ &\approx\frac{32\pi}{137}\,\rho\,\biggl(\frac{2c}{\omega^2\Theta}\biggr)^2\, , \end{align} where the squares of the sine functions have been replaced by their average value of $1/2$ and the averaged energy density $\rho$ is given by \begin{equation} \rho=\frac{1}{2}\,\mathcal{E}_0^2. \end{equation} We use Lorentz-Heaviside units with $\hbar$ and the speed of light set equal to unity. Thus, $\rho$ is also the energy flux in the electromagnetic wave. Note that $\Theta = s/v$, where $v$ is the electron's speed and $s=\sqrt{c^2+l^2}$ is the length of the first and third segments of the paths. If the electron's speed is nonrelativistic, we can write \begin{equation} \left|\,\mathbb{C}\,\right|^2 = \left(\frac{E_k}{5\,{\rm keV}}\right) \left(\frac{\rho}{1\,{\rm W/cm}^2}\right) \left(\frac{2c}{s}\right)^2 \left(\frac{\lambda}{100\,\mu {\rm m}}\right)^4 \, , \end{equation} where $E_k$ is the electron kinetic energy and $\lambda$ is the wavelength of the electromagnetic wave. Thus it seems plausible that one could arrange to have $\left|\,\mathbb{C}\,\right|^2$ large enough to produce experimentally observable effects. There are some comments on this calculation: First, we assumed electron paths with sharp corners for simplicity. If one were to round out the corners slightly to make more realistic paths, the result need not change significantly. This is because we are integrating a regular integrand which varies on a time scale of the order of $1/\omega$. If the actual time scale for the electron to change direction is small compared to this time, then our piecewise trajectory is a good approximation. Note that here we are discussing the change in contrast due to the applied field. 
Sharp corners will tend to cause emission of photons, which in turn leads to decoherence even in the absence of an applied field. A second comment is that the contribution of each of the three regions, I, II and III, in Fig.~\ref{Fi:fig_1} is large compared to the final result for $\mathbb{C}$ by a factor of the order of $\Theta\, \omega$. However, the leading terms cancel when the three contributions are summed, leading to Eq.~(\ref{E:C2}). Finally, we have assumed that the electron paths are localized, whereas in an actual experiment the classical trajectories will be replaced by bundles of finite thickness. What is required here is that the electron beams be localized in the $y$-direction on a scale small compared to the wavelength of the electromagnetic field. Since the electron passes through the region where the electromagnetic fields are non-zero, it has a direct interaction with the fields. Due to the fact that the electron is in non-relativistic motion, in the low energy limit, only Thomson scattering is considered. Let $n$ be the mean number density of photons, which can approximately be expressed in terms of the electromagnetic energy density $\rho$ and the angular frequency $\omega$ as \begin{equation} n\simeq\frac{\rho}{\omega}, \end{equation} for very large $n$. As a result, the mean free path $l_{\rm mfp}$ of the Thomson scattering is given by \begin{align} l_{\rm mfp} &=\frac{1}{n\,\sigma_T}=\frac{\omega}{\sigma_T\,\rho}\\ &=9\times10^{13}\,{\rm m}\, \Bigl(\frac{\rho}{{\rm W/cm}^2}\Bigr)^{-1}\Bigl(\frac{\lambda} {\mu {\rm m}}\Bigr)^{-1}\,, \end{align} where $\sigma_T$ is the Thomson cross section. We can see that it is possible to have an incident flux which is large enough to produce observable decoherence but for which any effect from the electron-photon scattering may be ignored. 
That is, loss of phase coherence due to direct electron-photon scattering arises from the random accumulated electron wavefunction phase shifts from one or more such scattering events. However, in many realistic situations, the probability of even one such event per electron is close to zero. The above analysis shows that the change of contrast is really due to a variant of the Aharonov-Bohm effect, the averaging over the time-dependent Aharonov-Bohm phase created by fields in the interior of the electron path. It is not due to direct scattering between electrons and photons. Nonetheless, it is also of interest to consider a configuration where the applied electromagnetic field is localized in a region between the electron paths. An example is a Gaussian beam. Let the electric field in the plane of the paths be given by \begin{equation} E^z({\bf \rho})= \mathcal{E}_0 \,\exp\left(-\frac{{\boldsymbol{\rho}}^2}{\sigma^2}\right)\, \cos(\omega\,t) \, , \label{eq:Gauss_field} \end{equation} where $\boldsymbol{\rho}$ is the radius vector in the plane and $\sigma$ is the effective width of the beam in this plane. This form is a good approximation to the electric field of a linearly polarized laser beam. Suppose that this beam is normally incident upon the electron paths illustrated in Fig.~\ref{Fi:fig_1}, with the center of the beam being at the origin in this figure. A calculation which will be presented in detail in Ref.~\cite{HF2004} leads to the result, for the case that $\sigma \alt 2 c$ and $\sigma \alt 2 d$, \begin{equation} \mathbb{C} \approx - \frac{8\sqrt{\pi} e \mathcal{E}_0 d^2}{\omega^2 T \sigma} \, (1-\cos\theta)\, \cos\left(\frac{\omega T}{2}\right) \, \exp\left(-\frac{d^2}{\sigma^2}\right) \,. \end{equation} The crucial feature of this result is the factor of $\exp(-d^2/\sigma^2)$, which is extremely small in the limit of a highly localized beam, $\sigma \ll d$. 
To summarize, in this paper we have investigated the effects of a rapidly varying Aharonov-Bohm phase upon an electron interference pattern. If the time scale for the variation is short compared to the time during which the pattern is measured, then averaging over the phase variations leads to a loss of contrast. This is a form of decoherence. In principle, the lost contrast could be restored if one were able to select only those electrons which start at a fixed point in the cycle of an oscillatory Aharonov-Bohm phase. The form of decoherence studied here is an example of zero temperature decoherence. Other forms of zero temperature decoherence, which do not rely upon thermal effects, have been discussed in Refs.~\cite{Sinha97,WMJ98,WG01}. We have calculated the size of the decoherence effect produced by a monochromatic, linearly polarized electromagnetic field. The result seems to be large enough to be observable. We primarily treated the case of a non-localized plane wave. In this case, although the electromagnetic field is nonzero at the location of the electrons, we argued that one can have an observable loss of contrast even when the probability of an electron scattering from a photon is extremely small. A unique signature of the decoherence produced by sinusoidal fields is that the interference pattern can disappear and then reappear as the field strength is increased. \begin{acknowledgments} We would like to thank Ken Olum for valuable discussion. This work was supported in part by the National Science Foundation under Grant PHY-0244898. \end{acknowledgments} \end{document}
\begin{document} \title[Landau equation for hard potentials]{ Stability, well-posedness and regularity of the homogeneous Landau equation for hard potentials } \author{Nicolas Fournier} \author{Daniel Heydecker} \address{N. Fournier : Sorbonne Universit\'e, LPSM-UMR 8001, Case courrier 158, 75252 Paris Cedex 05, France.} \email{[email protected]} \address{D. Heydecker : University of Cambridge, Centre for Mathematical Sciences, Wilberforce Road, CB30WA, United Kingdom.} \email{[email protected].} \subjclass[2010]{82C40,60K35} \keywords{Fokker-Planck-Landau equation, existence, uniqueness, stability, regularity, Monge-Kantorovitch distance, Wasserstein distance, coupling, stochastic differential equations.} \begin{abstract} We establish the well-posedness and some quantitative stability of the spatially homogeneous Landau equation for hard potentials, using some specific Monge-Kantorovich cost, assuming only that the initial condition is a probability measure with a finite moment of order $p$ for some $p>2$. As a consequence, we extend previous regularity results and show that all non-degenerate measure-valued solutions to the Landau equation, with a finite initial energy, immediately admit analytic densities with finite entropy. Along the way, we prove that the Landau equation instantaneously creates Gaussian moments. We also show existence of weak solutions under the only assumption of finite initial energy.
\varepsilonnd{abstract} \maketitle \section{Introduction and main results} \subsection{The Landau equation} We study the spatially homogeneous (Fokker-Planck-)Landau equation, which governs the time-evolution of the distribution $f_t, t\ge 0$ of velocities in a plasma: \begin{align} \label{LE} \partial_t f_t(v) = \frac 1 2 \mathrm {div}_v \Big( \int_{\rr^3} a(v-v_*)[ f_t(v_*) \nabla f_t(v) - f_t(v) \nabla f_t(v_*) ]\,\mathrm {d} v_* \Big) \varepsilonnd{align} where $a$ is the nonnegative, symmetric matrix \[ a(x) = |x|^{2+\gamma} \Pi_{x^\perp}; \quad \Pi_{x^\perp}= \mathbf{I}_3 - \frac{xx^*}{|x|^2} \] and $\gamma \in [-3,1]$ parametrises a range of models, depending on the interactions between particles. While the most physically relevant case is $\gamma=-3$, which models Coulomb interaction, we will study the cases $\gamma \in (0,1]$ of \varepsilonmph{hard potentials}, where the Landau equation \varepsilonqref{LE} may be understood as a limit of the Boltzmann equation in the asymptotic of grazing collisions, see Desvillettes \cite{d2} and Villani \cite{v:nc,v:h}. \vskip.13cm This equation was studied in detail by Desvillettes and Villani \cite{dv1,dv2}, who give results on existence, uniqueness, regularising effects and large-time behavior. Regarding stability, we refer to \cite{fgui}, on which the present work builds. Let us also mention the work of Carrapatoso \cite{c} on exponential convergence to equilibrium, some recent works of Chen, Li and Wu \cite{ch,ch2} and Morimoto, Pravda-Starov and Xu \cite{morimoto} extending the regularity results, as well as the recent gradient flow approach by Carrillo, Delgadino, Desvillettes and Wu \cite{cddw}. 
\color{black} \subsection{Notation} We denote by ${\mathcal P}({\mathbb{R}}^3)$ the set of probability measures on ${\rr^3}$, and for $p>0$, we set ${\mathcal P}_p$ to be those probability measures with a finite $p^\text{th}$ moment: ${\mathcal P}_p({\rr^3})=\{f\in{\mathcal P}({\rr^3})\;:\;m_p(f)<\infty\}$, where $m_p(f)=\int_{\rr^3} |v|^p f(\mathrm {d} v)<\infty$. \vskip.13cm We will use the following family of transportation costs to measure the distance between two solutions. For $p>0$ and $f,\tilde f\in{\mathcal P}_p({\rr^3})$, we write ${\mathcal H}(f,\tilde f)$ for the set of all couplings $${\mathcal H}(f,\tilde f) = \bigl\{ R \in {\mathcal P}({\mathbb{R}}^3\times{\mathbb{R}}^3) \; : \; R \text{ has marginals } f \text{ and } \tilde f \bigr\}.$$ With this notation, we define the optimal transportation cost $$ {\mathcal T}_{p}(f,\tilde f)= \inf \Big\{\int_{\rr^3\times\rr^3} (1+|v|^p+|\tilde v|^p)\frac{|v-\tilde v|^2}{1+|v-\tilde v|^2} R(\mathrm {d} v,\mathrm {d} \tilde v) \; : \; R \in {\mathcal H}(f,\tilde f) \Big\}. $$ The form of this optimal transportation cost is key to our stability and uniqueness arguments; the major improvement in Theorem \ref{main} below relies on a negative term which appears due to a \emph{Povzner effect} of the prefactor $(1+|v|^p+|\tilde v|^p)$. Note that for each $p\geq 2$, there is a constant $C>0$ such that $|v-\tilde v|^p \leq C(1+|v|^p+|\tilde v|^p)\frac{|v-\tilde v|^2}{1+|v-\tilde v|^2}$, so that for ${\mathcal W}_p$ the usual Wasserstein distance of order $p$, we have ${\mathcal W}_p^p \leq C {\mathcal T}_p$. It can also be checked that convergence in ${\mathcal W}_p$ implies convergence in ${\mathcal T}_p$. It follows that both ${\mathcal W}_p$ and ${\mathcal T}_p$ \color{black} generate the same topology, equivalent to weak convergence plus convergence of the $p^\text{th}$ moments. \vskip.13cm We will also consider regularity of solutions.
For $k, s\ge 0$, we define the weighted Sobolev norm $$ \|u\|_{H^k_s({\rr^3})}^2=\sum_{|\alpha|\le k}\int_{\rr^3} |\partial_\alpha u(v)|^2(1+|v|^2)^{s/2} \mathrm {d} v $$ and the weighted Sobolev space $H^k_s({\rr^3})$ for those $u$ where this is finite. By an abuse of notation, we say that $f\in {\mathcal P}({\rr^3})$ belongs to $H^k_s({\rr^3})$ if $f$ admits a density $u$ with respect to the Lebesgue measure with $u\in H^k_s({\rr^3})$, and in this case we write $\|f\|_{H^k_s({\rr^3})}=\|u\|_{H^k_s({\rr^3})}$. Similarly, we say that $f\in {\mathcal P}({\rr^3})$ is analytic if $f$ admits an analytic density. \vskip.13cm We finally define the entropy $H(f)$ of a probability measure $f\in {\mathcal P}({\rr^3})$ by $$ H(f)=\begin{cases} \int_{\rr^3} u(v)\log u(v) \mathrm {d} v & \text{if }f \text{ has a density }u; \\ \infty & \text{otherwise.} \varepsilonnd{cases} $$ \subsection{Weak solutions} We define, for $x\in {\rr^3}$, \begin{equation}\label{db} b(x)=\mathrm {div} \; a(x)=-2 |x|^\gamma x. \varepsilonnd{equation} For $(f_t)_{t\geq 0}$ a family of probability measures on ${\rr^3}$ and for $p,q>0$, we say that $(f_t)_{t\geq 0}$ belongs to $L^\infty_{loc}([0,\infty),{\mathcal P}_{p}({\rr^3})) \cap L^1_{loc}([0,\infty),{\mathcal P}_{q}({\rr^3}))$ if $$ \sup_{t \in [0,T]} m_p(f_t)+\int_0^T m_q(f_t) \mathrm {d} t <\infty \quad \hbox{for all $T>0$.} $$ We will use the following classical notion of weak solutions, see Villani \cite{v:nc} and Goudon \cite{gou}. \begin{defi}\label{ws} Let $\gamma \in (0,1]$. 
We say that $(f_t)_{t\geq 0}$ is a weak solution to \varepsilonqref{LE} if it belongs to $L^\infty_{loc}([0,\infty),{\mathcal P}_{2}({\rr^3})) \cap L^1_{loc}([0,\infty),{\mathcal P}_{2+\gamma}({\rr^3}))$, if $m_2(f_t)\leq m_2(f_0)$ for all $t\geq 0$ and if for all $\varphi\in C^2_b({\mathbb{R}}^3)$, all $t\geq 0$, \begin{align}\label{wf} \int_{\rr^3} \varphi(v)f_t(\mathrm {d} v) = \int_{\rr^3} \varphi(v)f_0(\mathrm {d} v) + \int_0^t \int_{\rr^3} \int_{\rr^3} {\mathcal L}\varphi(v,v_*) f_s(\mathrm {d} v_*)f_s(\mathrm {d} v) \mathrm {d} s, \varepsilonnd{align} where \begin{equation} \label{L} {\mathcal L}\varphi(v,v_*)= \frac 1 2 \sum_{k,\varepsilonll=1}^3 a_{k\varepsilonll}(v-v_*)\partial^2_{k\varepsilonll}\varphi(v)+ \sum_{k=1}^3 b_{k}(v-v_*)\partial_{k}\varphi(v). \varepsilonnd{equation} \varepsilonnd{defi} Since $|{\mathcal L}\varphi(v,v_*)|\leq C_\varphi (1+|v|+|v_*|)^{2+\gamma}$ for $\varphi\in C^2_b({\mathbb{R}}^3)$, every term makes sense in \varepsilonqref{wf}. \subsection{Existence and properties of weak solutions} First we summarise some results of Desvillettes and Villani. \begin{thm}[Desvillettes \& Villani, Theorems 3 and 6 in \cite{dv1}]\label{ddv} Fix $\gamma \in (0,1]$, $p\geq 2$ and $f_0\in {\mathcal P}_p({\rr^3})$. \vskip.13cm (a) If $(f_t)_{t\ge 0}$ is any weak solution to \varepsilonqref{LE} starting at $f_0$, then we have conservation of the kinetic energy, i.e. $m_2(f_t)= m_2(f_0)$ for all $t\geq0$, and the estimates $\sup_{s\in[0,\infty)} m_p(f_s)<\infty$ and $\int_0^t m_{p+\gamma}(f_s)\mathrm {d} s <\infty$ for all $t\geq 0$. Further, for all $q>0$ and $t_0>0$, $\sup_{t\geq t_0}m_q(f_t)<\infty$. \vskip.13cm (b) If $p>2$, then a weak solution starting at $f_0$ exists. 
\vskip.13cm (c) If $p>2$ and if $f_0$ is not concentrated on a line, then there exists a weak solution $(f_t)_{t\ge 0}$ starting at $f_0$ such that for all $t>0$, $f_t$ has finite entropy $H(f_t)<\infty$ and \begin{equation} \label{eq: sobolev regularity} \hbox{for all $k, s\ge 0$ and all $t_0>0$,} \quad \sup_{t\ge t_0} \|f_t\|_{H^k_s({\rr^3})}<\infty. \varepsilonnd{equation} \varepsilonnd{thm} Let us remark that the cited theorem makes the additional assumption in (a) that $f_t$ has a density for all $t\geq 0$, but this is not used in the proof. Regarding (b), the cited theorem assumes that $f_0$ has a density, but this is only required to show that the weak solution they build has a density, for all times. Concerning (c), Desvillettes and Villani also assume that $f_0$ has a density, but only use that $f_0$ is not concentrated on a line, see the remark under Lemma 9 of the cited work. To be more explicit, \varepsilonmph{$f_0$ not concentrated on a line} means that for any $x_0,u_0 \in {\rr^3}$, setting $L=\{x_0+\lambda u_0 : \lambda \in {\mathbb{R}}\}$, there holds $f_0({\rr^3} \setminus L)>0$. \vskip.13cm Regarding existence, we are able to prove the following extension to (b) above, removing the condition that $f_0 \in {\mathcal P}_{p}({\rr^3})$ for some $p>2$ and requiring only $f_0\in {\mathcal P}_2({\rr^3})$. \begin{thm}\label{mainexist} Let $\gamma \in (0,1]$ and \color{black} $f_0\in {\mathcal P}_2({\rr^3})$. There exists a weak solution to \varepsilonqref{LE} starting at $f_0$. \varepsilonnd{thm} Let us now state the following strengthening of (c), due to Chen, Li and Xu. \begin{thm}[Chen, Li \& Xu, Theorem 1.1 in \cite{ch2}]\label{analytic regularity} Fix $\gamma \in (0,1]$. Let $(f_t)_{t\ge 0}$ be a weak solution to \varepsilonqref{LE} such that the estimate \varepsilonqref{eq: sobolev regularity} holds. Then $f_t$ is analytic for all $t>0$. 
\varepsilonnd{thm} Our main result on regularity is as follows, and shows that the conclusions above apply to all weak solutions to \varepsilonqref{LE}, aside from the degenerate case of point masses. \begin{thm}\label{mainregularity} Fix $\gamma \in (0,1]$. \color{black} Let $f_0\in {\mathcal P}_2({\rr^3})$ be a probability measure which is not a Dirac mass, and let $(f_t)_{t\ge 0}$ be any weak \color{black} solution to \varepsilonqref{LE} starting at $f_0$. Then the estimate \varepsilonqref{eq: sobolev regularity} holds and for all $t>0$, $f_t$ is analytic and has a finite entropy. \varepsilonnd{thm} We emphasise that Theorem \ref{mainregularity} applies to \varepsilonmph{any} weak solution, while Theorems \ref{ddv}-(c) and \ref{analytic regularity} only show that there exists such a regular solution (see the remarks after Theorem 6 in \cite{dv1}). This follows from Theorem \ref{main} below, although we are not able to prove uniqueness under the sole assumption that $f_0 \in {\mathcal P}_2({\rr^3})$. Let us also remark that, in the excluded case where $f_0=\delta_{v_0}$ is a point mass, then the unique solution is $f_t=\delta_{v_0}$ for all $t\ge 0$ by conservation of energy and momentum, and so there is no hope of regularity. \vskip.13cm As a step towards our main stability result below, we will prove the following proposition, which improves on the appearance of moments in item (a) above and may be of independent interest. \begin{prop}\label{expo} Fix $\gamma \in (0,1]$ and consider a weak solution $(f_t)_{t\geq 0}$ to \varepsilonqref{LE}. 
There are some constants $a>0$ and $C>0$, both depending only on $\gamma$ and $m_2(f_0)$, such that $$ \int_{\rr^3} e^{a|v|^2}f_t(\mathrm {d} v) \leq C\varepsilonxp[Ct^{-2/\gamma}] \quad \hbox{for all $t>0$.} $$ \varepsilonnd{prop} Since the preliminary version of this work, the first author has studied the Boltzmann equation with hard potentials and without cutoff, which produces some exponential moments of the form $\int_{\rr^3} e^{a|v|^\rho}f_t(dv)$, with $\rho \in (\gamma,2]$ depending on the singularity of the angular collision kernel. In the case with cutoff, only exponential moments of the form $\int_{\rr^3} e^{a|v|^\gamma}f_t(dv)$ become finite for $t>0$, see Alonso-Gamba-Taskovic \cite{agt}. \subsection{Uniqueness and stability} Let us mention the following result, due to the first author and Guillin, which can be compared to our result and on which we build. \begin{thm}[Fournier \& Guillin, Theorem 2 in \cite{fgui}]\label{uniqueness} Fix $\gamma \in (0,1]$ and let $f_0\in {\mathcal P}({\rr^3})$ be such that \begin{equation}\label{emfg} \mathcal{E}_\alpha(f_0)=\int_{\rr^3} e^{|v|^\alpha}f_0(\mathrm {d} v)<\infty \quad \hbox{for some $\alpha >\gamma$.} \varepsilonnd{equation} Then there exists a unique weak solution $(f_t)_{t\ge 0}$ to \varepsilonqref{LE} starting at $f_0$. Moreover, if $\varepsilonta \in (0,1)$, $ \lambda\in (0,\infty)$ and $T>0$, then there exists a constant $C=C(T,\varepsilonta,\mathcal{E}_\alpha(f_0),\lambda)$ such that, if $(\tilde f_t)_{t\ge 0}$ is another solution satisfying $\sup_{t\in[0,T]} m_{2+\gamma}(\tilde f_t) \le \lambda$, then $$ \sup_{t\in[0,T]}\mathcal{W}_2(f_t, \tilde f_t) \le C [\mathcal{W}_2(f_0, \tilde f_0)]^{1-\varepsilonta} $$ where $\mathcal{W}_2$ is the usual Wasserstein distance with quadratic cost. 
\varepsilonnd{thm} The main result of this paper is the following, which consists in relaxing the condition \varepsilonqref{emfg} and in replacing, {\it via} another transportation cost, the H\"older dependance in the initial condition by some Lipschitz dependance. \begin{thm}\label{main} Fix $\gamma \in (0,1]$ and $p>2$ and two weak solutions $(f_t)_{t\geq 0}$ and $(\tilde f_t)_{t\geq 0}$ to \varepsilonqref{LE} starting from $f_0$ and $\tilde f_0$, both belonging to ${\mathcal P}_{p}({\rr^3})$. There is a constant $C$, depending only on $p$ and $\gamma$, such that for all $t\geq 0$, \begin{equation} {\mathcal T}_p(f_t,\tilde f_t)\leq {\mathcal T}_p(f_0,\tilde f_0) \varepsilonxp\Big(C \Big[1+\sup_{s\in[0,t]}m_p(f_s+\tilde f_s)\Big] \Big[1+\int_0^t (1+m_{p+\gamma}(f_s+\tilde f_s))\mathrm {d} s\Big] \Big). \label{eq: conclusion of main} \varepsilonnd{equation} \varepsilonnd{thm} Together with Theorem \ref{ddv}, this shows that when $f_0 \in {\mathcal P}_p({\rr^3})$ for some $p>2$, \varepsilonqref{LE} has a unique weak solution and this provides a quantitative stability estimate. \subsection{Discussion} This current paper is primarily concerned with stability, continuing the \color{black} previous analyses of the Cauchy problem for the Landau equation with hard potentials by Arsen’ev-Buryak \cite{ar}, Desvillettes-Villani \cite{dv1}, see also \cite{fgui}. In addition to the mathematical interest of uniqueness and stability, these are physically relevant criteria: if the equation is not well-posed, then it cannot be a complete description of the system and additional information is needed. Let us also note that stability estimates play a key role in the functional framework of Mischler-Mouhot \cite{mm} and Mischler-Mouhot-Wennberg \cite{mmw} for proving \varepsilonmph{propagation of chaos} for interacting particle systems, and these have been applied to Kac's process \cite{k}. See also the work of Norris \cite{n} and \cite{h1}. 
In this context, it is particularly advantageous that our result requires neither regularity nor exponential moments, as these are not readily applicable to the empirical measures of the particle system. It is also satisfying to get a Lipschitz dependence on the initial condition, so that error terms will not increase too much as time evolves. \vskip.13cm The study of stability via coupling, on which this work builds, goes back to Tanaka \cite{t} for the Boltzmann equation in the case of Maxwell molecules; let us mention the later works \cite{fm,fmi,h2} which apply the same principle in the context of hard potentials. The same idea was applied to the Landau equation by Funaki \cite{f} and has previously been applied by the first author \cite{fgui} in the context of stability and propagation of chaos. See \cite{fp} for a review of \color{black} coupling methods for PDEs. \vskip.13cm Compared to the previous literature regarding uniqueness and stability for the Landau equation with hard potentials, our main result is substantially stronger and more general. The uniqueness result of Desvillettes and Villani \cite{dv1} requires that the initial data $f_0$ has a density $u_0$ satisfying \begin{equation}\label{cudv} \int_{\rr^3} (1+|v|^2)^{p/2}u_0^2(v) \mathrm {d} \color{black} v<\infty \quad \hbox{for some $p>15+5\gamma$,} \end{equation} while the result of \cite{fgui} recalled in Theorem \ref{uniqueness} above allows measure solutions, but requires a finite exponential moment. Our result therefore allows much less localisation than either of the results above, while also not requiring any regularity on the initial data $f_0, \tilde f_0$.
Note that the Boltzmann equation for hard potentials with cutoff has also been shown to be well-posed by Mischler-Wennberg \cite{mw} as soon as $f_0\in{\mathcal P}_2({\rr^3})$, by a completely different method breaking down in the case without cutoff. Hence our condition on $f_0$ for Theorem \ref{main}, namely the finiteness of a moment of order $p>2$, seems almost optimal. While this is feasible for existence, we did not manage to prove uniqueness assuming only a finite initial energy. \vskip.13cm To summarize, our uniqueness and stability statement Theorem \ref{main} is much stronger than the previous results and almost optimal, since we assume that $f_0 \in {\mathcal P}_{2+}({\rr^3})$ instead of \eqref{cudv} as in \cite{dv1} or \eqref{emfg} as in \cite{fgui}; we slightly improve in Theorem \ref{mainexist} the existence result of \cite{dv1}, assuming that $f_0 \in {\mathcal P}_2({\rr^3})$ instead of $f_0 \in {\mathcal P}_{2+}({\rr^3})$; we are able to prove in Theorem \ref{mainregularity} the smoothness of \emph{any} weak solution with $f_0 \in {\mathcal P}_2({\rr^3})$, instead of showing the existence of one smooth solution when $f_0 \in {\mathcal P}_{2+}({\rr^3})$ as in \cite{dv1} and \cite{ch2}; and we prove the appearance of some Gaussian moments for any weak solution with $f_0 \in {\mathcal P}_2({\rr^3})$. \color{black} \vskip.13cm Let us finally mention that in the case $\gamma=0$, a stronger `ultra-analytic' regularity is known, see Morimoto, Pravda-Starov and Xu \cite{morimoto}; in this case, one has the advantage that the coefficients of \eqref{LE} are already analytic (polynomial) functions. Another approach to similar regularity results is the use of Malliavin calculus, see Gu\'erin \cite{gu}.\color{black} \vskip.13cm Finally, let us mention the current theory for other Landau equations. In the case of soft potentials $\gamma\in (-3, 0)$, we refer to \cite{fgue,fh}.
The Coulomb case $\gamma=-3$, which is most directly physically relevant and significantly more difficult, has also received significant attention, including by Villani \cite{v:nc}, Desvillettes \cite{d} and the first author \cite{fc}. \color{black} Let us mention a number of works by Guo \cite{guo}, He-Yang \cite{hy}, Golse-Imbert-Mouhot-Vasseur \cite{go} and Mouhot \cite{mou} on the Cauchy problem for the full, spatially inhomogeneous, Landau equation. Finally, the Landau-Fermi-Dirac equation has been recently studied by Alonso, Bagland and Lods \cite{abl}. \color{black} \subsection{Strategy} We emphasise that the main result is the stability and uniqueness result Theorem \ref{main}; Theorem \ref{mainregularity} about regularity will then follow from previous works. \color{black} For Theorem \ref{main}, our strategy is to build on the techniques of \cite{fgui}, \cite{n} and \cite{h2}. \color{black} The key new idea is a Povzner-type inequality \cite{p}. Considering a weighted cost of the form $(1+|v|^p+|\tilde v|^p)|v-\tilde v|^2$ instead of $|v-\tilde v|^2$, an additional, negative `Povzner term' arises which produces an advantageous cancelation and allows us to use a Gr\"onwall inequality. We rather study (at the price of technical difficulties) the cost $(1+|v|^p+|\tilde v|^p)|v-\tilde v|^2/(1+|v-\tilde v|^2)$, because it requires less moments to be well-defined. \vskip.13cm In the case of the Boltzmann equation for hard potentials without cutoff \cite{h2}, this technique leads to stability under the assumption only of some $p^\text{th}$ moment, for some computable, but potentially large, $p$, improving on previous results which required exponential moments \cite{fm}. In the case of the Landau equation, the calculations become more tractable; we find explicit, rather than explicit\varepsilonmph{able} constants, and are able to use tricks of \cite{fgui} and \cite{fh}. 
In this context, we seek to minimise the number $p$ of moments required, and very delicate calculations are needed, see Lemma \ref{cent} and its proof, to allow for any $p>2$. \subsection{Plan of the Paper} The paper is structured as follows. In Section \ref{pre}, we will present some preliminary calculations which are used throughout the paper. In Section \ref{moments}, we will prove some useful moment properties, including Proposition \ref{expo}. \vskip.13cm Section \ref{coupling} - \ref{proof of main} are devoted to the proof of our stability result Theorem \ref{main}. Section \ref{coupling} introduces the Tanaka-style coupling and presents the key estimate without proof. This allows us to prove Theorem \ref{main} in Section \ref{proof of main}, and we finally return to prove the central estimate in Section \ref{proof of cent}. Informally, the main important points of the proof are Proposition \ref{coup}, where we introduce the coupling between two given weak solutions, Lemmas \ref{ito} and \ref{cent}, containing the central computation, and Lemma \ref{mainexp} where we establish the stability estimate \varepsilonqref{eq: conclusion of main} under some additional conditions. Since the proof of the central computation Lemma \ref{cent} is rather technical, it is deferred until Section \ref{proof of cent}. \vskip.13cm Section \ref{existence} consists of a self-contained proof of our existence result Theorem \ref{mainexist}, building only on Theorem \ref{ddv} and using the de La Vall\'ee Poussin theorem and a compactness argument. \vskip.13cm In Section \ref{pf of regularity}, we prove Theorem \ref{mainregularity} about smoothness. We show a very mild regularity result (Lemma \ref{weak regularity}): solutions do not remain concentrated on lines. This allows us to apply Theorems \ref{ddv}-(c) and \ref{analytic regularity}, exploiting the uniqueness from Theorem \ref{main}. 
\vskip.13cm Finally, Section \ref{proof of cent} contains the proof of the estimate Lemma \ref{cent}. \section{Preliminaries}\label{pre} We introduce a few notation and handle some computations of constant use. We denote by $| \cdot |$ the Euclidean norm on ${\rr^3}$ and for $A$ and $B$ two $3\times 3$ matrices, we put $\| A \|^2 = \mathrm{Tr} (AA^*)$ and $\langle \!\langle A,B \rangle\!\rangle=\mathrm{Tr} (A B^*)$. \subsection{A few estimates of the parameters of the Landau equation} For $x\in {\rr^3}$, we introduce $$\sigma(x)=[a(x)]^{1/2}=|x|^{1+\gamma/2}\Pi_{x^\perp}.$$ For $x,\tilde x \in {\rr^3}$, it holds that \begin{equation}\label{p1} ||\sigma(x)||^2=2|x|^{\gamma+2} \;\;\; \hbox{and}\;\;\; \langle \!\langle \sigma(x),\sigma(\tilde x)\rangle\!\rangle =|x|^{1+\gamma/2}|\tilde x|^{1+\gamma/2}\Big(1+ \frac{(x\cdot \tilde x)^2}{|x|^2|\tilde x|^2} \Big) \geq 2 |x|^{\gamma/2}|\tilde x|^{\gamma/2}(x\cdot \tilde x). \varepsilonnd{equation} Indeed, it suffices to justify the second assertion, and a simple computation shows that $\Pi_{x^\perp}\Pi_{\tilde x^\perp}=\mathbf{I}_3 - |x|^{-2}xx^*-|\tilde x|^{-2}\tilde x\tilde x^*+|x|^{-2}|\tilde x|^{-2}(x\cdot\tilde x)x\tilde x^*$, from which we conclude that \begin{align*} \langle \!\langle \sigma(x),\sigma(\tilde x)\rangle\!\rangle=&|x|^{1+\gamma/2}|\tilde x|^{1+\gamma/2}\mathrm{Tr} \; (\Pi_{x^\perp}\Pi_{\tilde x^\perp}) =|x|^{1+\gamma/2}|\tilde x|^{1+\gamma/2}[1+|x|^{-2}|\tilde x|^{-2}(x\cdot\tilde x)^2], \varepsilonnd{align*} which is greater than $2 |x|^{\gamma/2}|\tilde x|^{\gamma/2}(x\cdot \tilde x)$ because $1+a^{2}\geq 2a$. \vskip.13cm For $a,b\geq 0$ and $\alpha \in (0,1)$, there holds \begin{equation}\label{ttaacc} |a^\alpha-b^{\alpha}| \leq (a\lor b)^{\alpha-1}|a-b|. \varepsilonnd{equation} Indeed, if e.g. $a\geq b$, then $a^\alpha-b^{\alpha}=a^{\alpha}[1-(b/a)^{\alpha}]\leq a^{\alpha}(1-b/a)=a^{\alpha-1}(a-b)$. 
\vskip.13cm For $x,\tilde x \in {\rr^3}$, recalling that $b(x)=-2|x|^\gamma x$, \color{black} we have \begin{equation}\label{p2} |b(x)-b(\tilde x)|\leq 2|x|^\gamma|x-\tilde x|+2|\tilde x| ||x|^\gamma-|\tilde x|^\gamma| \leq 2(|x|^\gamma+|\tilde x|^\gamma)|x-\tilde x|, \end{equation} because $|\tilde x| ||x|^\gamma-|\tilde x|^\gamma|\leq |\tilde x| (|x|\lor|\tilde x|)^{\gamma-1}|x-\tilde x|\leq |\tilde x|^\gamma|x-\tilde x|$ by \eqref{ttaacc}. We also have, thanks to \eqref{p1}, \begin{equation}\label{p3} ||\sigma(x)-\sigma(\tilde x)||^2\leq 2|x|^{\gamma+2}+2|\tilde x|^{\gamma+2}-4|x|^{\gamma/2}|\tilde x|^{\gamma/2}(x\cdot\tilde x) =2||x|^{\gamma/2}x-|\tilde x|^{\gamma/2}\tilde x|^2. \end{equation} Proceeding as for \eqref{p2}, we deduce that \begin{equation}\label{p4} ||\sigma(x)-\sigma(\tilde x)||^2\leq 2 (|x|^{\gamma/2}|x-\tilde x|+|\tilde x|||x|^{\gamma/2}-|\tilde x|^{\gamma/2}|)^2 \leq 2(|x|^{\gamma/2}+|\tilde x|^{\gamma/2})^2|x-\tilde x|^2. \end{equation} Finally, for $v,v_* \in {\rr^3}$, $\sigma(v-v_*)v=\sigma(v-v_*)v_*$, because $\Pi_{(v-v_*)^\perp}(v-v_*)=0$, and so \begin{equation}\label{tr} |\sigma(v-v_*)v|\leq C||\sigma(v-v_*)|| (|v|\land|v_*|)\leq C |v-v_*|^{1+\gamma/2} (|v|\land|v_*|) \leq C |v-v_*|^{\gamma/2}|v||v_*|, \end{equation} because $|v-v_*|(|v|\land|v_*|)\leq (|v|+|v_*|)(|v|\land|v_*|)\leq 2 |v||v_*|$. \subsection{Transport costs} For technical reasons, we will have to play with a larger family of transport costs. For $p>0$ and $\varepsilon> 0$, for $f,\tilde f\in{\mathcal P}_p({\rr^3})$, we define $$ {\mathcal T}_{p,\varepsilon}(f,\tilde f)= \inf \Big\{\int_{\rr^3\times\rr^3} c_{p,\varepsilon}(v,\tilde v)R(\mathrm {d} v,\mathrm {d} \tilde v) \; : \; R \in {\mathcal H}(f,\tilde f) \Big\}, $$ where \begin{equation}\label{cpe} c_{p,\varepsilon}(v,\tilde v)=(1+|v|^p+|\tilde v|^p)\varphi_\varepsilon(|v-\tilde v|^2) \quad \hbox{and}\quad \varphi_\varepsilon(r)=\frac r {1+\varepsilon r}.
\varepsilonnd{equation} We have ${\mathcal T}_{p}={\mathcal T}_{p,1}$. This definition also makes sense in the case $\varepsilon=0, \phi_0(r)=r$, in which case we require $f, \tilde f \in {\mathcal P}_{p+2}({\rr^3})$ for the integral to be well-defined. In either case, it is straightforward to see that there exists a coupling attaining the infimum; we refer to Villani \cite{v: ot} for many details of such costs. \color{black} Since $\varphi'_\varepsilon(r)=(1+\varepsilon r)^{-2}$ and $\varphi''_\varepsilon(r)=-2\varepsilon(1+\varepsilon r)^{-3}$, \begin{align}\label{ve} r\varphi_\varepsilon'(r)\leq \varphi_\varepsilon(r),\quad 0\leq \varphi_\varepsilon'(r)\leq 1 \quad \hbox{and}\quad \varphi_\varepsilon''(r)\leq 0. \varepsilonnd{align} Let us remark that the cost $c_{p,\varepsilon}$ satisfies a relaxed triangle inequality: for some $C>0$ depending only on $p>0$ and $\varepsilon\geq 0$, for all $v,w,y\in {\rr^3}$, \begin{equation}\label{eq: rti}c_{p,\varepsilon}(v,y)\le C [c_{p,\varepsilon}(v,w)+c_{p,\varepsilon}(w,y) ] \color{black}. \varepsilonnd{equation} The case where $\varepsilon=0$ was treated in \cite[Section 2]{h2}. If now $\varepsilon>0$, ${\frac{1}{2}(\varepsilon^{-1}\land r)}\le \varphi_\varepsilon(r)\le (\varepsilon^{-1}\land r)$, so that it suffices to prove \varepsilonqref{eq: rti} with the cost $c_{p,\varepsilon}(v,\tilde v)$ replaced by $(1+|v|^p+|\tilde v|^p)(|v-\tilde v|^2\land \varepsilon^{-1})$. This can be deduced from the case where $\varepsilon=0$, case-by-case, depending on which of $|v-w|^2$, $|w-y|^2$, $|v-y|^2$ are less than $\varepsilon^{-1}$. \vskip.13cm It follows that the optimal transportation costs ${\mathcal T}_{p,\varepsilon}$ are semimetrics in that one replaces the usual triangle inequality with the bound, for all $f,g,h\in {\mathcal P}_p$, \begin{equation} \label{eq: rti2} {\mathcal T}_{p,\varepsilon}(f,h)\le C [{\mathcal T}_{p,\varepsilon}(f,g)+{\mathcal T}_{p,\varepsilon}(g,h)]. 
\varepsilonnd{equation} \section{Moment Properties of the Landau Equation}\label{moments} This section is devoted to some moment estimates. We start with the appearance of Gaussian moments, following the strategy introduced by Bobylev \cite{b} for the Boltzmann equation. \begin{proof}[Proof of Proposition \ref{expo}] We consider any weak solution $(f_t)_{t\geq 0}$ to \varepsilonqref{LE}. \color{black} By Theorem \ref{ddv}, we know that $m_2(f_t)=m_2(f_0)$ for all $t\geq 0$. If $m_2(f_0)=0$, we deduce that $f_t=\delta_0$ for all $t>0$, so the result is obvious. We thus assume that $m_2(f_0)>0$ and, by scaling, that $m_2(f_0)=1$. During the proof, $C$ will denote a constant which may only depend on $\gamma$, but may vary from line to line. \vskip.13cm {\bf Step 1.} Here we prove that for all $p\geq 2$, all $t>0$, $$ \frac d{dt} m_p(f_t) \leq -p m_{p+\gamma}(f_t) + p m_p(f_t) + C p^2[m_{p-2+\gamma}(f_t)+m_{p-2}(f_t) m_{2+\gamma}(f_t)]. $$ By Theorem \ref{ddv}, we know that for all $q>0$, all $t_0>0$, $\sup_{t\geq t_0}m_q(f_t)<\infty$, so that we can apply \varepsilonqref{wf} with $\varphi(v)=|v|^p$ on $[t_0,\infty)$. We deduce that $m_p(f_t)$ is of class $C^1$ on $(0,\infty)$ and get \begin{align}\label{dmp} \frac d{dt} m_p(f_t)= \int_{\rr^3}\int_{\rr^3} {\mathcal L} \varphi(v,v_*) f_t(\mathrm {d} v_*) f_t(\mathrm {d} v) \quad \hbox{for all $t>0$.} \varepsilonnd{align} Since \color{black} $\varphi(v)=|v|^p=(v_1^2+v_2^2+v_3^2)^{p/2}$, we have $$ \partial_{k}\varphi(v)=p|v|^{p-2}v_k \quad \hbox{and}\quad \partial^2_{k\varepsilonll}\varphi(v)=p|v|^{p-2}\indiq_{\{k=\varepsilonll\}}+ p(p-2)|v|^{p-4}v_kv_\varepsilonll. 
$$ We set $x=v-v_*$ and note that, since $\sigma(x)=[a(x)]^{1/2}$ is symmetric, $\sum_{k,\ell=1}^3 a_{k\ell}(x)\indiq_{\{k=\ell\}} = \mathrm{Tr} \; a(x)= ||\sigma(x)||^2$ and $\sum_{k,\ell=1}^3 a_{k\ell}(x)v_kv_\ell = \sum_{k,\ell,j=1}^3 \sigma_{kj}(x)\sigma_{\ell j}(x)v_kv_\ell =|\sigma(x)v|^2 $. Thus \color{black} \begin{align}\label{tto} {\mathcal L}\varphi(v,v_*)=p|v|^{p-2}v\cdot b(x)+\frac p2|v|^{p-2}||\sigma(x)||^2 +\frac{p(p-2)}2|v|^{p-4}|\sigma(x)v|^2. \end{align} Recalling that $b(x)=-2|x|^\gamma x$ and that $||\sigma(x)||^2=2|x|^{\gamma+2}$ by \eqref{p1}, \color{black} we have $$ v\cdot b(x) + \frac12||\sigma(x)||^2=-2|x|^\gamma(v-v_*)\cdot v +|x|^\gamma (|v|^2+|v_*|^2 -2v\cdot v_*) =-|x|^\gamma |v|^2+|x|^\gamma |v_*|^2. $$ Since moreover $|\sigma(x)v|\leq C |x|^{\gamma/2}|v||v_*|$ by \eqref{tr}, we find that \begin{align*} {\mathcal L}\varphi(v,v_*)\leq -p|x|^\gamma |v|^p + C p^2 |x|^\gamma |v|^{p-2}|v_*|^2. \end{align*} Using now that $|x|^\gamma \geq |v|^\gamma -|v_*|^\gamma$ and that $|x|^\gamma \leq |v|^\gamma +|v_*|^\gamma$, we conclude that \begin{align}\label{tto2} {\mathcal L}\varphi(v,v_*)\leq& -p |v|^{p+\gamma} +p|v|^p|v_*|^\gamma + C p^2(|v|^{p-2+\gamma}|v_*|^2+|v|^{p-2}|v_*|^{2+\gamma}). \end{align} Plugging this into \eqref{dmp}, we find that \begin{align*} \frac d{dt} m_p(f_t)\leq& -p m_{p+\gamma}(f_t) +pm_p(f_t) m_\gamma(f_t) +C p^2 (m_{p-2+\gamma}(f_t) m_2(f_t) + m_{p-2}(f_t) m_{2+\gamma}(f_t) ). \end{align*} The conclusion follows, since $m_\gamma(f_t)\leq [m_2(f_t)]^{\gamma/2}=1$. \vskip.13cm {\bf Step 2.} We now deduce that for all $p\geq 4$, $$ \frac d{dt} m_p(f_t) \leq -p [m_p(f_t)]^{1+\gamma/(p-2)} + p m_p(f_t)+ C p^2 [m_p(f_t)]^{1-(2-\gamma)/(p-2)}.
$$ For any $\beta>\alpha\geq 2$, since $|v|^2f_t(\mathrm {d} v)$ is a probability measure, $$ m_\alpha(f_t)=\int_{\rr^3} |v|^{\alpha-2} |v|^2 f_t(\mathrm {d} v) \leq \Big(\int_{\rr^3} |v|^{\beta-2} |v|^2 f_t(\mathrm {d} v) \Big)^{(\alpha-2)/(\beta-2)} =[m_\beta(f_t)]^{(\alpha-2)/(\beta-2)}. $$ We deduce that $m_p(f_t) \leq [m_{p+\gamma}(f_t)]^{(p-2)/(p+\gamma-2)}$, whence $$ m_{p+\gamma}(f_t) \geq [m_p(f_t)]^{(p+\gamma-2)/(p-2)}=[m_p(f_t)]^{1+\gamma/(p-2)}, $$ that $$ m_{p-2+\gamma}(f_t) \leq [m_p(f_t)]^{(p-4+\gamma)/(p-2)}, $$ and that $$ m_{p-2}(f_t) m_{2+\gamma}(f_t) \leq [m_p(f_t)]^{(p-4)/(p-2)+\gamma/(p-2)}=[m_p(f_t)]^{(p-4+\gamma)/(p-2)}. $$ This completes the step, since $(p-4+\gamma)/(p-2)=1-(2-\gamma)/(p-2)$. \vskip.13cm {\bf Step 3.} For $u:(0,\infty)\to(0,\infty)$ of class $C^1$ satisfying, for some $a,b,c,\alpha,\beta>0$, for all $t>0$, $$ u'(t) \leq -a [u(t)]^{1+\alpha} +b u(t) + c[u(t)]^{1-\beta}, $$ it holds that $$ \forall \; t>0, \quad u(t) \leq \Big(\frac{2}{a\alpha t} \Big)^{1/\alpha} + \Big(\frac{4b}{a} \Big)^{1/\alpha} +\Big(\frac{4c}{a} \Big)^{1/(\alpha+\beta)}. $$ Indeed, we set $h(r)=-a r^{1+\alpha} +b r + cr^{1-\beta}$ and we observe that $$ h(r) \leq -\frac a2 r^{1+\alpha} \quad \hbox{for all $r\geq u_*=\max\{(4b/a)^{1/\alpha},(4c/a)^{1/(\alpha+\beta)}\}$.} $$ We now fix $t_0>0$. \vskip.13cm (a) If $u(t_0)\leq u_*$, we have $u(t) \leq u_*$ for all $t\geq t_0$ because $h(u_*)\leq 0$ and $u'(t)\leq h(u(t))$. \vskip.13cm (b) If now $u(t_0)>u_*$, we set $t_1=\inf\{t>t_0 : u(t)\leq u_*\}$ and observe that for $t\in [t_0,t_1)$, $$u'(t) \leq h(u(t))\leq -\frac a2 [u(t)]^{1+\alpha}.$$ Integrating this inequality, we conclude that, for all $t\in [t_0,t_1)$, $$ u(t) \leq \Big[u^{-\alpha}(t_0)+ \frac{a \alpha (t-t_0)}2\Big]^{-1/\alpha} \leq \Big[\frac 2{a\alpha (t-t_0)} \Big]^{1/\alpha}. $$ This implies that $t_1$ is finite. Since now $u(t_1)=u_*$ by definition, we deduce from (a) that $u(t)\leq u_*$ for all $t\geq t_1$. 
\vskip.13cm Hence in any case, for any $t_0>0$, any $t>t_0$, $u(t)\leq \max\{u_*,[2/(a\alpha(t-t_0))]^{1/\alpha}\}$. Letting $t_0\to 0$, we deduce that $u(t)\leq \max\{u_*,[2/(a\alpha t)]^{1/\alpha}\}$ for all $t>0$, which completes the step. \vskip.13cm {\bf Step 4.} Using Step 2 and applying Step 3 with $a=p$, $b=p$, $c=Cp^2$, $\alpha=\gamma/(p-2)$ and $\beta=(2-\gamma)/(p-2)$, we find that for all $p\ge 4$, all $t>0$, $$ m_p(f_t)\leq \Big(\frac{2(p-2)}{p\gamma t} \Big)^{(p-2)/\gamma} + 4^{(p-2)/\gamma} \color{black} +\Big(4Cp \Big)^{(p-2)/2}. $$ Changing again the value of $C$, we conclude that for all $p\ge 4$, all $t>0$, $$ m_p(f_t)\leq \Big(1+\frac{2}{\gamma t} \Big)^{p/\gamma} +(Cp)^{p/2}. $$ {\bf Step 5.} For $a>0$ and $t>0$, we write, using that $m_0(f_t)=m_2(f_t)=1$, $$ \int_{\rr^3} e^{a|v|^2}f_t(\mathrm {d} v) = \sum_{k\geq 0} \frac{a^k m_{2k}(f_t)}{k!} =1+a + \sum_{k\geq 2} \frac{a^k m_{2k}(f_t)}{k!}. $$ By Step 4, $$ \int_{\rr^3} e^{a|v|^2}f_t(\mathrm {d} v) \leq 1+a+\sum_{k\geq 2} \frac 1{k!}\Big[a^k\Big(1+\frac{2}{\gamma t} \Big)^{2k/\gamma} + a^k(2Ck)^{k} \Big]. $$ But $\sum_{k\geq 2} (k!)^{-1}(x k)^k <\infty$ if $x<1/e$ by the Stirling formula. Hence if $a<1/(2Ce)$, $$ \int_{\rr^3} e^{a|v|^2}f_t(\mathrm {d} v) \leq 1+a+\varepsilonxp\Big[a\Big(1+\frac{2}{\gamma t}\Big)^{2/\gamma}\Big]+C. $$ The conclusion follows. \varepsilonnd{proof} We next prove some technical uniform integrability property. \begin{lem}\label{ui} Fix $\gamma \in (0,1]$ and $p>2$. Let $(f_t)_{t\ge 0}$ be a weak solution to \varepsilonqref{LE}, with initial moment $m_p(f_0)<\infty$. Then, for all $\varepsilonpsilon>0$, there exists $M<\infty$ such that $$ \limsup_{t\downarrow 0} \int_{\rr^3} (1+|v|^p) \indiq_{\{|v|>M\}}f_t(\mathrm {d} v)<\varepsilonpsilon.$$ \varepsilonnd{lem} \begin{proof} Let $\psi: \mathbb{R} \rightarrow [0,1]$ be a smooth function such that $\indiq_{\{r\leq 1\}} \leq \psi (r)\leq \indiq_{\{r \le 2\}}$. 
Now, for $M\ge 1$, define $\chi_M:{\rr^3} \rightarrow [0,1]$ by $\chi_M(v)=\psi(|v|/M)$; these functions are smooth, and satisfy \begin{equation*} |v||\nabla \chi_M(v)|\le C; \qquad |v|^2|\nabla^2 \chi_M(v)|\le C \varepsilonnd{equation*} for some constant $C$, independent of $M$. A rough computation using that $|b(x)|\leq C|x|^{1+\gamma}$ and $||a(x)||\leq C |x|^{2+\gamma}$ shows that the smooth functions $\varphi_M(v)=(1+|v|^p)\chi_M(v)$ satisfy \begin{align*} |{\mathcal L} \varphi_M(v,v_*)|\le& C[|b(v-v_*)| |\nabla\varphi_M(v)|+ ||a(v-v_*)|||\nabla^2\varphi_M(v)| ] \\ \leq & C [|v-v_*|^{1+\gamma} (1+|v|^{p-1})+ |v-v_*|^{2+\gamma} (1+|v|^{p-2})]\\ \leq& C (1+|v|^{p+\gamma}+|v_*|^{p+\gamma}) \varepsilonnd{align*} \color{black} for some $C$ which does not depend on $M$. It follows from \varepsilonqref{wf} \color{black} that, for all $M$, $$ \Big|\int_{\rr^3} \varphi_M(v)(f_t-f_0)(\mathrm {d} v)\Big|\le C\int_0^t m_{p+\gamma}(f_s)\mathrm {d} s.$$ Now, fix $\varepsilonpsilon>0$. Since $m_{p+\gamma}(f_s)$ is locally integrable by Theorem \ref{ddv}, there is $t_0>0$ such that for all $t\in [0,t_0]$ and all $M\ge 1$, \begin{equation}\label{ap1} \Big|\int_{\rr^3} \varphi_M(v)(f_t-f_0)(\mathrm {d} v)\Big|\le \frac\varepsilon3. \varepsilonnd{equation} We next fix $M\geq 1$ \color{black} such that \begin{equation}\label{ap2} \int_{\rr^3} (1+|v|^p)\indiq_{\{|v|\geq M/2\}}f_0(\mathrm {d} v)<\frac{\varepsilonpsilon}{3}. \varepsilonnd{equation} For \color{black} any $t \in [0,t_0]$, any $M'\ge M$, \begin{align*} \int_{\rr^3} (1&+|v|^p)\indiq_{\{M<|v|\le M'\}}f_t(\mathrm {d} v)\le \int_{\rr^3} (\varphi_{M'}-\varphi_{M/2})(v)f_t(\mathrm {d} v)\\ =& \int_{\rr^3} \varphi_{M'}(v)(f_t-f_0)(\mathrm {d} v)-\int_{\rr^3} \varphi_{M/2}(v)(f_t-f_0)(\mathrm {d} v) + \int_{\rr^3} (\varphi_{M'}-\varphi_{M/2})(v)f_0(\mathrm {d} v) \leq \varepsilon. 
\varepsilonnd{align*} For the first two terms, we used \varepsilonqref{ap1}, while for the last term, we used that $(\varphi_{M'}-\varphi_{M/2})(v)\leq (1+|v|^p)\indiq_{\{|v|\geq M/2\}}$ and \varepsilonqref{ap2}. Taking the limit $M'\rightarrow \infty$ now gives the result. \varepsilonnd{proof} \section{Tanaka-style Coupling of Landau Processes}\label{coupling} In the spirit of Tanaka \cite{t} for the Boltzmann equation, see Funaki \cite{f} and Gu\'erin \cite{g} for the Landau equation, we will use the following coupling between solutions. For $E={\rr^3}$ or ${\rr^3}\times{\rr^3}$, we denote by $C^2_p(E)$ the set of $C^2$ functions on $E$ of which the derivatives of order $0$ to $2$ have at most polynomial growth. \begin{prop}\label{coup} Fix $\gamma \in (0,1]$, consider two weak solutions $(f_t)_{t\geq 0}$ and $(\tilde f_t)_{t\geq 0}$ to \varepsilonqref{LE} such that $\int_{\rr^3} e^{a |v|^2}(f_0+\tilde f_0)(\mathrm {d} v)<\infty$ for some $a>0$, and fix $R_0 \in {\mathcal H}(f_0,\tilde f_0)$. 
There exists a family $(R_t)_{t\geq 0}$ of probability measures on ${\rr^3}\times{\rr^3}$ such that for all $t\geq 0$, $R_t \in {\mathcal H}(f_t,\tilde f_t)$ and for all $\psi \in C^2_p({\rr^3}\times{\rr^3})$, \begin{align}\label{wec} \int_{\rr^3}rd \psi(v,\tilde v) R_t(\mathrm {d} v,\mathrm {d} \tilde v)=& \int_{\rr^3}rd \psi(v,\tilde v) R_0(\mathrm {d} v,\mathrm {d} \tilde v)\\ &+ \int_0^t \int_{\rr^3}rd \int_{\rr^3}rd {\mathcal L}L\psi(v,v_*,\tilde v,\tilde vs) R_s(\mathrm {d} v_*,\mathrm {d} \tilde vs) R_s(\mathrm {d} v,\mathrm {d} \tilde v) \mathrm {d} s,\notag \varepsilonnd{align} where \begin{align*} {\mathcal L}L\psi(v,\tilde v,v_*,\tilde vs)=&\sum_{k=1}^3 [b_k(v-v_*) \partial_{v_k}\psi(v,\tilde v) + b_k(\tilde v-\tilde vs) \partial_{\tilde v_k} \psi(v,\tilde v) ] \\ &+\frac{1}{2}\sum_{k,\varepsilonll=1}^3 [a_{k\varepsilonll}(v-v_*)\partial^2_{v_kv_\varepsilonll}\psi(v,\tilde v)+ a_{k\varepsilonll}(\tilde v-\tilde vs)\partial^2_{\tilde v_k\tilde v_\varepsilonll}\psi(v,\tilde v)]\\ & +\sum_{j,k,\varepsilonll=1}^3 \sigma_{k j}(v-v_*)\sigma_{\varepsilonll j}(\tilde v-\tilde vs) \partial^2_{v_k \tilde v_\varepsilonll}\psi(v,\tilde v). \varepsilonnd{align*} \varepsilonnd{prop} \begin{rk} Let us make the following observations. \vskip.13cm (i) This is the key coupling of $f_t, \tilde f_t$ which we will use, for some well-chosen $R_0$, to obtain an upper bound of ${\mathcal T}_p(f_t, \tilde f_t)$ to prove Theorem \ref{main}. 
\vskip.13cm (ii) This equation has a natural probabilistic meaning: the equation governing $(R_t)_{t\geq 0}$ is the Kolmogorov equation for the solution $(V_t,\tilde V_t)_{t\geq 0}$ to the nonlinear stochastic differential equation \begin{equation*} \label{full sde}\begin{cases} V_t=V_0+\int_0^t \int_{\rr^3}rd b(V_s-v_*) R_s(\mathrm {d}v_*,\mathrm {d}\tilde vs)\mathrm {d} s+ \int_0^t \int_{\rr^3}rd \sigma(V_s-v_*)N(\mathrm {d}v_*, \mathrm {d}\tilde vs,\mathrm {d} s) ; \\ \tilde V_t=\tilde V_0+\int_0^t \int_{\rr^3}rd b(\tilde V_s-\tilde vs) R_s(\mathrm {d}v_*,\mathrm {d}\tilde vs)\mathrm {d} s + \int_0^t \int_{\rr^3}rd \sigma(\tilde V_s-\tilde vs)N(\mathrm {d}v_*, \mathrm {d}\tilde vs,\mathrm {d} s) ; \\ R_t={\rm Law}(V_t,\tilde V_t) \varepsilonnd{cases} \varepsilonnd{equation*} where $N=(N^1, N^2,N^3)$ is a $3D$-white noise on ${\rr^3}\times{\rr^3}\times[0,\infty)$ with covariance measure $R_s(\mathrm {d} v_*,\mathrm {d}\tilde vs)\mathrm {d} s$; see Walsh \cite{w}. We think of this nonlinear equation as describing the time evolution of the velocities $(V_t,\tilde V_t)_{t\geq0}$ of a `typical' pair of particles, with $V_t \sim f_t$ and $\tilde V_t \sim \tilde f_t$. \vskip.13cm (iii) Since $R_s \in {\mathcal H}(f_s,\tilde f_s)$, we have $\int_{\rr^3}rd b(V_s-v_*) R_s(\mathrm {d}v_*,\mathrm {d}\tilde vs)\mathrm {d} s=\int_{\rr^3} b(V_s-v_*) f_s(\mathrm {d}v_*)\mathrm {d} s$. Similarly, $\int_0^t \int_{\rr^3}rd \sigma(V_s-v_*)N(\mathrm {d}v_*, \mathrm {d}\tilde vs,\mathrm {d} s) =\int_0^t \int_{\rr^3} \sigma(V_s-v_*) W(\mathrm {d}v_*,\mathrm {d} s)$, for some $3D$-white noise on ${\rr^3}\times[0,\infty)$ of covariance measure $f_s(\mathrm {d} v_*) \mathrm {d} s$. Hence {\varepsilonm in law}, the first SDE (for $(V_t)_{t\geq 0}$) does not depend on $(\tilde f_t)_{t\geq 0}$. \vskip.13cm (iv) The specific form of this coupling is important, rather than coupling processes using the same Brownian motion. 
The main idea is that we want $V_t$ and $\tilde V_t$ to be as close as possible. Using the white noise in this way, we isolate the effect of a coupled pair $(v_*, \tilde vs)$, with $v_*$ as close as possible to $\tilde vs$, in the background against our process $(V_s, \tilde V_s)$. It is also important that the white-noise covariance measure is $R_t(\mathrm {d} v_*,\mathrm {d} \tilde vs)\mathrm {d} s$, with $R_t$ the law of $(V_t,\tilde V_t)$. Replacing $R_t$, in the covariance measure of the white noise, with any other coupling (e.g. the optimal coupling for ${\mathcal T}_p(f_t,\tilde f_t)$) \color{black} would not allow us to use some symmetry arguments. \vskip.13cm (v) We do not claim the uniqueness of solutions to \varepsilonqref{wec}; existence is sufficient for our needs. \varepsilonnd{rk} \begin{proof}[Proof of Proposition \ref{coup}] We sketch the proof, as the key points are standard for nonlinear diffusion equations and the Landau equation, see Gu\'erin \cite{g}. We fix $k\geq 1$ and define the truncated {\it two level} coefficients $B_k:{\rr^3}\times{\rr^3} \rightarrow {\rr^3}\times{\rr^3}$ and $\Sigma_k: {\rr^3}\times{\rr^3}\rightarrow {\mathcal M}_{6\times 3}({\mathbb{R}})$ by $$ B_k\begin{pmatrix} x \\ \tilde x\varepsilonnd{pmatrix}=\begin{pmatrix} b_k(x) \\ b_k(\tilde x)\varepsilonnd{pmatrix};\qquad \Sigma_k \begin{pmatrix} x \\ \tilde x\varepsilonnd{pmatrix}=\begin{pmatrix} \sigma_k(x) \\ \sigma_k(\tilde x)\varepsilonnd{pmatrix}, $$ where $b_k(x)=-2(|x|\land k)^{\gamma} x$ and $\sigma_k(x)=(|x|\land k)^{\gamma/2} |x| \Pi_{x^\perp}$. Proceeding as in \varepsilonqref{p2} and \varepsilonqref{p4}, one realises that $B_k$ and $\Sigma_k$ are globally Lipschitz continuous. \vskip.13cm Now, let $W=(W^1, W^2, W^3)$ be a white noise on $[0,\infty)\times (0,1)$ with covariance measure $\mathrm {d} s\mathrm {d}\alpha$. 
The usual arguments for nonlinear SDEs \cite{g} imply that there exists a process $X^k_t=(V^k_t, \tilde V^k_t)$ with initial distribution $X^k_0\sim R_0$, and a copy $Y^k_t$ defined on the probability space \color{black} $((0,1), \mathcal{B}(0,1),d\alpha)$, with ${\rm Law}(X^k_t)={\rm Law}(Y^k_t)$ and for all $t\geq 0$, \color{black} \begin{equation*} X^k_t=X^k_0+\int_0^t \int_{(0,1)} B_k(X^k_s-Y^k_s(\alpha))\hspace{0.1cm} \mathrm {d}\alpha \mathrm {d} s +\int_0^t \int_{(0,1)} \Sigma_k(X^k_s-Y^k_s(\alpha)) W(\mathrm {d} s,\mathrm {d} \alpha). \varepsilonnd{equation*} For $\psi \in C^2_p({\rr^3}\times{\rr^3})$, applying It\^o's formula and taking expectations, we find \begin{align*} \mathbb{E}[\psi(X^k_t)]=&\mathbb{E}[\psi(X^k_0)]+ \int_0^t \int_{(0,1)} \mathbb{E}[\nabla \psi(X^k_s)\cdot B_k(X^k_s-Y^k_s(\alpha))] \mathrm {d}\alpha \mathrm {d} s\\ & +\frac12 \sum_{i,j=1}^6 \int_0^t \int_{(0,1)} \mathbb{E}[\partial_{ij} \psi(X^k_s) [\Sigma_k(X^k_s-Y^k_s(\alpha)) \Sigma_k^*(X^k_s-Y^k_s(\alpha))]_{ij} ]\mathrm {d} \alpha \mathrm {d} s. \varepsilonnd{align*} Writing $R^k_t$ for the law of $X^k_t$ (and of $Y^k_t$), we thus get \begin{align*} \int_{{\rr^3}\times{\rr^3}}\!\! \psi(x) R_t^k(\mathrm {d} x)=&\int_{{\rr^3}\times{\rr^3}} \!\!\psi(x) R_0(\mathrm {d} x)+ \int_0^t \!\! \int_{\rr^3}rd\! \int_{\rr^3}rd \!\!\nabla \psi(x)\cdot B_k(x-x_*) R_s^k(\mathrm {d} x) R_s^k(\mathrm {d} x_*)\mathrm {d} s\\ &\!\!+\frac12 \sum_{i,j=1}^6 \int_0^t \!\! \int_{\rr^3}rd\!\int_{\rr^3}rd \!\!\partial_{ij} \psi(x) [\Sigma_k(x-x_*) \Sigma_k^*(x-x_*)]_{ij} ]R_s^k(\mathrm {d} x) R_s^k(\mathrm {d} x_*)\mathrm {d} s. 
\varepsilonnd{align*} This precisely rewrites as \begin{align}\label{eq: approximate equations} \int_{{\rr^3}\times{\rr^3}} \psi(v,\tilde v) R_t^k(\mathrm {d} v,\mathrm {d} \tilde v)=&\int_{{\rr^3}\times{\rr^3}} \psi(v,\tilde v) R_0(\mathrm {d} v,\mathrm {d} \tilde v)\\ &+ \int_0^t \int_{\rr^3}rd \int_{\rr^3}rd {\mathcal L}L_k \psi(v,\tilde v,v_*,\tilde vs) R_s^k(\mathrm {d} v,\mathrm {d} \tilde v) R_s^k(\mathrm {d} v_*,\mathrm {d} \tilde vs)\mathrm {d} s,\notag \varepsilonnd{align} where ${\mathcal L}L_k\psi$ is defined as ${\mathcal L}L\psi$, replacing everywhere $b$, $\sigma$ and $a=\sigma\sigma^*$ by $b_k$, $\sigma_k$ and $a_k=\sigma_k\sigma^*_k$. \vskip.13cm For $\psi(v,\tilde v)=\phi(v)+\phi(\tilde v)$, we have ${\mathcal L}L_k \psi(v,\tilde v,v_*,\tilde vs)={\mathcal L}_k\phi(v,v_*)+{\mathcal L}_k\phi(\tilde v,\tilde vs)$, where ${\mathcal L}_k \phi$ is defined as ${\mathcal L}\phi$, replacing $b$ and $a$ by $b_k$ and $a_k$. It is then straightforward to check that the approximate equation \varepsilonqref{eq: approximate equations} propagates \color{black} moments, uniformly in $k$, using arguments similar to those of \cite[Theorem 3]{dv1} or Step 1 of the proof of Proposition \ref{expo}. In particular, under our initial Gaussian moment assumption, all moments of $R^k_t$ are bounded, uniformly in $k\geq1$, locally uniformly in $t\geq 0$. \vskip.13cm It is then very classical to let $k\to \infty$ in \varepsilonqref{eq: approximate equations}, using a compactness argument, and to deduce the existence of a family of probability measures $(R_t)_{t\geq 0}$ solving \varepsilonqref{wec} for all $\psi \in C^2_p({\rr^3}\times{\rr^3})$. See Section \ref{existence} for a similar procedure (with far fewer moment estimates). \color{black} \vskip.13cm Finally, we address the claim that $R_t$ is a coupling $R_t\in {\mathcal H}(f_t, \tilde f_t)$. Let us write $g_t, \tilde{g}_t$ for the two marginals of $R_t$. 
For any $\varphi\in C^2_b({\rr^3})$, we set $\psi(v,\tilde v)=\varphi(v)$ and observe that ${\mathcal L}L\psi(v,\tilde v,v_*,\tilde vs)={\mathcal L}\varphi(v,v_*)$, so that \varepsilonqref{wec} tells us that $$\int_{\rr^3} \varphi(v)g_t(\mathrm {d} v)=\int_{\rr^3} \varphi(v)f_0(\mathrm {d} v) +\int_0^t \int_{\rr^3} {\mathcal L}\varphi(v,v_*) g_s(\mathrm {d}v_*)g_s(\mathrm {d} v) \mathrm {d} s.$$ \color{black} In other words, $(g_t)_{t\geq 0}$ is a weak solution to \varepsilonqref{LE} which starts at $f_0$. Since $f_0$ is assumed to have a Gaussian moment, the uniqueness result Theorem \color{black} \ref{uniqueness} applies and so $(g_t)_{t\geq 0}=(f_t)_{t\geq 0}$ as desired. The argument that $(\tilde{g}_t)_{t\geq 0}=(\tilde f_t)_{t\geq 0}$ is identical. \varepsilonnd{proof} We now carefully apply the coupling operator to our cost functions. \begin{lem}\label{ito} Adopt the notation of Proposition \ref{coup} and fix $p\geq 2$ and $\varepsilon\in [0,1]$, and let $c_{p,\varepsilonpsilon}$ be the transport cost defined in \varepsilonqref{cpe}. 
For $v,v_*,\tilde v,\tilde vs \in {\rr^3}$, \begin{align*} {\mathcal L}L c_{p,\varepsilon}(v,v_*,\tilde v,\tilde vs) \leq& k_{p,\varepsilon}^{(1)}(v,v_*,\tilde v,\tilde vs)+k_{p,\varepsilon}^{(2)}(v,v_*,\tilde v,\tilde vs) +k_{p,\varepsilon}^{(2)}(\tilde v,\tilde vs,v,v_*)\\ &+k_{p,\varepsilon}^{(3)}(v,v_*,\tilde v,\tilde vs)+k_{p,\varepsilon}^{(3)}(\tilde v,\tilde vs,v,v_*), \varepsilonnd{align*} where, setting $x=v-v_*$ and $\tilde x=\tilde v-\tilde vs$, \begin{align*} k_{p,\varepsilon}^{(1)}(v,v_*,\tilde v,\tilde vs)=& (1+|v|^p+|\tilde v|^p)\varphi_{\varepsilon}'(|v-\tilde v|^2)\Big[2(v-\tilde v)\cdot(b(x)-b(\tilde x)) + ||\sigma(x)-\sigma(\tilde x)||^2 \Big],\\ k_{p,\varepsilon}^{(2)}(v,v_*,\tilde v,\tilde vs)=& \varphi_{\varepsilon}(|v-\tilde v|^2)\Big[p|v|^{p-2}v\cdot b(x)+\frac p2|v|^{p-2}||\sigma(x)||^2 +\frac{p(p-2)}2|v|^{p-4}|\sigma(x)v|^2\Big], \\ k_{p,\varepsilon}^{(3)}(v,v_*,\tilde v,\tilde vs)=&2p |v|^{p-2}\varphi_{\varepsilon}'(|v-\tilde v|^2) [\sigma(x)v]\cdot[(\sigma(x)-\sigma(\tilde x))(v-\tilde v)]. \varepsilonnd{align*} \varepsilonnd{lem} \begin{proof} Fix $p\geq 2$, $\varepsilon\ge 0$ and let $\psi(v,\tilde v)=c_{p,\varepsilon}(v,\tilde v)=(1+|v|^p+|\tilde v|^p)\varphi_\varepsilon(|v-\tilde v|^2)$. We have $$ \partial_{v_k}\psi(v,\tilde v)=p|v|^{p-2}v_k\varphi_\varepsilon(|v-\tilde v|^2)+2(v_k-\tilde v_k)(1+|v|^p+|\tilde v|^p)\varphi'_\varepsilon(|v-\tilde v|^2)$$ and a symmetric expression for $ \partial_{\tilde v_k}\psi(v,\tilde v)$. 
Differentiating again, we find \begin{align*} \partial^2_{v_k v_\varepsilonll}\psi(v,\tilde v)=&p|v|^{p-2}\indiq_{\{k=\varepsilonll\}}\varphi_\varepsilon(|v-\tilde v|^2)+ p(p-2)|v|^{p-4} v_kv_\varepsilonll\varphi_\varepsilon(|v-\tilde v|^2) \\ &+ 2p|v|^{p-2}v_k(v_\varepsilonll-\tilde v_\varepsilonll)\varphi'_\varepsilon(|v-\tilde v|^2)+2\indiq_{\{k=\varepsilonll\}}(1+|v|^p+|\tilde v|^p)\varphi_\varepsilon'(|v-\tilde v|^2) \\ & + 4(v_k-\tilde v_k)(v_\varepsilonll-\tilde v_\varepsilonll)(1+|v|^p+|\tilde v|^p)\varphi''_\varepsilon(|v-\tilde v|^2) \\ & +2p|v|^{p-2} (v_k-\tilde v_k)v_\varepsilonll \varphi_\varepsilon'(|v-\tilde v|^2) \varepsilonnd{align*} and a symmetric expression for $\partial^2_{\tilde v_k \tilde v_\varepsilonll}\psi(v,\tilde v)$. Concerning the cross terms, \begin{align*} \partial^2_{v_k \tilde v_\varepsilonll}\psi(v,\tilde v)=&2p|v|^{p-2} v_k(\tilde v_\varepsilonll-v_\varepsilonll)\varphi'_\varepsilon(|v-\tilde v|^2) +2p|\tilde v|^{p-2} (v_k-\tilde v_k)\tilde v_\varepsilonll \varphi'_\varepsilon(|v-\tilde v|^2)\\ &-4(v_k-\tilde v_k)(v_\varepsilonll-\tilde v_\varepsilonll)(1+|v|^p+|\tilde v|^p)\varphi_\varepsilon''(|v-\tilde v|^2) \\ & -2\indiq_{\{k=\varepsilonll\}}(1+|v|^p+|\tilde v|^p)\varphi'_\varepsilon(|v-\tilde v|^2). \varepsilonnd{align*} Let us now examine the sums in the definition of ${\mathcal L}L \psi$ one by one. 
First, \begin{align*} &\sum_{k=1}^3 [b_k(v-v_*) \partial_{v_k}\psi(v,\tilde v) + b_k(\tilde v-\tilde vs) \partial_{\tilde v_k} \psi(v,\tilde v)] \\ =& p |v|^{p-2} v\cdot b(v-v_*)\varphi_\varepsilon(|v-\tilde v|^2) & (=A_1) \\ &+ p |\tilde v|^{p-2} \tilde v\cdot b(\tilde v-\tilde vs)\varphi_\varepsilon(|v-\tilde v|^2) & (=A_2) \\ & + 2(1+|v|^p+|\tilde v|^p)(v-\tilde v)\cdot (b(v-v_*)-b(\tilde v-\tilde vs))\varphi'_\varepsilon(|v-\tilde v|^2).& (=A_3) \varepsilonnd{align*} Next, using that for $x,y,z \in {\rr^3}$, $\mathrm{Tr} \; a(x)=||\sigma(x)||^2$ and $\sum_{k,\varepsilonll=1}^3 a_{k\varepsilonll}(x)y_kz_\varepsilonll= [\sigma(x)y]\cdot[\sigma(x)z]$, \begin{align*} \frac{1}{2}\sum_{k,\varepsilonll=1}^3 a_{k\varepsilonll}(v-v_*)&\partial^2_{v_kv_\varepsilonll}\psi(v,\tilde v) = \frac{p}{2}|v|^{p-2}\|\sigma(v-v_*)\|^2\varphi_\varepsilon(|v-\tilde v|^2) &(=B_1)\\ &+\frac{p(p-2)}{2}|v|^{p-4}|\sigma(v-v_*)v|^2\varphi_\varepsilon(|v-\tilde v|^2) &(=B_2)\\ &+2p |v|^{p-2}[\sigma(v-v_*)v]\cdot[\sigma(v-v_*)(v-\tilde v)] \varphi'_\varepsilon(|v-\tilde v|^2) &(=B_3) \\ &+ (1+|v|^p+|\tilde v|^p)\|\sigma(v-v_*)\|^2\varphi'_\varepsilon(|v-\tilde v|^2) &(=B_4) \\ & +2 (1+|v|^p+|\tilde v|^p)|\sigma(v-v_*)(v-\tilde v)|^2\varphi''_\varepsilon(|v-\tilde v|^2). 
&(=B_5) \varepsilonnd{align*} Similarly, \begin{align*} \frac{1}{2}\sum_{k,\varepsilonll=1}^3 a_{k\varepsilonll}(\tilde v-\tilde vs)&\partial^2_{\tilde v_k\tilde v_\varepsilonll}\psi(v,\tilde v) = \frac{p}{2}|\tilde v|^{p-2}\|\sigma(\tilde v-\tilde vs)\|^2\varphi_\varepsilon(|v-\tilde v|^2)&(=C_1)\\ &+\frac{p(p-2)}{2}|\tilde v|^{p-4}|\sigma(\tilde v-\tilde vs)\tilde v|^2\varphi_\varepsilon(|v-\tilde v|^2) &(=C_2)\\ &+2p|\tilde v|^{p-2}[\sigma(\tilde v-\tilde vs)\tilde v]\cdot[\sigma(\tilde v-\tilde vs)(\tilde v-v)]\varphi'_\varepsilon(|v-\tilde v|^2)&(=C_3) \\ &+ (1+|v|^p+|\tilde v|^p)\|\sigma(\tilde v-\tilde vs)\|^2\varphi'_\varepsilon(|v-\tilde v|^2)&(=C_4) \\ & +2 (1+|v|^p+|\tilde v|^p)|\sigma(\tilde v-\tilde vs)(\tilde v-v)|^2\varphi''_\varepsilon(|v-\tilde v|^2).&(=C_5) \varepsilonnd{align*} Finally, we look at the cross-terms: \begin{align*} &\sum_{j,k,\varepsilonll=1}^3 \sigma_{kj}(v-v_*)\sigma_{\varepsilonll j}(\tilde v-\tilde vs) \partial^2_{v_k\tilde v_\varepsilonll}\psi(v,\tilde v)&\\ =& -2p |v|^{p-2}[\sigma(v-v_*)v]\cdot[\sigma(\tilde v-\tilde vs)(v-\tilde v)]\varphi'_\varepsilon(|v-\tilde v|^2) &(=D_1)\\ & +2p |\tilde v|^{p-2}[\sigma(v- v_* )(v-\tilde v)]\cdot[\sigma(\tilde v-\tilde vs)\tilde v]\varphi'_\varepsilon(|v-\tilde v|^2) &(=D_2)\\ &-4(1+|v|^p+|\tilde v|^p) [\sigma(v-v_*)(v-\tilde v)]\cdot [\sigma(\tilde v-\tilde vs)(v-\tilde v)]\varphi''_\varepsilon(|v-\tilde v|^2)&(=D_3)\\ & -2(1+|v|^p+|\tilde v|^p) \langle \!\langle \sigma(v-v_*),\sigma(\tilde v-\tilde vs)\rangle\!\rangle \varphi'_\varepsilon(|v-\tilde v|^2). 
&(=D_4) \varepsilonnd{align*} Recalling the notation $x=v-v_*$ and $\tilde x=\tilde v-\tilde vs$, we find that \begin{align*} A_3+B_4+C_4+D_4=&k_{p,\varepsilon}^{(1)}(v,v_*,\tilde v,\tilde vs),\\ A_1+B_1+B_2=&k_{p,\varepsilon}^{(2)}(v,v_*,\tilde v,\tilde vs),\\ A_2+C_1+C_2=&k_{p,\varepsilon}^{(2)}(\tilde v,\tilde vs,v,v_*),\\ B_3+D_1=&k_{p,\varepsilon}^{(3)}(v,v_*,\tilde v,\tilde vs),\\ C_3+D_2=&k_{p,\varepsilon}^{(3)}(\tilde v,\tilde vs,v,v_*), \varepsilonnd{align*} and finally that \begin{align*} B_5+C_5+D_3=&2 (1+|v|^p+|\tilde v|^p)|(\sigma(x)-\sigma(\tilde x))(v-\tilde v)|^2\varphi''_\varepsilon(|v-\tilde v|^2)\leq 0 \varepsilonnd{align*} since $\varphi_\varepsilon''$ is nonpositive, see \varepsilonqref{ve}. \varepsilonnd{proof} We finally state the following central inequality. \begin{lem}\label{cent} There is a constant $C$, depending only on $p\geq 2$ and $\gamma \in (0,1]$, such that for all $\varepsilon \in (0,1]$, all $v,v_*,\tilde v,\tilde vs \in {\rr^3}$, \begin{align*} {\mathcal L}L c_{p,\varepsilon}(v,v_*,\tilde v,\tilde vs) \leq& [2 - p] c_{p+\gamma,\varepsilon}(v,\tilde v)\\ &+ C\sqrt\varepsilon (1+|v_*|^p+|\tilde vs|^p) c_{p+\gamma,\varepsilon}(v,\tilde v)\\ &+ C\sqrt\varepsilon (1+|v|^p+|\tilde v|^p) c_{p+\gamma,\varepsilon}(v_*,\tilde vs)\\ &+ \frac{C}{\sqrt \varepsilon} (1+|v_*|^{p+\gamma}+|\tilde vs|^{p+\gamma})c_{p,\varepsilon}(v,\tilde v) \\ &+ \frac{C}{\sqrt \varepsilon} (1+|v|^{p+\gamma}+|\tilde v|^{p+\gamma})c_{p,\varepsilon}(v_*,\tilde vs). \varepsilonnd{align*} \varepsilonnd{lem} Let us now highlight the main features of this bound, which motivate our strategy. The last two lines are amenable to a Gr\"onwall-type estimate, provided $\int_0^T m_{p+\gamma}(f_s+\tilde f_s)\mathrm {d} s <\infty$, but this is prevented by the appearance of $c_{p+\gamma, \varepsilon}$ in the earlier terms; in \cite{fgui}, analogous terms are handled using an exponential moment estimate. 
The key observation is that, by choosing $p>2$, the first line gives a negative multiple of this `bad' term, which can absorb the second and third lines if $\varepsilonpsilon>0$ is small enough (and if we know that $\sup_{[0,T]} m_{p}(f_s+\tilde f_s)<\infty$), allowing us to use a Gr\"onwall estimate. \vskip.13cm Let us mention that a rather direct computation, with $\varepsilon=0$, i.e. with the cost $c_{p,0}(v,\tilde v)=(1+|v|^p+|\tilde v|^p)|v-\tilde v|^2$, relying on the simple estimates \varepsilonqref{p2}, \varepsilonqref{p4} and \varepsilonqref{tr}, shows that \begin{align*} {\mathcal L}L c_{p,0}(v,v_*,\tilde v,\tilde vs) \leq& [32 - p] c_{p+\gamma,0}(v,\tilde v)\\ &+C(1+|v_*|^{p+\gamma}+|\tilde vs|^{p+\gamma}) c_{p,0}(v,\tilde v) + C(1+|v|^{p+\gamma}+|\tilde v|^{p+\gamma}) c_{p,0}(v_*,\tilde vs). \varepsilonnd{align*} Choosing $p=32$, the first term is nonpositive, and this would lead to a stability result for the cost ${\mathcal T}_{32,0}$, for initial conditions in ${\mathcal P}_{34}({\rr^3})$, since ${\mathcal T}_{32,0}$ requires some moments of order $34$ to be well-defined. \color{black} \vskip.13cm The proof of Lemma \ref{cent} is much more complicated; we have to be very careful and to use many cancelations to replace $[32-p]$ by $[2-p]$. Moreover, we have to deal with $c_{p,\varepsilon}$ with $\varepsilon>0$ instead of $c_{p,0}$, because ${\mathcal T}_{p,0}$ requires moments of order $p+2$ to be well-defined. All this \color{black} is crucial to obtain a stability result in ${\mathcal P}_{p}({\rr^3})$, for any $p>2$. Since the proof is rather lengthy, it is deferred to Section \ref{proof of cent} for the ease of readability. \section{Stability}\label{proof of main} We now give the proof of our stability estimate. We first deal with the case when the initial data have a finite Gaussian moment, and then carefully relax this assumption. 
\begin{lem}\label{mainexp} Fix $\gamma\in (0,1]$ and let $(f_t)_{t\ge 0}$, $(\tilde f_t)_{t\ge 0}$ be weak solutions to \varepsilonqref{LE} with initial moments $ \int_{\rr^3} e^{a|v|^2}(f_0+\tilde f_0)(\mathrm {d} v)<\infty$ for some $a>0$. Then the stability estimate \varepsilonqref{eq: conclusion of main} holds true. \varepsilonnd{lem} \begin{proof} We fix $p>2$, consider $\varepsilon\in(0,1]$ to be chosen later and introduce $R_0\in{\mathcal H}(f_0,\tilde f_0)$ such that $$ {\mathcal T}_{p,\varepsilon}(f_0,\tilde f_0)=\int_{\rr^3}rd c_{p,\varepsilon}(v,\tilde v) R_0(\mathrm {d} v,\mathrm {d} \tilde v). $$ Note that $R_0$ depends on $\varepsilon$, but this is not an issue. We then introduce $(R_t)_{t\geq 0}$ as in Proposition \ref{coup}, which is licit thanks to our initial Gaussian moment condition. We know that for each $t\geq 0$, $R_t \in {\mathcal H}(f_t,\tilde f_t)$, from which we conclude that \begin{equation}\label{ww} u_\varepsilon(t)=\int_{\rr^3}rd c_{p,\varepsilon}(v,\tilde v) R_t(\mathrm {d} v,\mathrm {d} \tilde v) \geq {\mathcal T}_{p,\varepsilon}(f_t,\tilde f_t). \varepsilonnd{equation} By Proposition \ref{coup}, and since $u_\varepsilon(0)={\mathcal T}_{p,\varepsilon}(f_0,\tilde f_0)$, it holds that for all $t\geq 0$, $$ u_\varepsilon(t)= \color{black} {\mathcal T}_{p,\varepsilon}(f_0,\tilde f_0) + \int_0^t \int_{\rr^3}rd\int_{\rr^3}rd {\mathcal L}L c_{p,\varepsilon}(v,v_*,\tilde v,\tilde vs) R_s(\mathrm {d} v_*,\mathrm {d} \tilde vs) R_s(\mathrm {d} v,\mathrm {d} \tilde v) \mathrm {d} s. 
$$ Using next Lemma \ref{cent} and a symmetry argument, we find that $$ u_\varepsilon(t)\leq {\mathcal T}_{p,\varepsilon}(f_0,\tilde f_0) + \int_0^t (I_{1,\varepsilon}(s)+I_{2,\varepsilon}(s)+I_{3,\varepsilon}(s))\mathrm {d} s, $$ where, for some constant $C>0$ depending only on $p$ and $\gamma$, \begin{align*} I_{1,\varepsilon}(s)=& [2-p] \int_{\rr^3}rd c_{p+\gamma,\varepsilon}(v,\tilde v) R_s(\mathrm {d} v,\mathrm {d} \tilde v) ,\\ I_{2,\varepsilon}(s)=& C \sqrt \varepsilon \int_{\rr^3}rd\int_{\rr^3}rd (1+|v_*|^p+|\tilde vs|^p)c_{p+\gamma,\varepsilon}(v,\tilde v) R_s(\mathrm {d} v_*,\mathrm {d} \tilde vs) R_s(\mathrm {d} v,\mathrm {d} \tilde v),\\ I_{3,\varepsilon}(s)=& \frac{C}{\sqrt \varepsilon}\int_{\rr^3}rd\int_{\rr^3}rd (1+|v_*|^{p+\gamma}+|\tilde vs|^{p+\gamma})c_{p,\varepsilon}(v,\tilde v) R_s(\mathrm {d} v_*,\mathrm {d} \tilde vs) R_s(\mathrm {d} v,\mathrm {d} \tilde v). \varepsilonnd{align*} Using that $R_s\in{\mathcal H}(f_s,\tilde f_s)$, we conclude that \begin{align*} I_{2,\varepsilon}(s)\leq & C \sqrt \varepsilon (1+m_p(f_s+\tilde f_s))\int_{\rr^3}rd c_{p+\gamma,\varepsilon}(v,\tilde v)R_s(\mathrm {d} v,\mathrm {d} \tilde v),\\ I_{3,\varepsilon}(s)\leq & \frac C {\sqrt \varepsilon} (1+m_{p+\gamma}(f_s+\tilde f_s)) u_\varepsilon(s). \varepsilonnd{align*} We now fix $t>0$ and work on $[0,t]$. Setting $m_{p,\infty}([0,t])= \sup_{s\in [0,t]} m_{p}(f_s+\tilde f_s)$ and choosing $$ \varepsilon= \Big[ \frac{p-2}{p-2+C(1+m_{p,\infty}([0,t]))}\Big]^2, $$ so that $\varepsilon \in (0,1]$ and $2-p+C \sqrt \varepsilon (1+m_p(f_s+\tilde f_s)) \leq 0$ for all $s\in [0,t]$, we conclude that $I_{1,\varepsilon}(s)+I_{2,\varepsilon}(s) \leq 0$ for all $s\in [0,t]$, whence $$ u_\varepsilon(r)\leq {\mathcal T}_{p,\varepsilon}(f_0,\tilde f_0) + \frac C {\sqrt \varepsilon}\int_0^r (1+m_{p+\gamma}(f_s+\tilde f_s)) u_\varepsilon(s) \mathrm {d} s $$ for all $r \in [0,t]$. 
The Gr\"onwall \color{black} lemma then tells us that $$ {\mathcal T}_{p,\varepsilon}(f_t,\tilde f_t)\leq u_\varepsilon(t)\leq{\mathcal T}_{p,\varepsilon}(f_0,\tilde f_0) \varepsilonxp \Big( \frac C {\sqrt \varepsilon}\int_0^t (1+m_{p+\gamma}(f_s+\tilde f_s))\mathrm {d} s \Big). $$ Using finally that ${\mathcal T}_p={\mathcal T}_{p,1}$ and that $c_{p,1} \leq c_{p,\varepsilon} \leq \varepsilon^{-1} c_{p,1}$, we deduce that $$ {\mathcal T}_p(f_t,\tilde f_t)\leq{\mathcal T}_{p,\varepsilon}(f_t,\tilde f_t) \quad \hbox{and}\quad {\mathcal T}_{p,\varepsilon}(f_0,\tilde f_0) \leq \frac 1\varepsilon {\mathcal T}_{p}(f_0,\tilde f_0). $$ We thus end with $$ {\mathcal T}_{p}(f_t,\tilde f_t)\leq \frac 1 \varepsilon {\mathcal T}_{p}(f_0,\tilde f_0) \varepsilonxp \Big( \frac C {\sqrt \varepsilon}\int_0^t (1+m_{p+\gamma}(f_s+\tilde f_s))\mathrm {d} s \Big). $$ Recalling our choice for $\varepsilon$ and allowing the value of $C$, still depending only on $p$ and $\gamma$, to change from line to line, we find that \begin{align*} {\mathcal T}_{p}(f_t,\tilde f_t)\leq& C(1+m_{p,\infty}([0,t]))^2 {\mathcal T}_{p}(f_0,\tilde f_0) \varepsilonxp \Big( C[1+m_{p,\infty}([0,t])]\int_0^t (1+m_{p+\gamma}(f_s+\tilde f_s))\mathrm {d} s \Big)\\ \leq&{\mathcal T}_{p}(f_0,\tilde f_0) \varepsilonxp \Big( C[1+m_{p,\infty}([0,t])]\Big[1+\int_0^t (1+m_{p+\gamma}(f_s+\tilde f_s))\mathrm {d} s\Big] \Big), \varepsilonnd{align*} which was our goal. \varepsilonnd{proof} In order to relax the initial Gaussian moment condition, we will use the following convergence. \begin{lem}\label{tp convergence} Fix $\gamma \in (0,1]$ and $p>2$. Let $(f_t)_{t\ge 0}$ be a weak solution to \varepsilonqref{LE}, with initial moment $m_p(f_0)<\infty$. Then ${\mathcal T}_p(f_t, f_0)\rightarrow 0$ as $t\rightarrow 0$. \varepsilonnd{lem} \begin{proof} First, thanks to the density of $C^2_b({\rr^3})$ in $C_b({\rr^3})$, we deduce from \varepsilonqref{wf} that $f_t\rightarrow f_0$ weakly. 
It classically follows that $\lim_{t\to 0}d(f_t,f_0)=0$, where $d$ is the following distance that classically metrises weak convergence on probability measures: \begin{equation*} d(f,g) =\inf\Big\{\int_{\rr^3}rd (1\land |v-w|) S(\mathrm {d} v,\mathrm {d} w): S\in {\mathcal H}(f, g) \Big\}. \varepsilonnd{equation*} Moreover, for each $t\geq 0$, there exists a coupling $S_t \in {\mathcal H}(f_t,f_0)$ attaining the minimum $d(f_t,f_0)= \int_{\rr^3}rd (1\land |v-w|) S_t(\mathrm {d} v,\mathrm {d} w)$. \color{black} Now, fix $\varepsilonpsilon>0$; by Lemma \ref{ui}, there exist $M<\infty$ and $t_0>0$ such that $$ \int_{\rr^3} (1+|v|^p)\indiq_{\{|v|>M\}} f_t(\mathrm {d} v)<\varepsilonpsilon \quad \hbox{for all $t\in [0,t_0]$}. $$ Since now $c_{p,1}(v,w) \leq (1+|v|^p+|w|^p)(|v-w|\land 1) \leq (1+|v|^p)(|v-w|\land 1)+ (1+|w|^p)(|v-w|\land 1)$ and since ${\mathcal T}_p={\mathcal T}_{p,1}$, we have \begin{align*} {\mathcal T}_p(f_t, f_0) \leq & \int_{\rr^3}rd c_{p,1}(v,w)S_t(\mathrm {d} v,\mathrm {d} w) \\ \leq & (1+M^p) d(f_t,f_0)+ \int_{\rr^3}rd (1+|v|^p)\indiq_{\{|v|>M\}}S_t(\mathrm {d} v,\mathrm {d} w)\\ &+(1+M^p) d(f_t,f_0)+ \int_{\rr^3}rd (1+|w|^p)\indiq_{\{|w|>M\}}S_t(\mathrm {d} v,\mathrm {d} w)\\ =& 2(1+M^p)d(f_t,f_0)+ \int_{\rr^3} (1+|v|^p)\indiq_{\{|v|>M\}} f_t(\mathrm {d} v)+\int_{\rr^3} (1+|w|^p)\indiq_{\{|w|>M\}} f_0(\mathrm {d} w), \varepsilonnd{align*} the last equality using that $S_t \in {\mathcal H}(f_t,f_0)$. We conclude that for all $t\in [0,t_0]$, $$ {\mathcal T}_p(f_t, f_0) \leq 2(1+M^p)d(f_t,f_0)+ 2\varepsilon, $$ whence $\limsup_{t\to 0} {\mathcal T}_p(f_t, f_0) \leq 2\varepsilon$ and we are done, as $\varepsilonpsilon>0$ was arbitrary. \varepsilonnd{proof} We are now ready to remove the additional assumptions and prove the full stability statement. 
\begin{proof}[Proof of Theorem \ref{main}] We fix $\gamma \in (0,1]$, $p>2\color{black}$ and we consider two weak solutions $(f_t)_{t\geq 0}$ and $(\tilde f_t)_{t\geq 0}$ to \varepsilonqref{LE} such that $m_p(f_0+\tilde f_0)<\infty$. \vskip.13cm Fix $t>0$ and let $0<s\le t$; thanks to Proposition \ref{expo}, we have $\int_{\rr^3} e^{a|v|^2}(f_s+\tilde f_s)(\mathrm {d} v)<\infty$ for some $a>0$. Lemma \ref{mainexp} therefore applies to $(f_u)_{u\ge s}, (\tilde f_u)_{u\ge s}$, so that, setting $m_{p,\infty}([s,t])=\sup_{r\in [s,t]}m_p(f_r+\tilde f_r)$, \begin{align}\label{conclustion st} {\mathcal T}_p(f_t, \tilde f_t)\le & {\mathcal T}_p(f_s,\tilde f_s)\varepsilonxp \Big( C[1+m_{p,\infty}([s,t])] \Big[1+\int_s^t (1+m_{p+\gamma}(f_u+\tilde f_u))\mathrm {d} u\Big] \Big)\\ \leq &{\mathcal T}_p(f_s,\tilde f_s)\varepsilonxp \Big( C[1+m_{p,\infty}([0,t])] \Big[1+\int_0^t (1+m_{p+\gamma}(f_u+\tilde f_u))\mathrm {d} u\Big] \Big). \notag \varepsilonnd{align} Recalling the relaxed triangle inequality \varepsilonqref{eq: rti2}, we have, for some constant $C$ depending only on $p$, $${\mathcal T}_p(f_s, \tilde f_s)\le C[{\mathcal T}_p(f_s, f_0)+{\mathcal T}_p(f_0, \tilde f_0)+{\mathcal T}_p(\tilde f_0, \tilde f_s)] $$ and as $s\rightarrow 0$, the first and third terms converge to $0$ by Lemma \ref{tp convergence}, so $$ \limsup_{s\rightarrow 0} {\mathcal T}_p(f_s, \tilde f_s)\le C{\mathcal T}_p(f_0, \tilde f_0).$$ We thus can take $s\downarrow 0$ in \varepsilonqref{conclustion st} to obtain the desired result. \varepsilonnd{proof} \section{Existence}\label{existence} \begin{proof}[Proof of Theorem \ref{mainexist}] Let us start from $f_0\in {\mathcal P}_2$. By the de La Vall\'ee Poussin theorem, there exists a $C^2$-function $h:[0,\infty)\rightarrow [0,\infty)$ such that $h'' \geq 0$, $h'(\infty)=\infty$ and \begin{equation} \label{eq: h integrable} \int_{\rr^3} h(|v|^2) f_0(\mathrm {d} v)<\infty . \varepsilonnd{equation} We can also impose that $h''\le 1$ and that $h'(0)=1$. 
\vskip.13cm {\bf Step 1.} We consider $n_0\geq 1$ such that for all $n\geq n_0$, $\alpha_n= \int_{\rr^3} \indiq_{\{|v|\le n\}}f_0(\mathrm {d} v) \geq 1/2$ and set, for $n\geq n_0$, $$ f^n_0(\mathrm {d} v)=\alpha_n^{-1}\indiq_{\{|v|\le n\}}f_0(\mathrm {d} v) \in {\mathcal P}({\rr^3}).$$ Since $f^n_0$ is compactly supported, it has all moments finite and there exists a weak solution $(f^n_t)_{t\geq 0}$ to \eqref{LE} starting at $f^n_0$ by Theorem \ref{ddv}. Of course, $f^n_0$ converges weakly to $f_0$ as $n\to \infty$. {\bf Step 2.} We now show that for all $T>0$, there is a finite constant $K_T$ such that for all $n\geq n_0$, \begin{equation} \label{eq: UI in existence proof} \sup_{t\in [0,T]} \int_{\rr^3} h(|v|^2)f^n_t (\mathrm {d} v) + \int_0^T \int_{\rr^3} |v|^{2+\gamma}h'(|v|^2)f^n_t (\mathrm {d} v) \mathrm {d} t \leq K_T.\end{equation} By Theorem \ref{ddv}, all polynomial moments of $f^n_t$ are bounded, uniformly in $t\geq 0$ (but not necessarily in $n$). We can therefore apply \eqref{wf} to the function $\varphi(v)=h(|v|^2)$: arguing as in \eqref{tto}, $$ \partial_k\varphi(v)=2v_kh'(|v|^2); \qquad \partial^2_{k\ell}\varphi(v) =2h'(|v|^2)\indiq_{\{k=\ell\}}+4v_kv_\ell h''(|v|^2)$$ and so, setting $x=v-v_*$ as usual, $$ {\mathcal L} \varphi(v,v_*)=h'(|v|^2)[2v\cdot b(x)+\|\sigma(x)\|^2]+2|\sigma(x)v|^2h''(|v|^2). $$ Recalling \eqref{tr} and that $0\leq h''\leq 1$, \color{black} the last term is bounded by $$ 2|\sigma(x)v|^2h''(|v|^2) \le C |x|^\gamma |v|^2|v_*|^2 \color{black} \le C(|v|^{2+\gamma}|v_*|^2+ |v|^2|v_*|^{2+\gamma}).
$$ Meanwhile, since $b(x)=-2|x|^\gamma x$ and $||\sigma(x)||^2=2 |x|^{\gamma+2}$, the first term is \begin{align*} h'(|v|^2)&[2v\cdot b(x)+\|\sigma(x)\|^2] =2h'(|v|^2)[-|x|^\gamma|v|^2+|x|^\gamma|v_*|^2] \\ &\le -2h'(|v|^2)|v|^{2+\gamma} +2h'(|v|^2)|v_*|^\gamma|v|^2 +2h'(|v|^2)|v|^\gamma|v_*|^2 + 2h'(|v|^2)|v_*|^{2+\gamma}\\ &\le -h'(|v|^2)|v|^{2+\gamma} +C (1+|v|^2) |v_*|^{\gamma+2}. \end{align*} We used that $|x|^\gamma \geq |v|^\gamma -|v_*|^\gamma$, that $|x|^\gamma \leq |v|^\gamma +|v_*|^\gamma$ and, for the last inequality, that there is $C>0$ such that $|v_*|^\gamma|v|^2+|v|^\gamma|v_*|^2 \leq \frac12|v|^{2+\gamma}+C|v_*|^{2+\gamma}$ and that $h'(r)\leq 1+r$. All in all, $$ {\mathcal L} \varphi(v,v_*) \leq -h'(|v|^2)|v|^{2+\gamma} +C (1+|v|^2) |v_*|^{\gamma+2}+C(1+|v_*|^2) |v|^{\gamma+2}. $$ We thus find, by \eqref{wf}, recalling that $m_2(f^n_t)=m_2(f^n_0)$, that \begin{align*} &\int_{\rr^3} h(|v|^2)f^n_t(\mathrm {d} v) + \int_0^t \int_{\rr^3} h'(|v|^2)|v|^{2+\gamma} f^n_s(\mathrm {d} v) \mathrm {d} s\\ \leq&\int_{\rr^3} h(|v|^2)f^n_0(\mathrm {d} v) +2C(1+m_2(f_0^n))\int_0^t \int_{\rr^3} |v|^{2+\gamma} f^n_s(\mathrm {d} v) \mathrm {d} s\\ \leq & 2\int_{\rr^3} h(|v|^2)f_0(\mathrm {d} v) +2C(1+2m_2(f_0))\int_0^t \int_{\rr^3} |v|^{2+\gamma} f^n_s(\mathrm {d} v) \mathrm {d} s, \end{align*} since $f^n_0 \leq 2f_0$. But since $h'(\infty)=\infty$, there is a constant $\kappa$ (depending on $m_2(f_0)$) such that $2C(1+2m_2(f_0)) |v|^{2+\gamma} \leq \frac 12 h'(|v|^2)|v|^{2+\gamma} + \kappa$ for all $v\in {\rr^3}$. We finally get $$ \int_{\rr^3} h(|v|^2)f^n_t(\mathrm {d} v)+ \frac12 \int_0^t \int_{\rr^3} h'(|v|^2)|v|^{2+\gamma} f^n_s(\mathrm {d} v) \mathrm {d} s \leq 2\int_{\rr^3} h(|v|^2)f_0(\mathrm {d} v) + \kappa t, $$ and this completes the step.
\vskip.13cm {\bf Step 3.} Here we show that the family $((f^n_t)_{t\geq 0})_{n\geq n_0}$ is relatively compact in $C([0,\infty),{\mathcal P}({\rr^3}))$, where ${\mathcal P}({\rr^3})$ is endowed with the usual weak convergence. This last convergence can be metrised by the distance on ${\mathcal P}({\rr^3})$: $$ \delta(f,g)=\sup_{\varphi \in C^2_{b,1}}\Big|\int_{\rr^3} \varphi(v)(f-g)(\mathrm {d} v)\Big|, $$ where $C^2_{b,1}$ is the set of $C^2$ functions on ${\rr^3}$ such that $||\varphi||_\infty+||\nabla \varphi||_\infty +||\nabla^2 \varphi||_\infty \leq 1$. By the Arzel\`a-Ascoli theorem, it suffices to check that \vskip.13cm \noindent (a) for all $t\geq 0$, the family $(f^n_t)_{n\geq n_0}$ is relatively compact in ${\mathcal P}({\rr^3})$ and \vskip.13cm \noindent (b) for all $T>0$, $\lim_{\varepsilon\to 0} \sup_{n\geq n_0} \sup_{s,t \in [0,T], |t-s|\leq \varepsilon} \delta(f^n_t,f^n_s) = 0$. \vskip.13cm Point (a) is obvious, since for all $t\geq 0$, all $n\geq n_0$, $m_2(f^n_t)\leq 2 m_2(f_0)$ and since the set $\{f \in {\mathcal P}({\rr^3}) : m_2(f) \leq a\}$ is compact for any $a>0$. Concerning point (b), we recall that there is a constant $C$ such that for all $\varphi \in C^2_{b,1}$, $|{\mathcal L}\varphi(v,v_*)|\leq C(1+|v|^{\gamma+2}+|v_*|^{\gamma+2})$. We thus deduce from \varepsilonqref{wf} that for all $t\geq s \geq 0$, all $n\geq n_0$, $$ \delta(f^n_t,f^n_s) \leq C \int_s^t \int_{\rr^3}rd (1+|v|^{\gamma+2}+|v_*|^{\gamma+2}) f^n_s(\mathrm {d} v_*)f^n_s(\mathrm {d} v)\mathrm {d} s \leq 2C \int_s^t \int_{\rr^3} (1+|v|^{\gamma+2})f^n_s(\mathrm {d} v)\mathrm {d} s. 
$$ Now for $0\leq s \leq t \leq T$ with $t-s\leq \varepsilon$, for any $n\geq n_0$, any $A>0$, separating the cases $|v|\leq A$ and $|v|\geq A$, \begin{align*} \delta(f^n_t,f^n_s) \leq& 2C (1+A^{\gamma+2})(t-s) + \frac{2C}{h'(A^2)} \int_s^t \int_{\rr^3} (1+|v|^{\gamma+2})h'(|v|^2) f^n_r(\mathrm {d} v)\mathrm {d} r \\ \leq& 2C (1+A^{\gamma+2}) \varepsilon + \frac{2CK_T}{h'(A^2)} \end{align*} because $h'$ is nondecreasing and with $K_T$ introduced in Step 2. \color{black} Now for $\eta>0$ fixed, we choose $A_\eta>0$ large enough so that $\frac{2CK_T}{h'(A_\eta^2)} \leq \frac \eta 2$ and conclude that, as soon as $\varepsilon \leq \frac{\eta}{4C (1+A_\eta^{\gamma+2})}$, \color{black} we have $\delta(f^n_t,f^n_s) \leq \eta$ for all $n\geq n_0$ and all $s,t \in [0,T]$ such that $|t-s|\leq \varepsilon$. \vskip.13cm {\bf Step 4.} By Step 3, we can find a (not relabelled) subsequence such that $(f^n_t)_{t\geq 0}$ converges to a limit $(f_t)_{t\geq 0}$ in $C([0,\infty),{\mathcal P}({\rr^3}))$; this also implies that $(f^n_t\otimes f^n_t)_{t\geq 0}$ tends to $(f_t\otimes f_t)_{t\geq 0}$. Hence for all $T>0$, all $\psi \in C^2_b({\rr^3})$ and all $\Psi \in C^2_b({\rr^3}\times{\rr^3})$, \begin{equation}\label{ttty} \sup_{[0,T]} \Big[ \Big|\int_{\rr^3} \psi(v) (f^n_t(\mathrm {d} v)-f_t(\mathrm {d} v))\Big|+ \Big|\int_{\rr^3}rd \Psi(v,v_*) (f^n_t(\mathrm {d} v)f^n_t(\mathrm {d} v_*)- f_t(\mathrm {d} v)f_t(\mathrm {d} v_*))\Big| \Big] \to 0 \end{equation} as $n\to \infty$. It remains to check that this limit is indeed a weak solution to \eqref{LE} starting from $f_0$. \vskip.13cm First, using the uniform integrability property \eqref{eq: UI in existence proof}, $$ \sup_{n\geq n_0}\sup_{t\in [0,T]} \int_{\rr^3} h(|v|^2)f^n_t (\mathrm {d} v) <\infty $$ and recalling that $\lim_{r\to \infty} r^{-1}h(r) = \infty$, one easily checks that for all $t\geq 0$, $m_2(f_t)=\lim_n m_2(f^n_t)$.
Since now $m_2(f^n_t)=m_2(f^n_0)\to m_2(f_0)$, we deduce that $(f_t)_{t\geq 0}$ is energy-conserving as desired. \vskip.13cm Next, we fix $\varphi \in C^2_b({\rr^3})$ and recall that ${\mathcal L} \varphi$ is continuous on ${\rr^3}\times{\rr^3}$ and satisfies the growth bound $|{\mathcal L}\varphi(v,v_*)| \leq C(1+|v|^{2+\gamma}+|v_*|^{2+\gamma})$. We can then let $n\to \infty$ in the formula $$ \int_{\rr^3} \varphi(v)f_t^n(\mathrm {d} v) = \int_{\rr^3} \varphi(v)f_0^n(\mathrm {d} v) + \int_0^t \int_{\rr^3} \int_{\rr^3} {\mathcal L}\varphi(v,v_*) f_s^n(\mathrm {d} v_*)f_s^n(\mathrm {d} v) \mathrm {d} s, $$ and conclude that \varepsilonqref{wf} is satisfied, using \varepsilonqref{ttty} and the uniform integrability given by \varepsilonqref{eq: UI in existence proof} (recall that $\lim_{r\to \infty} h'(r)=\infty$), i.e. $$ \sup_{n\geq n_0}\int_0^t \int_{\rr^3} |v|^{2+\gamma}h'(|v|^2)f^n_s (\mathrm {d} v) \mathrm {d} s <\infty. $$ \color{black} The proof is complete. \varepsilonnd{proof} \section{Regularity}\label{pf of regularity} We now prove our regularity result Theorem \ref{mainregularity}. We begin with the following very mild regularity principle, which guarantees that the hypotheses of Theorem \ref{ddv}-(c) apply at some small time, provided that $f_0$ has $4$ moments. We then `bootstrap' to the claimed result, using Theorems \ref{ddv} and \ref{analytic regularity} \color{black} and our uniqueness result. \begin{lem}\label{weak regularity} Let $\gamma \in (0,1]$ and \color{black} $f_0\in {\mathcal P}_{4}({\rr^3})$ be a measure which is not a Dirac mass, and let $(f_t)_{t\ge 0}$ be the weak solution to \varepsilonqref{LE} starting at $f_0$. Then, for any $t_0>0$, there exists $t_1\in [0,t_0)$ such that $f_{t_1}$ is not concentrated on a line. \varepsilonnd{lem} \begin{proof} If $f_0$ is already not concentrated on a line, there is nothing to prove. 
We thus assume that $f_0$ concentrates on a line and, by translational and rotational invariance, that $f_0$ concentrates on the $z$-axis $L_0=\{(0,0,z) : z\in {\mathbb{R}}\}$. \color{black} Further, since $f_0$ is not a point mass, we can find two disjoint compact intervals $K_1,K_2 \subset L_0$ such that $f_0(K_1)>0$ and $f_0(K_2)>0$. \vskip.13cm {\bf Step 1.} We introduce the following averaged coefficients: for $v\in {\rr^3}$ and $f\in {\mathcal P}_2({\rr^3})$, define $$ b(v, f)=\int_{\rr^3} b(v-v_*)f(\mathrm {d} v_*), \qquad a(v,f)=\int_{\rr^3} a(v-v_*)f(\mathrm {d} v_*)$$ and let $\sigma(v,f)$ be a square root of $a(v,f)$. Now, let $(B_t)_{t\geq 0}$ be a 3-dimensional Brownian motion, and $V_0$ an independent random variable in ${\rr^3}$. From \cite[Proposition 10]{fgui}, the It\^o stochastic differential equation \begin{equation}\label{eq: SDE} V_t=V_0+\int_0^t b(V_s, f_s)\mathrm {d} s+\int_0^t \sigma(V_s, f_s)\mathrm {d} B_s \varepsilonnd{equation} has a pathwise unique solution and, if $V_0$ is $f_0$-distributed, then $V_t\sim f_t$ for all $t\ge 0$. We will denote by $\mathbb{P}_{v_0}$, $\mathbb{E}_{v_0}$ the probability and expectation concerning the process started from the deterministic initial condition $V_0=v_0$. We thus have $f_t(A)=\int_{\rr^3} \mathbb{P}_{v_0}(V_t \in A) f_0(\mathrm {d} v)$ for any $A \in {\mathcal B}({\rr^3})$, any $t\geq 0$. \color{black} \vskip.13cm {\bf Step 2.} We now claim that if $F:{\rr^3} \rightarrow \mathbb{R}$ is bounded and continuous and $Z\sim \mathcal{N}(0, I_3)$, then \begin{equation} \label{eq: uniform convergence in law} \lim_{\varepsilon\to 0}\sup_{v_0\in K_1}\Big|\mathbb{E}_{v_0}\Big[F\Big(\frac{V_\varepsilon-v_0}{\sqrt{\varepsilon}}\Big)\Big] -\mathbb{E} \Big[F\Big(\sigma(v_0, f_0)Z\Big)\Big]\Big|=0. \varepsilonnd{equation} Let $U$ be an open ball containing $K_1$, and for $v\in {\rr^3}$, let $\pi(v)$ be the unique minimiser of $|v-\tilde v|$ over $\tilde v\in \overline{U}$. 
Recalling the growth bounds $$|b(v-v_*)|\le C|v-v_*|^{1+\gamma}, \qquad \|a(v-v_*)\|\le C|v-v_*|^{2+\gamma}, $$ that $\sup_{t\geq 0} m_4(f_t)<\infty$ by Theorem \ref{ddv}-(a), one checks that $|b(v,f_s)|+||\sigma(f_s,v)|| \leq C(1+|v|^{1+\gamma})$ and, since $f_t\to f_0$ weakly as $t\to 0$, that $a(v, f_t)\to a(v, f_0)$, and thus $\sigma(v,f_t)\to \sigma(v,f_0)$, uniformly over $v\in \overline{U}$, as $t\to 0$. We now define $$ b_t(v)=b(\pi(v), f_t); \qquad \sigma_t(v)=\sigma(\pi(v), f_t)$$ so that $b_t(v)$ and $\sigma_t(v)$ are bounded, globally Lipschitz in $v$, agree with $b(v, f_t), \sigma(v,f_t)$ for $v \in \overline{U}$ and $\sigma_t(v)$ converges uniformly on ${\rr^3}$ as $t\downarrow 0$. Now, let $\tilde V_t$ be the solution to the stochastic differential equation \varepsilonqref{eq: SDE} with these coefficients in place of $b(v, f_t)$ and $\sigma(v,f_t)$, and let $T$ be the stopping time when $\tilde V_t$ first leaves $U$. By uniqueness, we have $V_t=\tilde V_t$ for all $t\in [0,T]$. Using now that $b_t$ and $\sigma_t$ are bounded, that $\sigma_t \to \sigma_0$ uniformly and that $\tilde V_t \to v_0$ as $t\to 0$, \color{black} we see that \begin{align}\label{klm} &\limsup_{\varepsilon \to 0} \sup_{v_0 \in K_1} \mathbb{E}_{v_0}\Big[ \Big| \frac{\tilde V_\varepsilon - v_0}{\sqrt \varepsilon} - \sigma_0(v_0) \frac{B_\varepsilon}{\sqrt \varepsilon}\Big|^2 \Big]\\ \leq& \limsup_{\varepsilon \to 0} \sup_{v_0 \in K_1} \frac 1\varepsilon \mathbb{E}_{v_0}\Big[2\Big(\int_0^\varepsilon b_s(\tilde V_s)\mathrm {d} s\Big)^2 +2\Big(\int_0^\varepsilon (\sigma_s(\tilde V_s)-\sigma_0(v_0)) \mathrm {d} B_s\Big)^2\Big]=0. 
\notag \varepsilonnd{align} Recalling that $\sigma_0(v_0)=\sigma(v_0,f_0)$ when $v_0 \in K_1$ \color{black} and that $\frac{B_\varepsilon}{\sqrt \varepsilon}\sim \mathcal{N}(0, I_3)$, we conclude that \begin{align*} &\sup_{v_0\in K_1}\Big|\mathbb{E}_{v_0}\Big[F\Big(\frac{V_\varepsilon-v_0}{\sqrt{\varepsilon}}\Big)\Big] -\mathbb{E}_{v_0}[F(\sigma(v_0,f_0) Z)]\Big|\\ \le& \sup_{v_0\in K_1}\Big|\mathbb{E}_{v_0}\Big[F\Big(\frac{\tilde V_\varepsilon-v_0}{\sqrt{\varepsilon}}\Big)\Big] -\mathbb{E}_{v_0}\Big[F\Big(\sigma_0 (v_0)\frac{B_\varepsilon}{\sqrt \varepsilon}\Big)\Big]\Big| + 2\|F\|_\infty \hspace{0.1cm} \sup_{v_0\in K_1} \mathbb{P}(T<\varepsilon)\rightarrow 0 \varepsilonnd{align*} where the final convergence follows \varepsilonqref{klm} and the fact that $\sup_{v_0\in K_1} \mathbb{P}(T<\varepsilon)\to 0$ because $d(K_1, U^\mathrm{c})=\inf\{|v-\tilde v|:v\in K_1, \tilde v\not \in U\}>0$ and because $b_t$ and $\sigma_t$ are bounded. The proof of the claim is complete. \vskip.13cm {\bf Step 3.} We now construct three test functions $F_i$ to which apply Step 2: let $B_i\subset \mathbb{R}^2, i=1,2,3$ be disjoint open balls in the plane such that no line (in the plane) meets all three, and let $\chi_i:\mathbb{R}^2\rightarrow [0,1]$ be nonzero, smooth bump functions, supported on each $B_i$. Now, we define $\rho:{\rr^3}\to{\mathbb{R}}^2$ the projection $\rho(v_1,v_2,v_3)=(v_1,v_2)$. We then introduce the bounded smooth functions $F_i:{\rr^3}\rightarrow [0,1]$ defined \color{black} by $F_i(v)=\chi_i(\rho(v))$. Observe that $F_i(v)\leq \indiq_{\{\rho(v)\in B_i\}}$. \vskip.13cm Since $f_0$ concentrates on the $z$-axis $L_0$, denoting by $e_3=(0,0,1)$, we have, for all $v_0 \in L_0$, $$ a(v_0,f_0)=\int_{\rr^3} |v_0-v|^{\gamma+2}\Pi_{(v-v_0)^\perp}f_0(\mathrm {d} v) = h(v_0) \Pi_{e_3^\perp} = h(v_0)\begin{pmatrix} 1 & 0 & 0 \\ 0 & 1 & 0 \\ 0 &0&0 \varepsilonnd{pmatrix} , $$ where $h(v_0)=\int_{\rr^3} |v_0-v|^{\gamma+2}f_0(\mathrm {d} v)$. 
One easily checks that $h$ is bounded from above and from below on $K_1$, since $\sup_{v_0 \in K_1} h(v_0) \leq C(1+m_{2+\gamma}(f_0))$ and $\inf_{v_0 \in K_1} h(v_0) \geq \alpha^{\gamma+2}f_0(K_2)$, where $\alpha>0$ is the distance between $K_1$ and $K_2$. Since $\sigma(v_0,f_0)=[a(v_0,f_0)]^{1/2}$ and since $\rho(Z)\sim \mathcal{N}(0, I_2)$, we deduce that for some $\delta>0$ and all $i=1,2,3$, $$ \inf_{v_0\in K_1} \mathbb{E}_{v_0} [F_i(\sigma(v_0, f_0)Z)]= \inf_{v_0\in K_1} \mathbb{E} [\chi_i(h^{1/2}(v_0)\rho(Z))] \ge 2\delta>0.$$ Thanks to \varepsilonqref{eq: uniform convergence in law}, we can find $\varepsilon_0>0$ such that for all $\varepsilon\in (0,\varepsilon_0)$, all $i=1,2,3$, $$ \inf_{v_0\in K_1} \mathbb{E}_{v_0}\Big[F_i\Big(\frac{V_{\varepsilon}-v_0}{\sqrt{\varepsilon}}\Big)\Big]\ge \delta \quad \hbox{whence}\quad \inf_{v_0\in K_1}\mathbb{P}_{v_0}\Big(\rho\Big(\frac{V_{\varepsilon}-v_0}{\sqrt{\varepsilon}}\Big)\in B_i \Big)\ge \delta. $$ {\bf Step 4.} Now, we fix $t_0>0$ as in the statement, and consider $t_1 \in (0,\varepsilon_0\land t_0)$. For a given line $L=\{x_0+\lambda u_0 : \lambda \in {\mathbb{R}}\}\subset{\rr^3}$ and for $v_0\in K_1$, we denote by $L_{t_1,v_0}=\rho((L-v_0)/\sqrt{t_1})$, which is a line (or a point) in ${\mathbb{R}}^2$. There is $i\in \{1,2,3\}$, possibly depending on $t_1$ and on $v_0$, such that $L_{t_1,v_0} \cap B_i=\varepsilonmptyset$, so that \begin{align*} \mathbb{P}_{v_0}(V_{t_1} \in L)= &\mathbb{P}_{v_0}\Big( \frac{V_{t_1}-v_0}{\sqrt{{t_1}}} \in \frac{L-v_0}{\sqrt{{t_1}}}\Big)\\ \leq & \mathbb{P}_{v_0}\Big( \rho\Big(\frac{V_{t_1}-v_0}{\sqrt{{t_1}}}\Big) \in L_{{t_1},v_0}\Big)\\ \leq& 1 - \mathbb{P}_{v_0}\Big( \rho\Big(\frac{V_{t_1}-v_0}{\sqrt{{t_1}}}\Big) \in B_i\Big)\\ \leq &1-\delta \varepsilonnd{align*} by Step 3. 
\color{black} In other words, for all $v_0 \in K_1$, $\mathbb{P}_{v_0}(V_{t_1} \in {\rr^3}\setminus L) \geq \delta$, whence $$ f_{t_1}({\rr^3}\setminus L)=\int_{\rr^3} \mathbb{P}_{v_0}(V_{t_1}\not \in L)f_0(\mathrm {d} v_0) \ge \delta f_0(K_1)>0.$$ The proof is complete. \varepsilonnd{proof} We now prove our claimed result. \begin{proof}[Proof of Theorem \ref{mainregularity}] Let $f_0\in {\mathcal P}_2({\rr^3})$ \color{black} not be a point mass, and let $(f_t)_{t\ge 0}$ be any weak solution to \varepsilonqref{LE} starting at $f_0$. Fix $t_0>0$. By Theorem \ref{ddv}-(a), picking $t_1\in (0, t_0)$ arbitrarily, we have $m_{4}(f_{t_1})<\infty$ and, due to conservation of energy and momentum, $f_{t_1}$ is not a point mass. We can therefore apply Lemma \ref{weak regularity} to find $t_2\in [t_1, t_0)$ such that $f_{t_2}$ is not concentrated on a line, and we also have $m_{4}(f_{t_2})<\infty$, still by Theorem \ref{ddv}-(a), because $t_2>0$. \vskip.13cm Now, by Theorem \ref{ddv}-(c), there exists \varepsilonmph{a} solution $(g_t)_{t\ge 0}$ to \varepsilonqref{LE} starting at $g_0=f_{t_2}$ such that, for all $s, k\ge 0$ and $\delta>0$, $$\sup_{t\ge \delta}\|g_t\|_{H^k_s({\rr^3})}<\infty$$ and such that $H(g_t)<\infty$ for all $t>0$; by Theorem \ref{analytic regularity}, $g_t$ is further analytic for all $t>0$. \vskip.13cm By uniqueness, see Theorem \ref{main} and recall that $m_{4}(f_{t_2})<\infty$, there is a unique weak solution to \varepsilonqref{LE} starting at $g_0=f_{t_2}$, whence $g_t=f_{t_2+t}$ for all $t\ge 0$. In particular, $f_{t_0}=g_{t_0-t_2}$ is analytic and has finite entropy and (choosing $\delta=t_0-t_2$), for all $s, k\ge 0$, $\sup_{t\ge t_0}\|f_t\|_{H^k_s({\rr^3})}<\infty$. 
\varepsilonnd{proof} \section{Proof of the central inequality} \label{proof of cent} We finally handle the \color{black} \begin{proof}[Proof of Lemma \ref{cent}] We introduce the shortened notation $x=v-v_*$, $\tilde x=\tilde v-\tilde vs$ and recall that \begin{align}\label{i0} {\mathcal L}L c_{p,\varepsilon}(v,v_*,\tilde v,\tilde vs) \leq k_{p,\varepsilon}^{(1)}+k_{p,\varepsilon}^{(2)}+ \tilde k_{p,\varepsilon}^{(2)}+k_{p,\varepsilon}^{(3)}+\tilde k_{p,\varepsilon}^{(3)}, \varepsilonnd{align} where $k_{p,\varepsilon}^{(1)}=k_{p,\varepsilon}^{(1)}(v,v_*,\tilde v,\tilde vs)$, $k_{p,\varepsilon}^{(2)}=k_{p,\varepsilon}^{(2)}(v,v_*,\tilde v,\tilde vs)$, $\tilde k_{p,\varepsilon}^{(2)}=k_{p,\varepsilon}^{(2)}(\tilde v,\tilde vs,v,v_*)$, etc. In the whole proof, $C$ is allowed to change from line to line and to depend (only) on $p$ and $\gamma$. \vskip.13cm {\bf Step 1.} Here we show, and this is the most tedious estimate, that \begin{align}\label{i1} k_{p,\varepsilon}^{(1)}\leq& 2c_{p+\gamma,\varepsilon}(v,\tilde v)\\ &+C\sqrt\varepsilon (1+|v_*|^p+|\tilde vs|^p)c_{p+\gamma,\varepsilon}(v,\tilde v) \notag\\ &+ C\sqrt\varepsilon (1+|v|^p+|\tilde v|^p) c_{p+\gamma,\varepsilon}(v_*,\tilde vs)\notag\\ &+ \frac{C}{\sqrt \varepsilon} (1+|v_*|^{p+\gamma}+|\tilde vs|^{p+\gamma})c_{p,\varepsilon}(v,\tilde v) \notag\\ &+ \frac{C}{\sqrt \varepsilon} (1+|v|^{p+\gamma}+|\tilde v|^{p+\gamma})c_{p,\varepsilon}(v_*,\tilde vs).\notag \varepsilonnd{align} We start from $$ k_{p,\varepsilon}^{(1)}=(1+|v|^p+|\tilde v|^p)\varphi_\varepsilon'(|v-\tilde v|^2)[g_1+g_2+g_3], $$ where \begin{align*} g_1=&[(v-\tilde v)-(v_*-\tilde vs)]\cdot(b(x)-b(\tilde x)) + ||\sigma(x)-\sigma(\tilde x)||^2,\\ g_2=&(v-\tilde v)\cdot(b(x)-b(\tilde x)) ,\\ g_3=&(v_*-\tilde vs)\cdot (b(x)-b(\tilde x)). 
\varepsilonnd{align*} {\it Step 1.1.} Recalling that $b(x)=-2|x|^\gamma x$ and using \varepsilonqref{p3}, we find \begin{align*} g_1 \leq& 2(x-\tilde x)\cdot[-|x|^\gamma x+|\tilde x|^\gamma\tilde x] + 2|x|^{\gamma+2}+2|\tilde x|^{\gamma+2}-4|x|^{\gamma/2}|\tilde x|^{\gamma/2}(x\cdot \tilde x) \\ =& 2(|x|^\gamma+|\tilde x|^\gamma ) (x\cdot\tilde x) -4 |x|^{\gamma/2}|\tilde x|^{\gamma/2}(x\cdot \tilde x) \\ =& 2 (x\cdot\tilde x) (|x|^{\gamma/2}-|\tilde x|^{\gamma/2})^2. \varepsilonnd{align*} Using now \varepsilonqref{ttaacc} with $\alpha=\gamma/2$, $$ g_1 \leq 2 |x||\tilde x|(|x|\lor|\tilde x|)^{\gamma-2}(|x|-|\tilde x|)^2= 2 (|x|\land|\tilde x|)(|x|\lor|\tilde x|)^{\gamma-1}(|x|-|\tilde x|)^2 \leq 2 (|x|\land|\tilde x|)^\gamma |x-\tilde x|^2. $$ Since $|x-\tilde x|=|(v-\tilde v)-(v_*-\tilde vs)|$, we end with $$ g_1 \leq 2(|x|\land|\tilde x|)^\gamma |v-\tilde v|^2+ 2(|x|\land|\tilde x|)^\gamma (2|v-\tilde v||v_*-\tilde vs| + |v_*-\tilde vs|^2). $$ {\it Step 1.2.} We next study $g_2$, assuming without loss of generality that $|x|\geq |\tilde x|$. We write, using \varepsilonqref{ttaacc} with $\alpha=\gamma$, \begin{align*} g_2=&2(v-\tilde v)\cdot[-|x|^\gamma (x-\tilde x)+(|\tilde x|^\gamma-|x|^\gamma)\tilde x]\\ \leq& -2|x|^\gamma(v-\tilde v)\cdot (x-\tilde x) + 2 |v-\tilde v| |\tilde x| (|x|\lor|\tilde x|)^{\gamma-1}||x|-|\tilde x||\\ \leq & -2|x|^\gamma(v-\tilde v)\cdot (x-\tilde x) + 2 |v-\tilde v| |\tilde x|^{\gamma}|x-\tilde x|. \varepsilonnd{align*} Since now $x=v-v_*$ and $\tilde x=\tilde v-\tilde vs$, we see that \begin{align*} g_2\leq& - 2|x|^\gamma |v-\tilde v|^2+2|x|^\gamma |v-\tilde v||v_*-\tilde vs|+ 2|\tilde x|^\gamma[|v-\tilde v|^2+|v-\tilde v||v_*-\tilde vs|]\\ \leq& 2(|x|^\gamma+|\tilde x|^\gamma) |v-\tilde v||v_*-\tilde vs| \varepsilonnd{align*} since $|x|\geq |\tilde x|$ by assumption. By symmetry, the same bound holds when $|x|\leq |\tilde x|$. 
\vskip.13cm {\it Step 1.3.} Using now \varepsilonqref{p2}, we see that \begin{align*} g_3\leq& 2|v_*-\tilde vs|[|x|^\gamma+|\tilde x|^\gamma] |x-\tilde x| \leq 2(|x|^\gamma+|\tilde x|^\gamma) [|v-\tilde v||v_*-\tilde vs|+|v_*-\tilde vs|^2]. \varepsilonnd{align*} {\it Step 1.4.} Gathering Steps 1.1, 1.2, 1.3, we have checked that $$ k_{p,\varepsilon}^{(1)} \leq (1+|v|^p+|\tilde v|^p)\varphi_\varepsilon'(|v-\tilde v|^2) \Big[2(|x|\land|\tilde x|)^\gamma |v-\tilde v|^2 +C(|x|^\gamma+|\tilde x|^\gamma) (|v-\tilde v||v_*-\tilde vs|+|v_*-\tilde vs|^2)\Big]. $$ Recalling that $r\varphi_\varepsilon'(r)\leq \varphi_\varepsilon(r)$ by \varepsilonqref{ve} and that $|x|^\gamma\leq|v|^\gamma+|v_*|^\gamma$ and $|\tilde x|^\gamma\leq|\tilde v|^\gamma+|\tilde vs|^\gamma$, we may write $k_{p,\varepsilon}^{(1)}\leq k_{p,\varepsilon}^{(11)}+ k_{p,\varepsilon}^{(12)}$, where \begin{align*} k_{p,\varepsilon}^{(11)}=& 2(1+|v|^p+|\tilde v|^p)[(|v|^\gamma+|v_*|^\gamma)\land (|\tilde v|^\gamma+|\tilde vs|^\gamma)] \varphi_\varepsilon(|v-\tilde v|^2),\\ k_{p,\varepsilon}^{(12)}=& C(1+|v|^p+|\tilde v|^p)(|v|^\gamma+|v_*|^\gamma+|\tilde v|^\gamma+|\tilde vs|^\gamma)\varphi_\varepsilon'(|v-\tilde v|^2) (|v-\tilde v||v_*-\tilde vs|+|v_*-\tilde vs|^2). 
\varepsilonnd{align*} First, \begin{align*} k_{p,\varepsilon}^{(11)}\leq & 2(|v|^\gamma+|v_*|^\gamma)\varphi_\varepsilon(|v-\tilde v|^2) + 2 |v|^p(|v|^\gamma+|v_*|^\gamma)\varphi_\varepsilon(|v-\tilde v|^2) + 2 |\tilde v|^p(|\tilde v|^\gamma+|\tilde vs|^\gamma) \varphi_\varepsilon(|v-\tilde v|^2)\\ =& 2(|v|^{p+\gamma}+|\tilde v|^{p+\gamma}) \varphi_\varepsilon(|v-\tilde v|^2) + 2(|v|^\gamma+|v_*|^\gamma+|v|^p|v_*|^\gamma+|\tilde v|^p|\tilde vs|^\gamma)\varphi_\varepsilon(|v-\tilde v|^2)\\ \leq & 2(|v|^{p+\gamma}+|\tilde v|^{p+\gamma}) \varphi_\varepsilon(|v-\tilde v|^2) + C(1+|v_*|^\gamma+|\tilde vs|^\gamma)(1+|v|^p+|\tilde v|^p)\varphi_\varepsilon(|v-\tilde v|^2)\\ =&2 c_{p+\gamma,\varepsilon}(v,\tilde v) + C(1+|v_*|^{p+\gamma}+|\tilde vs|^{p+\gamma})c_{p,\varepsilon}(v,\tilde v). \varepsilonnd{align*} We next use that $ab\leq \varepsilon^{1/2} a^2 + \varepsilon^{-1/2} b^2$ to write \begin{align*} k_{p,\varepsilon}^{(12)}\leq & C \sqrt \varepsilon (1+|v|^p+|\tilde v|^p)(|v|^\gamma+|v_*|^\gamma+|\tilde v|^\gamma+|\tilde vs|^\gamma) \varphi_\varepsilon'(|v-\tilde v|^2)|v-\tilde v|^2 \\ &+ \frac C {\sqrt\varepsilon} (1+|v|^p+|\tilde v|^p)(|v|^\gamma+|v_*|^\gamma+|\tilde v|^\gamma+|\tilde vs|^\gamma)\varphi_\varepsilon'(|v-\tilde v|^2) |v_*-\tilde vs|^2 \\ \leq& C \sqrt \varepsilon (1+|v|^p+|\tilde v|^p)(|v|^\gamma+|v_*|^\gamma+|\tilde v|^\gamma+|\tilde vs|^\gamma) \varphi_\varepsilon(|v-\tilde v|^2) \\ &+ \frac C {\sqrt\varepsilon} (1+|v|^p+|\tilde v|^p)(|v|^\gamma+|v_*|^\gamma+|\tilde v|^\gamma+|\tilde vs|^\gamma)|v_*-\tilde vs|^2, \varepsilonnd{align*} because $r\varphi_\varepsilon'(r)\leq \varphi_\varepsilon(r)$ and $\varphi_\varepsilon'(r)\leq 1$ by \varepsilonqref{ve}. 
We carry on with \begin{align*} k_{p,\varepsilon}^{(12)}\leq & C \sqrt \varepsilon (1+|v|^{p+\gamma}+|\tilde v|^{p+\gamma})\varphi_\varepsilon(|v-\tilde v|^2) + C \sqrt \varepsilon (1+|v|^{p}+|\tilde v|^{p})(|v_*|^\gamma+|\tilde vs|^\gamma) \varphi_\varepsilon(|v-\tilde v|^2)\\ & + \frac C {\sqrt\varepsilon} (1+|v|^p+|\tilde v|^p)(|v|^\gamma+|v_*|^\gamma+|\tilde v|^\gamma+|\tilde vs|^\gamma) (1+\varepsilon|v_*-\tilde vs|^2)\varphi_\varepsilon(|v_*-\tilde vs|^2)\\ \leq & C \sqrt \varepsilon c_{p+\gamma,\varepsilon}(v,\tilde v) + C \sqrt \varepsilon (1+|v_*|^\gamma+|\tilde vs|^\gamma) c_{p,\varepsilon}(v,\tilde v)\\ & + \frac C {\sqrt\varepsilon} (1+|v|^p+|\tilde v|^p)(|v|^\gamma+|v_*|^\gamma+|\tilde v|^\gamma+|\tilde vs|^\gamma)\varphi_\varepsilon(|v_*-\tilde vs|^2)\\ &+ C \sqrt\varepsilon (1+|v|^p+|\tilde v|^p)(|v|^\gamma+|v_*|^\gamma+|\tilde v|^\gamma+|\tilde vs|^\gamma)(|v_*|^2+|\tilde vs|^{2}) \varphi_\varepsilon(|v_*-\tilde vs|^2)\\ \leq & C \sqrt \varepsilon c_{p+\gamma,\varepsilon}(v,\tilde v) + C \sqrt \varepsilon (1+|v_*|^{p+\gamma}+|\tilde vs|^{p+\gamma}) c_{p,\varepsilon}(v,\tilde v)\\ & + \frac C {\sqrt\varepsilon} (1+|v|^{p+\gamma}+|\tilde v|^{p+\gamma})(1+|v_*|^\gamma+|\tilde vs|^\gamma)\varphi_\varepsilon(|v_*-\tilde vs|^2)\\ &+ C \sqrt\varepsilon (1+|v|^{p+\gamma}+|\tilde v|^{p+\gamma})(|v_*|^2+|\tilde vs|^{2}) \varphi_\varepsilon(|v_*-\tilde vs|^2)\\ &+ C \sqrt\varepsilon (1+|v|^{p}+|\tilde v|^{p})(1+|v_*|^{2+\gamma}+|\tilde vs|^{2+\gamma})\varphi_\varepsilon(|v_*-\tilde vs|^2). 
\end{align*} Since $p\geq 2$, since $\gamma\in (0,1]$ and since $\varepsilon\in(0,1]$, we end with \begin{align*} k_{p,\varepsilon}^{(12)}\leq & C \sqrt \varepsilon c_{p+\gamma,\varepsilon}(v,\tilde v) +C(1+|v_*|^{p+\gamma}+|\tilde vs|^{p+\gamma})c_{p,\varepsilon}(v,\tilde v)\\ &+ \frac C {\sqrt\varepsilon} (1+|v|^{p+\gamma}+|\tilde v|^{p+\gamma})c_{p,\varepsilon}(v_*,\tilde vs)\\ &+ C \sqrt \varepsilon (1+|v|^{p}+|\tilde v|^{p})c_{p+\gamma,\varepsilon}(v_*,\tilde vs). \end{align*} Summing the bounds on $k_{p,\varepsilon}^{(11)}$ and $k_{p,\varepsilon}^{(12)}$ leads us to \eqref{i1}. \vskip.13cm {\bf Step 2.} We next prove that \begin{align}\label{i2p} k^{(2)}_{p,\varepsilon}\leq - p |v|^{p+\gamma}\varphi_{\varepsilon}(|v-\tilde v|^2)+ C(1+|v_*|^{p+\gamma})c_{p,\varepsilon}(v,\tilde v), \end{align} and this will imply, still allowing $C$ to change from line to line and to depend on $p$, that \begin{align}\label{i2} k^{(2)}_{p,\varepsilon}+\tilde k^{(2)}_{p,\varepsilon} \leq& - p (|v|^{p+\gamma}+|\tilde v|^{p+\gamma})\varphi_{\varepsilon}(|v-\tilde v|^2) + C(1+|v_*|^{p+\gamma}+|\tilde vs|^{p+\gamma})c_{p,\varepsilon}(v,\tilde v)\\ = & - p (c_{p+\gamma,\varepsilon}(v,\tilde v)-\varphi_\varepsilon(|v-\tilde v|^2)) + C(1+|v_*|^{p+\gamma}+|\tilde vs|^{p+\gamma})c_{p,\varepsilon}(v,\tilde v)\notag\\\le & - p c_{p+\gamma,\varepsilon}(v,\tilde v) + C(1+|v_*|^{p+\gamma}+|\tilde vs|^{p+\gamma})c_{p,\varepsilon}(v,\tilde v),\notag \end{align} where the equality uses the definition \eqref{cpe} of $c_{p+\gamma, \varepsilon}$, and in the final line we absorb $\varphi_\varepsilon(|v-\tilde v|^2)\le c_{p,\varepsilon}(v,\tilde v)$ into the second term.
\vskip.13cm By \varepsilonqref{tto}-\varepsilonqref{tto2} and by definition of $k^{(2)}_{p,\varepsilon}$, we see that \begin{align*} k^{(2)}_{p,\varepsilon} \leq& \varphi_{\varepsilon}(|v-\tilde v|^2)\Big[ -p |v|^{p+\gamma} +p|v|^p|v_*|^\gamma + C p^2 (|v|^{p-2+\gamma}|v_*|^2+|v|^{p-2}|v_*|^{2+\gamma})\Big] .\\ \leq & \varphi_{\varepsilon}(|v-\tilde v|^2)\Big[ -p |v|^{p+\gamma} + C(1+|v_*|^{2+\gamma})(1+|v|^p)\Big], \varepsilonnd{align*} from which \varepsilonqref{i2p} follows. \vskip.13cm {\bf Step 3.} We finally prove that \begin{align}\label{i3} k^{(3)}_{p,\varepsilon}+\tilde k^{(3)}_{p,\varepsilon} \leq & C (1+|v_*|^{p+\gamma}+|\tilde vs|^{p+\gamma})c_{p,\varepsilon}(v,\tilde v) \\ &+C(1+|v|^{p+\gamma}+|\tilde v|^{p+\gamma})c_{p,\varepsilon}(v_*,\tilde vs) \notag\\ &+ C\sqrt\varepsilon(1+|v_*|^{p}+|\tilde vs|^{p})c_{p+\gamma,\varepsilon}(v,\tilde v)\notag\\ &+ C\sqrt\varepsilon(1+|v|^{p}+|\tilde v|^{p})c_{p+\gamma,\varepsilon}(v_*,\tilde vs).\notag \varepsilonnd{align} By symmetry, it suffices to treat the case of $k^{(3)}_{p,\varepsilon}$. 
Recalling that $|\sigma(x)v|\leq C |x|^{\gamma/2}|v||v_*|$ by \varepsilonqref{tr}, and that $||\sigma(x)-\sigma(\tilde x)||\leq C(|x|^{\gamma/2}+|\tilde x|^{\gamma/2})|x-\tilde x|$ by \varepsilonqref{p4}, we directly find \begin{align*} k^{(3)}_{p,\varepsilon}\leq& C |v|^{p-1}|v_*||x|^{\gamma/2}(|x|^{\gamma/2}+|\tilde x|^{\gamma/2})|x-\tilde x||v-\tilde v|\varphi_\varepsilon'(|v-\tilde v|^2) \\ \leq & C |v|^{p-1}|v_*|(|v|^\gamma+|\tilde v|^\gamma+|v_*|^\gamma+|\tilde vs|^\gamma) (|v-\tilde v|^2+|v-\tilde v||v_*-\tilde vs|)\varphi_\varepsilon'(|v-\tilde v|^2)\\ =& k^{(31)}_{p,\varepsilon}+k^{(32)}_{p,\varepsilon}, \varepsilonnd{align*} where \begin{align*} k^{(31)}_{p,\varepsilon} =& C |v|^{p-1}|v_*|(|v|^\gamma+|\tilde v|^\gamma+|v_*|^\gamma+|\tilde vs|^\gamma)|v-\tilde v|^2\varphi_\varepsilon'(|v-\tilde v|^2),\\ k^{(32)}_{p,\varepsilon} =& C |v|^{p-1}|v_*|(|v|^\gamma+|\tilde v|^\gamma+|v_*|^\gamma+|\tilde vs|^\gamma)|v-\tilde v||v_*-\tilde vs| \varphi_\varepsilon'(|v-\tilde v|^2). \varepsilonnd{align*} Since $r\varphi_\varepsilon'(r)\leq \varphi_\varepsilon(r)$ by \varepsilonqref{ve}, we have \begin{align*} k^{(31)}_{p,\varepsilon} \leq& C(1+|v_*|^{1+\gamma}+|\tilde vs|^{1+\gamma})(1+|v|^{p-1+\gamma}+|\tilde v|^{p-1+\gamma})\varphi_\varepsilon(|v-\tilde v|^2)\\ \leq& C(1+|v_*|^{p+\gamma}+|\tilde vs|^{p+\gamma})c_{p,\varepsilon}(v,\tilde v). 
\varepsilonnd{align*} Next, we use that, with $a=|v-\tilde v|$ and $a_*=|v_*-\tilde vs|$, since $a[\varphi_\varepsilon'(a^2)]\leq \sqrt{a^2 \varphi_\varepsilon'(a^2)} \leq \sqrt{\varphi_\varepsilon(a^2)}$ by \varepsilonqref{ve}, $$ a a_* \varphi_\varepsilon'(a^2) \leq \sqrt{\varphi_\varepsilon(a^2)} \sqrt{\varphi_\varepsilon(a_*^2)(1+\varepsilon a_*^2)} \leq [\varphi_\varepsilon(a^2)+\varphi_\varepsilon(a_*^2)](1+\sqrt{\varepsilon}a_*) $$ to write $k^{(32)}_{p,\varepsilon}\leq k^{(321)}_{p,\varepsilon}+k^{(322)}_{p,\varepsilon}+k^{(323)}_{p,\varepsilon}+k^{(324)}_{p,\varepsilon}$, where \begin{align*} k^{(321)}_{p,\varepsilon} =& C |v|^{p-1}|v_*|(|v|^\gamma+|\tilde v|^\gamma+|v_*|^\gamma+|\tilde vs|^\gamma)\varphi_\varepsilon(|v-\tilde v|^2),\\ k^{(322)}_{p,\varepsilon} =& C |v|^{p-1}|v_*|(|v|^\gamma+|\tilde v|^\gamma+|v_*|^\gamma+|\tilde vs|^\gamma)\varphi_\varepsilon(|v_*-\tilde vs|^2),\\ k^{(323)}_{p,\varepsilon} =& C \sqrt\varepsilon |v|^{p-1}|v_*|(|v|^\gamma+|\tilde v|^\gamma+|v_*|^\gamma+|\tilde vs|^\gamma) |v_*-\tilde vs|\varphi_\varepsilon(|v-\tilde v|^2),\\ k^{(324)}_{p,\varepsilon} =& C \sqrt\varepsilon |v|^{p-1}|v_*|(|v|^\gamma+|\tilde v|^\gamma+|v_*|^\gamma+|\tilde vs|^\gamma) |v_*-\tilde vs|\varphi_\varepsilon(|v_*-\tilde vs|^2).\\ \varepsilonnd{align*} We have \begin{align*} k^{(321)}_{p,\varepsilon} \leq& C(1+|v_*|^{1+\gamma}+|\tilde vs|^{1+\gamma})(1+|v|^{p-1+\gamma}+|\tilde v|^{p-1+\gamma}) \varphi_\varepsilon(|v-\tilde v|^2)\\ \leq& C(1+|v_*|^{p+\gamma}+|\tilde vs|^{p+\gamma}) c_{p,\varepsilon}(v,\tilde v), \varepsilonnd{align*} as well as \begin{align*} k^{(322)}_{p,\varepsilon} \leq& C(1+|v_*|^{1+\gamma}+|\tilde vs|^{1+\gamma})(1+|v|^{p-1+\gamma}+|\tilde v|^{p-1+\gamma}) \varphi_\varepsilon(|v_*-\tilde vs|^2)\\ \leq& C(1+|v|^{p+\gamma}+|\tilde v|^{p+\gamma}) c_{p,\varepsilon}(v_*,\tilde vs), \varepsilonnd{align*} and, dropping $\sqrt \varepsilon$ and using that $|v_*-\tilde vs|\leq |v_*|+|\tilde vs|$, \begin{align*} k^{(323)}_{p,\varepsilon} 
\leq& C(1+|v_*|^{2+\gamma}+|\tilde vs|^{2+\gamma})(1+|v|^{p-1+\gamma}+|\tilde v|^{p-1+\gamma}) \varphi_\varepsilon(|v-\tilde v|^2)\\ \leq& C(1+|v_*|^{p+\gamma}+|\tilde vs|^{p+\gamma}) c_{p,\varepsilon}(v,\tilde v). \varepsilonnd{align*} Finally, using again the bound $|v_*-\tilde vs|\leq|v_*|+|\tilde vs|$, \begin{align*} k^{(324)}_{p,\varepsilon} \leq& C\sqrt\varepsilon (1+|v|^{p-1+\gamma}+|\tilde v|^{p-1+\gamma}) (1+|v_*|^{2+\gamma}+|\tilde vs|^{2+\gamma})\varphi_\varepsilon(|v_*-\tilde vs|^2)\\ \leq& C\sqrt\varepsilon (1+|v|^{p}+|\tilde v|^{p})c_{p+\gamma,\varepsilon}(v_*,\tilde vs). \varepsilonnd{align*} Summing the bounds on $k^{(31)}_{p,\varepsilon}$, $k^{(321)}_{p,\varepsilon}$, $k^{(322)}_{p,\varepsilon}$, $k^{(323)}_{p,\varepsilon}$ and $k^{(324)}_{p,\varepsilon}$ ends the step. \vskip.13cm Gathering \varepsilonqref{i0}, \varepsilonqref{i1}, \varepsilonqref{i2} and \varepsilonqref{i3} completes the proof since $\varepsilon\in(0,1]$. \varepsilonnd{proof} \begin{thebibliography}{99} \bibitem{abl}{\sc Alonso, R., Bagland, V. and Lods, B.} \newblock Long time dynamics for the Landau-Fermi-Dirac equation with hard potentials. \newblock {\varepsilonm J. Differential Equations}, 270 (2021), 596--663. \color{black} \bibitem{agt} {\sc Alonso, R., Gamba, I.M. and Taskovic, M.} \newblock Exponentially-tailed regularity and time asymptotic for the homogeneous Boltzmann equation. \newblock {\varepsilonm arXiv:1711.06596.} \bibitem{ar} {\sc Arsen'ev, A.A. and Buryak, O.E.} \newblock On the connection between a solution of the Boltzmann equation and a solution of the Landau-Fokker-Planck equation. \newblock {\varepsilonm Mathematics of the USSR-Sbornik.} 69 (1991), 465. \bibitem{b} {\sc Bobylev, A.V.} \newblock Moment inequalities for the Boltzmann equation and applications to spatially homogeneous problems. \newblock {\varepsilonm J. Statist. Phys.} 88 (1997), 1183--1214. 
\bibitem{c} {\sc Carrapatoso, K.} \newblock Exponential convergence to equilibrium for the homogeneous Landau equation with hard potentials. \newblock {\em Bull. Sci. Math.} 139 (2015), 777--805. \bibitem{cddw} {\sc Carrillo, J.A., Delgadino, M.G., Desvillettes, L. and Wu, J.} \newblock The Landau equation as a Gradient Flow. \newblock {\em arXiv:2007.08591.} \bibitem{ch} {\sc Chen, H., Li, W. and Xu, C.J.} \newblock Gevrey regularity for solution of the spatially homogeneous Landau equation. \newblock {\em Acta Math. Sci. Ser. B} 29 (2009), 673--686. \color{black} \bibitem{ch2} {\sc Chen, H., Li, W.X. and Xu, C.J.} \newblock Analytic smoothness effect of solutions for spatially homogeneous Landau equation. \newblock {\em J. Differential Equations} 248 (2010), 77--94. \bibitem{d} {\sc Desvillettes, L.} \newblock Entropy dissipation estimates for the Landau equation in the Coulomb case and applications. \newblock {\em J. Funct. Anal.} 269 (2015), 1359--1403. \bibitem{d2} {\sc Desvillettes, L.} \newblock On asymptotics of the Boltzmann equation when the collisions become grazing. \newblock {\em Transport Theory Statist. Phys.} 21 (1992), 259--276. \bibitem{dv1} {\sc Desvillettes, L. and Villani, C.} \newblock On the spatially homogeneous Landau equation for hard potentials, Part I: existence, uniqueness and smoothness. \newblock {\em Comm. Partial Differential Equations} 25 (2000), 179--259. \bibitem{dv2} {\sc Desvillettes, L. and Villani, C.} \newblock On the spatially homogeneous Landau equation for hard potentials, Part II: H-Theorem and Applications. \newblock {\em Comm. Partial Differential Equations} 25 (2000), 261--298. \bibitem{fc} {\sc Fournier, N.} \newblock Uniqueness of bounded solutions for the homogeneous Landau equation with a Coulomb potential. \newblock {\em Comm. Math. Phys.} 299 (2010), 765--782. \bibitem{fgue} {\sc Fournier, N. 
and Gu\'erin, H.} \newblock Well-posedness of the spatially homogeneous Landau equation for soft potentials. \newblock {\varepsilonm J. Funct. Anal.} 256 (2009), 2542--2560. \bibitem{fgui} {\sc Fournier, N. and Guillin, A.} \newblock From a Kac-like particle system to the Landau equation for hard potentials and Maxwell molecules. \newblock {\varepsilonm Ann. Sci. \'Ec. Norm. Sup\'er.} 50 (2017), 157--199. \bibitem{fh} {\sc Fournier, N. and Hauray, M.} \newblock Propagation of chaos for the Landau equation with moderately soft potentials. \newblock {\varepsilonm Ann. Probab.} 44 (2016), 3581--3660. \bibitem{fmi} {\sc Fournier, N. and Mischler, S.} \newblock Rate of convergence of the Nanbu particle system for hard potentials and Maxwell molecules. \newblock {\varepsilonm Ann. Probab.} 44(1) (2016), 589--627. \bibitem{fm} {\sc Fournier, N. and Mouhot, C.} \newblock On the well-posedness of the spatially homogeneous Boltzmann equation with a moderate angular singularity. \newblock {\varepsilonm Comm. Math. Phys.} 289 (2009), 803--824. \bibitem{fp} {\sc Fournier, N. and Perthame, B.} \newblock Monge-Kantorovich distance for PDEs: the coupling method. \newblock {\varepsilonm EMS Surv. Math. Sci.} 7 (2020), 1--31. \color{black} \bibitem{fo} {\sc Fournier, N.} \newblock On exponential moments of the homogeneous Boltzmann equation for hard potentials without cutoff. \newblock {\varepsilonm arXiv preprint arXiv:2012.02982} \color{black} \bibitem{f} {\sc Funaki, T}, \newblock The diffusion approximation of the spatially homogeneous Boltzmann equation \newblock {\varepsilonm Duke Math. J.} 52 (1985), 1--23. \bibitem{go} {\sc Golse, F., Imbert, C., Mouhot, C. and Vasseur, A.} \newblock Harnack inequality for kinetic Fokker-Planck equations with rough coefficients and application to the Landau equation. \newblock {\varepsilonm Ann. Sc. Norm. Super. Pisa Cl. Sci.} (5) 19 (2019), 253--295. 
\bibitem{gou} {\sc Goudon, T.} \newblock On Boltzmann Equations and Fokker-Planck Asymptotics: Influence of Grazing Collisions. \newblock {\varepsilonm J. Statist. Phys.} 89 (1997), 751--776. \bibitem{gu} {\sc Gu\'erin, H.} \newblock Existence and regularity of a weak function-solution for some Landau equations with a stochastic approach. \newblock {\varepsilonm Stochastic Process. Appl.} 101 (2002), 303--325. \bibitem{g} {\sc Gu\'erin, H.} \newblock Solving Landau equation for some soft potentials through a probabilistic approach. \newblock {\varepsilonm Ann. Appl. Probab.} 13 (2003), 515--539. \bibitem{guo} {\sc Guo, Y.} \newblock The Landau equation in a periodic box, \newblock {\varepsilonm Comm. Math. Phys.} 231 (2002), 391--434. \bibitem{hy} {\sc He, L. and Yang, X.} \newblock Well-posedness and asymptotics of grazing collisions limit of Boltzmann equation with Coulomb interaction. \newblock {\varepsilonm SIAM J. Math. Anal.} 46 (2014), 4104--4165. \bibitem{h1} {\sc Heydecker, D.} \newblock Pathwise convergence of the hard spheres Kac process. \newblock {\varepsilonm Ann. Appl. Probab.} 29 (2019), 3062--3127. \bibitem{h2} {\sc Heydecker, D.} \newblock Kac's Process with Hard Potentials and a Moderate Angular Singularity. \newblock arXiv:2008.12943 \bibitem{k} {\sc Kac, M.} \newblock Foundations of kinetic theory. \newblock {\varepsilonm Proceedings of the Third Berkeley Symposium on Mathematical Statistics and Probability, 1954--1955, vol. III}, University of California Press, 171--197. \bibitem{mm} {\sc Mischler, S. and Mouhot, C.} \newblock Kac's Program in Kinetic Theory. \newblock {\varepsilonm Invent. Math.} 193 (2013), 1--147. \bibitem{mmw} {\sc Mischler, S., Mouhot, C. and Wennberg, B.} \newblock A new approach to quantitative propagation of chaos for drift, diffusion and jump processes. \newblock{\varepsilonm Probab. Theory Related Fields} 161 (2015), 1--59. 
\bibitem{mw} {\sc Mischler, S and Wennberg, B.} \newblock On the spatially homogeneous Boltzmann equation. \newblock{\varepsilonm Ann. Inst. H. Poincar\'e Anal. Non Lin\'eaire} 16 (1999), 467--501. \bibitem{morimoto}{\sc Morimoto, Y., Pravda-Starov, K. and Xu, C.J.} \newblock A remark on the ultra-analytic smoothing properties of the spatially homogeneous Landau equation. \newblock {\varepsilonm Kinet. Relat. Models} 6 (2013), 715--727. \bibitem{mou} {\sc Mouhot, C.} \newblock Explicit coercivity estimates for the linearized Boltzmann and Landau operators. \newblock{\varepsilonm Comm. Partial Differential Equations} 31 (2006), 1321--1348. \bibitem{n} {\sc Norris, J.} \newblock A consistency estimate for Kac's model of elastic collisions in a dilute gas. \newblock {\varepsilonm Ann. Appl. Probab.} 26 (2016), 1029--1081. \bibitem{p} {\sc Povzner, A. Ja.} \newblock On the Boltzmann equation in the kinetic theory of gases. \newblock {\varepsilonm Mat. Sb. (N.S.)} 58 (1962), 65--86. \bibitem{t} {\sc Tanaka, H.}, \newblock Probabilistic treatment of the Boltzmann equation of Maxwellian molecules. \newblock {\varepsilonm Z. Wahrsch. und Verw. Gebiete} 46 (1978/79), 67--105. \bibitem{v:max} {\sc Villani, C.} \newblock On the spatially homogeneous Landau equation for Maxwellian molecules. \newblock {\varepsilonm Math. Models Methods Appl. Sci.} 8 (1998), 957--983. \bibitem{v:nc} {\sc Villani, C.}, \newblock On a new class of weak solutions to the spatially homogeneous Boltzmann and Landau equations. \newblock {\varepsilonm Arch. Rational Mech. Anal.} 143 (1998), 273--307. \bibitem{v:h} {\sc Villani, C.} \newblock A review of mathematical topics in collisional kinetic theory. \newblock {\varepsilonm Handbook of mathematical fluid dynamics Vol. I,} 71--305, North-Holland, Amsterdam, 2002. \bibitem{v: ot} {\sc Villani, C.} \newblock Topics in Optimal Transportation. \newblock {\varepsilonm American Mathematical Society}, No. 58, 2003. 
\color{black} \bibitem{w} {\sc Walsh, J.B.} \newblock An introduction to stochastic partial differential equations. \newblock \'Ecole d'\'et\'e de Probabilit\'es de Saint-Flour XIV, Lect. Notes in Math. {{1180}}, 265--437, 1986. \end{thebibliography} \end{document}
\begin{document} \newtheorem{unique}{Proposition}[section] \newtheorem{tpdiag}[unique]{Proposition} \newtheorem{unit}{Proposition}[section] \newtheorem{algorithm}[unit]{Lemma} \newtheorem{linear}[unit]{Example} \newtheorem{flatsem}[unit]{Example} \newtheorem{uflatsem}[unit]{Example} \newtheorem{powset}[unit]{Example} \newtheorem{amenconst}[unit]{Theorem} \newtheorem{amenconst1}[unit]{Corollary} \newtheorem{gradedalga}{Proposition}[section] \newtheorem{gradedalg}[gradedalga]{Theorem} \newtheorem{gradedlinear}[gradedalga]{Proposition} \newtheorem{dalesq}[gradedalga]{Proposition} \title[Amenability constants]{Amenability constants for semilattice algebras} \author{Mahya Ghandehari, Hamed Hatami and Nico Spronk} \begin{abstract} For any finite commutative idempotent semigroup $S$, a {\it semilattice}, we show how to compute the amenability constant of its semigroup algebra $\ell^1(S)$, which is always of the form $4n+1$. We then show that these give lower bounds to amenability constants of certain Banach algebras graded over semilattices. We also demonstrate an example of a commutative Clifford semigroup $G$ for which amenability constant of $\ell^1(G)$ is not of the form $4n+1$. We also show there is no commutative semigroup with amenability constant between $5$ and $9$. \end{abstract} \maketitle \footnote{ 2000 {\it Mathematics Subject Classification.} Primary 46H20, 43A20; Secondary 20M14, 43A30. {\it Key words and phrases.} amenable/contractible Banach algebra, semilattice, graded Banach algebra. Research of the third named author supported by NSERC Grant 312515-05.} In conjunction with V.\ Runde \cite{rundes1}, the third named author proved that for a locally compact group $G$, $G$ is compact if and only if its Fourier-Stieltjes algebra $\mathrm{B}(G)$ is operator amenable with operator amenability constant less than $5$. 
In a subsequent article \cite{rundes2}, examples of non-compact groups $G_1$ were found for which the operator amenability constant is exactly $5$. In related work of Dales, Lau and Strauss \cite[Corollary 10.26]{dalesls}, improving on \cite[Theorem 3.2]{stokke}, it was shown that a semigroup algebra $\ell^1(S)$ has amenability constant less than $5$, if and only if $S$ is an amenable group. For the multiplicative semigroup $L_1=\{0,1\}$, it is known that the amenability constant of $\ell^1(L_1)$ is $5$. These parallel facts are not coincidences since for the special groups $G_1$, mentioned above, $\mathrm{B}(G_1)$ is {\it $\ell^1$-graded} over $L_1$, i.e.\ there are $1$-operator amenable subalgebras $\fA_0$ and $\fA_1$ such that $\mathrm{B}(G_1)= \fA_0\oplus_{\ell^1}\fA_1$, and $\fA_0$ is an ideal. We are thus led to consider the general situation of Banach algebras graded over semilattices, i.e.\ commutative idempotent semigroups, which we define in Section \ref{sec:graded}. To do this, in Section \ref{sec:semilattice} we develop a method for computing the amenability constants associated to finite semilattice algebras. The results in Section \ref{sec:semilattice} have a similar flavour to some results from those in the recent monograph \cite{dalesls}, and are very similar to some results of Duncan and Namioka \cite{duncann}. However, our method is explicit and quantitative, and thus is a nice complement to their work. In Section \ref{sec:graded} we obtain a lower bound for the amenability constant of Banach algebras graded over finite semilattices. We show a surprising example which indicates our lower bound is not, in general the amenability constant. We show, at least for certain finite dimensional algebras graded over linear semilattices, that our lower bound is achieved. We close with an answer to a question asked of us by H.G. Dales: we show that there does not exist a commutative semigroup $G$ such that $5<\mathrm{AM}(\ell^1(G))<9$. 
There are natural examples of Banach algebras from harmonic analysis, due to Taylor \cite{taylor}, Inoue \cite{inoue}, and Ilie and Spronk \cite{ilies,ilies1}, to which our techniques apply. We refer the reader to \cite{ilies} and \cite{rundes1} for more on this. We feel that ideas developed here may lead to a tool to help classify which locally compact groups admit operator amenable Fourier-Stieltjes algebras $\mathrm{B}(G)$. Our hope is that the operator amenability constants $\mathrm{AM}_{op}(\mathrm{B}(G))$ can all be computed. We conjecture they are a subset of $\{4n+1:n\in\En\}$, motivated by Theorem \ref{theo:amenconst} and Theorem \ref{theo:gradedalg}, below. We hope that these values will serve as a tool for classifying for which groups $G$, $\mathrm{B}(G)$ is operator amenable. Interest in amenability of semigroup algebras, in particular for inverse semigroups and Clifford semigroups, goes back at least as far as Duncan and Namioka \cite{duncann}. Gr{\o}nb{\ae}k \cite{groenbaek} characterised commutative semigroups $G$ for which $\ell^1(G)$ is amenable. A recent extensive treatise on $\ell^1$-algebras of semigroups has been written by Dales, Lau and Strauss \cite{dalesls}, which includes a characterisation of all semigroups $G$ for which $\ell^1(G)$ is amenable. Biflatness of $\ell^1(S)$, for a semilattice $S$, has recently been characterised by Choi \cite{choi}. \subsection{Preliminaries} Let $\fA$ be a Banach algebra. Let $\fA\otimes^\gam\fA$ denote the projective tensor product. We let $m: \fA\otimes^\gam\fA\to\fA$ denote the multiplication map and we have left and right module actions of $\fA$ on $\fA\otimes^\gam\fA$ given on elementary tensors by \[ a\mult(b\otimes c)=(ab)\otimes c\aand (b\otimes c)\mult a=b\otimes (ca). \] A {\it bounded approximate diagonal} (b.a.d.) 
is a bounded net $(D_\alp)$ in $\fA\otimes^\gam\fA$ such that $(m(D_\alp))$ is a bounded approximate identity in $\fA$, i.e.\ \begin{equation}\label{eq:appdiag1} \lim_\alp am(D_\alp)=a\aand\lim_\alp m(D_\alp)a=a\text{ for each } a\iin\fA \end{equation} and $(D_\alp)$ is asymptotically central for the $\fA$-actions, i.e.\ \begin{equation}\label{eq:appdiag2} \lim_\alp(a\mult D_\alp-D_\alp\mult a)=0\text{ for each }a\iin\fA. \end{equation} Following Johnson \cite{johnson}, we will say that a Banach algebra $\fA$ is {\it amenable} if it admits a b.a.d. A quantitative feature of amenability was introduced by Johnson in \cite{johnson95}, for applications to Fourier algebras of finite groups. The {\it amenability constant} of an amenable Banach algebra $\fA$ is given by \[ \mathrm{AM}(\fA)=\inf\left\{\sup_\alp\norm{D_\alp}_\gam:(D_\alp) \text{ is a b.a.d.\ for }\fA\right\}. \] The problem of understanding amenable semigroup algebras in terms of their amenability constants has attracted some attention \cite{stokke,dalesls}. We call $\fA$ {\it contractible} if it admits a {\it diagonal}, i.e.\ an element $D\iin\fA\otimes^\gam\fA$ for which \begin{align} am(D)=&a=m(D)a\;\aand\label{eq:diag1} \\ a\mult D&=D\mult a\label{eq:diag2} \end{align} for each $a\iin\fA$. Note, in particular, then $\fA$ must be unital and the norm of the unit is bounded above by $\mathrm{AM}(\fA)$. If $\fA$ is a finite dimensional amenable Banach algebra, then $\fA\otimes^\gam\fA$ is a finite dimensional Banach space, so any b.a.d.\ admits a cluster point $D$. Since any subnet of a b.a.d.\ is also a b.a.d., the cluster point must be a diagonal, whence $\fA$ is contractible. We record the following simple observation. \begin{unique}\label{prop:unique} If $\fA$ is a contractible commutative Banach algebra, then the diagonal is unique. \end{unique} \proof We note that $\fA\otimes^\gam\fA$ is a Banach algebra in an obvious way: $(a\otimes b)(c\otimes d)=(ac)\otimes(bd)$. 
If $D$ is a diagonal, then $(a\otimes b)D=a\mult D\mult b=(ab)\mult D$ for $a,b\iin\fA$, by commutativity. Hence if $D'$ is another diagonal \[ D'D=m(D')\mult D=1\mult D=D \] and, similarly, $D'D=DD'=D'$.\endpf It will also be useful to observe the following. \begin{tpdiag}\label{prop:tpdiag} Let $\fA$ and $\fB$ be contractible Banach algebras, with respective diagonals $D_\fA$ and $D_\fB$, then $\fA\otimes^\gam\fB$ has diagonal \[ D_\fA\otimes D_\fB\in(\fA\otimes^\gam\fA)\otimes^\gam(\fB\otimes^\gam\fB) \cong(\fA\otimes^\gam\fB)\otimes^\gam(\fA\otimes^\gam\fB). \] \end{tpdiag} \proof It is simple to check the diagonal axioms (\ref{eq:diag1}) and (\ref{eq:diag2}). \endpf \section{Amenability constants for semilattice algebras} \label{sec:semilattice} A {\it semilattice} is a commutative semigroup $S$ in which each element is idempotent, i.e.\ if $s\in S$ then $ss=s$. If $s,t\in S$ we write \begin{equation}\label{eq:po} s\leq t\quad\iff\quad st=s. \end{equation} It is clear that this defines a partial order on $S$. We note that if $S$ is a finite semilattice, then $o=\prod_{s\in S}s$ is a minimal element for $S$ with respect to this partial order. We note that if $S$ has a minimal element, then it is unique. Also if $S$ has a unit $1$, then $1$ is the maximal element in $S$. A basic example of a semilattice is $\fP(T)$, the set of all subsets of a set $T$, where we define $\sig\tau=\sig\cap \tau$ for $\sig,\tau\iin\fP(T)$. The minimal element is $\varnothing$, and the maximal element is $T$. We call any subsemilattice of a semilattice $\fP(T)$ a {\it subset semilattice}. This type of semilattice is universal as we have a semilattice ``Cayley Theorem": for any semilattice $S$, the map $s\mapsto\{t\in S:t\leq s\}:S\to\fP(S)$ (or $s\mapsto\{t\in S\setdif\{o\}:t\leq s\}:S\to\fP(S\setdif\{o\})$) is an injective semilattice homomorphism (by which $o\mapsto\varnothing$). 
For any semilattice $S$ we define \[ \ell^1(S)=\left\{x=\sum_{s\in S}x(s)\del_s:\text{each }x(s)\in\Cee\aand \norm{x}_1=\sum_{s\in S}|x(s)|<\infty\right\} \] where each $\del_s$ is the usual ``point mass'' function. Then $\ell^1(S)$ is a commutative Banach algebra under the norm $\norm{\cdot}_1$ with the product \[ \left(\sum_{s\in S}x(s)\del_s\right)\con\left(\sum_{t\in S}y(t)\del_t\right) =\sum_{r\in S}\left(\sum_{st=r}x(s)y(t)\right)\del_r. \] In particular we have $\del_s\con\del_t=\del_{st}$. We shall consider the Banach space $\ell^\infty(S)$, of bounded functions from $S$ to $\Cee$ with supremum norm, to be an algebra under usual pointwise operations. The Cayley map, indicated above, extends to an algebra homomorphism $\Sig:\ell^1(S)\to\ell^\infty(S)$, given on each $\del_s$ by \begin{equation}\label{eq:schutz} \Sig(\del_s)=\chi_{\{t\in S:t\leq s\}} \end{equation} and extended linearly and continuously to all of $\ell^1(S)$. Here, $\chi_T$ is the indicator function of $T\subset S$. The map $\Sig$ is called the {\it Sch\"{u}tzenberger map}; see \cite[\S 4]{choi} and references therein. We note that if $S$ is finite, then $\Sig$ is a bijection. In this case a formula for its inverse is given by \begin{equation}\label{eq:mobius} \Sig^{-1}(\chi_s)=\sum_{t\leq s}\mu(t,s)\del_t \end{equation} where $\chi_s=\chi_{\{s\}}$ and $\mu:\{(t,s)\in S\cross S:t\leq s\}\to\Ree$ is the {\it M\"{o}bius function} of the partially ordered set $(S,\leq)$ as defined in \cite[\S 3.7]{stanley}. Our computations in this section will be equivalent to explicitly computing $\mu$, though we will never need to know $\mu$ directly. It follows from \cite[Theorem 10]{duncann} that $\ell^1(S)$ is amenable if and only if $S$ is finite. Thus it follows from (\ref{eq:diag1}) that $\ell^1(S)$ is unital if $S$ is finite. If $S$ is unital, then $\del_1$ is the unit for $\ell^1(S)$. If $S$ is not unital, the unit is more complicated. 
We let $M(S)$ denote the set of maximal elements in $S$ with respect to the partial ordering (\ref{eq:po}). \begin{unit}\label{prop:unit} If $S$ is a finite semilattice then the unit is given by $u=\sum_{p\in S}u(p)\del_p$ where \begin{equation}\label{eq:unit} u(p)=1-\sum_{t>p}u(t) \end{equation} for each $p\iin S$ and we adopt the convention that an empty sum is $0$. Moreover \begin{equation}\label{eq:unit1} \sum_{s\in S}u(s)=1. \end{equation} \end{unit} \proof While we have already established existence of the unit above, let us note that we can gain a very elementary proof of its existence. Indeed since $\Sig:\ell^1(S)\to\ell^\infty(S)$ is a bijection, $u=\Sig^{-1}(\chi_S)$ is the unit for $\ell^1(S)$. If $p\in S$ then \[ \del_p=\del_p\con u=\left(\sum_{s\geq p}u(s)\right)\del_p +\sum_{s<p}\left(\sum_{\substack{t\in S \\ tp=s}}u(t)\right)\del_s \] and thus, inspecting the coefficient of $\del_p$, we obtain (\ref{eq:unit}). Note that if $p\in M(S)$ the formula above gives $u(p)=1$, and for any $s\iin S\setdif M(S)$ we have $\sum_{t\in S,tp=s}u(t)=0$. Thus, if we select $p\iin M(S)$ we have \[ \sum_{s\in S}u(s)=u(p) +\sum_{s<p}\left(\sum_{\substack{t\in S \\ tp=s}}u(t)\right)=1 \] and thus obtain (\ref{eq:unit1}). \endpf We note that if $S$ is a finite semilattice then $S\setdif M(S)$ is a subsemilattice, in fact an ideal, of $S$. We also note that $S\cross S$ is a semilattice and the partial order there satisfies \[ (s,t)\leq(p,q)\quad\iff\quad s\leq p\aand t\leq q. \] The following gives an algorithm for computing the diagonal for $\ell^1(S)$. \begin{algorithm}\label{lem:algorithm} Let $S$ be a finite semilattice. 
Then the diagonal \[ D=\sum_{(s,t)\in S\times S}d(s,t)\del_s\otimes\del_t \] satisfies, for all $(p,q)\iin S\cross S$, {\bf (a)} $\displaystyle d(p,p)=u(p) -\sum_{\substack{(s,t)>(p,p) \\ st=p}}d(s,t)$; {\bf (b)} if $q\not\geq p$, then $\displaystyle d(p,q)=-\sum_{t>q}d(p,t)$ and $\displaystyle d(q,p)=-\sum_{s>q}d(s,p)$; and {\bf (c)} $d(p,q)=d(q,p)$. \noindent Thus, each $d(p,q)$ is an integer, and for distinct elements $p,q\iin M(S)$ we have $d(p,p)=1$ and $d(p,q)=0$. \end{algorithm} \proof The equation (\ref{eq:diag1}) gives us \begin{equation}\label{eq:diag11} \sum_{p\in S}u(p)\del_p=u=\sum_{(s,t)\in S\times S}d(s,t)\del_{st} =\sum_{p\in S}\left(\sum_{\substack{(s,t)\in S\times S \\ st=p}}d(s,t)\right) \del_p. \end{equation} Since $st=p$ necessitates $(s,t)\geq(p,p)$, we examine the coefficient of $\del_p$ to find \begin{equation}\label{eq:plevel} u(p) =\sum_{\substack{(s,t)\geq(p,p) \\ st=p}}d(s,t) \end{equation} from which we obtain (a). In particular, if $p\in M(S)$ we obtain an empty sum in (a) and find $d(p,p)=1$. The equation (\ref{eq:diag2}) implies that $\del_q\mult D=D\mult\del_q$ and hence we obtain \begin{equation}\label{eq:diag22} \sum_{(s,t)\in S\times S}d(s,t)\del_{qs}\otimes\del_t =\sum_{(s,t)\in S\times S}d(s,t)\del_s\otimes\del_{tq}. \end{equation} If $q\not\geq p$ then there is no $s\iin S$ for which $qs=p$. Hence examining the coefficient of $\del_p\otimes\del_q$ and $\del_q\otimes\del_p$, respectively, in (\ref{eq:diag22}), yields \begin{equation}\label{eq:dptsum} 0=\sum_{t\geq q}d(p,t)\quad\aand\quad \sum_{s\geq q}d(s,p)=0. \end{equation} Hence we have established (b). In particular, if $q,p\in M(S)$ we have an empty sum in (b), so $d(p,q)=0$. We can see for any pair $(p,q)$ with $p\not=q$, so $p\not\leq q$ or $q\not\leq p$, that $d(p,q)$ is determined by the coefficients $d(s,t)$ with $(s,t)>(p,q)$. Hence by induction, using the coefficients $d(p,p)$ and $d(p,q)$ for distinct maximal $p,q$ as a base, we obtain (c). 
For example, if $q\in M(S\setdif M(S))$, then (b) implies for every $p>q$ that \[ d(p,q)=-\sum_{t>q}d(p,t)=-d(p,p)=-1 \] and, similarly, $d(q,p)=-1$. It is clear, from the above induction, that each $d(p,q)$ is an integer. \endpf Let us see how Lemma \ref{lem:algorithm} allows us to compute the diagonal $D$ of $\ell^1(S)$ for a finite semilattice $S$. {\bf Step 1.} We inductively define \begin{equation}\label{eq:ideals} S_0=S,S_1=S\setdif M(S),\dots,S_{k+1}=S_k\setdif M(S_k) \end{equation} and we let $n(S)=\min\{k:S_{k+1}=\varnothing\}$, so $S_{n(S)}=\{o\}$ and $S_{n(S)+1}=\varnothing$. {\bf Step 2.} We label $S=\{s_0,s_1,\dots,s_{|S|-1}\}$ in any manner for which \[ i\geq j\aand s_i\in S_k\quad\implies\quad s_j\in S_k. \] Thus, the elements of $M(S_k)$ comprise the last part of the list of $S_k$ for $k=1,\dots,n(S)$. In particular, $s_0=o$ and $s_{|S|-1}\in M(S)$. {\bf Step 3.} The diagonal $D$ will be represented by an $|S|\cross|S|$ matrix $[D]=[d(s_i,s_j)]$. The lower rightmost corner will be the $|M(S)|\cross|M(S)|$ identity matrix. We can then proceed, using formulas (b) and (a) from the lemma above, to compute the remaining entries of the lower rightmost $(|M(S)|+1)\cross(|M(S)|+1)$ corner of $[D]$, etc., until we are done. In order to describe certain semilattices $S$, we define the {\it semilattice graph} $\Gamma(S)=(S,e(S))$, where the vertex set is $S$ and the edge set is given by ordered pairs \[ e(S)=\{(s,t)\in S\cross S:s>t\text{ and there is no }r\iin S\text{ for which } s>r>t\}. \] To picture such a graph for a finite semilattice $S$ it is helpful to describe levels. Let $S_0,S_1,\dots,S_{n(S)}$ be the sequence of ideals of $S$ given in (\ref{eq:ideals}). For $s\iin S$ we let the {\it level} of $s$ be given by \[ \lam(s)=n(S)-k\wwhere s\in M(S_k). \] Note that for the power set semilattice $\fP(T)$, $\lam(\sig)=|\sig|$, the cardinality of $\sig$. 
However, this relation need not hold for a subsemilattice of $\fP(T)$, as is evident from the Example \ref{ex:flatsem}, below. A 6-element, 4-level semilattice is illustrated in (\ref{ex:unrsl}). We apply this algorithm to obtain the following examples. We denote, for a finite semilattice $S$, the amenability constant \[ \mathrm{AM}(S)=\mathrm{AM}(\ell^1(S))=\norm{D}_1= \sum_{(s,t)\in S\times S}|d(s,t)| \] where we recall the well-known isometric identification $\ell^1(S)\otimes^\gam\ell^1(S)\cong\ell^1(S\cross S)$. \begin{linear}\label{ex:linear} Let $L_n=\{0,1,2,\dots,n\}$ be a ``linear" semilattice with operation $st=s\wedge t=\min\{s,t\}$. Then we obtain diagonal with $(n+1)\cross(n+1)$ matrix \[ [D]=\begin{bmatrix} \phantom{-}2 & -1 & \hdots & \phantom{-}0 & \phantom{-}0 \\ -1 & \phantom{-}2 & \ddots & \phantom{-}0 & \phantom{-}0 \\ \phantom{-}\vdots & \ddots & \ddots & \ddots & \phantom{-}\vdots \\ \phantom{-}0 & \phantom{-}0 & \ddots & \phantom{-}2 & -1 \\ \phantom{-}0 & \phantom{-}0 & \hdots & -1 & \phantom{-}1 \end{bmatrix}. \] Hence $\mathrm{AM}(L_n)=4n+1$. \end{linear} \begin{flatsem}\label{ex:flatsem} Let $F_n=\{o,s_1,\dots,s_n\}$ be the $n+1$ element ``flat'' semilattice with multiplications $s_is_j=o$ if $i\not=j$. Then we obtain unit \[ u=\del_{s_1}+\dots+\del_{s_n}+(1-n)\del_o \] and diagonal with $(n+1)\cross(n+1)$-matrix \[ [D]=\begin{bmatrix} n+1 & -1 & -1 & \hdots & -1 \\ -1 & \phantom{-}1 & \phantom{-}0 & \hdots & \phantom{-}0 \\ -1 & \phantom{-}0 & \phantom{-}1 & \ddots & \phantom{-}\vdots \\ \phantom{-}\vdots & \phantom{-}\vdots & \ddots & \ddots & \phantom{-}0 \\ -1 & \phantom{-}0 & \hdots & \phantom{-}0 & \phantom{-}1 \end{bmatrix}. \] Hence $\mathrm{AM}(F_n)=4n+1$. \end{flatsem} \begin{uflatsem}\label{ex:uflatsem} Let $F_n^1=\{o,s_1,\dots,s_n,1\}$ be the unitasation of $F_n$, above. 
Then we obtain diagonal with $(n+2)\cross(n+2)$ matrix \[ [D]=\begin{bmatrix} n^2-n+2 & -n & \hdots & -n & n-1 \\ -n & \phantom{-}2 & \hdots & \phantom{-}1 & -1 \\ \vdots & \phantom{-}\vdots & \ddots & \phantom{-}\vdots & \vdots \\ -n & \phantom{-}1 & \hdots & \phantom{-}2 & -1 \\ n-1 & -1 & \hdots & -1 & \phantom{-}1\end{bmatrix}. \] Hence $\mathrm{AM}(F_n^1)=4n^2+4n+1$. \end{uflatsem} The next example is less direct than the previous ones, so we offer a proof. \begin{powset}\label{ex:powset} Let $P_n=\fP(\{1,\dots,n\})$ with multiplication $st=s\cap t$. Then the diagonal $D$ has $2^n\cross 2^n$ matrix which is, up to permutative similarity, the Kronecker product \[ \begin{bmatrix} \phantom{-}2 & -1 \\ -1 & \phantom{-}1\end{bmatrix}\otimes\dots \otimes\begin{bmatrix} \phantom{-}2 & -1 \\ -1 & \phantom{-}1 \end{bmatrix}\;(n\text{ times}). \] Hence $\mathrm{AM}(P_n)=5^n$. \end{powset} \proof If $s\in P_n$ let $\chi_s:\{1,\dots,n\}\to\{0,1\}=L_1$ be its indicator function. It is easily verified that the map $s\mapsto\chi_s:P_n\to L_1^n$ is a semilattice isomorphism. Thus there is an isometric identification $\ell^1(P_n)\cong \ell^1(L_1)\otimes^\gam\dots\otimes^\gam\ell^1(L_1)$. Then it follows from Proposition \ref{prop:tpdiag} above that $D=D_1\otimes\dots\otimes D_1$ where $D_1$ is the diagonal for $\ell^1(L_1)$, which, by the algorithm has matrix \[ [D_1]=\begin{bmatrix} \phantom{-}2 & -1 \\ -1 & \phantom{-}1\end{bmatrix}. \] The amenability constant $\mathrm{AM}(P_n)$ can be easily computed by induction. \endpf We have the following summary result. \begin{amenconst}\label{theo:amenconst} If $S$ is a finite semilattice, then $\mathrm{AM}(S)=4n+1$ for some integer $n\geq 0$. All such numbers are achieved. \end{amenconst} \proof We first establish that for $p\iin S$, $d(p,p)\geq 0$. This does not seem obvious from Lemma \ref{lem:algorithm}. We use a calculation from \cite[\S 3]{choi} which exploits the M\"{o}bius function. 
We have that $\Sig:\ell^1(S)\to\ell^\infty(S)$ is invertible and $\til{D}=\sum_{r\in S}\chi_r\otimes\chi_r$ is the diagonal for $\ell^\infty(S)$. Thus, using (\ref{eq:mobius}), we have that \begin{align*} D=\Sig^{-1}\otimes\Sig^{-1}(\til{D}) &=\sum_{r\in S}\left(\sum_{s\in S}\til{\mu}(s,r)\del_s\right) \otimes\left(\sum_{t\in S}\til{\mu}(t,r)\del_t\right) \\ &=\sum_{(s,t)\in S\times S}\left(\sum_{r\in S}\til{\mu}(s,r) \til{\mu}(t,r)\right)\del_s\otimes\del_t \end{align*} is the diagonal for $\ell^1(S)$, where $\til{\mu}(s,t)=\mu(s,t)$ if $s\leq t$ and $\til{\mu}(s,t)=0$, otherwise. Inspecting the coefficient of $\del_p\otimes\del_p$ we obtain \begin{equation}\label{eq:dpppos} d(p,p)=\sum_{r\in S}\til{\mu}(p,r)^2\geq 1> 0 \end{equation} since $\til{\mu}(p,p)=\mu(p,p)=1$ by \cite[\S 3.7]{stanley}. We now observe, using (\ref{eq:plevel}) and then (\ref{eq:unit1}), that \[ \sum_{(s,t)\in S\times S}d(s,t)= \sum_{p\in S}\sum_{\substack{(s,t)\in S\times S \\ st=p}}d(s,t) =\sum_{p\in S}u(p)=1. \] By symmetry, if $p\not=q$ then $|d(p,q)|+|d(q,p)|\equiv d(p,q)+d(q,p)\mod 4$. Hence we have \[ \mathrm{AM}(S)\equiv\sum_{(s,t)\in S\times S}|d(s,t)| \equiv\sum_{(s,t)\in S\times S}d(s,t)\equiv 1\mod 4. \] Finally, Examples \ref{ex:linear} and \ref{ex:flatsem} provide us with semilattices admitting amenability constants $4n+1$, for each integer $n\geq 0$. \endpf We now gain a crude lower bound for $\mathrm{AM}(S)$ which we will require for Proposition \ref{prop:dalesq}. \begin{amenconst1}\label{cor:amenconst1} For any finite semilattice $S$ we have $\mathrm{AM}(S)\geq 2|S|-1$. \end{amenconst1} \proof We have from (\ref{eq:dpppos}) that $d(p,p)\geq 1$ for each $p\iin S$. It then follows from (\ref{eq:dptsum}) that for $p>o$ we have $\sum_{t\geq o}d(p,t)=0$ from which we obtain $\sum_{t\not=p}|d(p,t)|\geq 1$.
It then follows that \begin{align*} \mathrm{AM}(S)=\sum_{(s,t)\in S\times S}|d(s,t)| &\geq d(o,o)+\sum_{p>o}\left(d(p,p)+\sum_{t\not=p}|d(p,t)|\right) \\ &\geq 1+(|S|-1)2 \end{align*} and we are done. \endpf We note that if $S$ is unital, then for $p<1$, $u(p)=0$ and since $d(s,t)=d(t,s)$ for $(s,t)>(p,p)$ we find from Lemma \ref{lem:algorithm} (a) that $d(p,p)$ is even; in particular $d(p,p)\geq 2$. The proof above may be adapted to show $\mathrm{AM}(S)\geq 4|S|-3$, in this case. We conjecture the estimate $\mathrm{AM}(S)\geq 4|S|-3$ holds for any finite semilattice $S$. \section{Banach algebras graded over semilattices} \label{sec:graded} A Banach algebra $\fA$ is {\it graded} over a semigroup $S$ if we have closed subspaces $\fA_s$ for each $s\iin S$ such that \[ \fA=\ell^1\text{-}\bigoplus_{s\in S}\fA_s\aand \fA_s\fA_t\subset\fA_{st} \ffor s,t\iin S. \] We will be interested strictly in the case where $S$ is a finite semilattice. Notice in this case each $\fA_s$ is a closed subalgebra of $\fA$. The next proposition can be proved by a straightforward adaptation of the proof of \cite[Proposition 3.1]{rundes2}. However, we offer another proof. \begin{gradedalga}\label{prop:gradedalga} Let $S$ be a finite semilattice and $\fA$ be graded over $S$. Then $\fA$ is amenable if and only if each $\fA_s$ is amenable. \end{gradedalga} \proof Suppose $\fA$ is amenable. If $s\in S$, then $\fA^s=\bigoplus_{t\leq s}\fA_t$ is an ideal in $\fA$ which is complemented and hence an amenable Banach algebra (see \cite[Theorem 2.3.7]{rundeB}, for example). It is easy to check that the projection $\pi_s:\fA^s\to\fA_s$ is a quotient homomorphism. Hence it follows that if $(D^s_\alp)$ is an approximate diagonal for $\fA^s$ then $\bigl(\pi_s\otimes\pi_s(D^s_\alp)\bigr)$ is an approximate diagonal for $\fA_s$. (This quotient argument is noted in \cite[Corollary 2.3.2]{rundeB} and \cite[Proposition 2.5]{dalesls}.) Now suppose that each $\fA_s$ is amenable.
Let $S_0,S_1,\dots, S_{n(S)}$ be the sequence of ideals from (\ref{eq:ideals}). For each $n=0,1,\dots,n(S)$ we set $\fA_n=\bigoplus_{s\in S_n}\fA_s$ and observe, for each $n=0,1,\dots,n(S)-1$, that we have an isometrically isomorphic identification \[ \fA_n/\fA_{n+1}=\ell^1\text{-}\!\!\!\bigoplus_{s\in M(S_n)}\fA_s \] where multiplication in the latter is pointwise, i.e.\ $\fA_s\fA_t=\{0\}$ if $s\not=t\iin M(S_n)$. The pointwise algebra $\ell^1\text{-}\bigoplus_{s\in M(S_n)}\fA_s$ is amenable as each $\fA_s$ is amenable; if $(D_{s,\alp})$ is a bounded approximate diagonal for each $\fA_s$, then in \[ \left(\ell^1\text{-}\!\!\!\bigoplus_{s\in M(S_n)}\fA_s\right)\otimes^\gam \left(\ell^1\text{-}\!\!\!\bigoplus_{s\in M(S_n)}\fA_s\right) \cong\;\;\;\ell^1\text{-}\!\!\!\!\!\!\!\!\!\!\!\!\!\! \bigoplus_{(s,t)\in M(S_n)\times M(S_n)}\fA_s\otimes^\gam\fA_t \] the net of elements $D_\alp=\sum_{s\in M(S_n)}D_{s,\alp}$ is an approximate diagonal. Thus if $\fA_{n+1}$ is amenable, then $\fA_n$ must be too by \cite[Theorem 2.3.10]{rundeB}. The algebra $\fA_{n(S)}=\fA_o$ is amenable, and hence we may finish by an obvious induction. \endpf In the computations which follow, we will require one of the following {\it linking assumptions} which are very natural for our examples. \parbox{4.5in}{{\bf (LA1)} For each $s\iin S$ there is a bounded approximate identity $(u_{s,\alp})_\alp$ in $\fA_s$, such that for each $t\leq s$ and $a_t\in\fA_t$ we have $\lim_\alp u_{s,\alp} a_t=a_t=\lim_\alp a_tu_{s,\alp}$.} \parbox{4.5in}{\noindent {\bf (LA2)} For each $s\in S$ there is a contractive character $\chi_s:\fA_s\to\Cee$ such that for each $s,t\iin S$, $a_s\in\fA_s$ and $a_t\in\fA_t$, we have $\chi_{st}(a_sa_t)=\chi_s(a_s)\chi_t(a_t)$.} \noindent Notice that in (LA1), each $(u_{s,\alp})_\alp$ is a bounded approximate identity for $\fA^s=\ell^1\text{-}\bigoplus_{t\leq s}\fA_t$.
Thus since $\fA^s$ is an $\fA_s$-module, Cohen's factorisation theorem \cite[32.22]{hewittrII} tells us that \begin{equation}\label{eq:cohen} \text{for each }a\iin\fA^s\text{ there is }v_s\in\fA_s\aand a'\iin\fA^s \text{ such that }a=v_sa'. \end{equation} There is a right factorisation analogue, and the result also holds on each $\fA_s$ module $\fA_t$, where $t\leq s$. We note that (LA2) is equivalent to having a contractive character $\chi:\fA\to\Cee$ such that $\chi|_{\fA_s}=\chi_s$ for each $s$. We note that many natural Banach algebras, graded over semilattices, which arise in harmonic analysis, satisfy (LA2). However, (LA1) can be used whenever each component algebra $\fA_s$ admits no characters. For example, if we have a (finite unital) semilattice $S$, a family of algebras $\{\fA_s\}_{s\in S}$ each having no characters, and a system $\{\eta^s_t:s,t\in S, s\geq t\}$ of homomorphisms, we can make $\ell^1\text{-}\bigoplus_{s\in S}\fA_s$ into a Banach algebra by setting $a_sa_t=\eta^s_{st}(a_s)\eta^t_{st}(a_t)$ for $a_s\iin \fA_s$ and $a_t\iin\fA_t$. (This construction is analogous to that of the Clifford semigroup algebras which will be presented in Section \ref{ssec:clifford}, below.) This brings us to the main result of this article. \begin{gradedalg}\label{theo:gradedalg} Let $\fA$ be a Banach algebra graded over a finite semilattice $S$ such that each $\fA_s$ is amenable. If we have either that (LA1) holds, or (LA2) holds, then $\mathrm{AM}(\fA)\geq\mathrm{AM}(S)$. \end{gradedalg} \proof $\fA$ is amenable by the proposition above. Let us suppose (LA1) holds. We let for each $p\iin S$, $\pi_p:\fA\to\fA_p$ be the contractive projection. We define for $a,b\in\fA$, $\pi_p(a\otimes b)=\pi_p(a)\otimes b$ and $(a\otimes b)\pi_p=a\otimes\pi_p(b)$. Clearly these actions extend linearly and continuously to define $\pi_pD$ and $D\pi_p$ for any $D\in\fA\otimes^\gam\fA$.
We let $(D_\alp)$ be a bounded approximate diagonal for $\fA$ and \[ D=\sum_{(s,t)\in S\times S}d(s,t)\del_s\otimes\del_t \] be the unique diagonal for $\ell^1(S)$. We will prove that for $p,q\in S$ and $a\in\fA^p$, $b\in\fA^q$ that \[ \lim_\alp am(\pi_pD_\alp\pi_q)b=d(p,q)ab.\tag{$\bigstar$} \] This requires induction and we will need some preliminary steps. Suppose that $q\not=p$ in $S$, say $q\not\geq p$. If $v_q\in\fA_q$ then (\ref{eq:appdiag2}) implies that \begin{equation}\label{eq:capdiagvanish} \lim_\alp \pi_p(D_\alp\mult v_q)\pi_q =\lim_\alp\pi_p(v_q\mult D_\alp)\pi_q=0. \end{equation} We note that on an elementary tensor in $\fA\otimes\fA$ we have \begin{equation}\label{eq:tensform1} m(\pi_p(a\otimes b\mult v_q)\pi_q)=\sum_{t\geq q}\pi_p(a)\pi_t(b)v_q =\sum_{t\geq q}m(\pi_p(a\otimes b)\pi_t)v_q \end{equation} Now if $b\in\fA^q$ we find $v_q\in\fA_q$ and $b'\iin\fA^q$ such that $b=v_qb'$ by (\ref{eq:cohen}). We then have, in analogy to Lemma \ref{lem:algorithm} (b), using (\ref{eq:capdiagvanish}) and (\ref{eq:tensform1}) \[ \lim_\alp\sum_{t\geq q}m(\pi_pD_\alp\pi_t)b =\lim_\alp m\bigl(\pi_p(D_\alp\mult v_q)\pi_q\bigr)b'=0\mult b'=0. \tag{b$_1$'} \] Similarly we see \[ \lim_\alp\sum_{s\geq q}bm(\pi_sD_\alp\pi_p)=0. \tag{b$_2$'} \] Note that if $p,q\in M(S)$ with $p\not=q$, then (b$_1$') takes the form \[ \lim_\alp m(\pi_pD_\alp\pi_q)b=0=d(p,q)b \] and a similar version holds for (b$_2$'). Thus ($\bigstar$) holds in this case. Now we show that for $p\in S$ and $b\iin \fA^p$ that \begin{equation}\label{eq:approxuatp} \lim_\alp\pi_p\bigl(m(D_\alp)\bigr)b=u(p)b \end{equation} where $u=\sum_{p\in S}u(p)\del_p$ is the unit for $\ell^1(S)$. By (\ref{eq:cohen}) there are $v_p\iin\fA_p$ and $b'\iin\fA^p$ such that $b=v_pb'$.
We have that \begin{align*} v_p&=\lim_\alp m(D_\alp)v_p =\lim_\alp\sum_{s\in S}\pi_s\bigl(m(D_\alp)\bigr)v_p \\ &=\lim_\alp\left[\sum_{s\geq p}\pi_s\bigl(m(D_\alp)\bigr)v_p +\sum_{s\not\geq p}\pi_s\bigl(m(D_\alp)\bigr)v_p\right] \end{align*} from which it follows that \[ \lim_\alp\sum_{s\geq p}\pi_s\bigl(m(D_\alp)\bigr)v_p =\lim_\alp\pi_p\bigl(m(D_\alp)v_p\bigr)=v_p \] and hence \begin{equation}\label{eq:approxuatp1} \lim_\alp\sum_{s\geq p}\pi_s\bigl(m(D_\alp)\bigr)b =\lim_\alp\sum_{s\geq p}\pi_s\bigl(m(D_\alp)\bigr)v_pb'=v_pb'=b. \end{equation} In particular, if $p\in M(S)$, then \[ \lim_\alp\pi_p\bigl(m(D_\alp)\bigr)b=b=u(p)b. \] Then the equation (\ref{eq:approxuatp}) follows inductively from (\ref{eq:approxuatp1}) and (\ref{eq:unit}), using the case of maximal $p$ as a base. Now we establish an analogue of Lemma \ref{lem:algorithm} (a). For an elementary tensor $a\otimes b$ in $\fA\otimes\fA$, we have \begin{equation}\label{eq:tensorform} \pi_p(ab)=\sum_{\substack{(s,t)\geq (p,p) \\ st=p}}\pi_s(a)\pi_t(b) =\sum_{\substack{(s,t)\geq (p,p) \\ st=p}}m\bigl(\pi_s(a\otimes b)\pi_t\bigr). \end{equation} It then follows from (\ref{eq:approxuatp}) and (\ref{eq:tensorform}) that for $b\in\fA^p$ \[ u(p)b=\lim_\alp\sum_{\substack{(s,t)\geq (p,p) \\ st=p}} m(\pi_sD_\alp\pi_t)b. \tag{a'} \] Note that if $p\in M(S)$, then by Proposition \ref{prop:unit}, (a') becomes \[ d(p,p)b=b=\lim_\alp m(\pi_pD_\alp\pi_p)b. \] Thus ($\bigstar$) holds in this case. We now prove ($\bigstar$) by induction on pairs $(p,q)\iin S\cross S$ with pairs $(p,q)\in M(S)\cross M(S)$ as a base. If $p\in S$, the induction hypothesis is that for $a,b\in\fA^p$ \[ \lim_\alp am(\pi_sD_\alp\pi_t)b=d(s,t)ab\ffor (s,t)>(p,p)\wwith st=p. \] Notice that in the hypothesis above we have $\fA^p\subset\fA^s\cap\fA^t$, and, moreover, either $t\not\geq s$ or $s\not\geq t$. 
But then it follows from (a') and Lemma \ref{lem:algorithm} (a) that \[ \lim_\alp am(\pi_pD_\alp\pi_p)b=\left[u(p)-\sum_{\substack{(s,t)>(p,p) \\ st=p}}d(s,t)\right]ab=d(p,p)ab \] which establishes ($\bigstar$) in this case. Also, if $q\not=p$, say $q\not\geq p$, then for $a\iin\fA^p$ and $b\iin\fA^q$ the induction hypothesis is that \[ \lim_\alp am(\pi_pD_\alp\pi_t)b=d(p,t)ab\ffor t>q. \] Combining this with (b$_1$') and Lemma \ref{lem:algorithm} (b) we obtain the equation ($\bigstar$) for this case. We can use (b$_2$') in place of (b$_1$') above, to achieve ($\bigstar$) with $p$ and $q$ interchanged. We now use ($\bigstar$) to finish the proof. Let for $p,q\iin S$ \[ \eta(p,q)=\sup_{a\in\fA^p,b\in\fA^q}\frac{\norm{ab}}{\norm{a}\norm{b}}. \] We note that our assumption (LA1) provides that $\eta(p,q)>0$. For $\eps>0$ let $a_\eps\iin\fA^p$ and $b_\eps\iin\fA^q$ be so $\frac{\norm{a_\eps b_\eps}}{\norm{a_\eps}\norm{b_\eps}}\geq (1-\eps)\eta(p,q)$. Then by ($\bigstar$) we have \begin{align*} |d(p,q)|\norm{a_\eps b_\eps} &=\lim_\alp\norm{a_\eps m(\pi_pD_\alp\pi_q)b_\eps} \leq\liminf_\alp\norm{a_\eps m(\pi_pD_\alp\pi_q)}\norm{b_\eps} \\ &\leq\liminf_\alp\frac{\norm{a_\eps m(\pi_pD_\alp\pi_q)}} {\norm{a_\eps}\norm{m(\pi_pD_\alp\pi_q)}} \norm{a_\eps}\norm{b_\eps}\norm{m(\pi_pD_\alp\pi_q)} \\ &\leq \eta(p,q) \norm{a_\eps}\norm{b_\eps} \liminf_\alp\norm{m(\pi_pD_\alp\pi_q)} \end{align*} which implies \[ (1-\eps)|d(p,q)|\leq\liminf_\alp\norm{m(\pi_pD_\alp\pi_q)} \leq\liminf_\alp\norm{\pi_pD_\alp\pi_q}_\gam.
\] Thus \begin{align*} \mathrm{AM}(S)&=\sum_{(p,q)\in S\times S}|d(p,q)| \leq\sum_{(p,q)\in S\times S}\liminf_\alp\norm{\pi_pD_\alp\pi_q}_\gam \\ &\leq \liminf_\alp\sum_{(p,q)\in S\times S}\norm{\pi_pD_\alp\pi_q}_\gam \overset{(\dagger)}=\liminf_\alp \norm{D_\alp}\leq\sup_\alp\norm{D_\alp}_\gam \end{align*} where the equality $(\dagger)$ holds because of the isometric identification \[ \fA\otimes^\gam\fA=\left( \ell^1\text{-}\bigoplus_{s\in S}\fA_s\right)\otimes^\gam \left(\ell^1\text{-}\bigoplus_{t\in S}\fA_t\right) \cong\ell^1\text{-}\!\!\!\!\!\bigoplus_{(s,t)\in S\times S} \fA_s\otimes^\gam\fA_t. \] Thus we have finished the case where we assumed (LA1). Now suppose we have (LA2). The map \[ \Pi:\fA\to\ell^1(S),\quad \Pi(a)=\sum_{s\in S}\chi_s\bigl(\pi_s(a)\bigr)\del_s \] is a contractive homomorphism. Hence it follows that if $(D_\alp)$ is a bounded approximate diagonal for $\fA$ then $\bigl(\Pi(D_\alp)\bigr)$ is an approximate diagonal for $\ell^1(S)$. Thus the limit point, i.e.\ unique cluster point, $D$ of $\bigl(\Pi(D_\alp)\bigr)$ satisfies $\norm{D}_\gam=\mathrm{AM}(S)$, whence $\sup_\alp\norm{D_\alp}_\gam\geq\lim_\alp\norm{\Pi(D_\alp)}_\gam \geq\mathrm{AM}(S)$. \endpf It might seem plausible that in the situation of the theorem above, if it were the case that $\mathrm{AM}(\fA_s)=1$, for each $s$, then $\mathrm{AM}(\fA)=\mathrm{AM}(S)$. Indeed this phenomenon was observed for $S=L_1$, in a special case in \cite[Theorem 2.3]{rundes2}. However this does not seem to hold in general, as we shall see below. 
\subsection{Clifford semigroup algebras}\label{ssec:clifford} Let $S$ be a semilattice, and for each $s\iin S$ suppose we have a group $G_s$, and for each $t\leq s$ a homomorphism $\eta^s_t:G_s\to G_t$ such that for $r\geq s\geq t\iin S$ we have \[ \eta^s_s=\mathrm{id}_{G_s}\quad\aand\quad \eta^r_s\comp\eta^s_t=\eta^r_t \] then $G=\bigsqcup_{s\in S}G_s$ (disjoint union) admits a semigroup operation given by \[ x_sy_t=\eta^s_{st}(x_s)\eta^t_{st}(y_t) \] for $x_s\iin G_s$ and $y_t\iin G_t$. It is straightforward to check that $G$ is a semigroup, and is called a {\it Clifford semigroup}, as such a semigroup was first described in \cite{clifford}. We note that the set of idempotents $E(G)$ is $\{e_s\}_{s\in S}$, where $e_s$ is the neutral element of $G_s$, and $E(G)$ is a subsemigroup, isomorphic to $S$. It is clear that \[ \ell^1(G)=\ell^1\text{-}\bigoplus_{s\in S}\ell^1(G_s) \] and that $\ell^1(G)$ is thus graded over $S$. Note that $\ell^1(G)$ satisfies (LA1) by design, and satisfies (LA2) where the augmentation character is used on each $\ell^1(G_s)$. As with semilattices we will write $\mathrm{AM}(G)=\mathrm{AM}(\ell^1(G))$. Consider the semilattice $S=\{o,s_1,s_2,s_3,s_4,1\}$ whose graph is given below.
\begin{equation}\label{ex:unrsl} \xymatrix{ & & 1\ar@{-}[dl]\ar@{-}[dr] & \\ & s_3\ar@{-}[dl]\ar@{-}[dr] & & s_4\ar@{-}[ddl] \\ s_1\ar@{-}[drr] & & s_2\ar@{-}[d] & & \\ & & o & & } \end{equation} Using the algorithm following Lemma \ref{lem:algorithm}, with the semilattice ordered as presented, we obtain diagonal $D$ with matrix \begin{equation}\label{eq:unrslm} [D]=\begin{bmatrix} \phantom{-}6 & -2 & -2 & \phantom{-}0 & -2 & \phantom{-}1 \\ -2 & \phantom{-}2 & \phantom{-}1 & -1 & \phantom{-}0 & \phantom{-}0 \\ -2 & \phantom{-}1 & \phantom{-}2 & -1 & \phantom{-}0 & \phantom{-}0 \\ \phantom{-}0 & -1 & -1 & \phantom{-}2 & \phantom{-}1 & -1 \\ -2 & \phantom{-}0 & \phantom{-}0 & \phantom{-}1 & \phantom{-}2 & -1 \\ \phantom{-} 1 & \phantom{-}0 & \phantom{-}0 & -1 & -1 & \phantom{-}1 \end{bmatrix}. \end{equation} Thus we obtain amenability constant $\mathrm{AM}(S)=41$. Now let $n\geq 2$ be an integer and $G_n$ be the Clifford semigroup graded over $S$ for which \[ G_{n,s_3}=\{e_3,a,\dots, a^{n-1}\}\quad\aand\quad G_{n,s_i}=\{e_i\} \text{ for all }i\not=3 \] and all connecting homomorphisms are trivial. Here, $\{e_3,a,\dots,a^{n-1}\}$ is a cyclic group, and each other $\{e_i\}$ is the trivial group. This is a finite dimensional commutative amenable algebra, and hence admits a unique diagonal by Proposition \ref{prop:unique}. 
It is straightforward to verify that if we order the semigroup $\{o,e_1,e_2,e_3,a,\dots,a^{n-1},e_4,1\}$ we obtain matrix for the diagonal \[ \begin{bmatrix} \phantom{-}6 & -2 & -2 & (1-n)/n & \phantom{-}1/n & \hdots & \phantom{-}1/n & -2 & \phantom{-}1 \\ -2 & \phantom{-}2 & \phantom{-}1 & -1/n & -1/n & \hdots & -1/n & \phantom{-}0 & \phantom{-}0 \\ -2 & \phantom{-}1 & \phantom{-}2 & -1/n & -1/n & \hdots & -1/n & \phantom{-}0 & \phantom{-}0 \\ (1-n)/n & -1/n & -1/n & (n+1)/n & \phantom{-}0 & \hdots & \phantom{-}0 & \phantom{-}1 & -1 \\ \phantom{-}1/n & -1/n & -1/n & \phantom{-}0 & & \iddots & \phantom{-}1/n & \phantom{-}0 & \phantom{-}0 \\ \phantom{-}\vdots & \phantom{-}\vdots & \phantom{-}\vdots & \phantom{-}\vdots & \phantom{-}\iddots & \iddots & & \phantom{-}\vdots & \phantom{-}\vdots \\ \phantom{-}1/n & -1/n & -1/n & \phantom{-}0 & \phantom{-}1/n & & \phantom{-}0 & \phantom{-}0 & \phantom{-}0 \\ -2 & \phantom{-}0 & \phantom{-}0 & \phantom{-}1 & \phantom{-}0 & \hdots & \phantom{-}0 & \phantom{-}2 & -1 \\ \phantom{-}1 & \phantom{-}0 & \phantom{-}0 & -1 & \phantom{-}0 & \hdots & \phantom{-}0 & -1 & \phantom{-}1 \end{bmatrix}. \] Notice that values in positions $(o,e_3),\dots, (o,a^{n-1})$ sum to $0$, the value in the $(o,s_3)$ position in (\ref{eq:unrslm}) above. Similar results hold for all submatrices with indices from $\{e_3,a,\dots,a^{n-1}\}$. Summing absolute values of all entries in the matrix we obtain amenability constant $\mathrm{AM}(G_n)=41+4(n-1)/n$. Thus \[ \mathrm{AM}(G_n)=41+4\frac{n-1}{n}>41=\mathrm{AM}(S). \] The constant $\mathrm{AM}(G_2)=43$ is the smallest amenability constant we can find for a commutative semigroup which is not of the form $4n+1$. \subsection{Algebras graded over linear semilattices} We note that if $G$ is a finite Clifford semigroup, graded over a linear semilattice $L_n$, then $\mathrm{AM}(G)=\mathrm{AM}(L_n)=4n+1$. Indeed, this holds more generally, by the following proposition.
\begin{gradedlinear}\label{prop:gradedlinear} If $\fA=\ell_1\text{-}\bigoplus_{k\in L_n}\fA_k$ is a graded Banach algebra which satisfies (LA1), and $\fA_k$ is contractible with $\mathrm{AM}(\fA_k)=1$ for each $k\iin L_n$, then $\mathrm{AM}(\fA)=4n+1$. \end{gradedlinear} \proof We have from Theorem \ref{theo:gradedalg} that $\mathrm{AM}(\fA) \geq\mathrm{AM}(L_n)=4n+1$, hence it suffices to exhibit a diagonal $D$ with $\norm{D}_\gam\leq 4n+1$. We will show that such $D$ exists by induction. Write $L_n=\{0,1,\dots,n\}$. We identify $L_k$ as an ideal of $L_n$ for each $k=0,1,\dots,n-1$ in the usual way. Let us note that if $(u_{k,\alp})$ is a bounded approximate identity for $\fA_k$, which satisfies (LA1), then the unit $e_k$ of $\fA_k$ is the limit point of $(u_{k,\alp})$, and hence $e_k$ is the unit for $\fA^k=\ell^1\text{-}\bigoplus_{j\in L_k}\fA_j$. Note, moreover, that the assumption that $\mathrm{AM}(\fA_k)=1$ forces $\norm{e_k}=1$. Let $\eps>0$. Suppose for $k<n$ we have a diagonal $D^k$ for $\fA^k$ with $\norm{D^k}_\gam<4k+1+\eps$. For $k=0$, such a diagonal exists as $\mathrm{AM}(\fA_0)=1$. We let \[ D_{k+1}=\sum_{i=1}^\infty a_i\otimes b_i,\quad a_i,b_i\in\fA_{k+1} \] be a diagonal for $\fA_{k+1}$ with $\norm{D_{k+1}}_\gam\leq \sum_{i=1}^\infty\norm{a_i}\norm{b_i}<1+\eps$. We then set \[ D^{k+1} =\sum_{i=1}^\infty a_i\mult\bigl((e_{k+1}-e_k)\otimes(e_{k+1}-e_k) +D^k\bigr)\mult b_i. \] Clearly \[ \norm{D^{k+1}}_\gam\leq (4+(4k+1+\eps))(1+\eps)=4(k+1)+1+O(\eps). \] Applying the multiplication map, and noting that $m(D^k)=e_k$, we have \begin{align*} m(D^{k+1})&=\sum_{i=1}^\infty a_i\bigl(e_{k+1}-e_k-e_k+e_k+m(D^k)\bigr)b_i \\ &= \sum_{i=1}^\infty a_ie_{k+1}b_i=m(D_{k+1})=e_{k+1} \end{align*} so (\ref{eq:diag1}) for $D^{k+1}$ is satisfied. Now if $a\in\fA_{k+1}$ then by property (\ref{eq:diag2}) for $D_{k+1}$ we have $\sum_{i=1}^\infty (aa_i)\otimes b_i= \sum_{i=1}^\infty a_i\otimes(b_ia)$, so it follows that $a\mult D^{k+1}= D^{k+1}\mult a$. 
Now if $a\in\fA^k$, then each $aa_i\in\fA^k$ so \begin{align*} a\mult D^{k+1}&=\sum_{i=1}^\infty (aa_i)\mult\bigl((e_{k+1}-e_k)\otimes(e_{k+1}-e_k)+ D^k\bigr)\mult b_i \\ &=\sum_{i=1}^\infty \bigl([aa_i(e_{k+1}-e_k)]\otimes(e_{k+1}-e_k)+(aa_i)\mult D^k \bigr)\mult b_i \\ &=\sum_{i=1}^\infty D^k\mult(aa_ib_i)=D^k\mult a=a\mult D^k \end{align*} which, by a symmetric argument, is exactly the value of $D^{k+1}\mult a$. Since any $a\in\fA^{k+1}$ is a sum $a=\pi_{k+1}(a)+(a-\pi_{k+1}(a))$ where $\pi_{k+1}(a)\in\fA_{k+1}$ and $a-\pi_{k+1}(a)\in\fA^k$, we obtain (\ref{eq:diag2}) for $D^{k+1}$. \endpf We note that to generalise our proof of the preceding result to amenable but not contractible Banach algebras, we would require at each stage approximate diagonals $D^k_\alp$ such that $\norm{m(D^k_\alp)}=1$, which we do not know how to construct, in general. We point the reader to \cite[Theorem 2.3]{rundes1} to see a computation performed on a Banach algebra graded over $L_1$. We note that we can modify the proof of Proposition \ref{prop:gradedlinear} to see that {\it a Banach algebra $\fA=\ell_1\text{-}\bigoplus_{s\in F_2^1}\fA_s$ graded over $F_2^1$, where each $\fA_s$ is contractible with $\mathrm{AM}(\fA_s)=1$, satisfies $\mathrm{AM}(\fA)\leq 45$}. This is larger than $\mathrm{AM}(F_2^1)=25$ from Example \ref{ex:uflatsem}. We have found no examples of such Banach algebras $\fA$ with $\mathrm{AM}(\fA)>25$. However, we conjecture only for semilattices $S=L_n$, that {\it a Banach algebra $\fA=\ell_1\text{-}\bigoplus_{s\in S}\fA_s$ graded over $S$, where each $\fA_s$ is amenable with $\mathrm{AM}(\fA_s)=1$, satisfies $\mathrm{AM}(\fA)=\mathrm{AM}(S)$.} It would be interesting to find non-linear unital semilattices over which this conjecture holds.
It is further conjectured that there are no semigroups $G$ for which $\mathrm{AM}(G) \in(5,7)\cup(7,9)$. In \cite{dalesls} there is an example given of a noncommutative semigroup $G$ with $\mathrm{AM}(G)=7$. For commutative semigroups there is a further gap. \begin{dalesq}\label{prop:dalesq} There is no commutative semigroup $G$ such that \[ 5<\mathrm{AM}(G)<9. \] \end{dalesq} \proof Since $G$ is commutative, it is proved in \cite[Theorem 2.7]{groenbaek} that if $\ell^1(G)$ is amenable, then $G$ is a Clifford semigroup, whose component groups are abelian, graded over a finite semilattice $S$. If $\mathrm{AM}(G)<9$, then by Theorem \ref{theo:gradedalg} we have $\mathrm{AM}(S)<9$ and hence by Theorem \ref{theo:amenconst} and the corollary which follows it we have \[ 2|S|-1\leq \mathrm{AM}(S)\leq 5 \] so $|S|\leq 3$. Clearly, if $|S|=1$, $S=L_0$, and if $|S|=2$, $S=L_1$. If $|S|=3$ then $S$ is either unital, in which case $S=L_2$, or $S$ has 2 maximal elements, in which case $S=F_2$; in either case $\mathrm{AM}(S)=9$, contradicting our assumptions. Thus $S=L_0\oor L_1$. But it then follows by a straightforward adaptation of \cite[Theorem 2.3]{rundes1} that $\mathrm{AM}(G) =1\oor 5$. In particular $\mathrm{AM}(G)\leq 5$. \endpf {\bf Acknowledgements.} The authors are grateful to H.G. Dales for valuable questions and discussion, and to Y. Choi for providing a preprint of his article \cite{choi}. { } Mahya Ghandehari Address: {\sc Department of Pure Mathematics, University of Waterloo, Waterloo, ON\quad N2L 3G1, Canada} E-mail: {\tt [email protected]} Hamed Hatami Address: {\sc Department of Computer Science, University of To\-ron\-to, Toronto, ON\quad M5S 3G4, Canada} E-mail: {\tt [email protected]} Nico Spronk Address: {\sc Department of Pure Mathematics, University of Waterloo, Waterloo, ON\quad N2L 3G1, Canada} E-mail: {\tt [email protected]} \end{document}
\begin{document} \title{The boundedness locus and baby Mandelbrot sets for some generalized Mc{M}ullen maps} \author{Suzanne Boyd} \address{Department of Mathematical Sciences, University of Wisconsin Milwaukee, PO Box 413\\ Milwaukee, Wisconsin 53201 USA\\ [email protected]} \author{Alexander J.\ Mitchell} \address{Physical Sciences and Mathematics Department, Wayne State College, 1111 Main Street\\ Wayne, Nebraska 68787 USA\\ [email protected]} \date{\today} \begin{abstract} In this paper we study rational functions of the form \noindent \mbox{$R_{n,a,c}(z) = z^n + \dfrac{a}{z^n} + c,$} with $n$ fixed and at least $3$, and hold either $a$ or $c$ fixed while the other varies. We locate some homeomorphic copies of the Mandelbrot set in the $c$-parameter plane for certain ranges of $a$, as well as in the $a$-plane for some $c$-ranges. We use techniques first introduced by Douady and Hubbard in \cite{douhub} that were applied for the subfamily \mbox{$R_{n,a,0}$} by Devaney in \cite{dhalo}. These techniques involve polynomial-like maps of degree two. \end{abstract} \maketitle \markboth{\textsc{S. Boyd and A. Mitchell}} {\textit{Mandelbrot for generalized McMullen maps}} \footnotetext[1]{2010 MSC: Primary: 37F10; Secondary: 37F46. Keywords: Complex Dynamical Systems, Mandelbrot set, Polynomial-Like Map, Rational Map, Iteration} \footnotetext[2]{We would like to thank Robert Devaney and Laura DeMarco for helpful conversations, and Brian Boyd for the computer program ``Dynamics Explorer'' which generated all of the Mandelbrot and Julia images in this paper.} \section{Introduction} \label{sec:introduction} As a simple starting example we consider the family of quadratic polynomials $$ P_c(z) = z^2 + c,~c \in \mathbb{C}. $$ We define the \textit{Fatou set} of $P_c$ in the typical way, as the set of values in the domain where the iterates of $P_c$ form a normal family in the sense of Montel.
The \textit{Julia set} of $P_c$ is also defined the usual way as the complement to the Fatou set. The \textit{filled Julia set} is the union of the Julia set and the bounded Fatou components. The \textit{Mandelbrot Set}, $\mathcal{M}$, is the set of $c$-values such that the critical orbit of $P_c$ is bounded, here that is the orbit of $0$. Figure \ref{Mandelbrotone} (left) is the Mandelbrot set drawn in the $c$-parameter plane of $P_c$. For other functions, the set of parameter values where at least one critical orbit is bounded will be called the \textit{boundedness locus}. \begin{figure} \caption{Parameter Planes of $P_c$ (left) and $R_{5,a,0} \label{Mandelbrotone} \end{figure} The study of the Mandelbrot set has become more accessible as computers have advanced. Adrien Douady and John Hubbard were able to show that this set can result from other iterative processes as well, in \cite{douhub}. They showed that multiple homeomorphic copies of the Mandelbrot set occur when Newton's Method is applied to a cubic polynomial family with a single parameter, and defined what it means for a map to behave like $P_c$, calling such a map {\em polynomial-like of degree two} (see Section \ref{preliminaries_section}). Mc{M}ullen (\cite{mcmullen}) shows that every non-empty bifurcation locus of any analytic family will contain quasiconformal copies of the Mandelbrot set of $P_c$ (or of $z^n + c$, based on the multiplicity of critical points), but in this paper we will use Douady and Hubbard's approach to prove that Mandelbrot set copies exist in some specific locations in some parameter planes, for the following family. The family of functions of interest in this paper is: $$ R_{n,a,c}(z) = z^n + \dfrac{a}{z^n} + c~,~n \in \mathbb{N},~a \in \mathbb{C}\backslash \lbrace0\rbrace,~c \in \mathbb{C}. $$ In this article, we restrict to integers $n \geq 3$. 
This family, including the subfamily with $c=0$, has been studied previously by Robert Devaney and colleagues, as well as the first author and colleagues. In \cite{boydschul}, Boyd and Schulz study the geometric limit as $n\to \infty$ of Julia sets and of the boundedness locus, for $R_{n,c,a}$ for any complex $c$ and any complex, non-zero $a$. Devaney and Garijo in \cite{devgar} study Julia sets as the parameter $a$ tends to $0$, for the cases of $n,d\geq 2$, and $c=0$. In \cite{bdgr} and \cite{devkoz}, the authors study the family in the case of $c$ at the center of a hyperbolic component of the Mandelbrot set for $P_c$ (that is, the critical point is a fixed point). For $n\geq 2$, Devaney and colleagues study the subfamily with $c=0$, ``McMullen maps", in papers such as \cite{dhalo} and \cite{devsurv}. Our goal in this article is to generalize to the case $c\neq 0$ their result establishing the location of $n-1$ homeomorphic copies of the Mandelbrot set in the boundedness locus in the $a$-parameter plane of $R_{n,a,0}$ (see Figure \ref{Mandelbrotone} (right) for an example). In \cite{dhalo} and \cite{jangso} the authors find $n$ homeomorphic copies of Mandelbrot sets for a different generalization of McMullen Maps, $z \mapsto z^n + \dfrac{a}{z^d}$. We note that in \cite{xiaoqiu}, Xiao, Qiu, and Yongchen establish a topological description of the Julia sets (and Fatou components) of $R_{n,a,c}$ according to the dynamical behavior of the orbits of its free critical points. This work includes a result that if there is a critical component of the filled Julia set which is periodic, then the Julia set consists of infinitely many homeomorphic copies of a quadratic Julia set, and uncountably many points. In order to find baby Mandelbrot sets in our parameter planes of interest, we will first locate baby Julia sets, but using different techniques (based on specific parameter ranges rather than the type of dynamical behavior). 
Here, we consider the case where $c \neq 0$ (but $n=d$), and find homeomorphic copies of the Mandelbrot set in both the $a$ and $c$-parameter planes of $R_{n,a,c}$. Our main results are as follows. \begin{main} \label{Main_Theorem_APlane} For the set of $n$ and $c$ values below, the boundedness locus in the $a$-parameter plane of $R_{n,a,c}$ contains a homeomorphic copy of the Mandelbrot set in the subset $\mathbf{W}_{n,c}$: \begin{enumerate} \item[(i)] $n\geq 3$ and $-1 \leq c \leq 0$; \item[(ii)] odd $n \geq 3$ and $0 \leq c \leq 1$. \end{enumerate} \end{main} Item (i) is established in Theorem \ref{V+Mandel_APlane_Cnegative_theorem}, Item (ii) is shown in Corollary \ref{APlane_V-_Corollary}. See Equation~\ref{eqn:defnW} for the definition of the set $\mathbf{W}_{n,c}$. \begin{main} \label{Main_Theorem_CPlane} For the set of $n$ and $a$ values below, the boundedness locus in the $c$-parameter plane of $R_{n,a,c}$ contains one or more homeomorphic copies of the Mandelbrot set, as follows. \begin{enumerate} \item[(i)] For $n\geq 5$ and $1 \leq a \leq 4$, there are $n$ baby Mandelbrot sets, one in each subset $\mathbf{W}_{n,a}^k$ for $k \in \lbrace 0,1,...,n-1 \rbrace$; \\ if $n$ is odd there are at least $2n$, one within each $\mathbf{W}_{n,a}^k$ and one within its reflection over the imaginary axis; \item[(ii)] For $n \geq 11$ and $\frac{1}{10} \leq a \leq 1$, there is a baby Mandelbrot set in $\mathcal{W}_{n,a}$; if $n$ is odd there are at least two. \end{enumerate} \end{main} Item (i) is established in Theorems~\ref{v+_Mandels_exist_multiple_cplane} and \ref{v-_Mandels_exist_multiple_through_symmetry_theorem}. See Equation~\ref{eqn:defnWck} for the definition of $\mathbf{W}_{n,a}^k$. Item (ii) is shown in Theorem~\ref{Tighter_radius_mandel_exists_in_cPlane_Theorem} and Corollary~\ref{v-_mandel_exists_in_cPlane_all_aValues_corollary}. See Equation~\ref{eqn:defnWp} for the definition of $\mathcal{W}_{n,a}$.
To establish these results, we will take advantage of the many symmetries present in the family $R_{n,a,c}$. Our proof will follow the same general outline as in the case of $c=0$, but some additional complexities must be dealt with when $c\neq0$; for instance, there are multiple critical orbits to track. We now discuss how the parameter planes of $R_{n,a,c}$ are drawn. With multiple critical orbits, it is more complicated than drawing $\mathcal{M}$ of $P_c$. To draw an $a$- (or $c$-)parameter plane of $R_{n,a,c}$ we first fix a value of $n$ and $c$ (or $a$). Then using each critical orbit, we color every point in the picture of the parameter plane as follows. First we assign a color (preferably unique) to each critical orbit. Since there are two critical orbits here, $v_+$ and $v_-$, we assign green and purple, respectively. For each parameter value in the picture we test both critical orbits for boundedness and assign a color for each orbit. If the critical orbit is bounded we assign black. Else, if it escapes we assign that critical orbit's unique color, shaded based on rate of escape as is typical; that is, the shade of the color depends on the number of iterations it took for the orbit to escape a pre-defined escape radius. Once the testing is complete, each parameter value has two RGB color values assigned. The computer will then \emph{average} the two values at each point, resulting in a single assigned color for that parameter value. Therefore a parameter value with both critical orbits bounded will be colored black; a parameter with the critical orbit of $v_+$ bounded while $v_-$ escapes is colored dark purple and vice-versa is colored dark green; a parameter with both critical orbits escaping will be colored with the RGB average of the two colors. Note purple and green average to gray, and the colors only truly average if the rates of escape match---if one escapes more slowly, that color is more intense, so it shades the gray toward purple or green.
Figures \ref{aPlane_Mandelbrot_Example_Figure} and \ref{cPlane_Multiple_mandelbrot_example} give examples of this coloring scheme used to draw the $a$- and $c$-parameter planes, respectively. \begin{figure} \caption{An example $a$-parameter plane of $R_{n,a,c}$, drawn with the coloring scheme described above.} \label{aPlane_Mandelbrot_Example_Figure} \end{figure} \begin{figure} \caption{An example $c$-parameter plane of $R_{n,a,c}$, drawn with the coloring scheme described above.} \label{cPlane_Multiple_mandelbrot_example} \end{figure} We close this introduction by previewing the organization of the sections. In Section \ref{preliminaries_section} we provide some background information, including Douady and Hubbard's criteria to prove existence of a Mandelbrot set in a region in parameter space, as well as some basic properties of the family $R_{n,a,c}$. Section \ref{Main_One_Section} contains the main body of work needed to prove Main Theorem \ref{Main_Theorem_APlane}, in the $a$-plane. In Section \ref{Main_Two_Section} we turn to the $c$-plane and provide the proof of Main Theorem \ref{Main_Theorem_CPlane}-(i). Finally, in Section \ref{Extend_Results_Section} we remain in the $c$-plane but since $a=0$ is a degenerate case, we push toward results for smaller $a$-values and prove Main Theorem \ref{Main_Theorem_CPlane}-(ii), and provide some additional results about situations in which baby Mandelbrot sets overlap. \section{Preliminaries} \label{preliminaries_section} \begin{notation} The Mandelbrot set will be denoted throughout by $\mathcal{M}$, and we refer to a homeomorphic copy of $\mathcal{M}$ as a \textbf{baby $\mathcal{M}$}.
\end{notation} To establish the existence of baby $\mathcal{M}$'s in a region in a parameter plane, we will use the definition of a polynomial-like map given by Douady and Hubbard: \begin{definition} \label{Polynomial-like_definition} \cite{douhub} A map $F: \mathbf{U}' \rightarrow F(\mathbf{U}')=\mathbf{U}$ is \textbf{polynomial-like} if \begin{itemize} \item $\mathbf{U}'$ and $\mathbf{U}$ are bounded, open, simply connected subsets of $\mathbb{C}$, \item $\mathbf{U}'$ relatively compact in $\mathbf{U}$, \item $F$ is analytic and proper. \end{itemize} Further $F$ is polynomial-like of \textbf{degree two} if $F$ is a $2$-to-$1$ map except at finitely many points, and $\mathbf{U}'$ contains a unique critical point of $F$. The \textbf{filled Julia set of a polynomial-like map} is the set of points whose orbits remain in $\mathbf{U}'$: $\left\{z \in \mathbf{U}' ~ \middle| ~ F^k(z) \in \mathbf{U}', \forall k \in \mathbb{N} \right\}$. \end{definition} For a map satisfying this Definition \ref{Polynomial-like_definition}, Douady and Hubbard showed the following: \begin{theorem} \label{DH_Ploynomial-Like_theorem} \cite{douhub} A polynomial-like map of degree two is topologically conjugate on its filled Julia set to a quadratic polynomial on that polynomial's filled Julia set. \end{theorem} We will use this result later to locate homeomorphic copies of the filled Julia sets of $P_c$ in some particular dynamical planes of $R_{n,a,c}$. 
Douady and Hubbard provided criteria under which a family of polynomial-like functions possesses a baby $\mathcal{M}$ in a region $W$: \begin{theorem} \label{DH_Mandelbrot_existence_Criterion_Theorem} Assume we are given a family of polynomial-like maps $F_\lambda: \mathbf{U}'_\lambda \rightarrow \mathbf{U}_\lambda$ that satisfies the following: \begin{itemize} \item $\lambda$ is in an open set in $\mathbb{C}$ which contains a closed disk $W$; \item The boundaries of $\mathbf{U}'_\lambda$ and $\mathbf{U}_\lambda$ vary analytically as $\lambda$ varies; \item The map $(\lambda, z) \mapsto F_\lambda(z)$ depends analytically on both $\lambda$ and $z$; \item Each $F_\lambda$ is polynomial-like of degree two with a unique critical point $c_\lambda$ in $\mathbf{U}'$. \end{itemize} Suppose for all $\lambda \in \partial W$ that $F_\lambda(c_\lambda) \in \mathbf{U} - \mathbf{U}'$ and that $F_\lambda(c_\lambda)$ makes a closed loop around the outside of $\mathbf{U}'$ as $\lambda$ winds once around $\partial W$. If all this occurs, then the set of $\lambda$-values for which the orbit of $c_\lambda$ does not escape from $\mathbf{U}'$ is homeomorphic to the Mandelbrot set. \end{theorem} Theorem \ref{DH_Mandelbrot_existence_Criterion_Theorem} is key to establishing the location of some baby $\mathcal{M}$'s, like we see in Figures \ref{aPlane_Mandelbrot_Example_Figure} and \ref{cPlane_Multiple_mandelbrot_example}. Because $\infty$ is a super-attracting fixed point of $R_{n,a,c}$, as it is for $P_c$, we can define the {\bf filled Julia set of} $R_{n,a,c}$ as the set of points whose orbits do not escape to $\infty$. One thing to note about $R_{n,a,c}$ is that it has $2n$ critical points, $a^{\frac{1}{2n}}$. Though this could make it difficult to observe all critical orbits, it turns out that each of the critical points map to one of two values, $v_{\pm} = c \pm 2\sqrt{a}$. Thus there are only two free critical orbits no matter the value of $n$. 
Later we will study the effect of these two critical orbits. We will exploit the following involution symmetry of $R_{n,a,c}$, to not only locate where the Julia set lies, but also to establish some cases in which $R_{n,a,c}$ is polynomial-like. \begin{lemma} \label{involution_prop} $R_{n,c,a}$ is symmetric under the involution map $h_{a}(z)=\dfrac{a^\frac{1}{n}}{z}$. \end{lemma} \begin{proof} $$ R_{n,c,a}(h_{a}(z))=\left(\frac{a^\frac{1}{n}}{z}\right)^n+\dfrac{a}{\left(\frac{a^\frac{1}{n}}{z}\right)^n}+c=\dfrac{a}{z^{n}}+z^{n}+c=R_{n,c,a}(z). $$ \end{proof} This symmetry will be used in both cases of the $a$ and $c$ parameter planes. We will also use the following notation: \begin{notation} $\mathbb{D}(z_0,r)$ represents the disc $\left\{z ~ \middle|~ \lvert z - z_0 \rvert < r \right\}$. \end{notation} \begin{notation} $\mathbb{A}(r,R)$ represents the annulus $\left\{z ~ \middle|~ r < \lvert z \rvert < R \right\}$. \end{notation} \section{The Case of $c$ fixed, $a$ varying} \label{Main_One_Section} In this section we establish Main Theorem~\ref{Main_Theorem_APlane}. Throughout, we will be under the following parameter restrictions: \hangindent=0.7cm \begin{itemize} \item $n\geq 3$, \item $\lvert c \rvert \leq 1$ and $c \in \mathbb{R}$, \item $\dfrac{c^2}{4}\leq ~\lvert a \rvert~\leq \left( 1-\dfrac{c}{2}\right)^2$. \end{itemize} \subsection{Dynamical Plane Results} Within these parameters we will restrict the location of the Julia set of $R_{n,a,c}$ (in Lemma \ref{EscapeAnnulus_lemma_aplaneCase}). After that, we prove $R_{n,a,c}$ is polynomial-like of degree two (in Proposition ~\ref{R_Polynomial_like_on_first_U'_aplane_prop}). 
First we take advantage of a result from \cite{boydschul}: \begin{lemma} \label{Julia_Set_Restriction_lemma} \cite{boydschul} For any $c \in \mathbb{C}$ and any $a \in \mathbb{C}$, given any $\varepsilon > 0$, there is an $N \geq 2$ such that for all $n \geq N$ the filled Julia set of $R_{n,a,c}$ must lie in $\mathbb{D}(0,1+\varepsilon)$, the disk of radius $1 + \varepsilon$ centered at the origin. \end{lemma} This happens as the orbit of any point outside a radius of $1+\varepsilon$ escapes to $\infty$, thus such a point with this behavior is not in the filled Julia set. We apply this result to our case of restrictions on $n$, $a$, and $c$. \begin{lemma} \label{EscapeRadius_lemma_aplaneCase} For $n \geq 3$, $\lvert c \rvert \leq 1$, and $\dfrac{c^2}{4}\leq ~\lvert a \rvert~\leq \left( 1-\dfrac{c}{2}\right)^2$, the filled Julia set of $R_{n,a,c}$ lies in the closed disk of radius $2$ centered at the origin. \end{lemma} \begin{proof} The proof of Lemma \ref{Julia_Set_Restriction_lemma} in \cite{boydschul} says that if $N$ satisfies $(1+\varepsilon)^N > 3 \text{Max} \lbrace 1,\lvert a \rvert, \lvert c \rvert \rbrace$ then for $n \geq N$ we have an escape radius of $1+\varepsilon$. That is, the orbits of values $\lvert z \rvert > 1+\varepsilon$ tend to $\infty$. Setting $\varepsilon=1$, by our constraints on $a$ and $c$, we have: $$ 3\text{Max} \lbrace 1,\lvert a \rvert, \lvert c \rvert \rbrace = 3\text{Max} \lbrace 1, \left( 1-\dfrac{c}{2}\right)^2, 1 \rbrace = 3(2.25) = 6.75 $$ for $a$ and $c$ at their greatest moduli. So when we solve this inequality for $N$, we find $N > \dfrac{\ln(6.75)}{\ln(2)} \approx 2.75$, thus $n \geq 3$ will satisfy the criterion. Therefore, the orbit of any $\lvert z \rvert > 2$ will escape to $\infty$ under iteration by $R_{n,a,c}$, hence the filled Julia set must lie in $\overline{\mathbb{D}(0,2)}$.
\end{proof} Combining this with Lemma \ref{involution_prop} restricts further the location of the filled Julia set of $R_{n,a,c}$. \begin{lemma} \label{EscapeAnnulus_lemma_aplaneCase} With the same assumptions on $n$, $a$, and $c$ as Lemma \ref{EscapeRadius_lemma_aplaneCase}, the filled Julia set of $R_{n,a,c}$ lies within the annulus $\mathbb{A}\left(\dfrac{\lvert a \rvert^{\frac{1}{n}}}{2} ,~2\right)$. \end{lemma} \begin{proof} Given any $\lvert z \rvert \leq \dfrac{\lvert a \rvert^{\frac{1}{n}}}{2}$, the involution symmetry of Lemma \ref{involution_prop} gives $R_{n,a,c}(z) = R_{n,a,c}(h_{a}(z))$ with $\lvert h_{a}(z) \rvert \geq 2$, so by Lemma \ref{EscapeRadius_lemma_aplaneCase} the orbits of these values also escape to $\infty$. Therefore the filled Julia set is a subset of $\mathbb{A} \left( \dfrac{\lvert a \rvert^{\frac{1}{n}}}{2}, 2 \right)$. \end{proof} Figure \ref{Julia_set_in_Annulus_Figure} shows a Julia set of $R_{n,a,c}$ lying in this annulus. We see various black shapes appearing in this dynamical plane and will actually prove below that these shapes are homeomorphic copies of a filled Julia set of a $P_{c}$. (The one in the figure appears to be a baby basilica $K_{-1}$ for which the critical value lies in a period two cycle.) This occurs because $R_{n,a,c}$ is polynomial-like of degree two on those regions, which we prove in Proposition \ref{R_Polynomial_like_on_first_U'_aplane_prop}. \begin{figure} \caption{A filled Julia set of $R_{n,a,c}$ lying in the annulus $\mathbb{A}\left(\dfrac{\lvert a \rvert^{\frac{1}{n}}}{2},~2\right)$.} \label{Julia_set_in_Annulus_Figure} \end{figure} Now we define the region on which we will show $R_{n,a,c}$ is polynomial-like of degree two: \begin{equation} \label{UPrime_Equation_Definition} \boxed{ \mathbf{U}' = \mathbf{U}'_{n,a}=\left\{ z=re^{i\theta} \ \middle| \ \ \frac{\lvert a \rvert^{\frac{1}{n}}}{2}<~r~<2~~and~~\dfrac{\psi-\pi}{2n}<~\theta~<\dfrac{\psi+\pi}{2n} \right\} } \end{equation} where $\psi = \Arg(a)$, and we set \begin{equation} \label{U_Equation_Definition} \boxed{ \mathbf{U} = \mathbf{U}_{n,c,a} = R_{n,c,a}(\mathbf{U}'_{n,a})}~.
\end{equation} We see that $\mathbf{U}'$ is a slice of $\mathbb{A}\left( \dfrac{\lvert a \rvert^{\frac{1}{n}}}{2},2 \right)$ so it contains a portion of the Julia set of $R_{n,a,c}$. $\mathbf{U}'$ also contains exactly one of the critical points of $R_{n,a,c}$, specifically $\lvert a \rvert^{1 / 2n}e^{i\psi / 2n}$, since $\dfrac{\lvert a \rvert^{\frac{1}{n}}}{2}<~\lvert a \rvert^{1 / 2n}~<2$ is true for $\lvert a \rvert < 2^{2n}$. The range of $a$ we work in is well below that threshold. The argument of the critical point, $\dfrac{\psi}{2n}$, is the midpoint of the angular range of $\mathbf{U}'$. The rest of the critical points of $R_{n,a,c}$ are spread out in intervals of $\dfrac{\pi}{n}$ radians and these do not fall within the angular interval of $\left( \dfrac{\psi - \pi}{2n}~,~\dfrac{\psi + \pi}{2n}\right)$. Thus $\mathbf{U}'$ contains a unique critical point of $R_{n,a,c}$ and we have established one of the criteria of Definition \ref{Polynomial-like_definition}. To satisfy the rest of Definition \ref{Polynomial-like_definition}, we start by describing $\mathbf{U}$ more precisely: \begin{lemma} \label{uhalfellipse_lemma} $\mathbf{U}$ is half an ellipse centered at $c$ and rotated by $\psi /2$. \end{lemma} \begin{proof} Ignoring the restriction on argument, we consider the set $$ \left\{ R_{n,c,a} \left( 2e^{i\theta} \right) \ \middle| \ \ 0\leq \theta \leq 2\pi \right\}. $$ This set contains the image of the outer and inner arcs of $\mathbf{U}'$ by Lemma \ref{involution_prop}. Because we are considering all angles of $\theta$, our set is independent of the starting angle. We can apply an angular shift and the image set will remain the same, thus we instead consider the set $$ \left\{ R_{n,c,a} \left( 2e^{i\left(\theta+\frac{\psi}{2n}\right)} \right) \ \middle| \ \ 0\leq \theta \leq 2\pi \right\}.
$$ So \begin{eqnarray} & ~ &R_{n,c,a}\left(2e^{i\left(\theta+\frac{\psi}{2n}\right)}\right) \notag\\ & = & \left(2*\exp \left(i\left(\theta+\frac{\psi}{2n}\right)\right) \right) ^{n}+\frac{a}{\left( 2*\exp \left(i\left(\theta+\frac{\psi}{2n}\right)\right) \right) ^{n}}+c \notag \\ & = & 2^{n}*\exp \left(i\left(n\theta+\frac{\psi}{2}\right)\right)+\frac{\lvert a \rvert e^{i\psi}}{2^{n}*\exp \left(i\left(n\theta+\frac{\psi}{2}\right)\right)}+c\notag \\ & = & 2^{n}*\exp \left(i\left(n\theta+\frac{\psi}{2}\right)\right)+\frac{\lvert a \rvert}{2^{n}}*\exp \left(i\left(\psi - \left(n\theta+\frac{\psi}{2}\right)\right)\right)+c \notag \\ & = & e^{i\frac{\psi}{2}}\left(2^{n}e^{in\theta}+\frac{\lvert a \rvert}{2^{n}}e^{-in\theta}\right)+c \notag \\ & = & e^{i\frac{\psi}{2}}\left(2^{n}\left(\cos\left( n\theta \right)+i\sin \left(n\theta \right) \right)+\frac{\lvert a \rvert}{2^{n}}\left( \cos \left(n\theta \right)-i\sin \left(n\theta \right)\right)\right)+c \notag \\ \label{ellipse} & = & e^{i\frac{\psi}{2}} \left( \left(2^{n}+\frac{\lvert a \rvert}{2^{n}}\right)\cos(n\theta)+i\left(2^{n}-\frac{\lvert a \rvert}{2^{n}}\right)\sin(n\theta) \right) + c. \end{eqnarray} Note the above is of the form $e^{i\frac{\psi}{2}} \left( x + iy \right) + c,~\text{where:}$ \begin{equation} \label{paraellipse} \begin{array}{lcl} & x= & \left(2^{n}+\frac{\lvert a \rvert}{2^{n}}\right)\cos(n\theta) \\ & y= & \left(2^{n}-\frac{\lvert a \rvert}{2^{n}}\right)\sin(n\theta). \end{array} \end{equation} Compare this to the parametric equation of an ellipse centered at the origin: \begin{eqnarray*} & x= & b\cos(\phi) \\ & y= & d\sin(\phi) \end{eqnarray*} where $0\leq \phi \leq 2\pi$, $b$ is half the length of the major axis and $d$ is half the length of the minor axis. These axes lie respectively on the real and imaginary axes of the complex plane. 
Thus the equation set \eqref{paraellipse} is an ellipse centered at the origin with a major axis length of $2\left( 2^{n}+\frac{\lvert a \rvert}{2^{n}}\right)$, and a minor axis length $2\left( 2^{n}-\frac{\lvert a \rvert}{2^{n}}\right)$ that wraps around $n$ times. Going back to \eqref{ellipse} we find our image set to be the ellipse described above, rotated by $\psi /2$ and centered at $c$. By our independence of starting angle, this gives us equality to the first set described, and $\left\{ R_{n,c,a}(2e^{i\theta}) \ \middle| \ \ 0\leq \theta \leq 2\pi \right\}$ is this exact ellipse as well. \textbf{Hence we define the ellipse $\mathcal{E}$ by Equation \eqref{ellipse}}. Now we look at the image of the rays $\displaystyle re^{i\frac{\psi \pm \pi}{2n}}$ for $\frac{\lvert a \rvert^{\frac{1}{n}}}{2}<~r~<2$. \begin{eqnarray*} R_{n,c,a} \left(re^{i\frac{\psi \pm \pi}{2n}} \right) & = & \left( r*\exp\left(i\frac{\psi \pm \pi}{2n}\right) \right) ^{n}+\frac{a}{\left( r*\exp\left(i\frac{\psi \pm \pi}{2n}\right) \right) ^{n}}+c \\ & = & r^{n}*\exp\left(i\frac{\psi \pm \pi}{2}\right)+\frac{\lvert a \rvert e^{i\psi}}{r^{n}*\exp\left(i\frac{\psi \pm \pi}{2}\right)}+c \\ & = & r^{n}*\exp\left(i\frac{\psi \pm \pi}{2}\right)+\frac{\lvert a \rvert}{r^{n}}*\exp\left(i\frac{\psi \mp \pi}{2}\right)+c \\ & = & \exp \left(i\frac{\psi}{2}\right) \left(r^{n}*\exp\left(\pm i\frac{\pi}{2}\right)+\frac{\lvert a \rvert}{r^{n}}*\exp\left(\mp i\frac{\pi}{2}\right)\right)+c \\ & = & \pm e^{i\frac{\psi}{2}}\left(r^{n} - \frac{\lvert a \rvert}{r^{n}}\right)i+c. \end{eqnarray*} This is a line segment on the imaginary axis from $-\left(2^{n} - \frac{\lvert a \rvert}{2^{n}}\right)i$ to \\ $\left(2^{n} - \frac{\lvert a \rvert}{2^{n}}\right)i$, rotated by $\psi /2$, then shifted by $c$. In fact, this is actually the minor axis of $\mathcal{E}$. 
Finally we investigate the original restriction of $\theta$, $$ \dfrac{\psi-\pi}{2n}<~\theta~<\dfrac{\psi+\pi}{2n}, $$ and find that \eqref{paraellipse} gives us $$ \dfrac{\psi}{2}-\dfrac{\pi}{2} < n\theta < \dfrac{\psi}{2}+\dfrac{\pi}{2}. $$ We see the angular range is $\pi$ radians in size, hence yielding half an ellipse. Combine this curve with the minor axis of $\mathcal{E}$ (the image of the rays) and we have that $\mathbf{U}$ is a half ellipse rotated by $\psi/2$ and centered at $c$. (See Figure~\ref{Ellipse_Figure}.) \end{proof} \begin{figure} \caption{A sketch of $\mathbf{U}$.} \label{Ellipse_Figure} \end{figure} It turns out that the foci of $\mathcal{E}$ are values of importance; in fact, they are the critical values of the map $R_{n,c,a}$. \begin{lemma} \label{Ellipse_Foci_Are_CritValues_Lemma} $v_{\pm}$ are the foci of $\mathcal{E}$. \end{lemma} \begin{proof} For any ellipse, the foci lie on the major axis. The square of the distance of a focal point from the center is equal to the difference of the squares of half the major and minor axis lengths. So we get: $$ \sqrt{\left(2^n + \dfrac{\lvert a \rvert}{2^n} \right)^2 - \left(2^n - \dfrac{\lvert a \rvert}{2^n} \right)^2}= \sqrt{4\lvert a \rvert} = 2\sqrt{\lvert a \rvert}. $$ Since the center of $\mathcal{E}$ is $c$ and its major axis is rotated by $\psi/2$, the foci of $\mathcal{E}$ must be $c \pm 2\sqrt{\lvert a \rvert}e^{i\psi/2} = c \pm 2\sqrt{a} = v_{\pm}$. \end{proof} Using Lemma \ref{Ellipse_Foci_Are_CritValues_Lemma} we can describe $\mathcal{E}$ via another equation, \begin{equation} \label{Ellipse_Equation_Second_Definition} \boxed{ \mathcal{E} = \left\{ z \ \middle| ~ \lvert z-v_- \rvert + \lvert z-v_+ \rvert = 2^{n+1} + \frac{\lvert a \rvert}{2^{n-1}} \right\} }~. \end{equation} Being able to describe $\mathcal{E}$ as \eqref{Ellipse_Equation_Second_Definition} helps in the proof of our next lemma. Now we begin to satisfy more criteria of Definition \ref{Polynomial-like_definition}.
\begin{lemma} \label{u'inellipse} Given $n \geq 3$, $\dfrac{c^2}{4}\leq ~\lvert a \rvert~\leq \left( 1-\dfrac{c}{2}\right)^2$, and $\lvert c \rvert \leq 1$ then $\mathbf{U}' \subseteq \mathcal{E}$. \end{lemma} \begin{proof} Since $\mathbf{U}' \subseteq \overline{\mathbb{D}(0,2)}$ we will assume $\lvert z \rvert \leq 2$ and prove $\overline{\mathbb{D}(0,2)} \subseteq \mathcal{E}$, and thus $\mathbf{U}' \subseteq \mathcal{E}$. So \begin{eqnarray*} & ~ & \lvert z-v_- \rvert + \lvert z-v_+ \rvert \\ & = & \lvert z-(c-2\sqrt{a}) \rvert + \lvert z-(c+2\sqrt{a}) \rvert \\ & \leq & 2\lvert z \rvert + 2\lvert c \rvert +4\sqrt{\lvert a \rvert} \\ & \leq & 2(2) + 2(1) + 4\sqrt{\left( 1-\frac{c}{2}\right)^2} \\ & \leq & 4 + 2 + 4(1.5) \\ & < & 16 \\ (since ~ n \geq 3)& \leq & 2^{n+1} ~ \leq ~ 2^{n+1} + \frac{\lvert a \rvert}{2^{n-1}}. \end{eqnarray*} By Lemma \ref{Ellipse_Foci_Are_CritValues_Lemma} and the description of $\mathcal{E}$ in Equation \eqref{Ellipse_Equation_Second_Definition}, these inequalities yield that $\mathbf{U}' \subseteq \mathcal{E}$ under the restrictions on $\lvert a \rvert$ and $\lvert c \rvert$. \end{proof} Having $\mathbf{U}'$ contained in $\mathcal{E}$ is helpful, but we need to restrict further to one half of $\mathcal{E}$. The critical point in $\mathbf{U}'$ maps to $v_+$ which is in the right half of $\mathcal{E}$. We will restrict the argument of $a$ to $\lvert \psi \rvert \leq \frac{\pi}{n-1}$ which is bounded by $\frac{\pi}{2}$ for $n \geq 3$. Since $\mathcal{E}$ is a horizontal ellipse rotated by $\frac{\psi}{2}$, then under this restriction $\mathcal{E}$ is rotated by $\frac{\pi}{4}$ at most. In our next lemma we give criteria for which the minor axis of $\mathcal{E}$ does not intersect $\mathbf{U}'$ and at worst intersects $\partial \overline{\mathbf{U}'}$ on the left side. This will then give us that $\mathbf{U}' \subset \mathbf{U}$. 
\begin{lemma} \label{noaxisintercept} For $n \geq 3$ and real $c < \dfrac{\lvert a \rvert ^{\frac{1}{n}}}{2\sqrt{2}}$, the minor axis of $\mathcal{E}$ does not intersect $\mathbf{U}'$. \end{lemma} \begin{proof} Keeping the restriction to the argument of $a$, $\lvert \psi \rvert \leq \frac{\pi}{2}$, we determine the value of $c$ for which the minor axis intersects $\mathbf{U}'$ when $\psi = \pm \frac{\pi}{2}$. Based on Figure \ref{Ellipse_intersecting_UPrime_Figure}, if we start with $\psi = \frac{\pi}{2}$ then the minor axis will be rotated by $\frac{\pi}{4}$ with respect to the imaginary-axis and then shifted by $c$. We shall determine the value of $c$ for which the minor axis hits the lower left vertex of $\mathbf{U}'$ at modulus $\dfrac{\lvert a \rvert^{\frac{1}{n}}}{2}$ and argument $\dfrac{\frac{\pi}{2}-\pi}{2n} ~ = ~ -\dfrac{\pi}{4n}$. \begin{figure} \caption{The minor axis of $\mathcal{E} \label{Ellipse_intersecting_UPrime_Figure} \end{figure} The coordinates of this intersection point are\\ {$P_2={\left( \dfrac{\lvert a \rvert^{\frac{1}{n}}}{2}\cos\left( \dfrac{\pi}{4n}\right) ~ , ~ -\dfrac{\lvert a \rvert^{\frac{1}{n}}}{2}\sin\left( \dfrac{\pi}{4n}\right) \right)}$}. With a closer view, we have a right triangle as shown on the right of Figure \ref{Ellipse_intersecting_UPrime_Figure} and because $\frac{\psi}{2} = \frac{\pi}{4}$, both legs of the triangle are equal length. The coordinates of the other two points of the triangle are \begin{itemize} \item $P_0=\left(0 ,c \right)$, \item $P_1=\left( \dfrac{\lvert a \rvert^{\frac{1}{n}}}{2}\cos\left( \dfrac{\pi}{4n}\right) ~ , ~0 \right)$. \end{itemize} The length of the vertical leg is the absolute value of the imaginary component of $P_2$. The length of the horizontal leg is the difference between the real components of $P_0$ and $P_1$. 
We set these values equal to each other, $$ \dfrac{\lvert a \rvert^{\frac{1}{n}}}{2}\sin\left( \dfrac{\pi}{4n}\right) ~ = ~ \dfrac{\lvert a \rvert^{\frac{1}{n}}}{2}\cos\left( \dfrac{\pi}{4n}\right) - c,\\ $$ then solve for $c$ in terms of $a$: \begin{eqnarray*} c & = & \dfrac{\lvert a \rvert^{\frac{1}{n}}}{2}\left( \cos\left( \dfrac{\pi}{4n}\right) - \sin\left( \dfrac{\pi}{4n}\right) \right)\\ & \geq & \dfrac{\lvert a \rvert^{\frac{1}{n}}}{2}\left( \cos\left( \dfrac{\pi}{12}\right) - \sin\left( \dfrac{\pi}{12}\right) \right) ~(\text{Since}~n~\geq~3)\\ & = & \dfrac{\lvert a \rvert ^{\frac{1}{n}}}{2\sqrt{2}}. \end{eqnarray*} Thus the smallest $c$ value for which the minor axis of $\mathcal{E}$ intersects $\mathbf{U}'$ is $c = \dfrac{\lvert a \rvert ^{\frac{1}{n}}}{2\sqrt{2}}$, so choosing $-1 \leq c < \dfrac{\lvert a \rvert ^{\frac{1}{n}}}{2\sqrt{2}}$ yields $\partial \mathbf{U}' \cap \partial \mathbf{U} = \emptyset$. If we go to the other extreme and let $\psi = -\frac{\pi}{2}$, the work will be just the same. Here our intersection point is now $\left( \dfrac{\lvert a \rvert^{\frac{1}{n}}}{2}\cos\left( \dfrac{\pi}{4n}\right) ~ , ~ \dfrac{\lvert a \rvert^{\frac{1}{n}}}{2}\sin\left( \dfrac{\pi}{4n}\right) \right)$, and the triangle is just a reflection of the previous case across the real axis. Therefore our result is the same as above. Finally, we argue that for $c=\dfrac{\lvert a \rvert ^{\frac{1}{n}}}{2\sqrt{2}}$, the minor axis will only touch the corners of $\mathbf{U}'$ at $\psi = \pm \frac{\pi}{2}$ and there will be no intersection for any $\psi$ or $c$ value smaller than these bounds. \begin{figure} \caption{\label{fig:axisnotintersect} \label{fig:axisnotintersect} \end{figure} When we change $\psi$, the overall change in angle of the minor axis will be larger than the overall change in angle of the rays of $\mathbf{U}'$. 
Starting at $\psi = -\frac{\pi}{2}$ and increasing by some value $\gamma$, we find the change in angle of the minor axis to be $$ \dfrac{\psi + \gamma}{2}-\dfrac{\psi}{2}=\dfrac{\gamma}{2} $$ and the change in angle of the upper ray of $\mathbf{U}'$ to be $$ \dfrac{(\psi + \gamma)+\pi}{2n}-\dfrac{\psi + \pi}{2n}=\dfrac{\gamma}{2n}. $$ Since $n \geq 3$, then $\frac{\gamma}{2} > \frac{\gamma}{2n}$ and the minor axis won't touch $\mathbf{U}'$ again until it goes too far and hits the ``lower left" corner of $\mathbf{U}'$. As shown above though, this won't occur until $\psi = \frac{\pi}{2}$.\\ Therefore for $c ~ < ~ \dfrac{\lvert a \rvert ^{\frac{1}{n}}}{2\sqrt{2}}$ we find the minor axis of $\mathcal{E}$ will not intersect $\mathbf{U}'$. \end{proof} Figure \ref{UPrime_Subset_U_Graphic} shows one example of $\mathbf{U}' \subset \mathbf{U}$ (there is nothing particularly special about these parameter values, they are merely round numbers which satisfy $\mathbf{U}' \subset \mathbf{U}$ ). \begin{figure} \caption{An example of $\mathbf{U} \label{UPrime_Subset_U_Graphic} \end{figure} Now we can show that $R_{n,a,c}$ is polynomial-like on $\mathbf{U}'$. \begin{proposition} \label{R_Polynomial_like_on_first_U'_aplane_prop} $R_{n,a,c}: \mathbf{U}' \rightarrow \mathbf{U}$ is polynomial-like of degree two when $n \geq 3$, $\dfrac{c^2}{4}\leq ~\lvert a \rvert~\leq \left( 1-\dfrac{c}{2}\right)^2$, and $-1 \leq c \leq 0$. \end{proposition} \begin{proof} Both $\mathbf{U}'$ and $\mathbf{U}$ are bounded, open, and simply connected. Combining the results from Lemma \ref{u'inellipse} and Lemma \ref{noaxisintercept} with the restriction that $-1 \leq c \leq 0 < \dfrac{\lvert a \rvert ^{\frac{1}{n}}}{2\sqrt{2}}$ yields that $\mathbf{U}'$ is relatively compact in $\mathbf{U}$. Also, Lemma \ref{involution_prop} plus the fact that $\mathbf{U}'$ contains a unique critical point yields that $R_{n,a,c}$ is a two-to-one map on $\mathbf{U}'$. 
Last, $R_{n,a,c}$ is analytic on $\mathbf{U}'$ because $a \neq 0$ and $\mathbf{U}'$ does not contain the origin. Therefore $R_{n,a,c}$ satisfies Definition \ref{Polynomial-like_definition} and is polynomial-like of degree two. \end{proof} Now because of the polynomial-like behavior of $R_{n,a,c}$, we see the reason for baby quadratic Julia sets appearing in the dynamical plane of $R_{n,a,c}$. \begin{corollary} \label{Baby_Julia_Corollary_First} With the same assumptions on $n$, $a$, and $c$ as in Proposition \ref{R_Polynomial_like_on_first_U'_aplane_prop}, the collection of points in $\mathbf{U}'$ whose orbits do not escape $\mathbf{U}'$ is homeomorphic to the filled Julia set of a quadratic polynomial. \end{corollary} \begin{proof} Having established that $R_{n,a,c}$ is polynomial-like of degree two in Proposition \ref{R_Polynomial_like_on_first_U'_aplane_prop}, by Theorem \ref{DH_Ploynomial-Like_theorem}{} $R_{n,a,c}$ is topologically conjugate to some quadratic polynomial on that polynomial's filled Julia set. This polynomial's Julia set is the baby Julia set we find in the dynamical plane of $R_{n,a,c}$. \end{proof} This confirms that the five obvious black shapes in Figure \ref{Julia_set_in_Annulus_Figure} are baby Julia sets occurring in the dynamical plane of $R_{n,a,c}$. \subsection{Parameter Plane Results: $a$-plane} Now we turn to locating a homeomorphic copy of $\mathcal{M}$ in the boundedness locus in the $a$-parameter plane of $R_{n,a,c}$. We need to show we satisfy the criteria of Theorem \ref{DH_Mandelbrot_existence_Criterion_Theorem}, so we first define the set $\mathbf{W}$ mentioned in its hypothesis: \begin{equation} \boxed{ \mathbf{W}_{n,c} = \left\{ a \ \middle| \ \ \frac{c^2}{4}\leq ~\lvert a \rvert~\leq \left( 1-\frac{c}{2}\right)^2~~and~~\lvert \psi \rvert \leq \frac{\pi}{n-1} \right\}}~. \label{eqn:defnW} \end{equation} Remember that $\psi=\Arg(a)$. 
Now we show how the other parts of the hypotheses from Theorem~\ref{DH_Mandelbrot_existence_Criterion_Theorem} are satisfied. \begin{proposition} \label{allnvplusloopW} For $n \geq 3$ and $-1 \leq c \leq 0$, as $a$ travels around $\partial \mathbf{W}_{n,c}$, $v_+$ loops around $\mathbf{U} - \mathbf{U}'$. \end{proposition} \begin{proof} We start with $\lvert a \rvert = \dfrac{c^2}{4}$ on the inside arc of $\mathbf{W}_{n,c}$. Here $$ \operatorname{Re}(v_+)=\operatorname{Re}\left( c+2\sqrt{a} \right) = c+2\sqrt{\lvert a \rvert}\cos\left( \dfrac{\psi}{2} \right) = c+\lvert c \rvert \cos \left( \dfrac{\psi}{2} \right) \leq c+ \lvert c \rvert = 0 $$ since $c \leq 0$. This means $v_+$ never lies in the right half plane. The inside arc of $\mathbf{U}'$ is always in the right half plane because $$ \lvert \Arg(z \in \mathbf{U}') \rvert \leq \left| \dfrac{\pm \frac{\pi}{n-1}\pm \pi}{2n} \right| \leq \dfrac{\pi}{2(n-1)} < \dfrac{\pi}{2} $$ for $\lvert \Arg(a) \rvert \leq \frac{\pi}{n-1}$ and $n \geq 3$. This means that $v_+$ lies to the left of $\mathbf{U}'$ when $a$ is on the inside arc of $\mathbf{W}_{n,c}$. Now for $a$ on the upper ray of $\mathbf{W}_{n,c}$, $\Arg(a)=\frac{\pi}{n-1}$. Thus for any $z \in \mathbf{U}'$, $$ \Arg(z) \leq \frac{\pi}{2(n-1)}. $$ Because $c$ is real and non-positive, we find \begin{eqnarray*} \Arg(v_+) & = & \Arg(c+2\sqrt{a})\\ & \geq & \Arg(2\sqrt{a}) \\ & = & \dfrac{\psi}{2} \\ & = & \dfrac{1}{2}\dfrac{\pi}{n-1} \\ & \geq & \Arg(z \in \mathbf{U}'). \end{eqnarray*} So at worst $v_+$ touches the upper ray of $\mathbf{U}'$ when $a$ is on the upper ray of $\mathbf{W}_{n,c}$. This is permissible as $\mathbf{U}'$ is open. When we reach the outer arc of $\mathbf{W}_{n,c}$ where $\lvert a \rvert = \left( 1-\frac{c}{2}\right)^2$, we find that $\lvert v_+ \rvert = \lvert c+2\sqrt{a} \rvert $ $$ = \sqrt{c^2 + 4\lvert a \rvert + 4c\sqrt{\lvert a \rvert}\cos \left(\dfrac{\psi}{2} \right)} = \sqrt{4-4c+2c^2-2c(c-2)\cos \left(\dfrac{\psi}{2} \right)}.
$$ To find the minimum of this, we take a derivative with respect to $\psi$ and get $$ \dfrac{c(c-2)\sin\left( \frac{\psi}{2} \right)}{2\sqrt{4-4c+2c^2-2c(c-2)\cos \left( \frac{\psi}{2} \right)}}, $$ and the modulus has a critical point at $\psi=0$. Using the second derivative test we find a minimum occurs at $\psi=0$, and evaluating the modulus at $\psi = 0$ gives $$ \lvert v_+ \rvert = \sqrt{4-4c+2c^2-2c(c-2)(1)}=2. $$ Therefore, at worst $v_+$ just touches the outer boundary arc of $\mathbf{U}'$ as $a$ travels along the outer arc of $\mathbf{W}_{n,c}$ (still permissible with $\mathbf{U}'$ open). Now for $a$ on the lower ray, $\Arg(a)=-\frac{\pi}{n-1}$. Using this and the fact that $\Arg(c)=-\pi$, we find $$ \Arg(z \in \mathbf{U}') > -\dfrac{1}{2} \dfrac{\pi}{n-1} = \Arg(2\sqrt{a}) \geq \Arg(c+2\sqrt{a}). $$ So $\Arg(z \in \mathbf{U}') > \Arg(v_+)$ for all $a$ on the lower ray of $\mathbf{W}_{n,c}$ and $v_+$ lies below $\mathbf{U}'$. Therefore as $a$ comes back to its starting position in $\partial \mathbf{W}_{n,c}$, $v_+$ has finished a closed loop around the outside of $\mathbf{U}'$. \end{proof} With Propositions \ref{R_Polynomial_like_on_first_U'_aplane_prop} and \ref{allnvplusloopW} we satisfy the necessary conditions of Theorem \ref{DH_Mandelbrot_existence_Criterion_Theorem} for the existence of a baby $\mathcal{M}$ lying in $\mathbf{W}_{n,c}$, in a subset of the boundedness locus. \begin{theorem} \label{V+Mandel_APlane_Cnegative_theorem} For $n \geq 3$ and $-1 \leq c \leq 0$, the set of $a$-values within $\mathbf{W}_{n,c}$ for which the orbit of $v_+$ does not escape $\mathbf{U}'$ is homeomorphic to $\mathcal{M}$. \end{theorem} See Figure \ref{baby_mandel_in_W_Figure} for an example of such a baby $\mathcal{M}$. Theorem \ref{V+Mandel_APlane_Cnegative_theorem} is part of Main Theorem \ref{Main_Theorem_APlane}. We will finish Main Theorem \ref{Main_Theorem_APlane} by taking advantage of some symmetries in the family $R_{n,a,c}$. 
\begin{figure} \caption{A baby $\mathcal{M}$ in $\mathbf{W}_{n,c}$.} \label{baby_mandel_in_W_Figure} \end{figure} \begin{lemma} \label{symmetry_through_negz_negc} For $n$ odd, $R^{m}_{n,a,-c}(-z) = -R^{m}_{n,a,c}(z)$ for all $m \in \mathbb{N}$. \end{lemma} \begin{proof} We prove this inductively, so let the base case be the first iterate of $R_{n,a,c}$: \begin{eqnarray*} R_{n,a,-c}(-z)&=& (-z)^n + \dfrac{a}{(-z)^n} - c\\ &=& -z^n - \dfrac{a}{z^n} - c \hspace{1cm} \text{(since $n$ is odd)}\\ &=& -\left( z^n + \dfrac{a}{z^n} + c \right)\\ &=& -R_{n,a,c}(z). \end{eqnarray*} Now assuming the hypothesis is true for $m-1$, $R^{m-1}_{n,a,-c}(-z) = -R^{m-1}_{n,a,c}(z)$, then \begin{eqnarray*} R^{m}_{n,a,-c}(-z)&=& R^{m-1}_{n,a,-c}(R_{n,a,-c}(-z))\\ &=& R^{m-1}_{n,a,-c}(-(R_{n,a,c}(z))) \hspace{1cm} \text{(from the base case)}\\ &=& -R^{m-1}_{n,a,c}(R_{n,a,c}(z))\hspace{1cm} \text{(from the inductive assumption)}\\ &=& -R^{m}_{n,a,c}(z). \end{eqnarray*} And this result holds for all iterates of $R_{n,a,c}$. \end{proof} \begin{lemma} \label{Crit_Orbits_Symm_Through_NegC_Lemma_APlane} For $n$ odd, $R^{m}_{n,a,-c}(v_-) = -R^{m}_{n,a,c}(v_+)$; that is, the behavior of the critical orbits is symmetric through $c$ and $-c$. \end{lemma} \begin{proof} Using Lemma \ref{symmetry_through_negz_negc} we find for every positive integer $m$, \begin{eqnarray*} R_{n,a,-c}^m(v_-) &=& R_{n,a,-c}^m(-c-2\sqrt{a})\\ &=& R_{n,a,-c}^m(-(c+2\sqrt{a})) = -R_{n,a,c}^m(c+2\sqrt{a}) = -R_{n,a,c}^m(v_+) \end{eqnarray*} and the critical orbits are symmetric about $c$ and $-c$. \end{proof} Lemma \ref{Crit_Orbits_Symm_Through_NegC_Lemma_APlane} says that the boundedness locus in the $a$-parameter plane is the same for $c$ and $-c$ when $n$ is odd. With this we can combine our results to gain existence of another baby $\mathcal{M}$.
\begin{corollary} \label{APlane_V-_Corollary} For odd $n \geq 3$ and $0 \leq c \leq 1$, the set of $a$-values within $\mathbf{W}_{n,c}$ for which the orbit of $v_-$ does not escape $\mathbf{U}'$ is homeomorphic to $\mathcal{M}$. \end{corollary} \begin{proof} The proof of this comes from the existence of the $v_+$ baby $\mathcal{M}$ in Theorem \ref{V+Mandel_APlane_Cnegative_theorem} and the symmetry of the critical orbits in Lemma \ref{Crit_Orbits_Symm_Through_NegC_Lemma_APlane}. \end{proof} Figure \ref{aPlane_n5_vMinus_Mandel_Figure} shows an example $a$-plane of $R_{n,a,c}$, exhibiting this $v_-$ baby $\mathcal{M}$ in the same spot where a $v_+$ baby $\mathcal{M}$ is promised by the symmetry. This result finishes Main Theorem \ref{Main_Theorem_APlane}. \begin{figure} \caption{An example $a$-plane of $R_{n,a,c}$ for $n=5$, showing a $v_-$ baby $\mathcal{M}$.} \label{aPlane_n5_vMinus_Mandel_Figure} \end{figure} \section{The case of $a$ fixed, $c$ varying} \label{Main_Two_Section} In this section, our goal is to establish Main Theorem \ref{Main_Theorem_CPlane}, item (i). We work under the following assumptions: \begin{itemize} \item $n \geq 5$, \item $1 \leq a \leq 4$, \item $c$ chosen such that $\lvert v_+ \rvert \leq 2$. \end{itemize} \subsection{Dynamical Plane Results} As with the case of $c$ fixed and $a$ varying, we start with some results about the dynamical plane under these new restrictions, and use the result from \cite{boydschul} to restrict where the Julia set of $R_{n,a,c}$ may lie under these new conditions. \begin{lemma} \label{EscapeRadius_lemma_cplaneCase} If $n \geq 5$, $1 \leq a \leq 4$, and $c$ is chosen such that $\lvert v_+ \rvert \leq 2$, then the filled Julia set of $R_{n,a,c}$ lies in $\mathbb{D}(0,2)$.
\end{lemma} \begin{proof} Once again, by the proof of Lemma \ref{Julia_Set_Restriction_lemma} in \cite{boydschul} for any $\varepsilon > 0$, if $N$ satisfies $(1+\varepsilon)^N > 3\text{Max} \lbrace 1,\lvert a \rvert, \lvert c \rvert \rbrace$ then for $n \geq N$ we have the escape radius of $1+\varepsilon$. That is, the orbits of values $\lvert z \rvert > 1+\varepsilon$ escape to $\infty$. We set $\varepsilon=1$. The largest possible $\lvert c \rvert$ such that $\lvert v_+ \rvert \leq 2$ is $c = -6$, since $\lvert -6+2\sqrt{4} \rvert = 2$. Therefore the modulus of $c$ is bounded by $6$. Hence, $$ 3\text{Max} \lbrace 1,\lvert a \rvert, \lvert c \rvert \rbrace = 3\text{Max} \lbrace 1, 4, 6 \rbrace = 18. $$ So when we solve $(1+\varepsilon)^N = 2^N > 18$ for $N$ we find $N > \dfrac{\ln(18)}{\ln(2)} \approx 4.17$ and $n \geq 5$ will satisfy the criterion. Because the filled Julia set of $R_{n,a,c}$ is the set of points whose orbits are bounded, it must lie within $\mathbb{D}(0,2)$. \end{proof} Combining this with Lemma \ref{involution_prop} again yields the same annulus as in the $a$-plane case. \begin{lemma} \label{EscapeAnnulus_lemma_cplaneCase} With the same assumptions on $n$, $a$, and $c$ as Lemma \ref{EscapeRadius_lemma_cplaneCase}, the filled Julia set of $R_{n,a,c}$ lies within the annulus $\mathbb{A}\left(\dfrac{\lvert a \rvert^{\frac{1}{n}}}{2} ,~2\right)$. \end{lemma} In this case of $a$ fixed, when observing the $c$-parameter plane we can actually show there exist $2n$ baby $\mathcal{M}$'s. Each baby $\mathcal{M}$ has a unique corresponding $\mathbf{U}'$ that is a rotational copy of our original definition from Equation \eqref{UPrime_Equation_Definition}: \begin{equation} \boxed{ \mathbf{U}'_k = \mathbf{U}'_{n,k,a}=\left\{ z=re^{i\theta} \ \middle| \ \ \frac{\lvert a \rvert^{\frac{1}{n}}}{2}<~r~<2, \ \ \dfrac{4\pi k-\pi}{2n}<~\theta~<\dfrac{4\pi k+\pi}{2n} \right\},} \end{equation} for $k=0,\dots,n-1$.
Note that $\mathbf{U}'_0$ is the same as Equation \eqref{UPrime_Equation_Definition} in the case that $a$ is real, hence $\psi = 0$. Each $k$ represents a different rotational copy inside $\mathbb{A}\left(\dfrac{\lvert a \rvert^{\frac{1}{n}}}{2} ,~2\right)$. Now we define $n$ $\mathbf{W}$ sets in the $c$-parameter plane that will contain baby $\mathcal{M}$s (we will produce $n$ more using symmetry): \begin{equation} \boxed{ \mathbf{W}_{n,a}k = \left\{ c \ \middle| \ \ \dfrac{a^{1/n}}{2} \leq \left| v_+ \right| \leq 2,\ \ \dfrac{4\pi k-\pi}{2n} \leq \Arg(v_+) \leq \dfrac{4\pi k+\pi}{2n} \right\}}. \label{eqn:defnWck} \end{equation} for $k=0,1,\dots,n-1$. For a fixed $k$, $\mathbf{W}_{n,a}k$ is the set of $c$ values such that $v_+ \in \overline{\mathbf{U}'_k}$. Thus each $\mathbf{W}_{n,a}k$ is associated with a unique $\mathbf{U}'_k$. Conveniently, each $\mathbf{U}'_k$ maps to the same image. \begin{lemma} $R_{n,a,c}(\mathbf{U}'_k)=R_{n,a,c}(\mathbf{U}'_0)$ for all $k$ in $\lbrace 0,1,\dots,n-1 \rbrace$. \end{lemma} \begin{proof} The image of the outside and inside curves of each $\mathbf{U}'_k$ will still map to the same curve in $\mathbf{U}$ as before by Lemma \ref{involution_prop}, so we examine the image of the rays of $\mathbf{U}'_k$. \begin{eqnarray*} &~& R_{n,a,c}\left( r\exp \left(i\frac{4\pi k \pm \pi}{2n} \right) \right)\\ & = & \left( r\exp \left(i\frac{4\pi k \pm \pi}{2n} \right) \right) ^{n}+\frac{a}{\left( r\exp \left(i\frac{4\pi k \pm \pi}{2n} \right) \right) ^{n}}+c\\ & = & r^{n}\exp \left(i\frac{4\pi k \pm \pi}{2} \right)+\frac{a}{r^{n}\exp \left(i\frac{4\pi k \pm \pi}{2} \right)}+c\\ & = & r^{n}\exp \left(i\frac{4\pi k \pm \pi}{2} \right)+\frac{a}{r^{n}}\exp \left(i\frac{4\pi k \mp \pi}{2} \right)+c\\ & = & \exp \left(i{2\pi k} \right) \left(r^{n}\exp \left(\pm i\frac{\pi}{2} \right)+\frac{a}{r^{n}}\exp \left(\mp i\frac{\pi}{2} \right) \right)+c \\ & = & \pm \left(r^{n} - \frac{a}{r^{n}}\right)i+c.
\end{eqnarray*} This being equal to the image of the rays of $\mathbf{U}'_0$ yields our result and $R_{n,a,c}(\mathbf{U}'_k)=R_{n,a,c}(\mathbf{U}'_0)$ for all $k$ in $\lbrace 0,1,...,n-1 \rbrace$. \end{proof} Now we continue the process by showing that $R_{n,a,c}$ is polynomial-like of degree two on each $\mathbf{U}'_k$. First we show that each $\mathbf{U}'_k$ is contained in $\mathcal{E}$. \begin{lemma} \label{Uprime_contained_in_ellipse_afixed_case_lemma} Let $n \geq 5$, $1 \leq a \leq 4$ and $c \in \mathbf{W}_{n,a}k$ for $k=0,1...n-1$. Then $\overline{\mathbf{U}'_k} \subset \mathcal{E}$. \end{lemma} \begin{proof} Since each $\mathbf{U}'_k \subset \mathbb{D}(0,2)$, we just need to prove $\overline {\mathbb{D}(0,2)} \subset \mathcal{E}$ under these restrictions of parameters. Since $c$ lies in some $\mathbf{W}_{n,a}k$, we have $v_+ \in \mathbf{U}'_k$. This means $v_+$ has a maximum modulus of 2 so we start with the equation $$ c+2\sqrt{a}=2e^{i\theta},~\theta \in [0,2\pi). $$ Solving for $c$ gets us $c=2e^{i\theta}-2\sqrt{a}$ and we attain bounds for $v_-$ as well: \begin{eqnarray*} & ~ & \lvert v_+ \rvert = 2 \\ & \Rightarrow & \lvert v_- \rvert = \lvert c-2\sqrt{a} \rvert = \lvert ( 2e^{i\theta}- 2\sqrt{a} ) - 2\sqrt{a} \rvert = \sqrt{4+16a-16 \cos(\theta) \sqrt{a}}.\\ \end{eqnarray*} To find the largest value of $\lvert v_- \rvert$, we take a derivative with respect to $\theta$: $$ \dfrac{8\sqrt{a}\sin(\theta)}{\sqrt{4+16a-16 \cos(\theta) \sqrt{a}}}. $$ This means $\lvert v_- \rvert$ has critical points at $\theta=\lbrace 0,\pi\rbrace \in [0,2\pi)$. By the second derivative test, $\theta=\pi$ gives us a maximum modulus of $2+4\sqrt{a}$. As in the proof of Lemma \ref{u'inellipse}, we use the Equation \eqref{Ellipse_Equation_Second_Definition} description of $\mathcal{E}$. 
Using the facts that $\lvert z \rvert \leq 2$, $\lvert c+2\sqrt{a} \rvert \leq 2$ and $\lvert c-2\sqrt{a} \rvert \leq 2 + 4\sqrt{a}$ we find \begin{eqnarray*} &~& \lvert z-v_- \rvert + \lvert z-v_+ \rvert\\ &=& \lvert z-(c-2\sqrt{a}) \rvert + \lvert z-(c+2\sqrt{a}) \rvert\\ & \leq & 2\lvert z \rvert + \lvert c+2\sqrt{a} \rvert + \lvert c-2\sqrt{a} \rvert \\ & \leq & 4 + (2) + (2 + 4\sqrt{a})\\ & = & 8 + 4\sqrt{a} \\ & \leq & 8 + 4\sqrt{4} \\ & = & 16 \\ (\text{since}~n \geq 5)& < & 2^{n+1} \leq ~ 2^{n+1} + \frac{\lvert a \rvert}{2^{n-1}}. \end{eqnarray*} Hence $\mathbf{U}'_k \subset \overline{\mathbb{D}(0,2)} \subset \mathcal{E}$ for each $k=0,1,\dots,n-1$. \end{proof} Continuing further, we prove that each $\mathbf{U}'_k$ is contained in its image $\mathbf{U}$ under the correct restrictions of parameters. \begin{lemma} \label{UPrimeK_Contained_ForAllK_Lemma} Let $n \geq 5$, $1 \leq a \leq \dfrac{(2^{n+1}-8)^2}{16}$, and fix $k$ from $\lbrace 0,1,\dots,n-1 \rbrace$. If~$c \in \mathbf{W}_{n,a}k$, then $\mathbf{U}'_k \subset \mathbf{U}$. \end{lemma} \begin{proof} By the proof of Lemma \ref{Uprime_contained_in_ellipse_afixed_case_lemma}, $\mathbf{U}'_k \subset \mathbb{D}(0,2) \subset \mathcal{E}$ for all $k$, so we need to show the minor axis of $\mathcal{E}$ just intersects $\partial \mathbf{U}'_k$ at worst. \textbf{CASE 1: $\mathbf{U}'_k$ lies in the right half plane:} Here the minor axis of $\mathcal{E}$ is a vertical line since $\frac{\Arg(a)}{2}=0$ and crosses through $c$. Thus the value of $Re(c)$ determines the horizontal position of the leftmost point of $\mathbf{U}$. In the proof of Lemma~\ref{Uprime_contained_in_ellipse_afixed_case_lemma} we found for $\lvert v_+ \rvert = 2$ (its greatest modulus) that $c=2e^{i\theta}-2\sqrt{a}$. Therefore $$ Re(c)=2\cos(\theta) - 2\sqrt{a} \leq 2 - 2 = 0 $$ since $a \geq 1$. Thus if $Re(c)$ is non-positive, then the minor axis of $\mathcal{E}$ is never in the right half dynamical plane.
Therefore the minor axis never enters the right-half plane and $\mathbf{U}'_k \subset \mathbf{U}$ for any $\mathbf{U}'_k$ in this case. (See Figure \ref{fig:uprime_k_cases} (left).) \begin{figure} \caption{Cases 1 (left) and 2 (right) of Lemma~\ref{UPrimeK_Contained_ForAllK_Lemma}.} \label{fig:uprime_k_cases} \end{figure} \textbf{CASE 2: $\mathbf{U}'_k$ lies in the left half plane:} Now $\mathbf{U}'_k$ lies between the real values of $-2$ and $0$. Thus at its greatest, $$ Re(v_+)=0 \Rightarrow Re(c)=-2\sqrt{a} \leq -2 $$ since $a \geq 1$. Here the position of the minor axis will go no farther right than $Re(c) = -2$ and the minor axis only touches $\partial \mathbf{U}'_k$, which is admissible since $\mathbf{U}'_k$ is open. Thus $\mathbf{U}'_k \subset \mathbf{U}$ for any $\mathbf{U}'_k$ lying purely in the left-half plane. (See Figure \ref{fig:uprime_k_cases} (right).) \textbf{CASE 3: $\mathbf{U}'_k$ intersects the imaginary axis:} The angular width of $\mathbf{U}'_k$ is $$ \dfrac{4\pi k+\pi}{2n} - \dfrac{4\pi k-\pi}{2n}=\dfrac{\pi}{n} \leq \dfrac{\pi}{3} $$ since $n \geq 3$. Assuming for now $\mathbf{U}'_k$ intersects the positive imaginary axis, we define a set: $$ \mathbf{S}_{t} = \left\{ z \ \middle| \ \ \lvert z \rvert \leq 2 \ \text{ and } \ \dfrac{\pi}{6}+t\dfrac{\pi}{3} \leq \Arg(z)\leq \dfrac{\pi}{2}+t\dfrac{\pi}{3} \right\} \quad \text{for } t \in [0,1]. $$ For any $t$, $\mathbf{S}_{t}$ is a wedge of angular width $\frac{\pi}{3}$ that intersects the imaginary axis so any $\mathbf{U}'_k$ that intersects the imaginary axis must be contained in a $\mathbf{S}_{t}$ for some $t \in [0,1]$. Figure \ref{fig:uprime_k_case3} is a sketch of $\mathbf{S}_{t}$; the dotted line represents the range of $\mathbf{S}_{t}$. \begin{figure} \caption{A sketch of the wedge $\mathbf{S}_{t}$; the dotted line represents the range of $\mathbf{S}_{t}$ over $t \in [0,1]$.} \label{fig:uprime_k_case3} \end{figure} In determining the \textit{real-diameter} of $\mathbf{S}_{t}$, we calculate the distance between the left-most and right-most points of $\mathbf{S}_{t}$.
The leftmost point lies on the ray of argument $\frac{\pi}{2}+t\frac{\pi}{3}$ while the rightmost point lies on the ray of argument $\frac{\pi}{6}+t\frac{\pi}{3}$. The real values of these points are $r_1\cos \left( \frac{\pi}{2}+t\frac{\pi}{3} \right)$ and $r_2\cos \left( \frac{\pi}{6}+t\frac{\pi}{3} \right)$ respectively, with $0 \leq r_1,r_2 \leq 2$. Note for this range on $t$, $-1 \leq \cos \left( \frac{\pi}{2}+t\frac{\pi}{3} \right) \leq 0$ and $0 \leq \cos \left( \frac{\pi}{6}+t\frac{\pi}{3} \right) \leq 1$. This means that the endpoints of these rays, at $r_1=r_2=2$, will be the farthest left and right points of $\mathbf{S}_{t}$. The real-diameter is the difference of these two values, \begin{equation} \label{set_S_Width_equation} 2\cos \left( \dfrac{\pi}{6}+t\dfrac{\pi}{3} \right) - 2\cos \left( \dfrac{\pi}{2}+t\dfrac{\pi}{3} \right). \end{equation} Using differentiation to find the maximum on Equation \eqref{set_S_Width_equation}, we find the maximum value of the width occurs at $t=\frac{1}{2}$. Plugging this in, we find the width to be 2, thus $\mathbf{S}_{t}$ is no wider than 2 units. This means the ``real-width'' of $\mathbf{U}'_k$ is at most 2 (since $\mathbf{U}'_k \subset \mathbf{S}_{t}$). Remembering that $Re(c)$ is the position of the minor axis of $\mathcal{E}$, we see $$ Re\left( c+2\sqrt{a} \right) - Re(c) = Re(c)+2\sqrt{a} - Re(c)=2\sqrt{a} \geq 2 $$ since $a\geq 1$. Therefore when $v_+$ is at its right-most point on $\partial \mathbf{U}'_k$, the minor axis will be a distance of at least 2 units to the left, a distance greater than or equal to the ``real-width'' of $\mathbf{U}'_k$ (see Figure \ref{fig:uprime_k_minor_axis}). \begin{figure} \caption{The minor axis of $\mathcal{E}$ lies at least $2\sqrt{a} \geq 2$ units to the left of $v_+$, so it cannot cross $\mathbf{U}'_k$.} \label{fig:uprime_k_minor_axis} \end{figure} This means the minor axis of $\mathcal{E}$ will not intersect $\mathbf{U}'_k$.
A symmetrical argument can be used for any $\mathbf{U}'_k$ that intersects the negative imaginary axis, and thus $\mathbf{U}'_k \subset \mathbf{U}$ for all $k=0,1,...,n-1$. \end{proof} Knowing $\mathbf{U}'_k \subset \mathbf{U}$ for all ${k=0,1,...,n-1}$, we meet the requirements for a polynomial-like map. \begin{proposition} \label{R_polynomial_like_Each_UK_prop} With the same hypotheses as the previous lemma, $R_{n,a,c}$ is a polynomial-like map of degree two on $\mathbf{U}'_k$ when $c \in \mathbf{W}_{n,a}k$. \end{proposition} \begin{proof} By design, each $\mathbf{U}'_k$ is centered around a unique critical point of $R_{n,a,c}$, specifically $\lvert a \rvert^{\frac{1}{2n}}e^{i\frac{2\pi k}{n}}$. $R_{n,a,c}$ is also a two-to-one map on each $\mathbf{U}'_k$ by the discussion above. Additionally it is clear that $R_{n,a,c}$ is analytic on each $\mathbf{U}'_k$. With this and Lemma \ref{UPrimeK_Contained_ForAllK_Lemma}, we fulfill Definition \ref{Polynomial-like_definition} and $R_{n,a,c}$ is a polynomial-like map of degree two on each $\mathbf{U}'_k$. \end{proof} \subsection{Parameter Plane Results: $c$ Plane} Knowing $R_{n,a,c}$ is polynomial-like of degree two on each $\mathbf{U}'_k$, we next show how the hypothesis of Theorem \ref{DH_Mandelbrot_existence_Criterion_Theorem} is satisfied, in this case of $a$ fixed, and we locate multiple baby $\mathcal{M}$'s in the $c$-parameter plane. We first pick a $k$ from $\lbrace 0,1,...,n-1 \rbrace$, and observe $\left\{ R_{n,a,c} \right\}_{c \in \mathbf{W}_{n,a}k}$ as the family of functions in Theorem \ref{DH_Mandelbrot_existence_Criterion_Theorem}. We have already shown each member of this family is polynomial-like of degree two on $\mathbf{U}'_k$. Both $\partial \mathbf{U}'_k$ and $\mathbf{U}$ clearly vary analytically with $c$, as well as $R_{n,a,c}(z)$. Last, the implicit definition of $\mathbf{W}_{n,a}k$ makes $v_+$ take a closed loop around $\partial \mathbf{U}'_k$ as $c$ loops around $\partial \mathbf{W}_{n,a}k$. 
Now we satisfy the hypotheses of Theorem \ref{DH_Mandelbrot_existence_Criterion_Theorem} and achieve our result. \begin{theorem} \label{v+_Mandels_exist_multiple_cplane} Given $n \geq 5$, $1 \leq a \leq 4$, and a fixed $k \in \lbrace 0,1,\dots,n-1 \rbrace$, the set of $c \in \mathbf{W}_{n,a}k$ such that the critical orbit of $v_+$ does not escape $\mathbf{U}'_k$ is homeomorphic to $\mathcal{M}$. \end{theorem} See Figure \ref{fig:cplaneWthree} for an example. This is part of Main Theorem \ref{Main_Theorem_CPlane}. \begin{figure} \caption{An example $c$-parameter plane of $R_{n,a,c}$, with a baby $\mathcal{M}$ visible in each $\mathbf{W}_{n,a}k$.} \label{fig:cplaneWthree} \end{figure} \begin{figure} \caption{A zoom on one of the baby $\mathcal{M}$'s from Theorem \ref{v+_Mandels_exist_multiple_cplane} in the $c$-parameter plane.} \label{Mandel_Zoom_From_W_Figure} \end{figure} Now we find more baby $\mathcal{M}$'s, but they are associated with the other critical value $v_-$. First we set up some more symmetries present in $R_{n,a,c}$, similar to the previous case. \begin{lemma} \label{R_Is_Conjugate_over_Real_Axis_Lemma_c_plane} If $a$ is real, then for every positive integer $m$, $$ \overline{R_{n,a,\overline{c}}^m(\overline{z})} = R_{n,a,c}^m(z). $$ \end{lemma} \begin{proof} Proving by induction, we first establish the base case: \begin{eqnarray*} \overline{R_{n,a,\overline{c}}(\overline{z})} ~=~ \overline{\overline{z}^n + \dfrac{a}{\overline{z}^n} + \overline{c}} ~=~ \overline{\overline{z^n}} + \dfrac{a}{\overline{\overline{z^n}}} + \overline{\overline{c}} ~=~ z^n + \dfrac{a}{z^n} + c ~=~ R_{n,a,c}(z). \end{eqnarray*} Assuming the statement is true for $m-1$, we have: \begin{eqnarray*} & ~ & \overline{R_{n,a,\overline{c}}^m(\overline{z})}\\ & = & \overline{R_{n,a,\overline{c}}^{m-1}(R_{n,a,\overline{c}}(\overline{z}))}\\ & = & \overline{R_{n,a,\overline{c}}^{m-1}(\overline{R_{n,a,c}(z)})} \hspace{1cm} \text{(By the base case)}\\ & = & R_{n,a,c}^{m-1}(R_{n,a,c}(z)) \hspace{1cm} \text{(By the induction assumption)}\\ & = & R_{n,a,c}^m(z) \end{eqnarray*} and we have established our result by induction.
\end{proof} Using Lemma \ref{R_Is_Conjugate_over_Real_Axis_Lemma_c_plane} on the critical orbits of $R_{n,a,c}$, we get: \begin{lemma} \label{cParameter_Plane_Symmetric_over_realAxis_lemma} If $a\in \mathbb{R}$, then $\overline{R_{n,a,\overline{c}}^m(v_{\pm})} =R_{n,a,c}^m(v_{\pm})$ for all $m \in \mathbb{N}$. \end{lemma} \begin{proof} Using Lemma \ref{R_Is_Conjugate_over_Real_Axis_Lemma_c_plane} and the fact that $a$ is real, we get: \begin{eqnarray*} \overline{R_{n,a,\overline{c}}^m(v_{\pm})} = \overline{R_{n,a,\overline{c}}^m(\overline{c} \pm 2\sqrt{a})} = \overline{R_{n,a,\overline{c}}^m(\overline{c \pm 2\sqrt{a}})} \\ = R_{n,a,c}^m(c \pm 2\sqrt{a}) = R_{n,a,c}^m(v_{\pm}). \end{eqnarray*} \end{proof} Lemma \ref{cParameter_Plane_Symmetric_over_realAxis_lemma} yields that the dynamics of the critical orbits above the real axis of the $c$-plane will be the same as the dynamics below. Given these above lemmas we now see that for $n$ odd and $a$ real, the $c$-parameter plane of $R_{n,a,c}$ is symmetric over the real-axis and through the origin. Combining these two symmetries yields the next Lemma. \begin{lemma} \label{cParameter_Plane_Symmetric_over_imaginaryAxis_lemma} For $n \geq 3$, odd, and $a$ real, the boundedness locus in the $c$-parameter plane is symmetric across the imaginary axis in the $c$-plane. \end{lemma} We can now finish Main Theorem \ref{Main_Theorem_CPlane}-(i). \begin{theorem} \label{v-_Mandels_exist_multiple_through_symmetry_theorem} If $n \geq 5$, odd, and $1 \leq a \leq 4$, then for each baby $\mathcal{M}$ associated with the critical orbit of $v_+$, there exists a matching baby $\mathcal{M}$ associated with the critical orbit of $v_-$. Each $v_-$ baby $\mathcal{M}$ is a reflection of a $v_+$ baby $\mathcal{M}$ over the imaginary axis of the $c$-plane. \end{theorem} \begin{proof} This is a result of Theorem \ref{v+_Mandels_exist_multiple_cplane} and Lemma \ref{cParameter_Plane_Symmetric_over_imaginaryAxis_lemma}. 
Therefore there are $2n$ baby $\mathcal{M}$'s in the $c$-plane under these restrictions of $n$ and $a$. \end{proof} Now we have shown all the black figures we see in Figure \ref{fig:cplaneWthree} are indeed homeomorphic to $\mathcal{M}$. \section{Extending Results: the case of small $a$} \label{Extend_Results_Section} Having located $2n$ baby $\mathcal{M}$'s, we now look to push the range of fixed parameter values in which they exist, toward smaller values of $a$, approaching the degenerate $a=0$ case. We shall find baby $\mathcal{M}$'s in the $c$-plane for $a$ as small as $\frac{1}{10}$, but this requires increasing the minimum bound on the degree $n$. One could push $a$ even smaller, but then raising $n$ would be necessary. A potential direction for future work would be to study the needed lower bound on $n$ as $a$ decreases to $0$. \subsection{Dynamical Plane Results} Smaller values of $a$ force us to decrease our escape radius of $R_{n,a,c}$, as well as further restrict the degree of the rational functions. We shall restrict the argument of $v_+$ to a small interval around $0$, centering our domain around the positive real axis, the same as $\mathbf{U}'_0$ from before. First some notation. \begin{definition} Let $ A^*(\infty)$ denote the Basin of Attraction of Infinity, also called the escape locus. That is, $z\in A^*(\infty)$ iff the orbit of $z$ under $R_{n,a,c}$ escapes to $\infty$. \end{definition} We will prove for $\frac{1}{10} \leq \lvert a \rvert \leq 1$ that any point outside of a modulus of 1.25 will escape to $\infty$ under $R_{n,a,c}$ (instead of using 2 as before). Given this escape radius and $a$ at its maximum, having $\lvert c \rvert \geq 3.25$ will guarantee that $v_+ \in A^*(\infty)$ (i.e. $v_+$ lies outside the escape radius). \begin{lemma} \label{TighterEscapeRadius} If $n \geq 11$, $\lvert c \rvert \leq 3.25$, $\frac{1}{10} \leq a \leq 1$, then any $z$ such that $\lvert z \rvert > \frac{5}{4}$ will lie in $A^*(\infty)$ of $R_{n,a,c}$. 
\end{lemma} \begin{proof} Once again using the results in \cite{boydschul}, given any $\varepsilon > 0$ and for $n$ sufficiently large, the filled Julia set of $R_{n,a,c}$ is contained in $\mathbb{D} \left( 0,1+\varepsilon \right)$. Anything outside the radius of $1+\varepsilon$ escapes to $\infty$. Similar to Lemmas \ref{EscapeRadius_lemma_aplaneCase} and \ref{EscapeRadius_lemma_cplaneCase}, if $N$ satisfies $(1+\varepsilon)^N > 3\text{Max} \lbrace 1,\lvert a \rvert, \lvert c \rvert \rbrace$, then for $n \geq N$ the orbits of values $\lvert z \rvert > 1+\varepsilon$ must tend to $\infty$. Here, we have $\varepsilon=0.25$, and $3\text{Max} \lbrace 1,\lvert a \rvert, \lvert c \rvert \rbrace = 9.75$ for $a$ and $c$ at their greatest moduli. So when we solve this equation for $N$, we find $N > \dfrac{\ln(9.75)}{\ln(1.25)} \approx 10.2$, thus $n \geq 11$ will satisfy the criterion. \end{proof} Combining this new escape criterion with Lemma \ref{involution_prop} yields that the orbit of any $z$ with $\lvert z \rvert < \frac{4}{5}a^{\frac{1}{n}}$ will tend to $\infty$, thus we get the following lemma. \begin{lemma} Under the same hypotheses as Lemma \ref{TighterEscapeRadius}, the filled Julia set of $R_{n,a,c}$ is contained in the annulus $\mathbb{A} \left( \frac{4}{5}a^{\frac{1}{n}}, \frac{5}{4} \right)$. \end{lemma} With this new restriction on the location of the filled Julia set of $R_{n,a,c}$, we define a set $\mathbf{U}p'$ that takes on the same role as that of $\mathbf{U}'_0$ from earlier. Remember that $\Arg(v_+)$ is restricted to a neighborhood of zero. We prove $R_{n,a,c}$ is polynomial-like of degree two on the set \begin{equation} \boxed{ \mathbf{U}p' =\left\{ z=re^{i\theta} \ \middle| \ \ \dfrac{4}{5}a^{\frac{1}{n}}<~r~<\dfrac{5}{4} \ \text{ and } \ \dfrac{\psi-\pi}{2n}<~\theta~<\dfrac{\psi+\pi}{2n} \right\},} \end{equation} where $\psi = \Arg(a) = 0$ since $a \in \mathbb{R}^+$. We also define $\mathbf{U}p = R_{n,a,c}(\mathbf{U}p')$.
The critical point $a^{\frac{1}{2n}}$ is contained in this new $\mathbf{U}p'$ and is mapped to $v_+$. With this change to the inner and outer boundaries of $\mathbf{U}p'$, the image under $R_{n,a,c}$ is still half an ellipse cut by the minor axis and centered at $c$ but now has $$ \text{semi-major~axis~length}: ~ \left( \dfrac{5}{4} \right)^n + {\left( \dfrac{4}{5} \right)^n}\lvert a \rvert $$ $$ \text{semi-minor~axis~length}: ~ \left( \dfrac{5}{4} \right)^n - {\left( \dfrac{4}{5} \right)^n}\lvert a \rvert $$ We refer to this new ellipse as $\mathcal{L}$ and can use a different representation: \begin{equation} \label{New_Ellipse_Equation_Definition} \boxed{ \mathcal{L} = \left\{ \begin{array}{lcl} & x= & \left( \left( \dfrac{5}{4} \right)^n + {\left( \dfrac{4}{5} \right)^n}\lvert a \rvert \right)\cos(n\theta) \\ & y= & \left(\left( \dfrac{5}{4} \right)^n - {\left( \dfrac{4}{5} \right)^n}\lvert a \rvert\right)\sin(n\theta) \end{array} \right\} }~. \end{equation} Similar to before, we must show that $R_{n,a,c}$ is polynomial-like of degree two on $\mathbf{U}p'$. First we will show $\mathbf{U}p'$ is contained in $\mathcal{L}$, and then show further containment of $\mathbf{U}p'$ inside $\mathbf{U}p$. \begin{lemma} \label{tighter_radius_Uprime_in_ellipse_lemma} $\mathbf{U}p' \subset \mathcal{L}$ for $n \geq 11$, $\frac{1}{10} \leq a \leq 1$, and $c$ such that $\lvert v_+ \rvert \leq \frac{5}{4}$. \end{lemma} \begin{proof} Similar to the proof of Lemma \ref{Uprime_contained_in_ellipse_afixed_case_lemma}, we shall define the ellipse by its alternate definition (the foci are $c \pm 2\sqrt{a}$): \begin{equation} \left\{ z \ \middle| ~ \lvert z-(c-2\sqrt{a}) \rvert + \lvert z-(c+2\sqrt{a}) \rvert \leq 2\left( \left( \dfrac{5}{4} \right)^n + {\left( \dfrac{4}{5} \right)^n}\lvert a \rvert \right) \right\}. \end{equation} Since $\mathbf{U}p' \subset \mathbb{D}(0,\frac{5}{4})$, showing $\mathbb{D}(0,\frac{5}{4})$ is contained in the ellipse will suffice.
We first determine the largest possible values of $\lvert v_{\pm} \rvert$. We start with $\lvert v_+ \rvert = \frac{5}{4}$ on the outer boundary and find the largest possible $\lvert v_- \rvert$: \begin{eqnarray*} \lvert v_+ \rvert &=& \lvert c + 2\sqrt{a} \rvert = \dfrac{5}{4} ~ \Rightarrow ~ c= \dfrac{5}{4}e^{i\theta} - 2\sqrt{a}\\ &\Rightarrow & ~ \lvert v_- \rvert = \lvert c - 2\sqrt{a} \rvert = \left| \dfrac{5}{4}e^{i\theta} - 4\sqrt{a} \right| \\ \end{eqnarray*} for some $\theta \in [0,2\pi)$. Using derivatives we find the maximum of $\lvert v_- \rvert$ occurs at $\theta=\pi$, so the maximum $\lvert v_- \rvert$ is $\frac{5}{4} + 4\sqrt{a}$, and we have bounds on the foci. Now we have: \begin{eqnarray*} & &\lvert z-(c-2\sqrt{a}) \rvert + \lvert z-(c+2\sqrt{a}) \rvert\\ & \leq & 2\lvert z \rvert + \lvert c+2\sqrt{a} \rvert + \lvert c-2\sqrt{a} \rvert \\ & \leq & 2\left(\dfrac{5}{4}\right) + \left(\dfrac{5}{4}\right) + \left(\dfrac{5}{4} + 4\sqrt{a}\right)\\ & = & 5 + 4\sqrt{a} \\ & \leq & 5+ 4 \hspace{1cm} (\text{since}~a \leq 1)\\ & \leq & 2\left( \dfrac{5}{4} \right)^{11} \leq ~ 2\left( \left( \dfrac{5}{4} \right)^n + {\left( \dfrac{4}{5} \right)^n}\lvert a \rvert \right). \\ \end{eqnarray*} Thus any point $z \in \mathbf{U}p' \subset \mathbb{D}(0,\frac{5}{4})$ is contained in this ellipse. \end{proof} With $\mathbf{U}p'$ contained in the ellipse, we just need to show the minor axis of the ellipse does not intersect $\mathbf{U}p'$. This will guarantee that $\mathbf{U}p' \subset \mathbf{U}p$. \begin{lemma} \label{tighter_radius_uprime_in_U} For $n \geq 11$, $\frac{1}{10} \leq a \leq 1$, and $c$ such that $\lvert v_+ \rvert \leq \frac{5}{4}$, $\mathbf{U}p' \subset \mathbf{U}p$. \end{lemma} \begin{proof} $\mathbf{U}p$ is the half of the ellipse that contains $v_+$, which is the right-half in this case. We just have to show the minor axis of the ellipse does not intersect $\mathbf{U}p'$ and in fact lies to the left of $\mathbf{U}p'$.
The minor axis is a straight vertical line centered at $c$. Its horizontal position is $Re(c)$, a value that depends on $v_+$. At its greatest modulus we observe $v_+$ on the outer arc of $\mathbf{U}p'$, so $c+2\sqrt{a} = \frac{5}{4}e^{i\theta}$ for $\lvert \theta \rvert \leq \frac{\pi}{2n}$, thus $$ Re(c) = \dfrac{5}{4}\cos(\theta) - 2\sqrt{a}~\leq~\dfrac{5}{4} - 2\sqrt{a}. $$ Now we show the minor axis lies to the left of the inner arc of $\mathbf{U}p'$. This happens if $$ Re(c) < Re \left( \dfrac{4}{5}a^{\frac{1}{n}}e^{i\theta} \right),~i.e.~ \dfrac{5}{4} - 2\sqrt{a} < \dfrac{4}{5}a^{\frac{1}{n}}\cos(\theta). $$ Since $n \geq 11$, $\cos(\theta)$ will be minimal at $\theta = \pm \frac{\pi}{2n}$ and thus the minimum value of $Re \left( \frac{4}{5}a^{\frac{1}{n}}e^{i\theta} \right)$ will be $\frac{4}{5}a^{\frac{1}{n}}\cos\left( \dfrac{\pi}{2n} \right)$ with respect to $\theta$. To minimize further, we take a derivative with respect to $n$ and find $$ \dfrac{\partial}{\partial_{n}}\left( \dfrac{4}{5}a^{\frac{1}{n}}\cos\left( \dfrac{\pi}{2n} \right) \right) = \dfrac{2a^{1/n}\left( \pi\sin \left( \dfrac{\pi}{2n} \right) - 2\cos\left( \dfrac{\pi}{2n} \right)\ln(a) \right)}{5n^2} > 0 $$ since $a \leq 1$. Therefore the derivative is positive and the horizontal position of the inner arc of $\mathbf{U}p'$ increases as n increases. As $\frac{4}{5}a^{\frac{1}{n}}\cos\left( \frac{\pi}{2n} \right)$ increases with $a$ as well, the minimum value of this equation occurs at the minimal values of $n=11$ and $a= \frac{1}{10}$. Thus $$ Re \left( \dfrac{4}{5}a^{\frac{1}{n}}e^{i\theta} \right) \geq \dfrac{4}{5}\left(\frac{1}{10}\right)^{\frac{1}{11}}\cos\left( \dfrac{\pi}{22} \right) \approx 0.6423. $$ At this point the minor axis will be located at $\frac{5}{4}-2\sqrt{\frac{1}{10}} \approx 0.618$, and we have that the minor axis lies to the left of $\mathbf{U}p'$. The horizontal position of the minor axis does not depend on $n$, and moves to the left as $a$ increases. 
With this, it is assured that the minor axis of the ellipse will never pass through $\mathbf{U}p'$ and $\mathbf{U}p' \subset \mathbf{U}p$. \end{proof} \begin{proposition} \label{R_polynomial_Like_on_tighter_UPrime} Under the same assumptions as Lemma \ref{tighter_radius_uprime_in_U}, $R_{n,a,c}: \mathbf{U}p' \rightarrow \mathbf{U}p$ is a polynomial-like map of degree two. \end{proposition} \begin{proof} $R_{n,a,c}$ is analytic on $\mathbf{U}p'$ by choice of the boundaries, as well as two-to-one with a single critical point. Thus, $R_{n,a,c}$ satisfies the definition of a polynomial-like map of degree two by Lemma \ref{tighter_radius_uprime_in_U}. \end{proof} \subsection{Parameter Plane Results} Now that we have a family of degree two polynomial-like maps, we define a $\mathcal{W}_{n,a}$ in this case by \begin{equation} \boxed{ \mathcal{W}_{n,a} = \left\{ c \ \middle| \ \ \dfrac{4}{5}a^{1/n} \leq \left| v_+ \right| \leq \dfrac{5}{4} ~~ and ~~ \dfrac{-\pi}{2n} \leq \Arg(v_+) \leq \dfrac{\pi}{2n} \right\}} \label{eqn:defnWp} \end{equation} to invoke Theorem \ref{DH_Mandelbrot_existence_Criterion_Theorem} and follow the same criteria to show that a baby $\mathcal{M}$ is contained in this $\mathcal{W}_{n,a}$. \begin{theorem} \label{Tighter_radius_mandel_exists_in_cPlane_Theorem} For $\frac{1}{10} \leq a \leq 1$ and $n \geq 11$, the set of $c$-values contained in $\mathcal{W}_{n,a}$ such that the critical orbit of $v_+$ does not escape $\mathbf{U}p'$ is homeomorphic to $\mathcal{M}$. \end{theorem} \begin{proof} By design of $\mathcal{W}_{n,a}$ it is clear that as $c$ loops around $\partial \mathcal{W}_{n,a}$, we have that $v_+$ will make a loop around $\partial \mathbf{U}p' \subset \mathbf{U}p - \mathbf{U}p'$. $R_{n,a,c}$ is polynomial-like of degree two on $\mathbf{U}p'$ by Proposition \ref{R_polynomial_Like_on_tighter_UPrime} and thus we have satisfied the criteria of Theorem \ref{DH_Mandelbrot_existence_Criterion_Theorem}. 
Thus there exists a baby $\mathcal{M}$ in $\mathcal{W}_{n,a}$. \end{proof} We have now extended the interval of $a$-values in which a baby $\mathcal{M}$ associated with $v_+$ exists in the $c$-plane, but restricted on the degree of $R_{n,a,c}$. Because of the existing symmetries in the $c$-plane, we get a baby $\mathcal{M}$ associated with $v_-$ under these criteria on $a$ and $n$ as well. \begin{corollary} \label{v-_mandel_exists_in_cPlane_all_aValues_corollary} For $n \geq 11$, odd, and $\frac{1}{10} \leq a \leq 1$, there exists a baby $\mathcal{M}$ associated with $v_-$ lying in the $c$-parameter plane of $R_{n,a,c}$, within the reflection over the imaginary axis of the set $\mathcal{W}_{n,a}$. \end{corollary} \begin{proof} This follows from the existence of a baby $\mathcal{M}$ associated with $v_+$ in Theorem~\ref{Tighter_radius_mandel_exists_in_cPlane_Theorem}, as well as the symmetry given in Lemma \ref{cParameter_Plane_Symmetric_over_imaginaryAxis_lemma}. Therefore the baby $\mathcal{M}$ associated with $v_-$ is a reflection of the baby $\mathcal{M}$ associated with $v_+$ from Theorem~\ref{Tighter_radius_mandel_exists_in_cPlane_Theorem}, over the imaginary axis of the $c$-parameter plane of $R_{n,a,c}$. \end{proof} In Figure \ref{CPlane_n11_a022_With_Zoom_2Mandels_Figure} we can see an example of these baby $\mathcal{M}$'s existing under these new criteria. A zoom in is necessary as the baby $\mathcal{M}$'s shrink as $n$ grows. The green baby $\mathcal{M}$ represents the orbit of $v_-$ and the purple baby $\mathcal{M}$ represents the orbit of $v_+$. \begin{figure} \centering \caption{The $c$-parameter plane of $R_{n,a,c}$ for $n=11$, with a zoom in on the two baby $\mathcal{M}$'s: green for the orbit of $v_-$ and purple for the orbit of $v_+$.} \label{CPlane_n11_a022_With_Zoom_2Mandels_Figure} \end{figure} \subsection{Mandelbrots Passing through each other} Now we will take a closer look at two baby $\mathcal{M}$'s which intersect in the $c$-plane and pass through one another along a line of $a$ values. 
We define the center of a baby $\mathcal{M}$ in the $c$-parameter plane to be the $c$-value for which the critical point is the same as the critical value; i.e., the critical point is a fixed point of $R_{n,a,c}$. If we wish to find the center of a baby $\mathcal{M}$ associated with $v_+$, then we are solving the equation $a^{1/2n} = c + 2\sqrt{a}$ for $c$, which is $$ c_+ = a^{1/2n} - 2\sqrt{a}. $$ By Lemma \ref{cParameter_Plane_Symmetric_over_imaginaryAxis_lemma}, the baby $\mathcal{M}$ associated with $v_-$ is just a reflection over the imaginary axis of the $c$-parameter plane. We reflect $c_+$ over the imaginary axis to find the center of the baby $\mathcal{M}$ associated with $v_-$ to be $$ c_- = 2\sqrt{a} - a^{1/2n}. $$ Using this, we can give an exact case when the two baby $\mathcal{M}$'s overlap. \begin{proposition} \label{Mandels_With_Same_Center_Proposition} For $n \geq 11$, odd, and $a = \left( \frac{1}{4} \right)^{\frac{n}{n-1}}$, two baby $\mathcal{M}$'s in the $c$-parameter plane associated with $v_+$ and $v_-$ respectively overlap and have the same center. Further, this same center is the origin of the $c$-plane. \end{proposition} \begin{proof} We first solve the equation $c_+ = c_-:$ $$ a^{1/2n} - 2\sqrt{a} = 2\sqrt{a} - a^{1/2n} \Rightarrow 2\sqrt{a} = a^{1/2n} \Rightarrow a = 2^{\frac{1}{\frac{1}{2n} - \frac{1}{2}}} = \left( \dfrac{1}{4} \right)^{\dfrac{n}{n-1}} $$ and find when we plug this value into $c_+$: \begin{eqnarray*} &~& \left(\left( \dfrac{1}{4} \right)^{\dfrac{n}{n-1}} \right)^{1/2n}-2\sqrt{\left( \dfrac{1}{4} \right)^{\dfrac{n}{n-1}}}\\ &=& \left( \dfrac{1}{2} \right)^{\dfrac{1}{n-1}} -\left(\dfrac{1}{2} \right)^{\dfrac{n}{n-1}-1}\\ &=& \left( \dfrac{1}{2} \right)^{\dfrac{1}{n-1}} - \left(\dfrac{1}{2} \right)^{\dfrac{1}{n-1}}\\ &=& 0. 
\end{eqnarray*} \end{proof} For any odd $n \geq 11$ we are thus given an $a$-value for which two baby $\mathcal{M}$'s of opposite critical orbits will intersect at the origin and have the same center (see Figure \ref{Overlapping_Mandels_Same_Center_n11} for an example). Knowing that these baby $\mathcal{M}$'s intersect allows us to find an interval of $a$-values, such that as $a$ varies continuously from one end to the other, the baby $\mathcal{M}$ sets will pass completely through each other. \begin{figure} \centering \caption{Two overlapping baby $\mathcal{M}$'s with the same center at the origin of the $c$-plane, for $n=11$.} \label{Overlapping_Mandels_Same_Center_n11} \end{figure} \begin{lemma} \label{Mandels_intersect_and_lie_completely_on_real_axis_lemma} For any odd $n \geq 11$, and $\frac{1}{10} \leq a \leq 4$, two baby $\mathcal{M}$'s associated with $v_-$ and $v_+$ are centered on the real axis of the $c$-plane, and move along it continuously as $a$ varies continuously. \end{lemma} \begin{proof} In Lemma \ref{cParameter_Plane_Symmetric_over_realAxis_lemma}, we showed these baby $\mathcal{M}$'s are symmetric about the real axis when $a$ is positive and real. Thus, as $a$ varies along the real axis, the baby $\mathcal{M}$'s vary, keeping their center and only axis of symmetry in the real axis. \end{proof} Knowing that the centers of these two baby $\mathcal{M}$'s stay on the real axis as they change position, we can prove that the two sets actually pass completely through one another as $a$ changes continuously in the range of interest. \begin{proposition} Let $n \geq 11$ be odd. As $a$ increases from $\frac{1}{10}$ to $1$, two baby $\mathcal{M}$'s associated with $v_-$ and $v_+$ move along the real axis of the $c$-parameter plane of $R_{n,a,c}$ in opposite directions and completely pass through each other. 
\end{proposition} \begin{proof} By Theorem \ref{Tighter_radius_mandel_exists_in_cPlane_Theorem} the baby $\mathcal{M}$ associated with $v_+$ lies in $\mathcal{W}_{n,a}$ with the tighter radius, this means that $$ \dfrac{4}{5}a^{1/n} \leq \lvert c + 2\sqrt{a} \rvert \leq \dfrac{5}{4} ~ \Rightarrow ~ Re(c) \in \left[\dfrac{4}{5}a^{1/n} - 2\sqrt{a}~,~\dfrac{5}{4}-2\sqrt{a}\right]. $$ We refer to this interval as $\mathbf{I}_1$ and call the respective endpoints $\omega_1$ and $\omega_2$. So, any real $c$ value in this baby $\mathcal{M}$ must lie in $\mathbf{I}_1$. Because the other baby $\mathcal{M}$ associated with $v_-$ is a reflection of the first over the imaginary axis, the real values that lie in this baby $\mathcal{M}$ are just a reflection of $\mathbf{I}_1$ across the imaginary axis and so the real values of the baby $\mathcal{M}$ associated with $v_-$ lie in $$ \mathbf{I}_2 = \left[-\omega_2~,-\omega_1 \right] = \left[2\sqrt{a} - \dfrac{5}{4}~,~2\sqrt{a} - \dfrac{4}{5}a^{1/n} \right]. $$ Now both of these intervals are well defined as long as $a < \left(\frac{25}{16} \right)^n$ which is true here as $a \leq 1$. Now as we start at $a=\frac{1}{10}$ and $n=11$, $\omega_1 > 0$ and increases with $n$ so $-\omega_1 < \omega_1$ for $a=\frac{1}{10}$ and all $n \geq 11$ and therefore $\mathbf{I}_2$ lies to the left of $\mathbf{I}_1$. As $a$ increases $\omega_1$ and $\omega_2$ decrease in value, which conversely means that $-\omega_1$ and $-\omega_2$ increase. Therefore as $a$ increases, $\mathbf{I}_1$ will move to the left as $\mathbf{I}_2$ moves to the right. Now at $a=1$, $\omega_2 = -\frac{3}{4}$ which is less than $-\omega_2$. Since $\omega_2$ does not depend on $n$, then for all $n \geq 11$ and $a=1$, $\mathbf{I}_1$ lies to the left of $\mathbf{I}_2$. Therefore the two intervals have passed through each other as they are both intervals of real values. 
We know that they have to intersect at at least one point since Proposition \ref{Mandels_With_Same_Center_Proposition} gives an $a$-value for which both baby $\mathcal{M}$'s have the same center. Lemma \ref{Mandels_intersect_and_lie_completely_on_real_axis_lemma} shows us that the two baby $\mathcal{M}$'s had to have passed through each other since their centers never left the real axis. \end{proof} Figure \ref{Phases_of_mandels_intersecting_figure} illustrates some different phases of the baby $\mathcal{M}$'s passing through each other in $c$-plane slices. \begin{figure} \centering \caption{Different phases of the two baby $\mathcal{M}$'s passing through each other, shown in $c$-plane slices.} \label{Phases_of_mandels_intersecting_figure} \end{figure} \end{document}
\begin{document} \begin{frontmatter} \title{Weighted operator least squares problems and the $J$-trace in Krein spaces} \author[FI,IAM]{Maximiliano Contino\corref{ca}} \ead{[email protected]} \author[FI,IAM]{Alejandra Maestripieri} \ead{[email protected]} \author[IAM,IV,UNGS]{Stefania Marcantognini} \ead{[email protected]} \cortext[ca]{Corresponding author} \address[FI]{ Facultad de Ingenier\'{\i}a, Universidad de Buenos Aires\\ Paseo Col\'on 850 \\ (1063) Buenos Aires, Argentina } \address[IAM]{ Instituto Argentino de Matem\'atica ``Alberto P. Calder\'on'' \\ CONICET\\ Saavedra 15, Piso 3\\ (1083) Buenos Aires, Argentina } \address[IV]{ Departamento de Matem\'atica -- Instituto Venezolano de Investigaciones Cient\'ificas \\ Km 11 Carretera Panamericana \\ Caracas, Venezuela } \address[UNGS]{ Universidad Nacional de General Sarmiento -- Instituto de Ciencias \\ Juan Mar\'ia Gutierrez \\ (1613) Los Polvorines, Pcia. de Buenos Aires, Argentina } \begin{abstract} Given $B, C$ and $W$ operators in the algebra $L(\mathcal{H})$ of bounded linear operators on the Krein space $\mathcal{H},$ the minimization problem $\min \ (BX - C)^{\#}W(BX -C),$ for $X\in L(\mathcal{H}),$ is studied when the weight $W$ is selfadjoint. The analogous maximization and min-max problems are also considered. Complete answers to these problems and to those naturally associated to trace class operators on Krein spaces are given. \end{abstract} \begin{keyword} Weighted operator approximation \sep Krein spaces \sep oblique projections 47A58 \sep 47B50 \sep 41A65 \end{keyword} \end{frontmatter} \section{Introduction} In estimation theory one would like to approximate the values of certain quantities that are not directly observable from the values of some sampled measurements. 
The solution to the problem of estimating the unobservable quantities given the observable ones depends on the model one uses to describe the relation between them and the optimality criterion one chooses to determine the desired estimates. The weighted least squares method is the standard approach in situations when it may not be feasible to assume that every observation should be treated equally. It works by incorporating a {\emph{weight}} to each data point as a way to describe its influence over the estimates. The Krein space estimation theory developed by Hassibi et al. \cite{HassibipartI} has brought into play {\emph{indefinite}} weighted least squares problems. Some of those problems were studied in their ``pointwise'' form, for linear operators on infinite-dimensional spaces in \cite{GiribetKrein} and, for matrices with complex entries in \cite{HassibipartII, Hassibietal}. Roughly speaking, if one is given an infinite or finite-dimensional linear space $\mathcal{H}$, a weight $W,$ bounded linear operators or matrices $B, C$, and a vector $y\in \mathcal{H}$, then the problem is to find an ``extremal'' vector $x_0\in \mathcal{H}$ for the quadratic form $[W(Bx - Cy), Bx-Cy]$ with $\K{ \ }{ \ }$ a Krein space inner product on $\mathcal{H}$. If $R(B)$, the range of $B$, is closed and $W$-nonnegative, the vector $x_0$ one seeks minimizing the above quadratic form is called a {\emph{weighted indefinite least squares solution of}} $Bx = Cy$. In this work we look instead for a ``global" solution of the problem, meaning a bounded linear operator $X_0$ acting as a $W${\emph{-inverse of}} $B$. Broadly speaking, we consider a Krein space $(\mathcal{H}, \K{ \ }{ \ })$, a selfadjoint operator $W$ on $\mathcal{H}$ and bounded linear operators $B, C$ on $\mathcal{H}$. We then determine whether there exists $X_0$ such that, for each $y \in \mathcal{H}$, $X_0 y$ is a weighted indefinite least squares solution of $Bx=Cy$. 
For a positive weight $W$, the notion of $W$-inverse was introduced by Mitra and Rao in the case of matrices \cite{Mitra}, and later on extended to Hilbert space operators in \cite{WGI, Contino}. Here we say that $X_0$ is an {\emph{indefinite minimum solution of $BX-C=0$ with weight $W$}} if $X_0$ realizes the minimum of $(BX-C)^{\#}W(BX-C)$ as $X$ runs over $L(\mathcal{H})$, the space of the bounded linear operators on $\mathcal{H}$, where the order is induced by the cone of $\K{ \ }{ \ }$-positive operators of $L(\mathcal{H})$. Necessary and sufficient conditions for the existence of such a solution are given and we show that the solution of $BX - I=0$, if it exists, is none other than the Schur complement of $W$ to $R(B)$; i.e., $$W_{/ [R(B)]} = \underset{X \in L(\mathcal{H})}{\min} \ (BX-I)^{\#}W(BX-I).$$ Given the $W$-indefiniteness of the range of $B$, it is natural to consider min-max problems. In fact, any factorization of $B$ as the sum of two operators, one with $W$-nonnegative range and the other with $W$-nonpositive range, yields a min-max problem. As with the minimization problem, we give necessary and sufficient conditions for the solvability of the min-max problem and we obtain another characterization of the Schur complement. Furthermore, even though the decomposition of $B$ depends on the chosen signature operator $J$, the solution to the min-max problem does not. In the Hilbert space setting an associated minimizing problem can be considered in the context of unitarily invariant norms, particularly, in the $p$-Schatten class norms $\| \ \|_p,$ in which case -- and under the assumption that $W$ is positive -- it takes the form of the Procrustes problem $\underset{X \in L(\mathcal{H})}{\min} \ \|W^{1/2}(BX-C)\|_p$. Indeed, these two kinds of problems are closely related, as \cite{Nashed, Gold1, Contino} have shown. 
Inspired by the work of Kintzel on an indefinite Procrustes problem expressed as a max-min problem on traces of matrices \cite{Ulric}, we define a $J$-trace, $\tr_J$, and study the corresponding min-max problem. We find that, if the problem is solvable for every $C$, the solution is unique and equals $\tr_J(C^{\#}W_{/[ R(B)]}C)$. In addition, if $\tr_J(T)< \infty$ for some signature operator $J$, then $\tr_{J'}(T) <\infty$ for any other signature operator $J',$ though it may happen that $\tr_J(T) \ne \tr_{J'}(T)$. Consequently, the min-max value for the $\tr_J$ depends on $J$, but the set of solutions where this value is attained for each $J,$ is independent of $J$. The paper may be thought of as the second part of \cite{Contino3}, for it contains the weighted versions of the operator least squares problems we studied there. There the fundamental tool for solving the least squares problems was given by the {\emph{indefinite inverse}}. In this work the {\emph{Schur complement}}, as defined and studied in \cite{Contino4}, plays this role. The paper has four additional sections. Section 2 fixes notation and recalls the basics of Krein spaces, Section 3 gives a brief account of the fundamental results on the Schur complement from \cite{Contino4}. In Section 4 we turn to weighted least squares problems. Subsection 4.1 is entirely devoted to the weighted min-max problems and contains the main results. Section 5 extends the notion of the trace of an operator to the Krein space setting, and applies the results obtained in the previous section to trace-type min and min-max problems for operators. \section{Preliminaries} We assume that all Hilbert spaces are complex and separable. 
If $\mathcal{H}$ is a Hilbert space, $L(\mathcal{H})$ stands for the algebra of bounded linear operators on $\mathcal{H}$ and $L(\mathcal{H})^+$ for the cone of positive semidefinite operators in $L(\mathcal{H}).$ We write $CR(\mathcal{H})$ to indicate the subset of $L(\mathcal{H})$ of operators with closed range. The range and nullspace of any $A \in L(\mathcal{H})$ are denoted by $R(A)$ and $N(A)$, respectively. Given a subset $\mathcal{T} \subseteq \mathcal{H},$ the preimage of $\mathcal{T}$ under $A$ is denoted by $A^{-1}(\mathcal{T})$ so $A^{-1}(\mathcal{T})=\{ h \in \mathcal{H}: \ Ah \in \mathcal{T} \}.$ Given two operators $S, T \in L(\mathcal{H}),$ the notation $T \leq_{\mathcal{H}} S$ signifies that $S-T \in L(\mathcal{H})^+.$ For any $T \in L(\mathcal{H}),$ $\vert T \vert := (T^*T)^{1/2}$ is the modulus of $T$ and $T=U\vert T\vert$ is the polar decomposition of $T,$ with $U$ the partial isometry such that $N(U)=N(T).$ The direct sum of two closed subspaces $\mathcal{M}$ and $\mathcal{N}$ of $\mathcal{H}$ is represented by $\mathcal{M} \dot{+} \mathcal{N}.$ If $\mathcal{H}$ is decomposed as $\mathcal{H}=\mathcal{M} \dot{+} \mathcal{N},$ the projection onto $\mathcal{M}$ with nullspace $\mathcal{N}$ is denoted by $P_{\mathcal{M} {\mathbin{\!/\mkern-3mu/\!}} \mathcal{N}}$ and abbreviated $P_{\mathcal{M}}$ when $\mathcal{N} = \mathcal{M}^{\perp}.$ $\mathcal{Q}$ indicates the subset of oblique projections in $L(\mathcal{H}),$ namely, $\mathcal{Q}:=\{Q \in L(\mathcal{H}): Q^{2}=Q\}.$ \subsection*{\textbf{Krein Spaces}} A linear space $\mathcal H$ endowed with an indefinite inner product (a Hermitian sesquilinear form) $\K{ \ }{ \ }$ is a {\emph{Krein space}} if $\mathcal {H}$ is the algebraic direct sum of two subspaces $\mathcal {H}_+$ and $\mathcal {H}_-$ such that: (1)~$\K{x_+}{x_-} =0$ for every $x_\pm \in \mathcal{H}_\pm$, and (2)~$(\mathcal{H}_+, \K{ \ }{ \ })$ and $(\mathcal{H}_-, -\K{ \ }{ \ })$ are Hilbert spaces. 
We write \begin{equation} \label{fundamentaldecom} \mathcal{H}=\mathcal{H}_+ \ [\dotplus] \ \mathcal{H}_- \end{equation} to indicate that the Krein space $\mathcal{H}$ is the $\K{ \ }{ \ }$-orthogonal direct sum of $\mathcal{H}_+$ and $\mathcal{H}_-$, and we say that (\ref{fundamentaldecom}) is a {\emph{fundamental decomposition}} of $\mathcal{H}$. In general, all geometrical notions on a Krein space are to be understood with respect to the indefinite inner product. In particular, the {\emph{orthogonal companion}} of a set $\mathcal{T}$ in $\mathcal{H}$, which we denote by $\mathcal{T}^{[\perp]}$, is the subspace of those $h \in \mathcal{H}$ such that $[h,x] = 0$ for all $x\in \mathcal{T}$. Every fundamental decomposition $\mathcal{H}=\mathcal{H}_+ \ [\dotplus] \ \mathcal{H}_-$ of a given Krein space $(\mathcal{H}, \K{ \ }{ \ })$ induces a Hilbert space inner product $\PI{ \ }{ \ }$ on $\mathcal H$. Namely, $\langle x , y \rangle :=[x_+,y_+] - [x_-,y_-],$ for $x, y \in \mathcal H$, $x = x_+ + x_-$ and $y = y_++y_-.$ In this situation the operator $J$ defined on $x =x_+ +x_-$ by $Jx:= x_+ - x_-$ is called a {\emph{signature operator}} of $\mathcal H$. If $\mathcal{H}$ is a Krein space, $L(\mathcal{H})$ stands for the vector space of all the linear operators on $\mathcal{H}$ which are bounded in an associated Hilbert space $(\mathcal{H}, \PI{ \ }{ \ }).$ Since the norms generated by different fundamental decompositions of a Krein space $\mathcal{H}$ are equivalent (see, for instance, \cite[Theorem 7.19]{Azizov}), $L(\mathcal{H})$ does not depend on the chosen underlying Hilbert space. The symbol $T^{\#}$ stands for the $\K{ \ }{ \ }$-adjoint of $T \in L(\mathcal{H})$. The set of the operators $T \in L(\mathcal{H})$ such that $T=T^{\#}$ is denoted $L(\mathcal{H})^s$. If $T\in L(\mathcal{H})^s$ and $\K{Tx}{x} \geq 0 \mbox{ for every } x \in \mathcal{H},$ $T$ is said to be {\emph{positive}}; the notation $S \leq T$ signifies that $T-S$ is positive. 
Given $W \in L(\mathcal{H})^s$ and $\mathcal{S}$ a closed subspace of $\mathcal{H},$ we say that $\mathcal{S}$ is $W$-\emph{positive} if $\K{Ws}{s} > 0$ for every $s \in \mathcal{S}, \ s\not =0.$ $W$-\emph{nonnegative}, $W$-\emph{neutral}, $W$-\emph{negative} and $W$-\emph{nonpositive} subspaces are defined likewise. If $\mathcal{S}$ and $\mathcal{T}$ are two closed subspaces of $\mathcal{H},$ the notation $\mathcal{S} \ [\dotplus]_{W} \ \mathcal{T}$ is used to indicate the direct sum of $\mathcal{S}$ and $\mathcal{T}$ when, additionally, $\K{Ws}{t}=0 \mbox{ for every } s \in \mathcal{S} \mbox{ and } t \in \mathcal{T}.$ Standard references on Krein space theory are \cite{AndoLibro}, \cite{Azizov} and \cite{Bognar}. We also refer to \cite{DR} and \cite{DR1} as authoritative accounts of the subject. \section{Schur complement in Krein Spaces} In this section we include several results on the Schur complement in Krein spaces that will be useful along the paper. For the proofs the reader is referred to \cite{Contino4}. The notion of Schur complement (or shorted operator) of $A$ to $\mathcal{S}$ for a positive operator $A$ on a Hilbert space $\mathcal{H}$ and $\mathcal{S} \subseteq \mathcal{H}$ a closed subspace, was introduced by M.G.~Krein \cite{Krein}. He proved that the set $\{ X \in L(\mathcal{H}): \ 0\leq_{\mathcal{H}} X\leq_{\mathcal{H}} A \mbox{ and } R(X)\subseteq \mathcal{S}^{\perp}\}$ has a maximum element, which he defined as the {{Schur complement}} $A_{/ \mathcal{S}}$ of $A$ to $\mathcal{S}.$ This notion was later rediscovered by Anderson and Trapp \cite{Shorted2}. 
If $A$ is represented as the $2\times 2$ block matrix $\begin{pmatrix} a & b\\ b^* & c \end{pmatrix}$ with respect to the decomposition of $\mathcal{H} = \mathcal{S} \oplus \mathcal{S}^{\perp},$ they established the formula $$A_{/ \mathcal{S}}= \begin{pmatrix} 0 & 0\\ 0& c - y^*y\end{pmatrix}$$ where $y$ is the unique solution of the equation $b = a^{1/2} x$ such that the range inclusion $R(y) \subseteq \overline{R(a)}$ holds. The solution always exists because $A$ is positive: in this case, $a$ is also positive and the range inclusion $R(b) \subseteq R(a^{1/2})$ holds. In \cite{AntCorSto06} Antezana et al., extended the notion of Schur complement to any bounded operator $A$ satisfying a weak complementability condition with respect to a given pair of closed subspaces $\mathcal{S}$ and $\mathcal{T},$ by giving an Anderson-Trapp type formula. In particular, if $A$ is a bounded selfadjoint operator, $\mathcal{S}=\mathcal{T}$ and $A=\begin{pmatrix} a & b\\ b^* & c \end{pmatrix},$ this condition reads $R(b) \subseteq R(\vert a \vert^{1/2}),$ which as noted, is automatic for positive operators. In this case, let $f$ be the unique solution of the equation $b = \vert a \vert^{1/2} x$ such that the range inclusion $R(f) \subseteq \overline{R(a)}$ holds and $a=u\vert a \vert$ the polar decomposition of $a.$ Then, the Schur complement of $A$ to $\mathcal{S}$ is defined as $$A_{/ \mathcal{S}}= \begin{pmatrix} 0 & 0\\ 0& c - f^*uf\end{pmatrix}.$$ In \cite{Contino4}, the notions of $\mathcal{S}$-complementability, $\mathcal{S}$-weak complementability and the Schur complement were extended to the Krein space setting in the following fashion. 
\begin{Def} Let $W \in L(\mathcal{H})^s$ and $\mathcal{S}$ be a closed subspace of $\mathcal{H}.$ The operator $W$ is called $\mathcal{S}$-\emph{complementable} if $$\mathcal{H}=\mathcal{S} + W^{-1}(\mathcal{S}^{[\perp]}).$$ \end{Def} If $W$ is $\mathcal{S}$-complementable then, for any fundamental decomposition $\mathcal{H}=\mathcal{H}_+ \ [\dotplus] \ \mathcal{H}_-$ with signature operator $J,$ we get that $\mathcal{H}=\mathcal{S} + (JW)^{-1}(\mathcal{S}^{\perp}).$ Therefore, $W$ is $\mathcal{S}$-complementable if and only if the pair $(JW,\mathcal{S})$ is \emph{compatible} in (the Hilbert space) $(\mathcal{H}, \PI{ \ }{ \ })$ for any (and then for every) signature operator $J,$ meaning that there exists a projection $Q$ onto $\mathcal{S},$ such that $JWQ=Q^{*}JW,$ see \cite{CMSSzeged}. From this, it follows that $W$ is $\mathcal{S}$-complementable if and only if there exists a projection $Q$ onto $\mathcal{S}$ such that $WQ=Q^{\#}W.$ In a similar way the $\mathcal{S}$-weak complementability in Krein spaces, with respect to a fixed signature operator $J,$ is defined. \begin{Def} Let $W \in L(\mathcal{H})^s$ and $\mathcal{S}$ be a closed subspace of $\mathcal{H}.$ The operator $W$ is $\mathcal{S}$-\emph{weakly complementable} with respect to a signature operator $J$ if $JW$ is $\mathcal{S}$-weakly complementable in $(\mathcal{H}, \PI{ \ }{ \ }).$ \end{Def} In this case, if the matrix representation of $JW$ induced by $\mathcal{S}$ is \begin{equation} \label{Wdes} JW=\begin{bmatrix} a & b \\ b^* & c \\ \end{bmatrix}, \end{equation} the $\mathcal{S}$-weak complementability of $W$ is equivalent to $R(b)\subseteq R(\vert a\vert^{1/2}).$ The $\mathcal{S}$-weak complementability of $W$ does not depend on the signature operator, see \cite[Theorem 4.4]{Contino4}. 
Then, we simply say that $W$ is $\mathcal{S}$-weakly complementable, whenever $W$ is $\mathcal{S}$-weakly complementable with respect to a signature operator $J.$ Let $W \in L(\mathcal{H})^ s$ and $\mathcal{S}$ a closed subspace of $\mathcal{H}.$ Then, by applying the spectral theorem for Hilbert space selfadjoint operators to $A=JW,$ with $J$ any signature operator, $\mathcal{S}$ can be decomposed as \begin{equation} \label{WdecompSKrein} \mathcal{S}=\mathcal{S}_+ \ [\dotplus]_{W} \ \mathcal{S}_-, \end{equation} where $\mathcal{S}_{+}$ and $\mathcal{S}_-$ are closed, $\mathcal{S}_+$ is $W$-nonnegative, $\mathcal{S}_-$ is $W$-nonpositive and $\mathcal{S}_{+} \perp \mathcal{S}_{-}.$ Notice that the decomposition in \eqref{WdecompSKrein} need not be unique. The following is a characterization of the $\mathcal{S}$-weak complementability \cite[Proposition 4.7]{Contino4}. \begin{prop} \label{PropWC} Let $W \in L(\mathcal{H})^s$ and $\mathcal{S}$ be a closed subspace of $\mathcal{H}.$ Suppose that $\mathcal{S}=\mathcal{S}_+ \ [\dotplus]_{W} \ \mathcal{S}_-$ is any decomposition as in \eqref{WdecompSKrein} for some signature operator $J.$ Then the following statements are equivalent: \begin{enumerate} \item[i)] $W$ is $\mathcal{S}$-weakly complementable, \item [ii)] there exist $W_1, W_2, W_3 \in L(\mathcal{H})^s,$ $W_2, W_3 \geq 0$ such that $W=W_1+W_2-W_3$ and $\mathcal{S} \subseteq N(W_1),$ $\mathcal{S}_- \subseteq N(W_2),$ $\mathcal{S}_+ \subseteq N(W_3),$ \item [iii)] $W$ is $\mathcal{S}_{\pm}$-weakly complementable. \end{enumerate} \end{prop} \begin{Def} Let $W \in L(\mathcal{H})^s,$ $\mathcal{S}$ be a closed subspace of $\mathcal{H}$ and $J$ a signature operator. Suppose that $W$ is $\mathcal{S}$-weakly complementable. 
The \emph{Schur complement} of $W$ to $\mathcal{S}$ corresponding to $J$ is $$W_{/ [\mathcal{S}]}^J =J (JW)_{ / \mathcal{S}},$$ and the $\mathcal{S}$-\emph{compression} of $W$ is $W_{ [\mathcal{S}]}^J = W- W_{/ [\mathcal{S}]}^J.$ \end{Def} In \cite[Theorem 4.5]{Contino4} it was proved that the Schur complement does not depend on the fundamental decomposition of $\mathcal{H}.$ Henceforth we write $W_{/ [\mathcal{S}]}$ for this operator and $W_{ [\mathcal{S}]}$ for the $\mathcal{S}$-compression. Also, suppose that $\mathcal{S}=\mathcal{S}_+ \ [\dotplus]_{W} \ \mathcal{S}_-$ is any decomposition as in \eqref{WdecompSKrein} for some signature operator $J.$ If $W$ is $\mathcal{S}$-weakly complementable then \begin{equation} \label{ShortedsupinfKrein} W_{/ [\mathcal{S}]}= (W_{/ [ \mathcal{S}_+]} )_{/ [\mathcal{S}_-]}=(W_{/[ \mathcal{S}_-]})_{/ [\mathcal{S}_+]}. \end{equation} Also, if $W=W_1+W_2-W_3$ as in Proposition \ref{PropWC} then \begin{equation} \label{ShortedKrein} W_{/ [\mathcal{S}]}= W_1+{W_2}_{/ [ \mathcal{S}_+]}-{W_3}_{/ [ \mathcal{S}_-]}. \end{equation} Moreover, if $W$ is $\mathcal{S}$-complementable then \begin{equation}\label{ShortedmaxminKrein} W_{/ [\mathcal{S}]}=W(I-Q), \end{equation} for any projection $Q$ onto $\mathcal{S}$ such that $WQ=Q^{\#}W.$ The following result was proved in \cite[Corollary 4.12]{Contino4}. \begin{prop} \label{ShortedC2} Let $W \in L(\mathcal{H})^s$ and $\mathcal{S}$ be a closed subspace of $\mathcal{H}.$ Suppose that $\mathcal{S}$ is $W$-nonnegative. 
Then $W$ is $\mathcal{S}$-weakly complementable if and only if there exists $\inf \ \{ E^{\#}WE: E=E^2, \ N(E)=\mathcal{S}\}.$ In this case, $$W_{/ [\mathcal{S}]}=\inf \ \{ E^{\#}WE: E=E^2, \ N(E)=\mathcal{S}\}.$$ \end{prop} \section{Weighted least squares problems in Krein spaces} Consider the following problem: given the operators $W \in L(\mathcal{H})^s,$ $B\in CR(\mathcal{H})$ and $C\in L(\mathcal{H}),$ determine the existence of \begin{equation} \underset{X \in L(\mathcal{H})}{\min} (BX-C)^{\#}W(BX-C). \label{eq61} \end{equation} \begin{Def} Let $W \in L(\mathcal{H})^s,$ $B\in CR(\mathcal{H})$ and $C\in L(\mathcal{H}).$ An operator $X_0 \in L(\mathcal{H})$ is an {\emph{indefinite minimum solution of $BX-C=0$ with weight $W$}} ($W$-ImS) if $X_0$ is a solution of Problem \eqref{eq61}. \end{Def} In a similar fashion, the analogous maximization problem can be considered. Along this section all the results are stated for problem \eqref{eq61} but similar results hold for the maximum problem. Consider $W \in L(\mathcal{H})^s,$ $B \in CR(\mathcal{H})$ and $C\in L(\mathcal{H})$ and define \begin{equation} \label{FX} F(X):=(BX-C)^{\#}W(BX-C). \end{equation} We begin by giving conditions for the existence of the infimum in $L(\mathcal{H})$ of the family $\{ F(X): X \in L(\mathcal{H})\}$ when $C=I.$ \begin{prop} \label{propinfimum1} Let $W \in L(\mathcal{H})^s$ and $B \in CR(\mathcal{H})$ such that $R(B)$ is $W$-nonnegative. Then the following are equivalent: \begin{itemize} \item [i)] There exists $\underset{X \in L(\mathcal{H})}{\inf} \ (BX-I)^{\#}W(BX-I)=:Z_0 \in L(\mathcal{H})$ and $R(B)$ is $Z_0$-nonnegative, \item [ii)] $W$ is $R(B)$-weakly complementable. \end{itemize} In this case, $Z_0=W_{/ [R(B)]}.$ \end{prop} \begin{dem} Suppose that $W$ is $R(B)$-weakly complementable. 
Let $F(X)$ be as in \eqref{FX} for $C=I.$ Then, for any $X\in L(\mathcal{H}),$ $F(X)=W_{/[R(B)]}+(BX-I)^{\#}W_{[R(B)]}(BX-I) \geq W_{/[R(B)]},$ because $R(B) \subseteq N(W_{/[R(B)]})$ and the fact that $R(B)$ is $W$-nonnegative yields $W_{[R(B)]} \geq 0.$ Hence $W_{/[R(B)]}$ is a lower bound of $\{ F(X) :X \in L(\mathcal{H}) \}.$ Let $T \in L(\mathcal{H})$ be any other lower bound of $F(X).$ In particular, given $E \in \mathcal{Q}$ such that $R(I-E)=R(B),$ by Douglas' Lemma \cite{Douglas}, there exists $X_0 \in L(\mathcal{H})$ satisfying $I-E=BX_0;$ i.e., such that $-E=BX_0-I.$ Then $$T\leq E^{\#}WE \mbox { for every } E \in \mathcal{Q} \mbox{ such that } N(E)=R(B).$$ By Proposition \ref{ShortedC2}, $$T \leq \inf \ \{ E^{\#}WE: \ E \in \mathcal{Q}, \ N(E)=R(B) \}=W_{/ [R(B)]}.$$ Therefore, $W_{/ [R(B)]}=\underset{X \in L(\mathcal{H})} {\inf} \ F(X)$ and, since $R(B) \subseteq N(W_{/[R(B)]}),$ $R(B)$ is $W_{/ [R(B)]}$-nonnegative. Conversely, if $Z_0$ exists and $R(B)$ is $Z_0$-nonnegative, then taking $X=0,$ the inequality $Z_0 \leq W$ shows that $Z_0 \in L(\mathcal{H})^s.$ As before, $$Z_0\leq E^{\#}WE \mbox { for every } E \in \mathcal{Q} \mbox{ such that } N(E)=R(B).$$ Fix a signature operator $J$ and let $(\mathcal{H}, \PI{ \ }{ \ })$ be the corresponding Hilbert space; consider $E=P_{R(B)^{\perp}}.$ Since $Z_0 \in L(\mathcal{H})^s,$ $(JZ_0)^*=JZ_0$ and \begin{equation} \label{infR(B)} JZ_0\leq_{\mathcal{H}} P_{R(B)^{\perp}}JWP_{R(B)^{\perp}}. \end{equation} Let $JW=\begin{bmatrix} a & b \\ b^* & c \\ \end{bmatrix}$ and $JZ_0=\begin{bmatrix} z_{11} & z_{12} \\ z_{12}^* & z_{22}\\ \end{bmatrix}$ be the matrix representation of $JW$ and $JZ_0$ induced by $R(B),$ respectively. 
By \eqref{infR(B)}, $$P_{R(B)^{\perp}}JWP_{R(B)^{\perp}}-JZ_0=\begin{bmatrix} -z_{11} & -z_{12} \\ -z_{12}^* & c-z_{22}\\ \end{bmatrix} \geq_{\mathcal{H}}0.$$ Then, $z_{11} \leq_{\mathcal{H}} 0$ and $R(z_{12}) \subseteq R((-z_{11})^{1/2}).$ Since $R(B)$ is $Z_0$-nonnegative, $z_{11} \geq_{\mathcal{H}}0.$ So $z_{11}=z_{12}=z_{12}^*=0$ and $R(JZ_0) \subseteq R(B)^{\perp}$ or equivalently, $R(Z_0) \subseteq R(B)^{[\perp]}.$ Therefore, $W=(W-Z_0)+Z_0,$ with $W-Z_0 \geq 0$ and $R(Z_0) \subseteq R(B)^{[\perp]}.$ Then, by Proposition \ref{PropWC}, $W$ is $R(B)$-weakly complementable. \end{dem} \begin{cor} \label{corinfimum1} Let $W \in L(\mathcal{H})^s$ and $B \in CR(\mathcal{H})$ such that $R(B)$ is $W$-nonnegative and $W$ is $R(B)$-weakly complementable. Then, for every $C \in L(\mathcal{H}),$ $$\underset{X \in L(\mathcal{H})}{\inf} \ (BX-C)^{\#}W(BX-C) =C^{\#}W_{/ [R(B)]}C.$$ \end{cor} \begin{dem} If $W \geq 0,$ by \cite[Lemma 4.1]{Contino}, $$\inf \ \{ C^{\#}E^{\#}WEC: \ E \in \mathcal{Q}, \ N(E)=R(B) \}=C^{\#}W_{/ [R(B)]}C.$$ By Proposition \ref{PropWC}, $W=W_1+W_2,$ with $R(B) \subseteq N(W_1)$ and $W_2 \geq0.$ Then, given $E \in \mathcal{Q}$ such that $N(E)=R(B),$ $$C^{\#}E^{\#}WEC=C^{\#}W_1C+C^{\#}E^{\#}W_2EC.$$ Hence \begin{equation*} \label{infC} \begin{split} &\inf \ \{ C^{\#}E^{\#}WEC: \ E \in \mathcal{Q}, \ N(E)=R(B) \}=\\ &\quad\quad=C^{\#}W_1C+\inf \ \{ C^{\#}E^{\#}W_2EC: \ E \in \mathcal{Q}, \ N(E)=R(B) \} \\ &\quad\quad=C^{\#}W_1C+C^{\#}W_2{_{/ [R(B)]}}C=C^{\#}W_{/ [R(B)]}C. \end{split} \end{equation*} Using this equality, the result follows in a similar way as in the first part of the proof of Proposition \ref{propinfimum1}. \end{dem} The next theorem establishes when the infimum in Proposition \ref{propinfimum1} is attained. 
\begin{thm} \label{thminimum2} Let $W \in L(\mathcal{H})^s$ and $B \in CR(\mathcal{H}).$ Then the following are equivalent: \begin{itemize} \item [i)] there exists a $W$-ImS of $BX-I=0,$ \item [ii)] $R(B)$ is $W$-nonnegative and $W$ is $R(B)$-complementable, \item [iii)] $R(B)$ is $W$-nonnegative and the normal equation \begin{equation} \label{NEqW2} B^{\#}W(BX-I)=0 \end{equation} admits a solution. \end{itemize} In this case, $$\underset{X \in L(\mathcal{H})}{\min} \ (BX-I)^{\#}W(BX-I)=W_{/ [R(B)]}.$$ \end{thm} \begin{dem} $i) \Leftrightarrow iii):$ Suppose that $X_0$ is a $W$-ImS of $BX-I=0.$ Then $$ \K{W(BX_0-I)x}{(BX_0-I)x} \leq \K{W(BX-I)x}{(BX-I)x}$$ $ \mbox{for every } x \in \mathcal{H} \mbox{ and every } X \in L(\mathcal{H}).$ Let $z \in \mathcal{H}$ be arbitrary. Then, for every $x \in \mathcal{H} \setminus \{0\},$ there exists $X \in L(\mathcal{H})$ such that $z=Xx.$ Therefore $$ \K{W(BX_0-I)x}{(BX_0-I)x} \leq \K{W(Bz-x)}{Bz-x}$$ for every $x, z \in \mathcal{H}.$ Thus, for every $x \in \mathcal{H},$ $X_0x$ is a weighted indefinite least squares solution of $Bz=x.$ So, by \cite[Proposition 3.2]{GiribetKrein} (see also \cite[Chapter I, Theorem 8.4]{Bognar}), $R(B)$ is $W$-nonnegative and $X_0x$ is a solution of $B^{\#}W(By-x)=0$ for every $x \in \mathcal{H},$ or equivalently, $X_0$ is a solution of \eqref{NEqW2}. The converse follows in a similar way, applying again \cite[Proposition 3.2]{GiribetKrein}. $ii) \Leftrightarrow iii):$ Suppose that $\mathcal{H} = R(B) + \ W^{-1}(R(B)^{[\perp]}),$ then $R(B^{\#}W) \subseteq R(B^{\#}WB).$ Hence, by Douglas' Lemma, the equation $B^{\#}W(BX-I)=0$ admits a solution. The converse follows analogously. In this case, by Proposition \ref{propinfimum1}, $$\underset{X \in L(\mathcal{H})}{\min} (BX-I)^{\#}W(BX-I)=W_{/ [R(B)]}.$$ \end{dem} The next corollaries follow from Theorem \ref{thminimum2}. 
\begin{cor} \label{cormin2} Let $W \in L(\mathcal{H})^s,$ $B \in CR(\mathcal{H})$ and $C \in L(\mathcal{H}).$ Then the following are equivalent: \begin{itemize} \item [i)] there exists a $W$-ImS of $BX-C=0,$ \item [ii)] $R(B)$ is $W$-nonnegative and $R(C) \subseteq R(B)+W^{-1}(R(B)^{[\perp]}),$ \item [iii)] $R(B)$ is $W$-nonnegative and the normal equation \begin{equation} \label{NEq3} B^{\#}W(BX-C)=0 \end{equation} admits a solution. \end{itemize} In this case, $X_0$ is a $W$-ImS of $BX-C=0$ if and only if $X_0$ is a solution of \eqref{NEq3}. \end{cor} \begin{dem} This follows in a similar way as in the proof of Theorem \ref{thminimum2} using the fact that $u$ is a weighted indefinite least squares solution of the equation $Bz=Cx$ if and only if $R(B)$ is $W$-nonnegative and $u$ is a solution of $B^{\#}W(By-Cx)=0,$ see \cite[Proposition 3.2]{GiribetKrein}. \end{dem} \begin{cor} \label{corWminimum2} Let $W \in L(\mathcal{H})^s$ and $B \in CR(\mathcal{H}).$ Then there exists a $W$-ImS of $BX-C=0$ for every $C \in L(\mathcal{H})$ if and only if $R(B)$ is $W$-nonnegative and $W$ is $R(B)$-complementable. In this case, $$\underset{X \in L(\mathcal{H})}{\min} (BX-C)^{\#}W(BX-C)=C^{\#}W_{/ [R(B)]}C.$$ \end{cor} \begin{dem} Suppose that there exists a $W$-ImS of $BX-C=0$ for every $C \in L(\mathcal{H}).$ Then the conclusion follows by applying Theorem \ref{thminimum2} for $C=I.$ Conversely, if $W$ is $R(B)$-complementable and $R(B)$ is $W$-nonnegative, then $B^{\#}W(BX-I)=0$ admits a solution. Therefore $B^{\#}W(BX-C)=0$ admits a solution for every $C \in L(\mathcal{H})$ and, by Corollary \ref{cormin2}, there exists a $W$-ImS of $BX-C=0.$ In this case, let $Q \in \mathcal{Q}$ be such that $R(Q)=R(B)$ and $WQ=Q^{\#}W.$ Then, by Douglas' Lemma, there exists $X_0\in L(\mathcal{H})$ such that $BX_0=QC.$ Therefore $B^{\#}W(BX_0-C)=B^{\#}W(Q-I)C=0,$ because $R(I-Q)=N(Q)\subseteq N(B^{\#}W)$ \cite[Lemma 3.2]{CMSSzeged}.
Then, $X_0$ is a $W$-ImS of $BX-C=0.$ Hence, $\underset{X \in L(\mathcal{H})}{\min} F(X)=C^{\#}W_{/ [R(B)]}C,$ since $W_{/ [R(B)]}=W(I-Q),$ by \eqref{ShortedmaxminKrein}. \end{dem} \subsection{Weighted Min-Max problems} A necessary condition for the minimization (maximization) problem to be solvable is that the range of the operator $\!B$ is $W$-nonnegative ($\!W$-nonpositive). In what follows, we are interested in posing (and solving) a problem similar to the one in \eqref{eq61}, that does not require the range of $B$ to be $W$-definite in order to admit a solution. To do so, we begin by expressing the range of $B$ as the sum of suitable $W$-definite subspaces. For a fixed signature operator $J,$ the spectral theorem for Hilbert space selfadjoint operators applied to $JW$ gives that $\mathcal{S}:=R(B)$ can be decomposed as $\mathcal{S}=\mathcal{S}_+ \ [\dotplus]_{W} \ \mathcal{S}_-$ (compare with \eqref{WdecompSKrein}). If $P_{\pm}=P_{\mathcal{S}_{\pm}}$ and $B_{\pm}=P_{\pm}B$ then $\mathcal{S}_{\pm}=R(B_{\pm})$ and the following result holds.
\begin{lema} \label{lemmadecomp} Let $W \in L(\mathcal{H})^s$ and $B \in CR(\mathcal{H}).$ Then, given a signature operator $J,$ $B$ can be written as \begin{equation} \label{DescomposicionB} B=B_+ + B_- \end{equation} with $R(B_+)$ closed and $W$-nonnegative, $R(B_-)$ closed and $W$-nonpositive, $R(B_+) \perp R(B_-)$ and $R(B)=R(B_+) \ [\dotplus]_{W} \ R(B_-).$ \end{lema} Fix a decomposition of $R(B)$ as in \eqref{DescomposicionB} and define $$F_J(X,Y)=(B_+X+B_-Y-C)^{\#}W(B_+X+B_-Y-C).$$ Notice that $F_J(X,X)=F(X).$ Consider the following problem: determine the existence of $$\underset{Y \in L(\mathcal{H}) }{\max} \left( \underset{X \in L(\mathcal{H})}{\min} F_J(X,Y) \right).$$ \begin{prop} \label{propsupinfimum1} Let $W \in L(\mathcal{H})^s$ and $B \in CR(\mathcal{H})$ such that $W$ is $R(B)$-weakly complementable and $B$ is represented as in \eqref{DescomposicionB} for some signature operator $J.$ Then, for every $C \in L(\mathcal{H}),$ $$ \underset{ Y \in L(\mathcal{H})}{\sup} \left( \underset{X \in L(\mathcal{H})}{\inf} F_J(X,Y) \right)=\underset{ X \in L(\mathcal{H})}{\inf} \left( \underset{Y \in L(\mathcal{H})}{\sup} F_J(X,Y) \right) =C^{\#} W_{/ [R(B)]} C. $$ \end{prop} \begin{dem} Write $W=W_1+W_2-W_3,$ with $R(B) \subseteq N(W_1),$ $R(B_-) \subseteq N(W_2),$ $R(B_+) \subseteq N(W_3)$ and $W_2, W_3 \geq0$ (see Proposition \ref{PropWC}). Then $$F_J(X,Y)\!=\!C^{\#}W_1C\!+\!(B_+X\!-\!C)^{\#}W_2(B_+X\!-\!C)\!-\!(B_-Y\!-\!C)^{\#}W_3(B_-Y\!-\!C). $$ By Proposition \ref{PropWC}, $W$ is $R(B_{\pm})$-weakly complementable. Also, $W$ is $R(B_+)$-weakly complementable and $R(B_+)$ is $W$-nonnegative if and only if $W_2$ is $R(B_+)$-weakly complementable and $R(B_+)$ is $W_2$-nonnegative.
Applying Corollary \ref{corinfimum1}, $$\underset{X \in L(\mathcal{H})}{\inf} (B_+X-C)^{\#}W_2(B_+X-C)=C^{\#}{W_2}_{ / [R(B_+)]}C.$$ Therefore, for each $Y \in L(\mathcal{H}),$ $$\underset{X \in L(\mathcal{H})}{\inf}F_J(X,Y)=C^{\#}W_1C+C^{\#}{W_2}_{ / [R(B_+)]}C-(B_-Y-C)^{\#}W_3(B_-Y-C).$$ In the same way, by applying Corollary \ref{corinfimum1} and \eqref{ShortedKrein} \noindent $\! \underset{Y \in L(\mathcal{H})}{\sup}\left( \underset{X \in L(\mathcal{H})}{\inf} F_J(X,Y) \right)=C^{\#}W_1C+C^{\#}{W_2}_{ / [R(B_+)]}C-C^{\#}{W_3}_{ / [R(B_-)]}C=\\=C^{\#} W_{/ [R(B)]} C.$ \noindent The second equality can be proved similarly. \end{dem} \begin{Def} Let $W \in L(\mathcal{H})^s,$ $B \in CR(\mathcal{H})$ and $C \in L(\mathcal{H}).$ Suppose that $B$ is represented as in \eqref{DescomposicionB} for some signature operator $J.$ An operator $Z \in L(\mathcal{H})$ is an {\emph{indefinite min-max solution of $BX-C=0$ with weight $W$}} ($W$-ImMS) (corresponding to the decomposition given by $J$) if \begin{equation} \label{eqMinMax2} (BZ-C)^{\#}W(BZ-C)=\underset{Y \in L(\mathcal{H}) }{\max} \left( \underset{X \in L(\mathcal{H})}{\min} F_J(X,Y) \right).
\end{equation} \end{Def} When the weight is the identity, it was proved in \cite[Theorem 5.1 and Corollary 5.2]{Contino3}, that an operator $Z \in L(\mathcal{H})$ is an $I$-ImMS of $BX-C=0,$ for some fundamental decomposition of $\mathcal{H},$ if and only if $$Z=Z_1+Z_2$$ where $B^{\#}(BZ_1-C)=0$ and $(BZ_2)^{\#}BZ_2=0.$ Therefore, an $I$-ImMS of $BX-C=0$ is independent of the selected fundamental decomposition of $\mathcal{H}.$ Also, there exists an $I$-ImMS of $BX-C=0$ if and only if $R(C) \subseteq R(B) + R(B)^{[\perp]}.$ A similar result holds for a general weight: \begin{thm} \label{TeominmaxW} Let $W \in L(\mathcal{H})^s,$ $B \in CR(\mathcal{H})$ and $C \in L(\mathcal{H}).$ An operator $Z$ is a $W$-ImMs of $BX-C=0$ for some (and, hence, any) fundamental decomposition of $\mathcal{H},$ if and only if $$Z=Z_1+Z_2$$ where $B^{\#}W(BZ_1-C)=0$ and $(BZ_2)^{\#}WBZ_2=0.$ \end{thm} The proof follows from Corollary \ref{cormin2}, using similar arguments to those found in the proof of \cite[Theorem 5.1]{Contino3}. \begin{obs} Let $W \in L(\mathcal{H})^s,$ $B \in CR(\mathcal{H})$ and $C \in L(\mathcal{H}).$ Suppose that $B$ is represented as in \eqref{DescomposicionB} for some signature operator $J.$ Then $$\underset{Y \in L(\mathcal{H}) }{\max} \left( \underset{X \in L(\mathcal{H})}{\min} F_J(X,Y) \right)=\underset{X \in L(\mathcal{H})}{\min} \left( \underset{Y \in L(\mathcal{H}) }{\max}\ F_J(X,Y) \right).$$ \end{obs} This follows from Theorem \ref{TeominmaxW} and using similar arguments to those found in the proof of \cite[Remark after Theorem 5.1]{Contino3}. 
\begin{cor} \label{WCorollaryminmax} Let $W \in L(\mathcal{H})^s,$ $B \in CR(\mathcal{H})$ and $C \in L(\mathcal{H}).$ Then, there exists a $W$-ImMS of $BX-C=0$ if and only if $R(C) \subseteq R(B) + W^{-1}(R(B)^{[\perp]}).$ \end{cor} \begin{dem} Suppose that $Z$ is a $W$-ImMs of $BX-C=0.$ Then, by Theorem \ref{TeominmaxW}, $Z=Z_1+Z_2$ where $B^{\#}W(BZ_1-C)=0$ and $(BZ_2)^{\#}WBZ_2=0.$ Therefore $$R(C) \subseteq R(B) + W^{-1}(R(B)^{[\perp]}).$$ Conversely, if $R(C) \subseteq R(B) + W^{-1}(R(B)^{[\perp]})$ then $R(B^{\#}WC) \subseteq R(B^{\#}WB).$ By Douglas's Lemma, there exists a solution of the normal equation $B^{\#}W(BX-C)=0,$ say $Z_1 \in L(\mathcal{H}).$ Put $Z_2=0$ and apply Theorem \ref{TeominmaxW} to get that $Z_1$ is a $W$-ImMs of $BX-C=0.$ \end{dem} \begin{cor} \label{CorminmaxregW} Let $W \in L(\mathcal{H})^s$ and $B \in CR(\mathcal{H}).$ Then, there exists a $W$-ImMS of $BX-C=0$ for every $C \in L(\mathcal{H})$ if and only if W is $R(B)$-complementable. In this case, for every signature operator $J,$ $$\underset{Y \in L(\mathcal{H})}{\max} \left( \underset{X \in L(\mathcal{H})}{\min} F_J(X,Y)\right)=C^{\#}W_{/ [R(B)]}C=C^{\#}W(I-Q)C,$$ where $Q$ is any projection onto $R(B)$ such that $WQ=Q^{\#}W.$ \end{cor} \begin{dem} If W is $R(B)$-complementable then, for every $C \in L(\mathcal{H}),$ $R(C) \subseteq R(B) + W^{-1}(R(B)^{[\perp]})$ and, by Corollary \ref{cormin2}, there exists a $W$-ImMS of $BX-C=0.$ Conversely, assume that, for every $C \in L(\mathcal{H})$ there exists a $W$-ImMS of $BX-C=0.$ Set $C=I$ and apply the corollary once again to get that W is $R(B)$-complementable as $\mathcal{H}=R(I) \subseteq R(B)+ W^{-1}(R(B)^{[\perp]}).$ In this case, like in the proof of Corollary \ref{corWminimum2}, let $Q \in \mathcal{Q}$ be such that $R(Q)=R(B)$ and $WQ=Q^{\#}W.$ Then, by Douglas' Lemma, there exists $Z_1\in L(\mathcal{H})$ such that $BZ_1=QC$ and $B^{\#}W(BZ_1-C)=0.$ Then, by Theorem \ref{TeominmaxW}, $Z_1$ is a $W$-ImMS of $BX-C=0.$ Therefore, 
$$\underset{Y \in L(\mathcal{H})}{\max} \left( \underset{X \in L(\mathcal{H})}{\min} F_J(X,Y)\right) =F(Z_1)=C^{\#}W_{/ [R(B)]}C=C^{\#}W(I-Q)C.$$ \end{dem} \section{Minimization problems in the indefinite trace space} In the present section the notion of trace of an operator is extended to the Krein space setting with the aim of applying the results previously obtained to trace-type problems on operators. We denote by $S_p$ the $p${\emph{-Schatten class}} for $1 \leq p < \infty.$ The reader is referred to \cite{Ringrose, Simon} for further details on $S_p$-operators. Let $(\mathcal{H}, \K{ \ }{ \ })$ be a Krein space. If $J$ is a signature operator for $\mathcal{H},$ fix the Hilbert space $(\mathcal{H}, \PI{ \ }{ \ }),$ where $\PI{x }{ y }=\K{Jx}{y}$ for all $x, y \in \mathcal{H}.$ The operator $T$ belongs to the Schatten class $S_p(J)$ if $T \in S_p$ when viewed as acting on the associated Hilbert space $(\mathcal{H}, \PI{ \ }{ \ }).$ The next lemma shows that if $T \in S_p(J_a)$ for some fundamental decomposition of $\mathcal{H}$ with signature operator $J_a$ then $T \in S_p(J_b)$ for any other fundamental decomposition of $\mathcal{H}$ with signature operator $J_b.$ To prove this assertion we will use the following result, see \cite[Theorem 2.1.3]{Ringrose}. \begin{thm} \label{thmSp} Let $\mathcal{H}$ be a Hilbert space, $T\in L(\mathcal{H})$ and $1 \leq p < \infty.$ Then $T \in S_p$ if and only if there exists a sequence $\{F_n\}_{n \in \mathbb{N}}$ of operators on $\mathcal{H}$ such that $F_n$ has finite rank not greater than $n$ and $$\sum_{n \geq 1} \Vert T - F_n \Vert^p < \infty.$$ \end{thm} \begin{lema} \label{LemaSpKrein} Let $(\mathcal{H}, \K{ \ }{ \ })$ be a Krein space with signature operators $J_a$ and $J_b$. Fix the Hilbert spaces $(\mathcal{H}, \PI{ \ }{ \ }_a)$ and $(\mathcal{H}, \PI{ \ }{ \ }_b).$ Then $T \in S_p(J_a)$ if and only if $T \in S_p(J_b)$. 
\end{lema} \begin{dem} The result is readily obtained by applying Theorem \ref{thmSp} and from the fact that $ \PI{ \ }{ \ }_a$ and $ \PI{ \ }{ \ }_b$ are equivalent. \end{dem} On account of the above lemma we just write $S_p$ instead of $S_p(J).$ \begin{Def} Let $(\mathcal{H}, \K{ \ }{ \ })$ be a separable Krein space with signature operator $J$ and fix the associated Hilbert space $(\mathcal{H}, \PI{ \ }{ \ }).$ If $T \in S_1$ and $\{e_n : n\in \mathbb{N} \}$ is an orthonormal basis of $(\mathcal{H}, \PI{ \ }{ \ }),$ then the $J${\emph{-trace}} of $T,$ denoted by $\tr_{J}(T),$ is defined as $$\tr_{J}(T)=\sum_{n=1}^{\infty} \K{Te_n}{e_n}.$$ \end{Def} Notice that $\tr_J(T)$ equals $\tr(JT)$ in the inner product $\PI{ \ }{ \ } =\K{J \ }{ \ }$ see \cite{Ringrose, Simon}. Whence the $J$-trace of $T$ does not depend on the particular choice of the orthonormal basis (see \cite[Lemma 2.2.1]{Ringrose}). The next lemma gathers the basic properties of the $J$-trace. By using the definition of $\tr_J$ and the properties of the trace of an operator in a Hilbert space the proof is straightforward. 
\begin{lema} \label{Proptr} Let $(\mathcal{H}, \K{ \ }{ \ })$ be a Krein space with signature operator $J$ and fix the associated Hilbert space $(\mathcal{H}, \PI{ \ }{ \ }).$ Let $T, S \in S_1$ and $\alpha, \beta \in \mathbb{C},$ then \begin{enumerate} \item [i)] $\tr_{J}(\alpha T + \beta S)= \alpha \ \tr_{J}(T) + \beta \ \tr_{J}(S),$ \item [ii)] $\tr_{J}(T^{\#})=\overline{\tr_{J}(T)},$ \item [iii)] $\tr_{J} (T)= \tr(JT),$ where the trace is calculated with respect to the inner product $\PI{ \ }{ \ } =\K{J \ }{ \ },$ \item [iv)] $\tr_{J}(TS) = \tr_{J}(JSJT)=\tr_{J}(SJTJ),$ \item [v)] $\vert \tr_{J}(T) \vert \leq \Vert T \Vert_{1}.$ \end{enumerate} \end{lema} The next example shows that the $J$-trace depends on the signature operator $J.$ \begin{example} Consider $\mathbb{C}^2$ with the indefinite metric $\K{(x_1, x_2)}{(y_1, y_2)}=x_1\overline{y_1} - x_2 \overline{y_2}.$ Then $(\mathbb{C}^2, \K{ \ } { \ })$ is a Krein space with fundamental decompositions: $\mathbb{C}^2= span \{ (1,0 )\} \ [\dotplus] \ span \{ (0,1)\}$ and $ \mathbb{C}^2= span \{ (2,1 )\} \ [\dotplus] \ span \{ (1,2)\}.$ Let $J_a$ and $J_b$ be the corresponding signature operators. Observe that $\{ (1,0), (0,1) \}$ is an orthonormal basis in $(\mathbb{C}^2, \K{J_a \ } { \ })$ and $\left\{ \frac{1}{\sqrt{3}}(2,1), \frac{1}{\sqrt{3}}(1,2) \right\}$ is an orthonormal basis in $(\mathbb{C}^2, \K{J_b \ } { \ }).$ Set $T: \mathbb{C}^2 \rightarrow \mathbb{C}^2,$ $T(x_1,x_2) :=(x_1+x_2,0).$ A straightforward computation gives $\tr_{J_a}(T)=1 \not = 3= \tr_{J_b}(T).$ \end{example} \begin{lema} \label{propFund} Let $(\mathcal{H}, \K{ \ }{ \ })$ be a separable Krein space with signature operators $J_a$ and $J_b$. Fix the Hilbert spaces $(\mathcal{H}, \PI{ \ }{ \ }_a)$ and $(\mathcal{H}, \PI{ \ }{ \ }_b).$ If $T \in S_1$ then $$\tr_{J_b}(T)=\tr_{J_a}(J_b T J_a).$$ \end{lema} \begin{dem} We use the notation $\tr_{\PI{ \ }{ \ }}$ when we want to highlight the inner product on which the trace is calculated. 
Let $\alpha = J_a J_b.$ Then $\alpha$ is an invertible operator on $\mathcal{H}$ such that, for every $x, y \in \mathcal{H}$, $$\PI{\alpha x}{y}_a=\PI{J_aJ_bx}{y}_a=\K{J_bx}{y}=\PI{x}{y}_b.$$ In particular, $\PI{\alpha x}{x}_a \geq 0$ for every $x \in \mathcal{H}.$ Let $\{ e_n : n \in \mathbb{N} \}$ be an orthonormal basis in $(\mathcal{H}, \PI{ \ }{ \ }_b).$ Then $$\delta_{ij}=\PI{e_i}{e_j}_b=\PI{\alpha e_i}{e_j}_a=\PI{\alpha^{1/2}e_i}{\alpha^{1/2}e_j}_a.$$ Hence, $\{ \alpha^{1/2} e_n : n \in \mathbb{N} \}$ is an orthonormal basis in $(\mathcal{H}, \PI{ \ }{ \ }_a).$ \noindent So, if $T \in S_1$ then \\ $\tr_{J_b}(T)= \tr_{\PI{ \ }{ \ }_b}(J_b T)=\tr_{\PI{ \ }{ \ }_b}(T J_b)=\sum_{ n \geq 1} \PI{TJ_be_n}{e_n}_b=\\=\sum_{ n \geq 1} \PI{\alpha TJ_a \alpha e_n}{e_n}_a =\sum_{ n \geq 1} \PI{ ( \alpha^{1/2} TJ_a \alpha^{1/2})\alpha^{1/2} e_n}{\alpha^{1/2} e_n}_a= \\ =\tr_{\PI{ \ }{ \ }_a}(\alpha^{1/2} T J_a \alpha^{1/2}) = \tr_{\PI{ \ }{ \ }_a}(\alpha T J_a)= \tr_{\PI{ \ }{ \ }_a}(J_a J_b T J_a)=\\ = \tr_{J_a}(J_b T J_a).$ \end{dem} \subsection*{Fr\'echet derivative of the $J$-trace} Let $(\mc{E}, \Vert \cdot \Vert)$ be a Banach space and $\mathcal U \subseteq \mc{E}$ be an open set.
We recall that a function $f: \mc{E} \rightarrow \mathbb{R}$ is said to be {\emph{Fr\'echet differentiable}} at $X_0 \in \mathcal U$ if there exists $Df(X_0): \mc{E} \rightarrow \mathbb{R}$ a bounded linear functional such that $$\lim\limits_{Y\rightarrow 0} \frac{|f(X_0+Y)-f(X_0) - Df(X_0)(Y)|}{\Vert Y \Vert}=0.$$ If $f$ is Fr\'echet differentiable at every $X_0 \in \mc{E}$, $f$ is called Fr\'echet differentiable on $\mc{E}$ and the function $Df$ which assigns to every point $X_0 \in \mc{E}$ the derivative $Df(X_0),$ is called the Fr\'echet derivative of the function $f.$ If, in addition, the derivative $Df$ is continuous, $f$ is said to be a {\emph{class $\mc{C}^1$-function}}, in symbols, $f \in \mc{C}^1(\mc{E}, \mathbb{R}).$ Let $W \in L(\mathcal{H})^s,$ $B \in CR(\mathcal{H})$ and $C\in L(\mathcal{H}).$ Recall that $F(X)=(BX-C)^{\#}W(BX-C)$ and consider $f_J: L(\mathcal{H}) \rightarrow \mathbb{R}$ defined by $$f_J(X):=\tr_J(F(X)).$$ In the following lemma we give the formula for the Fr\'echet derivative of $f_J(X),$ see \cite{Gold1} for the finite-dimensional case. \begin{lema} \label{LemaDiff} Let $(\mathcal{H}, \K{ \ }{ \ })$ be a Krein space with signature operator $J.$ Fix the associated Hilbert space $(\mathcal{H}, \PI{ \ }{ \ }).$ Let $W \in S_1,$ $B \in CR(\mathcal{H})$ and $C \in L(\mathcal{H}).$ Then $f_J$ is Fr\'echet differentiable on $L(\mathcal{H})$ and $$Df_J(X)(Y)=2 \ Re \ \tr_{J}(Y^{\#}B^{\#}W(BX-C)).$$ Moreover, $f_J \in \mc{C}^1({L(\mathcal{H}), \mathbb{R}}).$ \end{lema} \begin{dem} For all $X, Y \in L(\mathcal{H}),$ $$f_J(X+Y)=f_J(X)+ 2 Re \ \tr_{J} ((BY)^{\#}W(BX-C)) + \tr_{J} ((BY)^{\#}W(BY)).$$ Then $$\frac{\vert f_J(X+Y)-f_J(X) - 2 Re \ \tr_{J} ((BY)^{\#}W(BX-C)) \vert }{\Vert Y \Vert} =$$ $$=\frac{\vert \tr_{J} ((BY)^{\#}W(BY)) \vert}{\Vert Y \Vert} \leq \frac{\Vert BY \Vert^2 \Vert W \Vert_1}{\Vert Y \Vert}\leq \Vert B \Vert^2 \Vert W \Vert_{1} \Vert Y \Vert$$ (see Lemma \ref{Proptr}). 
Hence $f_J$ is Fr\'echet differentiable on $L(\mathcal{H})$ and $$Df_J(X)(Y)=2 Re \ \tr_{J} ((BY)^{\#}W(BX-C)).$$ Finally, since \begin{align*} \vert Df_J(X_1)(Y) - Df_J(X_2)(Y) \vert &= 2 \vert Re \ \tr_{J} ((BY)^{\#}W(B(X_1-X_2))\vert\\ &\leq 2 \Vert B \Vert^2 \Vert Y \Vert \Vert W \Vert_{1} \Vert X_1-X_2\Vert. \end{align*} (once again by Lemma \ref{Proptr}), it follows that $f_J \in \mc{C}^1({L(\mathcal{H}), \mathbb{R}}).$ \end{dem} In this section we deal with the following problems: let $(\mathcal{H}, \K{ \ }{ \ })$ be a Krein space with signature operator $J.$ Fix the associated Hilbert space $(\mathcal{H}, \PI{ \ }{ \ }).$ Given $B \in CR(\mathcal{H}),$ $C \in L(\mathcal{H})$ and $W \in S_1\cap L(\mathcal{H})^s,$ we analyze whether there exists the \begin{equation} \underset{X \in L(\mathcal{H})}{\min} \tr_{J}((BX-C)^{\#}W(BX-C)) \label{eq71} \end{equation} and the corresponding maximum. Finally, if $B$ is represented as in \eqref{DescomposicionB} and $F_J(X,Y)=(B_+X+B_-Y-C)^{\#}W(B_+X+B_-Y-C),$ we also analyze the existence of \begin{equation} \underset{Y \in L(\mathcal{H})}{\max} \left( \underset{X \in L(\mathcal{H})}{\min} \tr_{J}(F_J(X,Y))\right). \label{eq772} \end{equation} It follows from the last lemma that, if $f_J : L(\mathcal{H}) \times L(\mathcal{H}) \rightarrow \mathbb{R}$ is given by \begin{equation} \label{GJ} f_J(X,Y):= \tr_J(F_J(X,Y)), \end{equation} then $f_J \in \mc{C}^1(L(\mathcal{H}) \times L(\mathcal{H}), \mathbb{R})$ and the partial derivatives of $f_J$ in every $(X_0,Y_0) \in L(\mathcal{H}) \times L(\mathcal{H})$ are $$D_{X} f_J(X_0,Y_0)(H)=2 Re \ \tr_{J} ((B_+H)^{\#}W(B_+X_0+B_-Y_0-C)), $$ $$D_{Y} f_J(X_0,Y_0)(K)=2 Re \ \tr_{J} ((B_-K)^{\#}W(B_+X_0+B_-Y_0-C)),$$ for all $H, K \in L(\mathcal{H}).$ \begin{thm} \label{thmtrJ} Let $W \in L(\mathcal{H})^s$ such that $W \in S_1,$ $B \in CR(\mathcal{H})$ and $C \in L(\mathcal{H}).$ The following assertions hold: \begin{enumerate} \item Assume that $R(B)$ is $W$-nonnegative. 
Then, $X_0 \in L(\mathcal{H})$ realizes \eqref{eq71} for any signature operator $J$ if and only if $X_0$ is a $W$-ImS of the equation $BX-C=0.$ \item Let $B$ be represented as in \eqref{DescomposicionB} for some signature operator $J.$ Then, the min-max in \eqref{eq772} exists for every $C \in L(\mathcal{H})$ if and only if $W$ is $R(B)$-complementable. In this case, $$\underset{Y \in L(\mathcal{H})}{\max} \left( \underset{X \in L(\mathcal{H})}{\min} \tr_{J}(F_J(X,Y))\right)=\tr_J(C^{\#}W_{/[ R(B)]}C).$$ The operator $Z \in L(\mathcal{H})$ realizes \eqref{eq772} if and only if $Z$ is a $W$-ImMS of $BX-C=0.$ \end{enumerate} \end{thm} \begin{dem} Let $J$ be a signature operator of $\mathcal{H}$ and fix the associated Hilbert space $(\mathcal{H}, \PI{ \ }{ \ }).$ Suppose that $X_0$ is a solution of Problem \eqref{eq71}. If $f_J$ is as in Lemma \ref{LemaDiff} then $X_0$ is a global minimum of $f_J$. Since $f_J$ is a $\mc{C}^1$-function, $X_0$ is a critical point of $f_J(X);$ i.e., for every $Y \in L(\mathcal{H}),$ $Df_J(X_0)(Y)=0$ or equivalently, $$0=2 Re \ \tr_{J} ((BY)^{\#}W(BX_0-C))=2 Re \ \tr (J(BY)^{\#}W(BX_0-C)).$$ Thus, considering a suitable $Y,$ it follows that $$B^{\#}W(BX_0-C)=0.$$ So, by Corollary \ref{cormin2}, $X_0$ is a $W$-ImS of $BX-C=0.$ As for the converse, suppose that $X_0$ is a $W$-ImS of $BX-C=0.$ Let $\{e_n : n\in \mathbb{N} \}$ be any orthonormal basis in $(\mathcal{H}, \PI{ \ }{ \ }).$ Then $$\K{W(BX_0-C)e_n}{(BX_0-C)e_n} \leq \K{W(BX-C)e_n}{(BX-C)e_n}$$ $\mbox{ for every } n \in \mathbb{N} \mbox{ and every } X \in L(\mathcal{H}).$ Therefore $$\tr_{J}(F(X_0)) \leq \tr_{J}(F(X))$$ for every $X \in L(\mathcal{H}).$ Hence $X_0$ is a solution of Problem \eqref{eq71} and the proof of the item $1$ is complete. 
As for the item $2,$ suppose that $W$ is $R(B)$-complementable and $Z'$ is a solution of $B^{\#}W(BX-I)=0.$ Then, for any $C\in L(\mathcal{H}),$ $Z=Z'C$ is a solution of $B^{\#}W(BX-C)=0$ and, by Theorem \ref{TeominmaxW}, $Z$ is a $W$-ImMS of $BX-C=0,$ i.e., $$(BZ-C)^{\#}W(BZ-C)=\underset{Y \in L(\mathcal{H}) }{\max} \left( \underset{X \in L(\mathcal{H})}{\min} F_J(X,Y)\right).$$ Let $\{e_n : n\in \mathbb{N} \}$ be any orthonormal basis in $(\mathcal{H}, \PI{ \ }{ \ }). $ Then, for every $n \in \mathbb{N}$ and any $X, Y \in L(\mathcal{H}),$ $$\K{(B_+Z+B_-Y-C)^{\#}W(B_+Z+B_-Y-C)e_n}{e_n} \leq$$ $$\leq \K{(B_+Z+B_-Z-C)^{\#}W(B_+Z+B_-Z-C)e_n}{e_n} $$ $$\leq \K{(B_+X+B_-Z-C)^{\#}W(B_+X+B_-Z-C)e_n}{e_n}.$$ Therefore \begin{align*} \tr_{J}(F_J(Z,Z))=\tr_J(F(Z))&=\underset{Y \in L(\mathcal{H})}{\max} \left( \underset{X \in L(\mathcal{H})}{\min} \tr_{J}(F_J(X,Y))\right)=\\ &=\tr_J(C^{\#}W_{/[ R(B)]}C), \end{align*} where we used Corollary \ref{CorminmaxregW}. Hence $Z$ is a solution of Problem \eqref{eq772}. Conversely, if $Z \in L(\mathcal{H})$ is a solution of Problem \eqref{eq772} for any $C\in L(\mathcal{H}),$ then $$f_J(Z,Y) \leq f_J(Z,Z) \leq f_J(X,Z) \mbox{ for every } X,\ Y \in L(\mathcal{H}),$$ where $f_J$ is as in \eqref{GJ}. Hence, $Z$ is a global minimum of $f_J(X,Z)$ and $Z$ is a global maximum of $f_J(Z,Y).$ Therefore, for every $H, K \in L(\mathcal{H}),$ $$D_{X} f_J(Z,Z)(H)=D_{Y} f_J(Z,Z)(K)=0$$ or equivalently, \noindent $Re \ \tr_{J} ((B_+H)^{\#}W(B_+Z+B_-Z-C))=Re \ \tr_{J} ((B_-K)^{\#}W(B_+Z+B_-Z-C))=0.$ Then, considering suitable $H,K,$ it follows that $$B_+^{\#}W(B_+Z+B_-Z-C)=B_-^{\#}W(B_+Z+B_-Z-C)=0.$$ Thus $$B^{\#}W(BZ-C)=0$$ and, by Theorem \ref{TeominmaxW} once again, $Z$ is a $W$-ImMS of $BX-C=0.$ \end{dem} The following theorem synthesizes the results of the last two sections.
\begin{thm} Let $W \in L(\mathcal{H})^s$ such that $W \in S_1$ and $B \in CR(\mathcal{H}).$ Then the following statements are equivalent: \begin{itemize} \item [i)] there exists a $W$-ImMS of $BX-C=0$ for every $C \in L(\mathcal{H}),$ \item [ii)] the $\underset{Y \in L(\mathcal{H})}{\max} \left( \underset{X \in L(\mathcal{H})}{\min} \tr_{J}(F_J(X,Y))\right)$ is attained, for every $C \in L(\mathcal{H}),$ \item [iii)] $W$ is $R(B)$-complementable, \item [iv)] the equation $B^{\#}W(BX-C)=0$ admits a solution for every $C \in L(\mathcal{H}).$ \end{itemize} In this case, $$\underset{Y \in L(\mathcal{H})}{\max} \left( \underset{X \in L(\mathcal{H})}{\min} F_J(X,Y)\right)=C^{\#}W_{/ [R(B)]}C$$ and $$\underset{Y \in L(\mathcal{H})}{\max} \left( \underset{X \in L(\mathcal{H})}{\min} \tr_{J}(F_J(X,Y))\right)=\tr_J(C^{\#}W_{/[ R(B)]}C).$$ Moreover, $Z$ is a $W$-ImMS of $BX-C=0$ and the min-max in $ii)$ is attained in $Z$ if and only if $Z=Z_1+Z_2,$ where $B^{\#}W(BZ_1-C)=0$ and $(BZ_2)^{\#}WBZ_2=0.$ \end{thm} \subsection*{\textbf{Final remark: the $\mathbf{J}$-$\mathbf{S_2}$ space}} Let $(\mathcal{H}, \K{ \ }{ \ })$ be a Krein space with signature operator $J.$ Fix the associated Hilbert space $(\mathcal{H}, \PI{ \ }{ \ })$ and set $$\K{S}{T}_J:=\tr_{J}(T^{\#}S), \quad S,T \in S_2.$$ It can be readily seen that $\K{ \ } { \ }_J$ is an indefinite inner product on $S_2.$ Moreover, $(S_2, \K{ \ } { \ }_J)$ is a Krein space and $$\tr_{J}(T^{\#}T)=\Vert P_+ T \Vert_2^2-\Vert P_- T \Vert_2^2,$$ where $P_{\pm}=\frac{I\pm J}{2}.$ \section*{References} \end{document}
\begin{document} \title{Determining hyperbolicity of compact orientable 3-manifolds with torus boundary} \author{Robert C. Haraway, III\thanks{Research partially supported by NSF grant DMS-1006553.}} \date{\today} \maketitle \begin{abstract} Thurston's hyperbolization theorem for Haken manifolds and normal surface theory yield an algorithm to determine whether or not a compact orientable 3-manifold with nonempty boundary consisting of tori admits a complete finite-volume hyperbolic metric on its interior. A conjecture of Gabai, Meyerhoff, and Milley reduces to a computation using this algorithm. \end{abstract} \section{Introduction} The work of J\o rgensen, Thurston, and Gromov in the late '70s showed (\cite{Thurston82}) that the set of volumes of orientable hyperbolic 3-manifolds has order type $\omega^\omega$. Cao and Meyerhoff in \cite{CM01} showed that the first limit point is the volume of the figure eight knot complement. Agol in \cite{Agol10} showed that the first limit point of limit points is the volume of the Whitehead link complement. Most significantly for the present paper, Gabai, Meyerhoff, and Milley in \cite{GMM09} identified the smallest, closed, orientable hyperbolic 3-manifold (the Weeks-Matveev-Fomenko manifold). The proof of the last result required distinguishing hyperbolic 3-manifolds from non-hyperbolic 3-manifolds in a large list of 3-manifolds; this was carried out in \cite{Milley}. The method of proof was to see whether the \texttt{canonize} procedure of SnapPy (\cite{SnapPy}) succeeded or not; identify the successes as census manifolds; and then examine the fundamental groups of the 66 remaining manifolds by hand. This method made the analysis of non-hyperbolic Mom-4 manifolds, of which there are 762 combinatorial types, prohibitively time-consuming.
The algorithm presented here determines whether or not a compact 3-manifold admits a complete finite-volume hyperbolic metric, i.e.\,is \emph{hyperbolic}, assuming the manifold in question has nonempty boundary consisting of tori. The Mom-4s have such boundaries. The current implementation of this algorithm using Regina (see \cite{Regina}) classifies them, yielding the following result. \begin{table} \begin{center} \begin{tabular}{| r | r | r | r | r | r | r |}\hline m125 & m129 & m202 & m203 & m292 & m295 & m328\\ m329 & m357 & m359 & m366 & m367 & m388 & m391\\ m412 & s441 & s443 & s503 & s506 & s549 & s568\\ s569 & s576 & s577 & s578 & s579 & s596 & s602\\ s621 & s622 & s638 & s647 & s661 & s774 & s776\\ s780 & s782 & s785 & s831 & s843 & s859 & s864\\ s880 & s883 & s887 & s895 & s898 & s906 & s910\\ s913 & s914 & s930 & s937 & s940 & s941 & s948\\ s959 & t10281 & t10700 & t11166 & t11710 & t12039 & t12044\\ t12046 & t12047 & t12048 & t12049 & t12052 & t12053 & t12054\\ t12055 & t12057 & t12060 & t12064 & t12065 & t12066 & t12067\\ t12143 & t12244 & t12412 & t12477 & t12479 & t12485 & t12487\\ t12492 & t12493 & t12496 & t12795 & t12840 & t12841 & t12842\\ v2124 & v2208 & v2531 & v2533 & v2644 & v2648 & v2652\\ v2731 & v2732 & v2788 & v2892 & v2942 & v2943 & v2945\\ v3039 & v3108 & v3127 & v3140 & v3211 & v3222 & v3223\\ v3224 & v3225 & v3227 & v3292 & v3294 & v3376 & v3379\\ v3380 & v3383 & v3384 & v3385 & v3393 & v3396 & v3426\\ v3429 & v3450 & v3456 & v3468 & v3497 & v3501 & v3506\\ v3507 & v3518 & v3527 & v3544 & v3546 & & \\\hline \end{tabular} \caption{Names of hyperbolic Mom-4s.}\label{tbl:all} \end{center} \end{table} \begin{Thm} Table \ref{tbl:all} constitutes the complete list of hyperbolic Mom-4s. \qed \end{Thm} \begin{proof} Put the Python modules \texttt{fault} and \texttt{mom} in one directory also containing the data \texttt{test\textunderscore mom4s\textunderscore out.txt} from \cite{GMM11}. 
Then from a \texttt{bash} prompt in a POSIX environment, run \begin{verbatim} python mom.py test_mom4s_out.txt | grep \| | awk -F \| '{print $1}' \end{verbatim} One finds that the resulting output has 138 lines, each of which is a distinct name of a cusped manifold on a census, either the original SnapPea census or Thistlethwaite's more recent census of manifolds with eight tetrahedra. Therefore, Conjecture 5.3 from \cite{GMM11} is correct. \end{proof} Of course, now we should discuss what is in these modules. \begin{Rmk} The author would like to thank Tao Li, for helpful discussions about normal surface theory and Seifert fiberings; Dave Futer for pointing out an error in a previous version of the paper; and Neil Hoffman for suggesting the use of Berge and Gabai's work for an improved $T^2 \times I$-homeomorphism test. \qed \end{Rmk} \section{Background} \textbf{Conventions.} All manifolds herein are assumed to be compact and piecewise-linear. All maps between these are assumed to be piecewise-linear and proper (that is, such that the preimages of compacta are again compacta). In particular, all homeomorphisms are piecewise-linear with piecewise-linear inverses. \qed Thurston's hyperbolicity theorem for Haken manifolds merits a succinct formulation. Shoving some complications from the original theorem into definitions and restricting attention to manifolds with nonempty torus boundary yields the following theorem. \begin{Thm}[\cite{Thurston82}, Thm. 2.3]\label{thm:common} Let $M$ be a compact orientable 3-manifold with nonempty boundary consisting of tori. $M$ is hyperbolic with finite volume if and only if $M$ has no faults. \qed \end{Thm} The above uses the following definitions. \begin{Def} A manifold is \emph{hyperbolic} when its interior admits a complete hyperbolic metric---a complete Riemannian metric of constant negative curvature. \qed \end{Def} \begin{Def} Let $s$ be an embedding of a manifold into a connected manifold $M$.
By abuse of notation, also let $s$ denote the image of $s$ in $M$. Suppose $s$ has codimension 1. Pick a metric on $M$ compatible with its p.l. structure, and let $M'$ be the path-metric completion of $M \smallsetminus s$. When $M'$ is disconnected, $s$ \emph{separates} $M$. When $M'$ has two connected components $N,N'$, $s$ \emph{cuts off $N$ from $M$}, or, if $M$ is understood from context, \emph{$s$ cuts off $N$}. If $N$ is homeomorphic to some common 3-manifold $X$, $s$ \emph{cuts off an $X$}; if, in addition, $N'$ is not homeomorphic to $X$, $s$ cuts off \emph{one} $X$. \qed \end{Def} \begin{Def} A properly embedded surface $s$ in an orientable 3-manifold $M$ is a \emph{fault} when $\chi(s) \geq 0$ and it satisfies one of the following: \begin{itemize} \item $s$ is nonorientable. \item $s$ is a sphere that does not cut off a 3-ball. \item $s$ is a disc that does not cut off one 3-ball. \item $s$ is a torus that does not cut off a $T^2 \times I$, and does not cut off a $\partial$-compressible manifold. \item $s$ is an annulus that does not cut off a 3-ball, and does not cut off one solid torus, and $M$ has none of the above types of fault. \end{itemize} \qed \end{Def} \begin{proof} This is a corollary of common knowledge surrounding Thurston's hyperbolization theorem for Haken manifolds. Specifically, it's commonly known that an irreducible, $\partial$-incompressible, geometrically atoroidal 3-manifold with nonempty boundary consisting of tori is either hyperbolic or Seifert-fibered. All Seifert-fibered spaces with at least two boundary components admit essential tori, which are faults. A Seifert-fibered space with one boundary component may admit no essential tori. In this case, the base orbifold $\Sigma$ is a disc with at most two cone points. Let $\alpha$ be a properly embedded arc which separates $\Sigma$ into discs with at most one cone point each; then the vertical fiber over $\alpha$ is an annulus fault.
Hence all Seifert-fibered spaces with nonempty boundary admit faults. Consequently, a compact orientable 3-manifold with nonempty boundary consisting of tori which admits no faults is irreducible, $\partial$-incompressible, Haken, and geometrically atoroidal, and it admits no annulus faults. So it must be hyperbolic. In fact, Thurston proved something more, namely that unless this manifold is $T^2 \times I$, then its metric has finite volume. Now, $T^2 \times I$ admits faults---non-separating annuli, in fact. Since we assumed the manifold had no faults, its metric must have finite volume. Conversely, orientable hyperbolic 3-manifolds of finite volume admit no orientable faults---they have no essential spheres, no compressing discs, no incompressible tori which aren't $\partial$-parallel, and no annuli which are both incompressible and $\partial$-incompressible. Finally, orientable hyperbolic 3-manifolds of finite volume don't admit any faults at all, since they admit no properly embedded nonorientable surfaces of nonnegative Euler characteristic. \end{proof} \section{A hyperbolicity algorithm} We can turn Theorem \ref{thm:common} into an algorithm as follows. \begin{Def} A 3-triangulation is a face-pairing of distinct tetrahedra. It is \emph{valid} when it does not identify an edge to itself backwards. It is \emph{material} when the link of every vertex is a sphere or disc. \qed \end{Def} \begin{Thm}\label{thm:ideal} Let $T$ be a valid material triangulation of a compact orientable 3-manifold $M$. Then $M$ has a fault precisely when $T$ has a fundamental fault. \qed \end{Thm} \begin{proof} This is a simple consequence of standard results in normal surface theory. If $M$ has an embedded projective plane or essential sphere, then it has such a surface among fundamental normal surfaces (\cite{Matveev}, Theorem 4.1.12).
If $M$ is irreducible and has a compressing disc, then it has a compressing disc among fundamental normal surfaces (\cite{Matveev}, proof of Theorem 4.1.13). If $M$ is $\partial$-irreducible and has an embedded Klein bottle or essential torus, then it has such a surface among fundamental normal surfaces (\cite{Matveev}, Lemma 6.4.7). If $M$ is $\partial$-irreducible and has an embedded essential annulus or M\"{o}bius band, then it has such a surface among fundamental normal surfaces (\cite{Matveev}, Lemma 6.4.8). Thus, if $M$ has an essential sphere, disc, torus, or annulus, or a nonorientable surface of nonnegative Euler characteristic, then $T$ has such a surface among its fundamental surfaces. Essential surfaces are the same as faults, except for essential annuli. An essential annulus might not be a fault, since it might cut off one solid torus. If $M$ is not irreducible, $\partial$-irreducible, and geometrically atoroidal, then $M$ has a non-annulus fault, and one can find such a fault among fundamental surfaces. Otherwise, $M$ is either hyperbolic or Seifert-fibered. If $M$ is hyperbolic then it has no faults. If $M$ is not hyperbolic, then since it is geometrically atoroidal, it is Seifert-fibered over $S^2$ with three exceptional fibers, over $D^2$ with at most two exceptional fibers, or over the annulus with at most one exceptional fiber. In the first case, $M$ has no faults. Consider the second case. With one exceptional fiber, $M$ is a solid torus and has a compressing disc. With two exceptional fibers, $M$ has up to isotopy only one essential surface, a vertical annulus separating the exceptional fibers. This annulus cuts off \emph{two} solid tori, not just one, so it is a fault. Consider now the third case. With no exceptional fibers, $M$ is $T^2\times I$. Up to equivalence, $M$ has only one essential annulus, $\gamma \times I$ with $\gamma$ essential in $T^2$. 
This annulus cuts $M$ into one solid torus, but does not cut this torus \emph{off}, so this annulus is a fault. With one exceptional fiber, $M$ has three essential annuli up to isotopy, all vertical: two recurrent annuli that cut off one solid torus, and one nonseparating annulus $A$. We contend that there is a surface isotopic to $A$ among the fundamental surfaces, or there is some other, more easily detectable fault. Indeed, we can isotope $A$ to be normal. Suppose $A = A' + A''$. If, say, $A'$ had positive Euler characteristic, then $A''$ would be compressible, and would compress to a surface isotopic to $A$, since $A$ is essential. Let $D$ be a complete set of compressing discs for $A''$, compressing it to $A$. After normalizing this surface following the shrinking moves of \cite{JR03}, one has a normal surface isotopic to $A$, but with smaller total weight. Suppose instead that $\chi(A') = \chi(A'') = 0$. If, say, $A'$ were an annulus, then it would necessarily also be a nonseparating annulus, but of less total weight. If, instead, neither were an annulus, then both would be M\"{o}bius strips. But then $M$ would have an embedded M\"{o}bius strip, a fault. \end{proof} Assuming $T$ is an ideal triangulation of a compact orientable 3-manifold $M$ with nonempty boundary consisting of tori, Algorithm \ref{alg:hyp} determines whether or not $M$ is hyperbolic. \begin{algorithm} \caption{Hyperbolicity test for link exteriors}\label{alg:hyp} \begin{algorithmic}[1] \Procedure{Hyp}{$T$}\Comment{$T$ is assumed to be valid, material, orientable, and not closed.} \State Let $l$ be the list of fundamental normal surfaces in $T$. 
\If{$l$ has an embedded nonorientable surface $\Sigma$ with $\chi(\Sigma) \geq 0$} \State \textbf{return} false \ElsIf{$l$ has an essential sphere} \State \textbf{return} false \ElsIf{$l$ has a compressing disc} \State \textbf{return} false \ElsIf{$l$ has an essential torus} \State \textbf{return} false \ElsIf{$l$ has an essential annulus} \State \textbf{return} false \Else \State {\textbf{return} true} \EndIf \EndProcedure \end{algorithmic} \end{algorithm} Of course, this algorithm depends upon enumerating fundamental normal surfaces, and upon determining whether or not a normal surface is a fault. Now, any connected compact nonorientable surface of nonnegative Euler characteristic is a fault. A sphere is a fault when, as above, it does not cut off one 3-ball. Regina has methods for cutting along surfaces and determining whether or not a 3-manifold is a 3-ball. So we can readily determine whether or not a sphere is a fault in Regina. A disc is a fault when the same thing happens. So we could detect whether or not a disc is a fault in Regina. An annulus is a fault when it does not cut off a 3-ball and does not cut off one solid torus. Regina also has a test for homeomorphism to the solid torus. So we can test whether or not an annulus is a fault in Regina. Finally, a torus is a fault when it does not cut off a component admitting a compressing disc, and does not cut off a $T^2\times I$. Regina also has a test for admitting a compressing disc, but does not have a test for homeomorphism to $T^2\times I$. So to implement the above hyperbolicity algorithm, it remains for us to implement a $T^2\times I$-homeomorphism test. Such tests already exist in the literature, but have not been implemented using triangulations due to their reliance on boundary patterns. \section{A new test for homeomorphism to $T^2\times I$} We can notice first that admitting a non-separating annulus is a necessary condition for being $T^2 \times I$. 
We note that a further necessary condition for being $T^2 \times I$ is that splitting along any such annulus is a solid torus. Now, if a 3-manifold $M$ split along a non-separating annulus is a solid torus, then $M$ is a Seifert fibering with base orbifold an annulus or M\"{o}bius band with at most a single cone point, i.e. $M = M(0,2;r)$ or $M = M(-1,1;r)$ for some $r \in \mathbb{Q}$. In the latter case, $M$ has only one boundary component, so it cannot possibly be $T^2 \times I$. Thus we may restrict our attention to the case $M = M(0,2;r)$. Recall the following results about Seifert fiberings: \begin{Prop}[\cite{Hatcher}, 2.1]\label{prp:h21} Every orientable Seifert fibering is isomorphic to one of the models $M(\pm g,b; s_1, \ldots, s_k).$ Any two Seifert fiberings with the same $\pm g$ and $b$ are isomorphic when their multisets of slopes are equal modulo 1 after removing integers, assuming $b > 0$. \qed \end{Prop} \begin{Thm}[\cite{Hatcher}, 2.3]\label{thm:h23} Orientable manifolds admitting Seifert fiberings have unique such fiberings up to isomorphism, except for $M(0,1;s)$ for all $s \in \mathbb{Q}$ (the solid torus), $M(0,1;1/2, 1/2) = M(-1,1;)$ (not the solid torus), and three others without boundary. \qed \end{Thm} It is quite easy to compute slopes differing mod 1 after simplifying the cusps' induced triangulations. \begin{Prop}\label{prp:neqv} In a triangulation of the torus $T^2$ by one vertex, for any nontrivial element $g$ of $H_1(T^2)$, the edges of the triangulation represent homology classes not all equivalent mod $g$. \qed \end{Prop} \begin{proof} Suppose $v,w,x \in H_1(T^2)$ and $v+w = x$. Let $\equiv$ denote equivalence in $H_1(T^2) \mod g$. If $v \equiv x$, then $v + w \equiv x + w$, i.e.\, $x \equiv x + w$ (since $v + w = x$). Thus $0 \equiv w$. Now, if it were the case that also $v \equiv w$, then also $v$ and $x$ would be $0$ mod $g$. But then $v,w,x$ would all be multiples of $g$. However, they generate $H_1(T^2)$, which is not cyclic.
That is a contradiction. So not all of $v,w,x$ are equivalent mod $g$. \end{proof} Although one could already use just the above theorems to develop a simpler test than the one in \cite{JT95} or \cite{Matveev} using boundary patterns, it still involves a search for annuli, and cuts along such annuli. Searches for annulus faults are expensive. Neil Hoffman has kindly called my attention to the work of Berge and Gabai on knots in solid tori (see \cite{Berge} and \cite{Gabai89}, \cite{Gabai90}). This work enables the following simple algorithm to determine homeomorphism to $T^2 \times I$. \begin{Thm} Algorithm \ref{alg:t2i} determines whether or not a compact, orientable, 3-manifold $M$ with nonempty boundary consisting of tori is $T^2 \times I$. \begin{algorithm} \caption{Homeomorphism to $T^2 \times I$}\label{alg:t2i} \begin{algorithmic}[1] \Procedure{$T^2\times I$?}{$T$} \If{$T$ is not a homology $T^2\times I$} \State \textbf{return false} \EndIf \State Simplify the boundary components of $T$ to have one vertex each. \State Pick a boundary component $\kappa$ of $T$; it has three edges. \ForAll{edges $e$ of $\kappa$} \State Let $T_e$ be $T$ folded along $e$. \If{not $D^2\times S^1$?$(T_e)$} \State \textbf{return false} \EndIf \EndFor \State \textbf{return true} \EndProcedure \end{algorithmic} \end{algorithm} \qed \end{Thm} \begin{proof} If $T$ Dehn fills to a manifold which is not a solid torus, then $T$ is not $T^2 \times I$. Thus the \texttt{return False} statements are correct. It remains to show that if $T$ Dehn fills to $D^2 \times S^1$ along the three given slopes, then in fact $T$ is $T^2 \times I$. $T$ Dehn fills to $D^2 \times S^1$, so it is the complement of a knot $k$ in $D^2 \times S^1$. $k$ admits a nontrivial filling (actually, two fillings) to $D^2 \times S^1$, so by Theorem 1.1 of \cite{Gabai89}, $k$ is either a 0- or 1-bridge braid. (In particular, $T$ is irreducible, so we need not even test irreducibility of $T$.) 
Now, 1-bridge braids which are not 0-bridge admit only two $D^2 \times S^1$ fillings, by Lemma 3.2 of \cite{Gabai90}. Therefore, $k$ is 0-bridge. Furthermore, by Example 3.1 of \cite{Gabai90}, for every nontrivial 0-bridge knot complement $N$ in $D^2 \times S^1$ with knot-neighborhood boundary $T$, there is a slope $\beta$ on $T$ such that for every slope $\alpha$ on $T$ along which $N$ fills to a $D^2 \times S^1$, we have $\langle \alpha, \beta \rangle = 1 \mod 2$. But we've found three $D^2 \times S^1$ slopes $a,b,c$ such that $a + b + c = 0 \mod 2$. If $T$ were a nontrivial 0-bridge knot complement, then \[ 0 = \langle a + b + c, \beta \rangle = \langle a, \beta \rangle + \langle b, \beta \rangle + \langle c, \beta \rangle = 1 + 1 + 1 = 1 \mod 2, \] impossible. Therefore, $T$ is a trivial 0-bridge knot complement. That is, $T = T^2 \times I$. \end{proof} It remains to describe how to ``simplify'' a triangulation to induce a minimal triangulation on a boundary component, usually called a \emph{cusp} in this context; and how to fill along a slope in a simplified cusp. One may find an algorithm in SnapPy for simplifying boundary components, a special, simpler case of which is presented here.\footnote{SnapPy's approach to cusp filling is reminiscent of the \emph{layered-triangulations} developed in \cite{JR06}.} There is a more efficient method for accomplishing such a simplification, but it requires the technique of \emph{crushing} developed in \cite{JR03}. We use the following terminology. \begin{Def} First, suppose $M$ is materially triangulated. Let $T$, $T'$ be boundary triangles adjacent along an edge $e$. Orient $e$ so that $T$ lies to its left and $T'$ to its right. Let $\Delta$ be a fresh tetrahedron, and let $\tau$, $\tau'$ be boundary triangles of $\Delta$ adjacent along an edge $\eta$. Orient $\eta$ so that $\tau$ lies to its left and $\tau'$ to its right.
Without changing $M$'s topology we may glue $\Delta$ to $T$ by gluing $\eta$ to $e$, $\tau$ to $T'$ and $\tau'$ to $T$. This is called a \emph{two-two} move. \qed \end{Def} In the above definition, the edge $\eta'$ opposite $\eta$ in $\Delta$ becomes a boundary edge of the new material triangulation. \begin{Def} We say $e$ is \emph{embedded} when its vertices are distinct. We say $e$ is \emph{coembedded} when $\eta'$ as defined above is embedded. Equivalently, $e$ is coembedded when the vertices in $T,T'$ opposite $e$ are distinct. Given a boundary edge $e$ between two boundary triangles $T$ and $T'$, one may glue $T$ to $T'$ and $e$ to itself via a valid, orientation-reversing map from $T$ to $T'$. In \cite{Regina} and \cite{SnapPy} this is called the \emph{close-the-book move along $e$}. \qed \end{Def} \begin{Prop} Given a material triangulation $D$ with boundary consisting of tori, Algorithm \ref{alg:bdy} constructs a material triangulation $T$ of the same underlying space such that $T$ induces a one-vertex triangulation on every boundary component. \qed \end{Prop} \begin{algorithm} \caption{Boundary simplification}\label{alg:bdy} \begin{algorithmic}[1] \Procedure{$\partial$-simplify}{$T$} \State Let $D$ be a copy of $T$ \While{$D$ has an embedded boundary edge $e$} \State Layer on $e$. \While{$D$ has a coembedded boundary edge $f$} \State Close the book along $f$. \EndWhile \EndWhile \EndProcedure \end{algorithmic} \end{algorithm} \begin{proof} Two-two moves never change the topology. Suppose $e$ is a boundary edge of $T$ lying in distinct triangles $t$ and $t'$. Closing the book along $e$ will leave the topology of $T$ invariant if and only if the vertices opposite $e$ in $t$ and $t'$ are distinct in $T$, i.e.\,if and only if $e$ is coembedded. 
Notice that closing the book along a coembedded edge decreases the number of boundary triangles, and performing a two-two move on an embedded edge produces a coembedded edge and preserves the number of boundary triangles. Therefore, the above \texttt{while} loops terminate, using number of boundary triangles as a variant function. The obvious postcondition of the outer while loop is that there is no embedded boundary edge. Since the boundary is still triangulated, this is equivalent to each boundary component having only one vertex on it. \end{proof} \begin{Rmk} The routine in SnapPy is more complicated because, rather than filling in a cusp any old way, SnapPy wants to make sure the filling compresses some given slope in the cusp. \end{Rmk} This concludes the present sketch of an algorithm to determine hyperbolicity of a compact, orientable 3-manifold with nonempty boundary consisting of tori. Both literate and raw implementations of this algorithm as a Regina-Python module \texttt{unhyp} reside at \cite{carrot}. Also available at \cite{carrot} is a Regina Python module \texttt{mom} for interpreting Milley's data as manifolds in Regina. \section{Two Useful Heuristics} Although the above does yield an algorithm to determine hyperbolicity, it frequently happens that one can easily disprove hyperbolicity from a group presentation. The following is a particularly useful kind of presentation. \begin{Def} A \emph{common axis relator} is a word of the form $a^p b^q$ or $a^p b^q a^{-p} b^{-q} (= [a^p,b^q])$ for some integers $p,q$. A \emph{common axis presentation} is a finite presentation with two generators, and with at least one common axis relator. \end{Def} \begin{Lem} A group admitting a common axis presentation is not the fundamental group of a hyperbolic link exterior. \end{Lem} \begin{proof} Suppose $G$ is a subgroup of $Isom^+(H^3)$ admitting a common axis presentation, i.e. 
admitting generators $a,b$ such that for some $p,q \in \mathbf{Z}$, $a^p b^q$ or $[a^p,b^q]$ is trivial. In this case, $a$ and $b$ must have the same axis in $H^3$. But then since $a$ and $b$ generate $G$, $G$ preserves an axis in $H^3$. If this axis is a line, then $a$ and $b$ are commuting loxodromic elements. If this axis is a point at infinity, then $a$ and $b$ are commuting parabolic elements. In both cases, $G$ is abelian, and therefore does not have finite-volume quotient. But the metric on a hyperbolic link exterior has finite volume. \end{proof} Moreover, it frequently happens that a small triangulation of a hyperbolic 3-manifold admits a \emph{strict angle structure}; such structures themselves constitute proofs of hyperbolicity (see \cite{FuterGueritaud}). Regina can very often calculate these structures as well; this proves hyperbolicity without a laborious normal surface enumeration. \section{Further Directions} Neil Hoffman and I have recently gotten results on the complexity of the hyperbolicity problem, which we are in the process of writing up. Among other things we will show that the problem of hyperbolicity is in the complexity class \textbf{coNP} for nontrivial link exteriors, assuming that $S^3$-recognition is in \textbf{coNP}. \end{document}
\begin{document} \title{Unbalanced urn model with random addition } \author{ Aguech Rafik\thanks{(Corresponding author) Departement of Statistics and Operation research, King Saoud University, Riyadh, Riyadh 11692, K.S.A. E-mail: [email protected]} \thanks{D\'epartement de Math\'ematiques, Facult\'e des Sciences de Monastir, Avenue de l'environnement 5019, Monastir, Tunisia. E-mail: [email protected]}, \and Lasmar Nabil\thanks{D\'epartement de Math\'ematiques, Institut Pr\'eparatoire aux \'Etudes d'ing\'enieurs de Monastir, Avenue de l'environnement 5019, Monastir, Tunisia. E-mail: [email protected]} \and Selmi Olfa\thanks{D\'epartement de Math\'ematiques, Facult\'e des Sciences de Monastir, Avenue de l'environnement 5019, Monastir, Tunisia. E-mail: [email protected]} } \maketitle \begin{abstract} In this paper, we consider a multi-drawing urn model with random addition. At each discrete time step, we draw a sample of $m$ balls. According to the composition of the drawn colors, we return the balls together with a random number of balls depending on two discrete random variables $X$ and $Y$ with finite means and variances. Via the stochastic approximation algorithm, we give limit theorems describing the asymptotic behavior of white balls. \\ \noindent \textbf{Keywords:} unbalanced urn, martingale, stochastic algorithm, central limit theorem. \end{abstract} \section{Introduction} The classical P\'olya urn was introduced by P\'olya and Eggenberger \cite{Polya} describing contagious diseases. The first model is as follows: An urn contains balls of two colors at the start, white and black. At each step, one picks a ball randomly and returns it to the urn with a ball of the same color. Afterward this model was generalized and it has become a simple tool to describe several models such as finance, clinical trials (see \cite{Pages}, \cite{Wei}), biology (see \cite{bio}), computer sciences, internet (see \cite{Mahmoud},\cite{Goldman}), etc. \\ Recently, H. Mahmoud, M.R.
Chen, C.Z. Wei, M. Kuba and H. Sulzbach \cite{Kuba-Mahmoud-Panholzer,Chen-Kuba,Chen-Wei,kuba-Zulzbach,Kuba-mahmoud,Kuba-Mahmoud2}, have focused on the multidrawing urn. Instead of picking a ball, one picks a sample of $m$ balls ($m\geq 1$), say $l$ white and $m-l$ black balls. The pick is returned to the urn together with $a_{m-l}$ white and $b_{l}$ black balls, where $a_l$ and $b_l, 0\leq l\leq m$ are integers. At first, they treated two particular cases when \{$a_{m-l}=c\times l \quad \text{and}\quad b_{m-l}=c\times (m-l)$\} and when \{$a_{m-l}=c\times (m-l)$ \quad\text{and}\quad $b_{m-l}=c\times l$\}, where $c$ is a positive constant. By different methods such as martingales and moment methods, the authors described the asymptotic behavior of the urn composition. When considering the general case and in order to ensure the existence of a martingale, they supposed that $W_n$, the number of white balls in the urn after $n$ draws, satisfies the affinity condition, i.e., there exist two deterministic sequences $(\alpha_n)$ and $(\beta_n)$ such that, for all $n\geq 0$, $\mathbb{E}[W_{n+1}|\mathcal{F}_n]=\alpha_n W_{n}+\beta_n$. Under this condition, the authors focused on small and large index urns. Later, the affinity condition was removed in the work of C. Mailler, N. Lasmer and S. Olfa \cite{C.N.O}, they generalized this model and looked at the case of more than two colors.\\ In the present paper, we deal with an unbalanced urn model, which has not been sufficiently addressed in the literature. It was mainly dealt with in the works of R. Aguech \cite{R.Aguech}, S. Janson \cite{S. Janson} and H. Renlund \cite{Renlund1, Renlund2}. In \cite{R.Aguech} and \cite{S. Janson}, the authors dealt with a model with a simple pick, whereas in \cite{Renlund1,Renlund2} the author considered a model with two picks and, under some conditions, they described the asymptotic behavior of the urn composition. In this paper, we aim to give a generalization of a recent work \cite{A.L.O}.
We deal with an unbalanced urn model with random addition. We consider an urn containing two different colors white and blue. We suppose that the urn is non empty at time 0. Let us denote by $W_n$ (resp $B_n$) the number of white balls (resp blue balls) and by $T_n$ the total number of balls in the urn at time $n$. Let $(X_n)_{n\geq 0}$ and $(Y_n)_{n\geq 0}$ be strictly positive sequences of independent identically distributed discrete random variables with finite means and variances. The model we study is defined as follows: At a discrete time, we pick out a sample of $m$ balls from the urn (we suppose that $T_0=W_0+B_0\geq m$) and according to the composition of the sample, we return the balls with $Q_n(\xi_n,m-\xi_n)^t$ balls, where $Q_n$ is a $2\times 2$ matrix depending on the variables $X_n$ and $Y_n$ and $\xi_n$ is the number of white balls in the $n^{th}$ sample. Let $(\mathcal{F}_n)_{n\ge 0}$ be the $\sigma$-field generated by the first $n$ draws. We summarize the evolution of the urn by the recurrence \begin{equation}\label{recurrence} \begin{pmatrix} W_{n} \\ B_{n} \\ \end{pmatrix}\stackrel{\mathcal D}{=} \begin{pmatrix} W_{n-1} \\ B_{n-1} \\ \end{pmatrix}+Q_n \begin{pmatrix} \xi_{n} \\ m-\xi_{n} \\ \end{pmatrix}. \end{equation} Note that, with these notations, we have \begin{equation*}\mathbb{P}[\xi_n=k|\mathcal{F}_{n-1}]=\displaystyle\frac{\binom {W_{n-1}} k \binom {B_{n-1}} {m-k}}{\binom {T_{n-1}} m}.\end{equation*} The paper is organized as follows. In Section \ref{Main results}, we give the main results of the paper. In the first paragraph of Section \ref{Proofs}, we develop Theorem 1 of \cite{Renlund1} and apply it to our urn model. The rest of this section is devoted to the proofs of the theorems. \textbf{Notation:} For a random variable $R$, we denote by $\mu_R=\mathbb{E}(R)$ and $\sigma_R^2=\mathbb{V}ar(R)$.
Note that $\mu_X, \mu_Y ,\sigma^2_X$ and $\sigma^2_Y$ are finite.\\ \section{Main Results}\label{Main results} \begin{thm}\label{thmXopp} Consider the urn model evolving by the matrix $Q_n= \begin{pmatrix} 0 & X_n \\ X_n & 0 \\ \end{pmatrix}$. We have the following results: \begin{enumerate} \item \begin{equation}\label{asymp-T_n}T_n\stackrel{a.s}{=}m\mu_X n +o(\sqrt{n}\ \ln(n)^\delta),\end{equation} \begin{equation} W_n\stackrel{a.s}{=}\frac{m\mu_X }{2}n+o(\sqrt{n}\ \ln(n)^{\delta})\quad \text{and}\quad B_n\stackrel{a.s}{=}\frac{m\mu_X }{2}n+o(\sqrt{n}\ \ln(n)^{\delta});\quad\delta>\frac{1}{2}.\end{equation} \item \begin{equation}\frac{W_n-\frac{1}{2}T_n}{\sqrt{n}} \stackrel{\mathcal{L}}{\longrightarrow}\mathcal{N}\Big(0,\frac{m(\sigma_X^2+\mu_X^2)}{12}\Big).\end{equation} \item \begin{equation}\frac{W_n-\mathbb{E}(W_n)}{\sqrt{n}}\stackrel{\mathcal{L}}{\longrightarrow} \mathcal{N}\Big(0,\frac{m(\sigma_X^2+\mu_X^2)+m^2\sigma^2_X}{12}\Big).\end{equation} \end{enumerate} \end{thm} \begin{thm}\label{thmXself} Consider the urn model evolving by the matrix $Q_n= \begin{pmatrix} X_n & 0 \\ 0 & X_n \\ \end{pmatrix}$. There exists a positive random variable $\tilde W_\infty$, such that \begin{equation}T_n\stackrel{a.s}{=}m\mu_X n +o(\sqrt{n}\ \ln(n)^\delta),\quad W_n\stackrel{a.s}{=}\tilde W_\infty n +o(n) \quad\mbox{and}\;\;B_n\stackrel{a.s}{=} (m\mu_X-\tilde W_\infty)n+o(n). \end{equation} \end{thm} \begin{rmq} The random variable $\tilde W_\infty$ is absolutely continuous whenever $X$ is bounded. 
\end{rmq} \begin{thm}\label{thmXYopp} Consider the urn model evolving by the matrix $Q_n= \begin{pmatrix} 0 & X_n \\ Y_n & 0 \\ \end{pmatrix}.$ Let $z:=\frac{\sqrt{\mu_X}}{{\sqrt{\mu_X}+\sqrt{\mu_Y}}}$, we have the following results: \begin{enumerate} \item \begin{equation}\label{SL-Total}T_n\stackrel{a.s}{=}m\sqrt{\mu_X}\sqrt{\mu_Y}\ n+o(n),\end{equation} \begin{equation} W_n\stackrel{a.s}{=}m\sqrt{\mu_X}\sqrt{\mu_Y}\ z\ n+o(n)\quad\text{and}\quad B_n\stackrel{a.s}{=}m\sqrt{\mu_X}\sqrt{\mu_Y}(1-z)\ n+o(n).\end{equation} \item \begin{equation}\frac{W_n-z T_n}{\sqrt{n}}\stackrel{\mathcal{L}}{\longrightarrow}\mathcal{N}\Big(0,\frac{ G(z)}{3}\Big),\end{equation} where, \begin{equation*} G(x)=\sum_{i=0}^4a_ix^i,\end{equation*} with \begin{eqnarray*}a_0=m^2(\sigma^2_X+\mu_X^2)&,&a_1=m(1-2m)(\sigma_X^2+\mu_X^2),\\ a_2=3m(m-1)(\sigma_X^2+\mu_X^2)-2m(m-1)\mu_X\mu_Y &,& a_3=m\mathbb{E}(X-Y)^2-2(m^2-m)\bigl(\sigma_X^2+\mu_X^2-\mu_X\mu_Y\bigr)\\ \text{and}\quad a_4=m(m-1)\mathbb{E}(X-Y)^2.\end{eqnarray*} \end{enumerate} \end{thm} \begin{thm}\label{thmXYself} Consider the urn evolving by the matrix $Q_n= \begin{pmatrix} X_n & 0 \\ 0 & Y_n \\ \end{pmatrix}.$ We have the following results: \begin{enumerate} \item If $\mu_X > \mu_Y$, \begin{equation} T_n\stackrel{a.s}{=}m\mu_Xn+o(n),\quad W_n\stackrel{a.s}{=}m\mu_Xn+o(n)\quad \text{and}\quad B_n\stackrel{a.s}{=}B_{\infty}n^\rho+o(n^\rho), \end{equation} where $\rho=\frac{\mu_Y}{\mu_X}$ and $B_{\infty}$ is a positive random variable. \item If $\mu_X=\mu_Y$, \begin{equation}T_n\stackrel{a.s}{=}m\mu_Xn+o(n),\quad W_n\stackrel{a.s}{=}W_{\infty}n+o(n)\quad \text{and}\quad B_n\stackrel{a.s}{=}(\mu_Xm-W_{\infty})\ n+o(n),\end{equation} where $W_\infty$ is a positive random variable. \end{enumerate} \end{thm} \begin{rmq} The case when $\mu_X<\mu_Y$ is obtained by interchanging the colors. \end{rmq} \textbf{Example:} Let $m=1$, this particular case was studied by R. Aguech \cite{R.Aguech}. Using martingales and branching processes, R.
Aguech proved the following results:\\ if $\mu_X>\mu_Y$, \begin{equation*}W_n=\mu_X n+o(n),\quad B_n=D n^\rho\quad \text{and}\quad T_n=\mu_X n+o(n),\end{equation*} where $D$ is a positive random variable.\\ If $\mu_X=\mu_Y$, \begin{equation*}W_n=\mu_X \frac{W}{W+B}n+o(n)\quad \text{and}\quad B_n=\mu_X\frac{B}{W+B}n+o(n),\end{equation*} where $W$ and $B$ are positive random variables obtained by embedding some martingales in continuous time. \section{Proofs}\label{Proofs} The stochastic algorithm approximation plays a crucial role in the proofs in order to describe the asymptotic composition of the urn. As many versions of the stochastic algorithm exist in the literature (see \cite{Duflo} for example), we adapt the version of H. Renlund in \cite{Renlund1, Renlund2}. \subsection{A basic tool: Stochastic approximation} \begin{df}\label{def-algo} A stochastic approximation algorithm $(U_n)_{n\geq 0}$ is a stochastic process taking values in $[0,1]$ and adapted to a filtration $\mathcal{F}_n$ that satisfies \begin{equation}\label{eq:algo_sto} U_{n+1}-U_n = \gamma_{n+1}\big(f(U_n)+\Delta M_{n+1}\big), \end{equation} where $(\gamma_n)_{n\geq 1}$ and $(\Delta_n)_{n\geq 1}$ are two $\mathcal F_n$-measurable sequences of random variables, $f$ is a function from $[0,1]$ onto $\mathbb R$ and the following conditions hold almost surely. \begin{description} \item [(i)]$\frac{c_l}{n}\leq \gamma_n \leq \frac{c_u}{n}$, \item [(ii)]$|\Delta M_n|\leq K_u,$ \item [(iii)]$ |f(U_n)|\leq K_f,$ \item [(iv)]$ \mathbb E[\gamma_{n+1} \Delta M_{n+1}| \mathcal F_n] \leq K_e \gamma_n^2,$ \end{description} where the constants $c_l, c_u, K_u, K_f, $ and $K_e$ are positive real numbers. \end{df} \begin{df} Let $Q_f=\{x; f(x)=0\}.$ A zero $p\in Q_f$ will be called stable if there exists a neighborhood $\mathcal{N}_p$ of $p$ such that $f(x)(x-p)<0$ whenever $x\in \mathcal{N}_p\setminus\{p\}.$ If $f$ is differentiable, then $f'(p)$ is sufficient to determine that $p$ is stable. 
\end{df} \begin{thm}[\cite{Renlund1}]\label{th:renlund}Let $U_n$ be a stochastic algorithm defined in Equation (\ref{eq:algo_sto}). If $f$ is continuous, then $\displaystyle\lim_{n\rightarrow +\infty} U_n$ exists almost surely and is in $Q_f$. Furthermore, if $p$ is a stable zero, then $\mathbb{P}\Big(U_n\longrightarrow p\Big)>0.$ \end{thm} \begin{rmq} The conclusion of Theorem \ref{th:renlund} holds if we replace the condition $(ii)$ in Definition \ref{def-algo} by the following condition $\mathbb{E}[\Delta M_{n+1}^2|\mathcal{F}_n]\leq K_u$. \end{rmq} \begin{proof}[Proof of Theorem \ref{th:renlund}] For the convenience of the reader, we adapt the proof of Theorem \ref{th:renlund} and we show that, under the new condition $(ii) \quad\mathbb{E}[\Delta M_{n+1}^2|\mathcal{F}_n]\leq K_u$, the conclusion remains true. In fact, the following lemmas are useful.\\ \begin{lem}\label{V_n converges} Let $V_n=\sum_{i=1}^n\gamma_i\Delta M_i$. Then, $V_n$ converges almost surely. \end{lem} \begin{proof} Set $A_i=\gamma_i\Delta M_i$ and $\tilde A_i=\mathbb{E}[A_i|\mathcal{F}_{i-1}].$ Define the martingale $C_n=\sum_{i=1}^n(A_i-\tilde A_i),$ then \begin{eqnarray*} \mathbb{E}(C_n^2)&\leq&\sum_{i=1}^n\mathbb{E}(A_i^2)=\sum_{i=1}^n\mathbb{E}(\gamma_i^2\Delta M_i^2)\\ &\leq&\sum_{i=1}^n\frac{c_u^2}{i^2}\mathbb{E}(\Delta M_i^2), \end{eqnarray*} if there exists some positive constant $K_u$ such that $\mathbb{E}[\Delta M_{n+1}^2|\mathcal{F}_n]\leq K_u$, we conclude that $C_n$ is an $L^2$-martingale and thus converges almost surely.\\ Next, since \[\sum_{i\geq 1}|\tilde A_i|\leq\sum_{i\geq 1}\frac{c_u^2}{(i-1)^2}K_e<+\infty,\] the series $\sum_{i\geq 1}A_i$ must also converge almost surely. \end{proof} \begin{lem}\label{Q_f}Let $U_\infty$ be the set of accumulation points of $\{U_n\}$ and $Q_f=\{x; f(x)=0\}$ be the zeros of $f$. Suppose $f$ is continuous.
Then, \[\mathbb{P}\Big(U_\infty \subseteq Q_f\Big)=1.\] \end{lem} \begin{proof} See \cite{Renlund1} \end{proof} Next, we prove the main result of the theorem. If $\displaystyle\lim_{n\rightarrow +\infty} U_n$ does not exist, we can find two rational numbers in the open interval $]\displaystyle\liminf_{n\rightarrow +\infty} U_n, \displaystyle\limsup_{n\rightarrow +\infty} U_n[$.\\ Let $p<q$ be two arbitrary different rational numbers. If we can show that \[\mathbb{P}\Big(\{\liminf U_n \leq p\}\cap\{\limsup U_n \geq q\}\Big)=0,\] then, the existence of the limit will be established and the claim of the theorem follows from Lemma \ref{Q_f}.\\ For this reason, we need to distinguish two different cases whether or not $p$ and $q$ are in the same connected component of $Q_f$.\\ \textbf{Case 1: $p$ and $q$ are not in the same connected component of $Q_f$.}\\ See the proof in \cite{Renlund1}.\\ \textbf{Case 2: $p$ and $q$ are in the same connected component of $Q_f$.}\\ Let $p$ and $q$ be two arbitrary rational numbers such that $p$ and $q$ are in the same connected component of $Q_f$. Assume that $\displaystyle\liminf_{n\rightarrow +\infty} U_n \leq p$ and fix an arbitrary $\varepsilon$ in such a way that $0< \varepsilon \leq q-p$.\\ We aim to show that $\displaystyle\limsup_{n\rightarrow +\infty} U_n\leq q$, i.e., it is sufficient to show that $\displaystyle\limsup_{n\rightarrow +\infty} U_n\leq p+\varepsilon.$\\ In view of Lemma \ref{V_n converges}, the series $V_n=\sum_{i=1}^n\gamma_i\Delta M_i$ converges a.s., then, for a stochastic $N_1> 0$, for $n,m> N_1$ we have $|V_n-V_m|<\frac{\varepsilon}{4}$ and $\gamma_n\Delta M_n\leq \frac{\varepsilon}{4}$.\\ Let $N=\max\bigl(\frac{4K_f}{\varepsilon},N_1\bigr)$.
By assumption, there is some stochastic $n>N$ such that $U_n-p< \frac{\varepsilon}{2}$.\\ Let \[\tau_1=\inf\{k\geq n; U_k\geq p\}\quad \text{and}\quad \sigma_1=\inf\{k>\tau_1; U_k<p\},\] and define, for $n\geq 1,$ \[\tau_{n+1}=\inf \{k>\sigma_n; U_k\geq p\}\quad \sigma_{n+1}=\inf \{k>\tau_{n+1}; U_k<p\}.\] Now, for all $k$ we have \[U_{\tau_k}=U_{\tau_k-1}+\gamma_{\tau_k-1}(f(U_{\tau_k-1})+\Delta M_{\tau_k}).\] Recall that $\gamma_{\tau_k-1}f(U_{\tau_k-1})\leq \frac{K_f}{\tau_{k}-1}\leq \frac{K_f}{n}$, for $n\geq N\geq \frac{4K_f}{\varepsilon}$ we have $\gamma_{\tau_k-1}f(U_{\tau_k-1})<\frac{\varepsilon}{4}$. It follows, \[\gamma_{\tau_k-1}(f(U_{\tau_k-1})+\Delta M_{\tau_k})\leq \frac{K_f}{n}+\frac{\varepsilon}{4}\leq \frac{\varepsilon}{4}+\frac{\varepsilon}{4} =\frac{\varepsilon}{2}.\] Note that $f(x)=0 $ when $x \in [ p,q ]$ ($p$ and $q$ are in $Q_f$). For $j$ such that $\tau_k+j-1$ is a time before the exit time of the interval $[p,q]$, we have \[U_{\tau_k+j}=U_{\tau_k}+V_{\tau_k+j}-V_{\tau_k}.\] As $|V_{\tau_k+j}-V_{\tau_k}|<\frac{\varepsilon}{4},$ we have $U_{\tau_k+j}\leq p+\frac{\varepsilon}{2}+\frac{\varepsilon}{4}\leq p+\varepsilon,$ so the process will never exceed $p+\varepsilon$ before $\sigma_{k+1}$. We conclude that $\sup_{k\geq n} U_k\leq p+\varepsilon.$\\ To establish that the limit is a stable point, we refer the reader to \cite{Renlund1} to see a detailed proof. \end{proof} \begin{thm}[\cite{Renlund2}]\label{clt-renlund} Let $(U_n)_{n\geq 0}$ satisfying Equation~\eqref{eq:algo_sto} such that $\displaystyle\lim_{n\to +\infty} U_n = U^\star$. Let $\hat \gamma_n:= n\gamma_n \hat f(U_{n-1})$ where $\hat f(x) = \frac{-f(x)}{x-U^\star}$. Assume that $\hat \gamma_n$ converges almost surely to some limit $\hat \gamma$.
Then, if $\hat\gamma > \frac12$ and if $\mathbb E[(n\gamma_n \Delta M_n)^2|\mathcal F_{n-1}] \to \sigma^2 > 0$, then \[\sqrt n (U_n -U^\star) \to \mathcal N\Big(0, \frac{\sigma^2}{2\hat\gamma -1}\Big).\] \end{thm} \subsection{Proof of the main results} \begin{proof}[Proof of Theorem \ref{thmXopp}] Consider the urn model defined in Equation (\ref{recurrence}) with $Q_n= \begin{pmatrix} 0 & X_n \\ X_n & 0 \\ \end{pmatrix}$. We have the following recursions: \begin{equation}\label{recurrence-opp2}W_{n+1}=W_n+X_{n+1}(m-\xi_{n+1})\quad \text{and} \quad T_{n+1}=T_n+mX_{n+1}.\end{equation} \textbf{Proof of claim 1} \begin{lem} Let $Z_n=\frac{W_n}{T_n}$ be the proportion of white balls in the urn after $n$ draws. Then, $Z_n $ satisfies the stochastic approximation algorithm defined by (\ref{eq:algo_sto}) with $\gamma_n=\frac{1}{T_n}$, $f(x)=\mu_X m(1-2x)$ and $\Delta M_{n+1}=X_{n+1}(m-\xi_{n+1}-mZ_n)-\mu_X m(1-2Z_n)$. \end{lem} \begin{proof} We need to check the conditions of Definition \ref{def-algo}.\\ \begin{description} \item [(i)] Recall that $T_n=T_0+m\sum_{i=1}^nX_i$, where $(X_i)_{i\geq 1}$ are iid random variables. It follows, by the Rajchman strong law of large numbers, that \begin{equation}\label{asymp-T_n}T_n\stackrel{a.s}{=}\mu_X mn +o(\sqrt{n}\ \ln(n)^\delta),\quad \delta >\frac{1}{2},\end{equation} it follows that $\frac{1}{(m\mu_X+1)n}\leq\frac{1}{T_n}\leq\frac{2}{m\mu_X n},$ then, $c_l=\frac{1}{m\mu_X+1}$ and $c_u=\frac{2}{m\mu_X},$ \item[(ii)] $\mathbb{E}[\Delta M_{n+1}^2|\mathcal{F}_n]\leq (6m^2+m)\mathbb{E}(X^2)+9m^2\mu_X^2=K_u,$\\ \item[(iii)] $|f(Z_n)|=m\mu_X|1-2Z_n|\leq 3m\mu_X=K_f$,\\ \item[(iv)]$\mathbb{E}(\gamma_{n+1}\Delta M_{n+1}|\mathcal{F}_n)\leq \frac{1}{T_n}\mathbb{E}(\Delta M_{n+1}|\mathcal{F}_n)=0=K_e$. \end{description} \end{proof} \begin{prop}\label{prop1/2} The proportion of white balls in the urn after $n$ draws, $Z_n$, converges almost surely to $\frac{1}{2}$.
\end{prop} \begin{proof}[Proof of Proposition \ref{prop1/2}] Since the process $Z_n$ satisfies the stochastic approximation algorithm defined by Equation (\ref{eq:algo_sto}), we apply Theorem \ref{th:renlund}. As the function $f$ is continuous we conclude that $Z_n$ converges almost surely to $\frac{1}{2}$: the unique stable zero of the function $f$. \end{proof} We apply the previous results to the urn composition. As we can write $\frac{W_n}{n}=\frac{W_n}{T_n}\frac{T_n}{n}$, we deduce from Proposition \ref{prop1/2} and Equation (\ref{asymp-T_n}) that $\frac{W_n}{n}\stackrel{a.s}{=}\bigl(\frac{1}{2}+o(1)\bigr)\Bigl(\mu_Xm+o\bigl(\frac{\ln(n)^\delta}{\sqrt{n}}\bigr)\Bigr),$ then this corollary follows: \begin{cor} The number of white balls in the urn after $n$ draws, $W_n$, satisfies for $n$ large enough \begin{equation*}W_n\stackrel{a.s}{=}\frac{\mu_X m}{2}n+o(\sqrt{n}\ \ln(n)^\delta),\quad \delta >\frac{1}{2}.\end{equation*} \end{cor} \textbf{Proof of claim 2} We aim to apply Theorem \ref{clt-renlund}. For this reason, we need to find the following limits: \begin{equation*} \lim_{n\rightarrow \infty}\mathbb{E}[\bigl(\frac{n}{T_n}\bigr)^2\Delta M_{n+1}^2|\mathcal{F}_n]\quad \text{and}\quad \lim_{n\rightarrow \infty}-\frac{n}{T_n}f'(Z_n). \end{equation*} We have \begin{eqnarray*} \mathbb{E}[\Delta M_{n+1}^2|\mathcal{F}_n]&=&\mathbb{E}(X_{n+1}^2)\mathbb{E}[(m-\xi_{n+1}-mZ_n)^2|\mathcal{F}_n]+\mu_X^2\mathbb{E}[(m-2mZ_n)^2|\mathcal{F}_n] \\ &&-2\mu_X^2\mathbb{E}[(m-\xi_{n+1}-mZ_n)(m-2mZ_n)|\mathcal{F}_n]\\ &=&(\sigma_X^2+\mu_X^2)\Big[m^2-4m^2Z_n+4m^2Z_n^2+mZ_n(1-Z_n)\frac{T_n-m}{T_n-1}\Big]-\mu_X^2[m^2+4m^2Z_n^2-4m^2Z_n]. \end{eqnarray*} As $n$ tends to infinity, we have $Z_n \stackrel{a.s}{\longrightarrow} \frac{1}{2}$ and $\frac{T_n-m}{T_n-1}\stackrel{a.s}{\longrightarrow} 1$.
Then, \begin{equation*}\lim_{n\rightarrow \infty}\mathbb{E}[\Delta M_{n+1}^2|\mathcal{F}_n]\stackrel{a.s}{=}(\sigma_X^2+\mu_X^2)\frac{m}{4}\quad \text{and}\quad \lim_{n\rightarrow \infty}-\frac{n}{T_n}f'(Z_n)\stackrel{a.s}{=}2.\end{equation*} According to Theorem \ref{clt-renlund}, $\sqrt{n}(Z_n-\frac{1}{2})$ converges in distribution to $\mathcal{N}(0,\frac{\sigma_X^2+\mu_X^2}{12\mu_X^2m})$. Finally, by writing $\Big(\frac{W_n-\frac{1}{2}T_n}{\sqrt{n}}\Big)=\sqrt{n}(Z_n-\frac{1}{2})\frac{T_n}{n}$, we conclude using Slutsky theorem.\\ \textbf{Proof of claim 3} To prove this claim, we follow the proof of Lemma 3 and Theorem 2 in \cite{A.L.O}. Using the same methods, we show in a first step that the variables $(X_n(m-\xi_n))_{n\geq 0}$ are $\alpha$-mixing variables with a strong mixing coefficient $\alpha(n)=o\Big(\frac{\ln(n)^\delta}{\sqrt{n}}\Big)$, $\delta >\frac{1}{2}$. To conclude, we adapt the Bernstein method. Consider the same notation as in Theorem 2 in \cite{A.L.O}, and define $S_n=\frac{1}{\sqrt{n}}\sum_{i=1}^n\tilde\xi_i$ where $\tilde\xi_i=X_i(m-\xi_i)-\mu_X(m-\mathbb{E}(\xi_i))$. At first, we need to estimate the variance of $W_n$. 
\begin{prop}\label{var} The variance of $W_n$ satisfies \begin{equation}\label{variance}\mathbb{V}ar(W_n)=\frac{m(\sigma_X^2+\mu_X^2)+m^2\sigma_X^2}{12}\ n+o(\sqrt{n}\ \ln(n)^\delta),\quad \delta>\frac{1}{2}.\end{equation} \end{prop} \begin{proof}[Proof of Proposition \ref{var}] Recall that the number of white balls in the urn satisfies Equation (\ref{recurrence-opp2}), then \begin{equation*}\mathbb{V}ar(W_n)=\mathbb{V}ar(W_{n-1})+\mathbb{V}ar(X_n(m-\xi_n))+2\ \mathbb{C}ov(W_{n-1},X_n(m-\xi_n)).\end{equation*} We have $\mathbb{V}ar(X_n(m-\xi_n))=(\sigma_X^2+\mu_X^2)\Big(\mathbb{V}ar(mZ_{n-1}) +\mathbb{E}\Big(mZ_{n-1}(1-Z_{n-1})\frac{T_{n-1}-m}{T_{n-1}-1}\Big)\Big)+\sigma_X^2\mathbb{E}(m-\xi_n)^2.$ Using Equation (\ref{asymp-T_n}) and the fact that $Z_n\stackrel{a.s}{\rightarrow}\frac{1}{2}$, we obtain \begin{eqnarray*}\mathbb{V}ar(W_{n+1})&=&\Big(1-\frac 2n+o\Big(\frac{\ln(n)^\delta}{n^{\frac32}}\Big)\Big)\mathbb{V}ar(W_{n}) +\frac{m(\sigma_X^2+\mu_X^2)+m^2\sigma_X^2}{4}+o\Big(\frac{\ln(n)^\delta}{\sqrt n}\Big)\\ &=&a_n\mathbb{V}ar(W_{n})+b_n,\end{eqnarray*} where $a_n=\Bigl(1-\frac 2n+o\Big(\frac{\ln(n)^\delta}{n^{\frac32}}\Big)\Bigr)$ and $b_n=\frac{m(\sigma_X^2+\mu_X^2)+m^2\sigma_X^2}{4}+o\Big(\frac{\ln(n)^\delta}{\sqrt n}\Big).$\\ Thus, \begin{equation*} \mathbb{V}ar(W_n)=\Big(\prod_{k=1}^n a_k\Big)\Big(\mathbb{V}ar(W_0)+\sum_{k=0}^{n-1}\frac{b_k}{\prod_{j=0}^ka_j}\Big). \end{equation*} There exists a constant $a$ such that $\prod_{k=1}^na_k=\displaystyle\frac{e^{a}}{n^2}\Big(1+o\Big(\frac{\ln(n)^\delta}{\sqrt n}\Big)\Big)$, which leads to \begin{equation*} \mathbb{V}ar(W_n)=\frac{m(\sigma_X^2+\mu_X^2)+m^2\sigma_X^2}{12}n+o(\sqrt n\ln(n)^\delta),\quad \delta>\frac{1}{2}.
\end{equation*} \end{proof} Recall that we follow the proof of Theorem 2 in \cite{A.L.O}, using Equation (\ref{variance}), we conclude that \begin{equation}\frac{W_n-\mathbb{E}(W_n)}{\sqrt{n}}\stackrel{\mathcal{L}}{\longrightarrow} \mathcal{N}\Bigl(0,\frac{m(\sigma_X^2+\mu_X^2)+m^2\sigma_X^2}{12}\Bigr). \end{equation} \end{proof} \begin{proof}[Proof of Theorem \ref{thmXself}] Consider the urn model defined in (\ref{recurrence}) with $Q_n= \begin{pmatrix} X_n & 0 \\ 0 & X_n \\ \end{pmatrix}$. The following recurrences hold: \begin{equation}\label{W-self} W_{n+1}=W_n+X_{n+1}\xi_{n+1}\quad \text{and}\quad T_{n+1}=T_n+mX_{n+1}. \end{equation} As $T_n$ is a sum of iid random variables then $T_n$ satisfies the following \begin{equation}\label{totalself}T_n\stackrel{a.s}{=}\frac{\mu_Xm}{2}n+o(\sqrt{n}\ln(n)^\delta).\end{equation} The processes $\tilde M_{n}=\prod_{k=1}^{n-1}\Big(\frac{T_k}{T_k+m\mu_X}\Big)W_n$ and $\tilde N_n=\prod_{k=1}^{n-1}\Big(\frac{T_k}{T_k+m\mu_X}\Big)B_n$ are two $\mathcal{F}_n$ positive martingales. In view of (\ref{totalself}), we have $\prod_{k=1}^{n-1}\Big(\frac{T_k}{T_k+m\mu_X}\Big)\stackrel{a.s}{=}\displaystyle\frac{e^{\gamma}}{n}\Big(1+o\Big(\frac{\ln (n)^\delta}{\sqrt n}\Big)\Big)$ for a positive constant $\gamma$. Thus, there exists nonnegative random variables $\tilde W_\infty$ and $\tilde B_{\infty}$ such that $\tilde W_\infty+\tilde B_\infty\stackrel{a.s}{=}m\mu_X$ and \begin{equation*}\frac{W_n}{n}\stackrel{a.s}{\longrightarrow}\tilde W_\infty,\quad \text{and}\quad \frac{B_n}{n}\stackrel{a.s}{\longrightarrow}\tilde B_{\infty}.\end{equation*} \textbf{Example: } In the original P\`olya urn model \cite{Polya}, when $m=1$ and $X=C$ (deterministic), the random variable $\tilde W_\infty/C$ has a $Beta(\frac{B_0}{C},\frac{W_0}{C})$ distribution \cite{Athreya-Ney,S. Janson}. Whereas, M.R. Chen and M. Kuba \cite{Chen-Kuba} considered the case when $X=C$ (non random) and $m>1$. 
They gave moments of all orders of $W_n$ and proved that $\tilde W_\infty$ cannot be an ordinary $Beta$ distribution. \begin{rmq} Suppose that the random variable $X$ has moments of all orders, let $\:m_k=E(X^k)$, for $ k\ge 1$. We have, almost surely, $W_n\le T_n$ then, by the Minkowski inequality, we obtain $\mathbb{E}(W_n^{2k})\leq (mn)^{2k}\mathbb{E}(X^{2k})$. Using Carleman's condition we conclude that, if $\sum_{k\ge1}m_{2k}^{-\frac{1}{2k}}=\infty$, then the random variable $\tilde W_\infty$ is determined by its moments. Unfortunately, so far we are still unable to give exact expressions of the moments of all orders of $W_n$. But, we can characterize the distribution of $\tilde W_\infty$ in the case when the variable $X$ is bounded. \end{rmq} \begin{lem} \label{Abs_con} Assume that $X$ is a bounded random variable, then, for fixed $W_0,B_0$ and $m$ the random variable $\tilde W_\infty$ is absolutely continuous. \end{lem} The proof that $\tilde W_\infty$ is absolutely continuous is very close to that of Theorem 4.2 in \cite{Chen-Wei}. We give the main proposition to make the proof clearer. \begin{prop}\cite{Chen-Wei} Let $\Omega_\ell$ be a sequence of increasing events such that $\mathbb{P}(\cup_{\ell \ge 0}\Omega_\ell)=1$. If there exist nonnegative Borel measurable functions $\{f_\ell\}_{\ell\geq 1}$ such that $\mathbb{P}\Big(\Omega_\ell\cap \tilde W_\infty^{-1}(B)\Big)=\int_Bf_\ell (x)dx$ for all Borel sets $B$, then, $f=\displaystyle\lim_{\ell\rightarrow+\infty}f_\ell$ exists almost everywhere and $f$ is the density of $\tilde W_\infty$. \end{prop} Let $(\Omega,\mathcal F,\mathcal{P})$ be a probability space. Suppose that there exists a constant $A$ such that, we have almost surely, $X\le A$. \begin{lem} Define the events \begin{equation*} \Omega_{\ell}:=\{W_\ell\ge m A \;\mbox{and}\;B_\ell\ge mA\}, \end{equation*} then, $(\Omega_{\ell})_{\ell\geq 0}$ is a sequence of increasing events, moreover we have $\mathbb{P}(\cup_{\ell \ge 0}\Omega_\ell)=1$.
\end{lem} Next, we just need to show that the restriction of $\tilde W_\infty$ on $\Omega_{\ell,j}=\{\omega; W_\ell(\omega)=j\}$ has a density for each $j$, with $Am\leq j\leq T_{\ell-1}.$ Let $(p_c)_{c\in\text{supp}(X)}$ the distribution of $X$. \begin{lem} For a fixed $\ell>0$, there exists a positive constant $\kappa$, such that, for every $c\in\text{supp(X)}$, $n\ge \ell+1$, $Am\le j\le T_{\ell-1}$ and $k\le Am(n+1)$, we have \begin{equation} \label{Inequality_WEI} \sum_{i=0}^m \mathbb{P}(W_{n+1}=j+k|W_n=j+k-ci)\le p_c(1-\frac 1n+\frac{\kappa}{n^2}). \end{equation} \end{lem} \begin{proof} According to Lemma 4.1 \cite{Chen-Wei}, for $Am \leq j\leq T_{\ell -1}$, $n\geq \ell$ and $k\leq Am(n+1)$, the following holds: \begin{equation}\label{step2}\sum_{i=0}^m{j+c(k-i)\choose i}{T_n-j-c(k-i)\choose m-i}=\frac{T_n^m}{m!}+\frac{(1-m-2c)T_n^{m-1}}{2(m-1)!}+...,\end{equation} which is a polynomial in $T_n$ of degree $m$ with coefficients depending on $W_0, B_0, m$ and $c$ only.\\ Let $u_{n,k}(c)=\sum_{i=0}^m \mathbb{P}(W_{n+1}=j+k|W_n=j+k-ic)$. Applying Equation (\ref{step2}) to our model we have \begin{eqnarray} \label{Majoration1} u_{n,k}(c)&=&p_c\sum_{i=0}^m{j+k\choose i}{T_n-j-k\choose m-i}{T_n\choose m}^{-1} \nonumber \\ &=&p_c{T_n\choose m}^{-1}\Big(\frac{T_n^{m}}{m!}+ \frac{(1-m-2c)}{(m-1)!}T_n^{m-1}+\ldots\Big)\Big(\frac{T_n^m}{m!}+\frac{(1-m)}{2(m-1)!}T_n^{m-1}+\ldots\Big)^{-1}\nonumber \\ &\stackrel{a.s}{=}& p_c\Big(1-\frac{1}{n}+O\Big(\frac{1}{n^2}\Big)\Big). \end{eqnarray} \end{proof} Later, we will limit the proof by mentioning the main differences with Lemma 4.1 \cite{Chen-Wei}. For a fixed $\ell$ and $n\ge \ell+1$, we denote by $v_{n,j}=\displaystyle\max_{0\leq k\leq Amn}\mathbb{P}\bigl(W_{\ell+n}=j+k|W_\ell=j\bigr)$. 
We have the following inequality: \begin{eqnarray*} v_{n+1,j}&\le & \max_{0\le k\le Am(n+1)}\Big\{\sum_{i=0}^m\sum_{c\in\text{supp}{(X)}}\mathbb{P}(W_{\ell+ n+1}=j+k|W_{\ell+n}=j+k-ci)\Big\}\nonumber \\ &\le& \max_{0\le k\le Am(n+1)}\Big\{\sum_{i=0}^m\sum_{c\in\text{supp}{(X)}}\mathbb{P}(W_{\ell+n+1}=j+k|W_{\ell+n}=j+k-ci)\nonumber\\ &&\times \mathbb{P}(W_{\ell+n}=j+k-ci|W_\ell=j)\Big\}\nonumber\\ &\le&\max_{0\le k\le Am(n+1)}\sum_{i=0}^m\sum_{c\in\text{supp}{(X)}}\mathbb{P}(W_{\ell+n+1}=j+k|W_{\ell+n}=j+k-ci)\\ &&\times \max_{0\leq \tilde k\leq Amn}\mathbb{P}\bigl(W_{\ell+n}=j+\tilde k|W_\ell =j\bigr)\\ &\le&\sum_{c\in\text{supp}{(X)}}p_c\Big(1-\frac{1}{n+\ell}+\frac{\kappa}{(n+\ell)^2}\Big)v_{n,j}\\&&=\Big(1-\frac{1}{n+\ell}+\frac{\kappa}{(n+\ell)^2}\Big)v_{n,j}. \end{eqnarray*} This implies that there exists some positive constant $C(\ell)$, depending on $\ell$ only, such that, for a fixed $\ell$ and for all $n\ge \ell+1$, we get \begin{equation} \max_{0\leq k\leq m(n-\ell)}\mathbb{P}\bigl(W_n=j+k|W_\ell=j\bigr)\le\prod_{i=\ell}^n\Big(1-\frac1i+\frac{\kappa}{i^2}\Big)\le \frac{C(\ell)}{n}. \end{equation} The rest of the proof follows. \end{proof} \begin{proof}[Proof of Theorem \ref{thmXYopp}] Consider the urn model evolving by the matrix $Q_n= \begin{pmatrix} 0 & X_n \\ Y_n & 0 \\ \end{pmatrix}$. According to Equation (\ref{recurrence}), we have the following recursions: \begin{equation}\label{opposite-rec}W_{n+1}=W_n+X_{n+1}(m-\xi_{n+1})\quad \text{and}\quad T_{n+1}=T_n+mX_{n+1}+\xi_{n+1}(Y_{n+1}-X_{n+1}).\end{equation} \begin{lem} The proportion of white balls after $n$ draws, $Z_n$, satisfies the stochastic algorithm defined by (\ref{eq:algo_sto}), where $f(x)=m(\mu_X-\mu_Y)x^2-2\mu_Xmx+\mu_Xm$, $\gamma_n=\frac{1}{T_n}$ and $\Delta M_{n+1}=D_{n+1}-\mathbb{E}[D_{n+1}|\mathcal{F}_n]$, with $D_{n+1}=\xi_{n+1}(Z_n(X_{n+1}-Y_{n+1})-X_{n+1})+mX_{n+1}(1-Z_n)$.
\end{lem} \begin{proof} We check the conditions of Definition \ref{def-algo}, indeed, \begin{description} \item[(i)] recall that $T_n=T_0+m\sum_{i=1}^nX_i+\sum_{i=1}^n\xi_i(Y_i-X_i)$, then $\frac{T_n}{n}\leq\frac{T_0}{n}+\frac{m}{n}\sum_{i=1}^nX_i+\frac{m}{n}\sum_{i=1}^n|Y_i-X_i|.$ By the strong law of large numbers we have $\frac{T_n}{n}\leq m(\mu_X+\mu_{|Y-X|})+1$. On the other hand, we have $T_n\geq \displaystyle\min_{1\leq i\leq n}(X_i, Y_i) m n,$ thus, the following bound holds \begin{equation*}\frac{1}{(m(\mu_X+\mu_{|Y-X|})+1)n}\leq \frac{1}{T_n}\leq \frac{1}{m \displaystyle\min_{1\leq i\leq n}(X_i,Y_i) n},\end{equation*} then $c_l=\frac{1}{m(\mu_X+\mu_{|Y-X|})+1}$ and $c_u=\frac{1}{m \displaystyle\min_{1\leq i\leq n}(X_i,Y_i)},$\\ \item[(ii)] $\mathbb{E}[\Delta M_{n+1}^2|\mathcal{F}_n] \leq (\mu_{(X-Y)^2}+3\mu_X)(m+m^2)+5m^2\mu_{X^2}+2m^2\mu_X\mu_Y+m^2(|\mu_X-\mu_Y|+3\mu_X)=K_u,$ \item[(iii)]$|f(Z_n)|\leq m(|\mu_Y-\mu_X|+3\mu_X)=K_f,$ \item[(iv)] $\mathbb{E}[\frac{1}{T_{n+1}}\Delta M_{n+1}|\mathcal{F}_n]\leq \frac{1}{T_n}\mathbb{E}[\Delta M_{n+1}|\mathcal{F}_n]=0$ \end{description} \end{proof} \begin{prop} The proportion of white balls in the urn after $n$ draws, $Z_n$, satisfies as $n$ tends to infinity \begin{equation}\label{conv-proportion}Z_n\stackrel{a.s}{\longrightarrow}z:=\frac{\sqrt{\mu_X}}{\sqrt{\mu_X}+\sqrt{\mu_Y}}.\end{equation} \end{prop} \begin{proof} The proportion of white balls in the urn satisfies the stochastic approximation algorithm defined in (\ref{eq:algo_sto}). As the function $f$ is continuous, by Theorem \ref{th:renlund}, the process $Z_n$ converges almost surely to $z=\frac{\sqrt{\mu_X}}{\sqrt{\mu_X}+\sqrt{\mu_Y}}$, the unique zero of $f$ with negative derivative. \end{proof} Next, we give an estimate of $T_n$, the total number of balls in the urn after $n$ draws, in order to describe the asymptotic of the urn composition.
By Equation (\ref{opposite-rec}), we have \begin{equation*}\frac{T_n}{n}=\frac{T_0}{n}+\frac{m}{n}\sum_{i=1}^nX_i +\frac{m(\mu_Y-\mu_X)}{n}\sum_{i=1}^nZ_{i-1}+\frac{1}{n}\sum_{i=1}^n\Big[\xi_i(Y_i-X_i)-\mathbb{E}[\xi_i(Y_i-X_i)|\mathcal{F}_{i-1}]\Big].\end{equation*} Since $(X_i)_{i\geq 1}$ are iid random variables, then by the strong law of large numbers we have $\frac{m}{n}\sum_{i=1}^nX_i\stackrel{a.s}{\rightarrow} m\mu_X$. Via Ces\'aro lemma, we conclude that $\frac{1}{n}\sum_{i=1}^nZ_{i-1}$ converges almost surely, as $n$ tends to infinity, to $z$. Finally, we prove that last term in the right side tends to zero, as $n$ tends to infinity. In fact, let $G_n=\sum_{i=1}^n\Big[\xi_i(Y_i-X_i)-\mathbb{E}[\xi_i(Y_i-X_i)|\mathcal{F}_{i-1}]\Big]$, then $(G_n,\mathcal{F}_n)$ is a martingale difference sequence such that \begin{equation*}\frac{<G>_n}{n}=\frac{1}{n}\sum_{i=1}^n\mathbb{E}[\nabla G_i^2|\mathcal{F}_{i-1}],\end{equation*} where $\nabla G_n=G_n-G_{n-1}=\xi_n(Y_n-X_{n})-\mathbb{E}[\xi_n(Y_n-X_{n})|\mathcal{F}_{n-1}]$ and $<G>_n$ denotes the quadratic variation of the martingale.\\ By a simple computation, we have the almost sure convergence of $\mathbb{E}[\nabla G_i^2|\mathcal{F}_{i-1}]$ to $(mz (1-z)+m^2z^2)(\sigma_Y^2+\sigma_X^2)$. Therefore, Ces\'aro lemma ensures that, $\frac{<G>_n}{n}$ converges to $(mz (1-z)+m^2z^2)(\sigma_Y^2+\sigma_X^2)$ and $\frac{G_n}{n}\stackrel{a.s}{\longrightarrow} 0$. Thus, for $n$ large enough we have \begin{equation}\label{T_n-convergence}\frac{T_n}{n}\stackrel{a.s}{\longrightarrow} m\sqrt{\mu_X}\sqrt{\mu_Y}.\end{equation} In view of Equation (\ref{T_n-convergence}), we describe the asymptotic behavior of the urn composition after $n$ draws. 
One can write $\frac{W_n}{n}=\frac{W_n}{T_n}\frac{T_n}{n}$ and $\frac{B_n}{n}\stackrel{a.s}{=}\frac{B_n}{T_n}\frac{T_n}{n}$, using Equations (\ref{conv-proportion}, \ref{T_n-convergence}) and Slutsky's theorem, we have, as $n$ tends to infinity, $\frac{W_n}{n}\stackrel{a.s}{\longrightarrow}m\sqrt{\mu_X}\sqrt{\mu_Y} z$ and $\frac{B_n}{n}\stackrel{a.s}{\longrightarrow} m\sqrt{\mu_X}\sqrt{\mu_Y}(1-z)$.\\ \textbf{Proof of claim 2}\\ Later, we aim to apply Theorem \ref{clt-renlund}. In our model, we have $\gamma_n=\frac{1}{T_n}$, then we need to control the following asymptotic behaviors \begin{equation*} \lim_{n\rightarrow +\infty}\mathbb{E}[\Big(\frac{n}{T_n}\Big)^2\Delta M_{n+1}^2|\mathcal{F}_n]\quad \text{and}\quad \lim_{n\rightarrow +\infty}-\frac{n}{T_n}f'(Z_n). \end{equation*} In fact, recall that $\frac{n}{T_n}$ converges almost surely to $\frac{1}{m\sqrt{\mu_X}\sqrt{\mu_Y}}$ and $\mathbb{E}[\Delta M_{n+1}^2|\mathcal{F}_n]=\mathbb{E}[D_{n+1}^2|\mathcal{F}_n]-\mathbb{E}[D_{n+1}|\mathcal{F}_n]^2$. Since $\mathbb{E}[D_{n+1}|\mathcal{F}_n]^2$ converges almost surely to $f(z)^2=0$, we have, \begin{eqnarray*}\mathbb{E}[D_{n+1}^2|\mathcal{F}_n]&=&\mathbb{E}\Big[Z_n^2(X_{n+1}-Y_{n+1})^2-2Z_nX_{n+1}+X_{n+1}|\mathcal{F}_n\Big] \mathbb{E}[\xi_{n+1}^2|\mathcal{F}_n]+m^2\mathbb{E}(X^2)\\&&+2m^2\Bigl(Z_n^2(\mathbb{E}(X^2)-\mu_X\mu_Y)-Z_n\mathbb{E}(X^2)\Bigr).\end{eqnarray*} Using the fact that $\mathbb{E}[\xi_{n+1}^2|\mathcal{F}_n]=mZ_n(1-Z_n)\frac{T_n-m}{T_n-1}+m^2Z_n^2$ and that $Z_n$ converges almost surely to $z$, we conclude that $\mathbb{E}[D_{n+1}^2|\mathcal{F}_n]$ converges almost surely to $G(z)>0.$ Applying Theorem \ref{clt-renlund}, we obtain the following \begin{equation}\sqrt{n}(Z_n-z)\stackrel{\mathcal{L}}{\longrightarrow}\mathcal{N}\Big(0,\frac{G(z)}{3m^2\mu_X\mu_Y}\Big). \end{equation} But, we can write $\frac{W_n-zT_n}{\sqrt{n}}=\sqrt{n}\bigl(\frac{W_n}{T_n}-z\bigr)\frac{T_n}{n}$. Thus, it is enough to use Slutsky's theorem to conclude the proof.
\end{proof} \begin{proof}[Proof of Theorem \ref{thmXYself}] Consider the urn model defined in (\ref{recurrence}) with $Q_n= \begin{pmatrix} X_n & 0\\ 0& Y_n \\ \end{pmatrix}$. The process of the urn satisfies the following recursions: \begin{equation}\label{recurence-self1} W_{n+1}=W_n+X_{n+1}\xi_{n+1}\quad \text{and} \quad T_{n+1}=T_n+mY_{n+1}+\xi_{n+1}(X_{n+1}-Y_{n+1}).\end{equation} \begin{lem}\label{algo} If $\mu_X\neq \mu_Y$, the proportion of white balls in the urn after $n$ draws satisfies the stochastic algorithm defined by (\ref{eq:algo_sto}) where $\gamma_n=\frac{1}{T_n}$, $f(x)=m(\mu_Y-\mu_X)x(x-1)$ and $\Delta M_{n+1}=D_{n+1}-\mathbb{E}[D_{n+1}|\mathcal{F}_n]$ with $D_{n+1}=\xi_{n+1}(Z_n(Y_{n+1}-X_{n+1})+X_{n+1})-mZ_nY_{n+1}$.\\ \end{lem} \begin{proof} We check that, if $\mu_X\neq\mu_Y$, the conditions of definition \ref{def-algo} hold. Indeed, \begin{description} \item[(i)] as $T_n=T_0+m\sum_{i=1}^nY_i+\sum_{i=1}^n\xi_i(X_i-Y_i)$, then via the strong law of large numbers we have $|\frac{T_n}{n}|\leq m\mu_Y+m\mu_{|X-Y|}+1$. 
On the other hand, we have $T_n\geq \min_{1\leq i\leq n}(X_i,Y_i) m n$, thus,\begin{equation*}\frac{1}{(m\mu_Y+m\mu_{|X-Y|}+1)n}\leq \frac{1}{T_n}\leq \frac{1}{\displaystyle\min_{1\leq i\leq n}(X_i,Y_i) m n},\end{equation*} \item[(ii)]$\mathbb{E}[\Delta M_{n+1}^2|\mathcal{F}_n]\leq (2m+m^2)(4\mu_{X^2}+\mu_{Y^2})+3m^2\mu_{Y^2}+2m^2\mu_X+2m^2\mu_X\mu_Y+4m^2(\mu_X-\mu_Y)^2=K_u,$\\ \item [(iii)]$|f(Z_n)|=|m(\mu_Y-\mu_X)Z_n(Z_n-1)|\leq 2m |\mu_Y-\mu_X|=K_f,$\\ \item[(iv)] $\mathbb{E}[\gamma_{n+1}\Delta M_{n+1}|\mathcal{F}_n]\leq \frac{1}{T_n}\mathbb{E}[\Delta M_{n+1}|\mathcal{F}_n]=0=K_e.$ \end{description} \end{proof} \end{proof} \begin{prop}\label{prop-self} The proportion of white balls in the urn after $n$ draws, $Z_n$, satisfies almost surely\\ $\displaystyle\lim_{n\rightarrow \infty}Z_n=\left\{ \begin{array}{ll} 1, & \hbox{ if }\mu_X>\mu_Y; \\ 0, & \hbox{ if }\mu_X<\mu_Y; \\ \tilde Z_\infty, & \hbox{ if }\mu_X=\mu_Y, \end{array} \right. $ \\ where $\tilde Z_\infty$ is a positive random variable. \end{prop} \begin{proof}[Proof of Proposition \ref{prop-self}] Recall that, if $\mu_X\neq \mu_Y$, $Z_n$ satisfies the stochastic algorithm defined in Lemma \ref{algo}. As the function $f$ is continuous, by Theorem \ref{th:renlund} we conclude that $Z_n$ converges almost surely to the stable zero of the function $f$ with a negative derivative, which is $1$ if $\mu_X>\mu_Y$ and $0$ if $\mu_X<\mu_Y.$\\ In the case when $\mu_X=\mu_Y$, we have $Z_{n+1}=Z_n+\frac{P_{n+1}}{T_{n+1}}$, where $P_{n+1}=X_{n+1}\xi_{n+1}-Z_n\bigl(mY_{n+1}+\xi_{n+1}(X_{n+1}-Y_{n+1})\bigr)$. Since $\mathbb{E}[P_{n+1}|\mathcal{F}_n]=0$, then $Z_n$ is a positive martingale which converges almost surely to a positive random variable $\tilde Z_\infty$.\\ As a consequence, we have \begin{cor} The total number of balls in the urn, $T_n$, satisfies as $n$ tends to infinity if $\mu_X\geq \mu_Y$ \begin{equation*} \frac{T_n}{n}\stackrel{a.s}{\longrightarrow}m\mu_X.
\end{equation*} \end{cor} \begin{proof} In fact, let $M_n=\sum_{i=1}^n\Big(\xi_i(X_i-Y_i)-\mathbb{E}[\xi_i(X_i-Y_i)|\mathcal{F}_{i-1}]\Big),$ we have \begin{eqnarray*}\frac{T_n}{n}&=&\frac{T_0}{n}+\frac{m}{n}\sum_{i=1}^nY_i+\frac{1}{n}\sum_{i=1}^n\xi_i(X_i-Y_i)\\ &=&\frac{T_0}{n}+\frac{m}{n}\sum_{i=1}^nY_i+ \frac{m(\mu_X-\mu_Y)}{n}\sum_{i=1}^nZ_{i-1}+\frac{M_n}{n}. \end{eqnarray*} As in the proof of the previous theorem, one shows that, as $n$ tends to infinity, we have $\frac{M_n}{n}\stackrel{a.s}{\longrightarrow} 0$. Recall that, if $\mu_X>\mu_Y$, $Z_n$ converges almost surely to $1$. Then, using Ces\'aro lemma, we obtain the requested limit. If $\mu_X=\mu_Y$, we have $\frac{1}{n}\sum_{i=1}^nY_i$ converges to $\mu_Y$. \end{proof} Using the results above, the convergence of the normalized number of white balls follows immediately. Indeed, if $\mu_X>\mu_Y$, we have, as $n$ tends to infinity, \[\frac{W_n}{n}=\frac{W_n}{T_n}\frac{T_n}{n}\stackrel{a.s}{\longrightarrow}m\mu_X.\] Let $\tilde G_n=\Bigl(\prod_{i=1}^{n-1}(1+\frac{m\mu_Y}{T_i})\Bigr)^{-1}B_n,$ then $(\tilde G_n,\mathcal{F}_n)$ is a positive martingale. There exists a positive number $A$ such that $\prod_{i=1}^{n-1}(1+\frac{m\mu_Y}{T_i})\simeq A n^{\rho}$.
Then, as $n$ tends to infinity we have \begin{equation*} \frac{B_n}{n^\rho}\stackrel{a.s}{\rightarrow} B_{\infty},\end{equation*} where $B_\infty$ is a positive random variable.\\ If $\mu_X=\mu_Y$, the sequences $\Bigl(\prod_{i=1}^{n-1}(1+\frac{m\mu_X}{T_i})\Bigr)^{-1}W_n$ and $\Bigl(\prod_{i=1}^{n-1}(1+\frac{m\mu_Y}{T_i})\Bigr)^{-1}B_n$ are $\mathcal{F}_n$ martingales such that $\Bigl(\prod_{i=1}^{n-1}(1+\frac{m\mu_X}{T_i})\Bigr)^{-1}\simeq B n,$ where $B>0$, then, as $n$ tends to infinity, we have \begin{equation*}\frac{W_n}{n}\stackrel{a.s}{\rightarrow} W_{\infty} \quad \text{and}\quad \frac{B_n}{n}\stackrel{a.s}{\rightarrow} \tilde B_{\infty},\end{equation*} where $W_{\infty}$ and $\tilde B_{\infty}$ are positive random variables satisfying $ \tilde B_{\infty}=m\mu_X-W_{\infty}.$ \end{proof} \begin{rmq} The case when $\mu_X<\mu_Y$ is obtained by interchanging the colors. In fact we have the following results: \begin{equation*}T_n\stackrel{a.s}{=}m\mu_Y n+o(n),\quad W_n=\tilde W_\infty n^\sigma+o(n)\quad \text{and} \quad B_n=m\mu_Yn+o(n),\end{equation*} where $\tilde W_\infty$ is a positive random variable and $\sigma=\frac{\mu_X}{\mu_Y}.$ \end{rmq} \end{document}
\begin{document} \title{Multi-parameter estimation beyond Quantum Fisher Information} \author{Rafa{\l} Demkowicz-Dobrza{\'n}ski$^1$, Wojciech G{\'{o}}recki$^1$, M\u{a}d\u{a}lin Gu\c{t}\u{a}$^2$} \address{$^1$ Faculty of Physics, University of Warsaw, Pasteura 5, PL-02093 Warsaw, Poland} \address{$^2$ University of Nottingham, School of Mathematical Sciences, University Park, NG7 2RD Nottingham, United Kingdom} \begin{abstract} This review aims at gathering the most relevant quantum multi-parameter estimation methods that go beyond the direct use of the Quantum Fisher Information concept. We discuss in detail the Holevo Cram\'er-Rao bound, the Quantum Local Asymptotic Normality approach as well as Bayesian methods. Even though the fundamental concepts in the field have been laid out more than forty years ago, a number of important results have appeared much more recently. Moreover, the field drew increased attention recently thanks to advances in practical quantum metrology proposals and implementations that often involve estimation of multiple parameters simultaneously. Since these topics are spread in the literature and often served in a very formal mathematical language, one of the main goals of this review is to provide a largely self-contained work that allows the reader to follow most of the derivations and get an intuitive understanding of the interrelations between different concepts using a set of simple yet representative examples involving qubit and Gaussian shift models. \end{abstract} \maketitle \section{Introduction} \subsection{Historical overview and motivation} From the very beginning of quantum estimation theory \cite{Belavkin1972, Yuen1973, Belavkin1976, Holevo1973, Holevo1977, Helstrom1976, Holevo1982} the simultaneous estimation of multiple parameters has been seen as a distinguished feature combining classical and quantum aspects of uncertainty. 
The pioneers of the newly emerging field realized that the non-commutativity of quantum theory lead to non-trivial trade-offs in multi-parameter estimation problems that are not present in classical as well as in single-parameter quantum models. The introduction of the symmetric logarithmic derivative (SLD) quantum Cram{\'e}r-Rao (CR) bound \cite{Helstrom1967} and the related concept of the Quantum Fisher Information (QFI) may be regarded as the starting point of quantum estimation theory. Soon thereafter it became clear that the extension of the single-parameter SLD CR bound to the multi-parameter scenario cannot account for the potential incompatibility of measurements optimal for extracting information on different parameters. This observation led to a development of new multi-parameter bounds including a bound based on the right logarithmic derivative (RLD) \cite{Yuen1973, Belavkin1976} and most notably the Holevo Cram{\'e}r-Rao bound (HCR) \cite{Holevo1973}. In parallel, multi-parameter quantum estimation problems have been analysed from a Bayesian perspective obtaining explicit solutions in case of some special cost functions and problems enjoying a sufficient symmetry \cite{Helstrom1976, Holevo1982}. After this `golden age of quantum estimation theory' came the `golden age of quantum metrology' with the seminal proposal of utilizing non-classical states of light in order to increase the sensitivity of interferometric gravitational wave detectors \cite{Caves1981}. In quantum metrology one no longer assumes that the parameters are encoded in quantum states in a fixed way, but rather considers probe states which evolve under a parameter dependent dynamics and are later measured in order to extract information about the parameters of interest \cite{Giovaennetti2006, Paris2009, Toth2014, Demkowicz2015, Dowling2015, Pezze2018, Pirandola2018, Braun2018, Degen2017}. This is an appropriate framework to understand e.g. 
the potential of utilizing non-classical states of light in optical interferometry, but introduces an additional challenge of identifying the input probe state that yields the maximal information about the dynamical parameters. The initial studies in quantum metrology focused mainly on the performance of particular estimation protocols utilizing standard error propagation formulas and some variants of the Heisenberg uncertainty relation as a benchmark \cite{Yurke1986, Xiao1987, Holland1993}. Only a few years later, the field eventually incorporated the methods developed earlier by the founders of quantum estimation theory \cite{Sanders1995, Bollinger1996, Huelga1997, Sarovar2006, Shaji2007, Monras2007, Dorner2008}. This was to a large extent due to the paper by Braunstein and Caves \cite{Braunstein1994} which sparked the interest in the QFI as a natural operationally meaningful metric in the space of quantum states. Since the most relevant interferometric models considered at that time involved single parameter estimation problems, the QFI appeared to be the quantity of choice for most studies. Thanks to its relatively simple structure, it was possible to develop efficient computational methods for the optimization of input states as well as for the derivation of universal fundamental bounds on the precision achievable in the most general quantum metrological protocols, not only in idealized noiseless models \cite{Lee2002, Giovaennetti2006} but also in the presence of generic uncorrelated noise models \cite{Fujiwara2008, Escher2011, Demkowicz2012, Kolodynski2013, Demkowicz2014, Knysh2014, Demkowicz2017, zhou2018achieving} as well as some models involving noise correlations \cite{Jeske2014, Frowis2014, Layden2019, Chabuda2020}. While the quantum metrology field developed both experimentally and theoretically, it became clear that single-parameter models are often an oversimplification of real-life metrological setups \cite{Szczykulska2016}. 
Simultaneous estimation of phase \emph{and} loss in optical interferometric experiments \cite{Crowley2014, Gessner2018, Proctor2018}, phase and dephasing coefficient in atomic interferometry \cite{Knysh2013, Vidrighin2014}, waveform estimation \cite{Tsang2011}, quantum imaging \cite{Tsang2016, Lupo2016, Chrostowski2017, Rehacek2017, Zhou2019}, multiple frequency estimation \cite{Gefen2019, Chen2019} or sensing of vector (e.g. magnetic) fields \cite{Baumgratz2016} are all problems that should be modelled within the multi-parameter estimation framework. Having a well developed quantum metrological toolbox based on the concept of the QFI at their disposal, researchers utilized it to address multi-parameter scenarios. The main quantity of interest became the QFI matrix which helped to obtain a useful insight into a number of multi-parameter problems---see a review paper \cite{Liu2019} which focuses on the properties and use of the QFI matrix in quantum metrology and beyond. This approach led to satisfactory results provided the issue of a measurement incompatibility was either absent or of marginal importance. In general, however, one may arrive at overly optimistic results by just focusing on the properties of the QFI matrix and in order to avoid it a more sophisticated approach may be required. This prompted a renewed interest in the estimation methods developed over forty years ago, and also led to new theoretical results and tools \cite{Nagaoka1989, D'Ariano.3, Gill2000, masahito2005asymptotic, Bagan2006a, Petz&Jencova, Keyl&Werner, Suzuki2016, Albarelli2019, Albarelli2019a, Tsang2019, Tsang2019a} relevant for further developments in quantum metrology. An area of significant current interest is that of asymptotic estimation for ensembles of independent, identically prepared systems. 
Similarly to the classical theory \cite{vanderVaart}, the quantum central limit theorem plays an important role \cite{Hayashi2003} in understanding the statistical model in the limit of large ensembles. This led to the development of quantum local asymptotic normality (QLAN) theory, which provides a precise mathematical framework for describing the Gaussian approximation of multi-copy models \cite{GutaKahn, GutaJanssensKahn, KahnGuta, GutaJencova, KahnGuta2, Gill2011, ButuceaGutaNussbaum, yamagata2013quantum, Yang2019}. The upshot is an adaptive strategy for optimal estimation, with asymptotically normal errors, and a clear understanding of the significance of the HCR bound and its asymptotic achievability---see also \cite{hayashi2008asymptotic,Yang2019} for other approaches. This review aims at providing a comprehensive overview of the most important concepts and methods in quantum estimation theory that go beyond the standard SLD CR bound and the related QFI matrix. Throughout the paper we assume a given quantum statistical model and focus solely on the measurement and estimator optimization problem. Hence, we stay within the quantum estimation paradigm and do not discuss the problem of identification of the optimal probe states which is a domain of quantum metrology. Since our understanding of multi-parameter metrological models is far from complete, we hope that collecting the state-of-the-art knowledge on multi-parameter quantum estimation in this review will allow the reader to get a broader picture of the field as a whole, and appreciate the interrelations between ideas that are often discussed separately. For example, even though the HCR bound has been around for quite a long time, a general understanding of its operational meaning became clearer thanks to QLAN theory as it was linked to the saturability of the HCR bound for quantum Gaussian shift models. To the best of our knowledge there is no review that discusses these concepts together in a consistent and detailed way. 
This review has to a large extent a self-contained and somewhat pedagogical character, as the results we refer to are spread in the literature in publications where the mathematical formalism may sometimes be a challenge to a reader. We make an attempt to illustrate the concepts with examples which are chosen to be as simple as possible and yet provide a faithful representation of the interrelations between the concepts discussed. In particular, we highlight the examples where the discrepancies between the QFI based predictions and more informative approaches are the most pronounced. Note that recently there have appeared other review papers addressing closely related topics, including the already mentioned \cite{Liu2019}, where the main object of interest is the QFI matrix, a review which focuses on geometric aspects of multi-parameter estimation \cite{Sidhu2019}, as well as a perspective article focusing on multi-parameter estimation in the context of quantum imaging \cite{Albarelli2019b}. \subsection{Quantum estimation framework and notational conventions} Before proceeding to the discussion of the actual concepts and results, let us first describe in brief the quantum estimation framework both within the frequentist as well as Bayesian paradigms. This will allow us to set up the stage as well as fix the notation that will be used throughout this paper. Consider a family of quantum states $\rho_{{\boldsymbol{\var}}}$ with encoded values of $\mathfrak{p}$ real parameters which we will represent as a vector ${\boldsymbol{\var}} = [ \theta_1,\dots,\theta_\mathfrak{p}]^T$. These states may be obtained as a result of the application of a ${\boldsymbol{\var}}$ dependent quantum channel $\Lambda_{\boldsymbol{\var}}$ to a fixed input state $\rho$, or simply be prepared by some quantum state preparation device. 
A measurement, described by a set of positive operators $\{M_m\}$ ($M_m \geq 0 $, $\sum_m M_m = \mathbb{1}$) \cite{Nielsen2000}, is then performed on the system yielding a random measurement result $m$ with probability \begin{equation}\label{eq.p(m)} p_{{\boldsymbol{\var}}}(m) = \trace(\rho_{\boldsymbol{\var}} M_m). \end{equation} Based on the result $m$, one estimates the parameters using an estimator function $\tilde{{\boldsymbol{\var}}}(m)$. Finally, one needs to specify a cost function $\mathcal{C}({\boldsymbol{\var}},\tilde{{\boldsymbol{\var}}}) \geq 0$, that quantifies the `penalty' for the difference between the estimated value and the true one. This leads to the final figure of merit representing the average estimation cost (or risk): \begin{equation} \mathcal{C} = \sum_{m} p_{\boldsymbol{\var}}(m) \mathcal{C}({\boldsymbol{\var}}, \tilde{{\boldsymbol{\var}}}(m)). \end{equation} The goal of quantum estimation theory is to find the measurement $\{M_m\}$ and the estimator $\tilde{{\boldsymbol{\var}}}(m)$ that yield the minimal average cost. If ${\boldsymbol{\var}}$ and $\tilde{{\boldsymbol{\var}}}$ are sufficiently close to each other and the cost function is smooth, the latter can be approximated by the quadratic function $\mathcal{C}({\boldsymbol{\var}},\tilde{{\boldsymbol{\var}}}) = ({\boldsymbol{\var}} - \tilde{{\boldsymbol{\var}}})^T C ({\boldsymbol{\var}} - \tilde{{\boldsymbol{\var}}})$, where $C$ is the Hessian of the cost function, which we will refer to as the cost matrix. In this case the average cost can be written as: \begin{equation} \mathcal{C} = \tracep(C\Sigma), \quad \Sigma = \sum_{m} p_{{\boldsymbol{\var}}}(m) [{\boldsymbol{\var}} - \tilde{{\boldsymbol{\var}}}(m)][{\boldsymbol{\var}} - \tilde{{\boldsymbol{\var}}}(m)]^T, \end{equation} where $\Sigma$ is the covariance matrix of $\tilde{{\boldsymbol{\var}}}$. 
Note that in order to avoid confusion, we will use $\tracep(\cdot)$ symbol to denote the trace for matrices acting on the parameter space and the $\trace(\cdot)$ to denote the trace with respect to the objects acting on the relevant Hilbert space of quantum states. In the frequentist statistical paradigm the estimated parameter is considered to be unknown but fixed \cite{LehmanCasella1998}. In order to have a non-trivial pointwise cost minimization problem, one imposes unbiasedness constraints on the allowed measurement and estimation strategies in some region of parameter space $\Theta$: \begin{equation} \label{eq:lu1} \sum_m p_{\boldsymbol{\var}}(m) \tilde{{\boldsymbol{\var}}}(m) = {\boldsymbol{\var}}, \qquad {\rm for~all~}\qquad {\boldsymbol{\var}} \in \Theta \end{equation} or a weaker local unbiasedness (l.u.), which corresponds to the derivative of the above constraint at a fixed parameter value ${\boldsymbol{\var}}={\boldsymbol{\var}}_0$: \begin{equation} \label{eq:lu} \sum_m p_{{\boldsymbol{\var}}}(m) \tilde{{\boldsymbol{\var}}}(m) = {\boldsymbol{\var}}, \quad \sum_m \boldsymbol{\nabla} p_{\boldsymbol{\var}}(m)\tilde{{\boldsymbol{\var}}}(m)^T = \mathcal{I}, \end{equation} where $\boldsymbol{\nabla}$ denotes the gradient operator over parameters ${\boldsymbol{\var}}$ while $\mathcal{I}$ is the $\mathfrak{p} \times \mathfrak{p}$ identity matrix---in what follows we use $\mathcal{I}$ to denote the identity in the parameter space, while $\mathbb{1}$ denotes identity in the Hilbert space of quantum states. The l.u. conditions assure that the estimator tracks the true value of the parameter faithfully up to the first order around the point ${\boldsymbol{\var}}_0$. This excludes pathological estimators, e.g. those which return a fixed value irrespective of the measurement outcome and thus appear to perform well when the true parameter coincides with this particular value. However, it is not clear how to interpret the l.u. 
conditions operationally, and moreover the restriction may be regarded as imposing a serious limitation on estimation strategies. An alternative solution is to consider a broader figure of merit, such as the maximum cost over all parameters ${\boldsymbol{\var}} \in \Theta$. In this context, optimal estimators are called minimax \cite{LehmanCasella1998}. However, the problem of finding explicit minimax procedures is often intractable. Moreover, such estimators may be overly pessimistic with regards to the estimation cost around specific points of the parameter space, where they are outperformed by procedures which take such local information into account. An even more refined notion of optimal estimator can be defined in the asymptotic setting where a large number of identical copies of the quantum state are available. A \emph{locally asymptotically minimax} cost, which we will refer to as $\mathcal{C}_{\t{minmax}}$, captures the hardness of the estimation problem at any fixed point without making any unbiasedness assumptions. When following the Bayesian approach \cite{RobertBayesianChoice2007} we will be considering the average Bayesian cost defined as: \begin{equation} \label{eq:bayescost} \overline{\mathcal{C}} = \int \t{d}{\boldsymbol{\var}}\, p({\boldsymbol{\var}})\sum_{m} p_{\boldsymbol{\var}}(m) \mathcal{C}({\boldsymbol{\var}}, \tilde{{\boldsymbol{\var}}}(m)), \end{equation} where $p({\boldsymbol{\var}})$ is the prior distribution which encodes our initial knowledge about the parameters to be estimated. In this case no further unbiasedness constraint is imposed, and the task amounts to the minimization of $\overline{\mathcal{C}}$ over $\{M_m\}$ and $\tilde{{\boldsymbol{\var}}}(m)$. The above optimization problems are very challenging as they deal with optimization over the set of operators $\{M_m\}$ (with unconstrained number of elements) and estimator functions $\tilde{{\boldsymbol{\var}}}(m)$. 
Furthermore, even if the single-parameter case is feasible in principle, the multi-parameter scenario may introduce further complications. Fortunately, in many cases one may avoid a brute-force optimization approach and either perform the optimization exactly thanks to the symmetry of the model or use universal asymptotic properties of the problem to derive informative asymptotic bounds. One of the main points of the review is to show that in the asymptotic setting, the optimal estimation problem simplifies and the optimal costs of the different approaches to quantum parameter estimation agree with each other. In order to help the reader follow this review, we provide below an overview of the structure of the paper as well as highlight the most important results that are discussed in particular sections. \subsection{Structure of the paper and main results} In Sec.~\ref{sec:hcr} we provide a comprehensive discussion of CR bounds with the main focus on the HCR bound. We discuss its different equivalent formulations, saturability, relation with the standard SLD CR bound as well as practical ways to compute it. 
Below we list the main results discussed in this section: \begin{enumerate} \item{The HCR bound can be numerically computed via a semi-definite program (Sec.~\ref{sec:hcrsd}).} \item{In case of a full rank cost matrix $C$, the HCR is equivalent to the SLD CR bound if and only if $\trace(\rho_{\boldsymbol{\var}} [L_i,L_j]) = 0$ for all $i,j$, where $L_i$ are the SLD operators (Sec.~\ref{sec:SLDCR}).} \item{If the cost matrix $C$ is rank-one (all parameters except one are nuisance parameters) the SLD CR bound is always saturable (Sec.~\ref{sec:nuisance}).} \item{The HCR bound is at most two times larger than the SLD CR bound (Sec.~\ref{sec:sldvshcr}).} \item{In case of $\mathcal{D}$-invariant models the HCR bound coincides with the RLD bound (Sec.~\ref{sec:Dinvariance}).} \item{The HCR bound is always saturable in case of pure state models, $\rho_{\boldsymbol{\var}} = \ket{\psi_{\boldsymbol{\var}}}\bra{\psi_{\boldsymbol{\var}}}$, even on the single copy level (Sec.~\ref{sec:saturability}).} \end{enumerate} Sec.~\ref{sec:examples} contains a detailed discussion of qubit estimation models illustrating the measurement incompatibility issue as well as the role of collective measurements in saturation of asymptotic bounds. The second part of the section is devoted to the estimation theory of Gaussian shift models where the parameters are encoded linearly in the mean of quantum Gaussian states with fixed covariance. The choice of the examples is intentional as it serves as a `prelude' for the discussion of the QLAN theorem in Sec.~\ref{sec:qlan}, where these apparently unrelated qubit and Gaussian models are shown to be intimately connected. The quantitative results of this section are summarized in Tab.~\ref{tab:examples} where the HCR and SLD CR bounds are computed for all the models discussed. 
The key messages of this section are: \begin{enumerate} \item{Qubit models involving estimation of $(\theta,\thetaphi)$, $(r,\theta)$, $(r,\theta,\thetaphi)$ manifest respectively: fundamental measurement incompatibility, single copy measurement incompatibility which vanishes in the asymptotic limit and requires collective measurements, fundamental measurement incompatibility where saturability of the HCR bound requires collective measurements (Sec.~\ref{sec:examplequbit}).} \item{The HCR and the SLD CR bounds for Gaussian shift models can be effectively computed (Sec.~\ref{sec:examplegaussian}).} \item{The HCR bound is universally saturable for Gaussian shift models via application of linear measurement strategies (Sec.~\ref{sec:examplegaussian}).} \end{enumerate} Sec.~\ref{sec:qlan} contains an overview of the QLAN theory, which shows that an estimation model involving a large number of independent and identical copies of a finite dimensional quantum system may be approximated by a Gaussian shift model, to which it converges in the asymptotic limit. The convergence holds for states in a shrinking neighbourhood of a fixed state $\rho_{{\boldsymbol{\var}}_0}$ which can be parametrized in the `local' fashion as $\rho_{{\boldsymbol{\var}}_0+ {\bf u}/\sqrt{n}}$, where $n$ is the sample size. The section includes a detailed discussion of qubit models as well as general $d$-dimensional models highlighting the importance of the strong convergence approach in the QLAN which allows one to use the properties of Gaussian models to infer the corresponding properties for multiple-copy finite dimensional models in an operational fashion. 
The key results are: \begin{enumerate} \item{For pure-state multi-copy models, QLAN can be expressed in terms of the convergence of inner products of local product states towards the corresponding inner product of coherent state of a quantum Gaussian shift model (Sec.~\ref{sec:weakLAN}) } \item{The quantum central limit theorem (CLT) offers an intuitive understanding of the emergence of Gaussian shift model in QLAN for arbitrary states (Sec.~\ref{sec.clt.mixed})} \item {The notion of strong convergence replaces the CLT argument with an operational way of comparing the models based on quantum channels, which extends the classical LAN theory developed by Le Cam \cite{LeCam} (Sec.~\ref{sec:QLANstrong}). This provides a mathematically rigorous procedure for defining `optimal' (asymptotically locally minimax) measurements (Sec.~\ref{sec:estimation.strategyLAN}) } \item{The key result of the whole section is that the HCR is asymptotically saturable on multiple copies of finite dimensional systems thanks to the QLAN theorem and the tightness of the HCR for Gaussian shift models. In addition, the optimal estimators have an asymptotically Gaussian distribution which allows one to construct asymptotically exact confidence regions. } \end{enumerate} The considerations in the above mentioned sections fit into the frequentist estimation approach. Following this approach, both the HCR bound and the QLAN approaches were shown to be capable of resolving the measurement incompatibility issue that affects the QFI based quantities. However, this approach is less effective in dealing with parameter estimation using finite resources (few copies of a quantum state) and does not take into account prior information about the parameters of interest. In order to remedy this, in Sec.~\ref{sec:bayes}, we turn to the Bayesian approach and present the methods that allow us to obtain solutions that suffer from none of the above mentioned deficiencies. 
Unfortunately these methods are capable of producing rigorous results only for a restricted class of metrological models, whereas in general one may obtain Bayesian CR type bounds which, unlike frequentist bounds, take into account the prior information and typically agree with the frequentist bounds in the asymptotic limit. The summary of the main results of this section is given below. \begin{enumerate} \item{Direct single- to multi-parameter generalization of the analysis of Bayesian models with a quadratic cost function does not yield a tight formula for the cost. For Gaussian priors it may be related with the QFI matrix and as such ignores the potential optimal measurement incompatibility issue (Sec.~\ref{sec:bayesquadratic}).} \item{For problems with symmetry, covariant measurements are optimal and may significantly simplify the search for a rigorous Bayesian solution (Sec.~\ref{sec:bayescovariant}).} \item{Qubit multicopy models, when analysed using the Bayesian approach, yield asymptotic formulas equivalent to the HCR bound averaged with the respective prior (Sec.~\ref{sec:bayesianqubit}).} \item{Bayesian CR-type bounds may be derived, that in particular show that in general the Bayesian cost may be asymptotically lower bounded by the average HCR bound (Sec.~\ref{sec:bayescr}). } \end{enumerate} Finally, Sec.~\ref{sec:summary} summarizes the paper and provides an outlook on some open problems. \section{Holevo Cram{\'e}r-Rao bound} \label{sec:hcr} \subsection{Classical CR bound} We start with a brief reminder of the classical CR inequality for a generic statistical model $p_{{\boldsymbol{\var}}}$ with probabilities $\{p_{\boldsymbol{\var}}(m)\}$ depending smoothly on ${\boldsymbol{\var}}\in \Theta \subset \mathbb{R}^{\mathfrak{p}}$. Given a sample from $p_{\boldsymbol{\var}}$ one may lower bound the covariance of any l.u. 
estimator $\tilde{{\boldsymbol{\var}}}$ via the following matrix inequality \cite{Kay1993, LehmanCasella1998} \begin{equation} \Sigma\geq F^{-1},\qquad F=\sum_{m} \frac{\bnabla p_{{\boldsymbol{\var}}}(m) [\bnabla p_{{\boldsymbol{\var}}}(m)]^T}{p_{{\boldsymbol{\var}}}(m)}, \end{equation} where $F$ is the (classical) Fisher Information (FI) matrix of $p_{\boldsymbol{\var}}$ at ${\boldsymbol{\var}}$---we drop the explicit dependence of $F$ on ${\boldsymbol{\var}}$ for notational compactness. This implies the following bound on the effective estimation cost for a given cost matrix $C$: \begin{equation} \mathcal{C} = \tracep(C\Sigma) \geq \tracep (C F^{-1}). \end{equation} The following remarks summarise the key features of the FI and the CR bound. \begin{enumerate} \item[(i)]{ The FI is additive for product probability distributions, i.e. if $p_{\boldsymbol{\var}}(m_1,m_2)=p_{{\boldsymbol{\var}}}(m_1)p_{\boldsymbol{\var}}(m_2)$ then we have $F_{12} = F_{1}+F_2$. In particular for $n$ independent experiments the corresponding FI is $n$ times larger and the bound scales inversely proportionally to $n$: $\Sigma^{n}\geq F^{-1}/n$.} \item[(ii)]{ If the true parameter ${\boldsymbol{\var}}$ is close to some known value ${\boldsymbol{\var}}_0$, we may look for locally unbiased estimators around this point; the following estimator saturates the CR bound and hence is optimal at ${\boldsymbol{\var}}_0$ \begin{equation}\label{eq.one.step.estimator} \tilde{{\boldsymbol{\var}}}(m) = {\boldsymbol{\var}}_0 + \frac{1}{p_{{\boldsymbol{\var}}}(m)}F^{-1} \left.\bnabla p_{\boldsymbol{\var}}(m)\right|_{{\boldsymbol{\var}}= {\boldsymbol{\var}}_0}. \end{equation} However, with the exception of models belonging to the class of exponential family of probability distributions \cite{Fend59}, the estimator will depend explicitly on ${\boldsymbol{\var}}_0$ and is not optimal away from this point. 
This drawback can be remedied in a scenario where many independent samples are available, where the following two stage adaptive procedure can be applied \cite{Barndorff2000}: a `reasonable' preliminary estimator ${\boldsymbol{\var}}_0$ is computed on a subsample, while the remaining samples are used to compute the final estimator by using the above formula. Alternatively, the estimator \eqref{eq.one.step.estimator} can be seen as one step of the Fisher scoring algorithm for computing the maximum likelihood estimator \cite{Demidenko}. } \item[(iii)]{ If the measurement data consists of $n$ independent and identically distributed (i.i.d.) samples $(m_1,\dots,m_n)$ from $p_{\boldsymbol{\var}}$, then under mild regularity conditions, the maximum likelihood estimator $\tilde{\boldsymbol{\var}}^{n}_{\t{ML}}(m_1,...,m_n)\equiv {\rm arg\,max}_{\boldsymbol{\var}} p_{\boldsymbol{\var}}(m_1)...p_{\boldsymbol{\var}}(m_n)$ is asymptotically unbiased and achieves the CR bound: \begin{equation} \lim_{n\rightarrow\infty}n\Sigma^{n}_{\t{ML}}=F^{-1}, \end{equation} where $\Sigma^{n}_{\t{ML}}$ is the covariance matrix of $\tilde{\boldsymbol{\var}}^{n}_{\t{ML}}$ \cite{LehmanCasella1998, Kay1993}. Most importantly, unlike the estimator discussed in (ii), it is asymptotically normal, performs optimally for all parameter values and depends solely on the observed data and the probabilistic model involved. As a result it is one of the most widely used estimators in practical applications.} \end{enumerate} To summarize: the CR bound is asymptotically achievable and the optimal cost scales as $ \tracep(C F^{-1})/n$, where $n$ is the sample size. The multi-parameter aspect of the problem does not introduce any additional difficulties compared with the single parameter case apart from the fact that the CR bound involves matrices rather than scalars. 
\subsection{Quantum SLD CR bound} Let us move now to the quantum case where $p_{{\boldsymbol{\var}}}(m)= \trace(\rho_{\boldsymbol{\var}} M_m)$ and the optimization is performed not only over estimators $\tilde{\boldsymbol{\var}}(m)$, but also over measurements $\{M_m\}$. In this case the covariance matrix of an arbitrary l.u. estimator may be lower bounded by the inverse of the QFI matrix $F_Q$ \cite{Helstrom1976, Braunstein1994, Liu2019} \begin{equation} \label{clq} \Sigma\geq F_Q^{-1},\quad F_{Q}=\tfrac{1}{2}\trace(\rho_{\boldsymbol{\var}} \{\mathbf{L},\mathbf{L}^T\}), \end{equation} where $\mathbf{L}=(L_1,\dots,L_{\mathfrak{p}})^T$ are SLDs satisfying \begin{equation}\label{eq:SLD} \bnabla \rho_{\boldsymbol{\var}}=\frac{1}{2} \{ \mathbf{L}, \rho_{\boldsymbol{\var}}\}, \end{equation} and $\{\cdot,\cdot\}$ denotes the anticommutator. We will refer to this bound as the SLD CR bound, due to the fact that it involves the choice of the SLD as an operator generalization of the logarithmic derivative. When the cost matrix $C$ is given, this implies the following bound on the effective cost: \begin{equation} \mathcal{C} = \tracep(C \Sigma ) \geq \tracep(C F_Q^{-1}) =: \mathcal{C}^{\t{SLD}}. \end{equation} Intuitively, QFI quantifies the amount of information about the parameter ${\boldsymbol{\var}}$ potentially available in a state $\rho_{\boldsymbol{\var}}$. Similarly to the FI, the QFI is additive for models consisting of product states. In particular, for $n$ copies of a quantum system $\rho_{\boldsymbol{\var}}^{\otimes n}$ the corresponding QFI matrix is $n F_Q$. On a formal level, the issue of saturability of the SLD CR bound amounts to the question of the existence of a measurement $\{M_m\}$ for which the corresponding probabilistic model $p_{\boldsymbol{\var}}(m) = \trace(\rho_{\boldsymbol{\var}} M_m)$ yields the FI matrix $F$ equal to $F_Q$. 
In the single parameter case $\theta\in \mathbb{R}$, it may be verified that the classical FI corresponding to measuring the SLD operator is equal to the QFI, and hence this measurement is optimal. Although the SLD generally depends on the unknown parameter $\theta$, this problem can be addressed by using the two-stage adaptive procedure described in point (ii) above, when a large number of independent copies of the state $\rho_{\boldsymbol{\var}}$ are available. The achievability of the SLD CR bound for correlated states needs to be treated separately. In particular, in quantum metrology, where the `samples' are typically correlated, an indiscriminate use of the QFI as a figure of merit may lead to some unjustified claims regarding the actually achievable asymptotic bounds \cite{Hall2012, Jarzyna2015, Gorecki2019}. The multi-parameter case is in general more involved. If all the SLDs corresponding to different parameters commute one may saturate the bound by performing a joint measurement of the SLDs. However, if the SLDs do not commute, it may happen that measurements that are optimal for different parameters are fundamentally incompatible. In this case, the measurement minimizing the total cost may strongly depend on a particular cost matrix $C$. Therefore, while classically we may say that $\Sigma=F^{-1}$ is the `optimal achievable covariance matrix' (independently of the choice of $C$), in the quantum case different cost matrices may correspond to different optimal covariance matrices, for which in general it might not be possible to say which is larger or smaller as the matrix ordering is only partial. From that one may see that any fundamental saturable quantum bound cannot have a form of a matrix inequality analogous to Eq.~\eqref{clq}---it needs to be based on the minimization of the scalar cost $\tracep(C\Sigma)$, as the problem of minimization of $\Sigma$ itself is ill defined from the very beginning. 
An important tool for studying the achievable cost in multi-parameter estimation problems is the HCR bound which is an extension of the SLD CR bound and will be the focus of the following section. In Sec.~\ref{sec:qlan} we will show how the asymptotic achievability of the HCR bound follows from the general theory of QLAN. \subsection{Formulation of the HCR bound} \label{sec:hcrderive} Among different equivalent formulations of the HCR, we will start with the one that is the most tractable computationally. It lower bounds the cost of a locally unbiased estimator as \cite{Nagaoka1989, hayashi2008asymptotic} \begin{equation} \label{HCR} \mathcal{C} = \tracep(C \Sigma)\geq\mathcal C^{\t{H}} =\min_{\bold{X},V}\left(\tracep(C V)\,\big|\,V\geq Z[\bold{X}], \trace\left(\bnabla \rho_{{\boldsymbol{\var}}} \mathbf{X}^T\right)=\mathcal{I}\right), \end{equation} where $\mathbf{X} = [X_1,\dots,X_\mathfrak{p}]^T$ is a vector representing a collection of $\mathfrak{p}$ Hermitian matrices acting on the system's Hilbert space, $V$ is a $\mathfrak{p} \times \mathfrak{p}$ real matrix while $Z[\bold{X}] = \trace(\rho_{\boldsymbol{\var}} \mathbf{X} \mathbf{X}^T)$ is a $\mathfrak{p} \times \mathfrak{p}$ complex matrix. At a first sight, this bound appears rather technical and not obvious to calculate. Still, as shown later on in the paper, not only can it be efficiently calculated, but it also plays a fundamental role in the whole quantum estimation theory as it is actually \emph{the} asymptotically tight bound for general multi-copy estimation models. \emph{Proof of the HCR bound.} We present a proof largely based on \cite{Nagaoka1989}, which provides the necessary intuition required to grasp the physical content of the bound. 
For any measurement $\{M_m\}$, estimator $\tilde{{\boldsymbol{\var}}}(m)$, and some fixed ${\boldsymbol{\var}}$, we define a vector of Hermitian matrices $\mathbf{X} = [X_1,\dots, X_\mathfrak{p}]^T$: \begin{equation} \label{eq:Xi} \mathbf{X}:=\sum_m(\tilde{\boldsymbol{\var}}(m)-{\boldsymbol{\var}})M_m. \end{equation} If $\tilde{{\boldsymbol{\var}}}(m)$ is a l.u. estimator then by Eqs.~(\ref{eq:lu1},\ref{eq:lu}), the operators $\mathbf{X}$ need to satisfy the conditions \begin{equation} \trace(\rho_{\boldsymbol{\var}} \mathbf{X})=0, \quad \trace(\bnabla \rho_{\boldsymbol{\var}} \mathbf{X}^T)=\mathcal{I} \end{equation} at ${\boldsymbol{\var}}={\boldsymbol{\var}}_0$. If the measurement $\{M_m\}$ is projective (i.e. $M_m M_{m'}=\delta_{mm'}M_m$) then the following equality holds \begin{equation} \Sigma=\sum_{m}(\tilde{\boldsymbol{\var}}(m)-{\boldsymbol{\var}})(\tilde{\boldsymbol{\var}}(m)-{\boldsymbol{\var}})^T\trace(M_m\rho_{{\boldsymbol{\var}}})=\trace (\rho_{\boldsymbol{\var}} \mathbf{X} \mathbf{X}^T). \end{equation} Although for non-projective measurements the equality generally fails, we will now show that it can be replaced by an inequality. Let us define an extended Hilbert space $\mathbb{C}^{\mathfrak{p}}\otimes\mathcal{H}$, where the Hilbert space $\mathcal{H}$ of the system is tensored with a $\mathfrak{p}$ dimensional space of parameters. Consider a linear operator on $\mathbb{C}^{\mathfrak{p}}\otimes\mathcal{H}$ \begin{equation} {\bf R} := \sum_m [(\tilde{\boldsymbol{\var}}(m)-{\boldsymbol{\var}})\mathbb{1}-\mathbf{X}] M_m [(\tilde{\boldsymbol{\var}}(m)-{\boldsymbol{\var}})\mathbb{1}-\mathbf{X}]^T, \end{equation} which by construction is a positive operator. 
This implies that the following partial trace (in accordance with our previous convention $\trace$ in the formulas that follow denotes the trace over $\mathcal{H}$ only) is also positive \begin{align} &\trace (( \mathcal{I}\otimes\rho_{\boldsymbol{\var}}) {\bf R}) = \sum_{m}(\tilde{\boldsymbol{\var}}(m)-{\boldsymbol{\var}})\trace(M_m\rho_{{\boldsymbol{\var}}}) (\tilde{\boldsymbol{\var}}(m)-{\boldsymbol{\var}})^T \nonumber \\ &- \trace\bigg[\rho_{\boldsymbol{\var}}\bigg( \sum_m(\tilde{\boldsymbol{\var}}(m)-{\boldsymbol{\var}})M_m \mathbf{X}^T+\mathbf{X}\sum_m(\tilde{\boldsymbol{\var}}(m)-{\boldsymbol{\var}})^T M_m-\mathbf{X}\sum_m M_m \mathbf{X}^T\bigg)\bigg] \nonumber \\ &=\Sigma-\trace(\rho_{\boldsymbol{\var}} \mathbf{X} \mathbf{X}^T) \geq 0, \end{align} where in the last step we have used \eqref{eq:Xi} and the identity $\sum_m M_m=\mathbb{1}$. Hence we arrive at the following matrix inequality which holds for any measurement $M_m$ \begin{equation} \label{matrixineq} \Sigma\geq Z[\bold{X}], \quad Z[\bold{X}] = \trace(\rho_{\boldsymbol{\var}} \mathbf{X} \mathbf{X}^T), \end{equation} where $Z[\bold{X}]$ is a $\mathfrak{p}\times \mathfrak{p}$ Hermitian matrix. Now, we can trace the above inequality with a given cost matrix $C$ to obtain a scalar inequality \begin{equation} \label{eq:ineqcost} \mathcal{C} = \tracep(C \Sigma ) \geq \tracep(C Z[\bold{X}]) \end{equation} and since the above depends on the measurement and estimators only via $\mathbf{X}$, we will obtain a universally valid bound if we minimize the r.h.s. over $\mathbf{X}$ keeping in mind the l.u. conditions $\trace(\rho_{\boldsymbol{\var}} \mathbf{X})=0$, $\trace\left(\bnabla\rho_{{\boldsymbol{\var}}}\mathbf{X}^T \right)=\mathcal{I}$. Note that without the l.u. conditions we would get a trivial bound $\tracep(C \Sigma) \geq 0$. This procedure, however, is not in general the optimal way to obtain a scalar inequality from the matrix inequality \eqref{matrixineq}. 
Since $Z[\bold{X}]$ is in general a complex matrix, application of the trace with the real symmetric cost matrix $C$ causes the information hidden in the imaginary part of $Z[\bold{X}]$ to be lost. To remedy this issue, we may introduce a $\mathfrak{p} \times \mathfrak{p}$ real matrix $V$ satisfying $V\geq Z[\bold{X}]$ and we end up with the stronger HCR bound: \begin{equation} \label{HCR} \mathcal{C} = \tracep(C \Sigma)\geq\mathcal C^{\t{H}} :=\min_{\mathbf{X},V}\left(\tracep(C V)\big|V\geq Z[\bold{X}], \, \trace\left(\bnabla \rho_{{\boldsymbol{\var}}}\mathbf{X}^T\right)=\mathcal{I}\right), \end{equation} where we have kept only the second of the previously mentioned l.u. conditions, as the first condition may be dropped without affecting the result. To see this, let $\trace(\rho_{\boldsymbol{\var}} \mathbf{X})=\mathbf{c}$. Then we may redefine $\tilde{\mathbf{X}} = \mathbf{X} - \mathbf{c} \mathbb{1}$, for which the first l.u. condition is satisfied and at the same time the second l.u. condition is not affected. Finally, such a transformation will also lower the r.h.s. of \eqref{eq:ineqcost} (by the standard argument involving the inequality between the variance and the second moment) and hence the result of minimization with or without the first l.u. condition is the same. \qed \subsection{Numerical evaluation} \label{sec:hcrsd} There are a number of equivalent formulations of the HCR bound \cite{hayashi2008asymptotic} but before presenting them let us discuss an efficient numerical algorithm for calculating the HCR bound which is based on the above formula. Interestingly, despite the well established position of the HCR bound in the quantum estimation literature, an explicit formulation of the algorithm which allows one to efficiently calculate the HCR bound numerically in terms of a linear semi-definite program was proposed only recently \cite{Albarelli2019}. 
In order to write the HCR bound as a linear semi-definite program one needs to express the condition $V\geq Z[\bold{X}]$ in a way that is linear in both $V$ and $X_i$. Let $\{\Lambda_a\}$ be a basis of $\mathcal{L}(\mathcal{H})$ (Hermitian operators acting on $\mathcal{H}$), orthonormal according to the Hilbert-Schmidt inner product, i.e. $\trace(\Lambda_a\Lambda_b)=\delta_{ab}$. We may now represent matrices $X_i$ and $\rho_{\boldsymbol{\var}}$ as vectors of coefficients $\boldsymbol{x}_i,\boldsymbol{s}_{\boldsymbol{\var}}\in\mathbb R^{(\dim\mathcal{H})^2}$ with respect to the basis $\{\Lambda_a\}$. Since $\trace(X_iX_j\rho_{{\boldsymbol{\var}}})$ may be seen as a non-negative definite bilinear form on $\mathcal{L}(\mathcal{H})$, it may also be written as: \begin{equation} \trace(X_iX_j\rho_{{\boldsymbol{\var}}})=\boldsymbol{x}_i^TS_{\boldsymbol{\var}} \boldsymbol{x}_j=\boldsymbol{x}_i^TR_{\boldsymbol{\var}}^\dagger R_{\boldsymbol{\var}} \boldsymbol{x}_j, \end{equation} where $S_{\boldsymbol{\var}}$ is a positive semi-definite matrix and $R_{\boldsymbol{\var}}$ is an arbitrary matrix satisfying $S_{\boldsymbol{\var}}=R_{\boldsymbol{\var}}^\dagger R_{\boldsymbol{\var}}$ (e.g. the Cholesky decomposition). Note that according to the above formula $S_{{\boldsymbol{\var}}}$ and $\boldsymbol{s}_{\boldsymbol{\var}}$ are related \begin{equation} \left(S_{{\boldsymbol{\var}}}\right)_{a b} = \sum_c \trace(\Lambda_a \Lambda_b \Lambda_c) (\boldsymbol{s}_{\boldsymbol{\var}})_c. \end{equation} Introducing $\bold{x}=[\boldsymbol{x}_1,...,\boldsymbol{x}_{\mathfrak{p}}]$ (no transposition here is intentional) we may rewrite the above equality in a compact way \begin{equation} Z[\bold{X}] = \bold{x}^T R_{\boldsymbol{\var}}^\dagger R_{\boldsymbol{\var}} \bold{x}, \end{equation} where $R_{\boldsymbol{\var}} \bold{x} = [R_{\boldsymbol{\var}}\boldsymbol{x}_1,...,R_{\boldsymbol{\var}}\boldsymbol{x}_{\mathfrak{p}}]$ is in fact a $(\dim \mathcal{H})^2 \times \mathfrak{p}$ matrix. 
Then, we use a general fact that for any matrices $A,B$ the following are equivalent \begin{equation} A-B^\dagger B\geq 0 \quad \Longleftrightarrow\quad \begin{bmatrix} A& B^\dagger\\ B& \mathbb{1} \end{bmatrix}\geq 0 \end{equation} so that we may rewrite \eqref{HCR} as a linear semi-definite problem: \begin{equation} \min_{V,\bold{x}}\tracep(C V),\quad {\rm subject~to:} \begin{bmatrix} V& \bold{x}^T R_{\boldsymbol{\var}}^\dagger \\ R_{\boldsymbol{\var}} \bold{x} & \mathbb{1} \end{bmatrix}\geq 0,\quad \boldsymbol{x}_i^T\frac{\partial \boldsymbol{s}_{\boldsymbol{\var}}}{\partial\theta_j}=\delta_{ij}, \end{equation} where $\mathbb{1}$ is the identity on $\mathbb{R}^{(\dim \mathcal{H})^2}$. The above semi-definite program may be easily implemented numerically. \subsection{Equivalent formulations of the HCR bound} \label{sec:hcrequivalent} Below we show that for a given $\mathbf{X}$ minimization over $V$ in \eqref{HCR} may be performed directly. This leads us to a more explicit form of the HCR bound \cite{Nagaoka1989, hayashi2008asymptotic}. However, even though this form appears more informative from an analytical point of view, at the same time it is less suitable for numerical implementation. First, for any cost matrix $C$, the inequality $\sqrt{C}V\sqrt{C}\geq \sqrt{C}Z[\bold{X}]\sqrt{C}$ is still valid after transposition operation is applied $\sqrt{C}V\sqrt{C}\geq\sqrt{C}Z[\bold{X}]^T\sqrt{C}$. For Hermitian matrices the transposition operation leaves the real part of the matrix unchanged and changes the sign of the imaginary part. Therefore, given any column vector $\gv v_i$, these two inequalities lead to \begin{equation} \label{step1} \gv v_i^T\sqrt{C}\left(V-{\rm Re}Z[\bold{X}]\right)\sqrt{C}\gv v_i \geq\gv v_i^T\left(\pm i\sqrt{C}\mathrm{Im} Z[\bold{X}] \sqrt{C}\right)\gv v_i. 
\end{equation} By summing over vectors $\gv v_i$, which form the eigenbasis of $ i\sqrt{C}\mathrm{Im} Z[\bold{X}] \sqrt{C}$, we get a trace variant of the above inequality: \begin{equation} \label{step2} \tracep(C V)\geq \tracep(C \mathrm{Re}Z[\bold{X}])+\tracep(|\sqrt{C}\mathrm{Im} Z[\bold{X}]\sqrt{C}|), \end{equation} where the absolute value of an operator $|B|:=\sqrt{B^\dagger B}$ appears as a result of $\pm$ on the r.h.s. of the inequality. The last inequality may always be saturated by taking $V=\mathrm{Re} Z[\bold{X}] +\sqrt{C^{-1}}|\sqrt{C}\mathrm{Im} Z[\bold{X}]\sqrt{C}|\sqrt{C^{-1}}$. As a result the HCR bound may be written equivalently as~\cite{Nagaoka1989, hayashi2008asymptotic}: \begin{equation} \label{HCRtracenorm} \mathcal C^{\t{H}}:=\min_{\mathbf{X}}\left(\tracep(C \mathrm{Re} Z[\bold{X}])+\|\sqrt{C}\cdot {\rm Im}Z[\bold{X}]\cdot\sqrt{C}\|_1 \ \big| \ \trace\left(\bnabla \rho_{{\boldsymbol{\var}}}\mathbf{X}^T\right)=\mathcal{I}\right), \end{equation} where $\|B\|_1:=\tracep(|B|)$ is the trace norm. The last term is often written in the literature as $\tracep[{\rm abs}(C\cdot {\rm Im}Z[\bold{X}])]$ \cite{Suzuki2016,Holevo1982,hayashi2008asymptotic}, where $\tracep(\t{abs}(\cdot))$ is the sum of absolute values of eigenvalues, and note that for non-Hermitian matrices it is not the same as $\tracep| \cdot|$. Finally, we present yet another formulation of the HCR bound, originally proposed by Matsumoto only for pure states \cite{matsumoto2002new}, and here generalized to arbitrary density matrices. This formulation has proven particularly suitable when discussing saturability of pure state models, as shown in Sec.~\ref{sec:saturability} and, moreover, it has been successfully employed in designing the optimal quantum error correction protocols in multi-parameter quantum metrology \cite{Gorecki2019}. 
The essential feature that makes the HCR bound stronger than the SLD CR bound, but at the same time makes this bound harder to compute is the fact that $Z[\bold{X}]$ may be complex. This is related to the incompatibility of measurements which are optimal from the point of view of estimation of different parameters. This issue may be approached by formally considering matrices $Y_i\in\mathcal{L}(\mathcal H\oplus \mathbb{C}^{\mathfrak{p}})$ acting on a properly extended space instead of $X_i\in\mathcal{L}(\mathcal H)$, but with an additional restriction $\mathrm{Im} Z[\bold{Y}]=0$, which reflects the requirement that the measurements on this extended subspace will no longer suffer from the incompatibility issue. Let us decompose $Y_i\in\mathcal{L}(\mathcal H\oplus \mathbb{C}^\mathfrak{p})$ into $Y_i=X_i+\tilde{X}_i$, where $P_{\mathcal H}X_i P_{\mathcal H}=X_i$ and $P_{\mathcal H}\tilde{X}_i P_{\mathcal H}=0$ ($P_{\mathcal H}$ is the projection onto $\mathcal H$). Now, one can see that using this decomposition we have $Z[\bold{Y}]=Z[\bold{X}+\bold{\tilde{X}}]= Z[\bold{X}]+Z[\bold{\tilde{X}}]$ and since both $Z[\bold{X}]$ and $Z[\bold{\tilde{X}}]$ are positive semi-definite, then $V\geq Z[\bold{X}+\bold{\tilde{X}}]$ implies $V\geq Z[\bold{X}]$. Therefore, for any fixed $\bold{X}$ we have: \begin{equation} \min_{V,\mathbf{\tilde{X}}:{\rm Im}Z[\bold{X}+\bold{\tilde{X}}]=0}\left(\tracep(C V):V\geq Z[\bold{X}+\bold{\tilde{X}}]\right)\geq\min_{V}\left(\tracep(C V):V\geq Z[\bold{X}]\right). \end{equation} The above inequality will be saturated if we find $\bold{\tilde{X}}$ satisfying \begin{equation} Z[\bold{\tilde{X}}]=\sqrt{C^{-1}}|\sqrt{C}{\rm Im}Z[\bold{X}]\sqrt{C}|\sqrt{C^{-1}}-i{\rm Im}Z[\bold{X}]. \end{equation} Indeed, such $\bold{\tilde{X}}$ always exists as the r.h.s. is a positive semi-definite matrix and in general for any positive semi-definite $\mathfrak{p}\times \mathfrak{p}$ matrix $A$ there exists $\bold{\tilde{X}}$, such that $Z[\bold{\tilde{X}}]=A$. 
To see this let $A=\sum_{k=1}^\mathfrak{p} a_k\ket{a_k}\bra{a_k}$ and let $\ket{\lambda}$ be an arbitrary non-zero eigenvector of $\rho_{\boldsymbol{\var}}$. Consider $\tilde{X}_i$ of the form $\tilde{X}_i=\sum_{k=1}^\mathfrak{p} \frac{1}{\sqrt{\lambda}}(\bar{\alpha}_{ik}\ket{k}\bra{\lambda}+\alpha_{ik}\ket{\lambda}\bra{k})$, where $\ket{k}$ is a basis in $\mathbb{C}^\mathfrak{p}$---note that this operator satisfies the requirement $P_{\mathcal H}\tilde{X}_i P_{\mathcal H}=0$. Then $Z[\bold{\tilde{X}}]=\sum_{k=1}^\mathfrak{p} \begin{bmatrix} \alpha_{1k}& \cdots& \alpha_{\mathfrak{p}k}\\ \end{bmatrix}^T \cdot \begin{bmatrix} \bar{\alpha}_{1k}&\hdots&\bar{\alpha}_{\mathfrak{p}k}\\ \end{bmatrix}.$ Setting $[\alpha_{1k},..,\alpha_{\mathfrak{p}k}]^T=\sqrt{a_k}\ket{a_k}$ we have $Z[\bold{\tilde{X}}]=A$. This all implies that the HCR may be alternatively formulated as: \begin{equation} \label{matsu} \mathcal C^{\t{H}}:=\min_{Y_i\in\mathcal{L}(\mathcal{H}\oplus\mathbb C^\mathfrak{p})}\left(\tracep(C Z[\bold{Y}])\ \big| \ \trace\left(\bnabla \rho_{{\boldsymbol{\var}}}\mathbf{Y}^T\right)=\mathcal{I}, \mathrm{Im} Z[\bold{Y}]=0\right). \end{equation} \subsection{Relation with the standard SLD CR bound} \label{sec:SLDCR} While deriving the HCR bound in Sec.~\ref{sec:hcrderive} we have mentioned that the bound \eqref{eq:ineqcost}, obtained naively by applying $\tracep(C\cdot)$ to the matrix inequality \eqref{matrixineq}, is in general not the optimal way to obtain a scalar bound from a matrix inequality. If, nevertheless, we pursue this line of derivation, it turns out that the bound corresponds exactly to the standard SLD CR bound $\mathcal{C}^{\t{SLD}}$: \begin{equation} \label{HCRSLD} \mathcal{C}^{\t{SLD}}=\min_{\mathbf{X}}\left(\tracep(C Z[\bold{X}])\ \big|\ \trace \left(\bnabla \rho_{{\boldsymbol{\var}}}\mathbf{X}^T\right)=\mathcal{I}\right). 
\end{equation} In order to prove this fact, and also establish the relation between the SLD CR bound and the HCR bound, we need to introduce some more mathematical tools. Any Hermitian matrix $X$ acting on $\mathcal{H}$ may be written down using the following block structure: \begin{equation} X= \begin{bmatrix} X^\t{R}& X^{\t{RK}}\\ X^{\t{KR}}& X^\t{K} \end{bmatrix} \end{equation} where $X^{\t{R}}\in\mathcal{L}({\rm Range}( \rho_{\boldsymbol{\var}}))$ and $X^{\t{K}}\in\mathcal{L}({\rm Ker} (\rho_{\boldsymbol{\var}}))$, where $\t{Range}$ and $\t{Ker}$ denote the range and the kernel of an operator. Since $X_i^{\t{K}}$ does not affect $\trace(X_iX_j\rho_{\boldsymbol{\var}})$, we may restrict ourselves to the subspace of matrices $X$ for which $X^{\t{K}}=0$---more formally we deal with elements of the space $\mathcal{L}(\mathcal{H})/\mathcal{L}({\rm Ker}(\rho_{\boldsymbol{\var}}))$ (which is \emph{not} equivalent to $\mathcal{L}({\rm Range}(\rho_{\boldsymbol{\var}}))$, as off-diagonal blocks $X^{\t{RK}},X^{\t{KR}}$ are still important here). We define a scalar product on this subspace: \begin{equation} \label{eq:scal} \scal{X,Y}:=\trace\left(\rho_{\boldsymbol{\var}}\tfrac{1}{2}\{X,Y\}\right), \end{equation} for which the l.u. condition takes a very concise form: \begin{equation*} \scal{\mathbf{L},\mathbf{X}^T}=\mathcal{I}. \end{equation*} In particular, it means that if we write $X_i=X_i^{\parallel}+X_i^{\perp} \in\mathcal{L}(\mathcal{H})$, where $X_i^{\parallel}\in{\rm span}_{\mathbb R}\{L_1,...,L_\mathfrak{p}\}$ and $X_i^{\perp}\perp{\rm span}_{\mathbb R}\{L_1,...,L_\mathfrak{p}\}$, then the l.u. condition implies that the parallel part is $\mathbf{X}^{\parallel}=F_Q^{-1} \mathbf{L}$ and there is no restriction for $\mathbf{X}^\perp$. 
Next, one may see that: \begin{equation} \mathrm{Re}(Z[\bold{X}])=\scal{\mathbf{X},\mathbf{X}^T} =\mathrm{Re} Z[\mathbf{X}^\parallel]+\mathrm{Re} Z[\mathbf{X}^\perp], \end{equation} where \begin{equation} \mathrm{Re} Z[\mathbf{X}^\parallel]=\mathrm{Re}\trace[\rho_{\boldsymbol{\var}} F_Q^{-1} \mathbf{L} \mathbf{L}^T (F_Q^{-1})]= F_Q^{-1}\mathrm{Re}\trace(\rho_{\boldsymbol{\var}} \mathbf{L} \mathbf{L}^T) F_Q^{-1}=F_Q^{-1}. \end{equation} From that it is clear that in order to minimize \eqref{HCRSLD} one should choose $\mathbf{X}^\perp=0$ and then the SLD CR bound is recovered. The HCR bound may now be rewritten in the form: \begin{multline} \label{eq:hcrperp} \mathcal C^{\t{H}}:=\mathcal{C}^{\t{SLD}}+ \min_{\mathbf{X}^\perp}\left(\tracep(C \mathrm{Re} Z[\mathbf{X}^\perp])+\|\sqrt{C}\cdot {\rm Im}Z[\mathbf{X}^\perp+\mathbf{X}^\parallel]\cdot\sqrt{C}\|_1 \ \big| \ \mathbf{X}^\parallel=F_Q^{-1} \mathbf{L}\right). \end{multline} We see that the HCR bound is identical to the SLD CR bound if and only if $\sqrt{C}\mathrm{Im} Z[\mathbf{X}^\parallel]\sqrt{C}=0$, which for full rank $C$ is equivalent to: \begin{equation} \trace(\rho_{\boldsymbol{\var}} [L_i,L_j])=0, \qquad {\rm for~all~} i,j. \end{equation} While this last condition has appeared in a number of papers \cite{Genoni2013a, Vidrighin2014, Crowley2014, Suzuki2016}, the fact that this is indeed a necessary \emph{and} sufficient condition for the equality between the SLD CR and the HCR bounds was not obvious and it was stated explicitly in \cite{Ragy2016}. \subsection{Scalar function estimation in the presence of nuisance parameters} \label{sec:nuisance} In quantum state tomography the usual figure of merit is derived from a proper distance function on quantum states, whose quadratic approximation has a strictly positive cost matrix $C$. Here, we look in more detail at the opposite situation where $C$ is a rank-1 matrix, so that $C = \mathbf{c}\mathbf{c}^T$ for some real valued vector $\mathbf{c}$. 
This occurs when the aim is to estimate a particular scalar function of the parameter, even though one deals with a multidimensional parameter manifold; locally, the parameter can be separated in the component along $\mathbf{c}$ which needs to be estimated, and other components which are regarded as nuisance parameters; see \cite{Suzuki2019, Suzuki2019a, Tsang2019, Yang2019} for a more general discussion of estimation in presence of nuisance parameters. The setup is also related to that of semi-parametric estimation, where the estimation problem is often non-parametric (i.e. infinite dimensional parameter as in homodyne tomography of a cv state) but we are interested in a finite dimensional function of the parameter (e.g. the expectation value of certain observables). This setup is also relevant for the distributed sensing scenarios \cite{Ge2018, Sekatski2019}, interferometry \cite{Jarzyna2012}, field gradient sensing \cite{Altenburg2017, Apellaniz2018} and many others. Even though this may appear as a single parameter estimation problem, the uncertainty about the nuisance parameters leaves a multi-parameter hallmark on the solution. Nevertheless, the argument below shows that this effect is fully captured by the SLD CR bound as in this case the HCR and the SLD CR bound coincide. To see this let us inspect the HCR bound in the form \eqref{HCRtracenorm} and notice that \begin{equation} i\sqrt{C} \t{Im}Z[\bold{X}] \sqrt{C} \propto \mathbf{c} \mathbf{c}^T (i \t{Im}Z[\bold{X}]) \mathbf{c} \mathbf{c}^T. \end{equation} Since $Z[\bold{X}]$ is a Hermitian matrix, $i \t{Im}Z[\bold{X}]$ is a purely imaginary Hermitian matrix and the expectation $\mathbf{c}^T (i \t{Im}Z[\bold{X}]) \mathbf{c}$ is equal to zero for any real vector ${\bf c}$. By comparing with formula \eqref{HCRSLD} we conclude that \begin{equation} \mathcal C^{\t{H}} = \mathcal{C}^{\t{SLD}}, \quad \t{for } C = \mathbf{c}\mathbf{c}^T. 
\end{equation} \subsection{Maximal discrepancy between the SLD and the HCR bounds} \label{sec:sldvshcr} Interestingly, while the HCR bound is in general tighter than the SLD CR bound it will at most provide a factor of $2$ improvement over the SLD CR bound---a simple fact that has not been pointed out explicitly until very recently \cite{Albarelli2019a, Carollo2019} (see also \cite{Tsang2019a} where a weaker bound was derived). This can be shown as follows. For any $\mathbf{X}$ the matrix $Z[\bold{X}]$ is positive semi-definite. Now, adopting the reasoning that led to equation \eqref{step2}, namely: start with $\sqrt{C}Z[\bold{X}] \sqrt{C} \geq 0$; take the transpose $\sqrt{C} Z[\bold{X}]^T \sqrt{C}\geq 0$; add and subtract the two inequalities; separate the real and imaginary parts and take the trace on both sides; we arrive at: \begin{equation} Z[\bold{X}]\geq 0\Rightarrow\tracep(C \mathrm{Re} Z[\bold{X}])\geq\|\sqrt{C}\cdot {\rm Im}Z[\bold{X}]\cdot\sqrt{C}\|_1. \end{equation} Next, applying it to the second formulation of the HCR bound \eqref{HCRtracenorm} and using \eqref{HCRSLD}: \begin{equation} \mathcal C^{\t{H}}=\min_{\mathbf{X}}\tracep(C \mathrm{Re} Z[\bold{X}])+\|\sqrt{C}\cdot {\rm Im}Z[\bold{X}]\cdot\sqrt{C}\|_1 \leq 2\min_{\mathbf{X}}\tracep(C \mathrm{Re} Z[\bold{X}])=2 \mathcal{C}^{\t{SLD}} \end{equation} we prove the statement. Since, as will be discussed further on, the HCR bound is asymptotically saturable on many copies, the factor of $2$ represents the maximal asymptotic impact that measurement incompatibility can have on the optimal estimation of multiple parameters. This factor can also be understood from the perspective of the QLAN theory discussed in Sec.~\ref{sec:qlan}. Indeed, QLAN shows that the quantum estimation problem with many identical copies is asymptotically equivalent to estimating the mean in a Gaussian shift model. 
The factor $2$ stems from the fact that in a Gaussian shift model, one can group the coordinates of the cv system into two families (positions and momenta of individual modes) such that the means of each family can be estimated optimally by simultaneously measuring all coordinates in the family. We will come back to this point in Sec.~\ref{sec:qlan}. \subsection{$\mathcal{D}$-invariance and the RLD CR bound}\label{sec:Dinvariance} Using the notations and the concept of the scalar product introduced in Sec.~\ref{sec:SLDCR}, the real part of $Z[\bold{X}]$ and the l.u. conditions read $\mathrm{Re}Z[\bold{X}]=\scal{\mathbf{X},\mathbf{X}^T}$ and $\scal{\mathbf{L},\mathbf{X}^T}=\mathcal{I}$. In order to write the imaginary part $\mathrm{Im}Z[\bold{X}]$ in an analogous way let us introduce a commutation superoperator $\mathcal{D}$ \cite{Holevo1982, Gill2011, hayashi2008asymptotic,yamagata2013quantum} satisfying\footnote{Its existence and uniqueness may be shown using the eigenbasis of $\rho_{\boldsymbol{\var}}$: $\braket{i|\mathcal D(X)}{j}=\frac{i(\rho_{ii}-\rho_{jj})}{\rho_{ii}+\rho_{jj}}\braket{i|X}{j}$. Here we use the definition introduced in \cite{yamagata2013quantum}, which differs from the one from \cite{Holevo1982} by a factor $2$.}: \begin{equation} \label{eq:dinv} \{\mathcal D (X),\rho_{\boldsymbol{\var}}\}=i[X,\rho_{\boldsymbol{\var}}],\quad \mathcal D (X)\in \mathcal{L}(\mathcal{H})/\mathcal{L}({\rm ker}\rho_{\boldsymbol{\var}}). \end{equation} Then we have: \begin{equation} \label{eq:Ddecomp} \trace(\rho_{\boldsymbol{\var}} X_i X_j)=\scal{X_i,X_j}+i\scal{\mathcal D(X_i),X_j}. 
\end{equation} Now we will prove that when looking for the optimal $X_i$ we may always restrict ourselves to $X_i$ which belong to the subspace $\mathcal{T}\subseteq\mathcal{L}(\mathcal{H})/\mathcal{L}({\rm ker}\rho_{\boldsymbol{\var}})$, which is the smallest $\mathcal D$-invariant subspace containing $\t{span}_{\mathbb{R}}\{L_1,...,L_\mathfrak{p}\}$; in other words this is a subspace obtained by sequential actions of $\mathcal{D}$ starting with operators from $\t{span}_{\mathbb{R}}\{L_1,...,L_\mathfrak{p}\}$. Let us denote by $P(X)$ and $P^\perp(X)$ the orthogonal projections of an operator $X$ onto respectively $\mathcal{T}$ and its orthogonal complement $\mathcal{T}^\perp$. According to \eqref{eq:Ddecomp} we can write:\begin{equation} \trace(\rho_{\boldsymbol{\var}} P(X_i)P^\perp(X_j))=\scal{P(X_i),P^\perp(X_j)}+i\scal{\mathcal D(P(X_i)),P^\perp(X_j)}. \end{equation} The first term on the r.h.s. is zero by definition of $P(X)$ and $P^\perp(X)$. The second term on the r.h.s is zero as well since $\mathcal{T}$ is $\mathcal{D}$-invariant and hence $\mathcal{D}(P(X_i)) \in \mathcal{T}$. As a result $\trace(\rho_{\boldsymbol{\var}} P(X_i)P^\perp(X_j))=0$. Thanks to this we have \begin{equation} Z(\bold{X})=Z(P(\bold{X}))+Z(P^{\perp}(\bold{X}))\geq Z(P(\bold{X})). \end{equation} Now, since $\mathcal{T}$ subspace contains $\t{span}_{\mathbb{R}}\{L_1,...,L_\mathfrak{p}\}$ operators, then if the l.u. condition $\scal{\mathbf{L},\mathbf{X}^T}=\mathcal{I}$ is satisfied for $\mathbf{X}$ then it is also satisfied for $\scal{\mathbf{L},P(\mathbf{X})}=\mathcal{I}$. Therefore, projecting onto $\mathcal{T}$ is always advantageous in performing the minimization. This proves that we may restrict to tuples ${\bf X}$ having all components in $\mathcal{T}$. Note, that since $\trace{(\bnabla \rho_{\boldsymbol{\var}})}=0$ then $\scal{\mathbf{L},\mathbb{1}} = 0$, and this equality remains unchanged under the action of $\mathcal{D}$ operator on $L_i$. 
As a result, $ \scal{X,\mathbb{1}}=0$ for all ${X\in \mathcal{T}}$. In particular, if $\mathcal{T}={\rm span}_{\mathbb R}\{L_1,...,L_\mathfrak{p}\}$ we will say that the model is $\mathcal{D}$-invariant. In this case it follows from \eqref{eq:hcrperp} that the result of minimization over $\mathbf{X}$ is given analytically as $\mathbf{X}=F_Q^{-1} \mathbf{L}$ and we have: \begin{equation} \mathcal C^{\t{H}}=\tracep(C F_Q^{-1})+\frac{1}{2}\|\sqrt{C}\cdot F_Q^{-1}\trace(\rho_{\boldsymbol{\var}} [\mathbf{L},\mathbf{L}^T])F_Q^{-1}\cdot\sqrt{C}\|_1, \end{equation} where we have used the fact that $\mathrm{Im}(\mathbf{L}\mathbf{L}^T) = \frac{1}{2i}[\mathbf{L},\mathbf{L}^T]$. It is also worth noting that the above equation may be written in an equivalent form, if one introduces the RLD $\bnabla \rho_{\boldsymbol{\var}}=\rho_{\boldsymbol{\var}}\mathbf{L_R}$ and the corresponding RLD bound \cite{Belavkin1976}: \begin{equation} \Sigma\geq {F}_R^{-1},\quad{\rm where}\quad F_R=\trace(\rho_{\boldsymbol{\var}} \mathbf{L_R}\mathbf{L_R}^T). \end{equation} In contrast to the standard QFI, $ F_R$ is not necessarily real, and using reasoning similar to the one presented in Sec~\ref{sec:hcrequivalent} the RLD scalar bound takes the form \cite{Holevo1982}: \begin{equation} \mathcal{C} = \tracep(C\Sigma)\geq \tracep(C\mathrm{Re}{{F}_R^{-1}})+\|\sqrt{C}\cdot \mathrm{Im}{{F}_R^{-1}}\cdot \sqrt{C}\|_1. \end{equation} Next, it may be shown \cite{Holevo1982} that $\mathrm{Re}{{F}_R^{-1}}=F_Q^{-1}, \mathrm{Im} {{F}_R^{-1}}=\frac{1}{2}F_Q^{-1}\trace(\rho_{\boldsymbol{\var}}\mathrm{Im}(\mathbf{L}\mathbf{L}^T))F_Q^{-1}$ and therefore for $\mathcal{D}$-invariant models the HCR bound is equivalent to the RLD bound. Since the $\mathcal{D}$-invariance property may at first sight appear like a non-intuitive mathematical concept, let us provide here some more operational description of it in case of unitary parameter estimation. 
Imagine a quantum model where the parameters are being encoded in a unitary way via a set of generators $\mathbf{G} = [G_1,\dots,G_\mathfrak{p}]^T$: \begin{equation} \rho_{{\boldsymbol{\var}}} = e^{-i \mathbf{G}^T {\boldsymbol{\var}}} \rho_0 e^{i \mathbf{G}^T {\boldsymbol{\var}}}. \end{equation} If we consider estimation around ${\boldsymbol{\var}}=0$ point, the potential non-commutativity of $G_i$ does not affect the form of the first derivatives which read: \begin{equation} \left.\boldsymbol{\nabla} \rho_{\boldsymbol{\var}}\right|_{{\boldsymbol{\var}}=0} = i [ \rho_0, \mathbf{G}] \end{equation} and as a result the SLDs satisfy the following equation: \begin{equation} i [ \rho_0, \mathbf{G}] = \frac{1}{2}\{\rho_0, \mathbf{L}\}. \end{equation} Inspecting the definition of the $\mathcal{D}$ operator \eqref{eq:dinv} we see that up to the 1/2 factor $\mathcal{D}(G_i)$ is $L_i$. The $\mathcal{D}$-invariance property may now be understood as follows. If we take the resulting SLDs and plug them into the definition of the model as new generators $\tilde{G}_i = L_i$, the resulting new SLDs $\tilde{L}_i$ should be spanned by the original ones so $\tilde{L}_i \in \t{span}_{\mathbb{R}}\{L_1,...,L_\mathfrak{p}\}$. Therefore, the $\mathcal{D}$-invariance property amounts to a statement that if we treat the original SLDs as additional generators of the unitary transformation the resulting span of the SLDs should not change. \subsection{The HCR bound on multiple copies} \label{sec:collectiveproof} In this subsection we show that, similarly to the SLD CR bound, the HCR bound on multiple copies equals $1/n$ of the single copy formula \cite{yamagata2013quantum, hayashi2008asymptotic}: \begin{equation} \label{eq:additivity} n \mathcal C^{\t{H}}(\rho_{\boldsymbol{\var}}^{\otimes n}) = \mathcal C^{\t{H}}(\rho_{\boldsymbol{\var}}). 
\end{equation} This fact is crucial, as it implies that when the HCR bound is calculated for a single copy it already provides information on the scenario where collective measurements are performed on many copies. Consider an $n$-fold tensor space $\mathcal{H}^{\otimes n}$ and a quantum state that represents $n$ copies $\rho_{\boldsymbol{\var}}^{\otimes n}$ of a system. For any matrix $A$ we define: \begin{equation} A^{(n)}:=\sum_{k=1}^n\mathbb{1}^{\otimes k-1}\otimes A \otimes \mathbb{1}^{\otimes n-k}. \end{equation} In particular, in the $n$-copy model the SLDs are given as $L_i^{(n)}$, where $L_i$ are the single copy SLDs. Note also that: $\mathcal{D}(A^{(n)})=(\mathcal D(A))^{(n)}$ and hence $\mathcal{T}^{(n)}=\{X^{(n)}: X\in \mathcal{T}\}$. Next, note that \begin{multline} \trace(\rho_{\boldsymbol{\var}}^{\otimes n}A^{(n)}B^{(n)}) =\trace\left(\rho_{\boldsymbol{\var}}^{\otimes n}\left(\sum_{k=1}^n\mathbb{1}^{\otimes k-1}\otimes A \otimes \mathbb{1}^{\otimes n-k}\right)\left(\sum_{l=1}^n\mathbb{1}^{\otimes l-1}\otimes B \otimes \mathbb{1}^{\otimes n-l}\right)\right)\\ =n\trace(\rho_{\boldsymbol{\var}} AB)+n(n-1)\trace(\rho_{\boldsymbol{\var}} A)\trace(\rho_{\boldsymbol{\var}} B). \end{multline} Moreover, since $\scal{X_i,\mathbb{1}} = 0$ for all ${X_i \in \mathcal{T}}$, from the above formula we have $\trace(\rho_{\boldsymbol{\var}}^{\otimes n}X_i^{(n)}X_j^{(n)})=n\trace(\rho_{\boldsymbol{\var}} X_iX_j)$ as all the cross-terms vanish. Therefore, if $\mathbf{X}$ minimizes the Holevo bound for a single copy of the system, then $\frac{1}{n}\mathbf{X}^{(n)}$ minimizes it for the $n$ copies. Indeed, note that the l.u. condition for $\mathbf{X}$, $\scal{\mathbf{L},\mathbf{X}^T}=\mathcal{I}$, implies that the $n$ copy variant of the l.u. 
condition will be satisfied for $\frac{1}{n}\mathbf{X}^{(n)}$: $\scal{\frac{1}{n}\mathbf{X}^{(n)}, \mathbf{L}^{(n)T}} = \frac{1}{n} n \scal{\mathbf{X},\mathbf{L}^T} = \mathcal{I}$. This proves \eqref{eq:additivity}. \subsection{Saturability} \label{sec:saturability} Having proven the HCR bound and shown its $1/n$ scaling when applied to multi-copy models, we now turn to discuss its saturability. In this section we will show that for pure state models there always exists a measurement saturating the HCR bound already on the single copy level~\cite{matsumoto2002new}. In the case of mixed states, the HCR bound is saturable in general only asymptotically, and this in general requires collective measurements performed on many copies. A discussion of this fact will be postponed until Sec.~\ref{sec:qlan} where it will be addressed using the QLAN perspective. Let us focus on the HCR bound in the variant derived in \eqref{matsu}. Let $\mathbf{Y}$ be the operators resulting from the minimization in \eqref{matsu} for $\rho_{\boldsymbol{\theta}}=\ket{\psi_{\boldsymbol{\theta}}}\bra{\psi_{\boldsymbol{\theta}}}$. Let us define $\ket{y_i}:=Y_i\ket{\psi_{\boldsymbol{\theta}}}$. As $\braket{\psi_{\boldsymbol{\theta}}}{y_i}=0$ and $\braket{y_i}{y_j}\in \mathbb{R}$ for all $i,j$, one may choose a basis $\{\ket{b_i}\}$ of the $\t{span}\{\ket{\psi_{\boldsymbol{\theta}}},\ket{y_1},\ldots,\ket{y_\mathfrak{p}}\}$ satisfying: $\braket{\psi_{\boldsymbol{\theta}}}{b_i}\in \mathbb{R}\backslash\{0\}$ and $\braket{y_i}{b_j}\in \mathbb{R}$ for all $i,j$. 
Then one can define a projective measurement on $\mathcal{H}\oplus\mathbb C^{\mathfrak{p}}$: \begin{gather} M_m=\ket{b_m}\bra{b_m}\; (m=1,\ldots,\mathfrak{p}+1),\quad M_0=\mathbb{1}-\textstyle{\sum}_{m=1}^{\mathfrak{p}+1}\ket{b_m}\bra{b_m}, \end{gather} with the corresponding estimator: \begin{equation} \tilde\theta_i(m)=\frac{\braket{b_m}{y_i}}{\braket{b_m}{\psi_{\boldsymbol{\theta}}}}+\theta_i,\;m\geq 1,\quad \tilde\theta_i(0)=0, \end{equation} which is l.u. at the fixed point ${\boldsymbol{\var}}$ and satisfies \begin{equation} \ket{y_i}=\sum_{m=0}^{\mathfrak{p}+1}(\tilde\theta_i(m)-\theta_i)M_m\ket{\psi_{\boldsymbol{\theta}}}\Rightarrow \Sigma_{ij}=\braket{y_i}{y_j}. \end{equation} Any projective measurement on $\mathcal{H}\oplus\mathbb C^{\mathfrak{p}}$ clearly defines a general measurement on $\mathcal{H}$. Therefore, for pure states the HCR bound is saturable in a single-shot measurement and no collective measurement can further boost the estimation precision in such case. For mixed states this is no longer the case in general and as mentioned before saturability will be guaranteed only asymptotically when measurements are performed on many copies. Since, as shown in Sec.~\ref{sec:collectiveproof}, the HCR bound for an $n$-copy model is equal to the $1/n$ of the single copy HCR bound, we can summarize the results on saturability via the following chain of inequalities: \begin{equation} \mathcal{C}^{n}=\tracep(C\Sigma^{n})\geq \tfrac{1}{n}\mathcal C^{\t{H}}(\rho_{\boldsymbol{\var}}) \geq \tfrac{1}{n} \mathcal{C}^{\t{SLD}}(\rho_{\boldsymbol{\var}}), \end{equation} where $\Sigma^{n}$ is a covariance matrix corresponding to l.u. estimation strategy performed on the $n$-copy state. The first inequality is always saturable for pure states and any $n$, while for mixed states it is guaranteed to be saturated asymptotically as $n\rightarrow \infty$. 
As discussed in Sec.~\ref{sec:SLDCR}, for full rank $C$ the second inequality becomes an equality if and only if $\trace(\rho_{\boldsymbol{\var}} [L_i ,L_j])=0$ for all $i,j$, in which case the SLD CR bound is equivalent to the HCR bound. In the light of the saturability conditions of the HCR bound this also implies that the measurement incompatibility is not affecting the achievable precision in the asymptotic limit involving many copies, whereas for pure state models this statement is valid also for any finite $n$. \subsection{Estimating functions of parameters} \label{sec:function} Assume that we have analyzed the estimation problem and the corresponding CR bounds using the ${\boldsymbol{\var}}$ parametrization of quantum states. It might happen that in some physical situations it is more natural to think in terms of estimation of certain functions of ${\boldsymbol{\var}}$, i.e. \begin{equation} {\boldsymbol{\var}}^\prime = \mathbf{f}({\boldsymbol{\var}}), \end{equation} where we assume that $\mathbf{f}$ is an invertible vector function of the parameters. It is now straightforward to write the relevant quantities in the new parametrization provided they are known in the old parametrization. All we need to do is to replace all the gradient operators $\boldsymbol{\nabla}$ with $\boldsymbol{\nabla^\prime} = (J^T)^{-1} \cdot \boldsymbol{\nabla}$, where $J$ ($\t{det} J \neq 0$) is the derivative matrix of the $\mathbf{f}$ function taken at the estimation point ${\boldsymbol{\var}}_0$: $J_{ij} = \left.\tfrac{\partial \theta^\prime_i}{\partial \theta_j}\right|_{{\boldsymbol{\var}} = {\boldsymbol{\var}}_0}$. As a result the corresponding SLD operators and the inverted QFI matrices will transform as: \begin{equation} \mathbf{L}^\prime = (J^T)^{-1} \mathbf{L}, \quad F_Q^{\prime-1} = J F_Q^{-1} J^T, \end{equation} whereas the objects that appear in the computation of the HCR bound transform as: \begin{equation} \mathbf{X}^\prime = J \mathbf{X}, \quad Z[\bold{X}]^\prime = J Z[\bold{X}] J^T.
\end{equation} Taking a `dual' point of view we may also say that, since all the scalar bounds are obtained by some variants of tracing the matrices $F_Q^{-1}$, $Z[\bold{X}]$ together with the cost matrix, therefore, when calculating a scalar bound within the new parametrization using a cost matrix $C^\prime$, this bound may always be calculated using the objects obtained in the old parametrization, provided we replace the $C^\prime$ matrix with \begin{equation} C = J^T C^\prime J. \end{equation} \section{Examples} \label{sec:examples} In order to illustrate the concepts introduced in Sec.~\ref{sec:hcr}, we discuss two classes of examples. In Sec.~\ref{sec:examplequbit} we discuss qubit estimation examples, while in Sec.~\ref{sec:examplegaussian} we discuss the Gaussian shift model examples. These examples encompass all the non-trivial features that may appear in estimation problems, including non-compatibility of optimal measurements, as well as the potential advantage offered by collective measurements. The discussion of these two classes will also be helpful in understanding the general concept of QLAN presented in Sec.~\ref{sec:qlan}, where generic many-copy estimation models become asymptotically equivalent to the Gaussian shift models. \subsection{Qubit models.} \label{sec:examplequbit} In this section we use the standard Bloch ball parametrization of qubit states \cite{Nielsen2000}: \begin{equation} \rho_{\textbf{r}} =\tfrac{1}{2}\left(\mathbb{1}+\boldsymbol{\sigma} \cdot \bold{r}\right), \end{equation} where $\boldsymbol{\sigma}$ is a vector of Pauli matrices and $\bold r$ is the Bloch vector with polar coordinates $(r, \theta, \varphi)$.
\subsubsection{Two parameter pure state model.} \label{sec:examplequbit1} First, let us consider a problem of estimation of an unknown pure qubit state, where the state is parametrized with angles $(\theta,\varphi)$ and we set $r=1$: \begin{equation} \rho_{(\theta,\varphi)} = \ket{\psi_{(\theta,\varphi)}}\bra{\psi_{(\theta,\varphi)}},\ \ket{\psi_{(\theta,\varphi)}}=\cos(\theta/2)\ket{0}+\sin(\theta/2)e^{i\varphi}\ket{1}. \end{equation} We choose the cost matrix $C$ in a way that it corresponds to the natural metric on the sphere (coinciding with the Fubini-Study metric \cite{Bengtsson2006}) \begin{equation} C= \t{diag}[1,\sin^2(\theta)]. \end{equation} For simplicity, thanks to the rotational symmetry we may focus on estimation around the point $(\theta,\varphi) = (\pi/2,0)$. We have: \begin{equation} \rho_{(\theta,\varphi)}=\tfrac{1}{2}(\mathbb{1}+\sigma_x),\quad \partial_\theta\rho_{(\theta,\varphi)}=-\tfrac{1}{2}\sigma_z, \quad \partial_\varphi\rho_{(\theta,\varphi)}=\tfrac{1}{2}\sigma_y. \end{equation} In order to calculate the HCR bound we first apply the l.u. conditions on the $\mathbf{X}$ operators: $\trace\left(\bnabla\rho_{(\theta,\varphi)}\mathbf{X}^T \right)=\mathcal{I}$, $\trace(\rho_{(\theta,\varphi)} \mathbf{X})=0$---according to the discussion in Sec.~\ref{sec:hcrderive} the second condition is not necessary as it does not affect the final result of the minimization, but we impose it nevertheless to reduce the number of free parameters and simplify the reasoning. As a result we get \begin{equation} X_\theta=-\sigma_z+\alpha_\theta(\mathbb{1} - \sigma_x),\quad X_\varphi=\sigma_y+\alpha_\varphi(\mathbb{1} - \sigma_x),\quad \alpha_{\theta/\varphi}\in\mathbb R. \end{equation} Note that $Z[\bold{X}]$ does not depend on $\alpha_{\varphi/\theta}$ (as $\mathbb{1}-\sigma_x\in \mathcal L(\ker \rho_{(\theta,\varphi)})$).
Therefore, without loss of generality, we may set $\alpha_{\varphi/\theta}=0$: \begin{equation} X_\theta=-\sigma_z,\quad X_\varphi=\sigma_y,\quad Z[\bold{X}] = \begin{bmatrix} 1&i\\ -i&1 \end{bmatrix}, \end{equation} for which the corresponding HCR bound is: \begin{equation} \label{eq:hcrqubit1} \mathcal C^{\t{H}}_{(\theta,\varphi)}=4. \end{equation} Using the formula \eqref{HCRSLD} we obtain \begin{equation} \label{eq:sldqubit1} \mathcal{C}^{\t{SLD}}_{(\theta,\varphi)}= \min_{\bf X}\tracep(C\,\t{Re}Z[\bold{X}]) = 2, \end{equation} without the need to compute the actual SLDs. Still, for completeness, we provide below the explicit form of the QFI matrix and the SLDs (note that since the state is pure the SLDs are not unique): \begin{equation} F_Q =\t{diag}[1,\sin^2(\theta)], \ L_\theta = \boldsymbol{\sigma}\cdot \partial_\theta \mathbf{r}, \ L_\varphi = \boldsymbol{\sigma}\cdot \partial_\varphi \mathbf{r} \end{equation} and it is clear from the above that indeed $\mathcal{C}^{\t{SLD}}_{(\theta,\varphi)} = \tracep{(C F_Q^{-1})} = 2$. We see that the HCR bound is twice as large as the SLD CR bound, which corresponds to the maximal possible discrepancy, as discussed in Sec.~\ref{sec:sldvshcr}. It means that the measurements optimal for both of these parameters are `maximally' incompatible---the hallmark of this is the noncommutativity of the SLDs. A measurement for which the corresponding classical FI matrix $F$ yields $\tracep(C F^{-1})$ saturating the HCR bound may be constructed by combining the optimal measurements for the two parameters with equal weights: \begin{equation} \{M_m\}= \Big\{\tfrac{1}{2}\ket{+}\bra{+}_y, \,\tfrac{1}{2}\ket{-}\bra{-}_y, \, \tfrac{1}{2}\ket{+}\bra{+}_z, \, \tfrac{1}{2}\ket{-}\bra{-}_z\Big\}. \end{equation} \subsubsection{Two parameter mixed state model.
} Let us now consider a mixed state qubit model with fixed $\varphi=0$, where the parameters $(r,\theta)$ correspond to the length (representing the purity of the state) and the latitude $\theta$ of the Bloch vector: \begin{equation} \rho_{(r,\theta)}=\tfrac{1}{2}(\mathbb{1}+r\sin(\theta)\sigma_x+r\cos(\theta)\sigma_z). \end{equation} Unlike in the pure state model, there is no natural choice for the cost matrix for this problem, as the $r$ parameter is not associated with any group action in the space of quantum states. Therefore we only assume that the cost matrix is diagonal in $(r,\theta)$ and consider \begin{equation} C=\t{diag}[c(r),r^2], \end{equation} where $c(r)>0$ determines the character of the cost function with respect to the $r$ parameter, while the $r^2$ cost in the case of $\theta$ is chosen for convenience, in order to stay in agreement with the spherical coordinate conventions. In particular, the Euclidean metric corresponds to the choice $c(r)=1$, while a more natural Bures metric \cite{Bures, Bengtsson2006} corresponds (up to a constant) to $c(r) = 1/(1-r^2)$. Without loss of generality, we consider estimation around the point $(r,\theta) = (r,0)$, in which case we have: \begin{equation} \rho_{(r,\theta)}=\tfrac{1}{2}(\mathbb{1}+r\sigma_z),\quad \partial_r\rho_{(r,\theta)}=\tfrac{1}{2}\sigma_z,\quad \partial_\theta\rho_{(r,\theta)}=\tfrac{1}{2}r\sigma_x, \end{equation} and the l.u. conditions imply that \begin{equation} X_r=\sigma_z- r \mathbb{1}+\alpha_r\sigma_y,\quad X_\theta=\tfrac{1}{r}\sigma_x+\alpha_\theta\sigma_y,\quad \alpha_{r/\theta} \in\mathbb R. \end{equation} Direct minimization of the cost leads to $\alpha_{r/\theta}=0$: \begin{equation} X_r=\sigma_z-r\mathbb{1},\quad X_\theta=\tfrac{1}{r}\sigma_x,\quad Z[\bold{X}] = \t{diag}[1-r^2, \tfrac{1}{r^2}] \end{equation} and the final HCR bound reads: \begin{equation} \label{eq:hcrqubit2} \mathcal C^{\t{H}}_{(r,\theta)}=c(r)(1-r^2) +1.
\end{equation} Interestingly, the SLD CR bound $\min_{\bf X}\tracep(C\,\t{Re}Z[\bold{X}])$ yields the same result: \begin{equation} \mathcal{C}^{\t{SLD}}_{(r,\theta)}=\mathcal C^{\t{H}}_{(r,\theta)}, \end{equation} which can also be independently confirmed using the explicit form of the SLDs and the QFI matrix: \begin{equation} F_Q = \t{diag}[\tfrac{1}{1-r^2}, r^2] ,\ L_r = \tfrac{1}{1-r^2}\left( \boldsymbol{\sigma} \cdot \mathbf{r} - r \mathbb{1} \right), \ L_\theta = \boldsymbol{\sigma} \cdot \partial_\theta \mathbf{r}. \end{equation} From the above form of the SLDs we find that $\trace(\rho_{\bold{r}}[L_r,L_\theta])=0$, so according to the discussion from Sec.~\ref{sec:SLDCR} the two bounds must indeed be equal. Note, however, that the SLDs do not commute as operators, $[L_r,L_\theta]\neq 0$. In fact, as discussed in detail in \cite{Bagan2004, Vidrighin2014}, in this case there is no local single qubit measurement that saturates the CR bound and hence collective measurements prove advantageous. To shed more light on this problem, one may refer to the Hayashi-Gill-Massar bound (HGM) \cite{Gill2000, masahito2005asymptotic,yamagata2011efficiency}, which is valid for qubit estimation models and is always saturable using local measurements. It states that: \begin{equation} \mathcal{C}_{(r,\theta)} \geq \mathcal{C}^{\t{HGM}}_{(r,\theta)}:=\left(\tracep\left[ \sqrt{\sqrt{F_Q^{-1}}C \sqrt{F_Q^{-1}}}\right]\right)^2= [1+\sqrt{c(r)(1-r^2)}]^2. \end{equation} It is worth noticing that this bound may also be saturated by using weighted measurements optimal for both parameters: \begin{equation} \{M_m\}= \Big\{ p_z\ket{+}\bra{+}_z, \, p_z\ket{-}\bra{-}_z, \, p_x \ket{+}\bra{+}_x, \, p_x \ket{-}\bra{-}_x\Big\}, \quad p_z+p_x=1, \end{equation} with the weights chosen so as to optimize the corresponding classical CR bound $\tracep(C F^{-1})=\frac{c(r)(1-r^2)}{p_z}+\frac{1}{p_x}$.
These bounds are compared in Fig.~\ref{fig:collective} (for $c(r)=1$), from which it is clear that the HGM bound is significantly larger than the HCR bound everywhere except at the border of the Bloch sphere. This implies that collective measurements allow one to achieve a better precision in comparison with local measurements---note that for the Bures distance cost $c(r)=1/(1-r^2)$, $\mathcal{C}^{\t{H}}_{(r,\theta)} = 2$, $\mathcal{C}^{\t{HGM}}_{(r,\theta)} = 4$ are parameter independent and hence the advantage of the collective approach is the same irrespective of the value of $r$. From a practical point of view it is important to understand what is the structure of a collective measurement that yields the maximal information on the length of the Bloch vector $r$ without losing information on the angle $\theta$. It can be checked by direct computation that $\rho_{(r,\theta)}$ can be written as $\rho_{(r,\theta)}=e^{-i \sigma_y \theta/2}\frac{e^{2 \sigma_z \beta/2}}{2 \cosh \beta}e^{i\sigma_y\theta/2}$, where $\tanh(\beta)=r$. Hence, the tensor product of $n$ copies will have an analogous form: \begin{equation} \rho_{(r,\theta)}^{\otimes n}=e^{-iJ_y\theta}\frac{e^{2 J_z\beta}}{[2\cosh\beta]^n}e^{iJ_y\theta}, \end{equation} where $J_i=\frac{1}{2}\sum_{k}\sigma^{(k)}_i$ are the total angular momentum operators. Now, instead of measuring $r$ directly (which would correspond to measuring $J_z$), one may perform a projection onto subspaces with a well defined value of the total angular momentum---then no information about $\theta$ is lost, since the rotation $e^{-iJ_y\theta}$ commutes with the total angular momentum operator. Moreover, it turns out \cite{Keyl2001, hayashi2008asymptotic,Bagan2006a} that in the limit of $n \rightarrow \infty$ such a measurement gives the same precision of estimating $r$ as the optimal direct measurement, provided $r>0$. Finally, the optimal measurement to extract the information on $\theta$ is performed---the $J_x$ measurement.
The performance of this collective measurement strategy is depicted in Fig.~\ref{fig:collective}, where a clear improvement with an increasing number of copies involved is visible, and the precision achieved approaches the asymptotic bound for $n \rightarrow \infty$. We will see a generalization of this measurement strategy in the discussion of the QLAN in Sec.~\ref{sec:qlan}. \begin{figure*} \caption{Comparison of the HCR bound (which in this case coincides with the SLD CR bound, solid line) and the HGM bound (saturable using local measurements, dotted line) for the total cost (normalized by the number of qubits) corresponding to the Euclidean distance, $c(r)=1$, in the estimation of the Bloch vector parameters $(r,\theta)$ as a function of $r$. Dashed lines represent the performance of the exemplary collective measurement which approaches the HCR bound with an increasing number of copies of the system $n$.} \label{fig:collective} \end{figure*} \subsubsection{Three parameter mixed state model.} \label{sec:examplequbit3} Finally, let us consider the most challenging qubit estimation problem, namely the estimation of a completely unknown qubit state. Following the line of reasoning from the previous examples, we will consider the cost matrix to be \begin{equation} \label{eq:3dcost} C = \t{diag}[c(r), r^2, r^2 \sin^2(\theta)]. \end{equation} In order to obtain the HCR bound, it will be more convenient to switch from spherical $(r, \theta, \varphi)$ to Cartesian coordinates, where we write the Bloch vector as $\bold{r}=[r_x,r_y,r_z]^T$, \begin{equation} \rho_{\bold{r}}=\tfrac{1}{2}(\mathbb{1}+\boldsymbol{\sigma} \cdot \bold{r}). \end{equation} In this parametrization the partial derivatives over the parameters are $\partial_i\rho_{\bold{r}}=\frac{1}{2}\sigma_i$, and the l.u. conditions lead to $X_i=\sigma_i - r_i\mathbb{1}$ with no free parameters to optimize over.
We can therefore write: \begin{equation} Z[\bold{X}]_{ij}=\trace\left[\tfrac{1}{2}(\mathbb{1}+\boldsymbol{\sigma}\cdot \mathbf{r})(\sigma_i-r_i\mathbb{1})(\sigma_j-r_j\mathbb{1}) \right]= \delta_{ij} - r_i r_j + i \sum_k \varepsilon_{ijk} r_k, \end{equation} where $\varepsilon_{ijk}$ is the Levi-Civita symbol. In order to calculate the cost using the cost matrix \eqref{eq:3dcost} defined for spherical coordinates, we can use the general approach presented in Sec.~\ref{sec:function}, and transform the above $Z[\bold{X}]$ written in Cartesian coordinates to spherical coordinates: \begin{equation} Z[\bold{X}]^\prime = J Z[\bold{X}] J^T = \begin{bmatrix} 1-r^2 & 0 & 0 \\ 0 & \frac{1}{r^2} & \frac{i}{r \sin(\theta)} \\ 0 & -\frac{i}{r \sin(\theta)} & \frac{1}{r^2 \sin^2(\theta)} \end{bmatrix}, \end{equation} where $J$ is the derivative of the standard transformation from Cartesian to spherical coordinates, which we do not write here explicitly. We may now use \eqref{HCRtracenorm} to compute the HCR bound for the cost matrix \eqref{eq:3dcost}: \begin{equation} \label{eq:hcrqubit3} \mathcal{C}^{\t{H}}_{\mathbf{r}}= 2 + c(r)(1-r^2) + 2r, \end{equation} where the last term comes from the imaginary part of the $Z[\bold{X}]$ matrix. The QFI matrix is in fact the inverse of the real part of the $Z[\bold{X}]$ matrix and reads \begin{align} F_Q &= \t{diag}[\tfrac{1}{1-r^2},r^2,r^2 \sin^2(\theta)], \ L_r = \tfrac{1}{1-r^2}\left( \boldsymbol{\sigma} \cdot \mathbf{r} - r \mathbb{1} \right), \, L_\theta = \boldsymbol{\sigma} \cdot \partial_{\theta} \mathbf{r}, \ L_{\varphi} = \boldsymbol{\sigma} \cdot \partial_{\varphi} \mathbf{r}, \end{align} where we have also provided an explicit form of the SLDs for completeness. Therefore, the SLD CR and the HGM bounds read: \begin{equation} \mathcal{C}^{\t{SLD}}_{\mathbf{r}}=2+ c(r)(1-r^2), \quad \mathcal{C}^{\t{HGM}}_{\mathbf{r}}=(2+\sqrt{c(r)(1-r^2)})^2.
\end{equation} In Fig.~\ref{fig:comp} we present the comparison of all three bounds and their dependence on the length of the Bloch vector $r$ for the Euclidean distance $c(r)=1$ case. \begin{figure*} \caption{Comparison of the HGM, HCR and SLD CR bounds for the total estimation Euclidean cost in the case of the estimation of a completely unknown mixed state of a qubit, as a function of the Bloch vector's length.} \label{fig:comp} \end{figure*} In order to get a better intuition in preparation for the QLAN discussion in Sec.~\ref{sec:qlan}, let us return to the Cartesian parametrization and consider estimation around the point $\bold{r}=[0,0,r]^T$. Then, locally, the two parameters $r_x,r_y$ may be interpreted as rotations of the Bloch vector and the third one, $r_z$, as its length. At this point the QFI matrix and the corresponding SLDs read: \begin{equation} F_Q = \t{diag}[1,1,\tfrac{1}{1-r^2}], \ L_x=\sigma_x ,\ L_y=\sigma_y ,\ L_z = \t{diag}[\tfrac{1}{1+r},-\tfrac{1}{1-r}]. \end{equation} Let us notice the following properties: \begin{equation} \label{incom} \trace(\rho_\bold{r}[L_x,L_y])=r,\quad\trace(\rho_\bold{r}[L_y,L_z])=0,\quad\trace(\rho_\bold{r}[L_z,L_x])=0. \end{equation} Taking into account the discussion in Sec.~\ref{sec:SLDCR}, we see that only $r_x,r_y$ are fundamentally incompatible---the third one may be effectively measured independently of the others (at least in the asymptotic limit utilizing collective measurements). For $|r|=1$ we recover the pure state case discussed in the first example of this section, where the HCR bound and the HGM bound coincide, as local measurements saturate the HCR bound in the case of pure states.
In general the optimal local measurements (saturating the HGM bound) have a similar structure as in the previous example: \begin{equation} \{M_m\}=\bigcup_{k\in\{x,y,z\}}\Big\{ p_k\ket{+}\bra{+}_k, p_k\ket{-}\bra{-}_k\Big\},\quad \sum_{k\in\{x,y,z\}}p_k=1, \end{equation} with $p_x,p_y,p_z$ chosen to minimize $\tracep(C F^{-1})=\frac{1}{p_x}+\frac{1}{p_y}+\frac{c(r)(1-r^2)}{p_z}$, as at this point the cost matrix in Cartesian coordinates reads $C=\t{diag}[1,1,c(r)]$. Finally, the fundamental measurement incompatibility vanishes only at $|r|=0$ (where the SLD CR bound coincides with the HCR bound), but since the HGM bound is still larger at this point, it implies that collective measurements are necessary to obtain the optimal performance. \subsection{Estimation for general quantum Gaussian shift models} \label{sec:examplegaussian} In this section we consider the general problem of estimating the parameters of a quantum Gaussian shift model, which is a special class of general Gaussian estimation models \cite{Gao2014, Nichols2018}. Aside from the mathematical interest and practical importance, the problem is directly relevant for the QLAN theory described in Sec.~\ref{sec:qlan}. In a nutshell, QLAN shows that any model consisting of an ensemble of finite dimensional identically prepared systems is asymptotically equivalent in a statistical sense to a Gaussian shift model which encodes the local `tangent space' structure of the original one. In particular, each qubit model discussed in the preceding section will have a corresponding Gaussian model. A key property of Gaussian shift models is that the HCR bound is always saturable in a single-shot scenario. Combined with the QLAN theorem this will provide the proof of the asymptotic saturability of the HCR bound in the multi-copy setting.
Consider a continuous variable system consisting of $\mathfrak{q}$ modes with canonical coordinates $(Q_{i}, P_{i})$, satisfying the commutation relations \cite{WeedbrokReviewGaussian2012} \begin{equation} [Q_{i}, P_{j}] = i\delta_{i,j} \mathbb{1} , \qquad i,j=1,\dots, \mathfrak{q}. \end{equation} The joint system can be represented on the tensor product space $\mathcal{F}^{\otimes \mathfrak{q}}$ such that the pair $(Q_{i}, P_{i})$ acts on the $i$-th copy of the one-mode Fock space $\mathcal{F}$. Since it will be relevant for the QLAN formulation, we also allow for $\mathfrak{c}$ `classical real valued variables' $(Z_{1},\dots , Z_{\mathfrak{c}}) $ which commute with each other and with all $(Q_{i}, P_{i})$. These can be represented as position observables on $\mathfrak{c}$ additional copies of $\mathcal{F}$, whose affiliated algebra is $L^\infty(\mathbb{R}^\mathfrak{c})$. We put all the canonical observables together as a column vector \begin{equation} {\bf R}:= [R_{1},\dots, R_{\mathfrak{r}}]^T\equiv [ Q_{1}, P_{1},\dots , Q_{\mathfrak{q}}, P_{\mathfrak{q}}, Z_{1},\dots, Z_{\mathfrak{c}}]^T,\quad \mathfrak{r}=2\mathfrak{q}+\mathfrak{c}, \end{equation} and write their commutation relations as \begin{equation} [ R_{i}, R_{j}] = iS_{i,j} \mathbb{1}, \quad [ \mathbf{R}, \mathbf{R}^T] = i S \mathbb{1}, \end{equation} where $S$ is the $\mathfrak{r} \times \mathfrak{r}$ block diagonal symplectic matrix of the form \begin{equation} S= {\rm diag}[\Omega, \dots ,\Omega, 0,\dots, 0 ], \quad \Omega= \begin{bmatrix} 0&1\\ -1 &0 \end{bmatrix}.
\end{equation} A state of this hybrid quantum-classical system is described in terms of its density matrix $\varrho$ (in this review we use $\varrho$ to represent continuous variable system states, in particular Gaussian states, in order to differentiate it from finite dimensional states $\rho$) which is a positive and normalised element of $\mathcal{T}^{1}(\mathcal{F}^{\otimes \mathfrak{q}})\otimes L^{1}(\mathbb{R}^{\mathfrak{c}})$, where $\mathcal{T}^1$ denotes the space of trace-class linear operators and $L^{1}$ the space of absolutely integrable functions. For any state $\varrho$ let us define its characteristic function \begin{equation} \mathcal{X}_{\varrho}(\boldsymbol{\xi}):= \trace\left( \varrho\, e^{i \boldsymbol{\xi}^T {\bf R}} \right) \equiv \scalno{e^{i \boldsymbol{\xi}^T {\bf R}},\mathbb{1}}_\varrho, \quad \boldsymbol{\xi} \in \mathbb{R}^\mathfrak{r}, \end{equation} where the symbol `${\rm Tr}$' is understood as taking the trace over the quantum part and integrating over the classical part. We will say that a state $\varrho$ is Gaussian if and only if its characteristic function $\mathcal{X}$ is Gaussian: \begin{equation} \label{eq:characteristic} \mathcal{X}_{\varrho}(\boldsymbol{\xi}) = e^{i \boldsymbol{\xi}^T {\bf r} } e^{- \boldsymbol{\xi}^T V \boldsymbol{\xi}/2 }, \end{equation} where \begin{equation} \mathbf{r} := \scalno{\mathbf{R},\mathbb{1}}_\varrho, \quad V := \scalno{({\bf R}- {\bf r}\mathbb{1} ),({\bf R}- {\bf r} \mathbb{1})^T}_\varrho \end{equation} are the mean and the covariance matrix of the state, respectively---note that we have used the previously introduced notation involving the scalar product as defined in equation \eqref{eq:scal}.
The positivity of the density matrix imposes a restriction on the allowed covariance matrices, as expressed by the matrix Heisenberg uncertainty relation \cite{Simon1994, WeedbrokReviewGaussian2012}: \begin{equation} \label{eq:heisenberggaussian} V \geq \frac{i}{2} S. \end{equation} It is worth stressing that the opposite implication holds as well---to any covariance matrix satisfying \eqref{eq:heisenberggaussian} there corresponds a unique zero-mean Gaussian state $\varrho$. A \emph{Gaussian shift model} with parameters ${\boldsymbol{\var}}\in \mathbb{R}^{\mathfrak p}$ is a family of Gaussian states $\varrho_{{\boldsymbol{\var}}}$ with some \emph{fixed} covariance matrix $V$ and mean depending linearly on ${\boldsymbol{\var}}$, \begin{equation} \mathbf{r} = A {\boldsymbol{\var}}, \end{equation} with $A:\mathbb{R}^{\mathfrak{p}} \to\mathbb{R}^{\mathfrak{r}}$ a given injective linear map. For purely quantum models with no classical degrees of freedom ($\mathfrak{c}=0$), the states $\varrho_{{\boldsymbol{\var}}}$ can be obtained by applying unitary shift operators to the mean zero Gaussian state with covariance matrix $V$, \begin{equation} \varrho_{{\boldsymbol{\var}}} = e^{-i \mathbf{R}^T G {\boldsymbol{\var}}} \varrho_0 e^{i \mathbf{R}^T G {\boldsymbol{\var}}},\qquad G = S A. \end{equation} Thanks to the fact that the parameters enter linearly into the mean of the Gaussian state and the covariance matrix is fixed, the SLDs of a Gaussian shift model are linear combinations of the canonical coordinates \cite{Holevo1982, Monras2013}.
To see this, consider the characteristic function of $\varrho_{{\boldsymbol{\var}}}$ \begin{equation} \mathcal{X}_{{\boldsymbol{\var}}}(\boldsymbol{\xi}) = e^{i \boldsymbol{\xi}^T A {\bf {\boldsymbol{\var}}} } e^{- \boldsymbol{\xi}^T V \boldsymbol{\xi}/2 }, \end{equation} and take derivatives over $\theta_i$ to get \begin{equation} \label{eq:sldgausstheta} \partial_i \mathcal{X}_{{\boldsymbol{\var}}}(\boldsymbol{\xi}) = i (A^T \boldsymbol{\xi})_i \mathcal{X}_{{\boldsymbol{\var}}}(\boldsymbol{\xi}). \end{equation} On the other hand, using the definition \eqref{eq:SLD} of the SLDs, the derivative may be expressed as \begin{equation} \label{eq:sldgauss1} \partial_i \mathcal{X}_{{\boldsymbol{\var}}}(\boldsymbol{\xi}) = \trace\left(\partial_i \varrho_{{\boldsymbol{\var}}} e^{i \boldsymbol{\xi}^T {\bf R}}\right) = \trace\left(\tfrac{1}{2}\{L_i,\varrho_{{\boldsymbol{\var}} } \} e^{i \boldsymbol{\xi}^T {\bf R}} \right). \end{equation} Making use of the following algebraic property $\tfrac{1}{2}\{R_i, e^{i \boldsymbol{\xi}^T \mathbf{R}}\} = \tfrac{1}{i}\tfrac{\partial}{\partial \xi_i} e^{i \boldsymbol{\xi}^T \mathbf{R}} $, which can be proven using the standard BCH formula, we get \begin{equation} \label{eq:anticomm} \trace\left(\tfrac{1}{2}\{R_i, \varrho_{{\boldsymbol{\var}}}\} e^{i \boldsymbol{\xi}^T \mathbf{R}} \right) = \trace\left(\tfrac{1}{2}\{R_i, e^{i \boldsymbol{\xi}^T \mathbf{R}}\} \varrho_{{\boldsymbol{\var}}} \right) = \frac{\partial}{i \partial \xi_i} \mathcal{X}_{{\boldsymbol{\var}}} = (A {\boldsymbol{\var}} + i V \boldsymbol{\xi})_i \mathcal{X}_{{\boldsymbol{\var}}}. \end{equation} Now, if we take \begin{equation} {\bf L} = [L_1, \dots , L_{\mathfrak{p}}]^T, \quad \mathbf{L} = A^T V^{-1} \mathbf{R} - A^TV^{-1}A {\boldsymbol{\var}}\mathbb{1}, \end{equation} and substitute into the r.h.s.
in \eqref{eq:sldgauss1} we obtain \eqref{eq:sldgausstheta}, and hence we see that this is a correct formula for the SLD operators. For simplicity and without loss of generality, from now on we will consider estimation around ${\boldsymbol{\var}}=0$, in which case the formula for the SLDs simplifies to \begin{equation} \label{eq:sldgauss} \mathbf{L} = A^T V^{-1} \mathbf{R} \end{equation} and the QFI matrix has the same expression as its classical counterpart \begin{equation} \label{eq:gaussfisher} F_Q = \scalno{{\bf L}, {\bf L}^T}_{\varrho_{{\boldsymbol{\var}}}} = A^T V^{-1} A. \end{equation} We are now in a position to derive the HCR bound for the Gaussian shift model. Recall from Sec.~\ref{sec:collectiveproof} that when performing the minimization in the formula \eqref{HCR} for the HCR bound, we may always restrict the class of operators $\mathbf{X}$ to belong to the smallest $\mathcal{D}$-invariant subspace $T$ that contains $\t{span}_{\mathbb{R}}\{L_1,\dots,L_{\mathfrak{p}}\}$. Using the fact that in the Gaussian shift model the SLDs are linear functions of the canonical variables, we show below that $T\subseteq \t{span}_{\mathbb{R}}\{R_1,\dots,R_{\mathfrak{r}}\}$ (for ${\boldsymbol{\var}} \neq 0$ we need to include $\mathbb{1}$ in the span as well).
To see this, note that the characteristic function of $i[R_i, \varrho_{{\boldsymbol{\var}}}]$ is equal to \begin{equation} \label{eq:DinR} \trace\left(i[R_i, \varrho_{{\boldsymbol{\var}}}]e^{i \boldsymbol{\xi}^T \mathbf{R}} \right) =i \trace\left(\varrho_{{\boldsymbol{\var}}} [e^{i \boldsymbol{\xi}^T \mathbf{R}},R_i] \right)=i \trace\left( \varrho_{{\boldsymbol{\var}}} [i\boldsymbol{\xi}^T \mathbf{R},R_i]e^{i \boldsymbol{\xi}^T \mathbf{R}} \right) =i( S\boldsymbol{\xi} )_i \mathcal{X}_{{\boldsymbol{\var}}}(\boldsymbol{\xi}), \end{equation} which corresponds to the original $\mathcal{X}_{{\boldsymbol{\var}}}$ multiplied by some linear transformation of $\boldsymbol{\xi}$ with imaginary coefficients. Next, equation \eqref{eq:anticomm} with ${\boldsymbol{\var}} =0$ reads \begin{equation} \trace\left(\{R_j, \varrho_{{\boldsymbol{\var}}}\} e^{i \boldsymbol{\xi}^T \mathbf{R}} \right)=2i(V \boldsymbol{\xi})_j \mathcal{X}_{{\boldsymbol{\var}}} (\boldsymbol{\xi}). \end{equation} For the time being, we can restrict ourselves to quantum modes only, since the $\mathcal{D}$ operator is trivial for classical variables. In this case $V$ is a strictly positive real matrix, and hence ${\rm Range}(V) = \mathbb{R}^\mathfrak{r}$. Since any operator is in one-to-one correspondence with its characteristic function, this means that ${\rm span}_\mathbb{R}\left\{\ \{R_j, \varrho_{{\boldsymbol{\var}}}\},\, j=1, \dots , \mathfrak{r}\right\}$ contains $i[R_i, \varrho_{{\boldsymbol{\var}}}]$ for any $i$. Taking into account the definition \eqref{eq:dinv} of the operator $\mathcal{D}$, this implies that $\mathcal{D}(R_i)$ may be written as a linear combination of the components of ${\bf R}$, $\mathcal{D}(R_i) = \frac{1}{2}(S V^{-1} \mathbf{R})_i$, and hence $\t{span}_{\mathbb{R}}\{ R_1,\dots,R_{\mathfrak{r}}\}$ is $\mathcal{D}$-invariant, which was to be proven.
Therefore, when calculating the HCR bound for the Gaussian shift model we may restrict the minimization to $\mathbf{X}$ operators of the form $\mathbf{X} = B \mathbf{R}$, where $B$ is a linear map $B:\mathbb{R}^{\mathfrak{r}} \to\mathbb{R}^{\mathfrak{p}}$. Moreover, taking into account the explicit form of the SLD operators given by equation \eqref{eq:sldgauss}, the l.u. condition may be equivalently written as: \begin{equation} \mathcal{I}= \trace(\bnabla \varrho_{{\boldsymbol{\var}}} \mathbf{X}^T) = \trace\left(\varrho_{{\boldsymbol{\var}}}\tfrac{1}{2}\{A^T V^{-1}\mathbf{R} ,(B \mathbf{R})^T \} \right) = A^T V^{-1} V B^T = (B A)^T. \end{equation} Additionally, \begin{equation} \t{Im}Z[\bold{X}] = \frac{1}{2i} \trace(\varrho_{\boldsymbol{\var}} [\mathbf{X}, \mathbf{X}^T])= \frac{1}{2 i } B \trace(\varrho_{\boldsymbol{\var}} [\mathbf{R},\mathbf{R}^T]) B^T = \frac{1}{2} B S B^T, \end{equation} and therefore the HCR bound \begin{equation} \mathcal C^{\t{H}}=\min_{\mathbf{X}=B \mathbf{R}}\left(\tracep(C \mathrm{Re} Z[\bold{X}])+\|\sqrt{C}\cdot {\rm Im}Z[\bold{X}]\cdot\sqrt{C}\|_1 \ \big|\ \, \trace\left(\bnabla \varrho_{{\boldsymbol{\var}}}\mathbf{X}^T\right)=\mathcal{I}\right) \end{equation} may be written directly as the minimization over the linear map $B$ \begin{equation} \label{Bholevo} \mathcal C^{\t{H}}=\min_{B}\left(\tracep(C B V B^T)+\tfrac{1}{2}\|\sqrt{C}\cdot B S B^T\cdot\sqrt{C}\|_1 \ \big|\ BA = \mathcal{I} \right). \end{equation} In general there is no closed analytical formula for the solution of this minimization problem. However, in the special case when the number of parameters of interest is maximal, $\mathfrak{p}=\mathfrak{r}$, the operator $A$ has a unique inverse and we obtain an explicit bound by simply substituting $B=A^{-1}$: \begin{equation} \label{eq:hcrgauss} \mathcal C^{\t{H}} = \tracep\left( C A^{-1} V (A^{-1})^T \right) + \frac{1}{2}\left\| \sqrt{C}A^{-1} S (A^{-1})^T \sqrt{C} \right\|_1.
\end{equation}
While the first term is identical to the cost of the corresponding classical Gaussian estimation problem, the second term in \eqref{eq:hcrgauss} represents the additional contribution due to non-commutativity. This model is also $\mathcal{D}$-invariant, since $\t{span}_{\mathbb{R}}\{L_1,\dots,L_{\mathfrak{p}}\}$ corresponds to the span of all canonical variables, which is $\mathcal{D}$-invariant. Therefore, as discussed in Sec.~\ref{sec:Dinvariance}, the above bound coincides with the RLD CR bound. On the other hand, when $\mathfrak{p}=1$, i.e. when we estimate only a single scalar variable, the HCR reduces to the SLD CR bound by the same arguments as given in Sec.~\ref{sec:nuisance}, where it was shown that this is a general feature of multi-parameter estimation problems with rank-1 cost matrix. Finally, we show that for the Gaussian shift models the HCR bound is always saturable (on the single copy level!). For simplicity, let us again assume the absence of classical degrees of freedom, as the saturability issue is trivial for them---there is no measurement issue involved at all. For this, it will be enough to consider the so-called linear measurement \cite{Holevo1982}. A linear measurement can be implemented by coupling the system with an independent ancillary system and measuring a commuting family of coordinates of the joint system. Let $\tilde{\bf R}:= [\tilde{R}_{1}, \dots ,\tilde{R}_{\mathfrak{r}}]^T$ be the coordinates of the ancillary system with the same number of modes and a symplectic matrix $\tilde{S}$. We assume that the joint system+ancilla state is $\varrho_{\boldsymbol{\var}} \otimes \tilde{\rho}$, where $\tilde\rho$ is a fixed zero mean Gaussian state with covariance matrix $\tilde{V}$. Let $B$ be the result of the optimization \eqref{Bholevo}.
The measurement is defined by a $\mathfrak{p}$-tuple ${\bf Y} = [Y_1, \dots ,Y_\mathfrak{p}]^T$ of \emph{mutually commuting} variables of the form
\[
{\bf Y} ={\bf X} + \tilde{\bf X} = B {\bf R} + \tilde{B} \tilde{{\bf R}},
\]
where $B, \tilde{B}$ are real $\mathfrak{p} \times \mathfrak{r}$ matrices, with the condition $B A = \mathcal{I}$ which guarantees that the l.u. property is fulfilled. As a result we obtain a l.u. unbiased estimator whose mean square error effectively depends on $\tilde{B}$ and the choice of the ancillary Gaussian state $\tilde{\rho}$:
\begin{equation}\label{eq.risk.w}
\mathcal{C}(\tilde{B},\tilde{\rho})= {\rm Tr}(C BVB^T) + {\rm Tr}(C \tilde B\tilde V\tilde B^T).
\end{equation}
Since we require all the $Y_{i}$ to commute with each other, we have
\begin{equation}
\label{eq:commutegauss}
[ \tilde{\bf X}, \tilde{\bf X}^T ] = - [ {\bf X} , {\bf X}^T] = - i BS B^T \mathbb{1}.
\end{equation}
Notice that we can trivially satisfy the above requirement if the symplectic matrix $\tilde{S} = -S$ and we take $\tilde{B}=B$. Physically, this corresponds to inverting the roles of position and momentum operators. Then the total cost \eqref{eq.risk.w} simplifies to:
\begin{equation}
\mathcal{C}(\tilde{\rho})= {\rm Tr}(C BVB^T) + {\rm Tr}(C B\tilde V B^T)
\end{equation}
and what remains is to perform the optimization over $\tilde\rho$ (or, effectively, over $\tilde V$). The uncertainty principle \eqref{eq:heisenberggaussian} applied to the ancillary variables $\tilde{\bf X}$ gives the constraint
\begin{equation*}
\tilde V\geq \frac{i}{2} S.
\end{equation*}
Using the same reasoning as the one leading to \eqref{step2}, the above condition implies that:
\begin{equation*}
{\rm Tr}(C B\tilde V B^T) \geq \frac{1}{2}{\rm Tr}(|\sqrt{C}B SB^T\sqrt{C}|),
\end{equation*}
with equality for $\tilde V=(\sqrt{C}B)^{-1}|\sqrt{C}B SB^T\sqrt{C}|(B^T\sqrt{C})^{-1}$, which satisfies the uncertainty condition \eqref{eq:heisenberggaussian}.
By choosing $\tilde{\rho}$ to be the corresponding Gaussian state, we conclude that
\begin{equation*}
\mathcal{C}= {\rm Tr }\left(C BVB^T\right)+ \frac{1}{2}{\rm Tr}\left(|\sqrt{C} B SB^T \sqrt{C} |\right).
\end{equation*}
Since $B$ is the solution of \eqref{Bholevo}, we recover the HCR bound. Note that the above construction of the optimal linear measurement is very similar in its spirit to the reasoning presented in Sec.~\ref{sec:hcrequivalent} leading to \eqref{matsu}. It utilizes an extended space in order to make the measurement operators commuting on the extended space. However, unlike the reasoning presented here, the derivation presented in Sec.~\ref{sec:hcrequivalent} does not necessarily provide an explicit construction of a measurement that saturates the HCR bound; indeed, it does so only in specific cases such as the pure state models discussed in Sec.~\ref{sec:saturability}. Another special feature of the Gaussian shift model, which stems from its covariance with respect to shifts, is the fact that the optimal measurement is independent of the actual value of ${\boldsymbol{\var}}$. To further emphasise the fundamental role of such models in quantum statistics, the QLAN theory described in Sec.~\ref{sec:qlan} shows that such models arise as asymptotic limits of quantum i.i.d. models, where Gaussian shifts emerge from collective local unitary rotations in i.i.d. models. Let us finish this section by considering three basic examples, which in the light of the QLAN discussed in Sec.~\ref{sec:qlan} will be related to the three qubit model examples presented in Sec.~\ref{sec:examplequbit}.
\subsubsection{Two quantum variables.}
We first consider the standard joint position and momentum estimation problem on a single quantum mode with no classical variables, which corresponds to the following choice of Gaussian shift model parameters: $\mathfrak{p}=2$, $\mathfrak{r}=2$ ($\mathfrak{q}=1$, $\mathfrak{c}=0$), ${\boldsymbol{\var}} = (q,p)$, $A = \t{diag}[1,1]$, $V= \t{diag}[\sigma^2_q, \sigma^2_p]$ (we assume no $q$, $p$ correlations for simplicity). Since in this case $\mathfrak{r}=\mathfrak{p}$ we can use Eq.~\eqref{eq:hcrgauss} and for the cost matrix $C=\t{diag}[1,1]$ we obtain the cost of the joint estimation of momentum and position exceeding the SLD CR bound by an amount equal to twice the vacuum fluctuation contribution:
\begin{equation}
\mathcal C^{\t{H}}_{(q,p)} = \sigma_q^2 + \sigma_p^2 + 1, \quad \mathcal{C}^{\t{SLD}}_{(q,p)}=\sigma_q^2 + \sigma_p^2.
\end{equation}
In particular, when $\varrho$ is the minimum uncertainty state, $\sigma_p^2=\sigma_q^2=1/2$, and we rescale the estimation parameters by choosing $A = \frac{1}{\sqrt{2}}\t{diag}[1,1]$, the bounds take exactly the same values as for the pure qubit state estimation example, see Eqs.~(\ref{eq:hcrqubit1},\ref{eq:sldqubit1}).
\subsubsection{One quantum + one classical variable.}
Second, consider a situation where, apart from the value of a single quantum canonical variable $Q$, the goal is to estimate an independent classical variable $Z$. Formally this corresponds to the choice: $\mathfrak{p}=2$, $\mathfrak{r}=3$ ($\mathfrak{q}=1$, $\mathfrak{c}=1$), ${\boldsymbol{\var}} = (q,z)$,
\begin{equation}
A = \begin{bmatrix} 1 & 0 \\ 0 & 0 \\ 0 & 1 \end{bmatrix}, \quad V = \t{diag}[\sigma_q^2,\sigma_p^2,\sigma_z^2].
\end{equation}
Even though we do not deal here with the $\mathfrak{p}=\mathfrak{r}$ case, if we choose the cost matrix to be diagonal $C = \t{diag}[1,1]$, we can still use \eqref{eq:hcrgauss} since there are no correlations between $(Q,Z)$ and $P$ and hence we can simply ignore the latter---formally this corresponds to choosing $B$ equal to the pseudoinverse of $A$ which in this case corresponds to $B=A^T$. As a result we get
\begin{equation}
\mathcal C^{\t{H}}_{(q,z)} = \sigma_q^2 + \sigma_z^2 = \mathcal{C}^{\t{SLD}}_{(q,z)}
\end{equation}
and since the HCR coincides with the SLD CR bound it implies that, as expected, there is no measurement incompatibility issue here. Moreover, if we choose $\varrho$ to have the following $q$, $p$ variances, $\sigma_p^2=\sigma_q^2=\frac{1}{2 r}$, rescale the estimation parameters $q,p$ by $\frac{1}{\sqrt{2 r}}$ and choose $\sigma_z^2 =1-r^2$, we get the same bound for the cost as in the $(r,\theta)$ qubit estimation example, for the Euclidean cost choice $c(r)=1$, see Eq.~\eqref{eq:hcrqubit2}---we may also obtain the cost corresponding to an arbitrary $c(r)$ function, by simply choosing the cost matrix in the Gaussian model as $C = \t{diag}[1,c(r)]$.
\begin{table}[t]
\centering
\begin{tabular}{|c|c|c|c|c|c|c|}
\hline
 & \multicolumn{3}{c|}{qubit}& \multicolumn{3}{c|}{Gaussian} \\
 & \multicolumn{1}{c}{$(\theta,\varphi)$} & \multicolumn{1}{c}{$(r, \theta)$ }& $(r,\theta,\varphi)$ & \multicolumn{1}{c}{$(q,p)$} & \multicolumn{1}{c}{$(q,z)$}& $(q,p,z)$ \\
\hline
$\mathcal{C}^{\t{SLD}}$ & $2$ & $1 +c(r)(1-r^2)$ & $2 + c(r)(1-r^2)$ & $\sigma_q^2+\sigma_p^2$ & $\sigma_q^2+\sigma_z^2$ & $\sigma_q^2+\sigma_p^2 +\sigma_z^2$ \\
\hline
$\mathcal{C}^{\t{H}} - \mathcal{C}^{\t{SLD}} $ & $2$ & $ 0 $ & $2r$ & $1$ &$0$ & $1$ \\
\hline
{\tiny \begin{tabular}{c} asymptotic\\ measurement \\ incompatibility \end{tabular}} & $+$ & $-$ & $+$& $+$ &$-$ & $+$\\
\hline
{\tiny \begin{tabular}{c} advantage of \\ collective\\ measurements \end{tabular}} & $-$ &$+$ & $+$ & $-$ & $-$& $-$\\
\hline
\end{tabular}
\caption{Summary of the results for the SLD CR as well as HCR bounds obtained for the qubit as well as Gaussian shift model examples. In the Gaussian models the cost matrix is chosen to be $C = \t{diag}[1,1,1]$, for $(q,p,z)$ variables, while in the qubit models $C = \t{diag}[c(r), r^2, r^2 \sin^2\theta]$, for $(r,\theta,\varphi)$ variables. If we choose the Gaussian state to have the following variances of the canonical variables $\sigma_q^2= \sigma_p^2 = \frac{1}{2r}$, $\sigma_z^2 = 1-r^2$, and furthermore rescale the $q$ and $p$ parameters by a factor $\tfrac{1}{\sqrt{2r}}$, the bounds for the Gaussian models will coincide with the corresponding qubit models for the Euclidean distance cost $c(r)=1$---a manifestation of the general QLAN theorem discussed in Sec.~\ref{sec:qlan}.}
\label{tab:examples}
\end{table}
\subsubsection{Two quantum + one classical variable.
}
Finally, consider the model which combines the two above cases and corresponds to $\mathfrak{p}=3$, $\mathfrak{r}=3$ ($\mathfrak{q}=1$, $\mathfrak{c}=1$), ${\boldsymbol{\var}} = (q,p, z)$, $A = \t{diag}[1,1,1]$, $V= \t{diag}[\sigma^2_q, \sigma^2_p, \sigma^2_z]$. Using \eqref{eq:hcrgauss} and \eqref{eq:sldgauss} we get:
\begin{equation}
\label{eq:hcrgauss3}
\mathcal C^{\t{H}}_{(q,p,z)} = \sigma_q^2 + \sigma_p^2 + \sigma_z^2 +1, \qquad \mathcal{C}^{\t{SLD}}_{(q,p,z)}=\sigma_p^2 + \sigma_q^2 + \sigma_z^2.
\end{equation}
If we again choose $\varrho$ to have $\sigma_p^2=\sigma_q^2=\frac{1}{2r}$, rescale the estimation parameters $q,p$ by $\frac{1}{\sqrt{2 r}}$ and choose $\sigma_z^2 = 1-r^2$, we get the same bounds for the cost as in the $(r,\theta,\varphi)$ qubit estimation example for $c(r)=1$, see Eq.~\eqref{eq:hcrqubit3}. Tab.~\ref{tab:examples} summarizes the results obtained in this subsection, and may be regarded as a take-home message that allows one to understand the difference between various multi-parameter models in terms of how the achievable precision deviates from the one predicted by the SLD CR bound and the role of collective measurements in achieving the fundamental bound. The similarity between the three qubit and three Gaussian examples is no coincidence and will become clear in the light of the QLAN considerations presented in the next section.
\section{Quantum local asymptotic normality}
\label{sec:qlan}
We have ended the previous section with a list of examples of qubit and Gaussian shift models that illustrated the essential features of multi-parameter quantum estimation. In this section we will see that the link between qubit and Gaussian estimation problems is stronger than one might expect at first sight, and the relation between these models is captured by the concept of quantum local asymptotic normality (QLAN) \cite{GutaKahn, GutaJanssensKahn, KahnGuta, GutaJencova, KahnGuta2, Gill2011, yamagata2013quantum, Yang2019}.
Informally, QLAN states that in the limit of large $n$, the statistical model describing independent ensembles of $n$ identically prepared finite dimensional systems can be approximated (locally in the parameter space) by a certain Gaussian shift model. This has three important consequences: \begin{enumerate} \item{It provides an asymptotically optimal estimation strategy for independent ensembles, which amounts to pulling back the optimal Gaussian measurement to a collective measurement on the ensemble, by means of quantum channels.} \item{When combined with the universal saturability of the HCR bound for Gaussian shift models, see Sec.~\ref{sec:examplegaussian}, QLAN implies that the HCR bound is asymptotically saturable on any multiple-copy models that satisfy certain regularity assumptions.} \item{The optimal measurement of point (i) has asymptotically normal distribution, which provides asymptotic confidence regions for the estimator.} \end{enumerate} For a better understanding of QLAN, we first provide some intuition regarding the classical local asymptotic normality (LAN) concept. Classical LAN \cite{vanderVaart} has very broad applicability including non-parametric estimation (estimation of infinite dimensional parameters, as in density estimation problems), and statistical problems involving non-i.i.d. data such as (hidden) Markov processes and time series. Here we will focus on parametric (finite dimensional) models with independent identically distributed (i.i.d.) samples, which serve us as a guide towards understanding the structure of quantum multi-copy models and the problem of optimal quantum state estimation. \subsection{LAN in classical statistics} Let us consider an i.i.d setting, where $n$ independent samples $m_1, \dots ,m_n$ are drawn from the probability distribution $p_{\boldsymbol{\var}}(m)$ which depends smoothly on ${\boldsymbol{\var}}\in \Theta\subset\mathbb{R}^{\mathfrak{p}}$. 
Since we expect the statistical uncertainty to scale as $n^{-1/2}$ with the increasing number of samples, we will analyse this model at the local level and express parameters ${\boldsymbol{\var}}$ in the neighbourhood of a fixed point ${\boldsymbol{\var}}_0$ as \begin{equation} {\boldsymbol{\var}} = {\boldsymbol{\var}}_0 + \mathbf{u}/\sqrt{n}. \end{equation} Thanks to this reparametrization, we expect the asymptotic formulas for the estimation precision of $\mathbf{u}$ to be independent of $n$. Furthermore, let us denote by $\mathcal{N}(\mathbf{u}, V)$ a classical Gaussian shift model, which consists of drawing a single sample $\mathbf{x} \in \mathbb{R}^{\mathfrak{p}} $ from a normal distribution with mean $\mathbf{u}$ and the covariance matrix $V$. Informally, LAN states that for large $n$, the i.i.d. model $p^n_{\bf u}:= p^n_{{\boldsymbol{\var}}_0 +\mathbf{u}/\sqrt{n}}$ is close to the Gaussian shift model $\mathcal{N}({\bf u} , F^{-1}_{{\boldsymbol{\var}}_0})$, where $F_{{\boldsymbol{\var}}_0}$ is the FI matrix for the $p_{\boldsymbol{\var}}$ distribution calculated at ${\boldsymbol{\var}}={\boldsymbol{\var}}_0$. Note that both models have the same Fisher information, and the CR inequality is attained in the Gaussian case by simply taking $\mathbf{x}$ as the estimator of the mean. In order to understand in what sense the two models are \emph{close} to each other, consider the likelihood process defined as the `random function' ${\boldsymbol{\var}}\mapsto p_{\boldsymbol{\var}}$ (a random variable with values $p_{\boldsymbol{\var}}(m)$ for each ${\boldsymbol{\var}}\in \Theta$). For our purposes it is more interesting to look at the \emph{log-likelihood process}, which is defined with respect to a fixed reference point ${\boldsymbol{\var}}_0$ \begin{equation} {\boldsymbol{\var}}\mapsto l_{\boldsymbol{\var}}:= \log \frac {p_{\boldsymbol{\var}}}{p_{{\boldsymbol{\var}}_0}}. 
\end{equation}
This is in fact a \emph{sufficient statistic}, which means that it captures the entire statistical information contained in the original samples. In the specific case of the i.i.d. sequence $p^n_{\bf u}$ with parameter ${\bf u}$, the log-likelihood ratio (with respect to ${\bf u}=0$) is
\begin{equation}
l^n_{\bf u} := \sum_{i=1}^n l_{{\boldsymbol{\var}}_0 + {\bf u}/\sqrt{n}}(m_i)= \sum_{i=1}^n \log \frac{p_{{\boldsymbol{\var}}_0 + {\bf u} /\sqrt{n}}}{p_{{\boldsymbol{\var}}_0}} (m_i).
\end{equation}
By expanding $l_{{\boldsymbol{\var}}_0 + {\bf u}/\sqrt{n}}$ to the second order with respect to $ {\bf u}/\sqrt{n}$ we obtain
\begin{equation}
l^n_{\bf u} = \frac{{\bf u}^T }{\sqrt{n}}\sum_{i=1}^n \boldsymbol{\nabla}{l}(m_i) + \frac{1}{2n} \sum_{i=1}^n {\bf u}^T \left[\boldsymbol{\nabla}\boldsymbol{\nabla}^T{l}(m_i)\right] {\bf u} + o(n^{-1}),
\end{equation}
where $\boldsymbol{\nabla}$ is the gradient operator with respect to ${\boldsymbol{\var}}$ taken at ${\boldsymbol{\var}}={\boldsymbol{\var}}_0$, while $\boldsymbol{\nabla}\boldsymbol{\nabla}^T$ represents the matrix of second derivatives (Hessian) at ${\boldsymbol{\var}}={\boldsymbol{\var}}_0$. By applying the central limit theorem (CLT) to the first sum and the law of large numbers to the second sum we obtain the (joint) convergence in distribution
\begin{equation}\label{eq.weak.lan}
l^n_{\bf u} \xrightarrow[p^n_{\mathbf{u}}]{~~n\to\infty~~} {\bf u}^T F_{{\boldsymbol{\var}}_0} \mathbf{x} - \frac{1}{2} {\bf u}^T F_{{\boldsymbol{\var}}_0}{\bf u},
\end{equation}
where $\mathbf{x}$ is a random vector with distribution $\mathcal{N}(0, F^{-1}_{{\boldsymbol{\var}}_0})$. Note that the right hand side is the log-likelihood ratio of the Gaussian shift model $\mathcal{N}({\bf u}, F^{-1}_{{\boldsymbol{\var}}_0})$ with respect to the reference point ${\bf u}=0$. A similar result can be shown for an arbitrary local parameter as reference.
This amounts to what is called \emph{weak convergence} of the i.i.d. model $p^n_{\bf u}$ to the Gaussian limit model $\mathcal{N}({\bf u}, F^{-1}_{{\boldsymbol{\var}}_0})$. In the next subsection we will describe a quantum version of weak LAN; we will then introduce the notion of \emph{strong} LAN which allows for a more complete understanding of the Gaussian approximation, and the solution of the optimal estimation problem in the asymptotic regime. \subsection{Weak convergence approach to QLAN} \label{sec:weakLAN} A quantum i.i.d. version of the weak LAN convergence has been established in \cite{GutaJencova} and a different approach was taken in \cite{yamagata2013quantum,FujiwaraYamagata}. In the specific setup of pure state models weak convergence corresponds roughly to the geometric idea of convergence of state overlaps and can be used to derive LAN for correlated states such as outputs of quantum Markov processes (or stationary, purely-generated finitely correlated states) \cite{GutaKiukas1,GutaKiukas2}. However, for mixed states models, the theory of weak convergence is currently still in its infancy and the notion of strong convergence, discussed in Sec.~\ref{sec:QLANstrong}, appears to be a more versatile tool which yields operationally meaningful statements. \subsubsection{Single parameter pure state model.} For an intuitive illustration we will start by considering the special case of a single parameter pure state model consisting of a unitary rotation family $| \psi_\theta\rangle$ with $\theta\in \mathbb{R}$ and a selfadjoint generator $G$ \begin{equation} | \psi_\theta\rangle := e^{i \theta G} | \psi_0 \rangle,\qquad \langle \psi_0 |G| \psi_0\rangle =0. \end{equation} The corresponding QFI is $F_Q= 4 {\rm Var} (G) = 4\langle \psi_0 |G^2 |\psi_0\rangle$ and does not depend on $\theta$. 
We consider an ensemble of $n$ independent systems, and assume that the parameter is of the order of the statistical uncertainty, so that $\theta =\theta_0+ u/\sqrt{n}$ with $\theta_0$ fixed and known and $u$ an unknown `local parameter'. The joint state of the ensemble is
\begin{equation}
|\psi^n_{u}\rangle := |\psi^{\otimes n}_{\theta_0 + u/\sqrt{n}}\rangle.
\end{equation}
Since the QFI is additive and the parameter has been rescaled accordingly, the model $|\psi^n_{u}\rangle$ has Fisher information $F_Q$. In addition to the i.i.d. model, we consider the \emph{quantum Gaussian shift model} consisting of coherent states of a one-mode continuous-variable system with canonical coordinates $(Q,P)$
\begin{equation}
\left|\sqrt{\tfrac{F_Q}{2}}u\right\rangle := e^{-i u \sqrt{\tfrac{F_Q}{2}} P} |{\bf 0}\rangle,
\end{equation}
where $ |{\bf 0}\rangle\in\mathcal{F}$ denotes the vacuum state. The model is parametrised by $u$, such that the expectations of $(Q,P)$ are $(\sqrt{\tfrac{F_Q}{2}}u, 0)$, and it has quantum Fisher information $F_Q$. Since a pure state model is a family of Hilbert space vectors, its structure is uniquely determined by the inner products of pairs of vectors with different parameters. Therefore it is natural to say that a sequence of models converges to a limit model if such overlaps converge pointwise (see \cite{GutaKiukas1} for a more general discussion taking into account the phase ambiguity). We will call this notion the \emph{weak convergence} of quantum statistical models.
\begin{figure}
\caption{In the case of one-parameter pure state models, local asymptotic normality can be understood as convergence of inner products of states with local parameters $u$ and $v$ to the inner product of the corresponding coherent states in a one-mode Gaussian shift model, whose mean encodes the unknown local parameter.
A similar result holds for multidimensional models.}
\label{fig.weak.convergence}
\end{figure}
The following calculation shows that the sequence of models $|\psi_u^n\rangle$ converges weakly to the limit model $ | \sqrt{F_Q/2}\, u\rangle $, as illustrated in Fig.~\ref{fig.weak.convergence}
\begin{multline}
\langle\psi^n_{u} | \psi^n_{v} \rangle = \left\langle \psi_0 \left| e^{i \frac{(u-v)}{\sqrt{n}}G} \right|\psi_0\right\rangle^n = \left(1 -\tfrac{(u-v)^2 F_Q }{8n} + O(n^{-3/2})\right)^n \\
\xrightarrow{~~n\to\infty~~} \exp\left(- \tfrac{(u-v)^2 F_Q }{8}\right) = \left\langle \sqrt{\tfrac{F_Q}{2}} u \right|\left. \sqrt{\tfrac{F_Q}{2}} v\right\rangle.
\label{eq.weak.qlan}
\end{multline}
Note that even though we deal with a one-dimensional pure state model, the limit model is not classical, as one might expect, but another pure state quantum model. This reflects the fact that the limit model may be used for different statistical problems (e.g. parameter estimation, testing) whose optimal measurements are incompatible, and is related to the fact that the SLD is not $\mathcal{D}$-invariant (see Sec.~\ref{sec:Dinvariance}).
\subsubsection{Two-parameter pure qubit model.}
In order to understand the measurement incompatibility from the QLAN perspective, we will now consider a two-dimensional qubit model obtained by applying a small rotation to one of the basis vectors
\begin{equation}
\ket{\psi_{{\boldsymbol{\var}}}} = e^{\frac{i}{2}( \theta_1 \sigma_x - \theta_2 \sigma_y)} |0\rangle, \qquad {\boldsymbol{\var}}= (\theta_1, \theta_2).
\end{equation}
Note that up to a unitary rotation this model is locally equivalent to the pure qubit state estimation model $(\theta,\varphi)$ discussed in Sec.~\ref{sec:examplequbit1}. The joint state of an i.i.d.
ensemble of $n$ qubits is expressed in terms of the local parameter $ {\bf u} = (u_1, u_2)^T\in \mathbb{R}^2$ around ${\boldsymbol{\var}}_0 =0$ as
\begin{equation}
|\psi^n_{{\bf u}} \rangle = |\psi^{\otimes n}_{{\bf u}/\sqrt{n}}\rangle:= \left( e^{\frac{i}{2}( u_2\sigma_x - u_1\sigma_y)/\sqrt{n} } | 0 \rangle\right)^{\otimes n}.
\end{equation}
The corresponding SLDs $(L_1^n, L_2^n)$ at ${\bf u}=0$ and the generators $(G_1,G_2)$ are given by the collective spin observables
\begin{equation}
L^n_1 = 2 G_2 = \frac{1}{\sqrt{n}}\sum_{i=1}^n \sigma^{(i)}_x = \frac{2}{\sqrt{n}} J_x , \qquad L^n_2 = - 2 G_1 = \frac{1}{\sqrt{n}}\sum_{i=1}^n \sigma^{(i)}_y = \frac{2}{\sqrt{n}} J_y.
\end{equation}
Since $\langle \psi_{\bf 0} | [L_1^1, L_2^1] | \psi_{\bf 0} \rangle \neq 0$, the SLD CR bound is not achievable even in the asymptotic sense. This was reflected in the discussion in Sec.~\ref{sec:examplequbit} where we found that the HCR bound was strictly larger than the SLD CR bound. In the same vein as the calculation \eqref{eq.weak.qlan}, it can be shown that the following `weak convergence' holds
\[
\left\langle \psi^n_{{\bf u}} | \psi^n_{{\bf v}}\right\rangle \xrightarrow{~~n\to\infty~~} \left\langle \tfrac{1}{\sqrt{2}}{\bf u} \right. \left| \tfrac{1}{\sqrt{2}}{\bf v} \right\rangle,
\]
where $ | \tfrac{1}{\sqrt{2}}{\bf u} \rangle$ are coherent states forming a quantum Gaussian shift model
\begin{equation}
\varrho_{\mathbf{u}} = \left| \tfrac{1}{\sqrt{2}} {\bf u} \right\rangle \left\langle \tfrac{1}{\sqrt{2}} {\bf u}\right|, \qquad \ket{\psi_\mathbf{u}} = \left| \tfrac{1}{\sqrt{2}}{\bf u} \right\rangle= e^{\frac{i}{\sqrt{2}}( u_2 Q -u_1 P)} \ket{{\bf 0}}
\end{equation}
with $\ket{{\bf 0}}\in \mathcal{F}$ again denoting the vacuum state.
The same conclusion can be reached by using the quantum central limit theorem \cite{Petz} to show that the `joint distribution' (or more precisely the joint moments) of the SLDs of $|\psi^n_{\mathbf{u}} \rangle$ converges to that of the SLDs of the Gaussian model, which are equal to $(\sqrt{2}Q, \sqrt{2}P )$ at ${\bf u}=0$. Similarly, the generators of collective spin rotations converge to those of phase translations for the Gaussian model. This is in fact a statistical take on the well-known Holstein--Primakoff theory of coherent spin states \cite{HolsteinPrimakov,Radcliffe}, but holds generally for any i.i.d. pure state model. Using the above relation we may now apply the known results for the minimal cost of estimation in the Gaussian shift model, as discussed in Sec.~\ref{sec:examplegaussian}, to infer the analogous results for the asymptotic qubit model. For simplicity, as well as in order to stay in accordance with the corresponding qubit example from Sec.~\ref{sec:examplequbit1}, we choose the cost matrix $C=\mathcal{I}$ so the corresponding cost function is $\mathcal{C}(\mathbf{u},\mathbf{\tilde{u}}) = \| \tilde{\bf u}-{\bf u} \|^2$. Using the notations of Sec.~\ref{sec:examplegaussian}, the Gaussian shift model considered here is a single mode ($\mathfrak{q}=1$), two parameter ($\mathfrak{p}=2$) estimation model with matrices $A=\frac{1}{\sqrt{2}} \mathcal{I}$, $V=\frac{1}{2}\mathcal{I}$, and according to \eqref{eq:hcrgauss} the resulting cost reads:
\begin{equation}
\mathcal{C}^{\varrho} =4.
\end{equation}
Note that half of the contribution is from the inherent incompatibility of simultaneous measurement of $Q$ and $P$, as the SLD CR bound would yield $\mathcal{C} \geq \mathcal{C}^{\t{SLD}} = 2$. As expected, this result coincides with the HCR bound formula for the corresponding qubit model, see Eq.~\eqref{eq:hcrqubit1}.
The general optimal linear measurement construction discussed in Sec.~\ref{sec:examplegaussian} corresponds here to the standard heterodyne measurement \cite{Leonhardt}, which can be implemented as follows. Consider an ancillary cv system $(\tilde{Q}, \tilde{P})$ prepared in the vacuum state $|{\bf 0}\rangle$, and measure the commuting pair of linear combinations $(Q+ \tilde{Q})/\sqrt{2}$ and $(P-\tilde{P})/\sqrt{2}$ for the joint state $|\frac{1}{\sqrt{2}}{\bf u}\rangle \otimes |{\bf 0}\rangle$. Physically, this can be interpreted as splitting the coherent state with a balanced beamsplitter and measuring different coordinates of the outgoing beams. The outcomes have normal distribution with covariance matrix $\mathcal{I}/2$ and mean $\mathbf{u}/2$, and when multiplied by $2$ yield the optimal unbiased estimator $\tilde{\mathbf{u}}$ with the corresponding cost $\mathcal{C} = \mathbb{E} \|\tilde{\bf u}-{\bf u}\|^2=4$. Based on the central limit argument and the optimality of the heterodyne measurement, we can now devise an asymptotically optimal measurement scheme for the original qubit estimation problem. In the first step (corresponding to the beamsplitter action) the qubit ensemble is separated into two equal parts; then the two collective spins are measured on each sub-ensemble. The (appropriately rescaled) outcomes $\tilde{\mathbf{u}}^n$ have asymptotically normal distribution $\mathcal{N}({\bf u}, 2 \mathcal{I})$ and
\begin{equation}
\lim_{n\to\infty} n \mathcal{C}^n = \lim_{n\to\infty} n \mathbb{E} \| \tilde{\bf u}^n - {\bf u}\|^2 = 4 = \mathcal{C}^{\varrho}.
\end{equation}
Note that even though formally the collective spins are measured, this measurement is equivalent to the local measurement strategy as discussed in Sec.~\ref{sec:examplequbit1} that saturated the HCR bound for the corresponding qubit model.
However, our heuristic arguments rely on the vague assumption that we deal with small rotations around a given state, rather than a completely unknown pure state. We will continue to ignore this for the moment but will revisit the issue in the context of strong QLAN, where we detail a rigorous two-step adaptive procedure for the optimal state estimation.
\subsection{Central limit argument for mixed qubit states}
\label{sec.clt.mixed}
Let us now consider the extension of the previous qubit model to mixed states. We are interested in the structure of the i.i.d. quantum model in a neighbourhood of size $1/\sqrt{n}$ of a given mixed state. Without loss of generality, the latter can be chosen to be the state $\rho_{{\bf r}_0}$ with Bloch vector ${\bf r}_0 = (0,0, r_0)$ with $0 < r_0 <1$. Adopting the notation of Sec.~\ref{sec:examplequbit} we parameterise the neighbourhood of $\rho_{{\bf r}_0}$ using the `local parameter' ${\bf u}$ as follows
\begin{equation}
\rho_{{\bf r}_0 + {\bf u}/\sqrt{n}} = \rho_{{\bf r}_0} + \tfrac{1}{2\sqrt{n}}{\bf u}\cdot \boldsymbol{\sigma}= \frac{1}{2}\left( \begin{array}{cc} 1 + r_0 + \frac{u_3}{\sqrt{n}} & \frac{u_1 -i u_2}{\sqrt{n}}\\ \frac{u_1+ iu_2}{\sqrt{n}} &1-r_0 - \frac{u_3}{\sqrt{n}} \end{array} \right), \qquad {\bf u} = (u_1, u_2, u_3)^T\in \mathbb{R}^3.
\end{equation}
The off-diagonal parameters $(u_1,u_2)$ describe a unitary rotation of $\rho_{{\bf r}_0}$, while the diagonal parameter $u_3$ describes the change in eigenvalues. The local i.i.d. model is given by $n$ independent qubits with the joint state
\begin{equation}\label{eq.qubits.mixed.ensemble}
\rho_{\bf u}^n = \rho_{{\bf r}_0 + {\bf u}/\sqrt{n}}^{\otimes n}.
\end{equation}
As above, we use the quantum central limit theorem to uncover the structure of the limit Gaussian model.
The three SLDs at ${\bf u}=0$ are again given by collective observables
\[
L_1^n = \frac{1}{\sqrt{n}} \sum_{i=1}^n \sigma_x^{(i)}, \quad L_2^n = \frac{1}{\sqrt{n}} \sum_{i=1}^n \sigma_y^{(i)}, \quad L_3^n = \frac{1}{\sqrt{n}(1-r_0^2)} \sum_{i=1}^n ( \sigma_z^{(i)} - r_0 \mathbb{1}).
\]
By applying the quantum CLT with respect to the state $\rho_{\bf u}^n$ we obtain the joint convergence (in moments or characteristic function)
\[
(L_1^n, L_2^n, L_3^n) \xrightarrow{~~n\to\infty~~} (L_1, L_2, L_3),
\]
where the limit observables $(L_1, L_2, L_3)$ are canonical variables of a Gaussian shift model whose state is denoted $\varrho_{\bf u}$. To completely identify the Gaussian model we first compute the commutation relations of the SLDs at $\mathbf{u}=0$. By the CLT, the commutators of the limit SLDs are proportional to the identity, with coefficients given by the expectations of the commutators of one-qubit SLDs
\begin{equation}\label{eq.commutations.sld}
[L_1, L_2] = {\rm Tr} (\rho_{{\bf r}_0} [L_1^1, L_2^1]) \mathbb{1}= 2 i r_0 \mathbb{1}, \qquad [L_1, L_3] = [L_2, L_3] = 0.
\end{equation}
This means that the first two coordinates can be identified (up to a constant) with those of a one-mode cv system $(L_1, L_2)= (\sqrt{2r_0}Q, \sqrt{2r_0} P )$ while $L_3$ is a classical real valued random variable, as it commutes with all the others. The corresponding FI matrix at ${\bf u}=0$ equals
\begin{equation}
\label{eq:QFIqubitmixed}
(F_Q)_{ij} =\trace (\rho_{{\bf r}_0} L_i^1 \circ L_j^1 ) = {\rm diag} \left[1,1, \frac{1}{1-r_0^2}\right].
\end{equation} Finally, since \begin{equation} \trace \left( \left.\frac{\partial \rho_{{\bf r}_0 +{\bf u}}}{\partial u_i} \right|_{{\bf u} =0} L^1_j \right) = (F_Q)_{ij}, \end{equation} then in order for the Gaussian model to properly account for the shifts in the $\mathbf{u}$ parameter we need to have: \begin{equation}\label{eq.means.sld} \trace(\thetarho_{\mathbf{u}} L_1 ) = u_1,\quad \trace(\thetarho_{\mathbf{u}} L_2 ) = u_2,\quad \trace(\thetarho_{\mathbf{u}} L_3 ) = \frac{u_3}{1-r_0^2}. \end{equation} Based on equations \eqref{eq.commutations.sld}, \eqref{eq:QFIqubitmixed} and \eqref{eq.means.sld}, we conclude that the Gaussian model is given by the quantum-classical state \begin{equation} \label{eq.classical.quantum.gaussian} \thetarho_{\bf u}= q_{\bf u} \otimesimes p_{\bf u}, \end{equation} where $p_{\bf u}$ is the probability density of the normal random variable $Z:=(1-r_0^2) L_3$ and $q_{\bf u}$ is a single mode `displaced thermal state'. The mean and the covariance matrix of the canonical variables $(Q, P, Z)$ with respect to $\thetarho_{\bf u}$ are given by \begin{equation} A \mathbf{u} = [\tfrac{1}{\sqrt{2r_0}}u_1, \tfrac{1}{\sqrt{2r_0}}u_2, u_3]^T, \qquad V =\t{diag}[\tfrac{1}{2r_0}, \tfrac{1}{2r_0}, 1-r_0^2]. \end{equation} Note that using the formula \eqref{eq:gaussfisher} for the QFI matrix in the Gaussian shift model we indeed recover \eqref{eq:QFIqubitmixed}. Consider now the estimation problem, and for simplicity let the cost function be the square Euclidean distance $\|\tilde{\bf u} - {\bf u}\|^2$ with the cost matrix $C= \mathcal{I}$. The optimal measurement for the quantum component is the heterodyne which provides independent unbiased estimators $(\tilde{u}_1, \tilde{u}_2)$ with distribution $\mathcal{N}((u_1, u_2), (1+r_0)\mathcal{I})$, while the estimator for $u_3$ is given by the classical component $\tilde{u}_3 := Z$. 
The resulting cost will be \begin{equation} \label{eq:gauss3param} \mathcal{C}^{\thetarho} = 2(1+r_0) + (1-r_0^2) = 3 + 2r_0 -r_0^2, \end{equation} which according to Eqs.~(\ref{eq:hcrqubit3},\ref{eq:hcrgauss3}) coincides with the HCR bound for the qubit model as well as for the corresponding Gaussian model. However, when we now look back at the multi-copy qubit model it is not clear how to construct the qubits measurement corresponding to the limiting heterodyne, and how the classical variable emerges in the limit. In the next section we introduce an alternative approach to QLAN which can answer these questions. \subsection{Strong convergence approach to QLAN for qubits}\label{sec:QLANstrong} Let us return to the i.i.d. qubit model $\rho^n_{\bf u}$ introduced in equation \eqref{eq.qubits.mixed.ensemble}. We will analyse its structure using group representation theory, and proceed to define an operational notion of strong convergence to the limit Gaussian model. The qubits space $(\mathbb{C}^2)^{\otimesimes n}$ carries commuting unitary representations of the symmetric group $S_n$ and the unitary group SU(2) \begin{eqnarray*} ~\pi(\sigma) : |\psi_1\rangle \otimesimes \dots \otimesimes |\psi_n\rangle &\mapsto & |\psi_{\sigma^{-1}(1)}\rangle \otimesimes \dots \otimesimes |\psi_{\sigma^{-1}(n)} \rangle,\qquad \sigma\in S_n,\\ \pi^\prime(u) : |\psi_1\rangle \otimesimes \dots \otimesimes |\psi_n\rangle &\mapsto & u|\psi_1\rangle \otimesimes \dots \otimesimes u|\psi_n\rangle \qquad ~~~~~~~~u\in \t{SU(2)}. 
\end{eqnarray*} According to Weyl's Theorem \cite{Fulton1991} the space decomposes as a direct sum of tensor products of irreducible representations indexed by the total spin $j\in \mathcal{J}_n:= \{0 (\tfrac{1}{2}), \dots , \tfrac{n}{2}\}$: \begin{equation} (\mathbb{C}^2)^{\otimes n} = \bigoplus_{j\in \mathcal{J}_n} \mathbb{C}^{2j+1} \otimes \mathbb{C}^{m_j}, \quad \pi(\sigma) = \bigoplus_{j\in \mathcal{J}_n} \mathbb{1}_{2j+1} \otimes \pi_j(\sigma), \quad \pi^\prime(u) = \bigoplus_{j\in \mathcal{J}_n} \pi^\prime_j(u) \otimes\mathbb{1}_{m_j}, \end{equation} where $m_j$ is the dimension of the corresponding irreducible representation of $S_n$. By permutation symmetry, the joint state has a block-diagonal decomposition \begin{equation}\label{eq.irrep.decomp} \rho_{\bf u}^n = \bigoplus_{j\in \mathcal{J}_n} p_{\bf u}^{n,j} \rho_{\bf u}^{n,j} \otimes \frac{\mathbb{1}_{m_j}}{m_j}, \end{equation} where $p_{\bf u}^{n,j}$ is a probability distribution over $j\in \mathcal{J}_n$ and $\rho_{\bf u}^{n,j}$ are density matrices, both depending on ${\bf u}$. Let $P_j$ denote the projection onto the subspace $\mathbb{C}^{2j+1} \otimes \mathbb{C}^{m_j}$. The decomposition \eqref{eq.irrep.decomp} implies that the projective measurement $\{P_j\}_{j\in\mathcal{J}_n }$ is non-demolition for the family $\{\rho_{\bf u}^n\}$. The classical outcome $j$ and the corresponding quantum conditional state are the two components of a classical-quantum model which in the limit of large $n$ converges to the Gaussian model in equation \eqref{eq.classical.quantum.gaussian}, as shown below. Let us describe these components in more detail. \subsubsection{Classical part.} Let us denote by $J^{(n)}$ the classical random variable with probability distribution $p_{\bf u}^{n,j}$ over $j \in \mathcal{J}_n$. This provides information about the eigenvalue parameter $u_3$ and (to first order of approximation) does not depend on the `rotation parameters' $u_1,u_2$.
Note that the measurement $\{P_j\}_{j\in\mathcal{J}_n }$ does not amount to measuring the $z$ component of the collective spin, but rather to measuring the total spin $$ J= \sqrt{J_x^2 + J_y^2 +J_z^2}, \qquad {\rm where} \qquad J_j = \frac{1}{2}\sum_{i=1}^n\sigma_j^{(i)}. $$ Intuitively however, we expect that for large $n$ the distributions of the two observables will be similar on the basis of the fact that $J_x$ and $J_y$ are centred and have standard deviations of the order $n^{1/2}$ while $J_z$ has mean of order $n$. Indeed, one can show \cite{GutaJanssensKahn} that $J$ satisfies the same central limit theorem as $J_z$: \begin{equation}\label{eq.zn} Z_n:= \frac{1}{\sqrt{n}} (2J^{(n)} - r_0 n) \xrightarrow{n\to\infty} \mathcal{N}(u_3, 1-r_0^2). \end{equation} Therefore, by defining the estimator of $u_3$ as $\tilde{u}^n_3= Z_n $ we obtain the asymptotic MSE $$ \lim_{n\to\infty}\mathbb{E} (\tilde{u}^n_3 - u_3)^2 = (1-r_0^2). $$ Additionally, $J^{(n)}$ satisfies a concentration property, namely it belongs to the interval $\mathcal{J}_n^\delta :=[\frac{r_0 n}{2} - n^{1/2 +\delta} ,\frac{r_0 n}{2} + n^{1/2 +\delta}] $ with probability converging exponentially fast to one \cite{GutaJanssensKahn}. Since the asymptotic estimation cost typically scales as $n^{-1}$, we can safely ignore events of exponentially small probability and assume that $J^{(n)}\in \mathcal{J}_n^\delta$. We now proceed to the analysis of the quantum component of the statistical model under this assumption. \subsubsection{Quantum part.} Conditional on the measurement outcome $J^{(n)}$ taking the value $j\in \mathcal{J}_n^\delta$, the state of the qubit ensemble is $\rho_{\bf u}^{n,j}\otimes \mathbb{1}_{m_j}/m_j$; since the right side of the tensor product is trivial, it can be traced over and we remain with the state $\rho_{\bf u}^{n,j}$ on $\mathbb{C}^{2j+1}$.
We now construct an explicit isometric embedding of $\mathbb{C}^{2j+1}$ into the Fock space $\mathcal{F}$ of a one-mode cv system, and use this to map the qubit states into a cv state. Let $\{|j, m \rangle : m =-j,\dots ,j\}$ be the orthonormal basis of $\mathbb{C}^{2j+1}$ consisting of eigenvectors of $J_z$ (i.e. $J_z |j, m \rangle = m |j, m \rangle $), and let $\{|k\rangle : k=0, 1,\dots\}$ be the Fock basis in $\mathcal{F}$. Then the map \begin{equation} W^{j} : \mathbb{C}^{2j+1} \to \mathcal{F}, \qquad \qquad | j, m \rangle \mapsto | j-m \rangle \end{equation} extends to an isometry, and defines a quantum channel \begin{equation}\label{eq.Tj} T^j: M(\mathbb{C}^{2j+1}) \to \mathcal{T}^1(\mathcal{F}) , \qquad T^j (\rho) = W^j \rho W^{j\dagger}, \end{equation} where $\mathcal{T}^1(\mathcal{F})$ denotes the trace-class operators on $\mathcal{F}$. On the other hand, a reverse channel can be defined as \begin{equation}\label{eq.Sj} S^j :\mathcal{T}^1(\mathcal{F})\to M(\mathbb{C}^{2j+1}) , \qquad S^j(\thetarho) = W^{j\dagger} \thetarho W^j + {\rm Tr}( {Q}^j_\perp \thetarho) \frac{\mathbb{1}_j}{2j+1}, \end{equation} whereby the state is first measured with projections $({Q^j := W^jW^{j\dagger}}, Q^j_\perp)$ and conditionally on the outcome, the isometry is reversed or the trivial state $\mathbb{1}_j/(2j+1)$ is prepared. Now, the essence of the strong formulation of QLAN is that for \emph{all} typical values of $j$, the embedded state is well approximated by a Gaussian state, \emph{uniformly} over the local parameter ${\bf u}$.
Formally this can be expressed as the following convergence statement that holds for $\epsilon$ small enough \cite{GutaJanssensKahn} \begin{align} \label{theorem.lan.1} &\lim_{n\to\infty} \, \max_{j\in \mathcal{J}_n^\delta} \, \sup_{\|{\bf u} \| \leq n^{\epsilon}}\, \left\| T^j (\rho_{\bf u}^{n,j}) - q_{\bf u} \right\|_1 = 0, \\ \nonumber &\lim_{n\to\infty} \, \max_{j\in \mathcal{J}_n^\delta} \, \sup_{\|{\bf u} \| \leq n^{\epsilon}}\, \left\| S^j (q_{\bf u}) - \rho_{\bf u}^{n,j} \right\|_1 = 0, \end{align} where $\rho_{\bf u}^{n,j}$ is the conditional state in the decomposition \eqref{eq.irrep.decomp}, $T^j$ and $S^j$ are the channels defined in \eqref{eq.Tj} and respectively \eqref{eq.Sj} and $q_{\bf u}$ is the density matrix of the quantum part of the Gaussian state as defined in \eqref{eq.classical.quantum.gaussian}. What is the difference between this statement and the central limit approach to LAN of the previous section? The latter is a statement about pointwise convergence (for fixed ${\bf u}$) of the `joint distribution' of SLDs from the multi-copy model to those of the Gaussian model. The former is an \emph{operational procedure} for mapping the first model into the second one (and backwards) while controlling the trace-norm approximation errors uniformly with respect to the parameter ${\bf u}$. As we will see in the next section, this is crucial in defining the measurement strategy and proving its optimality. \subsubsection{Strong QLAN for classical and quantum components} The convergence in \eqref{theorem.lan.1} concerns only the `quantum part' of the i.i.d. model, but a similar statement can be made for the full classical-quantum model as follows. 
For each $n$ we can define a `quantum to quantum-classical' channel $$ T_n: M((\mathbb{C}^{2})^{\otimes n} ) \to \mathcal{T}^1(\mathcal{F}) \otimes L^1(\mathbb{R}) $$ whose action is described by the following sequence of operations \cite{GutaJanssensKahn}: \begin{enumerate} \item{ Measure $\{P_j\}_{j\in\mathcal{J}_n }$ to obtain outcome $J^{(n)}=j$ and conditional state $\rho^{n,j}_{\bf u}$. } \item{ Rescale $J^{(n)}$ to obtain $Z_n$ defined in equation \eqref{eq.zn}.} \item{ Randomise $Z_n$ by adding an independent sample from a centred normal distribution with variance $1/(2\sqrt{n})$ to obtain the classical output of the channel $T_n$. } \item{ Map $\rho^{n,j}_{\bf u}$ through the channel $T^j$ to obtain the quantum output of the channel $T_n$.} \end{enumerate} Step (3) requires some justification. While $Z_n$ converges \emph{in distribution} to the desired Gaussian $\mathcal{N}(u_3, 1-r_0^2)$ (cf. equation \eqref{eq.zn}), the fact that it takes discrete values prevents it from converging also in the norm-one sense used in \eqref{theorem.lan.1}. This can be remedied by adding a `small' continuous noise without spoiling the statistical information (see \cite{Kahnthesis} for details). A similar procedure can be used to define the reverse channel $S_n$ by discretising the Gaussian variable to obtain a sample $\tilde{j} \in \mathcal{J}_n$ and mapping the quantum Gaussian through the corresponding channel $S^{\tilde{j}}$. The following result is similar to Eq.~\eqref{theorem.lan.1} but captures the convergence to the classical-quantum Gaussian model more clearly: \begin{align} \label{th.lan.2} &\lim_{n\to\infty} \, \sup_{\|{\bf u} \| \leq n^{\epsilon}}\, \left\| T_n (\rho_{\bf u}^{n}) - \thetarho_{\bf u} \right\|_1 = 0, \\ \nonumber &\lim_{n\to\infty} \, \sup_{\|{\bf u} \| \leq n^{\epsilon}}\, \left\| S_n (\thetarho_{\bf u}) - \rho_{\bf u}^{n} \right\|_1 = 0, \end{align} where $\rho_{\bf u}^{n}$ is the i.i.d.
state \eqref{eq.qubits.mixed.ensemble}, $T_n$ and $S_n$ are the channels defined above and $\thetarho_{\bf u} = q_\mathbf{u} \otimes p_{\bf u}$ is the quantum-classical Gaussian state as defined in \eqref{eq.classical.quantum.gaussian}. \subsection{Asymptotically optimal estimation strategy and the region of applicability} \label{sec:estimation.strategyLAN} We now come to the key issue demonstrating the power of the strong QLAN formulation. In Sec.~\ref{sec:hcr}, while discussing the HCR and SLD CR bounds as well as the optimal estimation strategies that saturated the bounds, we have stayed within the l.u. estimation paradigm. This approach has a significant deficiency in that the derived optimal estimation strategies are guaranteed to perform as expected only in the direct vicinity of a fixed parameter, and it is a priori unclear what parameter region may be covered by an estimation strategy derived within this paradigm. This in some extreme cases may lead to controversies, such as the one surrounding the correct scaling constant in the actually achievable Heisenberg limit \cite{Gorecki2019a}. Fortunately, this problem is absent for the Gaussian shift models, see Sec.~\ref{sec:examplegaussian}, where the optimal linear measurement strategy does not depend on the unknown parameter. Since the essential message of QLAN is the equivalence of asymptotic multi-copy models with the Gaussian shift models it is highly relevant to understand to what extent the uniform achievability for Gaussian models translates to multi-copy models. We will show that this is indeed the case in the asymptotic setting, as is already hinted at by the convergence results \eqref{theorem.lan.1} and \eqref{th.lan.2}. Let us recall that the original parameter ${\bf r}$ (the Bloch vector) is related to the local one as ${\bf r} = {\bf r}_0 + {\bf u}/\sqrt{n}$ where ${\bf r}_0$ is fixed and known.
Therefore, Eqs.~\eqref{theorem.lan.1} and \eqref{th.lan.2} say that if we restrict ourselves to regions in the parameter space of size $n^{-1/2+\epsilon}$, then the i.i.d. model $\rho_{\bf r}^{\otimesimes n}$ can be approximated by a simple Gaussian shift model. Since the approximation has an operational meaning in terms of quantum channels, this promises to simplify the estimation task, at least in the limit of large $n$. But can we claim that this is consistent with the setting where ${\bf r}$ is assumed to be completely unknown? We will show that this can be done by using the following two step adaptive procedure. \begin{enumerate} \item{ \emph{Localise parameter:} use $\tilde{n}= n^{1-\epsilon}$ samples to produce a rough estimator $\tilde{\rho}$ such that the probability that $\|\tilde{\rho} - \rho\| > n^{-1/2 +\epsilon}$ is exponentially small; for instance, the combination of Pauli measurements and maximum likelihood has this property \cite{GutaKahn}. Since the `large deviation' event has small probability, it does not contribute to the asymptotic cost (which scales as $1/n$) and can be ignored. } \item{\emph{rotate to diagonal state:} the remaining $n^\prime = n-\tilde{n}$ samples are rotated by means of a unitary $U$ which diagonalises $\tilde{\rho}$, i.e. $U\tilde{\rho} U^\dagger = \rho_{{\bf r}_0}= \frac{1}{2}(\mathbb{1} + r_0 \sigma_z)$. Here $r_0$ is not known in advance but can be considered fixed and known from this point on. The rotated qubits can be parametrised as $\rho_{\bf u}$ with local parameter $\|{\bf u}\|\leq n^\epsilon$. } \end{enumerate} After this localisation procedure we can assume that the unknown parameter is in the local region in which the QLAN approximation is valid. In the next steps we describe an asymptotically optimal measurement strategy. Since $n^\prime/n \to 1$, replacing $n^\prime$ by $n$ will not change the asymptotic analysis and so for simplicity we will continue to use $n$. 
\begin{enumerate} \addtocounter{enumi}{2} \item{\emph{Estimate eigenvalue parameter:} perform the measurement $\{P_j\}_{j\in\mathcal{J}_n }$ which projects onto a tensor products of irreducible representations $\mathbb{C}^{2j+1}\otimesimes \mathbb{C}^{m_j}$. Rescale outcome $J^{(n)}$ to obtain estimator $\tilde{u}^n_3 = Z_n$ as defined in equation \eqref{eq.zn}. } \item{\emph{Embed into Fock space:} map the conditional state $\rho^{n,j}_{\bf u}$ through the channel $T^j$, which according to \eqref{theorem.lan.1} is close to the Gaussian state $q_\mathbf{u}$} \item{ \emph{Estimate the rotation parameters:} apply the heterodyne measurement. The rescaled outcome is the estimator $(\tilde{u}^n_1, \tilde{u}^n_2)$ which according to the convergence result \eqref{theorem.lan.1}, has asymptotic distribution $\mathcal{N}((u_1, u_2), (1+r_0)\mathcal{I})$ } \item{\emph{Compute the final estimator:} using $\tilde{\bf u}^n := (\tilde{u}^n_1, \tilde{u}^n_2, \tilde{u}^n_3)^T$, we define the final estimator of the qubit state \begin{equation} \tilde{\rho}_n : =U^\dagger \rho_{ {\bf r}_0 + \tilde{\bf u}^n/\sqrt{n}} U. \end{equation}} \end{enumerate} Note that steps (iii) and (iv) amount to mapping the i.i.d. state through the channel $T_n$ defined earlier, with the exception of the randomisation step which is only needed for `technical' reasons in \eqref{th.lan.2}. For the rest of this section we discuss in what sense this procedure is asymptotically optimal. We define our figure of merit in terms of the maximum cost (risk) and minimax estimators, as is customary in mathematical statistics. As before, we consider the standard cost function given by the Euclidean distance $ C(\tilde{\bf r}, {\bf r} ) = \| \tilde{\bf r} -{\bf r} \|^2$. Let us fix the Bloch vector ${\bf r}_0$ and consider the estimation error for states in the neighbourhood of $\rho_{{\bf r}_0}$. 
We will use the \emph{local maximum} cost of $\tilde{\bf r}_n$ (or $\tilde{\rho}_n = \rho_{\tilde{\bf r}_n}$) which is defined as $$ \mathcal{C}^n_{\textrm{max},c}(\tilde{\bf r}_n) = \sup_{ \|{\bf r}-{\bf r}_0\|^2 \leq c/n } \mathbb{E} \| \tilde{\bf r}_n - {\bf r} \|^2, $$ where $c$ is a positive constant, and the expectation is computed with respect to the state $\rho^{\otimesimes n}_{\bf r}$. This reflects the hardness of the estimation problem around ${\bf r}_0$ and is both more operationally meaningful than a single point cost for an l.u. strategy as well as more informative than the maximum cost over all parameters, or the Bayes cost for a particular prior distribution. Since $\mathcal{C}^n_{\textrm{max},c}$ scales as $n^{-1}$ for reasonable estimators, the asymptotic behaviour of the best estimator is determined by the constant called the \emph{local asymptotic minimax} (LAM) cost at ${\bf r}_0$ \begin{equation} \mathcal{C}_{\rm minmax} := \sup_{c>0} \underset{n\to\infty}{\lim\sup} \,\inf_{\tilde{\bf r}_n} \, n \mathcal{C}^n_{\textrm{max},c} (\tilde{\bf r}_n), \end{equation} where the dependence on the constant $c$ is lifted in the last step. A sequence of estimators $\tilde{\rho}_n= \rho_{\tilde{\bf r}_n}$ is LAM if it achieves the LAM cost in the limit of large $n$ \begin{equation} \sup_{c>0}\, \underset{n\to\infty}{\lim\sup} \, n \mathcal{C}^n_{\textrm{max},c}(\tilde{\bf r}_n) = \mathcal{C}_{\rm minmax}. \end{equation} The strong QLAN convergence \eqref{th.lan.2} implies that the asymptotic minimax cost $ \mathcal{C}_{\rm minmax}$ of the multi-copy model is equal to the minimax cost $\mathcal{C}^\thetarho_{\rm minmax}$ of the limit Gaussian model, and the sequence of estimators $\tilde{\rho}_n$ defined in steps (i) to (vi) above is LAM. The proof of the first statement follows the same lines as that of its classical counterpart and we refer to \cite{GutaJanssensKahn} for the details. 
The key idea is that strong LAN controls the norm-one distance between the i.i.d. models and the Gaussian one, so that the optimal Gaussian measurement can be pulled back into a LAM measurement for the i.i.d. model. The second statement follows from the fact that the measurement performed in step (v) is minimax for the quantum part of the Gaussian model. Indeed, it can be shown more generally that thanks to the covariance properties of the model, the optimal measurement for l.u. estimators discussed in Sec.~\ref{sec:examplegaussian} is also a minimax estimator. Therefore, $\mathcal{C}^\thetarho_{\rm minmax} = \mathcal{C}^\thetarho$, where the latter is the HCR bound computed in \eqref{eq:gauss3param}, and by combining with strong LAN we get \begin{equation} \mathcal{C}_{\rm minmax} = \mathcal{C}^\thetarho_{\rm minmax} = \mathcal{C}^\thetarho = 3 + 2 r_0 - r_0^2. \end{equation} Moreover, since the estimator for the Gaussian model has a normal distribution, this will also be true asymptotically for the i.i.d. model $$ \sqrt{n} \left(\tilde{\bf r}_n - {\bf r}\right) \xrightarrow{n\to\infty} \mathcal{N}(0,\Sigma), \qquad \Sigma= {\rm diag}[1+r_0, 1+r_0,1-r_0^2]. $$ This means that the estimator $\tilde{\bf r}_n$ is not only LAM optimal, but is normally distributed around the true parameter, which equips us with asymptotically exact confidence regions (error bars). \subsection{Optimal estimation for i.i.d. ensembles via QLAN} In this section we go beyond qubit models and discuss a general state estimation problem for multi-copy models with finite dimensional systems. The reasoning follows the same line as in the qubit case and therefore we will not repeat all technical considerations and proofs but rather focus on the key steps and results.
\subsubsection{The qudit model.} Let $\rho_0 = {\rm diag}[\mu_1, \dots, \mu_d]$ be a $d$-dimensional mixed state with $\mu_1>\dots >\mu_d>0$, and let us parametrise the states around $\rho_0$ as \begin{equation}\label{rho.theta.tilde} \rho_{\bf u} := \begin{bmatrix} \mu_1 + h_1 & \zeta_{1,2}^* & \dots & \zeta_{1,d}^* \\ \zeta_{1,2} & \mu_2 + h_2 & \ddots& \vdots \\ \vdots & \ddots & \ddots & \zeta_{d-1,d}^* \\ \zeta_{1,d} & \dots & \zeta_{d-1,d} &\mu_d - \sum_{i=1}^{d-1} h_i \end{bmatrix}, \end{equation} where ${\bf u} = ({\bf h}, \boldsymbol{\zeta})\in \mathbb{R}^{d-1}\times \mathbb{C}^{d(d-1)/2}$ represent eigenvalue changes and rotations respectively. The multi-copy local model for an ensemble of $n$ qudits has a joint state $$ \rho_{\bf u}^n = \rho_{{\bf u} /\sqrt{n}}^{\otimesimes n}. $$ \subsubsection{Gaussian shift model.} The limiting Gaussian model can be identified by applying the same CLT arguments of the qubits case. This shows that it is a product of independent classical and quantum Gaussian shifts \begin{equation} \thetarho_{\bf u}:= q_{\bf u} \otimesimes p_{\bf u}, \end{equation} where the classical component is the $(d-1)$-dimensional Gaussian \begin{equation} \label{eq:QLANgaussian} p_{\bf u} = \mathcal{N}({\bf h} , V_c(\boldsymbol{\mu})), \quad V_c(\boldsymbol{\mu})_{ij} := \delta_{ij} \mu_{i} -\mu_{i}\mu_{j}. \end{equation} and is the limit of the classical multinomial model obtained by restricting the attention to diagonal states. 
The quantum component is a tensor product \begin{equation} \label{eq:quantumQLAN} q_{\bf u} = \bigotimes_{i<j} \thetarho \left(\sqrt{\frac{2}{\mu_i-\mu_j}} \zeta_{ij}, \, \frac{\mu_i+\mu_j}{2(\mu_i-\mu_j)}\right) \end{equation} of `displaced thermal states', each carrying information about one of the off-diagonal elements $\zeta_{ij}$ and implicitly about rotations with respect to the standard basis---here we used a short-hand notation where $\thetarho(\zeta, v)$ denotes a single mode Gaussian state with mean $\zeta$ and covariance matrix $v \mathcal{I}$. \subsubsection{QLAN for finite dimensional states.} We can now state the general QLAN result for finite dimensional i.i.d. quantum models. It is important to note that the convergence to the Gaussian model holds under the assumption that $\rho_0$ is \emph{fully mixed}, and is generally not valid for rank-deficient states which lie on the boundary of the parameter space. Although QLAN results can be proved for restricted models around such states (e.g. pure state models), the general asymptotic analysis for boundary states needs to be dealt with separately (see \cite{AcharyaGuta} for the qubits minimax theory) and will not be discussed here. The construction is similar in spirit to that of \eqref{th.lan.2}, but since we deal here with $d$-dimensional systems we use Weyl's Theorem \cite{Fulton1991} to write the i.i.d. 
state as a block diagonal matrix with blocks indexed by Young diagrams $\boldsymbol{\lambda}$ with $d$ rows and $n$ boxes \begin{align*} \left(\mathbb{C}^d \right)^{\otimesimes n} &= \bigoplus_{\boldsymbol{\lambda}} \mathbb{C}^{d_{\boldsymbol{\lambda}} } \otimesimes \mathbb{C}^{m_{\boldsymbol{\lambda},n}},\\ \rho^{n}_{\bf u} &= \bigoplus_{\boldsymbol{\lambda}} p^{n, \boldsymbol{\lambda}}_{\bf u} \rho^{n, \boldsymbol{\lambda}}_{\bf u} \otimesimes \frac{\mathbb{1} }{m_{\boldsymbol{\lambda},n}} , \end{align*} where $d_{\boldsymbol{\lambda}}$ and $m_{\boldsymbol{\lambda},n}$ are the dimensions of the irreducible representations of $\t{SU}(d)$ and $S(n)$. The classical part is a probability distribution $p^{n, \boldsymbol{\lambda}}_{\bf u} $ over Young diagrams which concentrates on `typical' diagrams with rows lengths $\lambda_i \approx n \mu_i$, and it converges to the Gaussian distribution $p_{\bf u}$ \eqref{eq:QLANgaussian} after rescaling, similarly to \eqref{eq.zn}. The conditional quantum state $\rho^{n, \boldsymbol{\lambda}}_{\bf u}$ can be mapped isometrically into the multimode Fock space $\mathcal{F}^{\otimesimes d(d-1)/2}$ so that it approximates the Gaussian state $q_{\bf u}$ defined in \eqref{eq:quantumQLAN}. Unlike the qubit case, there isn't a natural orthonormal basis that can be used to define the isometry, but rather an `approximate' one. Indeed, the irreducible representation $\mathbb{C}^{d_{\boldsymbol{\lambda}}}$ carries a natural basis $| \boldsymbol{\lambda}, {\bf m} \rangle$ indexed by (semistandard) Young tableaux $t_{\bf m}$. These are Young diagrams whose boxes have labels in $\{1,\dots, d\}$ such that each row is increasing from left to right and each column is strictly increasing from top to bottom. 
A tableau is completely determined by the multiplicities ${\bf m}= \{m_{i,j} : 1\leq i<j\leq d \}$ where $m_{i,j}$ is the number of $j$s in the $i$-th row, for instance $$ t_{\bf m}= \young(1111111122333,222223,333) ~, \qquad {\rm with}~ m_{1,2}=2, m_{1,3}=3,m_{2,3}=1. $$ The basis $| \boldsymbol{\lambda}, {\bf m} \rangle$ is not orthogonal (except when $d=2$), but for large $n$ the states $\rho^{n, \boldsymbol{\lambda}}_{\bf u}$ concentrate on `low excitation' basis vectors ($|{\bf m}|\ll n$), which are approximately orthogonal and can be mapped approximately into Fock basis vectors $|{\bf m}\rangle := \bigotimes_{i<j} |m_{ij}\rangle$. This leads to the general form of QLAN \cite{KahnGuta} which states that there exist quantum channels \begin{eqnarray} T_n &:& M((\mathbb{C}^{d})^{\otimes n}) \to L^{1}(\mathbb{R}^{d-1}) \otimes \mathcal{T}_{1}(\mathcal{F}^{\otimes d(d-1)/2}),\\ S_n &:& L^{1}(\mathbb{R}^{d-1}) \otimes \mathcal{T}_{1}(\mathcal{F}^{\otimes d(d-1)/2}) \to M((\mathbb{C}^{d})^{\otimes n}) \end{eqnarray} such that \begin{align} \label{th.main.lan} \sup_{ {\bf u}\in \Theta^{(\beta, \gamma)}_n } \left\lVert \thetarho_{\bf u} - T_n(\rho^{n}_{\bf u}) \right\rVert_1 = O (n^{- \epsilon } ) , \\ \nonumber \label{Sn} \sup_{ {\bf u}\in \Theta^{(\beta, \gamma)}_n } \left\lVert S_n(\thetarho_{\bf u}) - \rho^{n}_{\bf u} \right\rVert_1 = O (n^{- \epsilon } ), \end{align} for some constant $\epsilon = \epsilon(\beta, \gamma, \boldsymbol{\mu}) > 0$, and where local parameters are restricted to the `slowly growing' balls $\Theta^{(\beta, \gamma)}_n :=\left\{ {\bf u} = ({\bf h}, \boldsymbol{\zeta})\,: \, \| {\bf h}\| \leq n^\gamma, \| \boldsymbol{\zeta} \| \leq n^\beta \right\}$ with technical restrictions $\beta < 1 / 9$ and $\gamma < 1/4$. \subsubsection{Local asymptotic minimax estimation.} The QLAN theorem \eqref{th.main.lan} can be used to construct LAM estimators in a two step procedure, as detailed in the qubit case.
A subsample $\tilde{n}= n^{1-\epsilon}$ is used to localise the state within a local neighbourhood of the type $\Theta_n^{(\beta, \gamma)}$. After an appropriate unitary rotations, the remaining qudits are mapped via the channel $T_n$ into a classical-quantum state which is close to $\thetarho_{\bf u}$. In general, the optimal measurement depends on the chosen cost function. Let us consider the following cost function \begin{equation} \mathcal{C}(\mathbf{u}, \tilde{\mathbf{u}}) = (\mathbf{h} - \tilde{\mathbf{h}})^T C_c (\mathbf{h} - \tilde{\mathbf{h}}) + \sum_{i<j} C_q^{ij} |{\zeta}_{ij} - \tilde{\zeta}_{ij}|^2, \end{equation} where $C_c \geq 0$ and $C_q^{ij}\geq 0$ for all $i<j$. This is a general quadratic cost function, where we have separated the contribution to the cost coming from the eigenvalue changes (classical) and rotations (quantum) and for simplicity we have assumed all potential cross-terms are zero. In this case, the optimal measurement is the heterodyne for each Gaussian mode, which provides (asymptotically) independent unbiased estimator of the off-diagonal parameters $\tilde{\zeta}^n_{ij}\sim \mathcal{N}(\zeta_{ij}, \mu_i/2 )$. On the other hand, the classical component is an (asymptotically) unbiased estimator of the diagonal parameters $\tilde{\bf h}^n\sim \mathcal{N}({\bf h}, V_c(\boldsymbol{\mu}))$. Therefore, the LAM risk reads explicitly \begin{equation} \mathcal{C}_{\rm minmax} = \sup_{c>0}\, \underset{n\to\infty}{\lim\sup} \inf_{\tilde{\mathbf{u}}^n} \, n \sup_{\|{\bf u} \|\leq c} \mathcal{C}(\tilde{\mathbf{u}}^n , \mathbf{u}) = {\rm Tr} (V_c C_c) + \sum_{i<j} C_q^{ij} \mu_i. \end{equation} Let us look at two often encountered examples of cost function. 
If we consider the Frobenius (norm-two square) distance in the space of density matrices $\rho_{\mathbf{u}}$ this results in the corresponding cost in the parameter space: \begin{equation} \mathcal{C}^F(\mathbf{u}, \tilde{\mathbf{u}}) = d_F(\rho_{\mathbf{u}},\rho_{\tilde{\mathbf{u}}}) = \|\rho_{\bf u}- \rho_{\tilde{\bf u}}\|_2^2 = (\mathbf{h}-\tilde{\mathbf{h}})^T(\mathbf{h}-\tilde{\mathbf{h}}) +2 \sum_{i<j} |\zeta_{ij}- \tilde{\zeta}_{ij}|^2, \end{equation} which gives \begin{equation} \mathcal{C}^{F}_{\rm minmax} =\sum_{i=1}^d \mu_i (1-\mu_i) + 2 \sum_{i=1}^d (d-i) \mu_i \leq 2d-1. \end{equation} In contrast to this, the optimal Frobenius cost for \emph{separate} measurements scales as $d^2$ \cite{HaahHarrow} which shows that collective measurements outperform separate measurements by a factor $d$. If instead of the Frobenius distance we take the Bures distance \cite{Bures, Bengtsson2006} the corresponding cost function is \begin{multline} \mathcal{C}^B(\mathbf{u}, \tilde{\mathbf{u}}) = d_B (\rho_{\bf u}, \rho_{\tilde{{\bf u}}}) = 2 -2 {\rm Tr} \left( \sqrt{\rho_{\bf u} \sqrt{\rho_{\tilde{{\bf u}}}} \rho_{\bf u}} \right) \overset{ o(\|{\bf u} - \tilde{{\bf u}}\|^2) }{=} \\ ({\bf u} - \tilde{{\bf u}})^T F(\boldsymbol{\mu}) ({\bf u} - \tilde{{\bf u}}) = ({\bf h} - \tilde{{\bf h}})^T F_{c} (\boldsymbol{\mu}) ({\bf h} - \tilde{{\bf h}}) + \sum_{i<j} (\boldsymbol{\zeta} - \tilde{\boldsymbol{\zeta}})^T F_q (\boldsymbol{\mu})(\boldsymbol{\zeta} - \tilde{\boldsymbol{\zeta}}), \end{multline} which corresponds to the QFI and has a block diagonal form with respect to the classical set of parameters and each pair $({\rm Re} \zeta_{ij}, {\rm Im} \zeta_{ij})$ of quantum parameters \begin{equation} F_c = V_c(\boldsymbol{\mu}), \qquad F^{ij}_q(\boldsymbol{\mu}) = \frac{4}{\mu_i+\mu_j} \mathcal{I}. \end{equation} The LAM Bures cost is then \begin{equation} \mathcal{C}^{B}_{\rm minmax} =(d-1) + 4 \sum_{i<j} \frac{\mu_i}{\mu_i+\mu_j}. 
\end{equation} It follows immediately that $ d^2-1\leq \mathcal{C}^{B}_{\rm minmax} \leq (d-1)(2d+1)$. The inequality confirms the expectation that the cost is larger than the quantum SLD CR lower bound, which is achieved only for the completely incoherent state $\rho_0 = \mathbb{1}/d$. Strictly speaking, however, the QLAN result as stated is not valid at this state since all eigenvalues are equal. One can check that at this point the limit model is \emph{completely classical} which explains why the Cram\'{e}r-Rao bound is achievable for this state. On the other hand, the upper bound is approached in the limit of almost pure states with $1\approx \mu_1\gg \mu_2\gg \dots \gg \mu_d$. \section{Bayesian approach} \label{sec:bayes} In this section we follow the Bayesian paradigm and describe methods that allow one to find the optimal measurement and estimation strategies in multi-parameter metrological problems. Similarly to the frequentist approach, a Bayesian quantum estimation model consists of a family of states $\rho_{\boldsymbol{\var}}$, but it is additionally supplemented by the prior distribution $p({\boldsymbol{\var}})$ representing prior knowledge on the set of parameters to be estimated. Given a cost function $\mathcal{C}({\boldsymbol{\var}}, \tilde{{\boldsymbol{\var}}})$, the goal is to minimize the average Bayesian cost $\overline{\mathcal{C}}$ as defined in Eq.~\eqref{eq:bayescost} over measurements $\{M_m\}$ and estimators $\tilde{{\boldsymbol{\var}}}(m)$. Note that we can formally coarse grain the measurement operators $M_m$ and relabel operators by the estimated value of parameter $M_{\tilde{{\boldsymbol{\var}}}(m)}$ with $M_{\tilde{{\boldsymbol{\var}}}} = \int \t{d}m\, M_m \delta(\tilde{{\boldsymbol{\var}}} - \tilde{{\boldsymbol{\var}}}(m))$.
Thanks to this we can combine the double minimization over the measurement and the estimator to a single optimization over the measurements only: \begin{align} \min_{\{M_{\tilde{{\boldsymbol{\var}}}}\}} \overline{\mathcal{C}}, \quad M_{\tilde{{\boldsymbol{\var}}}} \geq 0, \quad \int\t{d}\tilde{{\boldsymbol{\var}}}\, M_{\tilde{{\boldsymbol{\var}}}} = \mathbb{1}, \quad \overline{\mathcal{C}} = \int \t{d} {\boldsymbol{\var}} \t{d}\tilde{{\boldsymbol{\var}}} p({\boldsymbol{\var}}) \trace(\rho_{\boldsymbol{\var}} M_{\tilde{{\boldsymbol{\var}}}}) \mathcal{C}({\boldsymbol{\var}},\tilde{{\boldsymbol{\var}}}). \end{align} Of course, this is in general an intractable problem, as the space of all allowed generalized measurements is enormous. Still, as demonstrated below, with some additional assumptions on the cost function or the set of states, the problem may be solved. \subsection{Single parameter case} Let us start with the simplest exactly solvable case, namely \emph{single} parameter Bayesian estimation with a quadratic cost function $\mathcal{C}(\theta, \tilde{\theta}) = (\theta-\tilde{\theta})^2$. For simplicity of the formulas that follow we redefine the parameter $\theta$ so that the expectation value of the prior distribution is zero, $\int \t{d} \theta\, p(\theta) \theta = 0$.
The Bayesian variance to be minimized takes the form: \begin{multline} \label{eq:costquadratic} \overline{{\Delta_\G^2\tilde\bvar}s} = \int \t{d} \theta\, \t{d}\tilde{\theta}\, p(\theta)\trace[\rho_\theta M_{\tilde{\theta}}(\theta - \tilde{\theta})^2] = \int \t{d} \theta\, p(\theta) \theta^2 + \trace \left[\int \t{d}\theta\, p(\theta) \rho_\theta \int \t{d}\tilde{\theta} M_{\tilde{\theta}} \tilde{\theta}^2 \right] + \\ - 2 \trace\left[\int \t{d}\theta\, p(\theta) \theta \rho_\theta \int \t{d}\tilde{\theta}M_{\tilde{\theta}} \tilde{\theta} \right] = {\Delta^2\var} + \trace(\bar{\rho} \Lambda_2) - 2 \trace(\bar{\rho}^\prime \Lambda_1), \end{multline} where ${\Delta^2\var} = \int \t{d} \theta\, p(\theta) \theta^2$ represents the variance of the prior distribution, $\bar{\rho}= \int\t{d}\theta \, p(\theta)\rho_\theta$ is the average state, $\bar{\rho}^\prime = \int \t{d}\theta\, p(\theta) \theta \rho_\theta$ and $\Lambda_k = \int \t{d}\tilde{\theta}\, M_{\tilde{\theta}} \tilde{\theta}^k$. Let us first prove that if a given POVM measurement $\{M_{\tilde{\theta}}\}$ is optimal, then we may find a projective measurement yielding the same cost. Let us perform the eigen-decomposition of the $\Lambda_1$ operator: \begin{equation} \Lambda_1 = \int \t{d}\tilde{\theta}\, M_{\tilde{\theta}} \tilde{\theta} = \sum_i \tilde{\theta}_i \ket{\tilde{\theta}_i}\bra{\tilde{\theta}_i}. \end{equation} Consider now the following inequality: \begin{equation} \label{eq:ineqBayesbound} \int\t{d}\tilde{\theta}\,(\tilde{\theta} - \Lambda_1) M_{\tilde{\theta}} (\tilde{\theta} - \Lambda_1) \geq 0, \end{equation} which is true since $M_{\tilde{\theta}} \geq 0$ while $\Lambda_1$ is Hermitian. This implies: \begin{equation} \int\t{d}\tilde{\theta}\, M_{\tilde{\theta}} \tilde{\theta}^2 + \Lambda_1^2 - 2 \Lambda_1^2 \geq 0 \end{equation} and hence \begin{equation} \label{eq:l1l2} \Lambda_2 \geq \Lambda_1^2.
\end{equation} Let us now replace the measurement $\{M_{\tilde{\theta}}\}$ with the projective measurement, corresponding to the projection on the eigenbasis $\ket{\tilde{\theta}_i}$ of $\Lambda_1$. For this choice $\Lambda_2 = \Lambda_1^2$, which according to \eqref{eq:l1l2} is the smallest operator possible. Inspecting \eqref{eq:costquadratic} we see that we want the term $\trace (\bar{\rho}\Lambda_2)$ to be as small as possible, and hence it is always optimal to choose the projective measurement in the eigenbasis of $\Lambda_1$. Assuming the measurement is projective, we may now introduce a single operator variable $\bar{\Lambda} = \Lambda_1$, write $\Lambda_2 = \bar{\Lambda}^2$, and the optimization problem amounts to minimization of the following cost function over a single Hermitian operator $\bar{\Lambda}$: \begin{equation} \overline{{\Delta_\G^2\tilde\bvar}s} = {\Delta^2\var} + \trace(\bar{\rho} \bar{\Lambda}^2) - 2 \trace(\bar{\rho}^\prime \bar{\Lambda}). \end{equation} Since the above formula is quadratic in the matrix $\bar{\Lambda}$, the minimization can be performed explicitly and the condition for the vanishing of the first derivative amounts to the following linear equation: \begin{equation} \label{eq:lambdabayes} \bar{\Lambda} \bar{\rho} + \bar{\rho}\bar{\Lambda} -2 \bar{\rho}^\prime = 0. \end{equation} Multiplying the above equality by $\bar{\Lambda}$ and taking the trace of both sides we get that $\trace(\bar{\rho}^\prime \bar{\Lambda}) = \trace(\bar{\rho}\bar{\Lambda}^2)$ and therefore we find that the minimal Bayesian cost reads \cite{Helstrom1976, Macieszczak2014}: \begin{equation} \label{eq:bayescostopt} \overline{{\Delta_\G^2\tilde\bvar}s} = {\Delta^2\var} - \trace\left(\bar{\rho} \bar{\Lambda}^2\right), \end{equation} where $\bar{\Lambda}$ is defined by \eqref{eq:lambdabayes}.
Let us note here an interesting observation that the above formula can be related to the formula for the QFI in the case of Gaussian priors, where the $\bar{\rho}^\prime$ operator is related to the derivative of the $\bar{\rho}$ operator with respect to the shift of the center of the prior \cite{Macieszczak2014}. Indeed, consider the prior $p_{\theta_0}(\theta)=\frac{1}{\sqrt{2\pi{\Delta^2\var}}}e^{-(\theta-\theta_0)^2/(2 {\Delta^2\var})}$ whose center $\theta_0$ we treat as a parameter to estimate by performing a measurement on an effectively averaged state $\bar{\rho}_{\theta_0}= \int\t{d}\theta \, p_{\theta_0}(\theta)\rho_\theta$. We can now regard $\bar{\rho}_{\theta_0}$ as a family of states as considered in the frequentist estimation approach. Notice the following mathematical identity: \begin{equation} \label{deriv} \bar{\rho}^\prime=\int \t{d}\theta\, p_{\theta_0=0}(\theta)\theta\rho_\theta=\int \t{d}\theta\, {\Delta^2\var}\frac{\t{d}p_{\theta_0}(\theta)}{\t{d}\theta_0}\Big|_{\theta_0=0}\rho_\theta={\Delta^2\var} \frac{\t{d}}{\t{d}\theta_0}\int \t{d}\theta\, p_{\theta_0}(\theta)\rho_\theta\Big|_{\theta_0=0}={\Delta^2\var}\frac{\t{d}\bar{\rho}_{\theta_0}}{\t{d}\theta_0}\Big|_{\theta_0=0} \end{equation} and therefore from \eqref{eq:lambdabayes} \begin{equation} \bar{\rho}^\prime = \frac{1}{2}(\bar\rho\bar\Lambda+\bar\Lambda\bar\rho)={\Delta^2\var}\left.\frac{\t{d}\bar\rho_{\theta_0}}{\t{d}\theta_0}\right|_{\theta_0=0}. \end{equation} It means that $\bar\Lambda$ is proportional to the SLD in the frequentist estimation problem of estimating $\theta_0$ on states $\bar{\rho}_{\theta_0}$: $\bar\Lambda={\Delta^2\var}\cdot L$.
Therefore, the mean Bayesian cost may be written as: \begin{equation} \label{eq:bayescostoptqfi} \overline{{\Delta_\G^2\tilde\bvar}s} = {\Delta^2\var}\left[ 1 -{\Delta^2\var} \left(F_Q(\bar\rho_{\theta_0})\right)\right], \end{equation} where $F_Q(\bar\rho_{\theta_0})$ is the QFI for the $\theta_0$ estimation problem for the family of states $\bar{\rho}_{\theta_0}$. \subsection{Multi-parameter case} \label{sec:bayesquadratic} Let us now turn to a multi-parameter scenario and see whether the reasoning from the previous subsection can be generalized to work in this case. For a given choice of the cost matrix $C$ the average Bayesian cost reads: \begin{equation} \label{eq:bayescostmulti} \overline{{\Delta_\G^2\tilde\bvar}s} = \int \t{d} {\boldsymbol{\var}}\, \t{d} \tilde{{\boldsymbol{\var}}}\, p({\boldsymbol{\var}}) \trace\left[\rho_{{\boldsymbol{\var}}} M_{\tilde{{\boldsymbol{\var}}}} ({\boldsymbol{\var}} - \tilde{{\boldsymbol{\var}}})^T C ({\boldsymbol{\var}} - \tilde{{\boldsymbol{\var}}}) \right] = \Delta^2_{C} {\boldsymbol{\var}} + \trace\left(\bar{\rho} \Lambda_2 \right) - 2 \trace \left({\boldsymbol{\bar{\rho}}}^{\prime T} C \boldsymbol{\Lambda_{1}} \right), \end{equation} where analogously to the single parameter case, $\Delta^2_{C} {\boldsymbol{\var}} = \int \t{d}{\boldsymbol{\var}} \, p({\boldsymbol{\var}}) {\boldsymbol{\var}}^T C {\boldsymbol{\var}}$ represents the cost corresponding to the prior distribution, $\bar{\rho} = \int \t{d}{\boldsymbol{\var}} \, p({\boldsymbol{\var}}) \rho_{{\boldsymbol{\var}}}$ is the average state, $\Lambda_2 = \int \t{d} \tilde{{\boldsymbol{\var}}}\, M_{\tilde{{\boldsymbol{\var}}}} \tilde{{\boldsymbol{\var}}}^T C \tilde{{\boldsymbol{\var}}}$, ${\boldsymbol{\bar{\rho}}^\prime} =\int \t{d}{\boldsymbol{\var}} \, p({\boldsymbol{\var}}) {\boldsymbol{\var}}\rho_{{\boldsymbol{\var}}} $ while $\boldsymbol{\Lambda_1} =\int \t{d}\tilde{{\boldsymbol{\var}}} \, M_{\tilde{{\boldsymbol{\var}}}} \tilde{{\boldsymbol{\var}}} $---note that
$\boldsymbol{\bar{\rho}^\prime}$, $\boldsymbol{\Lambda_1}$ are now operator vectors. Consider now an inequality which is a multi-parameter generalization of \eqref{eq:ineqBayesbound}: \begin{equation} \int\t{d}\tilde{{\boldsymbol{\var}}}\, M_{\tilde{{\boldsymbol{\var}}}} (\tilde{{\boldsymbol{\var}}} - \boldsymbol{\Lambda_1})^T C (\tilde{{\boldsymbol{\var}}} - \boldsymbol{\Lambda_1}) \geq 0. \end{equation} From this it follows that: \begin{equation} \label{eq:ineql2l1} \Lambda_2 \geq \boldsymbol{\Lambda_1}^T C \boldsymbol{\Lambda_1}. \end{equation} Replacing $\Lambda_2$ in Eq.~\eqref{eq:bayescostmulti} with the r.h.s. of the above inequality we get \begin{equation} \overline{{\Delta_\G^2\tilde\bvar}s} \geq \Delta^2_{C} {\boldsymbol{\var}} + \tracep\left(C\left(\trace(\bar{\rho} \boldsymbol{\Lambda_1}\boldsymbol{\Lambda_1}^T)-2\trace( \boldsymbol{\Lambda_1}\boldsymbol{\bar{\rho}^\prime}^T) \right)\right). \end{equation} We can now perform minimization over $\boldsymbol{\Lambda_1}$ (treating formally different vector components of $\boldsymbol{\Lambda_1}$ as independent operators) and obtain effectively a lower bound on the cost in the form: \begin{equation} \overline{{\Delta_\G^2\tilde\bvar}s} \geq \Delta^2_{C} {\boldsymbol{\var}} - \tracep\left(C\trace\left(\bar{\rho} \boldsymbol{\bar{\Lambda}}\boldsymbol{\bar{\Lambda}^T}\right)\right), \end{equation} where $\bar{\boldsymbol{\Lambda}}$ is the solution of the following equation: \begin{equation} \boldsymbol{\bar{\Lambda}} \bar{\rho} + \bar{\rho}\boldsymbol{\bar{\Lambda}} = 2 \boldsymbol{\bar{\rho}^\prime}. \end{equation} Unfortunately, unlike in the single parameter case this bound is not always saturable. This is due to the fact that there is in general no single eigenbasis that would diagonalize all operators forming the optimal vector $\boldsymbol{\bar{\Lambda}}$.
Had there been such an eigenbasis, we could write $\boldsymbol{\bar{\Lambda}} = \sum_i \tilde{{\boldsymbol{\var}}}_{i} \ket{\tilde{{\boldsymbol{\var}}}_i}\bra{\tilde{{\boldsymbol{\var}}}_i}$, where $\ket{\tilde{{\boldsymbol{\var}}}_i}$ form an orthonormal eigenbasis, and $\tilde{{\boldsymbol{\var}}}_i$ vectors of eigenvalues. In this case inequality \eqref{eq:ineql2l1} would be saturated and hence the bound would be attained provided the measurement is performed in this eigenbasis and the estimated values correspond to $\tilde{{\boldsymbol{\var}}}_i$. Analogously to the one-parameter case, in the case of a multiparameter Gaussian prior the above formula may be related with the QFI matrix of the corresponding frequentist estimation problem of estimating the mean ${\boldsymbol{\var}}_0$ of the prior $p_{{\boldsymbol{\var}}_0}({\boldsymbol{\var}})$. Consider \begin{equation} p_{{\boldsymbol{\var}}_0}({\boldsymbol{\var}})=\frac{1}{\sqrt{2\pi}^{\mathfrak{p}}\sqrt{\det V}}e^{-\tfrac{1}{2}({\boldsymbol{\var}}-{\boldsymbol{\var}}_0)^T V^{-1} ({\boldsymbol{\var}}-{\boldsymbol{\var}}_0)}, \end{equation} where $V$ is the positive symmetric covariance matrix of the prior---in this case the prior cost reads simply: $\Delta^2_{C} {\boldsymbol{\var}} = \tracep(C V)$. Repeating the reasoning from \eqref{deriv} we have $\boldsymbol{\bar\rho^{\prime}}\big|_{{\boldsymbol{\var}}_0=0}=V\boldsymbol\nabla \bar\rho_{{\boldsymbol{\var}}_0}|_{{\boldsymbol{\var}}_0=0}$ (where $\boldsymbol{\nabla}$ here denotes gradient operator with respect to ${\boldsymbol{\var}}_0$), and from that $\boldsymbol{\bar{\Lambda}}=V\cdot\bold{L}$. Keeping in mind that $F_Q(\bar\rho_{{\boldsymbol{\var}}_0})=\trace(\bar\rho\bold{L}\bold{L}^T)$ we finally get: \begin{equation} \overline{{\Delta_\G^2\tilde\bvar}s} \geq \tracep(C V) - \tracep\left(C V F_Q(\bar\rho_{{\boldsymbol{\var}}_0}) V \right). \end{equation} An analogous result was derived in a slightly different way in \cite{Sidhu2019, Rubio2019}.
It is appealing as it yields a simple bound on multi-parameter Bayesian estimation cost utilizing the QFI matrix. One needs to keep in mind, however, that due to the use of the QFI, this bound will not properly address the potential optimal measurement incompatibility issue which may affect its tightness. \subsection{Covariant estimation} \label{sec:bayescovariant} The simplicity of the quadratic cost function made it possible to solve a general single-parameter Bayesian estimation problem, while in the multi-parameter case it allowed us to derive non-trivial bounds. There is no universal way, however, to find an exact solution in case of a general multi-parameter Bayesian estimation problem in this way. Still, provided the problem enjoys a certain symmetry one may utilize the powerful method of covariant measurements in order to find the solution. We will start by defining the class of covariant problems. A covariant estimation problem involves two group actions which act in a covariant fashion. On the one hand, the parameter space $\Theta$ carries the action of a Lie group $G$: for each ${\boldsymbol{\var}}\in \Theta$, the action of $g\in G$ is denoted ${\boldsymbol{\var}}\mapsto g{\boldsymbol{\var}}$. On the other hand, the Hilbert space of the quantum statistical model $\{\rho_{\boldsymbol{\var}}\}_{{\boldsymbol{\var}}\in \Theta}$ carries a unitary representation $U_g$ of $G$.
We say that the estimation problem is covariant with respect to the two actions if and only if the following conditions are satisfied: \begin{enumerate} \item{The parameter to be estimated is an element of the group $g \in G$.} \item{The family of states is covariant with respect to the group representation $$ \rho_{g{\boldsymbol{\var}}} = U_g \rho_{\boldsymbol{\var}} U_g^\dagger $$} \item{The cost function is left invariant with respect to the action of the group: $ \mathcal{C}(g {\boldsymbol{\var}}_1, g {\boldsymbol{\var}}_2)= \mathcal{C}({\boldsymbol{\var}}_1,{\boldsymbol{\var}}_2)$ for all $g\in G$. } \item{The prior distribution is invariant with respect to the group action: $ p(gd{\boldsymbol{\var}}) = p(d{\boldsymbol{\var}})$--- This represents in a formal sense the maximal prior ignorance about the parameter.} \end{enumerate} For a covariant estimation problem the Bayesian cost is given by: \begin{equation} \overline{\mathcal{C}} = \int \t{d}g\, \t{d}\tilde{g} \, \trace(U_g \rho_e U_g^\dagger \, M_{\tilde{g}}) \mathcal{C}(g,\tilde{g}), \end{equation} where we assume that $\t{d}g$ is the normalized Haar measure on the group, $\int \t{d} g =1$, with respect to which the prior is trivial $p(g)=1$. Thanks to the covariance property, it can be proven that one can restrict the search for optimal measurements to the class of covariant measurements \cite{Holevo1982}. A measurement $M_{\tilde{g}}$ is said to be covariant with respect to the action of the group representation if and only if \begin{equation} U_h M_{\tilde{g}}U_h^\dagger = M_{h \tilde{g}}, \qquad {\rm for~all~}\qquad \tilde{g}, h. \end{equation} In particular for a covariant measurement \begin{equation} M_{\tilde{g}} = U_{\tilde{g}} M_e U_{\tilde{g}}^\dagger, \end{equation} so that all measurement operators are determined by a single \emph{seed} operator $M_e$.
Note that thanks to the covariance property of the measurement and the group invariance of the Haar measure we have: \begin{multline} \label{eq:covproblem} \overline{\mathcal{C}} = \int \t{d}g\, \t{d}\tilde{g}\, \trace\left(M_{\tilde{g}} \rho_g \right) \mathcal{C}(g,\tilde{g}) = \int \t{d}g\, \t{d}\tilde{g}\, \trace\left(U^\dagger_{\tilde{g}^{-1} g} M_e U_{\tilde{g}^{-1}g} \rho_e \right) \mathcal{C}(g,\tilde{g}) = \\ \overset{g\rightarrow \tilde{g} g}{=} \int \t{d}g\, \t{d}\tilde{g}\, \trace\left(U^\dagger_g M_e U_g \rho_e \right) \mathcal{C}(g,e) = \int \t{d}g\, \trace\left(M_e \rho_g \right) \mathcal{C}(g,e). \end{multline} As such, the whole problem now amounts to minimization of the above quantity over a \emph{single} operator $M_e$ with constraints $M_e\geq 0 $, $\int \t{d}g\, U_g M_e U_g^\dagger = \mathbb{1}$. This is a huge simplification of the original problem and often the optimal operator $M_e$ may be found analytically, as is demonstrated below. \subsection{Qubit models} \label{sec:bayesianqubit} \subsubsection{Pure qubit case.} First, we consider an estimation model in which we are given $n$ copies of a completely unknown qubit state. Using the standard Bloch sphere parametrization, we write the state as \begin{equation} \rho_{\Omega}^{n} = \ket{\psi_\Omega}\bra{\psi_\Omega}^{\otimes n}, \ \ket{\psi_\Omega} = \cos(\theta/2)\ket{0} + e^{i\phi }\sin(\theta/2) \ket{1}, \end{equation} where for compactness of notation we have introduced $\Omega = (\theta,\phi)$. As a cost function we choose: \begin{equation} \mathcal{C}(\psi,\tilde{\psi}) = 4(1-|\braket{\psi}{\tilde{\psi}}|^2) \overset{\tilde{\psi} \approx \psi+ \t{d}\psi}{=} \t{d}\theta^2 + \sin^2\theta \t{d}\phi^2, \end{equation} which in the first order approximation reduces to the standard metric on the sphere---a useful property that will let us relate the asymptotic Bayesian cost with the costs obtained within the frequentist approach.
In order to think of this problem as a covariant estimation problem we may view $\ket{\psi_\Omega}$ as obtained by rotating a fixed state $\ket{0}$ using the defining representation of the SU(2) group. More precisely, since the initial state $\ket{0}$ will not change under rotations around the $z$ axis, the parameter set corresponds to the $\t{SU}(2)/\t{U}(1)$ (i.e. the set of left cosets of $\t{U}(1)$ in $\t{SU}(2)$). However, $\t{SU}(2)/\t{U}(1)$ is not a group itself. Therefore, to use \eqref{eq:covproblem} directly, we still need to refer to the full $\t{SU}(2)$ group. Consider the SU(2) parameterization using Euler angles $(\psi,\theta,\phi)$: \begin{equation} \ket{\psi_{(\theta,\phi)}}=U_{(\psi,\theta,\phi)}\ket{0},\quad U_{(\psi,\theta,\phi)} = e^{i \phi \sigma_z/2}e^{i \theta \sigma_y/2} e^{i \psi \sigma_z/2},\quad \phi\in [0,2\pi[,\theta\in]0,\pi[, \psi\in[0,4\pi[, \end{equation} with the corresponding Haar measure: \begin{equation} \t{d}g=\frac{\t{d}\psi}{4\pi} \frac{\sin(\theta)\t{d}\theta}{2}\frac{\t{d}\phi}{2\pi}. \end{equation} Using \eqref{eq:covproblem} (and after performing a trivial integration over $\psi$) we have \begin{equation} \overline{\mathcal{C}}^{n} = \int \t{d}\Omega \, \trace\left(M_e \ket{\psi_\Omega}\bra{\psi_\Omega}^{\otimes n} \right)4(1-|\braket{\psi_\Omega}{0}|^2), \end{equation} where \begin{equation} \t{d}\Omega = \frac{1}{4\pi}\t{d} \theta \t{d}\phi \sin\theta \end{equation} is the measure on $\t{SU}(2)/\t{U}(1)$, induced by the Haar measure on $\t{SU}(2)$. In this way we have now formulated the problem as a covariant estimation problem, which can be solved explicitly \cite{Massar1995}.
We need to minimize $\overline{\mathcal{C}}^{n}$ over $M_e$, keeping in mind $M_e \geq 0$, $\int\t{d}\Omega \, U_\Omega^{\otimes n} M_e U_{\Omega}^{\dagger \otimes n } = \mathbb{1}$ (we assume without loss of generality that $M_e$ is invariant under rotations around the $z$ axis). We can rewrite the expression for the cost as \begin{equation} \overline{\mathcal{C}}^{n} = 4\left[ 1 - \int\t{d}\Omega \,\trace\left(M_e \otimes \ket{0}\bra{0} \cdot \ket{\psi_\Omega}\bra{\psi_\Omega}^{\otimes n+1} \right) \right]. \end{equation} We may then take the integration over $\t{d} \Omega$ under the trace and make use of the following property \begin{equation} \int \t{d} \Omega \ket{\psi_\Omega}\bra{\psi_\Omega}^{\otimes n+1} = \frac{1}{n+2} \mathbb{1}_{\mathcal{H}_S^{\otimes n+1}}, \end{equation} where $\mathcal{H}_S^{\otimes n+1}$ is the fully symmetric subspace of $n+1$ qubits---this fact follows from Schur's lemma and the irreducibility of the SU(2) representation that acts on the fully symmetric space of qubits. Therefore the optimal $M_e$ is the one that maximizes $\trace\left(M_e \otimes \ket{0}\bra{0} \, \mathbb{1}_{\mathcal{H}_S^{\otimes n+1}} \right)$. We may restrict the $M_e$ operator to act solely on the symmetric subspace $\mathcal{H}_S^{\otimes n}$ as this is the subspace where states $\ket{\psi_\Omega}^{\otimes n}$ live. Let us denote $U_g^{j=n/2}$ to be the irreducible representation of SU(2) acting on this subspace. Taking into account the completeness condition for $M_e$: \begin{equation} \int \t{d} \Omega U_\Omega^{j=n/2} M_e U_\Omega^{j=n/2 \dagger} = \mathbb{1}_{\mathcal{H}_S^{\otimes n}} \end{equation} we see that $\trace M_e = n+1$. It is clear that in order to have the largest overlap between $M_e \otimes \ket{0}\bra{0}$ and $\mathbb{1}_{\mathcal{H}_S^{\otimes n+1}}$, we would like to have the $M_e \otimes \ket{0}\bra{0}$ operator fully supported on $\mathcal{H}_S^{\otimes n+1}$.
This will be so provided we choose \begin{equation} M_e = \ket{0}\bra{0}^{\otimes n} (n+1). \end{equation} As a result we get \begin{equation} \bar{\mathcal{C}}^{n} = 4\left(1-\frac{n+1}{n+2} \right) \overset{n \rightarrow \infty}{\approx} \frac{4}{n} \end{equation} for which the asymptotic form indeed agrees with the saturable HCR bound derived in Sec.~\ref{sec:examplequbit1}. This proves asymptotic consistency between the Bayesian and the frequentist approaches. \subsubsection{Mixed qubit case.} Consider now a mixed qubit state estimation problem, as in Sec.~\ref{sec:examplequbit3}, with the $n$-copy state \begin{equation} \rho_{\mathbf{r}}^n = \rho_{\mathbf{r}}^{\otimes n}, \quad \rho_\mathbf{r} = \frac{1}{2}\left(\mathbb{1} + \boldsymbol{\sigma} \cdot \mathbf{r} \right). \end{equation} Here, we assume that the prior distribution of the $\mathbf{r}$ parameter when written using spherical coordinates takes the form: \begin{equation} p(\mathbf{r}) d\mathbf{r} = w(r)\t{d}r \t{d} \Omega, \end{equation} where $w(r)$ is an arbitrary nonnegative function of the Bloch vector length for $0 \leq r \leq 1$. This prior is invariant with respect to Bloch ball rotations, and hence the problem will be covariant with respect to $\theta,\phi$ estimation, but not with respect to the $r$ parameter for which there is no corresponding group structure. Still, this partial group covariance leads to a significant simplification in obtaining the final solution \cite{Vidall1999, Bagan2006a}.
Let us choose the cost function to be: \begin{equation} \mathcal{C}(\mathbf{r},\tilde{\mathbf{r}}) = 4\left(1- \trace(\sqrt{\sqrt{\rho_\mathbf{r}}\rho_{\tilde{\mathbf{r}}} \sqrt{\rho_\mathbf{r}}})^2 \right) \overset{\tilde{\mathbf{r}} = \mathbf{r} + \t{d} \tilde{\mathbf{r}}}{=} \frac{\t{d}r^2}{1-r^2} + r^2(\t{d} \theta^2 + \sin^2\theta \t{d}\phi^2), \end{equation} which is the most natural choice, as it is directly related with quantum state fidelity \cite{Uhlmann1976, Jozsa1994, Bengtsson2006}. Furthermore, for neighbouring states it reduces to the quadratic cost function equivalent to the Bures metric---this implies that in order to make a meaningful asymptotic comparison with the results obtained within the frequentist approach, we should set the cost matrix to be $C_{\t{Bures}} = \t{diag}[\tfrac{1}{1-r^2}, r^2, r^2 \sin^2\theta]$ in the frequentist formulas. As discussed in detail in Sec.~\ref{sec:QLANstrong} the $\rho_\mathbf{r}^n$ state may be decomposed in terms of irreducible SU(2) and $S_n$ subspaces as \begin{equation} \rho_{\bf r}^n = \bigoplus_{j\in \mathcal{J}_n} p_{\bf r}^{n,j} \rho_{\bf r}^{n,j} \otimes \frac{\mathbb{1}_{m_j}}{m_j}. \end{equation} It can be shown \cite{Bagan2006a} that the above structure of the state together with the partial covariant nature of the problem implies that the optimal measurement is a measurement covariant with respect to the action of the SU(2): \begin{equation} M_{j,\tilde{\Omega}} = (2j+1) U_{\tilde{\Omega}}^j\ket{j,j}\bra{j,j}U_{\tilde{\Omega}}^{j\dagger} \otimes \mathbb{1}_{m_j}, \quad\bigoplus_{j \in \mathcal{J}_n} \int\t{d} \tilde{\Omega} M_{j,\tilde{\Omega}} = \mathbb{1}.
\end{equation} This measurement may be understood as acting trivially on multiplicity space, as it carries no information on the state, yields information on the total angular momentum $j$ and, moreover, within each irreducible subspace corresponding to a given $j$ performs a covariant measurement obtained by rotating a state with the maximum angular momentum projection on the $z$ axis $\ket{j,j}$. The Bloch vector direction estimate is $\tilde{\Omega}$, while all the information on the Bloch vector length comes from $j$ and the explicit optimal estimator of $\tilde{r}$ reads \cite{Bagan2006a} \begin{equation} \tilde{r}(j) = \frac{|v_j^z|}{\sqrt{{v_j^{0}}^2 + {v_j^{z}}^2}} \overset{n \rightarrow \infty}{\approx} \frac{j}{n/2}, \end{equation} where \begin{align} v_j^0&= \int_0^1 \t{d}r\, w(r) \sqrt{1-r^2} \left(\frac{1-r^2}{4}\right)^{\frac{n}{2}}\sum_{m=-j}^j \left(\frac{1+r}{1-r}\right)^m, \\ \nonumber v_j^z&= \int_0^1 \t{d}r\, \frac{w(r) r}{j+1} \left(\frac{1-r^2}{4}\right)^{\frac{n}{2}} \sum_{m=-j}^j m \left(\frac{1+r}{1-r}\right)^m. \end{align} The resulting optimal cost reads \begin{equation} \label{eq:examplequbit3bayes} \bar{\mathcal{C}}^n = 2\left(1 - \sum_{j=0(\frac{1}{2})}^{\frac{n}{2}} m_j \sqrt{{v_j^{0}}^2 + {v_j^{z}}^2}\right) \overset{n \rightarrow \infty}{\approx} \int_0^1\t{d}r\, w(r)\frac{3 + 2r}{n}. \end{equation} Note that the asymptotic formula is in agreement with the HCR bound for the corresponding frequentist model, see Sec.~\ref{sec:examplequbit3}---if in Eq.~\eqref{eq:hcrqubit3} we choose $c(r)=1/(1-r^2)$ then the cost matrix becomes the Bures cost matrix $C=C_{\t{Bures}}$ and we get $\mathcal{C}^n = (3 + 2r)/n$. If we now perform the averaging of the HCR bound over $w(r)$ we indeed obtain \eqref{eq:examplequbit3bayes} (up to the $1/n$ rescaling due to the number of copies).
A similar analysis can be performed for the case of $(r,\phi)$ estimation \cite{Bagan2006a}, with the natural prior $w(r) \frac{\t{d}\phi}{2\pi}$ guaranteeing $U(1)$ covariance of the problem. Without going into details, we just mention that in this case the asymptotic Bayesian cost reads: \begin{equation} \bar{\mathcal{C}}^n \overset{n \rightarrow \infty}{\approx} \frac{2}{n}, \end{equation} which is independent of the prior distribution $w(r)$ and is in agreement with the frequentist bounds for the corresponding cost matrix $C=\t{diag}[\tfrac{1}{1-r^2}, 1]$, see Eq.~\eqref{eq:hcrqubit2}. \subsection{Bayesian Cram{\'e}r-Rao like bounds} \label{sec:bayescr} The examples of the previous subsection indicate that while the Bayesian approach is typically technically more demanding than the frequentist approach, it offers a deeper understanding of the actually achievable cost using finite resources. Moreover, the obtained results coincide with the latter in the limit of multiple copy estimation. Unfortunately, Bayesian models can only be solved exactly in special cases and therefore one may wonder whether efficiently calculable Bayesian bounds exist which would combine the simplicity of the computation characteristic for the frequentist bounds while taking into account the impact of prior information as well as the finite resources. In the single parameter case the quantum Bayesian CR bound can be derived using the classical Van Trees inequality \cite{Trees1968} by simply replacing the classical FI with the QFI in the formulas.
The direct multi-parameter generalization (see the proof below) leads to \cite{Gill1995, Tsang2011} \begin{equation} \label{eq:bayesianCRbound} \overline{{\Delta_\G^2\tilde\bvar}s} \geq \tracep[ C (\overline{F_Q} + I)^{-1}], \end{equation} where $\overline{F_Q} = \int \t{d}{\boldsymbol{\var}} \, p({\boldsymbol{\var}})F_Q(\rho_{\boldsymbol{\var}})$ is the QFI matrix averaged over the prior $p({\boldsymbol{\var}})$ while \begin{equation} I = \int \frac{1}{p({\boldsymbol{\var}})}\boldsymbol{\nabla}p({\boldsymbol{\var}})\boldsymbol{\nabla}^Tp({\boldsymbol{\var}})\t{d}{\boldsymbol{\var}} \end{equation} is a matrix representing the information contribution coming from the prior. This bound, while providing some insight into the impact of the prior knowledge and finite data, is insensitive to the measurement incompatibility issue as it is based on the QFI matrix. One might try to derive an analogous bound using the HCR bound rather than the SLD CR bound, while retaining the Bayesian framework and the prior information contribution in the bound. This task is somewhat challenging, as unlike the SLD CR bound the HCR bound cannot be reduced to a single matrix inequality and therefore we do not have a matrix with which we can replace the QFI matrix in \eqref{eq:bayesianCRbound} in order to strengthen the bound. Below we provide a derivation of the Bayesian bound that involves the HCR bound following along the lines presented in \cite{Gill1995, Gill2005}. At the same time this will also lead us to a proof of \eqref{eq:bayesianCRbound} as a corollary. Let us first focus on a purely classical Bayesian model with prior $p({\boldsymbol{\var}})$ and conditional probability $p_{{\boldsymbol{\var}}}(\tilde{{\boldsymbol{\var}}})$ of measuring outcome $\tilde{{\boldsymbol{\var}}}$ given the true value is ${\boldsymbol{\var}}$.
We will write $p(\tilde{{\boldsymbol{\var}}},{\boldsymbol{\var}}) = p_{{\boldsymbol{\var}}}(\tilde{{\boldsymbol{\var}}}) p({\boldsymbol{\var}})$ to denote the joint probability distribution---note that we avoid using $p(\tilde{{\boldsymbol{\var}}}|{\boldsymbol{\var}})$ for conditional probability in order not to have different notation in Bayesian and frequentist approaches for the same quantity. In what follows $\mathbb{E}$ denotes expectation value with respect to $p(\tilde{{\boldsymbol{\var}}},{\boldsymbol{\var}})$ so \begin{equation} \mathbb{E}[f]=\iint f(\tilde{\boldsymbol{\var}},{\boldsymbol{\var}}) p(\tilde{{\boldsymbol{\var}}},{\boldsymbol{\var}}) d\tilde{\boldsymbol{\var}} d{\boldsymbol{\var}}. \end{equation} Let us introduce a $\mathfrak{p}\times\mathfrak{p}$ matrix $Q({\boldsymbol{\var}})$ and define vectors: \begin{equation} \bold A=\tilde{\boldsymbol{\var}}-{\boldsymbol{\var}}, \quad \bold B=\frac{1}{p(\tilde{{\boldsymbol{\var}}},{\boldsymbol{\var}})}\left[\boldsymbol{\nabla}^T\left(Q({\boldsymbol{\var}}) p(\tilde{{\boldsymbol{\var}}},{\boldsymbol{\var}})\right)\right]^T. \end{equation} Using the Cauchy-Schwarz inequality for vector functions $\sqrt{C}\bold{A},\sqrt{C^{-1}}\bold{B}$ we get: \begin{equation} \begin{split} \label{CSb} \mathbb{E}[\bold A^TC \bold A]\mathbb{E}[\bold B^TC^{-1} \bold B]&\geq\mathbb{E}[\bold B^T\bold A]^2. \end{split} \end{equation} The first term is simply the Bayesian cost: \begin{equation} \mathbb{E}[\bold A^TC\bold{A}]=\overline\mathcal{C}.
\end{equation} The second one: \begin{multline} \mathbb{E}[\bold{B}^TC^{-1}\bold{B}]= \iint d{\boldsymbol{\var}} d\tilde{\boldsymbol{\var}} \frac{1}{p(\tilde{{\boldsymbol{\var}}},{\boldsymbol{\var}})}\boldsymbol{\nabla}^T\left(Q({\boldsymbol{\var}}) p(\tilde{{\boldsymbol{\var}}},{\boldsymbol{\var}})\right)C^{-1}\left[\boldsymbol{\nabla}^T\left(Q({\boldsymbol{\var}})p(\tilde{{\boldsymbol{\var}}},{\boldsymbol{\var}})\right)\right]^T=\\ \int \tracep\left(Q({\boldsymbol{\var}})C^{-1} Q({\boldsymbol{\var}})^T \underbrace{\int\frac{1}{p_{{\boldsymbol{\var}}}(\tilde{{\boldsymbol{\var}}})}\boldsymbol{\nabla}p_{{\boldsymbol{\var}}}(\tilde{{\boldsymbol{\var}}})\boldsymbol{\nabla}^Tp_{{\boldsymbol{\var}}}(\tilde{{\boldsymbol{\var}}}) \t{d}\tilde{\boldsymbol{\var}}}_{F({\boldsymbol{\var}})} \right) p({\boldsymbol{\var}})\t{d}{\boldsymbol{\var}}+\\ \tracep\left(C^{-1}\underbrace{\int \frac{1}{p({\boldsymbol{\var}})}\left[\boldsymbol{\nabla}^T(Q({\boldsymbol{\var}})p({\boldsymbol{\var}}))\right]^T[\boldsymbol{\nabla}^T (Q({\boldsymbol{\var}})p({\boldsymbol{\var}}))]d{\boldsymbol{\var}}}_{I_Q}\right), \end{multline} where we have used the Leibniz rule and the fact that thanks to the normalization of the conditional probability $\int \t{d} \tilde{{\boldsymbol{\var}}} \nabla p_{{\boldsymbol{\var}}}(\tilde{{\boldsymbol{\var}}}) = 0$ (which makes cross-terms vanish). Here $F({\boldsymbol{\var}})$ is the classical FI calculated for a given value of ${\boldsymbol{\var}}$. Finally, \begin{equation} \mathbb{E}[\bold{B}^T\bold{A}]=\iint d{\boldsymbol{\var}} d\tilde{\boldsymbol{\var}}\boldsymbol{\nabla}^T\left(Q({\boldsymbol{\var}})p(\tilde{{\boldsymbol{\var}}},{\boldsymbol{\var}})\right)(\tilde{\boldsymbol{\var}}-{\boldsymbol{\var}}) =\int \tracep(Q({\boldsymbol{\var}}))p({\boldsymbol{\var}})\,d {\boldsymbol{\var}} = \overline{\tracep(Q)}, \end{equation} where we have applied integration by parts assuming the prior distribution vanishes at the boundaries.
Therefore, from \eqref{CSb} we have: \begin{equation} \label{baygen} \overline{\mathcal{C}} \geq \frac{\left(\overline{\tracep(Q)}\right)^2}{\overline{\tracep(C^{-1} Q^T F Q )}+\tracep(C^{-1} I_Q)}. \end{equation} For $Q({\boldsymbol{\var}})=\sqrt{C}(\overline{F} + I)^{-1}\sqrt{C}$ we recover the standard Bayesian CR inequality \cite{Gill1995} \begin{equation} \label{vantree} \overline{\mathcal{C}} = \tracep(\overline{\Sigma} C)\geq\tracep [C\left(\overline{F}+I\right)^{-1}],\quad I=\int \frac{1}{p({\boldsymbol{\var}})}\boldsymbol{\nabla}p({\boldsymbol{\var}})\boldsymbol{\nabla}^Tp({\boldsymbol{\var}})d{\boldsymbol{\var}}. \end{equation} As the inequality works for any cost matrix $C$, it is in fact equivalent to a matrix inequality: \begin{equation} \overline{\Sigma} \geq\left(\overline{F}+I\right)^{-1}. \end{equation} In a quantum model we have $p_{{\boldsymbol{\var}}}(\tilde{{\boldsymbol{\var}}}) = \trace(\rho_{{\boldsymbol{\var}}} M_{\tilde{{\boldsymbol{\var}}}})$ and hence $F({\boldsymbol{\var}})$ depends on the measurement $\{M_{\tilde{\boldsymbol{\var}}}\}$ chosen. In order to arrive at a universally valid bound independent of the choice of measurement, one may use the matrix inequality involving the QFI matrix $F_Q({\boldsymbol{\var}})\geq F({\boldsymbol{\var}})$ and arrive at \eqref{eq:bayesianCRbound}.
If instead we take $Q({\boldsymbol{\var}})=F({\boldsymbol{\var}})^{-1}C$ then from \eqref{baygen} we have \begin{equation} \label{eq:bayesianhcr} \overline{\mathcal{C}} \geq \frac{\left(\overline{\tracep(C F^{-1})}\right)^2}{\overline{\tracep(C F^{-1})}+R} \geq \overline{\tracep(C F^{-1})}-R, \end{equation} where \begin{equation} R= \int \frac{1}{p({\boldsymbol{\var}})}\tracep\left(C \left[\boldsymbol{\nabla}^T(F({\boldsymbol{\var}})^{-1}p({\boldsymbol{\var}}))\right]^T[\boldsymbol{\nabla}^T (F({\boldsymbol{\var}})^{-1}p({\boldsymbol{\var}}))]\right)\t{d}{\boldsymbol{\var}} \end{equation} and where in the last inequality in \eqref{eq:bayesianhcr} we have used $\frac{a^2}{a+b}\geq a-b$. Finally, we may use the HCR bound $\tracep(C F({\boldsymbol{\var}})^{-1}) \geq \mathcal{C}^{\t{H}}({\boldsymbol{\var}})$ to obtain a `Bayesian Holevo CR bound': \begin{equation} \label{eq:bayesianhcr2} \overline{\mathcal{C}} \geq \overline{\mathcal{C}^{\t{H}}({\boldsymbol{\var}})}-R. \end{equation} Note that this bound is still measurement dependent since $R$ depends on the FI matrix $F({\boldsymbol{\var}})$ corresponding to some measurement. The bound will be valid whatever the measurement chosen, yet the choice may affect the tightness of the bound---see \cite{Gill2005} where some further improvement of the bound was proposed. Now consider many copies of a system. Then we have $\mathcal{C}^H({\boldsymbol{\var}})^n =\frac{1}{n}\mathcal{C}^H({\boldsymbol{\var}})$, $F^n({\boldsymbol{\var}})=\frac{1}{n}F({\boldsymbol{\var}})$ and from that $R^n=\frac{1}{n^2}R$. Therefore \begin{equation} \mathcal{C}^n \geq \frac{1}{n}\overline{\mathcal{C}^H({\boldsymbol{\var}})}-\frac{1}{n^2}R \end{equation} and in the limit of large $n$ the second term may be neglected resulting in the asymptotic bound \cite{Gill2005} \begin{equation}\label{eq.BayesH} \lim_{n\to \infty} n\mathcal{C}^n \geq \overline{\mathcal{C}^H({\boldsymbol{\var}})}.
\end{equation} One can see that for large sample size the impact of prior knowledge becomes negligible, and the asymptotic Bayesian cost is bounded by the mean value of the HCR, which is in agreement with our observation from Sec.~\ref{sec:bayesianqubit} that the asymptotic form of the Bayesian cost was actually \emph{equal} to the average HCR bound in the qubit models we have studied. We conjecture that under appropriate model and prior regularity conditions, the asymptotic equivalence of Bayes and frequentist costs holds generally for finite dimensional systems with fully mixed states. The QLAN theory described in Sec.~\ref{sec:qlan} has already been used to establish the achievability of the bound \eqref{eq.BayesH} for the qubit case \cite{GutaKahn, GutaJanssensKahn} and should be the appropriate tool for obtaining similar results for more general finite dimensional models. \section{Summary and outlook} \label{sec:summary} In this review we discussed various theoretical methods for quantum multi-parameter estimation going beyond the mere computation of the QFI matrix and the related SLD CR bound. We have seen that the HCR bound, the QLAN theory as well as Bayesian methods offer an advantage over the QFI by being able to address the issue of potential incompatibility of measurements that are optimal from the point of view of extracting information on different parameters. We have also pointed out the key contribution of the QLAN approach which led to the realization that the HCR bound (which predates QLAN by many years) is the actual asymptotically saturable bound in a quantum estimation problem involving many independent copies. Moreover, when discussing the Bayesian approach we stressed that it offers more insight into the finite number of samples regime and is capable of incorporating the prior knowledge into the estimation process.
Importantly, both Bayesian and frequentist approaches agree in the asymptotic limit of multiple copy estimation under appropriate regularity conditions. An often overlooked assumption which is crucial for both the CR theory and LAN is that the unknown parameter is an interior point of the parameter space, and does not lie on its boundary. However, interesting states such as low rank states often do lie on the boundary, in which case asymptotic normality generally fails \cite{BlumeKohout}. Although QLAN may still be useful, the corresponding asymptotic theory is less well understood \cite{GutaAcharya}. A related problem is that certain natural distances such as the Bures distance do not have a quadratic approximation, which leads to estimation cost scaling with the `anomalous' rate $1/\sqrt{n}$ for \emph{fixed, separate} measurements and $1/n$ for \emph{adaptive} measurements \cite{Mahler,Ferrie,GranadeFerrie}. In parallel to the developments in `optimality' theory, there has been significant interest in practical methods for estimation of large dimensional systems under sparsity assumptions such as \emph{low rank} \cite{gross_2010, flammia_2012,kueng_2017,guta_2018}, finite correlations \cite{cramer_2010}, or permutational symmetry \cite{toth_2010}. Another important developing direction with relevance for quantum metrology concerns the finite sample (non-asymptotic) theory, e.g. estimation bounds \cite{Haah2017,odonnel2016} and confidence regions \cite{christandl_reliable_2012, blume-kohout2012, faist_practical_2016}. We have focused on methods of multi-parameter estimation and have not entered into the more physical and practical aspects characteristic of the field of quantum metrology \cite{Giovaennetti2006, Paris2009, Toth2014, Demkowicz2015, Dowling2015, Pezze2018, Pirandola2018, Braun2018}. In particular, the optimal $n$-probe states that appear in quantum metrological considerations are often entangled and therefore go beyond the i.i.d.
setting on which we have focused our attention in this review. In general the QLAN methods cannot be directly applied in these cases, the asymptotic limit does not necessarily imply saturability of the HCR bound, whereas the Bayesian methods, while in principle applicable, are typically too challenging to yield a closed rigorous solution. In the single parameter case, most of these issues have been resolved in recent years. Efficient methods to compute asymptotically saturable bounds in quantum metrological scenarios in the presence of uncorrelated decoherence models have been developed \cite{Fujiwara2008, Escher2011, Demkowicz2012, Kolodynski2013, Demkowicz2014, Knysh2014, Demkowicz2017, zhou2018achieving}. Interestingly, in the presence of typical decoherence models the asymptotic optimal cost in quantum metrological protocols will scale as $1/n$ in a similar manner as in the multi-copy (independent) setups and hence some of the claims proven in the latter case generalize to these models as well. This includes asymptotic saturability as well as asymptotic equivalence between Bayesian and frequentist approaches \cite{Jarzyna2015, Yan2018}. The optimal states require only short-range entanglement structures and can be to some extent regarded as close to i.i.d. models. In particular they may be effectively described using the matrix product states formalism \cite{Jarzyna2013, Chabuda2020}. Interestingly, matrix product states are also closely related to the input-output formalism describing the Markov evolution of an open system interacting with a bath modelled as quantum Bosonic noise. The quantum Fisher information of the output process has been studied in \cite{Molmer2014,GutaGarrahan} and the QLAN and information geometry theory were established in \cite{GutaKiukas1,GutaKiukas2,GutaBoutenCatana}. On the other hand, for a special class of quantum metrological models involving unitary parameter estimation (e.g.
phase), where the effect of noise can be either neglected or effectively mitigated via application of e.g. quantum error correction techniques \cite{Kessler2014a, Arrad2014, Dur2014, Demkowicz2017, sekatski2017quantum, zhou2018achieving, Layden2019, Gorecki2019}, one may in principle reach the Heisenberg scaling where the effective quadratic cost scales as $1/n^2$. In this case, the relevant states and results are far from the i.i.d. setup and there is even no guarantee of asymptotic saturability of the SLD CR bound in the single parameter case. In fact, in an effectively noiseless unitary single-parameter estimation model there is a $\pi$ constant factor discrepancy between asymptotically achievable Bayesian cost and the frequentist bound \cite{Gorecki2019a}. Very few of the above mentioned methods and results in quantum metrology have been satisfactorily generalized to multi-parameter scenarios. There are cases where the character of the problem allows for a direct generalization of single-parameter metrological bounds \cite{Tsang2011, Ragy2016, Baumgratz2016, Vidrighin2014, Pezze2017, Gessner2018} but in general such a procedure will typically lead to loose bounds that do not account for measurement incompatibility as well as trade-offs between the probe states optimally sensing various parameters. An example where such a single-to-multiple parameter generalization has been successfully realized is the generalization of quantum error-correction schemes that yield optimal quantum metrological protocols provided the character of decoherence allows for the Heisenberg scaling to be preserved \cite{Gorecki2019}. The open question is whether the same can be done for generic multi-parameter metrological models where the character of decoherence allows only for a constant factor improvement over the i.i.d. scenarios.
We speculate that matrix product states and QLAN may offer some deeper insight into the structure of optimal probe states and the resulting achievable precision. The other open avenue, which is challenging already on the single parameter level, is to develop efficient methods to deal with quantum metrological models involving spatially or temporally correlated noise---some single parameter models of this kind have been analyzed in the literature \cite{Jeske2014, Frowis2014, Layden2019, Chabuda2020}, but this research is far from complete, not to mention its generalization to the multi-parameter case. Let us finish this review with a more lightweight and a slightly philosophical remark. There is an anecdote related to Bohr's obsessive use of the notion of complementarity in quantum mechanics. At some point, von Neumann made a remark wondering why Bohr keeps on talking about \emph{two} non-commuting variables, saying: `Well, there are many things which do not commute and you can easily find three operators which do not commute' \cite{Jammer1966}. In light of asymptotic normality, we see that maybe Bohr was right after all! (at least asymptotically). Indeed in the limit of large ensembles, local transformations of quantum states may be equivalently viewed as either being classical (change in eigenvalues) or generated by complementary observables equivalent to position and momentum operators of quantum harmonic oscillators. We may just wonder whether this insight has some deeper implications for our understanding of quantum mechanics as a whole as well as the problem of quantum-to-classical transition. \providecommand{\newblock}{} \end{document}
\begin{document} \title[A TQFT extending the RT Theory]{A 2-categorical extension of the Reshetikhin--Turaev theory} \author{Yu Tsumura} \email{[email protected]} \address{Department of Mathematics, Purdue University, West Lafayette IN 47907, USA} \begin{abstract} We concretely construct a 2-categorically extended TQFT that extends the Reshetikhin-Turaev TQFT to cobordisms with corners. The source category will be a well chosen 2-category of decorated cobordisms with corners and the target bicategory will be the Kapranov-Voevodsky 2-vector spaces. \end{abstract} \maketitle \section{Introduction} One of the great breakthroughs in the understanding of physical theories was the construction of Reshetikhin--Turaev (2+1)-dimensional topological quantum field theories \cite{Turaev10, MR1091619}. Prior to this, Atiyah axiomatized a TQFT in \cite{MR1001453}. The simpler (1+1)-dimensional theory was nicely formulated by \cite{Dijkgraafthesis}. The latter construction was lifted to a conformal field theory by Segal \cite{MR981378}. In all these cases, one has a functor from a cobordism category to an algebraic category. Going back to Freed and Quinn \cite{MR1240583}, Cardy and Lewellen \cite{MR1107480} , there has been an interest in including boundary conditions/information. Besides the physical challenges, this poses a mathematical problem as both the geometric and the algebraic category need to be moved into higher categories. Naively, a cobordism with corners is a 2-category, by viewing the corners as objects, the boundaries as cobordisms between them and the cobordism as a cobordism of cobordisms. The devil is of course in the details. There have been several approaches to the problem such as Lurie's approach \cite{Lurie2009}, \cite{MR2648901}, \cite{MR2713992} and the project \cite{DSS}. 
Taking a step back to the (1+1)-dimensional situation, the TQFTs with boundaries have been nicely characterized and have given rise to new axioms such as the Cardy axiom, see e.g.\ \cite{MR2395583} for a nice introduction or \cite{MR2242677} for a model free approach. Here the objects are not quite cobordisms with corners, but more simply surfaces with boundaries and points on the boundary. In this paper we will give a constructive solution to the problem by augmenting the setup of the Reshetikhin--Turaev theory. We give a very careful treatment and check all the details. In order to do this we use an algebraic and a geometric 2-category and define a 2-functor (with anomaly) between them. The source geometric 2-category $\mathbb{C}o$ is constructed extending the category of decorated cobordisms of the RT TQFT. The target algebraic (weak) 2-category is the Kapranov-Voevodsky 2-vector spaces $2\-\mathrm{Vect}$ defined in \cite{KV1994}. Great caution has to be used in the definition of gluing of decorated cobordisms with corners. As objects and 1-morphisms, we fix the \textit{standard circles} and the \textit{standard surfaces}. Thus, the 2-category $\mathbb{C}o$ on the level of objects and 1-morphisms is combinatorial. The topological nature of $\mathbb{C}o$ lies in the 2-morphism level. A 2-morphism of $\mathbb{C}o$ is a decorated 3-manifold with corners. One of the properties of a decorated 3-manifold is that the boundary of the manifold is parametrized. Namely, there are several embeddings from the standard surfaces to the boundary of the manifold. This is where we need to be careful. The gluing of standard surfaces is not a standard surface but homeomorphic to it. Therefore to define horizontal composition of 2-morphisms, we need to choose and fix a homeomorphism between these spaces with caution.
Then we apply the original RT TQFT. To prove the functoriality of this 2-functor, we develop a technique of representing cobordisms by special ribbon graphs and reduce calculations on manifolds to calculations on special ribbon graphs. Our work is intended as a bridge between several subjects. The presentation of the categories was chosen to match the considerations of string topology \cite{MR2597733} in the formalism of \cite{MR2314100,MR2411420}, which will hopefully lead to some new connections between the two subjects. Once the classification of \cite{DSPVB} is available, it will be interesting to see how our concrete realization of 3-2-1 TQFT fits. Their results and those of \cite{DSS} are for a different target category. We will provide a link in Section \ref{sec:comments}. Further studies of 3-2-1 extensions but for Turaev--Viro are in \cite{Turaev2010} and in \cite{BalsamI, BalsamII, BalsamIII}. It would be interesting to relate these to our construction using the relationship between the RT and the TV invariants \cite{Turaev2010}. The paper is organized as follows. In Section \ref{sec:A 2-category of cobordisms with corners}, we will introduce the 2-category $\mathbb{C}o$. The 2-category $\mathbb{C}o$ is our choice of a 2-category of decorated cobordisms with corners. In Section \ref{sec:A 2-category of the Kapranov-Voevodsky 2-vector spaces}, we will recall the Kapranov-Voevodsky 2-vector spaces 2-$\mathcal{V}ect$ as the target 2-category of the extended TQFT. Before we start the discussion of an extension of the RT TQFT to the cobordisms with corners, we will review some of the original construction of the Reshetikhin-Turaev TQFT in Section \ref{sec:Review and Modification of the Reshetikhin-Turaev TQFT} since we will extensively use the original theory. This will also serve as a quick reference for the notations and definitions of \cite{Turaev10}.
In Section \ref{sec:An extended TQFT}, we construct an extended TQFT $\mathcal{X}$ from $\mathbb{C}o$ to $2\-\mathrm{Vect}$ and all the details of several compatibilities of gluings, which is the main part of the paper, will be proved in Section \ref{sec:Main Theorem}. We also show that this is indeed an extension by showing that when it is restricted to regular cobordisms it produces the RT TQFT. In the Appendix, we review B\'{e}nabou's definition of a bicategory and a pseudo 2-functor \cite{MR0220789} and extend it to the definition of \textit{projective pseudo 2-functor}. \section*{Acknowledgments} I would like to show my greatest appreciation to Professor Ralph M.\ Kaufmann whose advice, guidance, and suggestions were of inestimable value for my study. I also owe a very important debt to Professor Alexander A.\ Voronov and Professor Christopher Schommer-Pries for their interest in the current paper and valuable feedback. I would like to thank the referee for carefully reading my manuscript and for giving such constructive comments which substantially helped to improve the quality of the current paper. \section{A 2-category of cobordisms with corners}\label{sec:A 2-category of cobordisms with corners} In this section we define a 2-category of decorated cobordisms with corners $\mathbb{C}o$ which is the source 2-category of our extended TQFT as a projective pseudo 2-functor. Let us explain the outline of our construction of a 2-category of decorated cobordisms with corners $\mathbb{C}o$. We will give the precise definitions later. The objects are standard circles. The 1-morphisms are standard surfaces with boundaries. The 2-morphisms are decorated 3-manifolds with corners with parametrized boundaries. In the literature, there are many kinds of definitions of a 2-category of cobordisms with corners but as far as the author knows there has not been a definition of a 2-category of decorated cobordisms with corners.
(Remark: We found that the definition given by Kerler and Lyubashenko \cite{MR1862634} is close to our definition. They use a double category instead of a 2-category.) One of the differences between our 2-category of cobordisms with corners and others is that 2-morphism cobordisms are parametrized. This means that we fix standard surfaces and each cobordism is equipped with homeomorphisms from standard surfaces to its boundary components. The difficulty with standard surfaces is that the composite of two standard surfaces is not a standard surface. Thus we need to deal with compositions carefully. Even though we choose to use standard circles and standard surfaces as our objects and 1-morphisms of $\mathbb{C}o$, the essence is combinatorial. Namely, we only need the number of components of circles and the \textit{decorated type} of a surface. Topological information lives in the level of 2-morphisms. Thus our definition of $\mathbb{C}o$ can be regarded as a geometric realization of combinatorial data on objects and 1-morphisms. See the table below. \begin{center} \begin{tabular}{ | l | l | l | } \hline $\mathbb{C}o$ & Geometric realization & Combinatorial data \\ \hline Objects & Standard circles & Integers \\ \hline 1-morphisms & Standard surfaces & Decorated types \\ \hline 2-morphisms & Classes of decorated cobordisms with corners & \\ \hline \end{tabular} \end{center} In our setting, surfaces are restricted to connected ones. This restriction makes the theory simpler and allows it to fit rigorously into a 2-categorical setting. Also, to avoid non-connected surfaces, we introduce two formal objects $_*\emptyset$ and $\emptyset_*$, which we call the left and the right empty sets. As just a 2-category of cobordisms with corners, including non-connected surfaces is natural and simpler. However, including general non-connected surfaces makes it complicated to construct an extended TQFT.
In fact, our technique of representing cobordisms by special ribbon graphs does not generalize to the case of non-connected surfaces. For this reason, the non-connected surfaces will be dealt with in a future paper. Now we are going to explain the rigorous definitions. Along with doing so, we need to modify and extend several definitions used in the Reshetikhin-Turaev TQFT. We define data $\mathbb{C}o$ consisting of objects, 1-morphisms, and 2-morphisms. It will be shown that the data $\mathbb{C}o$ is indeed a 2-category. \subsection{Objects of $\mathbb{C}o$} Let us consider the 1-dimensional circle $S^1=\{(x, y) \in \mathbb{R}^2 \mid x^2+y^2=1\}$ in $\mathbb{R}^2$. We call the pair $(S^1, (0,-1))$ the \textit{ 1-dimensional standard circle}. We often omit the point $(0, -1)$ in the notation and just write $S^1$. For each natural number $n$, the ordered disjoint union of $n$ 1-dimensional standard circles is called the \textit{ $n$-standard circles}. We denote the $n$-standard circles by $S^{\sqcup n}:=(S^1, i_1=(0,-1))\sqcup \cdots \sqcup (S^1, i_n=(0,-1))$, where $i_k$ is called the \textit{$k$-th base point} for $k=1, \dots, n$. In general, a pair of a connected manifold and a point of the manifold is called a \textit{pointed} manifold and the specified point is called the \textit{base point}. The disjoint union of several pointed manifolds is called a multi-pointed manifold. Thus the $n$-standard circles are a multi-pointed manifold. \begin{Definition} We define an \textit{object} of the data $\mathbb{C}o$ to be the $n$-standard circles $S^{\sqcup n}$ for each natural number $n$. We also include two formal symbols ${_* \emptyset}$ and $\emptyset_*$. We call them the left and the right empty set, respectively. These two formal symbols are needed to confine ourselves to connected surfaces. \end{Definition} \subsection{1-Morphisms of $\mathbb{C}o$} In the previous section we defined the objects of $\mathbb{C}o$.
Now we define a 1-morphism between two objects of $\mathbb{C}o$. A 1-morphism will be defined to be a standard surface, which we define below. First, we define decorated types and decorated surfaces needed to define the standard surfaces. \subsubsection{Decorated types and decorated surfaces} Already Reshetikhin-Turaev's construction uses surfaces that are decorated by objects of a modular tensor category. We extend their definition to include surfaces with boundaries. Fix a modular category $\mathcal{V}$. Let \begin{equation}\label{equ:type} t=(m,n; a_1, a_2, \dots, a_p) \end{equation} be a tuple consisting of non-negative integers $m$, $n$, and for $i=1, \dots, p$, $a_i$ is either a non-negative integer or a pair $(W, \nu)$, where $W$ is an object of the modular category $\mathcal{V}$ and $\nu$ is either $1$ or $-1$. Such a pair is called a \textit{mark} or a \textit{signed object} of $\mathcal{V}$. The tuple $t$ is called a \textit{decorated type} or, when confusion is unlikely, we simply call it a \textit{type}. Let $L(t)=m$ and $R(t)=n$ denote the first and the second integer of the type $t$, respectively. By an \textit{arc} on a surface $\Sigma$, we mean a simple oriented arc lying in $\Sigma \setminus \partial \Sigma$. An arc on $\Sigma$ endowed with an object $W$ of $\mathcal{V}$ and a sign $\nu=\pm 1$ is said to be \textit{marked}. A connected compact orientable surface $\Sigma$ is said to be \textit{decorated} by a decorated type $t=(m,n; a_1, a_2, \dots, a_p)$ if the following conditions are satisfied. \begin{enumerate} \item There are $m+n$ boundary components of $\Sigma$ and the boundary components are totally ordered. The first $m$ components are called \textit{inboundary} or \textit{left boundary} and the last $n$ components are called \textit{outboundary} or \textit{right boundary}. \item The boundary $\partial \Sigma$ is a multi-pointed manifold.
\item For each signed object entry $a_i$, the surface $\Sigma$ is equipped with a marked arc with the mark $a_i$. \item The genus of $\Sigma$ is the sum of all the integer components $a_i$ except for the first and the second integers. \end{enumerate} A \textit{$d$-homeomorphism} of decorated surfaces is a degree 1 homeomorphism of the underlying surfaces preserving the order of boundary components, base points, orientation, the distinguished arcs together with their orientations, marks, and order. There is a natural \textit{negation} of the structure on a decorated surface. First, for a type $t=(m,n; a_1, a_2, \dots, a_p)$ we define its opposite type $-t=(m,n; b_1, b_2,\dots, b_p)$ as follows. If $a_i$ is an integer entry, then let $b_i=a_i$. If $a_i=(W,\nu)$ is a mark, then let $b_i=(W, -\nu)$. For a decorated surface $\Sigma$, its opposite decorated surface $-\Sigma$ is obtained from $\Sigma$ by reversing the orientation of $\Sigma$, reversing the orientation of its distinguished arcs, and multiplying the signs of all distinguished arcs by $-1$ while keeping the labels and the order of these arcs. Note that the decorated type of $-\Sigma$ is the opposite type of $\Sigma$. \subsubsection{Remark}\label{subsec:Remark} In the Reshetikhin-Turaev theory for the cobordisms without corners, a decorated type is denoted by \[t_{\text{RT}}=(g; (W_1, \nu_1), \dots, (W_m, \nu_m)),\] where $g$ is an integer indicating a genus and $W_i$ is an object of a modular category and $\nu_i$ is either $1$ or $-1$ for $i=1, \dots, m$. In our notation, this decorated type is expressed by the type \[t=(0, 0; (W_1, \nu_1), \dots, (W_m, \nu_m), 1,1,\dots, 1),\] where the number of $1$'s is $g$. Thus our theory includes the RT theory. \subsubsection{Standard surfaces} For each type $t$ we define the standard surface of type $t$. Let $t=(m,n; a_1, a_2, \dots, a_p)$ be a decorated type.
To construct the standard surface, we first define a ``block'' of a ribbon graph, which can be thought of as an elementary core of the standard surface. For a mark $a=(W, \nu)$, the block for $(W, \nu)$ is defined to be a $1\times 1$ square coupon in $\mathbb{R}^2 $ and a length 1 band attached to the top of the coupon and the band is colored by $W$ if $\nu=1$ and $W^*$ if $\nu=-1$. See Figure \ref{fig:blockmark}. \begin{figure} \caption{The block for $(W,\nu)$} \label{fig:blockmark} \end{figure} The block for a positive integer $a$ consists of a $1 \times 1$ square coupon in $\mathbb{R}^2$ and rainbow-like bands with $a$ bands on the top of the square. These bands are not colored and their cores are oriented from right to left. See Figure \ref{fig:blockrainbow}. \begin{figure} \caption{The block for an integer} \label{fig:blockrainbow} \end{figure} For the first entry integer $m$ of the type $t$, the block for $m$ is defined as in the left figure of Figure \ref{fig:side ribbons}. There are $m$ bands attached to the top of the square and the bands are bent so that the ends of bands have the same $x$-coordinates as in the figure. Similarly, for the second entry integer $n$ of the type $t$, the block for $n$ is defined as in the right figure of Figure \ref{fig:side ribbons}. For each integer, the left and the right ribbon graphs in Figure \ref{fig:side ribbons} are mirror reflections of each other with respect to the $y$-axis. \begin{figure} \caption{The block for the first and the second integers} \label{fig:side ribbons} \end{figure} Now let $R_t$ be a ribbon graph in $\mathbb{R}^3$ constructed by arranging, in the strip $\mathbb{R} \times 0 \times [-1, 1] \subset \mathbb{R}^3$, the block for $m$ so that the top left corner is at $(0,0,0)$ and the block for $a_i$ so that the top left corner of the square of the block is located at $(i,0,0)$ for $i=1,\dots, p$ and the block for $n$ so that the top left corner is at $(p+1,0,0)$.
We delete the joint segments of the coupons and make it a single coupon with length $p+2$. Let $R_t$ denote the resulting ribbon graph. See Figure \ref{fig:Rtnew} for an example of $R_t$ with the type \[t=(2,3; (W_1, \nu_1), 1, (W_2, \nu_2),3, (W_3, \nu_3),2).\] \begin{figure} \caption{The ribbon graph $R_t$} \label{fig:Rtnew} \end{figure} Let $l$ be the number of entries in the type $t$, which is the width of the coupon in $R_t$. Fix a closed regular neighborhood $U_t$ of the ribbon graph $R_t$ in the strip $[0,l]\times \mathbb{R} \times [-2,1] \subset \mathbb{R}^3$. We provide $U_t$ with right-handed orientation and provide the boundary surface $\partial U_t$ with the induced orientation. By shrinking the coupon slightly, we may assume that the graph $R_t$ intersects $\partial U_t$ only at the ends of the short bands. If a band has a mark $(W_i, \nu_i)$, provide the intersection arc with this mark. The surface $\partial U_t$ with these intersection arcs with marks and $m+n$ non-marked arcs is called the \textit{capped standard surface} for the type $t$ and denoted by $\hat{\Sigma}_t$. Fix an embedding of the disjoint union of $m+n$ 2-dimensional disks $D^2$'s into $\hat \Sigma_t$ so that each boundary circle encloses exactly one non-marked arc of $\hat \Sigma_t$. Each image of $[-1/2,1/2]\subset D^2$ is one of the arcs. Cutting out the image of the interior of these disks we obtain a surface with marked arcs and boundary. Each boundary component has a base point which is an image of $(0, -1)$. We assume that the intersection of planes $\{0\}\times \mathbb{R}^2$ and $\{l\}\times \mathbb{R}^2$ with $U_t$ are those embedded disks. The resulting surface is called the \textit{standard surface} of type $t$ and denoted by $\Sigma_t$.
The boundary components of $\Sigma_t$ corresponding to the uncolored left $m$ bands are called the left boundary and denoted by $\partial_{L} \Sigma_t$ and the boundary components corresponding to the uncolored right $n$ bands are called the right boundary and denoted by $\partial_{R} \Sigma_t$. The left boundary circles are ordered according to the order of the left bands ordered from left to right. The right boundary circles are ordered according to the order of the right bands ordered from right to left. The 3-manifold $U_t$ with the ribbon graph $R_t$ sitting inside $U_t$ is called the \textit{standard handle body} for type $t$. \begin{figure} \caption{The standard handle body and embedded disks} \label{fig:capped standard handlebody} \end{figure} Analogously, consider the mirror reflection $-R_t:=\mathrm{mir}(R_t)$, where $\mathrm{mir}:\mathbb{R}^3 \to \mathbb{R}^3$ is a reflection with respect to a plane $\mathbb{R}^2\times \{1/2\} \subset \mathbb{R}^3$. Set $U_t^-=\mathrm{mir}(U_t)$. We provide $U_t^-$ with right-handed orientation and provide $\partial(U_t^-)$ with the induced orientation. For the $i$-th arc of the intersection $-R_t \cap \partial (U_t^-)$, we assign marks $(W_i, -\nu_i)$. Set $\Sigma_t^-:=\partial U_t^-$. If we confine ourselves to closed surfaces, the definition of the standard surfaces are minor modification of that of Turaev's. For our purpose, we need to consider gluings of surfaces along boundaries. Thus we need to deal with the composition of these data we defined. Two types $t=(l,m; a_1, a_2, \dots, a_p)$ and $s=(m',n; b_1, b_2, \dots, b_q)$ are said to be \textit{composable} if $m=m'$. If they are composable, the composition of $t$ and $s$ is defined to be \begin{equation}\label{equ:composition of types} t\circ s =(l,n; a_1, a_2, \dots, a_p, m-1, b_1, b_2,\dots, b_q). 
\end{equation} As we need it later, we also define $D_n$ (Figure \ref{fig:Dn}) to be the disjoint union of $n$ cylinder $D^2\times [0, 1]$, where $D^2=\{ (x,y)\in \mathbb{R}^2 \mid x^2+y^2 \leq 1 \}$, with an uncolored untwisted band $[-1/2, 1/2] \times [0, 1]$ in each cylinder that only intersects with the boundary of the cylinder at the bottom disk $D^2 \times \{0\}$ and the top disk $D^2 \times \{1\}$ transversally. Let $C(n)=\sqcup_n \partial (D^2) \times [0,1]$. The space $C(n)$ is the boundary of $D_n$ minus the interior of the union of the top boundary $\sqcup_n \partial(D^2) \times \{1\}$ and the bottom boundary $\sqcup_n \partial(D^2) \times \{0\}$. The points in the boundary of $C(n)$ corresponding to the point $(0,-1) \times \{0\}$ and $(0,-1)\times \{1\}$ in $D^2 \times [0,1]$ are base points of $C(n)$. We provide $D_n$ with right-handed orientation and provide the boundary surface $C(n)$ with the induced orientation. Let $\mathrm{ref}:D_n \to D_n$ be an orientation reversing homeomorphism that is induced by the map sending $(x, y)\times \{t\}$ to $(-x, y) \times \{t\}$ in $D^2 \times [0, 1]$. Thus the map $\mathrm{ref}$ is a reflection map with respect to $y$-$z$ plane in $\mathbb{R}^3$. Restricting on $C(n)$, the map $\mathrm{ref}$ induces an orientation reversing map on $C(n)$, which is also denoted by $\mathrm{ref}$. \begin{figure} \caption{The cylinder $D_n$} \label{fig:Dn} \end{figure} \begin{Definition}[1-morphisms of $\mathbb{C}o$]\label{def:1-morphism of Co} Let $X$ and $Y$ be objects of $\mathbb{C}o$. A 1-morphism from $X$ to $Y$ is defined to be the standard surface $\Sigma_t$ for a decorated type $t$ depending on $X$ and $Y$ as follows. \begin{enumerate} \item If $X=\nstand{m}$ and $Y=\nstand{n}$, then $t=(m, n;a_1, a_2, \dots, a_p)$. \item If $X={_* \emptyset}$ and $Y=\nstand{n}$, then $t=(0, n;a_1, a_2, \dots, a_p)$. \item If $X=\nstand{m}$ and $Y=\emptyset_*$, then $t=(m, 0;a_1, a_2, \dots, a_p)$. 
\item If $X={_* \emptyset}$ and $Y=\emptyset_*$, then $t=(0, 0;a_1, a_2, \dots, a_p)$. \end{enumerate} We add formal identity symbols $\mathrm{id}_n:\nstand{n} \to \nstand{n}$ to the set of 1-morphisms for each natural number $n$. If we agree with the convention that the source object $X=\nstand{0}$ denotes ${_* \emptyset}$ and the target object $Y=\nstand{0}$ denotes $\emptyset_*$, then the definitions (2)--(4) are special cases of (1). \end{Definition} We will explain the role of the formal symbol $\mathrm{id}_n$ later when we discuss compositions of $\mathbb{C}o$. \subsection{2-morphisms of $\mathbb{C}o$} A 2-morphism of $\mathbb{C}o$ will be an equivalence class of a \textit{decorated} cobordism, which we are going to define. Let \[ t=(m,n; a_1, a_2, \dots, a_p) \text{ and } s=(m,n; b_1, b_2, \dots, b_q)\] be types. Let $\Sigma_{t}$ and $\Sigma_{s}$ be 1-morphisms from $\nstand{m}$ to $\nstand{n}$. We define a \textit{decorated cobordism with corner} from $\Sigma_{t}$ to $\Sigma_{s}$ as follows. Consider a compact oriented 3-manifold $M$ whose boundary decomposes into four pieces as \[\partial M= \partial_{B} M \cup \partial_{T} M \cup \partial_{L} M \cup \partial_{R} M,\] such that \begin{enumerate} \item $\partial_{B} M \cap \partial_{T} M=\emptyset$, $\partial_{L} M \cap \partial_{R} M=\emptyset$ \item The intersections $\partial_{B} M \cap \partial_{L} M$ and $\partial_{T} M \cap \partial_{L} M$ consist of $m$ circles, respectively. \item The intersections $\partial_{B} M \cap \partial_{R} M$ and $\partial_{T} M \cap \partial_{R} M$ consist of $n$ circles, respectively. \item The surfaces $\partial_{B} M$ and $\partial_{T} M$ are decorated surfaces of type $-t$ and $-s$, respectively. \item The surfaces $\partial_{L} M$ and $\partial_{R} M$ are multi-pointed surfaces which are homeomorphic to $m$ cylinders over a circle and $n$ cylinders over a circle, respectively. \item The base points of these four surfaces agree on their intersections.
\end{enumerate} A ribbon graph $\Omega$ in $M$ meets $\partial M$ transversely along the distinguished arcs in $\partial_{B} M \cup \partial_{T} M \subset \partial M$ which are bases of certain bands of $\Omega$. Such a manifold $M$ together with a $v$-colored ribbon graph $\Omega$ is said to be \textit{decorated} if the surfaces $\partial_{B} M$, $\partial_{T} M$, $\partial_{L} M$, and $\partial_{R} M$ are \textit{parametrized}. This means that there are $d$-homeomorphisms (for bottom and top) and base point preserving homeomorphisms (for left and right) \[\phi_B:\Sigma_{t} \to -\partial_{B} M,\] \[\phi_T: \Sigma_{s}^{-} \to \partial_{T} M,\] \[\phi_L: C(m) \to \partial_{L} M, \] \[\phi_R: C(n) \to \partial_{R} M. \] We call $\phi=(\phi_B,\phi_T,\phi_L,\phi_R)$ a \textit{parametrization} of $\partial M$ (or $M$). A $d$-homeomorphism of decorated 3-manifolds is a homeomorphism of the underlying 3-manifold preserving all additional structures in question. In the sequel, we often call a $d$-homeomorphism simply a homeomorphism when the domain and the range are decorated cobordisms with corners. We say that such pairs $(M, \phi)$ and $(M', \phi')$ are equivalent if there exists a ($d$-)homeomorphism $f$ from $M$ to $M'$ such that it commutes with the parametrizations: $f\circ\phi_*= \phi'_*$ for $*=B$, $T$, $L$, $R$. This is clearly an equivalence relation. \begin{Definition}[2-morphisms of $\mathbb{C}o$] Let $\Sigma_{t}$ and $\Sigma_{s}$ be 1-morphisms from $\nstand{m}$ to $\nstand{n}$. A \textit{2-morphism} from $\Sigma_{t}$ to $\Sigma_{s}$ is the class $[(M,\phi)]$ of a pair of a decorated cobordism with corners from $\Sigma_t$ to $\Sigma_s$ and its parametrization $\phi$. For each 1-morphism $X$, we add the formal identity symbol $\mathrm{id}_{X}$.
If one of the 1-morphisms is a formal identity 1-morphism $\mathrm{id}_n$, then there is no 2-morphism unless both are formal identity 1-morphisms, and in this case there is only one formal identity 2-morphism $\mathrm{id}_{\mathrm{id}_n}$. \end{Definition} Let $(M,\phi)$ be a representative of the 2-morphism $[(M,\phi)]$ from $\Sigma_t$ to $\Sigma_s$. We define the \textit{standard boundary} $\Sigma(\phi)$ for the parametrization $\phi$ to be the surface obtained from $\Sigma_t$, $\Sigma_s^-$, $C(m)$, and $C(n)$ by identifying the boundaries via homeomorphisms of boundaries \[g_{BL}:=\phi_L^{-1}\circ \phi_B |_{\partial_{L} \Sigma_t},\] \[g_{BR}:=\phi_R^{-1}\circ \phi_B |_{\partial_{R} \Sigma_t},\] \[g_{TL}:=\phi_L^{-1}\circ \phi_T |_{\partial_{L} \Sigma^{-}_s},\] \[g_{TR}:=\phi_R^{-1}\circ \phi_T |_{\partial_{R} \Sigma^{-}_s}.\] Hence \[\Sigma(\phi)=(\Sigma_t \sqcup \Sigma^{-}_s) \cup_{\text{glue}}( C(m) \sqcup C(n)),\] where ``glue'' means the identification of boundaries by the homeomorphisms $g_{BL}$, $g_{BR}$, $g_{TL}$, $g_{TR}$. Then the parametrization $\phi$ of $M$ induces the homeomorphism, also denoted by $\phi$, from $\Sigma(\phi)$ to $\partial M$. In addition to decorated 3-manifolds with specific parametrizations, we will add formal identities to the set of 2-morphisms. The details are explained below when we deal with compositions. We now introduce the notion of \textit{isotopy} in decorated cobordisms with corners. Recall that if $\Sigma$ is a parametrized $d$-surface, then the cylinder $\Sigma \times [0, 1]$ has a natural structure of a decorated cobordism. \begin{Definition} Let $\phi$ and $\phi'$ be two parametrizations of $M$. We say that $\phi=(\phi_B, \phi_T,\phi_L,\phi_R)$ and $\phi'=(\phi'_B, \phi'_T,\phi'_L,\phi'_R)$ are \textit{isotopic} if the following conditions are satisfied. Let $S_*$ be a standard boundary corresponding to each $*=B$, $T$, $L$, $R$.
\begin{enumerate} \item $\phi_*$ is equal to $\phi'_*$ on the boundary circles $\partial S_*$ for each $*$. \item There is a homeomorphism $F_*:S_* \times [0, 1]\to \partial_* M \times [0,1]$ for each $*$ satisfying the following conditions. \begin{enumerate} \item $F_*(x,0)=\phi_*(x) \times \{0\}$ and $F_*(x, 1)=\phi'_*(x)\times \{1\}$. \item Its restriction on $\partial S_* \times [0,1]$ agrees with $\phi_* \times \mathrm{id}_{[0,1]}$ \end{enumerate} \end{enumerate} \end{Definition} If two parametrizations $\phi$ and $\phi'$ are isotopic, then we have $\Sigma(\phi)=\Sigma(\phi')$ since the gluing maps are the same by the condition (1). The condition (b) guarantees that we can combine four homeomorphisms $F_*$ with $*=B$, $T$, $L$, $R$ into a homeomorphism \[F:\Sigma(\phi) \times [0,1] \to \partial M \times [0,1]\] such that $F(x, 0)=\phi(x) \times \{0\}$ and $F(x,1)=\phi'(x) \times \{1\}$. \begin{lemma}\label{lem:isotopy equivalence} Let $\phi$ and $\phi'$ be two parametrizations on a decorated cobordism $M$. Assume that $ \phi $ and $ \phi' $ are isotopic. Then $(M, \phi)$ is equivalent to $(M, \phi')$. \end{lemma} \begin{proof} Let us just write $\Sigma$ for $\Sigma(\phi)=\Sigma(\phi')$. Let $F:\Sigma\times [0,1] \to \partial M \times [0, 1]$ be a $d$-homeomorphism that gives an isotopy between $\phi$ and $\phi'$ so that we have $F(x, 0)=\phi(x)\times \{0\}$ and $F(x, 1)=\phi'(x)\times \{1\}$. Consider a collar neighborhood $U=\partial M \times [-1,0]$ of $\partial M$ in $M$. Also consider the space $M \cup_{\partial M \times \{0\}} (\partial M \times [0,1])$ obtained by attaching $\partial M \times [0,1]$ to $M$ along $\partial M\times \{0\}=\partial M$. 
Let $f:M \to M \cup_{\partial M \times \{0\}} (\partial M \times [0,1])$ be a map that is identity outside $U$ and sends each point $x \times t \in U$ with $x\in \partial M$ and $t\in[-1, 0]$ to the point \[ x \times (2t+1)\in U \cup_{\partial M \times \{0\}} (\partial M \times [0,1]) \subset M \cup_{\partial M \times \{0\}} (\partial M \times [0,1]).\] See Figure \ref{fig:callar f}. It is easy to see that the map $f$ is a $d$-homeomorphism. \begin{figure} \caption{The $d$-homeomorphism $f$} \label{fig:callar f} \end{figure} Now the proof of the lemma is summarized into the following commutative diagram. \begin{center} \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=3em, row sep=1em] { & $M$ & $M \cup_{\partial M \times \{0\}} (\partial M \times [0,1])$ &\\ $\Sigma$ & & & $M\cup_{\phi^{-1}} (\Sigma \times [0,1])$ \\ & $M$ & $M \cup_{\partial M \times \{0\}} (\partial M \times [0,1])$ & \\}; \path[->, font=\scriptsize] (m-2-1) edge node[above] {$\phi$} (m-1-2); \path[->, font=\scriptsize] (m-1-2) edge node[above] {$f$} node[below]{$\sim$} (m-1-3); \path[->, font=\scriptsize] (m-1-3.east) edge node[auto] {$\mathrm{id}_M\cup (\phi^{-1} \times \mathrm{id}_{[0,1]})$} node[below right, sloped] {$\sim$} (m-2-4.north west); \path[->, font=\scriptsize] (m-3-3) edge node[above]{$f^{-1}$} node[auto] {$\sim$} (m-3-2); \path[->, font=\scriptsize] (m-2-4.south west) edge node[auto] {$\mathrm{id}_M\cup F$} node[above left, sloped] {$\sim$} (m-3-3.east); \path[->, font=\scriptsize] (m-2-1) edge node[below] {$\phi'$} (m-3-2); \path[->, font=\scriptsize, dashed] (m-1-2) edge node[above] {} (m-3-2); \end{tikzpicture} \end{center} The collar homeomorphism gives the homeomorphisms at the top and the bottom of the diagram above. The space $ M \cup (\partial M_B \times [0,1])$ is further homeomorphic to $M\cup_{\phi^{-1}} (\Sigma \times [0,1])$, where we identify $\partial M$ with $\Sigma\times \{0\}$ by the inverse of the parametrization $\phi^{-1}$. 
The homeomorphism is given by the identity on $M$ and $[0, 1]$, and $\phi^{-1}$ from $\partial M$ to $\Sigma$. The next homeomorphism from $M\cup_{\phi^{-1}} (\Sigma \times [0,1])$ to $M \cup (\partial M \times [0,1])$ is given by the identity on $M$ and $F$ on the rest. The map $\mathrm{id}\cup F$ is compatible with the unions: every element $x\in \partial M$ is identified with $(\phi^{-1}(x), 0) \in\Sigma \times \{0\}$. This is, in turn, mapped to $F(\phi^{-1}(x), 0)=(\phi\circ\phi^{-1}(x), 0)=(x, 0) \in \partial M \times \{0\}$ and this is identified with $x=\mathrm{id}(x)\in \partial M$. Thus the map $\mathrm{id} \cup F$ is well-defined. Composing these homeomorphisms, we obtain a homeomorphism from $M$ to $M$ (the dashed arrow in the diagram). Now we show that this homeomorphism commutes with parametrizations. For each element $x\in \Sigma$, we have the following commutative diagram and it shows that the homeomorphism commutes with parametrizations. \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=3em, row sep=3em] { & $\phi(x)\in M$ & $\phi(x) \times \{1\} \in M \cup (\partial M \times [0,1])$ \\ $x \in \Sigma$ & & $x\times \{1\} \in M\cup_{\phi^{-1}} (\Sigma \times [0,1])$ \\ & $\phi'(x)\in M$ & $F(x,1)=\phi'(x)\times \{1\} \in M \cup (\partial M \times [0,1])$ \\}; \path[|->, font=\scriptsize] (m-2-1) edge node[above] {$\phi$} (m-1-2); \path[|->, font=\scriptsize] (m-1-2) edge (m-1-3); \path[|->, font=\scriptsize] (m-1-3.south) edge node[auto] {$\mathrm{id}\cup \phi^{-1} \cup \mathrm{id}$} (m-2-3); \path[|->, font=\scriptsize] (m-3-3) edge (m-3-2); \path[|->, font=\scriptsize] (m-2-3) edge node[auto] {$\mathrm{id}\cup F$} (m-3-3.north); \path[|->, font=\scriptsize] (m-2-1) edge node[below] {$\phi'$} (m-3-2); \end{tikzpicture} \end{proof} \subsection{Proving that $\mathbb{C}o$ is a 2-category} Now that we defined the data $\mathbb{C}o$, in this section we will show the following proposition: \begin{prop} The data $\mathbb{C}o$ is a 2-category. 
\end{prop} Our convention about 2-categories is summarized in Section \ref{sec:appendix:bicategory}. To claim that $\mathbb{C}o$ is a 2-category, we need to define several composition rules for 1-morphisms and 2-morphisms among other things. Let $\Sigma_t :\nstand{l} \to \nstand{m}$ and $\Sigma_s :\nstand{m} \to \nstand{n}$ be two 1-morphisms so that the target object of $\Sigma_t$ is the source object of $\Sigma_s$ (including the cases when $l=0$ or $n=0$). We define the composition of $\Sigma_t$ and $\Sigma_s$ to be $\Sigma_{t\circ s}$, where $t\circ s$ is the composition of types defined in (\ref{equ:composition of types}). This composition is associative since the composition of types is associative. For each object $\nstand{n}$ with an integer $n$, we let the formal symbol $\mathrm{id}_n$ act as an identity. Remark: note that the composition of 1-morphisms is not a topological gluing of surfaces along boundaries. As 1-morphism surfaces, we fixed the standard surfaces and we need that the composite of 1-morphisms is also a standard surface. Also note that on the 1-morphism level, the boundary circles are not parametrized and hence there is no canonical homeomorphism of boundary circles. \subsubsection{Vertical Gluing} We define the vertical composition. Let $[(M_1, \phi_1)]:\Sigma_{t_1}\Rightarrow \Sigma_{t_2}: \nstand{m} \to \nstand{n}$ and $[(M_2, \phi_2)]:\Sigma_{t_2}\Rightarrow \Sigma_{t_3}: \nstand{m} \to \nstand{n}$ be 2-morphisms of $\mathbb{C}o$ so that the target 1-morphism of $[M_1]$ is equal to the source 1-morphism of $[M_2]$. We define the vertical composite $[M_1]\cdot [M_2]$ of $[M_1]$ and $[M_2]$. The vertical composite will be a 2-morphism $[M_1]\cdot [M_2]: \Sigma_{t_1}\Rightarrow \Sigma_{t_3}: \nstand{m} \to \nstand{n}$. Let us fix representatives $(M_1, \phi_1)$ and $(M_2, \phi_2)$ of these 2-morphisms.
We first glue $M_1$ and $M_2$ along the top boundary $\phi_1(\Sigma_{t_2}^-)$ of $M_1$ and the bottom boundary $\phi_2(\Sigma_{t_2})$ via the homeomorphism obtained from the composition of the following homeomorphisms \[\partial M_1 \supset \phi_1(\Sigma_{t_2}^-) \xrightarrow{\phi_1^{-1}} \Sigma_{t_2}^- \xrightarrow{(\mathrm{mir})^{-1}} \Sigma_{t_2} \xrightarrow{\phi_2} \phi_2(\Sigma_{t_2}) \subset \partial M_2.\] Denote the resulting manifold by $M_1 \cdot M_2$. Now we need to construct a parametrization from $\Sigma_{t_1} \sqcup \Sigma_{t_3}^{-} \sqcup C(m) \sqcup C(n)$ to the boundary of $M_1\cdot M_2$. There is a natural parametrization, which we denote by $\phi_1\cdot_{\text{v}} \phi_2$, obtained as follows. The map $\phi_1\cdot_{\text{v}} \phi_2$ restricts to $\phi_1$ and $\phi_2$ on $\Sigma_{t_1}$ and $\Sigma_{t_3}^-$. This means that $(\phi_1 \cdot_{\text{v}} \phi_2)_B=(\phi_1)_B$ and $(\phi_1 \cdot_{\text{v}} \phi_2)_T=(\phi_2)_T$. Next we define $(\phi_1 \cdot_{\text{v}} \phi_2)_L$ as follows. Let $C_1(m)$ and $C_2(m)$ be copies of $C(m)$ with $(\phi_i)_L: C_i(m) \to \partial_{L} M_i$ for $i=1,2$. We identify the top boundary of the cylinder $C_1(m)$ with the bottom boundary of $C_2(m)$ via the homeomorphism \begin{equation}\label{equ:zeta vertical gluing map} \zeta:=(\phi_2)^{-1}_L (\phi_2)_B (\mathrm{mir})^{-1} (\phi_1)_T^{-1} (\phi_1)_L |_{\partial_{T} C_1(m)}. \end{equation} The following diagram summarizes the definition of $\zeta$.
\begin{center} \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=3em, row sep=1em] { $C_1(m)$ & & & $C_2(m)$ \\ & $\Sigma_{t_2}^{-}$ & $\Sigma_{t_2}$ \\ $M_1$ & & & $M_2$ \\ }; \path[->, font=\scriptsize] (m-1-1) edge node[auto] {$\zeta$} (m-1-4); \path[->, font=\scriptsize] (m-1-1) edge node[auto] {$(\phi_1)_L$} (m-3-1); \path[->, font=\scriptsize] (m-2-2) edge node[auto] {$(\phi_1)_T$} (m-3-1); \path[->, font=\scriptsize] (m-1-4) edge node[auto] {$(\phi_2)_L$} (m-3-4); \path[->, font=\scriptsize] (m-2-3) edge node[auto] {$(\phi_2)_B$} (m-3-4); \path[->, font=\scriptsize] (m-2-2) edge node[auto] {$(\mathrm{mir})^{-1}$} (m-2-3); \end{tikzpicture} \end{center} Then there is a natural homeomorphism extending $(\phi_1)_L$ and $(\phi_2)_L$ from $C_1(m) \cup_{\zeta} C_2(m)$ to $\partial_{L} (M_1\cdot M_2)$. We denote this map by $\phi_1 \cup_{\zeta} \phi_2$. A problem is that $C_1(m) \cup_{\zeta} C_2(m)$ is not a standard surface. However, this can be easily remedied thanks to the cylindrical structure of $C(m)=\nstand{m}\times [0,1]$. Let us define the stretching map $s$ from $C(m)$ to $C_1(m) \cup_{\zeta} C_2(m)$ by sending $(x, t) \in C(m)$ to $(x, 2t) \in C_1(m)$ if $t \leq 1/2$ and to $(\zeta(x), 2t-1) \in C_2(m)$ if $t >1/2$. We define the parametrization $(\phi_1 \cdot_{\text{v}} \phi_2)_L: C(m) \to \partial_{L}(M_1\cdot M_2)$ to be the composite $(\phi_1 \cup_{\zeta} \phi_2) \circ s$. Similarly we define the right parametrization. Next we need to show that a different choice of representatives gives rise to an equivalent parametrized manifold. Let $(N_1, \psi_1)$ and $(N_2, \psi_2)$ be another choice of representatives for $[(M_1, \phi_1)]$ and $[(M_2, \phi_2)]$, respectively. By the definition of the equivalence, we have homeomorphisms $\alpha:N_1 \to M_1$ and $\beta:N_2\to M_2$ such that the parametrizations commute: $\phi_1= \alpha \circ \psi_1$ and $\phi_2= \beta \circ \psi_2$.
These homeomorphisms induce a homeomorphism $\alpha \cup \beta: N_1\cdot N_2 \to M_1\cdot M_2$ such that $(\alpha \cup \beta)|_{N_1}=\alpha$ and $(\alpha \cup \beta)|_{N_2}=\beta$. This is well-defined since on the glued components, we have the following commutative diagram. \[ \begin{CD} \partial_{T} N_1 @> \psi_2\circ (\mathrm{mir})^{-1}\circ \psi_1^{-1} >> \partial_{B} N_2\\ @VV \alpha V @VV \beta V\\ \partial_{T} M_1 @> \phi_2 \circ (\mathrm{mir})^{-1} \circ \phi_1^{-1} >> \partial_{B} M_2\\ \end{CD} \] Then we claim that the homeomorphism $\alpha \cup \beta: N_1 \cdot N_2 \to M_1\cdot M_2$ commutes with the parametrizations: $(\alpha \cup \beta) \circ(\psi_1\cdot_{\text{v}} \psi_2)=\phi_1\cdot_{\text{v}} \phi_2$. On the bottom and top boundaries, this is clear. Let us check this equation on the left boundary. Recall that the left parametrization is defined to be $(\phi_1 \cdot_{\text{v}} \phi_2)|_{C(m)}=(\phi_1 \cup_{\zeta} \phi_2)\circ s$, where $\zeta$ is the gluing map of two copies of cylinders $C_1(m)$ and $C_2(m)$ defined in (\ref{equ:zeta vertical gluing map}) and $s$ is the stretching map. For the second pair $(N_1, \psi_1)$ and $(N_2, \psi_2)$, we also have $(\psi_1 \cdot_{\text{v}} \psi_2)|_{C(m)}=(\psi_1 \cup_{\eta} \psi_2)\circ s$, where $\eta$ is the gluing map of cylinders defined by \[\eta:=(\psi_2)^{-1}_L (\psi_2)_B (\mathrm{mir})^{-1} (\psi_1)_T^{-1} (\psi_1)_L |_{\partial_{T} C_1(m)}.\] Since we have the following commutative diagram, we have in fact $\zeta=\eta$. The commutativity of the left rectangle in the diagram is the definition of $\eta$ and the commutativity of the right rectangle follows since $\alpha$ and $\beta$ commute with the parametrizations. Note that the big rectangle is the definition of $\zeta$ since $\alpha \circ (\psi_1)_L=(\phi_1)_L$ and $\beta \circ (\psi_2)_L=(\phi_2)_L$.
\begin{center} \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=7em, row sep=1em] { $\partial_{B} C_2(m)$ & $\partial_{L} N_2$ & $\partial_{L} M_2$ \\ $\partial_{T} C_1(m)$ & $\partial_{L} N_1$ & $\partial_{L} M_1$ \\ }; \path[->, font=\scriptsize] (m-2-1) edge node[auto] {$\eta$} (m-1-1); \path[->, font=\scriptsize] (m-2-1) edge node[below] {$(\psi_1)_L$} (m-2-2); \path[->, font=\scriptsize] (m-1-1) edge node[auto] {$(\psi_2)_L$} (m-1-2); \path[->, font=\scriptsize] (m-1-2) edge node[auto] {$\beta$} (m-1-3); \path[->, font=\scriptsize] (m-2-2) edge node[below] {$\alpha$} (m-2-3); \path[->, font=\scriptsize] (m-2-3) edge node[auto] {$\phi_2 (\mathrm{mir})^{-1} \phi_1^{-1}$} (m-1-3); \path[->, font=\scriptsize] (m-2-2) edge node[auto] {$\psi_2 (\mathrm{mir})^{-1} \psi_1^{-1}$} (m-1-2); \end{tikzpicture} \end{center} The commutativity of this diagram also shows that we have \[(\alpha \cup \beta) \circ (\psi_1 \cup_{\eta} \psi_2)\circ s=(\phi_1 \cup_{\zeta} \phi_2)\circ s\] and hence we have \[(\alpha \cup \beta) \circ (\psi_1 \cdot_{\text{v}} \psi_2)|_{C(m)}=(\phi_1 \cdot_{\text{v}} \phi_2)|_{C(m)}.\] Similarly for the right boundaries. Thus the definition of the vertical composite \[ [(M_1, \phi_1)]\cdot [(M_2, \phi_2)]:=[(M_1\cdot M_2, \phi_1 \cdot_{\text{v}} \phi_2)] \] is independent of the choice of representatives. \subsubsection{Associativity for the vertical composition} Let $[(M_1, \phi_1)]:\Sigma_{t_1}\Rightarrow \Sigma_{t_2}:\nstand{m}\to \nstand{n}$, $[(M_2, \phi_2)]:\Sigma_{t_2}\Rightarrow \Sigma_{t_3}:\nstand{m}\to \nstand{n}$, and $[(M_3, \phi_3)]:\Sigma_{t_3}\Rightarrow \Sigma_{t_4}:\nstand{m}\to \nstand{n}$ be 2-morphisms. The pairs $[M_1]$ and $[M_2]$, $[M_2]$ and $[M_3]$ are vertically composable.
We show that the associativity holds: We show that \[\Bigl((([M_1]\cdot [M_2])\cdot [M_3]), \quad (\phi_1 \cdot_{\text{v}} \phi_2)\cdot_{\text{v}} \phi_3 \Bigr)= \Bigl(([M_1]\cdot ([M_2]\cdot [M_3])), \quad \phi_1 \cdot_{\text{v}} (\phi_2 \cdot_{\text{v}} \phi_3) \Bigr). \] Both sides are equal to $M_1 \cdot M_2 \cdot M_3$ as a manifold. Thus it suffices to show that the parametrization $(\phi_1 \cdot_{\text{v}} \phi_2)\cdot_{\text{v}} \phi_3$ is isotopic to the parametrization $\phi_1 \cdot_{\text{v}} (\phi_2 \cdot_{\text{v}} \phi_3)$ by Lemma \ref{lem:isotopy equivalence}. Checking this on the top and bottom boundaries is again trivial. Let us check the isotopy on the left boundary. From the definition of the vertical composition, we need to consider three copies of the cylinder $C(m)$. Name them $C_i(m)$ for $i=1,2,3$ for each $M_i$. Let $\zeta_1$ be the gluing map for the cylinders $C_1(m)$ and $C_2(m)$ and let $\zeta_2$ be the gluing map for the cylinders $C_2(m)$ and $C_3(m)$ as in (\ref{equ:zeta vertical gluing map}). (Technically, $\zeta_i$ depends on the order of gluing of the three cylinders but it is straightforward to see that $\zeta_i$ is defined independently of the order.) Now by definition $(\phi_1 \cdot_{\text{v}} \phi_2)\cdot_{\text{v}} \phi_3$ on the left cylinder $C(m)$ is equal to $[ (\phi_1 \cup_{\zeta_1} \phi_2)\circ s_1 \cup_{\zeta_2} \phi_3]\circ s_2 $, where $s_i$ is the stretching map corresponding to $\zeta_i$. This is equal to $[\phi_1 \cup_{\zeta_1} \phi_2 \cup_{\zeta_2} \phi_3 ] \circ (s_1 \cup \mathrm{id} )\circ s_2$. Now we see that the map $(s_1 \cup \mathrm{id} )\circ s_2$ is isotopic to the map $(\mathrm{id} \cup s_2)\circ s_1$. (This is like the proof of the associativity of fundamental groups.) Hence we see that $(\phi_1 \cdot_{\text{v}} \phi_2)\cdot_{\text{v}} \phi_3$ is isotopic to $\phi_1 \cdot_{\text{v}} (\phi_2 \cdot_{\text{v}} \phi_3)$ on the left boundary. Similarly for the right boundary.
Therefore the associativity follows. \subsubsection{Units for vertical composition}\label{subsubsec:unit for the vertical composition of Co} From the above arguments, for each pair of objects $\nstand{m}$, $\nstand{n}$ of $\mathbb{C}o$, we have the semigroupoid (category without identity) $\mathbb{C}o(\nstand{m}, \nstand{n})$ whose objects are 1-morphisms from $\nstand{n}$ to $\nstand{m}$ of $\mathbb{C}o$ and whose morphisms are 2-morphisms between such 1-morphisms in $\mathbb{C}o$. To make the semigroupoid $\mathbb{C}o(\nstand{m}, \nstand{n})$ a category, we need to specify the identity morphism for each object of $\mathbb{C}o(\nstand{m}, \nstand{n})$. For each object $X$ of $\mathbb{C}o(\nstand{m}, \nstand{n})$ (a standard surface), we just use a formal unit $\mathrm{id}_{X}$, rather than construct the identity cobordism. Thus the formal unit $\mathrm{id}_{X}$ should act as the identity morphism in the category $\mathbb{C}o(\nstand{m}, \nstand{n})$. Remark: one reason to use formal units is that otherwise we need to construct a concrete cobordism with a parametrization. The obvious candidate for the identity cobordism is the cylinder over the standard surface $\Sigma_{t}\times [0,1]$, where $t$ is the type with $L(t)=m$ and $R(t)=n$. The bottom boundary $\Sigma_t\times \{0\}$ can be identified with the standard surface $\Sigma_t$ and the identity map can be used as a parametrization. The top boundary $\Sigma_t \times \{1\}$ also can be identified with $\Sigma_t$ and $\mathrm{mir}:\Sigma_t^- \to \Sigma_t$ can be used as a parametrization. The problem is to define parametrizations for the left and the right boundaries. We need to construct parametrization homeomorphism from $C(m)$ and $C(n)$ to the side boundaries of $\Sigma_t \times [0,1]$, which are cylinders over the boundary circles of $\Sigma_t$. However there is no canonical homeomorphism at hand. 
Since it does not seem that this gives more insight into our theory, we just avoid the burden by introducing the formal units. On the other hand, there is no obstruction in the case when $m=n=0$ since there are no side boundaries. Using the cylindrical neighborhood, we can prove that the cylinder over a standard surface is in fact the identity. \subsubsection{Horizontal composition} Next, we define the horizontal composition of 2-morphisms. Let $X=\nstand{l}$, $Y=\nstand{m}$ and $Z=\nstand{n}$ be objects of $\mathbb{C}o$. Let $\Sigma_{t_i}: X \to Y$ and $\Sigma_{s_i}: Y \to Z$ be 1-morphisms for $i=1,2$. Let $[M]: \Sigma_{t_1} \Rightarrow \Sigma_{t_2}: X \to Y$ and $[M']:\Sigma_{s_1} \Rightarrow \Sigma_{s_2}: Y \to Z$ be 2-morphisms. We define the horizontal composite $[M]\circ [M']$ of $[M]$ and $[M']$ as follows. The composite $[M]\circ [M']$ will be a 2-morphism from $\Sigma_{t_1\circ s_1}: X\to Z$ to $\Sigma_{t_2 \circ s_2}: X \to Z$. Pick representatives $(M,\phi)$ and $(M', \phi') $ for $[M]$ and $[M']$, respectively. Here $\phi: \Sigma(\phi) \to \partial M$ and $\phi': \Sigma(\phi') \to \partial M'$ are parametrizations of the boundaries of the decorated cobordisms $M$ and $M'$, respectively. We glue $M$ and $M'$ by identifying $\partial_{R} M$ and $\partial_{L} M'$ via the homeomorphism $\phi'_L \circ \mathrm{ref} \circ \phi_R^{-1}: \partial_{R} M \to \partial_{L} M'$. Since this homeomorphism is the composite of three orientation reversing maps, this map is orientation reversing. In the sequel, we omit writing the map $\mathrm{ref}$ to simplify expressions. Denote the resulting manifold by \[ M\circ M'=M\cup_{\phi'_L \circ \phi_R^{-1}}M'.\] The next task is to construct a parametrization of $M\circ M'$ from $C(l)\sqcup C(n) \sqcup\Sigma_{t_1\circ s_1} \sqcup\Sigma_{t_2\circ s_2}^-$ and then the equivalence class of this pair will be the horizontal composite.
On the left and right boundaries, the parametrizations are just $\phi$ and $\phi'$, respectively. We now define a parametrization homeomorphism from the standard surface $\Sigma_{t_1\circ s_1}$ to the bottom boundary of $M\circ M'$. This is not a straightforward task because the topological gluing of standard surfaces is not a standard surface. First let us write \[ g:=\phi'^{-1}_{B} \circ \phi'_L \circ \phi_{R}^{-1}\circ \phi_B: \partial_{R} \Sigma_{t_1} \to \partial_{L} \Sigma_{s_1}.\] Gluing via this homeomorphism we obtain the surface $\Sigma_{t_1}\cup_g \Sigma_{s_1}$. Define $\Phi=\Phi(\phi_B,\phi'_B): \Sigma_{t_1} \cup_g \Sigma_{s_1} \to \partial_{B} (M \circ M')$ by \[\Phi(x)=\Phi(\phi_B,\phi'_B)(x)= \begin{cases} \phi_B(x) & \text{ if } x \in \Sigma_{t_1} \\ \phi'_B(x) & \text{ if } x\in \Sigma_{s_1} \end{cases} \] This is well-defined since $\partial_{B} (M \circ M')=\partial_{B} M \cup_{\phi'_L \circ \phi_R^{-1}} \partial_{B} M'$. Next, because the surface $\Sigma_{t_1}\cup_g \Sigma_{s_1}$ is not a standard surface, we define a homeomorphism $\Sigma_{t_1 \circ s_1} \to \Sigma_{t_1}\cup_g \Sigma_{s_1}$. This homeomorphism will depend on several choices. However, two different choices give homeomorphisms that differ only by an isotopy. The standard surfaces $\Sigma_{t_1}$ and $\Sigma_{s_1}$ are by definition sitting in $\mathbb{R}^3$. There is a translation map $\tau$ of $\mathbb{R}^3$ that maps $\partial_{R} \Sigma_{t_1}$ to $\partial_{L} \Sigma_{s_1}$. Now both $\Sigma_{t_1\circ s_1}$ and $\tau(\Sigma_{t_1})\cup \Sigma_{s_1}$ are in $\mathbb{R}^3$ and they are homeomorphic. We fix a homeomorphism $h(t_1, s_1):\Sigma_{t_1\circ s_1 }\to \tau(\Sigma_{t_1})\cup \Sigma_{s_1} $ as follows. Recall that the standard surface $\Sigma_t$ is obtained from the boundary of the standard handlebody $U_t$ in $\mathbb{R}^3$ with several disks removed.
There is an ambient isotopy $F: \mathbb{R}^3 \times [0,1] \to \mathbb{R}^3$ of the handlebodies $U_{t_1 \circ s_1}$ and $\tau(U_{t_1})\cup U_{s_1}$ which maps the ribbon graph in one to the other and when $F$ is restricted on $\partial_{L} \Sigma_{t_1 \circ s_1}\subset U_{t_1 \circ s_1}$ it is just a translation in the $x$-coordinate in $\mathbb{R}^3$ and when $F$ is restricted to $\partial_{R} \Sigma_{t_1 \circ s_1}\subset U_{t_1 \circ s_1}$, it is also just a translation but it might move a different amount in the $x$-direction. Then we define $h(t_1, s_1)$ to be the restriction of $F$ to $\Sigma_{t_1 \circ s_1}$. There is a canonical homeomorphism $f_{\tau}:\tau(\Sigma_{t_1}) \cup \Sigma_{s_1} \to \Sigma_{t_1}\cup_{\tau} \Sigma_{s_1} $. Here the latter space is obtained by regarding $\tau$ as a homeomorphism from $\partial_{R} \Sigma_{t_1}$ to $\partial_{L} \Sigma_{s_1}$ and gluing $\Sigma_{t_1}$ and $\Sigma_{s_1}$ along $\tau$. Finally we need to choose a homeomorphism $\Sigma_{t_1}\cup_{\tau} \Sigma_{s_1} \to \Sigma_{t_1}\cup_{g} \Sigma_{s_1} $, which seems to be the most arbitrary. First we have the following result which follows from the Lemma in Appendix III of \cite{Turaev10}. \begin{lemma}\label{lem:Turaev Appendix III} Let $X=\partial_{R} \Sigma_t$ and let $f:X \to X$ be a homeomorphism preserving the orientation and the base points $\{x_i\}$ in each component of $X$. Then there exists a homeomorphism $\Psi: \Sigma_t \to \Sigma_t$ satisfying the following. \begin{enumerate} \item The restriction $\Psi|_{X}=f$. \item The homeomorphism $\Psi$ is the identity on $(\Sigma_t \setminus \mathrm{int}(U))\cup (\{x_i\}\times [0,1])$, where $U=X \times [0,1]$ is a cylindrical collar neighborhood of $X=X\times \{0\}$ in $\Sigma_t$. \item The homeomorphism $\Psi$ carries $X \times \{t\} \subset U$ into $X \times \{t\}$ for all $t \in [0,1]$. \end{enumerate} Any two such homeomorphisms $\Psi:\Sigma_t \to \Sigma_t$ are isotopic via an isotopy constant on $\partial \Sigma_t$.
\end{lemma} Now from the two homeomorphisms $\tau$ and $g$ from $\partial_{R} \Sigma_{t_1}$ to $\partial_{L} \Sigma_{s_1}$ we obtain the self homeomorphism $f=\tau^{-1}\circ g$ of $X=\partial_{R} \Sigma_{t_1}$. Lemma \ref{lem:Turaev Appendix III} yields a homeomorphism $\Psi(f):\Sigma_{t_1} \to \Sigma_{t_1}$ that extends $f=\tau^{-1}\circ g$, and any two such homeomorphisms are isotopic. We fix one such $\Psi(f)$. Thus $\Psi(f)$ induces a homeomorphism $\Psi(g, \tau):\Sigma_{t}\cup_{\tau} \Sigma_{s_1} \to \Sigma_{t}\cup_{g} \Sigma_{s_1} $, and a different choice of $\Psi(f)$ induces an isotopic homeomorphism $\Psi(g, \tau)$. Then we define the parametrization homeomorphism $\phi_B\circ_h \phi'_B$ of $M \circ M'$ to be \begin{equation}\label{equ:horizontal parametrizations} \phi_B\circ_h \phi'_B:=\Phi(\phi_B, \phi'_B)\circ \Psi(g, \tau) \circ f_{\tau} \circ h(t_1, s_1): \Sigma_{t_1 \circ s_1} \to \partial_{B} (M \circ M'). \end{equation} Similarly we obtain the top parametrization $\phi_T\circ_h \phi'_T:\Sigma_{t_2\circ s_2}^{-} \to \partial_{T} (M\circ M')$. Now we have obtained the pair $(M\circ M',\phi\circ_h\phi')$. After choosing the representatives $M$ and $M'$, there are several choices we made to define the parametrization. Namely, $h(t_1, s_1)$ and $\Psi(g, \tau)$ are defined up to isotopy fixing boundaries. But this ambiguity does not affect the equivalence class by virtue of Lemma \ref{lem:isotopy equivalence}. The next lemma shows that a different choice of representatives of a 2-morphism gives an equivalent pair. \begin{lemma} Suppose that $(M,\phi)$ is equivalent to $(N, \psi)$ and $(M', \phi')$ is equivalent to $(N',\psi')$. Then $(M\circ M', \phi\circ_h \phi')$ is equivalent to $(N\circ N', \psi\circ_h \psi')$. \end{lemma} \begin{proof} By the definition of the equivalence, there are homeomorphisms $\alpha:N \to M$ and $\beta:N'\to M'$ such that $\phi= \alpha \circ \psi$ and $\phi'=\beta \circ \psi'$.
They induce a homeomorphism $\alpha \cup \beta : N\circ N' \to M \circ M'$. This homeomorphism is well-defined since on the common boundary we have the following commutative diagram: \begin{center} \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=3em, row sep=1.5em] {$C(m)$ & & & $C(m)$ \\ & $N$ & $N'$ &\\ & $M$ & $M'$ &\\ $C(m)$ & & & $C(m)$\\}; \path[->, font=\scriptsize] (m-1-1) edge node[auto] {$=$} (m-1-4); \path[->, font=\scriptsize] (m-1-1) edge node[auto] {$=$} (m-4-1); \path[->, font=\scriptsize] (m-1-1) edge node[auto] {$\psi_R$} (m-2-2); \path[->, font=\scriptsize] (m-2-2) edge node[auto] {$\psi'_L \circ \psi^{-1}_R$} (m-2-3); \path[->, font=\scriptsize] (m-2-3) edge node[auto] {$\beta$} (m-3-3); \path[->, font=\scriptsize] (m-2-2) edge node[auto] {$\alpha$} (m-3-2); \path[->, font=\scriptsize] (m-3-2) edge node[auto] {$\phi'_L \circ \phi^{-1}_R$} (m-3-3); \path[->, font=\scriptsize] (m-1-4) edge node[auto] {$=$} (m-4-4); \path[->, font=\scriptsize] (m-4-1) edge node[auto] {$=$} (m-4-4); \path[->, font=\scriptsize] (m-4-1) edge node[auto] {$\phi_R$} (m-3-2); \path[->, font=\scriptsize] (m-4-4) edge node[auto] {$\phi_L'$} (m-3-3); \path[->, font=\scriptsize] (m-1-4) edge node[auto] {$\psi'_L$} (m-2-3); \end{tikzpicture} \end{center} We need to show that \begin{equation}\label{equ:horizontal parametrization commutes} \phi \circ_{\text{h}} \phi'=(\alpha \cup \beta)\circ (\psi \circ_{\text{h}} \psi'). \end{equation} The right and the left boundary parts just follow from the definition of $\alpha$ and $\beta$.. Let us check this equality on the bottom boundary. By the remark before the lemma, we can choose the parametrizations as follows. 
\[\phi_B\circ_h \phi'_B:=\Phi(\phi_B, \phi'_B)\circ \Psi(g, \tau) \circ f_{\tau} \circ h(t_1, s_1): \Sigma_{t_1 \circ s_1} \to \partial_{B} (M \circ M')\] and \[\psi_B\circ_h \psi'_B:=\Phi(\psi_B, \psi'_B)\circ \Psi(g', \tau) \circ f_{\tau} \circ h(t_1, s_1): \Sigma_{t_1 \circ s_1} \to \partial_{B} (N \circ N'),\] where \[ g:=\phi'^{-1}_{B} \circ \phi'_L \circ \phi_{R}^{-1}\circ \phi_B: \partial_{R} \Sigma_{t_1} \to \partial_{L} \Sigma_{s_1}\] and \[ g':=\psi'^{-1}_{B} \circ \psi'_L \circ \psi_{R}^{-1}\circ \psi_B: \partial_{R} \Sigma_{t_1} \to \partial_{L} \Sigma_{s_1}.\] Because we have homeomorphism commuting with parametrizations, we have the following commutative diagram and we have in fact $g=g'$ \begin{center} \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=3em, row sep=2em] { & $M$ & & $M'$ & \\ $\Sigma_{t}$ & & $C(m)$& & $ \Sigma_{s_1}$ \\ & $N$ & & $N'$ & \\}; \path[->, font=\scriptsize] (m-2-1) edge node[auto] {$\phi_B$} (m-1-2); \path[->, font=\scriptsize] (m-2-1) edge node[left]{$\psi_B$} (m-3-2); \path[->, font=\scriptsize] (m-2-3) edge node[auto] {$\phi_R$} (m-1-2); \path[->, font=\scriptsize] (m-2-3) edge node[below] {$\psi_R$} (m-3-2); \path[->, font=\scriptsize] (m-2-3) edge node[auto] {$\phi'_L$} (m-1-4); \path[->, font=\scriptsize] (m-2-3) edge node[auto] {$\psi'_L$} (m-3-4); \path[->, font=\scriptsize] (m-2-5) edge node[auto] {$\phi'_B$} (m-1-4); \path[->, font=\scriptsize] (m-2-5) edge node[auto] {$\psi'_B$} (m-3-4); \path[->, font=\scriptsize] (m-3-2) edge node[auto] {$\alpha$} (m-1-2); \path[->, font=\scriptsize] (m-3-4) edge node[auto] {$\beta$} (m-1-4); \end{tikzpicture} \end{center} We also have the following commutative diagram and the equality (\ref{equ:horizontal parametrization commutes}) holds on the bottom boundary. 
\begin{center} \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=6em, row sep=2em] { & & $\partial_{B}(M\circ M')$ \\ $\Sigma_{t\circ t'}$ & $\Sigma_{t} \cup_{g} \Sigma_{t'}$ \\ & & $\partial_{B} (N \circ N')$ \\}; \path[->, font=\scriptsize] (m-2-1) edge node[auto] {$\Psi(g, \tau) f_{\tau}h(t_1, s_1)$} (m-2-2); \path[->, font=\scriptsize] (m-2-2) edge node[auto] {$\Phi(\phi_B, \phi_{B'})$} (m-1-3); \path[->, font=\scriptsize] (m-2-2) edge node[auto] {$\Phi(\psi_B, \psi_{B'})$} (m-3-3); \path[->, font=\scriptsize] (m-1-3) edge node[auto] {$\alpha \cup \beta $} (m-3-3); \end{tikzpicture} \end{center} Similarly for the top boundary. Hence we have $\phi \circ_{\text{h}} \phi'=(\alpha \cup \beta )\circ (\psi \circ_{\text{h}} \psi')$ and conclude that $(M\circ M', \phi\circ_h \phi')$ is equivalent to $(N\circ N', \psi\circ_h \psi')$. \end{proof} \subsubsection{Associativity for horizontal composition} Let $(M, \phi)$, $(M', \phi')$, and $(M'', \phi'')$ be representatives of 2-morphisms of $\mathbb{C}o$ such that $(M, \phi)$ and $(M', \phi')$, $(M', \phi')$ and $(M'', \phi'')$ are horizontally composable. We show that horizontal composition of $\mathbb{C}o$ is associative. It suffices to show that the map $(\phi\circ_{\text{h}} \phi') \circ_{\text{h}} \phi''$ is isotopic to $\phi \circ_{\text{h}} (\phi' \circ_{\text{h}} \phi'')$. We check this on the bottom part. Recall the definition of horizontal composition of parametrizations from (\ref{equ:horizontal parametrizations}). For the sake of simplicity, we use the letter $\tau$ for translations in $\mathbb{R}^3$ and we denote by $\bar h$ the composite homeomorphism $f_{\tau}\circ h(t, s): \Sigma_{t\circ s} \to \Sigma_{t} \cup_{\tau} \Sigma_{s}$. Thus the maps $\tau$ and $\bar h$ should be understood from the context. Let $g_1$ and $g_2$ be the homeomorphisms defined by the following commutative diagram.
\begin{center} \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=3em, row sep=1em] { $\partial_{R} \Sigma_{t}$ & & $\partial_{L} \Sigma_{t'}, \partial_{R} \Sigma_{t'}$& & $ \partial_{L} \Sigma_{t''}$ \\ & $C(m)$ & & $C(n)$ & \\ $\partial_{B} M$ & & $\partial_{B} M'$& & $ \partial_{B} M''$ \\ }; \path[->, font=\scriptsize] (m-1-1) edge node[auto] {$g_1$} (m-1-3); \path[->, font=\scriptsize] (m-1-3) edge node[auto]{$g_2$} (m-1-5); \path[->, font=\scriptsize] (m-1-1) edge node[auto] {$\phi_B$} (m-3-1); \path[->, font=\scriptsize] (m-2-2) edge node[auto] {$\phi_R$} (m-3-1); \path[->, font=\scriptsize] (m-2-2) edge node[auto] {$\phi'_L$} (m-3-3); \path[->, font=\scriptsize] (m-2-4) edge node[auto] {$\phi'_R$} (m-3-3); \path[->, font=\scriptsize] (m-2-4) edge node[auto] {$\phi''_L$} (m-3-5); \path[->, font=\scriptsize] (m-1-5) edge node[auto] {$\phi''_B$} (m-3-5); \path[->, font=\scriptsize] (m-1-3) edge node[auto] {$\phi'_B$} (m-3-3); \end{tikzpicture} \end{center} Thus $g_1$ and $g_2$ are gluing homeomorphism of standard surfaces induced by parametrizations. To calculate $(\phi\circ_{\text{h}} \phi') \circ_{\text{h}} \phi''$ and $\phi \circ_{\text{h}} (\phi' \circ_{\text{h}} \phi'')$ , we also need the following gluing homeomorphisms $g_3$ and $g_4$, respectively. 
\begin{center} \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=7em, row sep=1em] { $\partial_{R} \Sigma_{t}$ & & $\partial_{L} \Sigma_{t'\circ t''}$\\ & $C(m)$ &\\ $\partial_{B} M$ & & $\partial_{B} (M'\circ M'') $ \\ }; \path[->, font=\scriptsize] (m-1-1) edge node[auto] {$g_3$} (m-1-3); \path[->, font=\scriptsize] (m-1-1) edge node[auto] {$\phi_B$} (m-3-1); \path[->, font=\scriptsize] (m-2-2) edge node[auto] {$\phi_R$} (m-3-1); \path[->, font=\scriptsize] (m-2-2) edge node[auto] {$(\phi' \circ_{\text{h}} \phi'')_L=\phi'_L$} (m-3-3); \path[->, font=\scriptsize] (m-1-3) edge node[auto] {$(\phi' \circ_{\text{h}} \phi'')_B$} (m-3-3); \end{tikzpicture} \end{center} \begin{center} \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=7em, row sep=1em] { $\partial_{R} (\Sigma_{t\circ t'})$ & & $\partial_{L} \Sigma_{ t''}$\\ & $C(n)$ & \\ $\partial_{B} (M\circ M') $ & & $\partial_{B} M'' $ \\ }; \path[->, font=\scriptsize] (m-1-1) edge node[auto] {$g_4$} (m-1-3); \path[->, font=\scriptsize] (m-1-1) edge node[auto] {$(\phi\circ_{\text{h}} \phi')_B$} (m-3-1); \path[->, font=\scriptsize] (m-2-2) edge node[auto] {$(\phi\circ_{\text{h}} \phi')_R=\phi_R$} (m-3-1); \path[->, font=\scriptsize] (m-2-2) edge node[auto] {$ \phi''_L$} (m-3-3); \path[->, font=\scriptsize] (m-1-3) edge node[auto] {$ \phi''_B$} (m-3-3); \end{tikzpicture} \end{center} Let $\Psi_i:=\Psi(g_i, \tau)$. Note that since $(\phi' \circ_{\text{h}} \phi'')_B |_{\partial_{L} (\Sigma_{t' \circ t''})}=\phi'_B \circ \Psi_2\circ \bar h |_{\partial_{L}(\Sigma_{t'\circ t''})}$, we have $\Psi_2 \circ \bar h \circ g_3=g_1$. Moreover $\Psi_2 \circ \bar h$ is identity on the boundary $\partial_{L} \Sigma_{t' \circ t''}$, thus in fact we have $g_1=g_3$, and hence $\Psi_1=\Psi_3$. Also since $(\phi \circ_{\text{h}} \phi')_B|_{\partial_{R} (\Sigma_{t \circ t'})}=\phi'_B \circ \Psi_1\circ \bar h |_{\partial_{R}(\Sigma_{t \circ t'})}$, we have $g_2\circ \Psi_1 \circ \bar h =g_4$. 
Deciphering the definition of maps we have the following diagram, where the left path is a parametrization $\phi_B \circ_{\text{h}} (\phi'_B \circ_{\text{h}} \phi''_B)$ and the right path is the parametrization $(\phi_B \circ_{\text{h}} \phi'_B)\circ_{\text{h}} \phi''_B$. \begin{center} \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=2em, row sep=1em] { & $\Sigma_{t\circ t' \circ t''}$& \\ $\Sigma_t \cup_{\tau} \Sigma_{t' \circ t''}$ & & $\Sigma_{t \circ t'} \cup_{\tau} \Sigma_{t''}$ \\ $\Sigma_t \cup_{g_3} \Sigma_{t' \circ t''}$ & & $\Sigma_{t \circ t'} \cup_{g_4} \Sigma_{t''} $ \\ $\Sigma_t \cup_{\bar h g_3} (\Sigma_{t'} \cup_{\tau} \Sigma_{t''})$ & &$(\Sigma_{t} \cup_{\tau} \Sigma_{t'}) \cup_{g_4 \bar{h}^{-1}} \Sigma_{t''} $ \\ $\Sigma_t \cup_{\Psi_2 \bar h g_3} (\Sigma_{t'} \cup_{g_2} \Sigma_{t''})$ & & $(\Sigma_{t} \cup_{g_1} \Sigma_{t'}) \cup_{g_4 \bar{h}^{-1} \Psi^{-1}_1} \Sigma_{t''} $ \\ &$\partial_{B} (M\circ M' \circ M'')$ & \\ }; \path[->, font=\scriptsize] (m-1-2) edge node[above] {$\bar h$} (m-2-1); \path[->, font=\scriptsize] (m-1-2) edge node[auto]{$\bar h$} (m-2-3); \path[->, font=\scriptsize] (m-2-1) edge node[auto] {$\Psi_3$} (m-3-1); \path[->, font=\scriptsize] (m-3-1) edge node[auto]{$\bar h$} (m-4-1); \path[->, font=\scriptsize] (m-4-1) edge node[auto]{$\Psi_2$} (m-5-1); \path[->, font=\scriptsize] (m-2-3) edge node[auto] {$\Psi_4$} (m-3-3); \path[->, font=\scriptsize] (m-3-3) edge node[auto]{$\bar h$} (m-4-3); \path[->, font=\scriptsize] (m-4-3) edge node[auto]{$\Psi_1$} (m-5-3); \path[->, font=\scriptsize] (m-5-1) edge node[auto] {$\phi_B \cup \phi'_B \cup \phi''_B$} (m-6-2); \path[->, font=\scriptsize] (m-5-3) edge node[auto]{$\phi_B \cup \phi'_B \cup \phi''_B$} (m-6-2); \end{tikzpicture} \end{center} On the left path, we have $\bar h \circ \Psi_3=\Psi_3\circ \bar h$ since $\Psi_3$ changes only $\Sigma_t$ part and $\bar h$ changes only the $\Sigma_{t'\circ t''}$ part. On the right path, $\Psi_4$ and $\bar h$ does not commute. 
This diagram can be also written as follows. Let $\tilde \Psi_4:=\bar h \Psi_4 \bar{h}^{-1}$. Then we have $\bar h \circ \Psi_4=\tilde \Psi_4 \circ \bar h$ and thus we have the following diagram. \begin{center} \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=2em, row sep=1em] { & $\Sigma_{t\circ t' \circ t''}$& \\ $\Sigma_t \cup_{\tau} \Sigma_{t' \circ t''}$ & & $\Sigma_{t \circ t'} \cup_{\tau} \Sigma_{t''}$ \\ $\Sigma_t \cup_{\bar h \tau} (\Sigma_{t'} \cup_{\tau} \Sigma_{t''})$ & & $(\Sigma_{t} \cup_{\tau} \Sigma_{t'}) \cup_{\tau} \Sigma_{t''} $ \\ $\Sigma_t \cup_{\bar h g_3} (\Sigma_{t'} \cup_{\tau} \Sigma_{t''})$ & &$(\Sigma_{t} \cup_{\tau} \Sigma_{t'}) \cup_{g_4 \bar{h}^{-1}} \Sigma_{t''} $ \\ $\Sigma_t \cup_{\Psi_2 \bar h g_3} (\Sigma_{t'} \cup_{g_2} \Sigma_{t''})$ & & $(\Sigma_{t} \cup_{g_1} \Sigma_{t'}) \cup_{g_4 \bar{h}^{-1} \Psi^{-1}_1} \Sigma_{t''} $ \\ &$\partial_{B} (M\circ M' \circ M'')$ & \\ }; \path[->, font=\scriptsize] (m-1-2) edge node[above] {$\bar h$} (m-2-1); \path[->, font=\scriptsize] (m-1-2) edge node[auto]{$\bar h$} (m-2-3); \path[->, font=\scriptsize] (m-2-1) edge node[auto] {$\bar h$} (m-3-1); \path[->, font=\scriptsize] (m-3-1) edge node[auto]{$\Psi_3$} (m-4-1); \path[->, font=\scriptsize] (m-4-1) edge node[auto]{$\Psi_2$} (m-5-1); \path[->, font=\scriptsize] (m-2-3) edge node[auto] {$\bar h$} (m-3-3); \path[->, font=\scriptsize] (m-3-3) edge node[auto]{$\tilde \Psi_4$} (m-4-3); \path[->, font=\scriptsize] (m-4-3) edge node[auto]{$\Psi_1$} (m-5-3); \path[->, font=\scriptsize] (m-5-1) edge node[auto] {$\phi_B \cup \phi'_B \cup \phi''_B$} (m-6-2); \path[->, font=\scriptsize] (m-5-3) edge node[auto]{$\phi_B \cup \phi'_B \cup \phi''_B$} (m-6-2); \end{tikzpicture} \end{center} Investigating the gluing maps, we obtain the following diagram. 
\begin{center} \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=2em, row sep=1em] { & $\Sigma_{t\circ t' \circ t''}$& \\ $\Sigma_t \cup_{\tau} \Sigma_{t' \circ t''}$ & & $\Sigma_{t \circ t'} \cup_{\tau} \Sigma_{t''}$ \\ $\Sigma_t \cup_{ \tau} (\Sigma_{t'} \cup_{\tau} \Sigma_{t''})$ & & $(\Sigma_{t} \cup_{\tau} \Sigma_{t'}) \cup_{\tau} \Sigma_{t''} $ \\ $\Sigma_t \cup_{\bar h g_3} (\Sigma_{t'} \cup_{\tau} \Sigma_{t''})$ & &$(\Sigma_{t} \cup_{\tau} \Sigma_{t'}) \cup_{g_4 \bar{h}^{-1}} \Sigma_{t''} $ \\ $\Sigma_t \cup_{g_1} (\Sigma_{t'} \cup_{g_2} \Sigma_{t''})$ & & $(\Sigma_{t} \cup_{g_1} \Sigma_{t'}) \cup_{g_2} \Sigma_{t''} $ \\ &$\partial_{B} (M\circ M' \circ M'')$ & \\ }; \path[->, font=\scriptsize] (m-1-2) edge node[above] {$\bar h$} (m-2-1); \path[->, font=\scriptsize] (m-1-2) edge node[auto]{$\bar h$} (m-2-3); \path[->, font=\scriptsize] (m-3-1) edge node[above] {$=$} (m-3-3); \path[->, font=\scriptsize] (m-2-1) edge node[auto] {$\bar h$} (m-3-1); \path[->, font=\scriptsize] (m-3-1) edge node[auto]{$\Psi_3$} (m-4-1); \path[->, font=\scriptsize] (m-4-1) edge node[auto]{$\Psi_2$} (m-5-1); \path[->, font=\scriptsize] (m-2-3) edge node[auto] {$\bar h$} (m-3-3); \path[->, font=\scriptsize] (m-3-3) edge node[auto]{$\tilde \Psi_4$} (m-4-3); \path[->, font=\scriptsize] (m-4-3) edge node[auto]{$\Psi_1$} (m-5-3); \path[->, font=\scriptsize] (m-5-1) edge node[auto] {$\phi_B \cup \phi'_B \cup \phi''_B$} (m-6-2); \path[->, font=\scriptsize] (m-5-3) edge node[auto]{$\phi_B \cup \phi'_B \cup \phi''_B$} (m-6-2); \end{tikzpicture} \end{center} Here the top pentagon is commutative up to isotopy: this is again similar to the proof of associativity of the fundamental group. The bottom heptagon is also commutative up to isotopy. 
To see this, first note that $\tilde \Psi_4$ is the identity on $\Sigma_t \subset \Sigma_t \cup_{\tau} \Sigma_{t'}$ since $\Psi_4$ is the identity outside of a collar neighborhood of $\partial_{R} \Sigma_{t \circ t'} \subset \bar {h}^{-1} (\Sigma_{t'})$. Thus the restriction of $\tilde \Psi_4$ to the $\Sigma_{t'}$ part is isotopic to $\Psi_2$ by Lemma \ref{lem:Turaev Appendix III}, since they agree on the boundary $\partial_{R} \Sigma_{t'}$. Since $\Psi_1$ and $\Psi_2$ commute, the heptagon is commutative up to isotopy. By Lemma \ref{lem:isotopy equivalence} the associativity on the level of classes holds. \subsubsection{Units for horizontal composition} As in the case of vertical composition, we use formal units for horizontal composition. This means that for each object $\nstand{n}$ of $\mathbb{C}o$, we add the formal identity object $\mathrm{id}_n$ to $\mathbb{C}o(\nstand{n}, \nstand{n})$ and we also add the formal identity 2-morphism $\mathrm{id}_{\mathrm{id}_n}$ on the object $\mathrm{id}_n$. These formal identities act as identities for the horizontal composition. \subsubsection{Interchange law} We check the interchange law.
For four 2-morphisms \begin{align*} &[(M_1, \phi_1)]: \Sigma_{t_1} \mathbb{R}ightarrow \Sigma_{t_2}: \nstand{l} \to \nstand{m}, \qquad &[(M_2, \phi_2)]: \Sigma_{t_2} \mathbb{R}ightarrow \Sigma_{t_3}: \nstand{l} \to \nstand{m},\\ &[(M_1', \psi_1)]: \Sigma_{s_1} \mathbb{R}ightarrow \Sigma_{s_2}: \nstand{m} \to \nstand{n}, \qquad &[(M_2', \psi_2)]: \Sigma_{s_2} \mathbb{R}ightarrow \Sigma_{s_3}: \nstand{m} \to \nstand{n}, \end{align*} \begin{center} \begin{tikzcd} \nstand{l} \arrow[bend left=50]{r}[name=U,below]{}{\Sigma_{t_1}} \arrow{r}[name=M,below]{}{\Sigma_{t_2}} \arrow[bend right=50]{r}[name=D]{}{\Sigma_{t_3}} &\nstand{m} \arrow[bend left=50]{r}[name=U',below]{}{\Sigma_{s_1}} \arrow{r}[name=M',below]{}{\Sigma_{s_2}} \arrow[bend right=50]{r}[name=D']{}{\Sigma_{s_3}} & \nstand{n} \end{tikzcd} \end{center} the interchange law says that the following equality holds; \[ ([M_1]\circ[M_1'])\cdot ([M_2] \circ [M_2'])=( [M_1]\cdot [M_2]) \circ ([M_1'] \cdot [M_2']) \] as a 2-morphism $\Sigma_{t_1\circ s_1}\mathbb{R}ightarrow \Sigma_{t_3\circ s_3}: \nstand{l}\to \nstand{n}$. As a manifold both sides are the same. Hence we only need to check whether \[(\phi_1\circ_{\text{h}} \psi_1)\cdot_{\text{v}} (\phi_2 \circ_{\text{h}}\psi_2) =(\phi_1 \cdot_{\text{v}} \phi_2)\circ_{\text{h}} (\psi_1\cdot_{\text{v}} \psi_2). \] This equality is true because that horizontal composition does not change the parametrizations on the side boundaries and vertical composition does not change the parametrizations on the top and the bottom boundaries. \section{A bicategory of the Kapranov-Voevodsky 2-vector spaces}\label{sec:A 2-category of the Kapranov-Voevodsky 2-vector spaces} The target bicategory of our extended TQFT will be the Kapranov-Voevodsky (KV) 2-vector spaces. The reason that we chose the KV 2-vector spaces as a target algebraic bicategory is that it is a natural extension of the usual category of vector spaces and the calculations are very explicit. 
We recall the relevant definitions. \begin{Definition} Let $K$ be a commutative ring. \begin{enumerate} \item A \textit{2-matrix} is an $m\times n$ matrix such that the $(i,j)$-component is a projective module over $K$. \item A \textit{2-homomorphism} from an $m\times n$ 2-matrix $V$ to an $m \times n$ 2-matrix $W$ is an $m\times n$ matrix of $K$-homomorphisms. In other words, the $(i, j)$-component of such a matrix of homomorphisms is a $K$-homomorphism from $V_{ij}$ to $W_{ij}$. \item Two 2-matrices $V$ and $W$ are said to be \textit{isomorphic} if there is a 2-homomorphism $T$ from $V$ to $W$ such that each entry of $T$ is an isomorphism. \end{enumerate} \end{Definition} \begin{Definition} The \textit{Kapranov-Voevodsky 2-vector spaces}, $2\mbox{-}\mathrm{Vect}$, consist of the following. \begin{enumerate} \item The \textit{objects} of $2\mbox{-}\mathrm{Vect}$ are symbols $\{n\}$ for non-negative integers $n$. \item A \textit{1-morphism} from $\{m\}$ to $\{n\}$ is an $(m\times n)$ 2-matrix $V$. We denote a 1-morphism from $\{m\}$ to $\{n\}$ by $V: \{m\}\to \{n\}$. \item A \textit{2-morphism} from a 1-morphism $V:\{m\}\to \{n\}$ to a 1-morphism $W:\{m\}\to \{n\}$ is a 2-homomorphism from $V$ to $W$. \end{enumerate} Usual matrix calculations extend to this setting if we replace multiplication by $\otimes$ and addition by $\oplus$. Horizontal composition is given by matrix multiplication and vertical composition is given by the composition of each entry. With these composition operations, the Kapranov-Voevodsky 2-vector spaces $2\mbox{-}\mathrm{Vect}$ indeed form a bicategory. (For details, see \cite{KV1994} on page 226.) \end{Definition} \section{Review and Modification of the Reshetikhin-Turaev TQFT}\label{sec:Review and Modification of the Reshetikhin-Turaev TQFT} Our construction of a projective pseudo 2-functor from the 2-category $\mathbb{C}o$ to the Kapranov-Voevodsky 2-vector spaces $2\mbox{-}\mathrm{Vect}$ requires the original Reshetikhin-Turaev theory.
In this section we review some of the relevant part of the RT theory. \subsection{Operator Invariant}\label{subsec:operator invariant} One of the key ingredient to construct the RT TQFT is so called the ``operator invariant''. Let $\mathcal{V}$ be a modular category. (More generally, the operator invariant exists for a strict ribbon category $\mathcal{V}$.) Define the category $\mathbb{R}ib$ of the ribbon graphs over $\mathcal{V}$ as follows. The objects of $\mathbb{R}ib$ are finite sequences of the form $((V_1, \epsilon_1), \dots, (V_m, \epsilon_m))$, where $V_i$ is an object of the modular category $\mathcal{V}$ and $\epsilon_i$ is either $\pm1$ for $i=1, \dots, m$. A morphism $\eta \to \eta'$ in $\mathbb{R}ib$ is an isotopy type of a $v$-colored ribbon graph over $\mathcal{V}$ such that $\eta$ (resp. $\eta'$) is the sequence of colors and directions of those bands which hit the bottom (resp. top) boundary intervals. The downward direction near the corresponding boundary corresponds $\epsilon=1$, and $\epsilon=-1$ corresponds to the band directed up. For example, the ribbon graph drawn in Figure \ref{fig:ribbon graph} represents a morphism from $((V_1, -1), (V_2,1), (V_3, 1), (U, 1))$ to $((V_2, -1), (V_1, -1), (V_3, 1), (V, 1))$. \begin{figure} \caption{$v$-colored Ribbon graph} \label{fig:ribbon graph} \end{figure} The composition of morphisms of $\mathbb{R}ib$ is given by concatenation of ribbon graphs. The juxtaposition of ribbon graphs provides $\mathbb{R}ib$ with the structure of a monoidal category. Then it is a fact that there is a unique monoidal functor $F=F_{\mathcal{V}}: \mathbb{R}ib \to \mathcal{V}$ satisfying the following conditions: \begin{enumerate} \item $F$ transforms any object $(V, +1)$ into $V$ and any object $(V, -1)$ into $V^*$ \item $F$ maps a crossing ribbon graph to a braiding, a twist ribbon graph to a twist, a cup like band and a cap like band to a corresponding duality map in $V$. 
(The X-shaped ribbon in Figure \ref{fig:ribbon graph} is one of the crossing ribbons and the once-curled ribbon in the middle is one of the twist ribbons. The others are obtained by changing directions and crossings.) \item For each elementary $v$-colored ribbon graph $\Gamma$, we have $F(\Gamma)=f$, where $f$ is the color of the only coupon of $\Gamma$. (An example of an elementary $v$-colored ribbon graph is the ribbon with one coupon colored by $f$ on the right in Figure \ref{fig:ribbon graph}. In general there may be multiple vertical bands attached to one coupon.) \end{enumerate} (For details, see Theorem I.2.5 in \cite{Turaev10}.) The morphism $F(\Omega)$ associated to a $v$-colored ribbon graph $\Omega$ is called the \textit{operator invariant} of $\Omega$. \subsection{Modular categories} A modular category is an input for the RT TQFT. It is a ribbon category with finitely many simple objects satisfying several axioms. (See \cite{Turaev10} for a full definition and refer to \cite{MR1321145} for a ribbon category.) The objects of a modular category are used as decorations of surfaces as we saw in Section \ref{sec:A 2-category of cobordisms with corners}. Here we set several notations and state an important lemma. Let $\mathcal{V}$ be a modular category with a finite set $\{V_i\}_{i\in I}$ of simple objects, where $I$ is a finite index set $I=\{1,2, \dots, \mathbf{k}\}$. We assume that $V_1$ is the unit object $\mathbb{1}$ of $\mathcal{V}$. The ring $K:=\mathrm{Hom}(\mathbb{1}, \mathbb{1})$ is called the \textit{ground ring}. The ground ring $K$ is known to be commutative. We assume that $\mathcal{V}$ has an element $\mathcal{D}$ called a \textit{rank} of $\mathcal{V}$ given by the formula \begin{equation*}\label{equ:rank} \mathcal{D}^2= \sum_{i\in I} \left( \dim(V_i) \right)^2. \end{equation*} (This assumption is not essential.) Besides the rank $\mathcal{D}$, we need another element $\Delta$ defined as follows.
The modular category $\mathcal{V}$ has a twist morphism $\theta_{V}: V \to V$ for each object $V$ of $\mathcal{V}$. Since $V_i$ is a simple object, the twist $\theta_{V_i}$ acts in $V_i$ as multiplication by a certain $v_i \in K$. Since the twist is an isomorphism, the element $v_i$ is invertible in $K$. We set \begin{equation*}\label{equ:Delta} \Delta=\sum_{i \in I} v_i^{-1} \left( \dim(V_i) \right)^2 \in K. \end{equation*} The elements $\mathcal{D}$ and $\Delta$ are known to be invertible in $K$. The following lemma is very important. \begin{lemma}\label{lem:sum over simple 2} For any objects $V, W$ of the modular category $\mathcal{V}$, there is a canonical $K$-linear splitting \begin{equation*} \mathrm{Hom}(\mathbb{1}, V \otimes W)= \bigoplus_{i \in I} \left( \mathrm{Hom}(\mathbb{1}, V\otimes V_{i}^* )\otimes_K \mathrm{Hom}(\mathbb{1}, V_{i} \otimes W) \right). \end{equation*} The isomorphism $u$ transforming the right-hand side into the left-hand side is given by the formula \begin{equation}\label{equ: cap isom} u_i: x \otimes y \mapsto (\mathrm{id}_V\otimes d_{V_i} \otimes \mathrm{id}_W) (x \otimes y), \end{equation} where $x \in \mathrm{Hom}(\mathbb{1}, V\otimes V^*_{i})$, $y\in \mathrm{Hom}(\mathbb{1}, V_{i} \otimes W)$. The map (\ref{equ: cap isom}) is given graphically as in Figure \ref{fig:the map u_i}. \end{lemma} For a proof, see Lemma I\hspace{-.1em}V.2.2.2 in \cite{Turaev10}. \begin{figure} \caption{The map $u_i$} \label{fig:the map u_i} \end{figure} \subsection{Invariants of 3-manifolds with ribbon graphs} We review an invariant of closed 3-manifolds with ribbon graphs sitting inside the manifolds. When we construct the RT TQFT below, a cobordism will be turned into a closed 3-manifold and we apply this invariant. Let $M$ be a closed connected oriented 3-manifold. Let $\Omega$ be a $v$-colored ribbon graph over $\mathcal{V}$ in $M$. Present $M$ as the result of surgery on $S^3$ along a framed link $L$ with components $L_1, \dots, L_m$.
Fix an arbitrary orientation of $L$. This choice can be shown to be irrelevant. We may assume that $\Omega \subset S^3 \setminus U$, where $U$ is a closed regular neighborhood of $L$ in $S^3$, by applying an isotopy to $\Omega$ if necessary. Denote by $\mathrm{col}(L)$ the set of all mappings from the set of components of $L$ into the index set $I$. For each $\lambda\in \mathrm{col}(L)$, the pair $(L, \lambda)$ determines a colored ribbon graph $\Gamma(L, \lambda)$ formed by $m$ annuli. The cores of these annuli are the oriented circles $L_1, \dots, L_m$; the normal vector field on the cores transversal to the annuli represents the given framing. The color of the $i$-th annulus is $V_{\lambda(L_i)}$. Since the union $\Gamma(L, \lambda)\cup \Omega$ is a $v$-colored ribbon graph and it has no free ends, the operator invariant $F(\Gamma(L, \lambda) \cup \Omega)$ lies in $K=\mathrm{End}(\mathbb{1})$. Set \[ \{L, \Omega\}=\sum_{ \lambda \in \mathrm{col}(L)}\dim(\lambda)F(\Gamma(L, \lambda) \cup \Omega) \in K=\mathrm{End}(\mathbb{1}), \] where \[\dim(\lambda)=\prod_{n=1}^m \dim\left( \lambda(L_n) \right) \mbox{ with } \dim(i):=\dim(V_i).\] Set \begin{equation}\label{equ:invariant tau} \tau(M, \Omega)=\Delta^{\sigma(L)} \mathcal{D}^{-\sigma(L)-m-1} \{L, \Omega\}. \end{equation} Here $\sigma(L)$ is the signature of the surgery link $L$. It is an important fact that $\tau(M, \Omega)$ is a topological invariant of the pair $(M, \Omega)$. The invariant $\tau$ extends to $v$-colored ribbon graphs in any non-connected closed oriented 3-manifold $M$ by the formula \[\tau(M, \Omega)=\prod_{r} \tau(M_r, \Omega_r),\] where $M_r$ runs over the connected components of $M$ and $\Omega_r$ denotes the part of $\Omega$ lying in $M_r$.
The invariant $\tau(M, \Omega)$ satisfies the following multiplicativity law: \begin{equation}\label{equ:tau multiplicative} \tau(M_1 \# M_2, \Omega_1 \sqcup \Omega_2)=\mathcal{D} \tau(M_1, \Omega_1)\tau(M_2, \Omega_2), \end{equation} where $\Omega_1$ and $\Omega_2$ are $v$-colored ribbon graphs in closed connected oriented 3-manifolds $M_1$ and $M_2$, respectively. This can be seen as follows. Let $L$ and $L'$ be surgery links for $M_1$ and $M_2$, respectively. Then the ribbon $L\cup \Omega_1$ and $L'\cup \Omega_2$ sitting inside the same $S^3$ separately is a pair of a surgery link for $M_1 \# M_2$ and $\Omega_1\sqcup \Omega_2$. Then the formula (\ref{equ:tau multiplicative}) follows from the direct calculation using the defining formula (\ref{equ:invariant tau}). \subsection{Construction of the (non-extended) Reshetikhin-Turaev TQFT} Now we review the construction of the original RT TQFT for closed surfaces and cobordisms without corners. Instead of going over the original construction, we modify it so that it adapts in our setting. Let us fix a modular category $\mathcal{V}$. As we noted in Section \ref{subsec:Remark}, the original theory concerns only decorated types of the form \[t=(0, 0; (W_1, \nu_1), \dots, (W_m, \nu_m), 1,1,\dots, 1),\] where $W_i$ is an object of a modular category and $\nu_i$ is either $1$ or $-1$ for $i=1, \dots, m$. We can modify the theory using more general decorated types \begin{equation}\label{equ:type 0 0} t=(0,0; a_1, \dots, a_p), \end{equation} where $a_i$ is non-negative integer or the pair of an object of the modular category $\mathcal{V}$ and a sign $\pm 1$. Note that the first two entries should be zero since these numbers encode the number of boundary components of surfaces, which is zero for the original RT theory. In the following, we review the RT theory replacing the original decorated types with types of the form in (\ref{equ:type 0 0}). 
Let $t=(0,0; a_1, \dots, a_p)$ and $s=(0,0; b_1, \dots, b_q)$ be decorated types whose first two entries are zero. Let $[(M, \phi)]: \Sigma_t \mathbb{R}ightarrow \Sigma_s:{_* \emptyset} \to \emptyset_*$ be a 2-morphism of the 2-category $\mathbb{C}o$. Note that since the first two entries of the types $t, s$ are zero, we have $\Sigma(\phi)=\Sigma_t \sqcup \Sigma_s^-$. The images $\phi(\Sigma_t)$ and $\phi(\Sigma_s^-)$ are denoted by $\partial_{B} M$ and $\partial_{T} M$, respectively as before. The Reshetikhin-Turaev TQFT is a pair of assignments $(\tau, \mathcal{T})$ which will be constructed below so that $\tau(M)$ is a $K$-homomorphism from a projective module $\mathcal{T}(\partial_{B} M)$ to a projective module $\mathcal{T}(\partial_{T} M)$. \subsubsection{Definition of the projective module $\mathcal{T}(S)$} For a decorated surface $S$ of type $t=(0,0; a_1, \dots, a_p)$, the projective module $\mathcal{T}(S)$ is defined as follows. In a non-precise but instructive way, we think that $\mathcal{T}(S)$ is a projective module of all possible colors for the coupon of the ribbon graph $R_t$ in Figure \ref{fig:Rtnew}. To make it precise, we set up several notations. First, we define an object $H_i^a$ of the modular category $\mathcal{V}$ as follows. Here $a$ is either a positive integer or $a=(W, \nu)$ is a signed object of the modular category $\mathcal{V}$. For a positive integer $a$ and $i=(i_1,\dots, i_a)\in I^a$, we set \begin{equation}\label{equ:H_i^a} H^a_i=V_{i_1}\otimes V_{i_2}\otimes \cdots \otimes V_{i_a} \otimes V^*_{i_a}\otimes \cdots \otimes V_{i_2}^* \otimes V^*_{i_1}. \end{equation} If $a=(W, \nu)$ is a signed object of $\mathcal{V}$, we set $I^a$ to be a set of only one element and set $H^a_i=W^{\nu}$ for $i\in I^a=\{i\}$. Here we used the letter $i$ for the unique element of $I^a$ to streamline notations. Note that the tensor product in (\ref{equ:H_i^a}) can be used as a color for rainbow like bands in $R_t$ corresponding to an integer entry $a$. 
For a type $t=(m, n; a_1, a_2, \dots, a_p)$, we write \begin{equation}\label{equ:I^t} I^t:=I^{a_1} \times \cdots \times I^{a_p}. \end{equation} For $\zeta=(\zeta_1, \dots, \zeta_p) \in I^t$ with $\zeta_1=(\zeta_1^1, \dots, \zeta_1^{a_1}), \dots, \zeta_p=(\zeta_p^1, \dots, \zeta_p^{a_p})$, we set \begin{equation}\label{equ:Phi.t.zeta} \Phi(t; \zeta)= H^{a_1}_{\zeta_1} \otimes H^{a_2}_{\zeta_2} \otimes \cdots \otimes H^{a_p}_{\zeta_p}. \end{equation} Note that each choice of $\zeta=(\zeta_1, \dots, \zeta_p) \in I^t$ determines the color of ribbon graph $R_t$ except for the coupon. The coupon can be colored by a morphism from the monoidal unit $\mathbb{1}$ to the object $\Phi(t; \zeta)$. Thus all the possible colors of the coupon of $R_t$ varying $\zeta \in I^t$ is \begin{equation}\label{equ:T(S)} \mathcal{T}(S):=\bigoplus_{\zeta \in I^t} \mathrm{Hom} \big(\mathbb{1}, \Phi(t; \zeta) \big) \end{equation} and we define it to be $\mathcal{T}(S)$. Since $\Phi(t;\zeta)$ is an object of the modular category $\mathcal{V}$, $\mathcal{T}(S)$ is a projective module over the ground ring $K=\mathrm{Hom}(\mathbb{1}, \mathbb{1})$. \subsubsection{Definition of $K$-homomorphism $\tau(M)$}\label{subsec:tau M} Let $[(M, \phi)]: \Sigma_t \Rightarrow \Sigma_s: {_* \emptyset} \to \emptyset_*$ be a 2-morphism of the 2-category $\mathbb{C}o$ and fix a representative $(M, \phi)$. We explain the construction of the corresponding $K$-homomorphism $\tau(M)$ from $\mathcal{T}(\partial_{B} M)$ to $\mathcal{T}(\partial_{T} M)$. Glue the standard handlebody $U_t$ and $U_s^-$ to $M$ along the parametrization $\phi$. The resulting manifold $\tilde{M}$ is a closed 3-manifold with ribbon graph $\tilde{\Omega}$ sitting inside $\tilde{M}$. The ribbon graph $\tilde{\Omega}$ is obtained by gluing the ribbon graph in $M$ and the ribbon graph $R_t$ and $-R_s$ sitting inside the standard handlebodies $U_t$ and $U_s^-$.
The ribbon graph $\tilde{\Omega}$ is not $v$-colored since the cap-like rainbow bands and the cup-like rainbow bands and the coupons of $R_t$ and $-R_s$ in the newly glued handlebodies are not colored. By its definition, each element of the module $\mathcal{T}(\partial_{B} M)$ determines a color of $R_t$ and each element of $\mathcal{T}(\partial_{T} M)^*$ determines a color of $-R_s$. For such a choice of color $y$ of $\tilde{\Omega}$ we obtain a $v$-coloring of $\tilde{\Omega}$. Applying the invariant $\tau$ of $v$-colored ribbon graph in a closed 3-manifold defined in (\ref{equ:invariant tau}), we obtain a certain element $\tau(\tilde{M}, \tilde{\Omega}, y)\in K$. This induces a $K$-homomorphism $\mathcal{T}(\partial_{B} M) \otimes_K \mathcal{T}(\partial_{T} M)^* \to K$. Taking adjoints, we get a $K$-homomorphism \begin{equation}\label{equ:after adjoint} \mathcal{T}(\partial_{B} M) \to \mathcal{T}(\partial_{T} M). \end{equation} To finish the construction of $\tau(M):\mathcal{T}(\partial_{B} M) \to \mathcal{T}(\partial_{T} M)$, we compose the above $K$-homomorphism with an endomorphism $\eta$ defined as follows. Let $S$ be a connected parametrized $d$-surface of type $t=(0,0; a_1, \dots, a_p)$. The endomorphism $\eta(S): \mathcal{T}(S) \to \mathcal{T}(S)$ preserves the splitting (\ref{equ:T(S)}) and acts in each summand $\mathrm{Hom}(\mathbb{1}, \Phi(t;\zeta))$ as multiplication by $\mathcal{D}^{1-g}\dim(\zeta)$, where $g$ is the sum of integer entries of the type $t$ and \[\dim(\zeta):=\prod_{i=1}^p\dim(\zeta_i) \] with \[\dim(\zeta_i):= \begin{cases} \prod_{l=1}^{a_i} \dim(\zeta_i^l) & \mbox{ if } a_i \in \mathbb{Z} \\ 1 & \mbox{ if } a_i \mbox{ is a mark.} \end{cases} \] Recall that $\dim(\zeta_i^l)$ denotes the dimension of the simple object $V_{\zeta_i^l}$.
Now we complete the construction of $\tau(M):\mathcal{T}(\partial_{B} M) \to \mathcal{T}(\partial_{T} M)$ by composing the $K$-homomorphism (\ref{equ:after adjoint}) with $\eta(\partial_{T} M): \mathcal{T}(\partial_{T} M) \to \mathcal{T}(\partial_{T} M)$. The pair $(\tau, \mathcal{T})$ is the \textit{Reshetikhin-Turaev TQFT}. In general, this is not a functor because it has a \textit{gluing anomaly}. \subsubsection{Explicit Formula for the homomorphism $\tau(M)$}\label{subsec:explicit formula for tau(M)} We will develop a technique of presentation of a decorated connected 3-cobordism $M$ by a certain ribbon graph in $\mathbb{R}^3$ and give the explicit formula to calculate the homomorphism $\tau(M)$ using this ribbon graph. First, as we are in the closed case, let $t=(0,0; a_1, \dots, a_p)$ and $s=(0,0; b_1, \dots, b_q)$ be decorated types whose first two entries are zero. Let $[(M, \phi)]: \Sigma_t \Rightarrow \Sigma_s:{_* \emptyset} \to \emptyset_*$ be a 2-morphism of the 2-category $\mathbb{C}o$ and fix a representative $(M, \phi)$. We assume that the cobordism $M$ is connected. As above, we glue the standard handlebodies $U_t$ and $U_s^-$ using the parametrization $\phi$ to $M$. We obtain the closed connected 3-manifold $\tilde{M}$ and the partially colored ribbon graph $\tilde{\Omega}$ sitting inside $\tilde{M}$. Present $\tilde{M}$ as the result of surgery on a framed link $L$ in $S^3$. Namely, we have a homeomorphism from $M_L$ to $\tilde{M}$, where $M_L$ is the resulting 3-manifold of surgery along $L$. Let $H=U_t \cup U_s^- \subset S^3$. We may think that $H$ is a subset of $S^3 \setminus T(L)\subset M_L$, where $T(L)$ is a closed tubular neighborhood of the link $L$. Restricting this homeomorphism, we see that the pair $(M, \phi)$ is equivalent to $(M_L, \mathrm{id})$. Thus we may assume that $\tilde{\Omega}$ is the union of $R_t, -R_s$ and a surgery link and a ribbon graph in $M$ in $\mathbb{R}^2 \times [0, 1] \subset \mathbb{R}^3 \subset S^3$.
Of course, the surgery link might be tangled with $R_t$ and $-R_s$. By isotopy, we pull $R_t$ down so that the top of the coupon of $R_t$ lies in $\mathbb{R} \times \{0\}\times \{0\}$. Also we move $-R_s$ up so that the bottom of the coupon $-R_s$ lies in $\mathbb{R} \times \{0\} \times \{1\}$ and move the rest of the ribbon in $\mathbb{R}^2\times (0, 1)$. See Figure \ref{fig:special ribbon graph}. \begin{figure} \caption{Special ribbon graph} \label{fig:special ribbon graph} \end{figure} Let $\Omega_M$ be a ribbon graph obtained by removing the coupons of $R_t$ and $-R_s$ from $\tilde{\Omega}$. We call $\Omega_M$ \textit{special ribbon graph} for $M$. We now give an explicit formula for computing the homomorphism $\tau(M): \mathcal{T}(\partial_{B} M) \to \mathcal{T}(\partial_{T} M)$ from the operator invariants of the special ribbon graph $\Omega_M$ (after coloring $\Omega_M$). With respect to the splittings (\ref{equ:T(S)}) of $\mathcal{T}(\partial_{B} M)$ and $\mathcal{T}(\partial_{T} M)$, the homomorphism $\tau(M)$ may be presented by a block matrix $\tau_{\zeta}^{\eta}$, where \[\zeta=(\zeta_1, \dots, \zeta_p) \in I^t \mbox{ with } \zeta_1=(\zeta_1^1, \dots, \zeta_1^{a_1}) \in I^{a_1}, \dots, \zeta_p=(\zeta_p^1, \dots, \zeta_p^{a_p})\in I^{a_p}\] and \[\eta=(\eta_1, \dots, \eta_q) \in I^s \mbox{ with } \eta_1=(\eta_1^1, \dots, \eta_1^{b_1}) \in I^{b_1}, \dots, \eta_q=(\eta_q^1, \dots, \eta_q^{b_q})\in I^{b_q}.\] Each such $\zeta\in I^t$ determines a coloring of the cap-like rainbow bands of $R_t$ in $\Omega_M$. Similarly each such $\eta \in I^s$ determines a coloring of the cup-like rainbow bands of $-R_s$ in $\Omega_M$. Therefore a pair $(\zeta, \eta)\in I^t \times I^s$ determines a coloring of uncolored bands of $\Omega_M$. Note that the surgery link $L$ in $\Omega_M$ is not colored. (More precisely, the ribbon graph obtained by thickening the surgery link along its framing is not colored.)
Every element $\lambda \in \mathrm{col}(L)$ determines a coloring of the surgery ribbon. Thus every element $(\zeta, \eta, \lambda)\in I^t \times I^s \times \mathrm{col}(L)$ determines a $v$-coloring of $\Omega_M$. Denote the resulting $v$-colored ribbon graph in $\mathbb{R}^3$ by $(\Omega_M, \zeta, \eta, \lambda)$. Consider its operator invariant $F(\Omega_M, \zeta, \eta, \lambda): \Phi(t; \zeta)\to \Phi(s; \eta)$ defined in Section \ref{subsec:operator invariant}. The composition of a morphism $\mathbb{1} \to \Phi(t; \zeta)$ with $F(\Omega_M, \zeta, \eta, \lambda)$ defines a $K$-linear homomorphism $\mathrm{Hom}(\mathbb{1}, \Phi(t; \zeta))\to \mathrm{Hom}(\mathbb{1}, \Phi(s;\eta))$ denoted by $F_0(\Omega_M, \zeta, \eta, \lambda)$. It follows from the very definition of $\tau(M)$ given in Section \ref{subsec:tau M} that \begin{equation}\label{equ:tau zeta eta} \tau_{\zeta}^{\eta}= \Delta^{\sigma(L)} \mathcal{D}^{-g^+ -\sigma(L) - m} \dim(\eta) \sum_{\lambda \in \mathrm{col}(L)} \dim(\lambda) F_0(\Omega_M, \zeta, \eta, \lambda), \end{equation} where $g^+$ is the sum of the integer entries of $s$, $m$ is the number of components of the surgery link $L$, and \[\dim(\eta):=\prod_{i=1}^q\dim(\eta_i) \] with \[\dim(\eta_i):= \begin{cases} \prod_{l=1}^{b_i} \dim(\eta_i^l) & \mbox{ if } b_i \in \mathbb{Z} \\ 1 & \mbox{ if } b_i \mbox{ is a mark.} \end{cases} \] \section{An extended TQFT $\mathcal{X}$}\label{sec:An extended TQFT} Now we proceed to construct a projective pseudo 2-functor $\mathcal{X}$ from the 2-category $\mathbb{C}o$ of decorated cobordisms with corners to the Kapranov-Voevodsky 2-vector spaces $2\-\mathrm{Vect}$ that will be our extension of the Reshetikhin-Turaev TQFT functor. For our convention of the language of 2-category, see Appendix. As in the Reshetikhin-Turaev theory, we fix a modular category $\mathcal{V}$ with $\mathbf{k}$ simple objects $V_1, \dots, V_{\mathbf{k}}$. Let $I$ be the index set of simple objects, hence its cardinality is $|I|=\mathbf{k}$.
Let $K$ denote the ground ring $\mathrm{Hom}(\mathbb{1},\mathbb{1})$. \subsection{$\mathcal{X}$ on objects} Each object $\nstand{n}$ of $\mathbb{C}o$ for a natural number $n$ is mapped by $\mathcal{X}$ to the object $\{\mathbf{k}^n\}$ of $2\-\mathrm{Vect}$. The formal symbol objects ${_* \emptyset}$ and $\emptyset_*$ are mapped to the object $\{1\}$. \subsection{$\mathcal{X}$ on 1-morphisms}\label{sec:X on 1-morphisms} For a 1-morphism $\Sigma_t: \nstand{m} \to \nstand{n}$ with a type \[t=(m, n;a_1, a_2, \dots, a_p),\] we need to define a $\mathbf{k}^m\times \mathbf{k}^n$ 2-matrix $\mathcal{X}(\Sigma_t)$. Using the lexicographic order of the Cartesian powers $I^m$ and $I^n$, pick the $i$-th element of $I^m$ and the $j$-th element of $I^n$. Abusing notation, we write the $i$-th element of $I^m$ as $i=(i_1, i_2, \dots, i_m)$ and the $j$-th element of $I^n$ as $j=(j_1, \dots, j_n)$. For each decorated type $t$, we defined the ribbon graph $R_t$ (Figure \ref{fig:Rtnew}). In the ribbon graph $R_t$, there are $m=L(t)$ uncolored bands on the left and $n=R(t)$ uncolored bands on the right. Those bands are bent for the convenience of horizontal gluing. From now on we just draw vertical bands instead of bent ones for the sake of simple graphics as in Figure \ref{fig:non bend Rt}. \begin{figure} \caption{The ribbon graph $R_t$} \label{fig:non bend Rt} \end{figure} Recall that those left bands are ordered from the left and those right bands are ordered from the right. We color the $k$-th left uncolored band with the simple object $V_{i_k}$ for $k=1, \dots, m$. Also we color the $l$-th right uncolored band with the simple object $V_{j_l}$ for $l=1, \dots, n$. Let us denote the ribbon graph obtained in this way by $R_t(i, j)$. The only uncolored ribbons in $R_t(i,j)$ are the cap-like bands. See Figure \ref{fig:ijcolored}.
\begin{figure} \caption{The ribbon graph $R_t(i,j)$} \label{fig:ijcolored} \end{figure} Fixing $i\in I^m$ and $j\in I^n$, we consider all the possible colors of the uncolored cap-like bands by simple objects. The $(i, j)$-component module $\mathcal{X}(\Sigma_t)_{ij}$ of the 2-matrix $\mathcal{X}(\Sigma_t)$ will be the projective module of all the possible colors of the coupon of $R_t(i ,j)$. Recall the definition of the object $H_i^a$ of the modular category $\mathcal{V}$ defined in (\ref{equ:H_i^a}). For a positive integer $a$ and $i=(i_1,\dots, i_a)\in I^a$, the object was defined to be \begin{equation}\label{equ:Hai} H^a_i=V_{i_1}\otimes V_{i_2}\otimes \cdots \otimes V_{i_a} \otimes V^*_{i_a}\otimes \cdots \otimes V_{i_2}^* \otimes V^*_{i_1}. \end{equation} If $a=(W, \nu)$ is a signed object of $\mathcal{V}$, we set $I^a$ to be a set of only one element and set $H^a_i=W^{\nu}$ for $i\in I^a$. Note that the tensor product in (\ref{equ:Hai}) can be used as a color for rainbow like bands corresponding to an integer entry $a$ in a decorated type. For $\zeta=(\zeta_1, \dots, \zeta_p) \in I^t$ with $\zeta_1=(\zeta_1^1, \dots, \zeta_1^{a_1}), \dots, \zeta_p=(\zeta_p^1, \dots, \zeta_p^{a_p})$, recall the notation given in (\ref{equ:Phi.t.zeta}): \begin{equation*} \Phi(t; \zeta)= H^{a_1}_{\zeta_1} \otimes H^{a_2}_{\zeta_2} \otimes \cdots \otimes H^{a_p}_{\zeta_p}. \end{equation*} The next one is a new notation. We set \begin{equation}\label{equ: Phi for e-type} \Phi(t; \zeta; i, j)=V^*_{i_1} \otimes V^*_{i_2} \otimes \cdots \otimes V^*_{i_m} \otimes \Phi(t, \zeta) \otimes V_{j_n}\otimes V_{j_{n-1}}\otimes \cdots \otimes V_{j_1}. \end{equation} Note that each choice of $\zeta=(\zeta_1, \dots, \zeta_p) \in I^t$ determines a color of ribbon graph $R_t(i, j)$ via $\Phi(t; \zeta; i, j)$ except for the coupon. The coupon can be colored by a morphism from the monoidal unit $\mathbb{1}$ to the object $\Phi(t; \zeta; i, j)$.
Thus all the possible colors of the coupon of $R_t(i, j)$ varying $\zeta \in I^t$ is \begin{equation}\label{equ:tsigma} \mathcal{X}(\Sigma_t)_{ij}=\bigoplus_{\zeta \in I^t} \mathrm{Hom} \big(\mathbb{1}, \Phi(t; \zeta;i,j) \big) \end{equation} and we define this to be the $(i, j)$-component projective module $\mathcal{X}(\Sigma_t)_{ij}$. We also need to specify the assignment of $\mathcal{X}$ on each formal identity 1-morphism $\mathrm{id}_n: \nstand{n} \to \nstand{n}$ with an integer $n$. The $\mathbf{k}^n \times \mathbf{k}^n$ 2-matrix $\mathcal{X}(\mathrm{id}_n)$ is defined to be the identity $\mathbf{k}^n \times \mathbf{k}^n$ 2-matrix. Namely each diagonal entry of the 2-matrix $\mathcal{X}(\mathrm{id}_n)$ is the ground ring $K$ and each entry off the diagonal is zero. \subsection{$\mathcal{X}$ on 2-morphisms} Let $[M]: \Sigma_{t} \Rightarrow \Sigma_{s}: \nstand{m} \to \nstand{n}$ be a 2-morphism of $\mathbb{C}o$. We need to define a $K$-homomorphism $\mathcal{X}([M])_{ij}$ from $\mathcal{X}(\Sigma_{t})_{ij}$ to $\mathcal{X}(\Sigma_{s})_{ij}$ for each $i\in I^m$ and $j\in I^n$. This homomorphism will be obtained by applying the Reshetikhin-Turaev TQFT to the decorated cobordism obtained by ``capping'' or ``filling'' the left and the right boundaries of $M$. \subsubsection{Filling $M$ by the standard handlebodies} Let $(M, \phi)$ be a representative of the 2-morphism $[M]: \Sigma_{t} \Rightarrow \Sigma_{s}: \nstand{m} \to \nstand{n}$. Here $\phi$ is a parametrization of the boundary $\partial M$. Recall that using the parametrization, we can form the surface $\Sigma(\phi)$ by gluing standard surfaces. Then the parametrization $\phi$ can be regarded as a homeomorphism from $\Sigma(\phi)$ to $\partial M$. Consider the standard handlebodies $U_t$, $U_s^-$ and solid cylinders $D_m$, $D_n$ (see Figure \ref{fig:Dn}). Their boundaries are capped standard surfaces.
Then the gluing map induced by the parametrization extends to the disks enclosed by boundary circles of the standard surfaces. Gluing $U_t$, $U_s^-$, $D_m$, and $D_n$ along this homeomorphism, we obtain a 3-manifold whose boundary is $\Sigma(\phi)$. We also can assume that the ribbon graphs glue well under this gluing. Let $\mathcal{M}(t,s)$ be the manifold obtained by the procedure and we call it the \textit{standard handlebody} for the pair $(t,s)$. By the Alexander trick, the manifold $\mathcal{M}(t, s)$ is defined up to homeomorphism. The manifold $\mathcal{M}(t, s)$ is equipped with a ribbon graph obtained from the ribbon graph $R_t$ and $R_s$ in $U_t$ and $U_s^-$ respectively, and the vertical bands in $D_m$, $D_n$ joining uncolored bands along the embedded disks. We denote the ribbon graph by $R(t,s)$ and call this ribbon graph in $\mathcal{M}(t, s)$ the \textit{standard ribbon graph} for the pair $(t, s)$. \begin{figure} \caption{The standard handlebody $\mathcal{M} \label{fig:standard handlebody with the standard ribbon} \end{figure} Now we glue the manifolds $M$ and $\mathcal{M}(t, s)$ along the boundaries by the parametrization $\phi$ and obtain the closed 3-manifold \begin{equation}\label{equ: tilde M} \mathcal{F}ill(M):=M\cup_{\phi} \mathcal{M}(t, s). \end{equation} In a sense, we ``filled'' the boundary of $M$ by the standard handlebody $\mathcal{M}(t, s)$. It is equipped with a ribbon graph coming from a ribbon graph in $M$ and the standard ribbon graph $R(t, s)$ in $\mathcal{M}(t, s)$. The same manifold can be obtained differently as follows. First, we glue cylinders $D_m$ and $D_n$ to $M$ via $\phi$, which we denote by \begin{equation} \mathrm{Fill}_{\mathrm{c}}(M):=M\cup_{\phi}(D_m\sqcup D_n). \end{equation} Namely, we filled only cylindrical parts of $M$. The subscript $\mathrm{c}$ in $\mathrm{Fill}_{\mathrm{c}}$ stands for ``cylinder''.
Then the boundary of $M\cup_{\phi}(D_m\sqcup D_n)$ is homeomorphic to the disjoint union of the capped standard boundaries $\hat{\Sigma}_t \sqcup \hat{\Sigma}_s^-$. The gluing homeomorphism is given by the parametrization $\phi$ extended to embedded disks. Thus $\mathrm{Fill}_{\mathrm{c}}(M)$ is a usual parametrized cobordism except that the ribbons in $D_m\sqcup D_n$ are not colored. If we glue the standard handlebodies $U_t$ and $U_s^-$ via the parametrization we obtain the same manifold $\mathcal{F}ill(M)$ as in (\ref{equ: tilde M}). \subsubsection{Definition of a $K$-homomorphism $\mathcal{X}([M])_{ij}$} Fix the $i$-th element $i=(i_1, i_2, \dots, i_m)$ of $I^m$ and the $j$-th element $j=(j_1, \dots, j_n)$ of $I^n$ as before. We will construct a $K$-homomorphism $\mathcal{X}([M])_{ij}$ from $\mathcal{X}(\Sigma_{t})_{ij}$ to $\mathcal{X}(\Sigma_{s})_{ij}$. Let us give colors to the uncolored ribbon graphs of $D_m \sqcup D_n$ in $\mathrm{Fill}_{\mathrm{c}}(M)=M\cup_{\phi}(D_m\sqcup D_n)$ as follows. Order the uncolored bands in $D_m$ from the left and order the bands in $D_n$ from the right according to the order of the circles in the boundary surface. We color the $k$-th left uncolored band with the simple object $V_{i_k}$ for $k=1, \dots, m$. Also we color the $l$-th right uncolored band with the simple object $V_{j_l}$ for $l=1, \dots, n$. Then $\mathrm{Fill}_{\mathrm{c}}(M)$ together with this $v$-colored ribbon graph in $\mathrm{Fill}_{\mathrm{c}}(M)$, which we denote by $\mathrm{Fill}_{\mathrm{c}}(M)_{ij}$, is the normal cobordism of the Reshetikhin-Turaev type. Applying the Reshetikhin-Turaev TQFT, we obtain a $K$-homomorphism $\tau(\mathrm{Fill}_{\mathrm{c}}(M)_{ij})$ from $\mathcal{X}(\Sigma_t)_{ij}$ to $\mathcal{X}(\Sigma_s)_{ij}$. \begin{lemma} If $(M, \phi)$ is equivalent to $(N, \psi)$, then $\tau(\mathrm{Fill}_{\mathrm{c}}(M)_{ij})=\tau(\mathrm{Fill}_{\mathrm{c}}(N)_{ij})$.
\end{lemma} \begin{proof} Since $M$ and $N$ are equivalent, we see that $\mathrm{Fill}_{\mathrm{c}}(M)_{ij}$ is $d$-homeomorphic to $\mathrm{Fill}_{\mathrm{c}}(N)_{ij}$. Since the Reshetikhin-Turaev TQFT $\tau$ is invariant under $d$-homeomorphisms, we have the result. (To see this invariance, note that $d$-homeomorphic cobordisms have the same special ribbon graph representation.) \end{proof} Thus the $K$-homomorphism $\tau(\mathrm{Fill}_{\mathrm{c}}(M)_{ij})$ is independent of the choice of a representative of $[M]$. Hence we can define the $(i,j)$-entry of the 2-matrix $\mathcal{X}(M)$ to be \begin{equation}\label{equ:XM ij} \mathcal{X}([M])_{ij}:=\tau(\mathrm{Fill}_{\mathrm{c}}(M)_{ij}). \end{equation} For each formal identity $\mathrm{id}_{\Sigma_t} \in \mathbb{C}o(\nstand{m}, \nstand{n})$, $\mathcal{X}$ assigns the $\mathbf{k}^m$ by $\mathbf{k}^n$ 2-homomorphism matrix whose $(i,j)$-entry is the identity self-homomorphism of the module $\mathcal{X}(\Sigma_t)_{ij}$. For the formal horizontal unit 2-morphism $\mathrm{id}_{\mathrm{id}_n}$, $\mathcal{X}$ assigns the $\mathbf{k}^n$ by $\mathbf{k}^n$ identity 2-homomorphism. Namely, each of its diagonal entries is the identity self-homomorphism of the base ring $K$ and off-diagonal entries are zero. \subsubsection{Representation by a ribbon graph} The rest of the paper will be devoted to proving that the assignment $\mathcal{X}$ is indeed a projective pseudo 2-functor. The key ingredient of the proof is the explicit formula to calculate the homomorphism $\mathcal{X}(M)_{ij}$ obtained by representing $M$ by a special ribbon graph as in Section \ref{subsec:explicit formula for tau(M)}. We define a \textit{special ribbon graph} for $(M, i, j)$ to be a special ribbon graph for $\mathrm{Fill}_{\mathrm{c}}(M)_{ij}$. In place of $M$, $R_t$, and $-R_s$ in Section \ref{subsec:explicit formula for tau(M)}, we just need to use $\mathrm{Fill}_{\mathrm{c}}(M)_{ij}$, $R_t(i,j)$, and $-R_s(i,j)$.
As noted above, gluing handlebodies to fill the boundary of $\mathrm{Fill}_{\mathrm{c}}(M)_{ij}$ produces $\mathcal{F}ill(M)$ with uncolored vertical bands colored according to $(i,j)$. The $(i,j)$-colored $\mathcal{F}ill(M)$ is denoted by $\mathcal{F}ill(M)_{ij}$ and the $(i, j)$-colored standard ribbon graph $R(t,s)$ is denoted by $R(t,s)_{ij}$. By changing to an equivalent manifold if necessary, we may assume that a special ribbon graph for $(M, i,j)$, which is denoted by $\Omega_{(M, i,j)}$, is a disjoint union of the standard ribbon graph $R(t,s)_{ij}$ and a surgery link $L=L_1\cup \cdots\cup L_{\mu}$ and a ribbon graph of $M$. The ribbon graph obtained by replacing $R(t,s)_{ij}$ by $R(t,s)$ is denoted by $\Omega_M$. This ribbon graph $\Omega_M$ is thus obtained by removing the colors of left and right vertical bands. Note that if $M$ has no corners then $\Omega_M$ is the same as the definition of special ribbon graph given in Section \ref{subsec:explicit formula for tau(M)}. In summary we have the following explicit formula for the $(\zeta, \eta)$-block matrix \begin{align}\label{equ:tau zeta eta extended} (\mathcal{X}(M)_{ij})_{\zeta}^{\eta}&=\tau(\mathrm{Fill}_{\mathrm{c}}(M)_{ij})_{\zeta}^{\eta} \notag \\ &= \Delta^{\sigma(L)} \mathcal{D}^{-g^+ -\sigma(L) - \mu} \dim(\eta) \sum_{\lambda \in \mathrm{col}(L)} \dim(\lambda) F_0(\Omega_{(M, i, j)}, \zeta, \eta, \lambda), \end{align} where $g^+$ is the sum of the integer entries of $s$ and \[\dim(\eta):=\prod_{i=1}^q\dim(\eta_i) \] with \[\dim(\eta_i):= \begin{cases} \prod_{l=1}^{b_i} \dim(\eta_i^l) & \mbox{ if } b_i \in \mathbb{Z} \\ 1 & \mbox{ if } b_i \mbox{ is a mark.} \end{cases} \] \section{Main Theorem}\label{sec:Main Theorem} So far we defined the 2-category of decorated cobordisms with corners $\mathbb{C}o$, where cobordisms are decorated by a modular category $\mathcal{V}$.
We constructed the assignment $\mathcal{X}$ from $\mathbb{C}o$ to the Kapranov-Voevodsky 2-vector spaces $2\-\mathrm{Vect}$. The explicit formula was obtained by expressing a cobordism with corners $M$ by the special ribbon graph $\Omega_M$ in $S^3$. Now we prove the following main theorem of the current paper. \begin{thm}\label{thm:main theorem} The assignment $\mathcal{X}$ defined above is a projective pseudo 2-functor from $\mathbb{C}o$ to $2\-\mathrm{Vect}$. \end{thm} For our convention of the language of 2-category, see Appendix. This theorem follows from several propositions below. The idea of the proof is that we reduce the gluings of cobordisms to the gluing of special ribbon graphs and work with the explicit formula. \subsection{Vertical projective Functor} The vertical composition is not preserved by $\mathcal{X}$. This is because of an anomaly in the Reshetikhin-Turaev TQFT. Thus we instead claim that $\mathcal{X}$ is a projective functor on the hom-category $\mathbb{C}o(\nstand{m}, \nstand{n})$. For a projective functor to exist, the target category should be a $K$-module for some commutative ring $K$. In our case, the target category is the hom-category $2\-\mathrm{Vect}(\mathbf{k}^m, \mathbf{k}^n)$. This hom-category is a $K=\mathrm{Hom}(\mathbb{1},\mathbb{1})$-module by multiplying an element $k\in K$ component-wise: \[ k\cdot (f_{ij})_{ij}:=(kf_{ij})_{ij},\] where $(f_{ij})_{ij}$ is a 2-morphism in $2\-\mathrm{Vect}$. First we state results regarding the anomaly of the original RT TQFT. Let $M_1$ and $M_2$ be composable decorated cobordisms (without corners). (As always in this paper, we assume the source and the target boundary surfaces are both connected.) Let $L_1$, $L_2$ and $L$ be surgery links for special ribbon graphs $\Omega_{M_1}$, $\Omega_{M_2}$ and $\Omega_{M_1\cdot M_2}$ of $M_1$, $M_2$ and $M_1\cdot M_2$, respectively.
\begin{lemma} Using notations above, the vertical concatenation $\Omega_{M_1}\cdot \Omega_{M_2}$ of the special ribbon graphs $\Omega_{M_1}$ and $\Omega_{M_2}$ is a special ribbon graph for $M_1 \cdot M_2$. \end{lemma} \begin{lemma}\label{lem:vertical anomaly of original RT} With the same notations as above, we have \[\tau(M_1\cdot M_2)= k(M_1, M_2)\tau(M_1)\cdot \tau(M_2),\] where \[k(M_1, M_2)=(\mathcal{D} \Delta)^{\sigma(L_1)+\sigma(L_2)-\sigma(L)}. \] \end{lemma} The proofs of both lemmas can be found in \cite[Lemma I\hspace{-.1em}V 2.1.2]{Turaev10}. \begin{prop}[Vertical composition]\label{lem:2vertical composition} Let $[(M_1, \phi_1)]:\Sigma_{t_1}\Rightarrow \Sigma_{t_2}: \nstand{m} \to \nstand{n}$ and $[(M_2, \phi_2)]:\Sigma_{t_2}\Rightarrow \Sigma_{t_3}: \nstand{m} \to \nstand{n}$ be (non-formal) 2-morphisms of $\mathbb{C}o$ so that the target 1-morphism of $[M_1]$ is equal to the source 1-morphism of $[M_2]$. Then we have \[\mathcal{X}(M_1\cdot M_2)=k(M_1, M_2) \mathcal{X}(M_1)\cdot \mathcal{X}(M_2),\] where $k(M_1, M_2) \in K$ is a gluing anomaly of the pair $(M_1, M_2)$ given as follows. Let $L_1$, $L_2$ and $L$ be surgery links of $\Omega_{M_1}$, $\Omega_{M_2}$ and $\Omega_{M_1} \cdot \Omega_{M_2}$, respectively. Then \[k(M_1, M_2)=(\mathcal{D} \Delta)^{\sigma(L_1)+\sigma(L_2)-\sigma(L)}.\] \end{prop} \begin{proof} It suffices to show that the equality \begin{equation*}\label{equ:XMM} \mathcal{X}(M_1 \cdot M_2)_{ij} =k(M_1, M_2) \left( \mathcal{X}(M_1) \cdot \mathcal{X}(M_2) \right)_{ij} \end{equation*} holds for $i\in I^m$ and $j\in I^n$. Note that the surgery links $L_1$, $L_2$ and $L$ are independent of the indices $i$ and $j$, so is $k(M_1, M_2)$, hence the result follows.
By the definition of $\mathcal{X}(M)_{ij}$ (see (\ref{equ:XM ij})), the above equality is equivalent to showing the following equality: \begin{equation}\label{equ:tau fill MM} \tau\left(\mathrm{Fill}_{\mathrm{c}}(M_1 \cdot M_2)_{ij}\right)=k(M_1, M_2) \tau\left(\mathrm{Fill}_{\mathrm{c}}(M_1)_{ij}\right) \circ \tau\left(\mathrm{Fill}_{\mathrm{c}}(M_2)_{ij}\right). \end{equation} Since filling corners and vertical gluing commute, we have \[\mathrm{Fill}_{\mathrm{c}}(M_1 \cdot M_2)_{ij} = \mathrm{Fill}_{\mathrm{c}}(M_1)_{ij} \cdot \mathrm{Fill}_{\mathrm{c}}(M_2)_{ij}.\] By definition, the special ribbon graphs $(\Omega_{M_1})_{ij}$ and $(\Omega_{M_2})_{ij}$ represent the cobordisms $\mathrm{Fill}_{\mathrm{c}}(M_1)_{ij}$ and $\mathrm{Fill}_{\mathrm{c}}(M_2)_{ij}$, respectively. Note that the surgery links of $(\Omega_{M_1})_{ij}$ and $(\Omega_{M_2})_{ij}$ are the same as the surgery links of $\Omega_{M_1}$ and $\Omega_{M_2}$ since the only difference between them is the colors of the left and the right vertical bands, which are not surgery links. Thus, the equality (\ref{equ:tau fill MM}) follows from Lemma \ref{lem:vertical anomaly of original RT}. \end{proof} If one of $M_1$ and $M_2$ is the vertical identity, then we set $k(M_1, M_2)=1 \in K$. \begin{lemma}\label{lem:vertical anomaly associativity} Suppose that $M_1$, $M_2$, and $M_3$ are three vertically composable 2-morphisms of $\mathbb{C}o$. Namely, we can form the 2-morphism $M_1 \cdot M_2 \cdot M_3$. Then we have \begin{equation}\label{equ:vertical anomaly} k(M_1, M_2 \cdot M_3)k(M_2, M_3)=k(M_1 \cdot M_2, M_3)k(M_1, M_2). \end{equation} \begin{proof} For $i=1,2,3$ present the cobordisms $M_i$ by a special ribbon graph $\Omega_{M_i}$ and let $L_i$ be the surgery link in $\Omega_{M_i}$. The cobordism $M_1\cdot M_2$ is represented by the special ribbon graph $\Omega_{M_1} \cdot \Omega_{M_2}$. Let $L_{12}$ be a part of the surgery link of $\Omega_{M_1} \cdot \Omega_{M_2}$ that is not in $L_1 \cup L_2$.
Namely, the surgery link $L_{12}$ consists of the ribbons that newly emerge when we concatenate the ribbons $\Omega_{M_1}$ and $\Omega_{M_2}$. Similarly, let $L_{23}$ be the surgery link that is not in $L_2 \cup L_3$. Let $L_{123}$ be the surgery link of $\Omega_{M_1}\cdot \Omega_{M_2} \cdot \Omega_{M_3}$, namely $L_{123}$ is the union of all of the above surgery links. By Proposition \ref{lem:2vertical composition}, anomalies can be computed by signatures of surgery links. Thus, the equality (\ref{equ:vertical anomaly}) is equivalent to the equality \begin{align*} [\sigma(L)+\sigma(L_3)-\sigma(L_{123})] +[ \sigma(L_1)+\sigma(L_2)-\sigma(L)]\\ =[\sigma(L_1)+\sigma(L')-\sigma(L_{123})]+[\sigma(L_2) +\sigma(L_3)-\sigma(L')], \end{align*} where $L=L_1\cup L_2\cup L_{12}$ is the surgery link of $\Omega_{M_1}\cdot \Omega_{M_2}$ and $L'=L_2\cup L_3\cup L_{23}$ is the surgery link of $\Omega_{M_2}\cdot \Omega_{M_3}$. Since both sides are equal to $\sigma(L_1)+\sigma(L_2)+\sigma(L_3)-\sigma(L_{123})$, the equality holds. \end{proof} \end{lemma} The results of Proposition \ref{lem:2vertical composition} and Lemma \ref{lem:vertical anomaly associativity} can be summarized into: \begin{prop}\label{prop:vertical projective functor} The assignment $\mathcal{X}$ is a projective functor from the hom-category $\mathbb{C}o(\nstand{m}, \nstand{n})$ to the hom-category $2\-\mathrm{Vect}(\mathbf{k}^m, \mathbf{k}^n)$. \end{prop} \subsection{Horizontal Axioms} Now we are going to study how the assignment $\mathcal{X}$ behaves on horizontal gluings. For each type $t=(m,n; a_1, \cdots, a_p)$, recall the following notations from Section \ref{sec:X on 1-morphisms}: \begin{equation*} \Phi(t; \zeta)= H^{a_1}_{\zeta_1} \otimes H^{a_2}_{\zeta_2} \otimes \cdots \otimes H^{a_p}_{\zeta_p} \end{equation*} and \begin{equation*} \Phi(t; \zeta; i, j)=V^*_{i_1} \otimes V^*_{i_2} \otimes \cdots \otimes V^*_{i_m} \otimes \Phi(t, \zeta) \otimes V_{j_n}\otimes V_{j_{n-1}}\otimes \cdots \otimes V_{j_1}.
\end{equation*} Also recall that we defined the module \begin{equation*} \mathcal{X}(\Sigma_t)_{ij}=\bigoplus_{\zeta \in I^t} \mathrm{Hom} \big(\mathbb{1}, \Phi(t; \zeta;i,j) \big). \end{equation*} \begin{prop}\label{prop:2-functor on 1-morphisms} Let $\Sigma_{t_1}: \nstand{l} \to \nstand{m}$ and $\Sigma_{t_2}: \nstand{m} \to \nstand{n}$ be composable 1-morphisms of $\mathbb{C}o$. Then the 2-matrix $\mathcal{X}(\Sigma_{t_1}\circ \Sigma_{t_2})$ is canonically isomorphic to the 2-matrix $\mathcal{X}(\Sigma_{t_1})\mathcal{X}(\Sigma_{t_2})$. \end{prop} \begin{proof} The $(h, j)$-component of the product of the 2-matrices $\mathcal{X}(\Sigma_{t_1})$ and $\mathcal{X}(\Sigma_{t_2})$ is the module \begin{align}\label{equ:product of 2matrices} \notag \left( \mathcal{X}(\Sigma_{t_1}) \circ \mathcal{X}(\Sigma_{t_2}) \right)_{hj}=\bigoplus_{1 \leq i \leq \mathbf{k}^m} \mathcal{X}(\Sigma_{t_1})_{h, i}\otimes \mathcal{X}(\Sigma_{t_2})_{i, j}\\ = \bigoplus_{1 \leq i \leq \mathbf{k}^m} \left[ \bigoplus_{\zeta \in I^{t_1}} \mathrm{Hom} \big(\mathbb{1}, \Phi(t_1; \zeta;h,i) \big) \otimes \bigoplus_{\eta \in I^{t_2}} \mathrm{Hom} \big(\mathbb{1}, \Phi(t_2; \eta;i,j) \big) \right]. \end{align} Using Lemma \ref{lem:sum over simple 2} we sum over $i_1$ and the module (\ref{equ:product of 2matrices}) is isomorphic to \begin{equation}\label{equ:product of 2matrices second} \bigoplus_{i=(i_2, \dots, i_{m})\in I^{m-1}} \bigoplus_{\zeta \in I^{t_1}, \eta \in I^{t_2}}\mathrm{Hom}\left(\mathbb{1}, U(i, \zeta, \eta) \right), \end{equation} where $U(i, \zeta, \eta)$ is the following module.
\begin{multline*} V^*_{h_1}\otimes \cdots \otimes V^*_{h_l} \otimes \Phi(t_1, \zeta) \\ \otimes V_{i_m}\otimes V_{i_{m-1}}\otimes \cdots \otimes V_{i_{2}} \otimes V_{i_{2}}^*\otimes V_{i_{3}}^* \otimes \cdots \otimes V_{i_m}^* \\ \otimes \Phi(t_2, \eta) \otimes V_{j_n}\otimes \cdots \otimes V_{j_1} \end{multline*} Note that we have the equality \[\bigoplus_{i=(i_2, \dots, i_{m})\in I^{m-1}} \bigoplus_{\zeta \in I^{t_1}, \eta \in I^{t_2}}=\bigoplus_{\xi \in I^{t_1 \circ t_2} }\] and for $\xi=(\zeta, i, \eta)\in I^{t_1\circ t_2}$ with $\zeta \in I^{t_1}, i\in I^{m-1}, \eta \in I^{t_2}$, the object $\Phi(t_1 \circ t_2, \xi)$ is equal to \[\Phi(t_1, \zeta)\otimes V_{i_m}\otimes V_{i_{m-1}}\otimes \cdots \otimes V_{i_{2}} \otimes V_{i_{2}}^*\otimes V_{i_{3}}^* \otimes \cdots \otimes V_{i_m}^* \otimes \Phi(t_2, \eta).\] Thus the module (\ref{equ:product of 2matrices second}) is equal to the module \begin{align*} &\bigoplus_{\xi \in I^{t_1 \circ t_2} }\mathrm{Hom}(\mathbb{1}, V^*_{h_1}\otimes \cdots \otimes V^*_{h_l}\otimes \Phi(t_1 \circ t_2, \xi) \otimes V_{j_n}\otimes \cdots \otimes V_{j_1}) \\ &=\bigoplus_{\xi \in I^{t_1 \circ t_2} }\mathrm{Hom}(\mathbb{1}, \Phi(t_1\circ t_2; \xi; h,j)) =\mathcal{X}(\Sigma_{t_1\circ t_2})_{hj}. \end{align*} Note that the isomorphism from $\mathcal{X}(\Sigma_{t_1}) \circ \mathcal{X}(\Sigma_{t_2})$ to $\mathcal{X}(\Sigma_{t_1\circ t_2})$ is given by the isomorphism $u$ of Lemma \ref{lem:sum over simple 2}. This fact will be used in the proof of Lemma \ref{prop:2horizontal} below. \end{proof} We saw that vertical composition of cobordisms corresponds to concatenation of their special ribbon graphs. This correspondence was the key observation to prove the projective functoriality of $\mathcal{X}$. Similarly, to investigate how horizontal composition behaves under the map $\mathcal{X}$, we first need to study how horizontal composition of cobordisms can be expressed as an operation on the special ribbon graph side.
The obvious guess is to juxtapose two special ribbon graphs. But juxtaposing does not correspond to horizontal composition of cobordisms. This can be seen, for instance, by noting that the type of the bottom surface is not the desired one. Let $[M]: \Sigma_{t_1} \Rightarrow \Sigma_{t_2}: \nstand{l} \to \nstand{m}$ and $[M']:\Sigma_{s_1} \Rightarrow \Sigma_{s_2}: \nstand{m}\to \nstand{n}$ be 2-morphisms which can be glued horizontally. Let $\Omega_M$ and $\Omega_{M'}$ be special ribbon graphs representing the cobordisms $M$ and $M'$, respectively. Recall that the special ribbon graph $\Omega_M$ consists of ribbons from $R_{t_1}$ and $-R_{t_2}$ with uncolored vertical bands connected, and a surgery link. The surgery link may be tangled with $R_{t_1}$ and $-R_{t_2}$ as in Figure \ref{fig:OmegaM}. \begin{figure} \caption{The special ribbon graph $\Omega_M$} \label{fig:OmegaM} \end{figure} We may assume that the surgery link in $\Omega_M$ is away from the rightmost vertical band of $\Omega_M$ by pulling a component of the surgery link over the top coupon and bringing it to the other side. Similarly, we may assume that no component of the surgery link in $\Omega_{M'}$ is tangled with the leftmost uncolored vertical band of $\Omega_{M'}$ as in Figure \ref{fig:nosurgerylink}. \begin{figure} \caption{No surgery link tangled at the rightmost and the leftmost} \label{fig:nosurgerylink} \end{figure} We construct a new ribbon graph from these ribbon graphs $\Omega_{M}$ and $\Omega_{M'}$ as follows. From $\Omega_{M}$, remove the rightmost uncolored vertical bands and denote the resulting ribbon graph by $\Omega_{M}^{-}$. Similarly, remove the leftmost uncolored vertical bands from $\Omega_{M'}$ and denote the resulting ribbon graph by ${^{-}\Omega_{M'}}$. We juxtapose $\Omega_{M}^{-}$ and $^{-}{\Omega_{M'}}$ so that $\Omega_{M}^{-}$ is on the left of ${^{-}\Omega_{M'}}$, namely $\Omega_{M}^{-} \otimes {^{-}\Omega_{M'}}$ in the category $\mathbb{R}ib$.
In the middle of the ribbon graph $\Omega_{M}^{-} \otimes {^{-}\Omega_{M'}}$, there are $2(m-1)$ uncolored vertical bands coming from the right uncolored vertical bands of $\Omega_{M}^{-}$ and the left uncolored vertical bands of ${^{-}\Omega_{M'}}$. For each natural number $n$, let $\omega_{n}$ be a ribbon graph in $\mathbb{R}^2\times [0,1]\subset \mathbb{R}^3$ defined in Figure \ref{fig:omega}. The number of annulus ribbons in $\omega_{n}$ is $n$. \begin{figure} \caption{Ribbon graph $\omega_{n}$} \label{fig:omega} \end{figure} On the bottom of these $2(m-1)$ bands, we attach the ribbon graph $\omega_{m-1}$ defined in Figure \ref{fig:omega}. Let $\Omega_{M, M'}$ denote the resulting ribbon graph fitted in $\mathbb{R}^2 \times [0,1]$. See Figure \ref{fig:Horizontal Special Ribbon } for an example. \begin{figure} \caption{Special ribbon graph $\Omega_{M, M'}$} \label{fig:Horizontal Special Ribbon } \end{figure} \begin{lemma}\label{lem:horizontal glue of ribbons} The ribbon graph $\Omega_{M, M'}$ constructed above represents the horizontally glued cobordism $M\circ M'$. \end{lemma} \begin{proof} First note that since surgery links of $\Omega_M$ and $\Omega_{M'}$ are away from the neighborhoods of uncolored vertical bands of $\Omega_M$ and $\Omega_{M'}$, the order of the gluing and surgery is interchangeable. From $S^3$ with the ribbon graph $\Omega_M$ in it, let us cut out a regular neighborhood of the bottom coupon and top coupon and rainbow bands attached to them. Assuming the neighborhood of the top coupon contains the infinity in $S^3=\mathbb{R}^3 \cup \{ \infty \}$, we may assume that the rest of the ribbon graph lies in $S^2 \times [0,1] \subset \mathbb{R}^3$. Similarly for $\Omega_{M'}$. The horizontal gluing of $M$ and $M'$ now corresponds to cutting out the regular neighborhoods of the right vertical bands of $\Omega_M\subset S^2 \times [0,1]$ and the left vertical bands of $\Omega_{M'} \subset S^2 \times [0,1]$ and identifying their boundaries and doing surgery.
We decompose this procedure in several steps. Instead of cutting out those neighborhoods at the same time, we first cut out only the rightmost vertical band of $\Omega_M$ and the leftmost vertical band of $\Omega_{M'}$. Then we identify the boundary. This gluing can be realized in $\mathbb{R}^3$ as in Figure \ref{fig:first gluing}. \begin{figure} \caption{Gluing the first corners} \label{fig:first gluing} \end{figure} Thus the horizontally glued cobordism $M \circ M'$ can be obtained from the ribbon graph $\Omega_{M}^{-} \otimes {^{-}\Omega_{M'}}$ sitting in $S^2\times [0,1]$ by removing the neighborhoods of the middle $2(m-1)$ uncolored vertical bands and identifying their boundaries. Now we start from the ribbon graph $\Omega_{M, M'}$. Attach coupons on the top and the bottom of the graph $\Omega_{M, M'}$. Cut out the regular neighborhoods $T$ of the top and the bottom coupons and rainbow bands. The rest of the ribbon lies in $S^2 \times [0,1]\subset \mathbb{R}^3$. We do surgery along the surgery link of $\omega_{m-1}$. Let us describe this surgery carefully. We follow the argument given in \cite[Lemma I\hspace{-.1em}V 2.6]{Turaev10}. The ribbon graph $\omega_{m-1}$ has $m-1$ annuli along which we do surgery. Let $A_r$ be the $r$-th annulus of $\omega_{m-1}$ for $r=1,\dots, m-1$. We present this annulus in the form $A_r=D_r \setminus \mathrm{Int}(D_r')$, where $D_r$ and $D_r'$ are concentric 2-disks in $\mathbb{R}^2 \times [0,1]$ such that $D_r' \subset \mathrm{Int}(D_r)$ and $D_r'$ transversally intersects $\omega_{m-1}$ along two short intervals lying on two bands of $\omega_{m-1}$ linked by the annulus $A_r$, see Figure \ref{fig:annulus in omega}. \begin{figure} \caption{The annulus $A_r$ in $\omega_{m-1}$} \label{fig:annulus in omega} \caption{Gluing $F(r)$} \label{fig:gluing F(r)} \end{figure} Consider a regular neighborhood $D_r \times [-1, 1]$ in $\mathbb{R}^2 \times (0,1)$ of the larger disk $D_r$. We think that $D_r$ lies in $D_r\times \{0\}$.
We assume that there are no redundant crossings. Namely, locally the picture is as in Figure \ref{fig:annulus in omega}. Let $B_r^{-}$ and $B_r^+$ be small closed disjoint 2-disks in $\mathrm{Int}(D_r')$ and we assume that the intersection of $D_r \times [-1, 1]$ and $T$ is subcylinder $B_r^-\times [-1, 1]$ and $B_r^+ \times [-1,1]$. The surgery along the framed knot defined by $A_r$ may be described as follows. Consider the solid torus \[A_r \times [-1,1]= (D_r \times [-1,1]) \setminus (\mathrm{Int}(D_r') \times [-1,1]) \subset S^3.\] Its boundary consists of four annuli $A_r \times \{-1\}$, $A_r \times \{1\}$, $\partial D_r \times [-1,1]$, $\partial D_r' \times [-1,1]$. We remove the interior of $A_r \times [-1,1]$ from $S^3 \setminus \mathrm{Int}(T)$ and glue in its place the standard solid torus $D^2\times S^1$. The gluing is performed along a homeomorphism $\partial(A_r \times [-1, 1]) \to \partial (D^2 \times S^1)$ carrying each circle $\partial D_r' \times \{t\}$ with $t\in [-1,1]$ onto a circle $\partial D^2 \times \{x\}$ with $x\in S^1$. Let $E(r)$ denote the solid 3-cylinder formed by the disks $D^2 \times \{x\}$ glued to $\partial D_r' \times \{t\}$ with $t\in [-1,1]$. Let $F(r)$ denote the complementary solid 3-cylinder $\overline{(D^2 \times S^1) \setminus E(r)}$. For $r=1, \dots, m-1$ consider the genus 2 handlebody \[ (D_r' \times [-1,1]) \setminus \mathrm{Int}(T) =(D_r' \setminus (\mathrm{Int}(B_r^- \cup B_r^+))) \times [-1, 1] \] and glue $E(r)$ to it as specified above. This gives a 3-cobordism with bases $\partial B_r^- \times [-1, 1]$ and $\partial B_r^+ \times [-1,1]$ lying in the bottom boundary and the top boundary, respectively, of the cobordism represented by $\Omega_{M, M'}$. This cobordism is a cylinder over $\partial B_r^- \times [-1,1]$. 
Indeed, for $t\in [-1,1]$, the disk $D^2 \times \{x\} \subset E(r)$ glued to $\partial D_r' \times \{t\}$ and the disk with two holes $(D_r' \setminus \mathrm{Int}(B_r^- \cup B_r^+)) \times \{t\}$ form an annulus with bases $\partial B_r^- \times \{t\}$ and $\partial B_r^+ \times \{t\}$. These annuli corresponding to all $t \in [-1,1]$ form the cylinder in question. When $r$ runs over $1, \dots, m-1$, we get $m-1$ cylinder cobordisms. We may glue each $F(r)$ inside $D_r \times [-1,1]\subset S^3$. Then locally this is a complement of two cylinders as in Figure \ref{fig:gluing F(r)}. Note that the union of these spaces corresponds to the identification of the cylindrical boundaries. In Figure \ref{fig:identification of boundaries}, the space described on the left is the complement of cylinders. The second space is $\partial B_r^- \times [-1,1] \times [0,1]$. The inner boundary corresponds to $\partial B_r^- \times [-1, 1] \times \{0\}$ and the outer boundary is $\partial B_r^+ \times [-1,1]\times \{1\}$. For each $s\in[0,1]$, the cylinder $\partial B_r^-\times [-1,1]\times \{s\}$ is glued to the first space. (The red circles indicate where to glue and the blue line indicates the interval $[0,1]$.) This gluing of cylinders corresponds to identifying the time $s$ circles of the cylindrical boundaries. \begin{figure} \caption{Identification of boundaries} \label{fig:identification of boundaries} \end{figure} Thus the surgery along the framed link in $\omega_{m-1}$ is the same as cutting out regular neighborhoods of the middle uncolored vertical bands of $\Omega_{M}^{-} \otimes {^{-}\Omega_{M'}}$ and identifying the boundaries (after absorbing the small top and bottom cylindrical parts into the top and bottom boundaries by isotopy respectively.) \end{proof} Now that we obtained the ribbon graph operation for horizontal gluing, we use it to study the behavior of the assignment $\mathcal{X}$ under horizontal gluing.
Let $[M_1]: \Sigma_{t_1} \Rightarrow \Sigma_{t_2}: \nstand{l} \to \nstand{m}$ and $[M_2]:\Sigma_{s_1} \Rightarrow \Sigma_{s_2}: \nstand{m} \to \nstand{n}$ be 2-morphisms of $\mathbb{C}o$ that can be glued horizontally. Recall the canonical isomorphisms $u_1:\mathcal{X}(\Sigma_{t_1})\circ\mathcal{X}(\Sigma_{s_1}) \to \mathcal{X}(\Sigma_{t_1} \circ \Sigma_{s_1}) $ and $u_2:\mathcal{X}(\Sigma_{t_2})\circ \mathcal{X}(\Sigma_{s_2}) \to \mathcal{X}(\Sigma_{t_2} \circ \Sigma_{s_2})$ given in the proof of Proposition \ref{prop:2-functor on 1-morphisms}. The next proposition shows that these isomorphisms commute with 2-homomorphisms $\mathcal{X}(M_1 \circ M_2)$ and $\mathcal{X}(M_1) \circ \mathcal{X}(M_2)$. \begin{prop}[Horizontal composition]\label{prop:2horizontal} Let $[M_1]: \Sigma_{t_1} \Rightarrow \Sigma_{t_2}: \nstand{l} \to \nstand{m}$ and $[M_2]:\Sigma_{s_1} \Rightarrow \Sigma_{s_2}: \nstand{m} \to \nstand{n}$ be 2-morphisms of $\mathbb{C}o$. Then we have $\mathcal{X}(M_1\circ M_2)u_1=u_2(\mathcal{X}(M_1)\circ \mathcal{X}(M_2))$. \end{prop} \begin{proof} Let $\Omega_1=\Omega_{M_1}$ and $\Omega_2=\Omega_{M_2}$ be special ribbon graphs representing $M_1$ and $M_2$, respectively, so that no surgery links are tangled with the rightmost vertical band of $\Omega_{1}$ and the leftmost vertical band of $\Omega_{2}$. Then the special ribbon graph $\Omega=\Omega_{M_1, M_2}$ represents the horizontally glued cobordism $M_1\circ M_2$ by Lemma \ref{lem:horizontal glue of ribbons}. From $\Omega_{1}$, remove the rightmost uncolored vertical bands and denote the resulting ribbon graph by $\Omega_{1}^{-}$. Similarly, remove the leftmost uncolored vertical bands from $\Omega_{2}$ and denote the resulting ribbon graph by ${^{-}\Omega_{2}}$. For $i=1,2$, we define several notations. Let $g_i^+$ be the number of cup like bands in $\Omega_{i}$. Let $g^+$ be the number of cup like bands in $\Omega$.
Since there are $m-1$ cup like bands in $\omega_{m-1}$, we have $g^+=g_1^+ + g_2^+ +(m-1)$. Let $L_i$ be surgery links in $\Omega_{i}$. The $m-1$ annuli in $\omega_{m-1}$ are denoted by $L_3$. Denote by $L$ the surgery links of the ribbon graph $\Omega=\Omega_{M_1, M_2}$. Then $L$ is a disjoint (unlinked) union of $L_1$ and $L_2$ and $L_3$. Let $\mu, \mu_1, \mu_2$ be the number of components of $L, L_1, L_2$ respectively. We have $\mu=\mu_1 + \mu_2 + m-1$. The $(h,j)$-component homomorphism $\mathcal{X}(M_1 \circ M_2)_{h,j}: \mathcal{X}(\Sigma_{t_1\circ s_1})_{h, j} \to \mathcal{X}(\Sigma_{t_2 \circ s_2})_{h, j}$ can be calculated by Formula (\ref{equ:tau zeta eta extended}). Let $\zeta$ and $\eta$ be a color for cap like bands and cup like bands of $\Omega$, respectively. We calculate the $(\zeta, \eta)$-block \[\mathcal{X}_{\zeta}^{\eta}:=\left(\mathcal{X}(M_1 \circ M_2)_{h, j}\right)_{\zeta}^{\eta}.\] By Formula (\ref{equ:tau zeta eta extended}), we have \begin{equation*} \mathcal{X}_{\zeta}^{\eta}=\Delta^{\sigma(L)} \mathcal{D}^{-g^+ -\sigma(L)-\mu} \dim (\eta) \sum_{\lambda \in \mathrm{col}(L)} \dim (\lambda) F_0({_h\Omega_{j}}, \zeta, \eta, \lambda), \end{equation*} where ${_h\Omega_{j}}$ is the ribbon graph $\Omega$ with the left vertical bands colored by $h$ and the right vertical bands colored by $j$. The ribbon graph ${_h\Omega_{j}}$ is the same as $\Omega_{(M\circ M', h, j)}$ in the notation of Formula (\ref{equ:tau zeta eta extended}). Note that we have $\sigma(L)=\sigma(L_1)+\sigma(L_2)+\sigma(L_3)=\sigma(L_1)+\sigma(L_2)$, since the annuli of $\omega_{m-1}$ are separated, thus $\sigma(L_3)=0$. We write $\eta=\eta_1+\eta_2+\eta_3$, where $\eta_i$ is a color of the cup like bands of $\Omega_i$ for $i=1, 2$, and $\eta_3$ is a color of the cup like bands of $\omega_{m-1}$. Then we have $\dim(\eta)=\dim(\eta_1)\dim(\eta_2)\dim(\eta_3)$. Write analogously $\zeta=\zeta_1 +\zeta_2 +\zeta_3$ for the cap like bands.
Similarly, we decompose a color $\lambda=\lambda_1 + \lambda_2 + \lambda_3$, where $\lambda_i$ is a color of $L_i$ for $i=1,2,3$. Then we have $\dim(\lambda)=\dim(\lambda_1)\dim(\lambda_2)\dim(\lambda_3)$. Expressing the ribbon graph $\Omega$ as a morphism of $\mathbb{R}ib$, we have \[\Omega=(\Omega_{1}^{-} \otimes {^{-}\Omega_{2}})(\mathrm{id}_1\otimes \omega_{m-1} \otimes \mathrm{id}_2).\] See Figure \ref{fig:Horizontal Special Ribbon }. Since the operator invariant $F$ is a monoidal functor from $\mathbb{R}ib$, we have $F(_h\Omega_{j}, \zeta, \eta, \lambda)$ \[=\left( F(_h(\Omega_1^-), \zeta_1,\eta_1, \lambda_1) \otimes F(({^-\Omega_2})_j,\zeta_2, \eta_2, \lambda_2) \right) F(\mathrm{id}_1\otimes \omega_{m-1} \otimes \mathrm{id}_2, \zeta_3, \eta_3,\lambda_3).\] Then $\mathcal{X}_{\zeta}^{\eta}$ is the composition of a morphism $\mathbb{1} \to \Phi(t_1\circ s_1; \zeta; h,j)$ with \begin{multline*} \bigg( \Delta^{\sigma(L_1)} \mathcal{D}^{-g_1^+ -\sigma(L_1)-\mu_1} \dim (\eta_1) \sum_{\lambda_1 \in \mathrm{col}(L_1)} \dim (\lambda_1) F(_h(\Omega_1^-), \zeta_1, \eta_1, \lambda_1)\\ \otimes \Delta^{\sigma(L_2)} \mathcal{D}^{-g_2^+ -\sigma(L_2)-\mu_2} \dim (\eta_2) \sum_{\lambda_2 \in \mathrm{col}(L_2)} \dim (\lambda_2) F(({^-\Omega_2})_j, \zeta_2, \eta_2, \lambda_2) \bigg)\\ \Delta^{\sigma(L_3)} \mathcal{D}^{-(m-1) -\sigma(L_3)-(m-1)} \dim (\eta_3) \sum_{\lambda_3 \in \mathrm{col}(L_3)} \dim (\lambda_3) F(\mathrm{id}_1\otimes \omega_{m-1} \otimes \mathrm{id}_2, \zeta_3, \eta_3, \lambda_3). \end{multline*} We compute the last term. First, since $\sigma(L_3)=0$, the last term reduces to \begin{equation} \mathcal{D}^{-2(m-1)} \dim (\eta_3) \sum_{\lambda_3 \in \mathrm{col}(L_3)} \dim (\lambda_3) F(\mathrm{id}_1\otimes \omega_{m-1} \otimes \mathrm{id}_2, \zeta_3, \eta_3, \lambda_3). \end{equation} We claim that the sum is zero unless $\zeta_3=\eta_3$. If $\zeta_3=\eta_3$, then the sum is equal to the operator invariant of $\mathrm{id}_{\zeta}$.
Here $\mathrm{id}_{\zeta}$ is vertical bands whose colors are determined according to $\zeta$. We postpone the proof of this claim. We will prove this claim as a consequence of some graphical calculations. See Lemma \ref{lem:claim} below. Assuming the claim, we complete the current proof. To show $\mathcal{X}(M_1\circ M_2)u_1=u_2(\mathcal{X}(M_1)\circ \mathcal{X}(M_2))$, it suffices to show \begin{equation}\label{equ:ugg=gu} \mathcal{X}(M_1\circ M_2)_{h,j}u_1=u_2(\mathcal{X}(M_1)_{h,i}\circ \mathcal{X}(M_2)_{i,j}), \end{equation} where $u_i$ is an isomorphism in Lemma \ref{lem:sum over simple 2}. The left hand side of (\ref{equ:ugg=gu}) is equal to \begin{equation}\label{equ:g (Omega Omega)} \biggr[ \bigoplus_{\zeta, \eta} \Delta^{\sigma(L)}\mathcal{D}^{-g^+ - \sigma(L) - \mu} \dim (\eta) \sum_{\lambda} \dim (\lambda) F_0(_h \Omega_j, \zeta, \eta, \lambda) \biggr]u_1, \end{equation} where $\zeta \in I^{t_1\circ s_1}$ and $\eta \in I^{t_2 \circ s_2}$ decompose as $\zeta=\zeta_1+\zeta_2+\zeta_3$ and $\eta=\eta_1+\eta_2+\eta_3$ with notations as above. By the claim we may assume that $\zeta_3=\eta_3$. Hence we can write the equation (\ref{equ:g (Omega Omega)}) as follows. \begin{multline*} \bigoplus_{i\in I^{m-1} } \bigoplus_{\zeta_1, \zeta_2} \bigoplus_{\eta_1, \eta_2} \\\biggl[ \Delta^{\sigma(L_1)} \mathcal{D}^{-g_1^+ -\sigma(L_1)-\mu_1} \dim (\eta_1) \sum_{\lambda_1 \in \mathrm{col}(L_1)} \dim (\lambda_1) F(_h(\Omega_1^-)_i, \zeta_1, \eta_1, \lambda_1)\\ \otimes \Delta^{\sigma(L_2)} \mathcal{D}^{-g_2^+ -\sigma(L_2)-\mu_2} \dim (\eta_2) \sum_{\lambda_2 \in \mathrm{col}(L_2)} \dim (\lambda_2) F(_i(^-\Omega_2)_j, \zeta_2, \eta_2, \lambda_2)\biggr]u_1.
\end{multline*} The above equation is further equal to \begin{multline*}\label{equ:big big bigoplus 2} u_2\bigoplus_{i\in I^{m-1} } \bigoplus_{\zeta_1, \zeta_2} \bigoplus_{\eta_1, \eta_2} \\ \biggl[ \Delta^{\sigma(L_1)} \mathcal{D}^{-g_1^+ -\sigma(L_1)-\mu_1} \dim (\eta_1) \sum_{\lambda_1 \in \mathrm{col}(L_1)} \dim (\lambda_1) F(_h(\Omega_1)_i, \zeta_1, \eta_1, \lambda_1)\\ \otimes \Delta^{\sigma(L_2)} \mathcal{D}^{-g_2^+ -\sigma(L_2)-\mu_2} \dim (\eta_2) \sum_{\lambda_2 \in \mathrm{col}(L_2)} \dim (\lambda_2) F(_i(\Omega_2)_j, \zeta_2, \eta_2, \lambda_2)\biggr]. \end{multline*} To see this, note that as a graphical calculation $u_2$ connects the top of the rightmost band of $\Omega_1$ and the leftmost band of $\Omega_2$ by a cap like band as in Figure \ref{fig:ugg-gu}. Since no surgery links are tangled with those bands, we can push down the cap like bands. This explains the equality of Figure \ref{fig:ugg-gu}. \begin{figure} \caption{The graphical calculation for (\ref{equ:ugg=gu})} \label{fig:ugg-gu} \end{figure} Finally by Formula (\ref{equ:tau zeta eta extended}), the above equation is equal to \[ u_2(\mathcal{X}(M_1)_{h,i} \circ \mathcal{X}(M_2)_{i,j}).\] Thus the proof is complete assuming the claim, which we prove below. \end{proof} In the following graphical calculations, the equality with dot $\stackrel{\bullet}{=}$ means the equality after applying the operator invariant functor $F$ to the ribbon graphs. Consider the ribbon graphs in Figure \ref{fig:Fig310}. The label $i$ is an arbitrary element of $I$. \begin{figure}\label{fig:Fig310} \end{figure} \begin{lemma}\label{lem:Fig310} The equality in Figure \ref{fig:Fig310} holds. If the color of the left vertical strand is replaced with $j\neq i$, then the sum on the left hand side is equal to $0$.
\end{lemma} \begin{proof} See \cite[Section II 3 p.98]{Turaev10}. \end{proof} \begin{lemma}\label{lem:claim} For each ribbon graph $\omega_n$, let $\zeta$ and $\eta$ be sequences of colors of the bottom and top rainbow like bands of $\omega_n$. Then we have \begin{align}\label{equ:F of omega} &\mathcal{D}^{-2n} \dim (\eta) \sum_{\lambda \in \mathrm{col}(L)} \dim (\lambda) F(\omega_{n}, \zeta, \eta, \lambda)\\ & \stackrel{\bullet}{=} \begin{cases} \mathrm{id}_{\zeta} &\text{if } \zeta=\eta \\ \notag 0 & \text{if } \zeta \neq \eta. \end{cases} \end{align} \end{lemma} \begin{proof} A part of the ribbon graph $\omega_n$ that consists of an annulus and a pair of a cup like band and a cap like band can be deformed so that it contains the ribbon graphs that appeared on the left hand side of the equation in Figure \ref{fig:Fig310}. This deformation is depicted in the first equality in Figure \ref{fig:graphical calculation 2}. It follows from Lemma \ref{lem:Fig310} that the sum on the left hand side of (\ref{equ:F of omega}) is zero unless $\zeta=\eta$. If $\zeta=\eta$, then the calculation in Figure \ref{fig:graphical calculation 2} shows that each annulus part gives rise to a factor $\mathcal{D}^2$ and the ribbon becomes the $2n$ vertical bands. \begin{figure}\label{fig:graphical calculation 2} \end{figure} \end{proof} To wrap up this section, we state here the main theorem (Theorem \ref{thm:main theorem}) again and complete its proof. \begin{thm} The assignment $\mathcal{X}$ is a projective pseudo 2-functor from the 2-category $\mathbb{C}o$ of decorated cobordisms with corners to the Kapranov-Voevodsky 2-vector spaces $2\-\mathrm{Vect}$. \end{thm} \begin{proof} We check the conditions (1)$\sim$(4) and (M.1), (M.2) of a projective pseudo 2-functor in Section \ref{sec:2-functor}. (A projectivity is defined in Definition \ref{def:projective functor}.)
In the current case $(F, \phi)$ in the notation of Section \ref{sec:2-functor} is $(\mathcal{X}, u)$, where $u$ is the map used in the proof of Proposition \ref{prop:2-functor on 1-morphisms}. The condition (1) is just the definition of $\mathcal{X}$. The condition (2) follows from Proposition \ref{prop:vertical projective functor}. The condition (3) as well as (M.2) is satisfied since we just use formal identities. The condition (4) follows from Proposition \ref{prop:2-functor on 1-morphisms} and \ref{prop:2horizontal}. Finally the condition (M.1) follows since the isomorphism $u$ in Lemma \ref{lem:sum over simple 2} satisfies the analogue diagram. \end{proof} \subsection{The extended TQFT $\mathcal{X} $}\label{sec:the extended tqft} \begin{Definition} An \textit{extended TQFT} is a projective pseudo 2-functor from $\mathbb{C}o$ to $2\-\mathrm{Vect}$. An extended TQFT \textit{extends} the Reshetikhin-Turaev TQFT if when it is restricted to the category $\mathbb{C}o({_* \emptyset}, \emptyset_*)$ of cobordisms without corners, it is the Reshetikhin Turaev TQFT. \end{Definition} Our candidate for an extended TQFT that extends the Reshetikhin-Turaev TQFT is the projective pseudo 2-functor $\mathcal{X}$. By definition, the 2-functor $\mathcal{X}$ is an extended TQFT. \begin{prop} The extended TQFT $\mathcal{X}$ extends the Reshetikhin-Turaev TQFT. \end{prop} \begin{proof} Suppose $(M, \partial_{B} M, \partial_{T} M)$ is a cobordism without corners. Then $M$ is represented by a special ribbon graph with a bottom type $t^-=(0,0; a_1, \dots, a_p)$ and a top type $t^+=(0,0; b_1, \dots, b_q)$. Since there are no left and right circles on boundary surfaces of $M$, $\mathcal{X}(\partial_{-} M)$ is a $(1 \times 1)$ 2-matrix and we can canonically identify the 2-matrix with its only entry $\bigoplus_{i \in I^{t^-}}\mathrm{Hom}(\mathbb{1}, \Phi(t^-; i))$. This module is what the RT TQFT assigns to $\partial_{-} M$. Similarly for $\partial_{+} M$. 
Then we can identify $\mathcal{X}(M)$ as a homomorphism from the module $\mathcal{X}(\partial_{-} M)$ to the module $\mathcal{X}(\partial_{+} M)$, which is the same as the RT TQFT by definition. \end{proof} \section{Comments}\label{sec:comments} \subsection{A 2-category of Special Ribbon Graphs} As we have seen, the construction of the projective pseudo 2-functor $\mathcal{X}$ from $\mathbb{C}o$ to $2\-\mathrm{Vect}$ extensively depends on the technique of representing a cobordism by a special ribbon graph. On the level of objects, the 2-functor $\mathcal{X}$ extracts just the number of components for each 1-manifold. On the 1-morphisms, the 2-functor $\mathcal{X}$ reads the type of each surface and outputs the projective module constructed only from the information of the type. We can hence consider the 2-category $\mathrm{Srg}$ whose objects are integers and whose 1-morphisms are decorated types and whose 2-morphisms are special ribbon graphs. We could have defined $\mathcal{X}$ alternatively by the composition of a 2-functor from $\mathbb{C}o$ to $\mathrm{Srg}$ and a 2-functor from $\mathrm{Srg}$ to $2\-\mathrm{Vect}$. However, to make it meaningful we need to define composition in $\mathrm{Srg}$ independently of $\mathbb{C}o$. This would digress from the main stream of our argument and thus we did not choose to do so. \begin{center} \begin{tabular}{ | l | l | l | } \hline & $\mathbb{C}o$ & $\mathrm{Srg}$ \\ \hline & Geometric realization & Combinatorial data \\ \hline Objects & Standard circles & Integers \\ \hline 1-morphisms & Standard surfaces & Decorated types \\ \hline 2-morphisms & Classes of decorated cobordisms with corners & Special ribbon graphs \\ \hline \end{tabular} \end{center} \subsection{A connection to other work} We chose that our extended TQFT takes values in $2\-\mathrm{Vect}$. There are 2-functors 2-$\mathcal{V}ect \to \mathcal{B}imod \to \mathbb{C}at$.
Here $\mathcal{B}imod$ is a 2-category whose objects are $K$-algebras, where $K$ is the ground ring, and whose 1-morphisms are bimodules and 2-morphisms are equivalence classes of $K$-homomorphisms. The 2-category $\mathbb{C}at$ consists of categories for objects, functors for 1-morphisms, and natural transformations for 2-morphisms. Thus our extended TQFT can also take values in $\mathcal{B}imod$ or $\mathbb{C}at$ and this makes a connection with the work of \cite{DSS}. The 2-functors can be constructed as follows. First let us define the 2-functor 2-$\mathcal{V}ect \to \mathcal{B}imod$. On object level, to each object $n \in \mathbb{Z}$ of 2-$\mathcal{V}ect$ we assign $K^n$. On 1-morphism level, to each $m\times n$ 2-matrix $(V_{ij})_{ij}$ we assign a $(K^m, K^n)$-bimodule $\bigoplus_{i,j}V_{ij}$, where the bimodule structure is induced by the multiplication of $1 \times m$ matrix from the left of $(V_{ij})_{ij}$ and the multiplication of $n \times 1$ matrix from the right of $(V_{ij})_{ij}$. On 2-morphism level, if $(T_{ij})$ is a 2-morphism from $(V_{ij})$ to $(W_{ij})$, we assign $\bigoplus_{i,j} T_{i,j}: \bigoplus_{i,j}V_{ij} \to \bigoplus_{i,j}W_{ij}$. These assignments are easily seen to be a 2-functor. The 2-functor from $\mathcal{B}imod$ to $\mathbb{C}at$ assigns on object level, to each $K$-algebra $A$, the category of right $A$-modules whose objects are right $A$-modules and morphisms are homomorphisms. On 1-morphism level, the assignment is induced by tensoring a bimodule from the right. On 2-morphism level, natural transformations are induced by homomorphisms of bimodules. Composing this 2-functor with the extended TQFT $\mathcal{X}: \mathbb{C}o \to 2\text{-}\mathcal{V}ect$, we have a 2-functor from the 2-category of cobordisms with corners to the 2-category $\mathbb{C}at$. \section{Appendix: Bicategories}\label{sec:appendix:bicategory} Here we review the definition of bicategories.
The following definitions of a bicategory and a pseudo 2-functor are excerpted from the paper \cite{MR0220789}. \subsection{Bicategories} A \textit{bicategory} $\underbar{S}$ is determined by the following data: \begin{enumerate} \item A set $\underbar{S}_0 =\mathrm{Ob}(\underbar{S})$ called the set of objects of $\underbar{S}$. \item For each pair $(A, B)$ of objects, a category $\underbar{S}(A, B)$. An object $S$ of $\underbar{S}(A,B)$ is called a \textit{morphism} of $\underbar{S}$, and written $A \xrightarrow{S} B$; the composition sign $\circ$ of maps in $\underbar{S}(A,B)$ will usually be omitted. A map $s$ from $S$ to $S'$ will be called a \textit{2-morphism} and written $s: S \Rightarrow S'$, or better will be represented by: \begin{center} \begin{tikzpicture} \node (A) at (-1,0) {$A$}; \node (B) at (1,0) {$B$}; \node at (0,0) {\rotatebox{270}{$\Rightarrow$}}; \path[->,font=\scriptsize,>=angle 90] node[right]{$s$} (A) edge [bend left] node[above] {$S$} (B) edge [bend right] node[below] {$S'$} (B); \end{tikzpicture} \end{center} \item For each triple $(A, B, C)$ of objects of $\underbar{S}$, a \textit{composition functor}: \[c(A, B,C): \underbar{S}(A,B) \times \underbar{S}(B,C) \to \underbar{S}(A,C).\] We write $S\circ T$ and $s\circ t$ instead of $c(A,B,C)(S,T)$ and $c(A,B,C)(s,t)$ for $(S,T)$ and $(s,t)$ objects and maps of $\underbar{S}(A,B) \times \underbar{S}(B,C)$, and abbreviate $\mathrm{id}_{S} \circ t$ and $s\circ \mathrm{id}_{T}$ into $S\circ t$ and $s\circ T$. This composition corresponds to the pasting: \item For each object $A$ of $\underbar{S}$ an object $I_A$ of $\underbar{S}(A,A)$ called an \textit{identity morphism} of $A$. The identity map of $I_A$ in $\underbar{S}(A,A)$ is denoted $i_A:I_A \Rightarrow I_A$ and called an \textit{identity 2-morphism} of $A$.
\item For each quadruple $(A,B,C,D)$ of objects of $\underbar{S}$, a natural isomorphism $a(A, B, C, D)$, called an \textit{associativity isomorphism}, between the two composite functors bounding the diagram: \begin{center} \begin{tikzcd}[column sep=3cm] \underbar{S}(A, B) \times \underbar{S}(B, C) \times \underbar{S}(C, D) \arrow{r}{\mathrm{id} \times c(B, C, D)} \arrow{d}[swap]{c(A,B,C) \times \mathrm{id}} &\underbar{S}(A,B) \times \underbar{S}(B,D) \arrow{d}{c(A,B,D)}\\ \underbar{S}(A,C) \times \underbar{S}(C,D) \arrow[Rightarrow]{ru}{a(A, B, C,D)} \arrow{r}{c(A,C,D)} & \underbar{S}(A,D) \end{tikzcd} \end{center} Explicitly: \[a(A,B,C,D): c(A,C,D) \circ ( c(A, B, C)\times \mathrm{id}) \to c(A, B,D) \circ (\mathrm{id} \times c(B,C,D)).\] If $(S,T,U)$ is an object of $\underbar{S}(A, B) \times \underbar{S}(B, C) \times \underbar{S}(C, D)$ the isomorphism \[a(A, B,C,D)(S,T,U): (S \circ T) \circ U \xrightarrow{\sim} S \circ (T \circ U)\] in $\underbar{S}(A, D)$ is called the \textit{component} of $a(A,B,C,D)$ at $(S,T,U)$ and is abbreviated into $a(S,T,U)$ or even $a$, except when confusions are possible.
\item For each pair $(A,B)$ of objects of $\underbar{S}$, two natural isomorphisms $l(A,B)$ and $r(A,B)$, called \textit{left} and \textit{right} identities, between the functors bounding the diagrams: \begin{center} \begin{tikzcd}[column sep=3cm] 1\times \underbar{S}(A, B) \arrow{r}{I_A \times \mathrm{id}} \arrow{d}[swap]{\text{canonical}} &\underbar{S}(A,A) \times \underbar{S}(A,B) \arrow{d}{c(A,A,B)}\\ \underbar{S}(A,B) \arrow[Rightarrow]{ru}{l(A,B)} \arrow{r}{=} & \underbar{S}(A,B) \end{tikzcd} \end{center} \begin{center} \begin{tikzcd}[column sep=3cm] \underbar{S}(A, B)\times 1 \arrow{r}{\mathrm{id} \times I_B} \arrow{d}[swap]{\text{canonical}} &\underbar{S}(A,B) \times \underbar{S}(B,B) \arrow{d}{c(A,B,B)}\\ \underbar{S}(A,B) \arrow[Rightarrow]{ru}{r(A,B)} \arrow{r}{=} & \underbar{S}(A,B) \end{tikzcd} \end{center} If $S$ is an object of $\underbar{S}(A, B)$, the isomorphism, component at $S$ of $l(A,B)$, \[l(A,B)(S): I_A \circ S \xrightarrow{\sim} S \] is abbreviated into $l(S)$ or even $l$, and similarly we write: \[r=r(S)=r(A,B)(S): S \circ I_B \xrightarrow{\sim} S.\] \end{enumerate} The families of natural isomorphisms $a(A,B,C,D)$, $l(A,B)$ and $r(A,B)$ are furthermore required to satisfy the following axioms: \begin{enumerate} \item[(A.C.)] Associativity coherence: If $(S, T,U,V)$ is an object of \[\underbar{S}(A,B)\times \underbar{S}(B,C) \times \underbar{S}(C,D)\times \underbar{S}(D,E)\] the following diagram commutes: \begin{center} \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=3em, row sep=1.5em] { $((S\circ T) \circ U)\circ V$ & & $(S\circ (T\circ U)) \circ V$ \\ $(S\circ T) \circ (U \circ V)$ & & $S \circ ((T \circ U) \circ V)$ \\ & $S\circ (T \circ (U \circ V))$ & \\}; \path[->, font=\scriptsize] (m-1-1) edge node[above] {$a(S,T,U)\circ \mathrm{id}$} (m-1-3); \path[->, font=\scriptsize] (m-1-1) edge node[left] {$a(S\circ T, U, V)$} (m-2-1); \path[->, font=\scriptsize] (m-1-3) edge node[auto] {$a(S, T\circ U, V)$}
(m-2-3); \path[->, font=\scriptsize] (m-2-1) edge node[below left] {$a(S,T, U \circ V)$} (m-3-2); \path[->, font=\scriptsize] (m-2-3) edge node[auto] {$\mathrm{id} \circ a(T, U, V)$} (m-3-2); \end{tikzpicture} \end{center} \item[(I. C.)] Identity coherence: If $(S, T)$ is an object of $\underbar{S}(A,B) \times \underbar{S}(B,C)$ the following diagram commutes: \begin{center} \begin{tikzpicture} \matrix (m) [matrix of nodes, column sep=3em, row sep=1.5em] { $(S\circ I_B) \circ T$ & & $S\circ (I_B \circ T)$ \\ & $S \circ T$ & \\}; \path[->, font=\scriptsize] (m-1-1) edge node[above] {$a(S,I_B, T)$} (m-1-3); \path[->, font=\scriptsize] (m-1-1) edge node[below left] {$r(S) \circ \mathrm{id}$} (m-2-2); \path[->, font=\scriptsize] (m-1-3) edge node[auto] {$\mathrm{id} \circ l(T)$} (m-2-2); \end{tikzpicture} \end{center} \end{enumerate} \subsection{Pseudo 2-functors of bicategories}\label{sec:2-functor} Let $\underbar{S}=( \underbar{S}_0, c, I, a, l, r)$ and $\bar{\underbar{S}}=( \bar{\underbar{S}_0}, \bar{c}, \bar{I}, \bar{a}, \bar{l}, \bar{r})$ be two bicategories. A \textit{pseudo 2-functor} $\Phi=(F, \phi)$ from $\underbar{S}$ to $\bar{\underbar{S}}$ is determined by the following: \begin{enumerate} \item A map $F: \underbar{S}_0 \to \bar{\underbar{S}_0}, A \mapsto FA$. 
\item A family of functors \[ F(A, B): \underbar{S}(A, B) \to \bar{\underbar{S}}(FA, FB), \quad S \mapsto FS, \quad s \mapsto Fs.\] \item For each object $A$ of $\underbar{S}$, an arrow of $\bar{\underbar{S}}(FA, FA)$ (i.e., a 2-cell of $\bar{\underbar{S}}$) \[ \phi_A: \bar{I}_{FA} \to F(I_A).\] \begin{center} \begin{tikzpicture} \node (A) at (-1,0) {$FA$}; \node (B) at (1,0) {$FA$}; \node at (0,0) {\rotatebox{270}{$\Rightarrow$}}; \path[->,font=\scriptsize,>=angle 90] node[right] {$\phi_A$} (A) edge [bend left] node[above] {$\bar{I}_{FA}$} (B) edge [bend right] node[below] {$F(I_A)$} (B); \end{tikzpicture} \end{center} \item A family of natural transformations: \[\phi(A, B, C): \bar{c}(FA, FB, FC) \circ (F(A, B) \times F(B, C)) \to F(A, C) \circ c(A, B, C). \] \begin{center} \begin{tikzcd}[column sep=3cm] \underbar{S}(A, C) \arrow[leftarrow]{r}{c(A, B, C)} \arrow{d}{F(A, C)} \arrow[Leftarrow]{rd}{\phi(A, B, C)} &\underbar{S}(A,B) \times \underbar{S}(B,C) \arrow{d}{F(A, B) \times F(B,C)}\\ \bar{\underbar{S}}(FA,FC) \arrow[leftarrow]{r}{\bar{c}(FA,FB,FC)} & \bar{\underbar{S}}(FA,FB) \times \bar{\underbar{S}}(FB,FC) \end{tikzcd} \end{center} If $(S,T)$ is an object of $\underbar{S}(A,B) \times \underbar{S}(B,C)$ the $(S,T)$-component of $\phi(A,B,C)$ \[FS \circ FT \xrightarrow{\phi(A,B,C)(S,T)} F(S \circ T) \] shall usually be abbreviated into $\phi(S, T)$ or even $\phi$. \end{enumerate} These data are required to satisfy the following coherence axioms: \begin{enumerate} \item[(M.
1)] If $(S, T, U)$ is an object of $\underbar{S}(A, B) \times \underbar{S}(B,C) \times \underbar{S}(C,D)$ the following diagram, where indices $A, B, C, D$ have been omitted, is commutative: \begin{tikzcd}[column sep=large] (FS \circ FT) \circ FU \arrow{r}{ \phi(S, T) \circ \mathrm{id}} \arrow{d}{ \bar{a}(FS, FT, FU)} &F(S\circ T) \circ FU \arrow{r}{ \phi(S \circ T, U)} & F((S\circ T) \circ U) \arrow{d}{F(a(S,T,U))} \\ FS\circ (FT \circ FU) \arrow{r}{\mathrm{id} \circ \phi(T, U)} & FS \circ F(T \circ U) \arrow{r}{\phi(S, T \circ U)} & F(S \circ (T \circ U)) \end{tikzcd} \item[(M. 2)] If $S$ is an object of $\underbar{S}(A,B)$ the following diagrams commute: \begin{tikzcd} FS \arrow[leftarrow]{r}{Fr} & F(S \circ I_B) \\ FS\circ \bar{I}_{FB} \arrow{u}{\bar{r}} \arrow{r}{ \mathrm{id} \circ \phi_B} & FS \circ FI_B \arrow{u}{\phi(S, I_B)} \end{tikzcd} \qquad \begin{tikzcd} F(I_A \circ S) \arrow{r}{Fl} & FS \\ FI_A \circ FS \arrow{u}{\phi(I_A, S)}\arrow[leftarrow]{r}{\phi_A \circ \mathrm{id}} & \bar{I}_{FA} \circ FS \arrow{u}{\bar{l}} \end{tikzcd} \end{enumerate} \subsection{Projective Functors and Projective Pseudofunctors} Let $\mathscr{C}$ and $\mathscr{D}$ be categories. We introduce the notion of a \textit{projective functor} from $\mathscr{C}$ to $\mathscr{D}$. This notion deals with the anomaly of the Reshetikhin-Turaev TQFT. \begin{Definition}\label{def:projective functor} Assume that the set of morphisms of $\mathscr{D}$ is an $R$-module for some ring $R$. The following assignment $F$ is called \textit{projective functor} \begin{enumerate} \item For each object $X\in \mathrm{Obj}(\mathscr{C})$, an object $F(X) \in \mathrm{Obj}(\mathscr{D})$. 
\item For a morphism $f: X\to Y$ in $\mathscr{C}$, a morphism $F(f): F(X) \to F(Y)$ in $\mathscr{D}$ satisfying the following conditions: \begin{enumerate} \item(Unit) For any object $X$ in $\mathscr{C}$, the identity morphism $\mathrm{id}_X: X \to X$ in $\mathscr{C}$ is mapped to the identity morphism $\mathrm{id}_{F(X)}: F(X) \to F(X)$ in $\mathscr{D}$. \item(Projectivity) For two composable morphisms $f$ and $g$ in $\mathscr{C}$, there exists a unique element $k(f,g)$ of the ring $R$, called an \textit{anomaly}, such that \begin{equation} F(f\circ g)=k(f,g) F(f) \circ F(g). \end{equation} \item If one of $f$ and $g$ above is the identity morphism, then $k(f, g)$ is the unit element of $R$. \item (Associativity) For three composable morphisms $f, g, h$ in $\mathscr{C}$, we have \begin{equation} k(f, g\circ h)k(g,h)=k(f \circ g, h)k(f,g). \end{equation} \end{enumerate} \end{enumerate} \end{Definition} When every anomaly is the unit, the notion of a projective functor coincides with the usual notion of a functor. The notion of a natural transformation between functors can be extended to a natural transformation between projective functors if the anomaly factors are the same for both functors. We call a natural transformation between projective functors a \textit{projective natural transformation}. Then, we define a \textit{projective pseudo 2-functor} by replacing functors and natural transformations in the definition of a pseudo 2-functor with projective functors and projective natural transformations. {} \end{document}
\begin{document} \setcounter{page}{1} \vskip1.5cm \begin{center} {\LARGE \bf The Final Solutions of Monty Hall Problem and Three Prisoners Problem} \vskip0.5cm {\bf \large Shiro Ishikawa } \\ \vskip0.2cm \rm \it Department of Mathematics, Faculty of Science and Technology, Keio University, \\ 3-14-1 Hiyoshi kohoku-ku, Yokohama, 223-8522 Japan. \\ E-mail: [email protected] \end{center} \par \rm \vskip0.3cm \par \noindent {\bf Abstract} \normalsize \par \noindent Recently we proposed the linguistic interpretation of quantum mechanics (called quantum and classical measurement theory, or quantum language), which was characterized as a kind of metaphysical and linguistic turn of the Copenhagen interpretation. This turn from physics to language not only extends quantum theory to classical systems but also yields the quantum mechanical world view (i.e., the philosophy of quantum mechanics, in other words, quantum philosophy). And we believe that this quantum language is the most powerful language to describe science. The purpose of this paper is to describe the Monty-Hall problem and the three prisoners problem in quantum language. We of course believe that our proposal provides the final solutions of the two problems. Thus in this paper, we can answer the question: "Why have philosophers continued to stick to these problems?" And the readers will find that these problems are never elementary, and they cannot be solved without the deep understanding of "probability" and "dualism". \rm \par \noindent \par \par \vskip0.3cm \par \noindent {\bf Keywords}: Philosophy of probability, Fisher Maximum Likelihood Method, Bayes' Method, The Principle of Equal (a priori) Probabilities \par \def\cal{\cal} \def\text{\large $\: \boxtimes \,$}{\text{\large $\: \boxtimes \,$}} \par \noindent \section{ Introduction} \subsection{ Monty Hall problem and Three prisoners problem } \rm \par According to ref.
\cite{Ishi2}, we shall introduce the usual descriptions of the Monty Hall problem and the three prisoners problem as follows. \par \noindent {\bf Problem 1} \rm [{}Monty Hall problem]. { Suppose you are on a game show, and you are given the choice of three doors (i.e., \lq\lq Door $A_1$\rq\rq$\!\!\!,\;$ \lq\lq Door $A_2$\rq\rq$\!\!\!,\;$ \lq\lq Door $A_3$\rq\rq$\!\!)$. Behind one door is a car, behind the others, goats. You do not know what's behind the doors \par \noindent However, you pick a door, say "Door $A_1$", and the host, who knows what's behind the doors, opens another door, say \lq\lq Door $A_3$\rq\rq$\!\!\!,\;$ which has a goat. \par \noindent He says to you, \lq\lq Do you want to pick Door $A_2$?\rq\rq$\;\;$ Is it to your advantage to switch your choice of doors? \par \noindent \vskip0.3cm \par \noindent \unitlength=0.26mm \begin{picture}(500,150) \thicklines \put(430,55) {{ \drawline[-15](-40,-30)(120,-30)(120,90)(-350,90) \put(-350,90){\vector(0,-1){20}} \put(-225,90){\vector(0,-1){20}} \put(-100,90){\vector(0,-1){20}} \path(0,30)(60,30)(60,60)(20,60)(20,45)(0,45)(0,30) \put(20,30){\circle{15}} \put(47,30){\circle{15}} }} \put(400,20) {{ \spline(0,30)(5,40)(40,40)(50,30)(40,20)(5,25)(0,15)(-1,30) \spline(-5,50)(5,35)(10,60) \path(15,25)(12,10) \path(16,26)(17,10) \path(30,25)(30,10) \path(31,25)(33,10) \put(8,30){\circle*{2}} \path(50,30)(55,25) }} \put(470,20) {{ \spline(0,30)(5,40)(40,40)(50,30)(40,20)(5,25)(0,15)(-1,30) \spline(-5,50)(5,35)(10,60) \path(15,25)(12,10) \path(16,26)(17,10) \path(30,25)(30,10) \path(31,25)(33,10) \put(8,30){\circle*{2}} \path(50,30)(55,25) }} \thicklines \put(20,20){\line(1,0){370}} \put(40,20){ \path(0,0)(0,100)(80,100)(80,0) \put(20,50){Door $A_1$} } \put(160,20){ \path(0,0)(0,100)(80,100)(80,0) \put(20,50){Door $A_2$} } \put(280,20){ \path(0,0)(0,100)(80,100)(80,0) \put(20,50){Door $A_3$} } \end{picture} \par \noindent {\bf Problem 2} \rm [{}Three prisoners problem]. 
Three prisoners, $A_1$, $A_2$, and $A_3$ were in jail. They knew that one of them was to be set free and the other two were to be executed. They did not know who was the one to be spared, but the emperor did know. $A_1$ said to the emperor, {\lq\lq}I already know that at least one of the other two prisoners will be executed, so if you tell me the name of one who will be executed, you won't have given me any information about my own execution\rq\rq.$\;\;$ After some thinking, the emperor said, \lq\lq $A_3$ will be executed.\rq\rq$\;\;$ Thereupon $A_1$ felt happier because his chance had increased from $\frac{1}{3(= {\rm Num}[\{A_1,A_2,A_3 \}])}$ to $\frac{1}{2(= {\rm Num}[\{ A_1,A_2 \}])}$. Is this prisoner $A_1$'s happiness reasonable or not? \par \noindent \unitlength=0.35mm \begin{picture}(500,130) \thicklines \put(20,0) {{{ \put(70,20) { \put(0,60){\circle{14}} \put(0,40){\ellipse{15}{25}} \path(-3,45)(6,40)(15,40) \put(-3,56){\footnotesize E} \path(-7,10)(-5,29) \path(5,29)(7,10) \path(-7,10)(-3,10)(-1,27) \path(1,27)(3,10)(7,10) } \put(200,20) {{ { \put(0,60){\circle{14}} \put(0,40){\ellipse{15}{25}} \path(3,45)(-6,40)(-15,40) \put(-6,56){\footnotesize $A_1$} \path(-7,10)(-5,29) \path(5,29)(7,10) \path(-7,10)(-3,10)(-1,27) \path(1,27)(3,10)(7,10) } \put(50,0) { \put(0,60){\circle{14}} \put(0,40){\ellipse{15}{25}} \path(-3,45)(6,40)(15,40) \put(-6,56){\footnotesize $A_2$} \path(-7,10)(-5,29) \path(5,29)(7,10) \path(-7,10)(-3,10)(-1,27) \path(1,27)(3,10)(7,10) } \put(100,0) { \put(0,60){\circle{14}} \put(0,40){\ellipse{15}{25}} \path(3,45)(-6,40)(-15,40) \put(-6,56){\footnotesize $A_3$} \path(-7,10)(-5,29) \path(5,29)(7,10) \path(-7,10)(-3,10)(-1,27) \path(1,27)(3,10)(7,10) } }} \thicklines \put(20,20){\line(1,0){370}} \put(160,20){ \path(0,0)(0,100)(180,100)(180,0) } \linethickness{0.15mm} \put(164,20) { \multiput(0,0)(10,0){19}{\line(0,1){100}} } \put(70,20) { \put(10,60){\vector(1,0){50}} \put(60,60){\vector(1,0){60}} \put(6,68){\footnotesize \lq\lq $A_3$
will be executed\rq\rq} \put(6,48){\footnotesize (Emperor)} } }}} \end{picture} The purpose of this paper is to clarify Problem 1 (Monty Hall problem) and Problem 2 (three prisoners problem) as follows. \begin{itemize} \item[(A1)]Problem 1 (Monty Hall problem) is solvable, but Problem 2 (Three prisoners problem) is not well posed. In this sense, Problem 1 and Problem 2 are not equivalent. This is the direct consequence of Fisher's maximum likelihood method mentioned in Section 2. \item[(A2)]Also, there are two ways that the probabilistic property is introduced to both problems as follows: \begin{itemize} \item[(A2$_1$)] in Problem 1, one (discussed in Section 4) is that the host casts the dice, and another (discussed in Section 6) is that you cast the dice. \item[(A2$_2$)] in Problem 2, one (discussed in Section 4) is that the emperor casts the dice, and another (discussed in Section 6) is that the three prisoners cast the dice. \end{itemize} In each case, the former solution is due to Bayes' method (mentioned in Section 2), and the latter solution is due to the principle of equal probabilities (mentioned in Section 5). And, after all, we can conclude, under the situation (A2), that Problem 1 and Problem 2 are equivalent. \end{itemize} \rm The above will be shown in terms of quantum language (=measurement theory). And therefore, we expect the readers to find that quantum language is superior to Kolmogorov's probability theory {\cite{Kolm}}. \par \noindent \subsection{ Overview: Measurement Theory (= Quantum Language) } \rm \par \rm \par \par \rm As emphasized in refs. {}{\cite{Ishi4,Ishi5}}, measurement theory (or in short, MT) is, by a linguistic turn of quantum mechanics (cf.
{\bf Figure 1}:\textcircled{\footnotesize 7} later), constructed as the scientific theory formulated in a certain {}{$C^*$}-algebra ${\cal A}$ (i.e., a norm closed subalgebra in the operator algebra $B(H)$ composed of all bounded operators on a Hilbert space $H$, {\rm cf.} {}{\cite{Murp, Neum}} ). MT is composed of two theories (i.e., pure measurement theory (or, in short, PMT) and statistical measurement theory (or, in short, SMT)). That is, it has the following structure: \par \rm \par \begin{itemize} \item[(B)] $ \underset{\text{\footnotesize }}{ \text{ MT (measurement theory = quantum language) } } $ \\ $=\left\{\begin{array}{ll} \text{(B1)}: \underset{\text{\scriptsize }}{\text{[PMT ]}} = \displaystyle{ { \mathop{\mbox{[(pure) measurement]}}_{\text{\scriptsize (Axiom$^{\rm P}$ 1) }} } } + \displaystyle{ \mathop{ \mbox{ [causality] } }_{ { \mbox{ \scriptsize (Axiom 2) } } } } \\ \\ \text{(B2)} : \underset{\text{\scriptsize }}{\text{[SMT ]}} = \displaystyle{ { \mathop{\mbox{[(statistical) measurement]}}_{\text {\scriptsize (Axiom$^{\rm S}$ 1) }} } } \! + \! \displaystyle{ \mathop{ \mbox{ [causality] } }_{ { \mbox{ \scriptsize (Axiom 2) } } } } \end{array}\right. $ \end{itemize} \par \noindent where Axiom 2 is common in PMT and SMT. For completeness, note that measurement theory (B) (i.e., (B1) and (B2)) is not physics but a kind of language based on {\lq\lq}the quantum mechanical world view{\rq\rq}. As seen in {}{\cite{Ishi6}}, note that MT gives a foundation to statistics. That is, roughly speaking, \begin{itemize} \item[(C1)] it may be understandable to consider that PMT and SMT are related to Fisher's statistics and Bayesian statistics respectively. \end{itemize} When ${\cal A}=B_c(H)$, the ${C^*}$-algebra composed of all compact operators on a Hilbert space $H$, the (B) is called {quantum measurement theory} (or, quantum system theory), which can be regarded as the linguistic aspect of quantum mechanics.
Also, when ${\cal A}$ is commutative $\big($ that is, when ${\cal A}$ is characterized by $C_0(\Omega)$, the $C^*$-algebra composed of all continuous complex-valued functions vanishing at infinity on a locally compact Hausdorff space $\Omega$ ({\rm cf.} {}{\cite{Murp}})$\big)$, the (B) is called {classical measurement theory}. Thus, we have the following classification: \begin{itemize} \item[(C2)] $ \quad \underset{\text{\scriptsize }}{\text{MT}} $ $\left\{\begin{array}{ll} \text{quantum MT$\quad$(when ${\cal A}=B_c (H)$)} \\ \\ \text{classical MT $\quad$ (when ${\cal A}=C_0(\Omega)$)} \end{array}\right. $ \end{itemize} \par \noindent Also, for the position of MT in science, see {\bf Figure 1}, which was precisely explained in {}{\cite{Ishi5, Ishi8}}. \begin{center} \begin{picture}(410,170) { \put(10,70){ { \put(0,-3){ $\!\! \underset{{\text{\footnotesize Aristotle}}}{\underset{{\text{\footnotesize Plato}}}{\overset{\text{\footnotesize Parmenides}}{{\overset{\text{\footnotesize Socrates}}{ {\fbox{\shortstack[l]{Greek\\ {\footnotesize philosophy}}}} } } } } } $ } } \put(51,-3){ \rm $\xrightarrow[\text{\footnotesize sticism}]{\text{\footnotesize Schola-} }$ $\!\! \textcircled{\scriptsize 1}$ } \put(93,7){ {\line(0,1){34}} } \put(93,-7){ {\line(0,-1){47}} } } \put(100,70){ $ \begin{array}{l} \!\!\! {\; \xrightarrow[]{ \; \quad }} \overset{\text{\scriptsize (monism)}}{\underset{\text{\scriptsize (realism)}} {\fbox{\text{Newton}}}} { \overset{\textcircled{\scriptsize 2}}{{\line(1,0){17}}} } \begin{array}{llll} \!\! \rightarrow {\fbox{\shortstack[l]{relativity \\ theory}}} {\xrightarrow[]{\qquad \quad \;\;\;} }{\textcircled{\scriptsize 3}} \\ \\ \!\! \rightarrow {\fbox{\shortstack[l]{quantum \\mechanics}}} { \xrightarrow[\qquad \quad \;\; ]{} }\textcircled{\scriptsize 4} \end{array} \\ \\ \!\! \xrightarrow[]{{ \quad}} \overset{\text{\scriptsize (dualism)}}{ \underset{\text{\scriptsize (idealism)}}{\fbox{ {\shortstack[l]{Descartes\\ Locke,...
\\Kant}} }} } {\xrightarrow[]{\textcircled{\scriptsize 6} }} \! \overset{\text{\scriptsize (linguistic view)}}{\fbox{ \shortstack[l]{language \\ philosophy} }} \!\! \xrightarrow[{ } { \text{\footnotesize zation }} ]{{ { } {\text{\footnotesize quanti-} } }}\textcircled{\scriptsize 8} \end{array} $ } \put(300,86){ {\put(-40,0){\drawline(0,2)(0,-30)}} {\put(-40,-32){\text{$\xrightarrow[]{\; \text{\footnotesize language}}$}}} {\put(6,-32){\textcircled{\scriptsize 7}}} } \put(190,80){\line(0,1){46}} \put(302,110){ $ \left.\begin{array}{llll} \; \\ \; \\ \; \\ \; \end{array}\right\} \xrightarrow[]{\textcircled{\scriptsize 5}} {\!\!\! \overset{\text{\scriptsize (unsolved)}}{ \underset{\text{\scriptsize (quantum phys.)}}{ \fbox{\shortstack[l]{theory of \\ everything}} } } } $ } \put(302,20){ $ \left.\begin{array}{lllll} \; \\ \; \\ \; \\ \; \\ \; \end{array}\right\} {\xrightarrow[]{\textcircled{\scriptsize 10}}} \overset{\text{\scriptsize (=MT)}}{ \underset{\text{\scriptsize (language)}}{ \fbox{\shortstack[l]{\color{red}quantum\\ \color{red}language}} } } $ } \put(100,-70){ {\bf \hypertarget{Figure 1}{Figure 1}: The history of the world-view } } \put(65,-32){ } { \thicklines \color{red} \dashline[50]{4}(287,-47)(270,-47)(270,70)(420,70)(420,-47)(380,-47)} \thicklines {\put(175,-16) {{ { \fbox{\shortstack[l]{ statistics \\ system theory}} } }$\xrightarrow[]{\qquad \;}$\textcircled{\scriptsize 9} } } { { \put(288,-50){\color{red} \bf $\;\;$ linguistic view } \color{black} } { \put(200,155){\color{blue} \bf $\;\;$ realistic view } \color{black} } } } { \color{blue} $\!\!\!\!\!\!\!\!${\dashline[50]{4}(190,160)(110,160)(110,74)(420,74)(420,160)(290,160)} \color{black} } \end{picture} \vskip1.8cm \end{center} \par \vskip0.3cm \par \noindent \section{ Classical Measurement Theory (Axioms and Interpretation)} \subsection{ Mathematical Preparations } \par \noindent \par Since our concern is concentrated to the Monty Hall problem and three prisoners problem, we devote ourselves to 
classical MT in (C2). \par Throughout this paper, we assume that $\Omega$ is a compact Hausdorff space. Thus, we can put $C_0(\Omega) =$ $C(\Omega)$, which is defined by a Banach space (or precisely, a commutative $C^*$-algebra ) composed of all continuous complex-valued functions on a compact Hausdorff space $\Omega$, where its norm $\|f\|_{C(\Omega)}$ is defined by $\max_{\omega \in \Omega}|f(\omega)|$. Let ${C(\Omega)}^*$ be the dual Banach space of ${C(\Omega)}$. That is, $ {C(\Omega)}^* $ $ {=} $ $ \{ \rho \; | \; \rho$ is a continuous linear functional on ${C(\Omega)}$ $\}$, and the norm $\| \rho \|_{ {C(\Omega)}^* } $ is defined by $ \sup \{ | \rho ({}f{}) | \:{}: \; f \in {C(\Omega)} \text{ such that }\| f \|_{{C(\Omega)}} \le 1 \}$. The bi-linear functional $\rho(f)$ is also denoted by ${}_{{C(\Omega)}^*} \langle \rho, f \rangle_{C(\Omega)}$, or in short $ \langle \rho, f \rangle$. Define the \it mixed state $\rho \;(\in{C(\Omega)}^*)$ \rm such that $\| \rho \|_{{C(\Omega)}^* } =1$ and $ \rho ({}f) \ge 0 \text{ for all }f\in {C(\Omega)} \text{ such that } f \ge 0$. And put \begin{align*} {\frak S}^m ({}{C(\Omega)}^*{}) {=} \{ \rho \in {C(\Omega)}^* \; | \; \rho \text{ is a mixed state} \}. \end{align*} \rm Also, for each $\omega \in \Omega$, define the {\it pure state} $\delta_\omega$ $(\in {\frak S}^m ({}{C(\Omega)}^*{}) )$ such that ${}_{{C(\Omega)}^*} \langle \delta_\omega, f \rangle_{C(\Omega)}$ $=$ $f(\omega)$ $(\forall f \in C(\Omega ))$. And put \begin{align*} {\frak S}^p ({}{C(\Omega)}^*{}) {=} \{ \delta_\omega \in {C(\Omega)}^* \; | \; \delta_\omega \text{ is a pure state} \}, \end{align*} which is called a \it state space. \rm \rm Note, by the Riesz theorem ({\rm cf}. 
{}{\cite{Yosi}} ), that $C(\Omega )^*$ $=$ ${\cal M}(\Omega ) $ $\equiv$ $\{ \rho \;|\; \rho $ is a signed measure on $\Omega$ $ \}$ and ${\frak S}^m(C(\Omega )^*)$ $=$ ${\cal M}_{+1}^m(\Omega ) $ $\equiv$ $\{ \rho \;|\; \rho $ is a measure on $\Omega$ such that $\rho(\Omega)=1$ $ \}$. Also, it is clear that $ {\frak S}^p ({}{C(\Omega)}^*{})$ $=$ $\{ \delta_{\omega_0} \;|\; \delta_{\omega_0}$ is a point measure at ${\omega_0} \in \Omega \}$, where $ \int_\Omega f(\omega) \delta_{\omega_0} (d \omega )$ $=$ $f({\omega_0})$ $ (\forall f $ $ \in C(\Omega))$. This implies that the state space $ {\frak S}^p ({}{C(\Omega)}^*{})$ can be also identified with $\Omega$ (called a {\it spectrum space} or simply, {\it spectrum}) such as \begin{align} \underset{\text{\scriptsize (state space)}}{{\frak S}^p ({}{C(\Omega)}^*{})} \ni \delta_{\omega} \leftrightarrow {\omega} \in \underset{\text{\scriptsize (spectrum)}}{\Omega} \label{eq1} \end{align} Also, note that ${C(\Omega)}$ is unital, i.e., it has the identity $I$ (or precisely, $I_{C(\Omega)}$), since we assume that $\Omega$ is compact. According to the noted idea ({\rm cf.} {}{\cite{ Davi}}) in quantum mechanics, an {\it observable} ${\mathsf O}{\; :=}(X, {\cal F},$ $F)$ in ${{C(\Omega)}}$ is defined as follows: \par \par \begin{itemize} \item[(D$_1$)] [Field] $X$ is a set, ${\cal F} (\subseteq 2^X $, the power set of $X$) is a field of $X$, that is, {\lq\lq}$\Xi_1, \Xi_2 \in {\cal F}\Rightarrow \Xi_1 \cup \Xi_2 \in {\cal F}${\rq\rq}, {\lq\lq}$\Xi \in {\cal F}\Rightarrow X \setminus \Xi \in {\cal F}\;${\rq\rq}. \item[(D$_2$)] [Additivity] $F$ is a mapping from ${\cal F}$ to ${{C(\Omega)}}$ satisfying: (a): for every $\Xi \in {\cal F}$, $F(\Xi)$ is a non-negative element in ${{C(\Omega)}}$ such that $0 \le F(\Xi) $ $\le I$, (b): $F(\emptyset) = 0$ and $F(X) = I$, where $0$ and $I$ is the $0$-element and the identity in ${C(\Omega)}$ respectively. 
(c): for any $\Xi_1$, $\Xi_2$ $\in {\cal F}$ such that $\Xi_1 \cap \Xi_2 = \emptyset$, it holds that $ F(\Xi_1 \cup \Xi_2 ) $ $ = $ $ F(\Xi_1 ) + F( \Xi_2 )$. \end{itemize} \par \noindent \par \noindent For the more precise argument (such as countable additivity, etc.), see {}{\cite{Ishi6}}. \vskip0.3cm \par \par \par \par \noindent \subsection{ Classical PMT in (B1) } \rm \par In this section we shall explain classical PMT in (B1). \par \rm With any {\it system} $S$, a commutative $C^*$-algebra ${C(\Omega)}$ can be associated in which the measurement theory (B) of that system can be formulated. A {\it state} of the system $S$ is represented by an element ${\delta_\omega} (\in {\frak S}^p ({}{C(\Omega)}^*{}))$ and an {\it observable} is represented by an observable ${\mathsf{O}}{\; :=} (X, {\cal F}, F)$ in ${{C(\Omega)}}$. Also, the {\it measurement of the observable ${\mathsf{O}}$ for the system $S$ with the state ${\delta_\omega}$} is denoted by ${\mathsf{M}}_{{{C(\Omega)}}} ({\mathsf{O}}, S_{[{\delta_\omega}]})$ $\big($ or more precisely, ${\mathsf{M}}_{C(\Omega)} ({\mathsf{O}}{\; :=} (X, {\cal F}, F), S_{[{\delta_\omega}]})$ $\big)$. An observer can obtain a measured value $x $ ($\in X$) by the measurement ${\mathsf{M}}_{C(\Omega)} ({\mathsf{O}}, S_{[{\delta_\omega}]})$. \par \noindent \par The Axiom$^{\rm P}$ 1 presented below is a kind of mathematical generalization of Born's probabilistic interpretation of quantum mechanics. And thus, it is a statement without reality. \par \noindent {\bf{Axiom$^{\rm P}$ 1\;\; \rm $[$Classical PMT$]$}}. \it The probability that a measured value $x$ $( \in X)$ obtained by the measurement ${\mathsf{M}}_{{{C(\Omega)}}} ({\mathsf{O}}$ ${ :=} (X, {\cal F}, F),$ {}{$ S_{[{\delta_{\omega_0}}]})$} belongs to a set $\Xi (\in {\cal F})$ is given by $ [F(\Xi)](\omega_0) $. \rm \par \par \vskip0.2cm \par \par Next, we explain Axiom 2 in (B).
Let $(T,\le)$ be a tree, i.e., a partial ordered set such that {\lq\lq$t_1 \le t_3$ and $t_2 \le t_3$\rq\rq} implies {\lq\lq$t_1 \le t_2$ or $t_2 \le t_1$\rq\rq}\!. In this paper, we assume that $T$ is finite. Assume that there exists an element $t_0 \in T$, called the {\it root} of $T$, such that $t_0 \le t$ ($\forall t \in T$) holds. Put $T^2_\le = \{ (t_1,t_2) \in T^2{}\;|\; t_1 \le t_2 \}$. The family $\{ \Phi_{t_1,t_2}{}: $ ${C(\Omega_{t_2})} \to {C(\Omega_{t_1})} \}_{(t_1,t_2) \in T^2_\le}$ is called a {\it causal relation} ({\it due to the Heisenberg picture}), \rm if it satisfies the following conditions {}{(E$_1$) and (E$_2$)}. \begin{itemize} \item[{\rm (E$_1$)}] With each $t \in T$, a $C^*$-algebra ${C(\Omega_{t})}$ is associated. \item[{\rm (E$_2$)}] For every $(t_1,t_2) \in T_{\le}^2$, a Markov operator $\Phi_{t_1,t_2}{}: {C(\Omega_{t_2})} \to {C(\Omega_{t_1})}$ is defined (i.e., $\Phi_{t_1,t_2} \ge 0$, $\Phi_{t_1,t_2}(I_{{C(\Omega_{t_2})}})$ $ = $ $ I_{{C(\Omega_{t_1})}}$ ). And it satisfies that $\Phi_{t_1,t_2} \Phi_{t_2,t_3} = \Phi_{t_1,t_3}$ holds for any $(t_1,t_2)$, $(t_2,t_3) \in T_\le^2$. \end{itemize} \noindent The family of dual operators $\{ \Phi_{t_1,t_2}^*{}: $ $ {\frak S}^m ({C(\Omega_{t_1})}^*) \to {\frak S}^m ({C(\Omega_{t_2})}^*) \}_{(t_1,t_2) \in T^2_\le}$ is called a { \it dual causal relation} ({\it due to the Schr\"{o}dinger picture}). When $ \Phi_{t_1,t_2}^*{}$ $ ( {\frak S}^p ({C(\Omega_{t_1})}^*) $ $\subseteq $ $ {\frak S}^p ({C(\Omega_{t_2})}^*) $ holds for any $ {(t_1,t_2) \in T^2_\le}$, the causal relation is said to be deterministic. \par \par \rm Here, Axiom 2 in the measurement theory (B) is presented as follows: \rm \par \noindent {\bf{Axiom 2} \rm[Causality]}. \it The causality is represented by a causal relation $\{ \Phi_{t_1,t_2}{}: $ ${C(\Omega_{t_2})} \to {C(\Omega_{t_1})} \}_{(t_1,t_2) \in T^2_\le}$. 
\rm \par For the further argument (i.e., the $W^*$-algebraic formulation) of measurement theory, see Appendix in {}{\cite{Ishi4}}. \noindent \par \noindent \par \noindent \noindent \subsection{ Classical SMT in (B2) } \rm \rm It is usual to consider that we do not know the state $\delta_{\omega_0}$ when we take a measurement ${\mathsf{M}}_{{{C(\Omega)}}} ({\mathsf{O}}, S_{[\delta_{\omega_0}]})$. That is because we usually take a measurement ${\mathsf{M}}_{{{C(\Omega)}}} ({\mathsf{O}}, S_{[\delta_{\omega_0}]})$ in order to know the state $\delta_{\omega_0}$. Thus, when we want to emphasize that we do not know the state $\delta_{\omega_0}$, ${\mathsf{M}}_{{{C(\Omega)}}} ({\mathsf{O}}, S_{[\delta_{\omega_0}]})$ is denoted by ${\mathsf{M}}_{{{C(\Omega)}}} ({\mathsf{O}}, S_{[\ast]})$. Also, when we know the distribution $\nu_0$ $( \in {\cal M}_{+1}^m(\Omega) ={\frak S}^m({C(\Omega)}^*) )$ of the unknown state $\delta_{\omega_0}$, the ${\mathsf{M}}_{{{C(\Omega)}}} ({\mathsf{O}}, S_{[\delta_{\omega_0}]})$ is denoted by ${\mathsf{M}}_{{{C(\Omega)}}} ({\mathsf{O}},$ $ S_{[\ast]} ( \nu_0 ) )$. \par \vskip0.3cm \par \par The Axiom$^{\rm S}$ 1 presented below is a kind of mathematical generalization of Axiom$^{\rm P}$ 1. \par \par \noindent {\bf{Axiom$^{\rm S}$\;1\; \rm \;[Classical SMT]}}. \it The probability that a measured value $x$ $( \in X)$ obtained by the measurement ${\mathsf{M}}_{{{C(\Omega)}}} ({\mathsf{O}}$ ${ :=} (X, {\cal F}, F),$ {}{$ S_{[\ast]}( \nu_0 ) )$} belongs to a set $\Xi (\in {\cal F})$ is given by $ \nu_0 ( F(\Xi) ) $ $($ $= {}_{{{C(\Omega)}^*}}\langle \nu_0, F(\Xi) \rangle_{{C(\Omega)}}$ $)$. \rm \par \noindent {\it Remark 1}. Note that two statistical measurements ${\mathsf{M}}_{{{C(\Omega)}}} ({\mathsf{O}},$ {}{$ S_{[\delta_{\omega_1}]}( \nu_0 ) )$} and ${\mathsf{M}}_{{{C(\Omega)}}} ({\mathsf{O}},$ {}{$ S_{[\delta_{\omega_2}]}( \nu_0 ) )$} cannot be distinguished before measurements.
In this sense, we consider that, even if $\omega_1 \not= \omega_2$, we can assume that \begin{align} {\mathsf{M}}_{{{C(\Omega)}}} ({\mathsf{O}}, {}{ S_{[\delta_{\omega_1}]}( \nu_0 ) )} = {\mathsf{M}}_{{{C(\Omega)}}} ({\mathsf{O}}, {}{ S_{[\ast]}( \nu_0 ) )} = {\mathsf{M}}_{{{C(\Omega)}}} ({\mathsf{O}}, {}{ S_{[\delta_{\omega_2}]}( \nu_0 ) )}. \label{eq2} \end{align} \par \noindent \vskip0.2cm \par \subsection{ Linguistic Interpretation } \par Next, we have to answer how to use the above axioms as follows. That is, we present the following linguistic interpretation (F) [=(F$_1$)--(F$_3$)], which is characterized as a kind of linguistic turn of so-called Copenhagen interpretation ({\rm cf.} {}{\cite{Ishi4,Ishi5}} ). \par \noindent That is, we propose: \begin{itemize} \item[(F$_1$)] Consider the dualism composed of {\lq\lq}observer{\rq\rq} and {\lq\lq}system( =measuring object){\rq\rq} such as \par \noindent \vskip0.5cm \noindent \begin{center} \unitlength=0.5mm \begin{picture}(200,72)(15,0) \put(-8,0) { \allinethickness{0.2mm} \drawline[-40](80,0)(80,62)(30,62)(30,0) \drawline[-40](130,0)(130,62)(175,62)(175,0) \allinethickness{0.5mm} \path(20,0)(175,0) \put(14,-5){ \put(37,50){$\bullet$} } \put(50,25){\ellipse{17}{25}} \put(50,44){\ellipse{10}{13}} \put(0,44){\put(43,30){\sf \footnotesize{observer}} \put(42,25){\scriptsize{(I(=mind))}} } \put(7,7){\path(46,27)(55,20)(58,20)} \path(48,13)(47,0)(49,0)(50,13) \path(51,13)(52,0)(54,0)(53,13) \put(0,26){ \put(142,48){\sf \footnotesize system} \put(143,43){\scriptsize (matter)} } \path(152,0)(152,20)(165,20)(150,50)(135,20)(148,20)(148,0) \put(10,0){} \allinethickness{0.2mm} \put(0,-5){ \put(130,39){\vector(-1,0){60}} \put(70,43){\vector(1,0){60}} \put(92,56){\sf \scriptsize \fbox{observable}} \put(58,50){\sf \scriptsize } \put(57,53){\sf \scriptsize \fbox{\shortstack[l]{measured \\ value}}} \put(80,44){\scriptsize \textcircled{\scriptsize a}interfere} \put(80,33){\scriptsize \textcircled{\scriptsize b}perceive a 
reaction} \put(130,56){\sf \scriptsize \fbox{state}} } } \put(30,-15){\bf \hypertarget{fig2}{\bf Figure 2}. Descartes' figure in MT } \end{picture} \end{center} \vskip1.0cm And therefore, {\lq\lq}observer{\rq\rq} and {\lq\lq}system{\rq\rq} must be absolutely separated. \item[(F$_2$)] Only one measurement is permitted. And thus, the state after a measurement is meaningless $\;$ since it can not be measured any longer. Also, the causality should be assumed only in the side of system, however, a state never moves. Thus, the Heisenberg picture should be adopted. \item[(F$_3$)] Also, the observer does not have the space-time. Thus, the question: {\lq\lq}When and where is a measured value obtained?{\rq\rq} is out of measurement theory, \end{itemize} \par \noindent and so on. This interpretation is, of course, common to both PMT and SMT. \par \noindent \vskip0.2cm \par \subsection{ Preliminary Fundamental Theorems } We have the following two fundamental theorems in measurement theory: \par \noindent {\bf Theorem 1} [Fisher's maximum likelihood method ({\rm cf}. {}{\cite{Ishi6}})]. Assume that a measured value obtained by a measurement ${\mathsf M}_{C(\Omega)}({\mathsf O}:=(X,{\cal F}, F) , S_{[*]})$ belongs to $\Xi \;(\in {\cal F} )$. Then, there is a reason to infer that the unknown state $[\ast]$ is equal to $\delta_{\omega_0}$, where $\omega_0 \;(\in \Omega )$ is defined by \begin{align*} [{F}(\Xi )](\omega_0) = \max_{\omega \in \Omega } [{F}(\Xi )](\omega). \end{align*} \par \noindent {\bf Theorem 2} [Bayes' method ({\rm cf}. {}{\cite{Ishi6}})]. Assume that a measured value obtained by a statistical measurement ${\mathsf M}_{C(\Omega)}({\mathsf O}:=(X,{\cal F}, F) ,$ $S_{[*]}( \nu_0 ))$ belongs to $\Xi \;(\in {\cal F} )$. 
Then, there is a reason to infer that the posterior state (i.e., the mixed state after the measurement ) is equal to $\nu_{\rm post}$, which is defined by \begin{align*} \nu_{\rm post} (D) = \frac{\int_D [F(\Xi)](\omega ) \nu_0(d \omega) }{\int_\Omega [F(\Xi)](\omega ) \nu_0(d \omega) } \\ \quad (\forall D \in {\cal B}_\Omega; \text{Borel field}). \end{align*} The above two theorems are, of course, the most fundamental in statistics. Thus, we believe in {\bf Figure 1}, i.e., $$ \fbox{\mbox{statistics}} \xrightarrow[\textcircled{\footnotesize 9}]{\qquad \qquad}\textcircled{\footnotesize 10} \fbox{\mbox{quantum language}} $$ \par \noindent \section{ The First Answer to Monty Hall Problem \textcolor{blue}{[resp. Three prisoners problem]} by Fisher's method} \par \par \noindent \par In this section, we present the first answer to Problem 1 (Monty-Hall problem) \textcolor{blue}{[resp. Problem 2 (Three prisoners problem)]} in classical PMT. The two will be simultaneously solved as follows. The spirit of dualism (in Figure 2) urges us to declare that \begin{itemize} \item[(G)] "observer$\;\; \approx\;\; $you" and "system$\;\; \approx\;\; $three doors" in Problem 1 \\ \color{blue} [resp. "observer$\;\; \approx\;\; $prisoner $A_1$" and "system$\;\; \approx \;\;$emperor's mind" in Problem 2] \color{black} \end{itemize} \par \noindent Put $\Omega = \{ \omega_{1} , \omega_{2} , \omega_{3} \}$ with the discrete topology. 
Assume that each state $\delta_{\omega_{{m}}} (\in {\frak S}^p (C(\Omega)^* ))$ means \begin{align} & \delta_{\omega_{{m}}} \Leftrightarrow \text{ the state that the car is } \text{behind the door $A_m$} \nonumber \\ \textcolor{blue}{[\mbox{resp.}} \;\;\; & \mbox{ \textcolor{blue}{ $ \delta_{\omega_{{m}}} \Leftrightarrow $ }} \mbox{ \textcolor{blue}{ the state that the prisoner $A_m$ will be free ] } } \nonumber \\ & \qquad \qquad (m=1,2,3 ) \label{eq3} \end{align} Define the observable ${\mathsf O}_1$ $\equiv$ $({}\{ 1, 2,3 \}, 2^{\{1, 2 ,3\}}, F_1)$ in $C({}\Omega{})$ such that \begin{align} & [F_1({}\{ 1 \}{})](\omega_1{})= 0.0,\qquad [F_1({}\{ 2 \}{})](\omega_1{})= 0.5, \qquad [F_1({}\{ 3 \}{})](\omega_1{})= 0.5, \nonumber \\ & [F_1({}\{ 1 \}{})](\omega_2{})= 0.0, \qquad [F_1({}\{ 2 \}{})](\omega_2{})= 0.0, \qquad [F_1({}\{ 3 \}{})](\omega_2{})= 1.0, \nonumber \\ & [F_1({}\{ 1 \}{})](\omega_3{})= 0.0,\qquad [F_1({}\{ 2 \}{})](\omega_3{})= 1.0, \qquad [F_1({}\{ 3 \}{})](\omega_3{})= 0.0, \label{eq4} \end{align} where it is also possible to assume that $F_1({}\{ 2 \}{})(\omega_1{})=\alpha$, $F_1({}\{ 3 \}{})(\omega_1{}) =1- \alpha$ $ (0 < \alpha < 1)$. Thus we have a measurement ${\mathsf M}_{C({}\Omega{})} ({}{\mathsf O}_1, S_{[{}\ast{}]})$, which should be regarded as the measurement theoretical representation of the measurement that \it you say "Door $A_1$" \rm \textcolor{blue}{ [resp. \it "Prisoner $A_1$" asks to the emperor \rm ].} Here, we assume that \begin{itemize} \item[a)] {\lq\lq}measured value $1$ is obtained by the measurement ${\mathsf M}_{C({}\Omega{})} ({}{\mathsf O}_1, S_{[{}\ast{}]})${\rq\rq} \\ $ \Leftrightarrow \text{The host says {\lq\lq}Door $A_1$ has a goat{\rq\rq}}$ \\ \color{blue} \text{[resp. 
$\Leftrightarrow$ the emperor says {\lq\lq}Prisoner $A_1$ will be executed{\rq\rq} ]} \color{black} \item[b)] {\lq\lq}measured value $2$ is obtained by the measurement ${\mathsf M}_{C({}\Omega{})} ({}{\mathsf O}_1, S_{[{}\ast{}]})$ {\rq\rq} \\ $ \Leftrightarrow \text{The host says {\lq\lq}Door $A_2$ has a goat{\rq\rq}}$ \\ \color{blue} \text{[resp. $\Leftrightarrow$ the emperor says {\lq\lq}Prisoner $A_2$ will be executed{\rq\rq} ]} \color{black} \item[c)] {\lq\lq}measured value $3$ is obtained by the measurement ${\mathsf M}_{C({}\Omega{})} ({}{\mathsf O}_1, S_{[{}\ast{}]})$ {\rq\rq} \\ $ \Leftrightarrow \text{The host says {\lq\lq}Door $A_3$ has a goat{\rq\rq}}$ \\ \color{blue} \text{[resp. $\Leftrightarrow$ the emperor says {\lq\lq}Prisoner $A_3$ will be executed{\rq\rq} ]} \color{black} \end{itemize} \par \par \noindent Recall that, in Problem 1 (Monty-Hall problem) \textcolor{blue}{[resp. Problem 2 (Three prisoners problem)]}, the host said {\lq\lq}Door 3 has a goat{\rq\rq} \textcolor{blue}{[resp. the emperor said {\lq\lq}Prisoner $A_3$ will be executed{\rq\rq}]}. This implies that you \textcolor{blue}{[resp. {\lq\lq}Prisoner $A_1$]} get the measured value {\lq\lq}3{\rq\rq} by the measurement ${\mathsf M}_{C({}\Omega{})} ({}{\mathsf O}_1, S_{[\ast]}{})$. Note that \begin{align} & [F_1({}\{3\}{})] ({}\omega_2{}) = 1.0 = \max \{ 0.5, \; \; 1.0 , \; \; 0.0 \} \nonumber \\ & = \max \{ [F_1({}\{3\}{})] ({}\omega_1{}), [F_1({}\{3 \}{}){}]({}\omega_2{}), [F_1({}\{3 \}{})] ({}\omega_3{}) \}, \label{eq5} \end{align} Therefore, Theorem 1 (Fisher's maximum likelihood method) says that \begin{itemize} \item[(H1)] In Problem 1 (Monty-Hall problem), there is a reason to infer that $[\ast]$ $=$ $\delta_{\omega_2}$. Thus, you should switch to Door $A_2$. \color{blue} \item[(H2)] \color{blue} In Problem 2 (Three prisoners problem), there is a reason to infer that $[\ast]$ $=$ $\delta_{\omega_2}$. 
However, there is no reasonable answer for the question: whether Prisoner $A_1$'s happiness increases. That is, Problem 2 is not a well-posed problem. \color{black} \end{itemize} \par \noindent \section{ The Second Answer to Monty Hall Problem \textcolor{blue}{[resp. Three prisoners problem]} by Bayes' method} \par In order to use Bayes' method, we shall modify Problem 1 (Monty Hall problem) and Problem 2 (Three prisoners problem) as follows. \subsection{ Problems 1$'$ and 2$'$ ( Monty Hall Problem \textcolor{blue}{[resp. Three prisoners problem]} ) } \par \noindent {\bf Problem 1$'$} \rm [{}Monty Hall problem; the host casts the dice]. { Suppose you are on a game show, and you are given the choice of three doors (i.e., \lq\lq Door $A_1$\rq\rq$\!\!\!,\;$ \lq\lq Door $A_2$\rq\rq$\!\!\!,\;$ \lq\lq Door $A_3$\rq\rq$\!\!)$. Behind one door is a car, behind the others, goats. You do not know what's behind the doors. \par \noindent However, you pick a door, say "Door $A_1$", and the host, who knows what's behind the doors, opens another door, say \lq\lq Door $A_3$\rq\rq$\!\!\!,\;$ which has a goat. And he adds that \begin{itemize} \rm \item[{}{($\sharp_1$)}] \it the car was set behind the door decided by the cast of the (distorted) dice. That is, the host set the car behind Door $A_m$ with probability $p_m$ (where $p_1 + p_2 + p_3 =1$, $ 0 \le p_1 , p_2 , p_3 \le 1 $ $)$. \end{itemize} He says to you, \lq\lq Do you want to pick Door $A_2$?\rq\rq$\;\;$ Is it to your advantage to switch your choice of doors? 
\par \noindent \vskip0.3cm \par \noindent \unitlength=0.26mm \begin{picture}(500,150) \thicklines \put(430,55) {{ \drawline[-15](-40,-30)(120,-30)(120,90)(-350,90) \put(-350,90){\vector(0,-1){20}} \put(-225,90){\vector(0,-1){20}} \put(-100,90){\vector(0,-1){20}} \path(0,30)(60,30)(60,60)(20,60)(20,45)(0,45)(0,30) \put(20,30){\circle{15}} \put(47,30){\circle{15}} }} \put(400,20) {{ \spline(0,30)(5,40)(40,40)(50,30)(40,20)(5,25)(0,15)(-1,30) \spline(-5,50)(5,35)(10,60) \path(15,25)(12,10) \path(16,26)(17,10) \path(30,25)(30,10) \path(31,25)(33,10) \put(8,30){\circle*{2}} \path(50,30)(55,25) }} \put(470,20) {{ \spline(0,30)(5,40)(40,40)(50,30)(40,20)(5,25)(0,15)(-1,30) \spline(-5,50)(5,35)(10,60) \path(15,25)(12,10) \path(16,26)(17,10) \path(30,25)(30,10) \path(31,25)(33,10) \put(8,30){\circle*{2}} \path(50,30)(55,25) }} \thicklines \put(20,20){\line(1,0){370}} \put(40,20){ \path(0,0)(0,100)(80,100)(80,0) \put(20,50){Door $A_1$} } \put(160,20){ \path(0,0)(0,100)(80,100)(80,0) \put(20,50){Door $A_2$} } \put(280,20){ \path(0,0)(0,100)(80,100)(80,0) \put(20,50){Door $A_3$} } \end{picture} \par \noindent {\bf Problem 2$'$} \rm [{}Three prisoners problem; the emperor casts the dice]. Three prisoners, $A_1$, $A_2$, and $A_3$ were in jail. They knew that one of them was to be set free and the other two were to be executed. They did not know who was the one to be spared, but they know that \begin{itemize} \rm \item[{}{($\sharp_2$)}] \it the one to be spared was decided by the cast of the (distorted) dice. That is, Prisoner $A_m$ is to be spared with probability $p_m$ (where $p_1 + p_2 + p_3 =1$, $ 0 \le p_1 , p_2 , p_3 \le 1 $ $)$. \end{itemize} but the emperor did know the one to be spared. 
$A_1$ said to the emperor, {\lq\lq}I already know that at least one the other two prisoners will be executed, so if you tell me the name of one who will be executed, you won't have given me any information about my own execution\rq\rq.$\;\;$ After some thinking, the emperor said, \lq\lq $A_3$ will be executed.\rq\rq$\;\;$ Thereupon $A_1$ felt happier because his chance had increased from $\frac{1}{3(= {\rm Num}[\{A_1,A_2,A_3 \}])}$ to $\frac{1}{2(= {\rm Num}[\{ A_1,A_2 \}])}$. This prisoner $A_1$'s happiness may or may not be reasonable? \par \noindent \unitlength=0.35mm \begin{picture}(500,130) \thicklines \put(20,0) {{{ \put(70,20) { \put(0,60){\circle{14}} \put(0,40){\ellipse{15}{25}} \path(-3,45)(6,40)(15,40) \put(-3,56){\footnotesize E} \path(-7,10)(-5,29) \path(5,29)(7,10) \path(-7,10)(-3,10)(-1,27) \path(1,27)(3,10)(7,10) } \put(200,20) {{ { \put(0,60){\circle{14}} \put(0,40){\ellipse{15}{25}} \path(3,45)(-6,40)(-15,40) \put(-6,56){\footnotesize $A_1$} \path(-7,10)(-5,29) \path(5,29)(7,10) \path(-7,10)(-3,10)(-1,27) \path(1,27)(3,10)(7,10) } \put(50,0) { \put(0,60){\circle{14}} \put(0,40){\ellipse{15}{25}} \path(-3,45)(6,40)(15,40) \put(-6,56){\footnotesize $A_2$} \path(-7,10)(-5,29) \path(5,29)(7,10) \path(-7,10)(-3,10)(-1,27) \path(1,27)(3,10)(7,10) } \put(100,0) { \put(0,60){\circle{14}} \put(0,40){\ellipse{15}{25}} \path(3,45)(-,40)(-15,40) \put(-6,56){\footnotesize $A_3$} \path(-7,10)(-5,29) \path(5,29)(7,10) \path(-7,10)(-3,10)(-1,27) \path(1,27)(3,10)(7,10) } }} \thicklines \put(20,20){\line(1,0){370}} \put(160,20){ \path(0,0)(0,100)(180,100)(180,0) } \linethickness{0.15mm} \put(164,20) { \multiput(0,0)(10,0){19}{\line(0,1){100}} } \put(70,20) { \put(10,60){\vector(1,0){50}} \put(60,60){\vector(1,0){60}} \put(6,68){\footnotesize \lq\lq $A_3$ will be executed\rq\rq} \put(6,48){\footnotesize (Emperor)} } }}} \end{picture} \par \noindent {\it Remark 2}. In Problem 1$'$, you may choose "Door $A_1$" by various ways. 
For example, you may choose "Door $A_1$" by the method mentioned in Problem 1$''$ later. \subsection{ The second answer to Problems 1$'$ and 2$'$ ( Monty Hall Problem \textcolor{blue}{[resp. Three prisoners problem]} ) by Bayes' method } \rm In what follows we study these problems. Let $\Omega$ and ${\mathsf O}_1$ be as in Section 3 . Under the hypothesis ($\sharp_1)$ \textcolor{blue}{[resp. ($\sharp_2)$]}, define the mixed state $\nu_0$ $({}\in {\cal M}_{+1}^m ({}\Omega{}){})$ such that: \begin{align} \nu_0 ({}\{ \omega_1 \}{}) = p_1, \quad \nu_0 ({}\{ \omega_2 \}{}) = p_2, \quad \nu_0 ({}\{ \omega_3 \}{}) = p_3 \label{eq6} \end{align} Thus we have a statistical measurement ${\mathsf M}_{C({}\Omega{})} ({}{\mathsf O}_1, S_{[{}\ast{}]} ({} \nu_0{}))$. Note that \begin{itemize} \item[a)] {\lq\lq}measured value $1$ is obtained by the statistical measurement ${\mathsf M}_{C({}\Omega{})} ({}{\mathsf O}_1, S_{[{}\ast{}]} ({} \nu_0{}))${\rq\rq} \\ $ \Leftrightarrow \text{the host says {\lq\lq}Door $A_1$ has a goat{\rq\rq}} $ \\ \color{blue} \text{[resp. $\Leftrightarrow$ the emperor says {\lq\lq}Prisoner $A_1$ will be executed{\rq\rq} ]} \color{black} \item[b)] {\lq\lq}measured value $2$ is obtained by the statistical measurement ${\mathsf M}_{C({}\Omega{})} ({}{\mathsf O}_1, S_{[{}\ast{}]} ({} \nu_0{}))${\rq\rq} \\ $ \Leftrightarrow \text{the host says {\lq\lq}Door $A_2$ has a goat{\rq\rq}} $ \\ \color{blue} \text{[resp. $\Leftrightarrow$ the emperor says {\lq\lq}Prisoner $A_2$ will be executed{\rq\rq} ]} \color{black} \item[c)] {\lq\lq}measured value $3$ is obtained by the statistical measurement ${\mathsf M}_{C({}\Omega{})} ({}{\mathsf O}_1, S_{[{}\ast{}]} ({} \nu_0{}))${\rq\rq} \\ $ \Leftrightarrow $ the host says {\lq\lq}Door $A_3$ has a goat{\rq\rq} \\ \color{blue} \text{[resp. 
$\Leftrightarrow$ the emperor says {\lq\lq}Prisoner $A_3$ will be executed{\rq\rq} ]} \color{black} \end{itemize} \par \noindent Here, assume that, by the statistical measurement ${\mathsf M}_{C({}\Omega{})} ({}{\mathsf O}_1, S_{[{}\ast{}]} ({}\nu_0{}))$, you obtain a measured value $3$, which corresponds to the fact that the host said {\lq\lq}Door $A_3$ has a goat{\rq\rq}$\!\!\!.\;$ \color{blue}{[resp. the emperor said that Prisoner $A_3$ is to be executed ]}, \color{black} Then, Theorem 2 (Bayes' method) says that the posterior state $\nu_{\rm post}$ $({}\in {\cal M}_{+1}^m ({}\Omega{}){})$ is given by \begin{align} \nu_{\rm post} = \frac{F_1(\{3\}) \times \nu_0} {\bigl\langle \nu_0, F_1(\{3\}) \bigr\rangle}. \label{eq7} \end{align} That is, \begin{align} & \nu_{\rm post} ({}\{ \omega_1 \}{})= \frac{\frac{p_1}{2}}{ \frac{p_1}{2} + p_2 }, \quad \nu_{\rm post} ({}\{ \omega_2 \}{})= \frac{p_2}{ \frac{p_1}{2} + p_2 }, \quad \nu_{\rm post} ({}\{ \omega_3 \}{}) = 0. \label{eq8} \end{align} Then, \begin{itemize} \item[(I1)] In Problem 1$'$, $$ \begin{cases} \mbox{ if $\nu_{\rm post} ({}\{ \omega_1 \}{})$ $<$ $\nu_{\rm post} ({}\{ \omega_2 \}{})$ (i.e., $p_1 < 2 p_2 $), you should pick Door $A_2$} \\ \mbox{ if $\nu_{\rm post} ({}\{ \omega_1 \}{})$ $=$ $\nu_{\rm post} ({}\{ \omega_2 \}{})$ (i.e., $p_1 = 2 p_2 $), you may pick Doors $A_1$ or $A_2$} \\ \mbox{ if $\nu_{\rm post} ({}\{ \omega_1 \}{})$ $>$ $\nu_{\rm post} ({}\{ \omega_2 \}{})$ (i.e., $p_1 > 2 p_2 $), you should not pick Door $A_2$} \end{cases} $$ \rm \color{blue} \item[(I2)] \color{blue} In Problem 2$'$, $$ \begin{cases} \mbox{ if $ \nu_{0} (\{\omega_1\}) < \nu_{\rm post} (\{\omega_1\})$ (i.e., $p_1 < 1- 2 p_2$), the prisoner $A_1$'s happiness increases } \\ \mbox{ if $ \nu_{0} (\{\omega_1\}) = \nu_{\rm post} (\{\omega_1\})$ (i.e., $p_1 = 1- 2 p_2$), the prisoner $A_1$'s happiness is invariant } \\ \mbox{ if $ \nu_{0} (\{\omega_1\}) > \nu_{\rm post} (\{\omega_1\})$ (i.e., $p_1 > 1- 2 p_2$), the prisoner 
$A_1$'s happiness decreases } \\ \end{cases} $$ \rm \color{black} \end{itemize} \par \par \noindent \vskip0.3cm \section{ The Principle of Equal Probability } In this section, according to \cite{Ishi2, Ishi4, Ishi9} we prepare Theorem 3 (the principle of equal probability), i.e., \begin{itemize} \item[(J)] unless we have sufficient reason to regard one possible case as more probable than another, we treat them as equally probable. \end{itemize} This theorem will be used in the following section. \vskip0.3cm \par \noindent \par \par Put $\Omega = \{ \omega_1 , \omega_2 , \omega_3, \ldots , \omega_n \}$ with the discrete topology. And consider any observable ${\mathsf O}_1 \equiv (X, {\cal F}, {F}_1 )$ in $C(\Omega)$. Define the bijection $\phi_1: \Omega \to \Omega$ such that \begin{align*} \phi_1 ( \omega_j ) = \begin{cases} \omega_{j+1} & \quad (j \not= n) \\ \omega_{1} & \quad (j=n) \end{cases} \end{align*} and define the observable ${\mathsf O}_k \equiv (X, {\cal F}, {F}_k )$ in $C(\Omega)$ such that \begin{align*} & [F_k(\Xi)](\omega) = [F_1(\Xi)](\phi_{k-1}(\omega)) \\ & \quad (\forall \omega \in \Omega, k=1,2,...,n ) \end{align*} where $\phi_0 (\omega) =\omega (\forall \omega \in \Omega )$ and $\phi_{k} (\omega) = \phi_1 (\phi_{k-1} (\omega ))$ $(\forall \omega \in \Omega, k=1,2,...,n )$. Let $p_k (k=1,...,n)$ be a non-negative real number such that $\sum_{k=1}^n p_k =1$. \begin{itemize} \item[(K)] For example, fix a state $\delta_{\omega_m}$ $(m=1,2,...,n)$. And, by the cast of the ( distorted ) dice, you choose an observable $ {\mathsf O}_k \equiv (X, {\cal F},{ F_k} ) $ with probability $p_k$. And further, you take a measurement $ {\mathsf M}_{C({}{\Omega})}( {\mathsf O}_k := (X, {\cal F}, {F}_k ), S_{[\delta_{\omega_m} ]} ) $. 
\end{itemize} Here, we can easily see that the probability that a measured value obtained by the measurement (K) belongs to $\Xi (\in {\cal F})$ is given by \begin{align} \sum_{k=1}^n p_k \langle F_k (\Xi) , \delta_{\omega_m} \rangle \big( = \sum_{k=1}^n p_k [ F_k (\Xi)] (\omega_m) \big) \label{eq9} \end{align} which is equal to $\langle F_1 (\Xi) , \sum_{k=1}^n p_k \delta_{\phi_{k-1} (\omega_m)} \rangle$. This implies that the measurement (K) is equivalent to a statistical measurement $ {\mathsf M}_{C({}{\Omega})}( {\mathsf O}_1 := (X, {\cal F}, {F}_1 ), $ $ S_{[\delta_{\omega_m}]} ( \sum_{k=1}^n p_k \delta_{\phi_{k-1} (\omega_m)} ) ) $. Note that the (\ref{eq9}) depends on the state $\delta_m$. Thus, we can not calculate the (\ref{eq9}) such as the (\ref{eq8}). However, if it holds that $p_k = 1/n$ $(k=1,...,n)$, we see that $ \frac{1}{n} \sum_{k=1}^n \delta_{\phi_{k-1} (\omega_m) } $ is independent of the choice of the state $\delta_{\omega_m}$. Thus, putting $ \frac{1}{n} \sum_{k=1}^n \delta_{\phi_{k-1} (\omega_m) } $ $=$ $\nu_e$, we see that the measurement (K) is equivalent to the statistical measurement $ {\mathsf M}_{C({}{\Omega})}( {\mathsf O}_1, $ $ S_{[\delta_{\omega_m}]} ( \nu_e ) ) $, which is also equivalent to $ {\mathsf M}_{C({}{\Omega})}( {\mathsf O}_1, $ $ S_{[\ast ]} ( \nu_e ) ) $ (from the formula (\ref{eq2}) in Remark 1). Thus, under the above notation, we have the following theorem, which realizes the spirit (J). \par \noindent {\bf Theorem 3} [ The principle of equal probability (i.e., the equal probability of selection) ]. If $p_k = 1/n $ $(k=1,...,n)$, the measurement (K) is independent of the choice of the state $\delta_m$. Hence, the (K) is equivalent to a statistical measurement $ {\mathsf M}_{C({}{\Omega})}( {\mathsf O}_1 := (X, {\cal F}, {F}_1 ), $ $ S_{[\ast]} ( \nu_e ) ) $. \par \vskip0.3cm \par It should be noted that the principle of equal probability is not "principle" but "theorem" in measurement theory. 
\par \noindent {\it Remark 3}. In the above argument, we consider the set $B'=\{\phi_k \;|\;k=1,2,...,n \}$. However, it may be more natural to consider the set $B=\{\phi \;|\; \mbox{$\phi: \Omega \to \Omega$ is a bijection} \}$. \par \noindent \section{ The Third Answer to Monty Hall Problem \textcolor{blue}{[resp. Three prisoners problem]} by the principle of equal probability} \par \subsection{ Problems 1$''$ and 2$''$ ( Monty Hall Problem \textcolor{blue}{[resp. Three prisoners problem]} ) } \par \noindent {\bf Problem 1$''$} \rm [{}Monty Hall problem; you cast the dice]. { Suppose you are on a game show, and you are given the choice of three doors (i.e., \lq\lq Door $A_1$\rq\rq$\!\!\!,\;$ \lq\lq Door $A_2$\rq\rq$\!\!\!,\;$ \lq\lq Door $A_3$\rq\rq$\!\!)$. Behind one door is a car, behind the others, goats. You do not know what's behind the doors. Thus, \begin{itemize} \rm \item[{}{($\sharp_1$)}] \it you select Door $A_1$ by the cast of the fair dice. That is, you say "Door $A_1$" with probability 1/3. \end{itemize} \par \noindent The host, who knows what's behind the doors, opens another door, say \lq\lq Door $A_3$\rq\rq$\!\!\!,\;$ which has a goat. He says to you, \lq\lq Do you want to pick Door $A_2$?\rq\rq$\;\;$ Is it to your advantage to switch your choice of doors? 
\par \noindent \vskip0.3cm \par \noindent \unitlength=0.26mm \begin{picture}(500,150) \thicklines \put(430,55) {{ \drawline[-15](-40,-30)(120,-30)(120,90)(-350,90) \put(-350,90){\vector(0,-1){20}} \put(-225,90){\vector(0,-1){20}} \put(-100,90){\vector(0,-1){20}} \path(0,30)(60,30)(60,60)(20,60)(20,45)(0,45)(0,30) \put(20,30){\circle{15}} \put(47,30){\circle{15}} }} \put(400,20) {{ \spline(0,30)(5,40)(40,40)(50,30)(40,20)(5,25)(0,15)(-1,30) \spline(-5,50)(5,35)(10,60) \path(15,25)(12,10) \path(16,26)(17,10) \path(30,25)(30,10) \path(31,25)(33,10) \put(8,30){\circle*{2}} \path(50,30)(55,25) }} \put(470,20) {{ \spline(0,30)(5,40)(40,40)(50,30)(40,20)(5,25)(0,15)(-1,30) \spline(-5,50)(5,35)(10,60) \path(15,25)(12,10) \path(16,26)(17,10) \path(30,25)(30,10) \path(31,25)(33,10) \put(8,30){\circle*{2}} \path(50,30)(55,25) }} \thicklines \put(20,20){\line(1,0){370}} \put(40,20){ \path(0,0)(0,100)(80,100)(80,0) \put(20,50){Door $A_1$} } \put(160,20){ \path(0,0)(0,100)(80,100)(80,0) \put(20,50){Door $A_2$} } \put(280,20){ \path(0,0)(0,100)(80,100)(80,0) \put(20,50){Door $A_3$} } \end{picture} \par \noindent {\bf Problem 2$''$} \rm [{}Three prisoners problem; the prisoners cast the dice]. Three prisoners, $A_1$, $A_2$, and $A_3$ were in jail. They knew that one of them was to be set free and the other two were to be executed. They did not know who was the one to be spared, but the emperor did know. Since three prisoners wanted to ask the emperor, \begin{itemize} \rm \item[{}{($\sharp_2$)}] \it the questioner was decided by the fair die throw. 
And Prisoner $A_1$ was selected with probability $1/3$ \end{itemize} Then, $A_1$ said to the emperor, {\lq\lq}I already know that at least one the other two prisoners will be executed, so if you tell me the name of one who will be executed, you won't have given me any information about my own execution\rq\rq.$\;\;$ After some thinking, the emperor said, \lq\lq $A_3$ will be executed.\rq\rq$\;\;$ Thereupon $A_1$ felt happier because his chance had increased from $\frac{1}{3(= {\rm Num}[\{A_1,A_2,A_3 \}])}$ to $\frac{1}{2(= {\rm Num}[\{ A_1,A_2 \}])}$. This prisoner $A_1$'s happiness may or may not be reasonable? \par \noindent \unitlength=0.35mm \begin{picture}(500,130) \thicklines \put(20,0) {{{ \put(70,20) { \put(0,60){\circle{14}} \put(0,40){\ellipse{15}{25}} \path(-3,45)(6,40)(15,40) \put(-3,56){\footnotesize E} \path(-7,10)(-5,29) \path(5,29)(7,10) \path(-7,10)(-3,10)(-1,27) \path(1,27)(3,10)(7,10) } \put(200,20) {{ { \put(0,60){\circle{14}} \put(0,40){\ellipse{15}{25}} \path(3,45)(-6,40)(-15,40) \put(-6,56){\footnotesize $A_1$} \path(-7,10)(-5,29) \path(5,29)(7,10) \path(-7,10)(-3,10)(-1,27) \path(1,27)(3,10)(7,10) } \put(50,0) { \put(0,60){\circle{14}} \put(0,40){\ellipse{15}{25}} \path(-3,45)(6,40)(15,40) \put(-6,56){\footnotesize $A_2$} \path(-7,10)(-5,29) \path(5,29)(7,10) \path(-7,10)(-3,10)(-1,27) \path(1,27)(3,10)(7,10) } \put(100,0) { \put(0,60){\circle{14}} \put(0,40){\ellipse{15}{25}} \path(3,45)(-,40)(-15,40) \put(-6,56){\footnotesize $A_3$} \path(-7,10)(-5,29) \path(5,29)(7,10) \path(-7,10)(-3,10)(-1,27) \path(1,27)(3,10)(7,10) } }} \thicklines \put(20,20){\line(1,0){370}} \put(160,20){ \path(0,0)(0,100)(180,100)(180,0) } \linethickness{0.15mm} \put(164,20) { \multiput(0,0)(10,0){19}{\line(0,1){100}} } \put(70,20) { \put(10,60){\vector(1,0){50}} \put(60,60){\vector(1,0){60}} \put(6,68){\footnotesize \lq\lq $A_3$ will be executed\rq\rq} \put(6,48){\footnotesize (Emperor)} } }}} \end{picture} \par \noindent \bf Answer: \rm By Theorem 3 (The principle 
of equal probability), the above Problems 1$''$ and 2$''$ are respectively the same as Problems 1$'$ and 2$'$ in the case that $p_1=p_2=p_3=1/3$. Then, the formulas (\ref{eq6}) and (\ref{eq8}) say that \begin{itemize} \item[(L1)] In Problem 1$''$, since $\nu_{\rm post} ({}\{ \omega_1 \}{})=1/3$ $<$ $2/3 =\nu_{\rm post} ({}\{ \omega_2 \}{})$, you should pick Door $A_2$. \rm \color{blue} \item[(L2)] \color{blue} In Problem 2$''$, since $ \nu_{0} (\{\omega_1\})=1/3= \nu_{\rm post} (\{\omega_1\})$, the prisoner $A_1$'s happiness is invariant. \rm \color{black} \end{itemize} \rm \noindent \par \noindent \section{ Conclusions } Although the main idea is due to refs. \cite{Ishi3, Ishi9}, in this paper we simultaneously discussed the Monty Hall problem and the three prisoners problem in terms of quantum language. That is, we gave three answers, i.e., \begin{itemize} \item[(M1)] the first answer (due to Fisher's method) in Section 3, \item[(M2)] the second answer (due to Bayes' method) in Section 4, \item[(M3)] the third answer (due to Theorem 3 (the principle of equal probability)) in Section 6 \end{itemize} We of course believe that our proposal is the final solution of the two problems. It should be noted that both the Monty Hall problem and the three prisoners problem are never elementary, and they cannot be solved without the deep understanding of "probability" and "dualism (G)". Thus in this paper, we answered the question: \begin{center} "Why have philosophers continued to stick to these problems?" \end{center} We hope that our assertion will be examined from various viewpoints. \rm \par { \small \normalsize } \end{document}
\begin{document} \title{\large\textbf{Boundedness of $\varepsilon$-lc Complements on Surfaces}} \tableofcontents \section{Boundedness of $\varepsilon$-log canonical complements on surfaces} \subsection{Introduction} The concept of {\textbf{complement}} was introduced and studied by Shokurov [Sh1, Sh2]. He used complements as a tool in the construction of 3-fold log flips [Sh1] and in the classification of singularities and contractions [Sh2]. Roughly speaking a complement is a ``good member'' of the anti-pluricanonical linear system i.e. a general member of $|-nK_X|$ for some $n>0$. In order, the existence of such a member and the behaviour of the index $n$ are the most important problems in complement theory. Below we give the precise definition of the ``good member''. Throughout this paper we assume that the varieties involved are algebraic varieties over $\mathbb{C}$. In this section the varieties are all surfaces unless otherwise stated. By a log pair $(X/Z,B)$ we mean algebraic varieties $X$ and $Z$ equipped with a projective contraction $X\longrightarrow Z$ and $B$ is an $\mathbb{R}$-boundary on $X$. When we write $(X/P\in Z,B)$ we mean a log pair $(X/Z,B)$ with a fixed point $P\in Z$; in such situation we may shrink $Z$ around $P$ without mentioning it. The pair $(X/Z,B)$ is weak log Fano (WLF) if it has log canonical singularities (lc) and $-(K_X+B)$ is nef and big$/Z$ and $X$ is $\mathbb{Q}$-factorial. For the basic definitions of the Log Minimal Model Program (LMMP), the main references are [KMM] and [KM]. And to learn more about the complement theory [Sh2] and [Pr] are the best. \begin{defn}[Complements]\label{comp} Let $(X/Z,B=\sum_{i} b_{i}B_{i})$ be a $d$-dim pair where $X$ is normal and $B$ is an $\mathbb{R}$-boundary. Then $K_{X}+B^+$ is called an $(\varepsilon, n)$-complement/$P\in Z$ (resp. 
in codim 2) for $K_X+B$, where $B^{+}=\sum_{i} b_{i}^{+}B_{i}$, if the following properties hold: \begin{description} \item[$\diamond$] $(X,K_{X}+B^{+})$ is an $\varepsilon$-lc pair/$P\in Z$ (resp. $\varepsilon$-lc in codim 2) and $n(K_{X}+B^{+})\sim 0/P\in Z$. \item[$\diamond$] $\llcorner (n+1)b_{i}\lrcorner\leq nb_{i}^{+}$. \end{description} $K_{X}+B^+$ is called an $(\varepsilon,\mathbb{R})$-complement/$P\in Z$ (resp. in codim 2) for $K_X+B$ if $(X,K_{X}+B^{+})$ is $\varepsilon$-lc$/P\in Z$ (resp. $\varepsilon$-lc in codim 2), $K_{X}+B^{+}\sim_{\mathbb{R}} 0/P\in Z$ and $B^{+}\geq B$. An $(\varepsilon,\mathbb{Q})$-complement/$P\in Z$ can be similarly defined where $\sim_{\mathbb{R}}$ is replaced by $\sim_{\mathbb{Q}}$. \end{defn} Despite the not quite easy definition above, complements have very good birational and inductive properties which make the theory a powerful tool to apply to the LMMP. In order, complements don't always exist even with strong conditions such as $-(K_X+B)$ being nef [Sh2, 1.1]. But for example they certainly do when $-(K_X+B)$ is nef and big and $K_X+B$ is lc. In this paper, in all the situations that occur the complements usually exist. So we concentrate on the second main problem about complements which is related to several open problems in the LMMP; namely the boundedness. We state conjectures on the boundedness of complements due to Shokurov. \begin{conj}[Weak $\varepsilon$-lc Complements]\label{weak} Let $\Gamma \subseteq [0,1]$ be a set of real numbers which satisfies D.C.C. Then for any $0<\delta$ and $d$ there exists a finite set $\mathcal{N}_{\delta,d,\Gamma}$ of positive integers and $0<\varepsilon$ such that any d-dim $\delta$-lc (resp. in codim 2) weak log Fano pair $(X/P\in Z,K_{X}+B)$, where $B\in \Gamma$, would be $(\varepsilon,n)$-complementary/$P\in Z$ (resp. in codim 2) for some $n\in \mathcal{N}_{\delta,d ,\Gamma}$ . 
\end{conj} We show the above conjecture as $WC_{\delta, d,\Gamma}$ for short. \begin{conj}[Strong $\varepsilon$-lc Complements] \label{strong} For any $0<\varepsilon$ and $d$ there exists a finite set $\mathcal{N}_{\varepsilon, d}$ of positive integers such that any d-dim $\varepsilon$-lc (resp. in codim 2) weak log Fano pair $(X/P\in Z,K_{X}+B)$ has an $(\varepsilon,n)$-complement/$P\in Z$ (resp. in codim 2) for some $n\in \mathcal{N}_{\varepsilon,d}$. \end{conj} We show the above conjecture as $SC_{\varepsilon, d}$ for short. If we replace $\varepsilon>0$ with $\varepsilon=0$ in the conjecture above (it makes a big difference) then we get the usual conjecture on the boundedness of lc complements which has been studied by Shokurov, Prokhorov and others [Sh2, PSh, PSh1, Pr]. It is proved in dim 2 [Sh2] with some restrictions on the coefficients of $B$. The following important conjecture, due to Alexeev and the Borisov brothers, is related to the conjectures above [Mc, A1, PSh, MP]. \begin{conj}[BAB]\label{BAB} Let $\delta >0$ be a real number and $\Gamma\subset [0,1]$. Then those varieties $X$ for which $(X,B)$ is a $\delta$-lc WLF pair of dim $d$ for a boundary $B\in \Gamma$, are elements of an algebraic family. \end{conj} We show the above conjecture as $BAB_{\delta, d,\Gamma}$ for short. Alexeev proved $BAB_{\delta, 2,\Gamma}$ for any $\delta>0$ and $\Gamma$ [A1]. This conjecture was proved by Kawamata for terminal singularities in dim 3 [K1] and $BAB_{1, 3,\{0\}}$ was proved by Kollar, Mori, Miyaoka and Takagi [KMMT]. The smooth case was proved by Kollar, Mori and Miyaoka in any dimension. The conjecture is open even in dim 3 when $\delta<1$. In order, in many interesting applications $\delta<1$. The following special case of conjecture \ref{BAB} was proved by Borisov in dim 3 [B] and by McKernan in any dimension [Mc]. 
\begin{thm}[BM]\label{BM} The set of all klt WLF pairs $(X,B)$ with a fixed given index is bounded i.e. these pairs are elements of an algebraic family. \end{thm} The following conjecture is due to Shokurov. \begin{conj}[ACC for mlds]\label{acc} Suppose $\Gamma\subseteq [0,1]$ satisfies the descending chain condition (DCC). Then the following set satisfies the ascending chain condition (ACC): \[ \{\mld(\mu,X,B)| {\mbox{$(X,B)$ is lc of dim d, $\mu$ a point of $X$ and $B\in \Gamma$}}\}\] \end{conj} Note that $\mu$ is assumed to be a Grothendieck point of $X$ which is not necessarily closed. We show the above conjecture as $ACC_{d,\Gamma}$ for short. Alexeev proved $ACC_{2,\Gamma}$ for any DCC set $\Gamma\subseteq [0,1]$ [A2]. This conjecture is open in higher dimensions except in some special cases. \begin{conj}[Log Termination]\label{lt} Let $(X,B)$ be a klt pair of dim d. Then any sequence of $K_X+B$-flips terminates. \end{conj} This conjecture is the last step of the LMMP in dim $d$ and we show it as $LT_{d}$. Kawamata proved $LT_{3}$ [K2] and the four dimensional case with terminal singularities [KMM]. Actually $LT_{4}$ is the main missing component of $LMMP_4$ without which we cannot apply the powerful LMMP to problems in algebraic geometry. This conjecture did not seem to be that difficult at least because of the short proof of Kawamata to $LT_{3}$ where he uses the classification of terminal singularities. The latter classification is not known in higher dimensions. Recent attempts by Kawamata and others to solve $LT_{4}$ showed that this problem is much deeper than they expected. There is speculation that it may be even more difficult than the flip problem (Shokurov believes this). We listed several important conjectures with no obvious relation. 
It is Shokurov's amazing idea to put all these conjectures in a single framework which we call {\textbf{Shokurov's Program}}: \begin{equation}\label{program} \end{equation} \begin{description} \item[${\mathbf{ACC_d \longrightarrow LT_d}}$] Shokurov proved that the $LT_d$ follows from the above ACC conjecture up to dim $d$ and the following problem up to dim $d$ [Sh4]: \begin{conj}[Lower Semi-Continuity]\label{lsc} For any klt pair $(X,B)$ of dim d and any $c\in \{0, 1, \dots, d-1\}$ the function $\mld_c(\mu,X,B): \{c-points ~of~ X\}\longrightarrow \mathbb{R}$ is lower semi-continuous. \end{conj} A $c$-point is a $c$-dim Grothendieck point of $X$. This conjecture is proved up to dim 3 by Ambro [Am]. This conjecture doesn't seem to be as tough as previous conjectures. Shokurov proved this problem in dim 4 for mlds in $[0,2]$ [Sh4, lemma 2]. So ACC in dim 4 is enough for the log termination in dim 4 [Sh4, corollary 5]. Actually ACC for mlds in [0,1] for closed points is enough [Sh4, corollary 5]. \item[$\mathbf{BAB_{d-1} \longrightarrow ACC_d}$] Shokurov associates a topological dimension $0\leq reg(P\in X,B)\leq d-1$ to any $d$-dim lc singularity $(P\in X,B)$ [Sh2, 7.9] and proves that the $ACC_{d,\Gamma}$ for pairs with $reg(P\in X,B)=0$ follows from the BAB conjecture in dim $d-1$ [PSh , 4.4]. In order if $reg(P\in X,B)=0$ then the singularity is exceptional (see definition \ref{exc}). Also $ACC_{d,\Gamma}$ for pairs with $reg(P\in X,B)\in \{1, \dots, d-2\}$ can be reduced to lower dimensions. So the only remaining part of $ACC_{d,\Gamma}$ is when $reg(P\in X,B)=d-1$. This case is expected to be proved using different methods. So {\emph{in particular}} $ACC_{4,\Gamma}$ follows from the BAB in dim 3 and the $reg(P\in X,B)=3$ case. Moreover $ACC_{3,\Gamma}$ follows from the $reg(P\in X,B)=2$ case. 
\item[$\mathbf{WC_{d-1} \longrightarrow BAB_{d-1}}$] And here comes probably the most important application of the theory of complements: $WC_{\delta, d-1,\{0\}}$ ``implies'' $BAB_{\delta,d-1,[0,1]}$. More precisely, these two problems can be solved together at once. In fact, in those situations where boundedness of varieties is difficult to prove, boundedness of complements is easier to prove. And that is exactly what we do in this paper for the 2-dim case: we prove $WC_{\delta, 2,\{0\}}$ and $BAB_{\delta,2,[0,1]}$. Our main objective was to obtain a proof with as little use of surface geometry as possible so that it can be generalised to higher dimensions. In other words, the methods used in the proof of these results are of the most importance to us. After finishing this work, we now expect to finish the proof of $WC_{\delta, 3,\{0\}}$ ``implies'' $BAB_{\delta,3,[0,1]}$ in the not-too-distant future! And that was our original goal. \item[The program in dim 4] Let us mention that by carrying out Shokurov's program in dim 4, in which the main ingredient is $WC_{\delta ,3, \{0\}}$ i.e. boundedness of $\varepsilon$-lc complements in dim 3, we will prove the following conjectures: \begin{itemize} \item ACC for mlds in dim 3. \item Boundedness of $\delta$-lc 3-fold log Fanos i.e. BAB in dim 3. \item ACC for mlds in dim 4. \item Lower semi-continuity for mlds in dim 4. \item Log termination in dim 4 and then LMMP in dim 4. \end{itemize} \end{description} About this paper: \begin{enumerate} \item Section 1 is devoted to the study of complements on log surfaces. \item In 1.2 we recall some definitions and lemmas. \item In 1.3 we prove $WC_{\delta, 1, [0,1]}$ i.e. the boundedness of $\varepsilon$-lc complements in dim 1 (\textbf{theorem \ref{curve}}). \item In 1.5 we prove $WC_{\delta, 2, \{0\}}$ for the case $X=Z$ i.e. 
the boundedness of $\varepsilon$-lc complements in dim 2, locally, for points on surfaces with $B=0$ (\textbf{theorem \ref{main-local-isom}}). \item In 1.6 we prove $WC_{\delta, 2, \{0\}}$ when $X/Z$ is a birational equivalence i.e. the boundedness of $\varepsilon$-lc complements in dim 2, locally, for birational contractions of surfaces with $B=0$ (\textbf{theorem \ref{3-I}}). This proof is a surface proof i.e. we heavily use surface geometry and it is not expected to be generalized to higher dimensions. A second proof of the birational case is given in 1.10 (\textbf{theorem \ref{4-A'}}). \item In 1.7 we prove $WC_{\delta, 2, \{0\}}$ when $Z=pt$ i.e. the boundedness of $\varepsilon$-lc complements on surfaces, globally, with $B=0$ (\textbf{theorem \ref{weak-2dim}}). The proof is based on the LMMP and we expect it to be generalised to higher dimensions. As a corollary we give a totally new proof of the boundedness of $\varepsilon$-lc log del Pezzo surfaces (=BAB in dim 2) (\textbf{corollary \ref{BAB'}}). Another application of our theorem is that the boundedness of lc ($\varepsilon=0$) complements can be proved only using complement theory (\textbf{theorem \ref{bc_2}}). The latter boundedness was proved by Shokurov [Sh2]. \item In 1.8 we give a second proof of $WC_{\delta, 2, \{0\}}$ in the global case i.e. when $Z=pt$ (\textbf{theorem \ref{3-5-H}}). This proof also uses surface geometry and is not expected to be generalized to higher dimensions. \item In 1.9 we discuss an example which shows that the transformed boundary doesn't have a better singularity than the original boundary. \item In 1.10 we give a proof of all local cases for $B\in \Phi_{sm}$, in particular, the case where $X/Z$ is a fibration over a curve (\textbf{theorem \ref{4-A'}}). This proof is also based on the LMMP. \item Section 2 is about higher dimensional $\varepsilon$-lc complements. We discuss our joint work with Shokurov. 
\item In 2.1 We give a \textbf{Plan} about how to attack the boundedness of $\varepsilonsilon$-lc complements in dimension 3. This is proposed by the author. \item In 2.2 We give Shokurov's \textbf{Plan} about how to attack the boundedness of $\varepsilonsilon$-lc complements in dimension 3. \end{enumerate} Let us summarise the main results of section 1: \betagin{thm} Conjecture \ref{weak} holds in dim 1 for $\Gammamma=[0,1]$. \end{thm} See \ref{curve} for the proof. \betagin{thm} Conjecture \ref{weak} holds in dim 2 in the global case (i.e. $\dim Z=0$) for $\Gammamma=\{0\}$. \end{thm} See \ref{weak-2dim} and \ref{3-5-H} for proofs. \betagin{thm} Conjecture \ref{weak} holds in dim 2 in the local cases (i.e. $\dim Z>0$) for $\Gammamma=\Phi_{sm}$. \end{thm} See \ref{4-A'} , \ref{main-local-isom} and \ref{3-I} for proofs. \betagin{cor} Conjecture \ref{BAB} holds in dim 2. \end{cor} See \ref{BAB'} for proof. Conjecture \ref{BAB} in dim 2 was first proved by Alexeev using different methods [A1]. \betagin{cor} Theorem \ref{bc_2} can be proved using only the complement theory. \end{cor} See the discussion following theorem \ref{bc_2}. \betagin{rem}[$\varepsilonsilon$-lc Complements Method] Though formally speaking the list above are the main results in section 1, but we believe that the method used to prove \ref{weak-2dim} and \ref{4-A'} is the most important result of this section. \end{rem} Here we mention some developments in the theory of complements. The following theorem was proved by Shokurov [Sh2] for surfaces. \betagin{thm}\label{bc_2} There exists a finite set $\mathcal{N}_{2}$ of positive integers such that any 2-dim lc weak log Fano pair $(X/P\in Z,B)$ has a $(0,n)$-complement$/P\in Z$ for some $n\in \mathcal{N}_{2}$ if $B$ is semi-standard i.e. for each coefficient $b$ of $B$, $b\geq \frac{6}{7}$ or $b=\frac{m-1}{m}$ for some natural number $m$. In order if $\dim Z>0$ then the theorem holds for a general boundary. 
\end{thm} In order Shokurov uses the BAB in dim 2 in the proof of the above theorem. As mentioned before, the results of this paper imply the BAB in dim 2. So the above theorem can be proved only based on the theory of complements. A similar theorem is proved by Prokhorov and Shokurov in dim 3 modulo BAB in dim 3 and the effective adjunction in dim 3 [PSh1]. However the local case doesn't need the later assumptions as the following theorem shows. \betagin{thm} Let $(X/P\in Z,B)$ be a Klt WLF 3-fold pair where $\dim Z\geq 1$ and $B\in \Phi_{sm}$. Then $K_X+B$ is $(0,n)$-complementary$/P\in Z$ for some $n\in \mathcal{N}_{2}$. \end{thm} Complements have good inductive properties as the theorem above shows which was proved by Prokhorov and Shokurov [PSh]. This theorem is stated and proved in higher dimensions in more general settings (see [PSh]). To avoid some exotic definitions, we stated only the 3-fold version. Finally we give some easy examples of complements. More interesting examples can be found in [Sh1, Sh2, Pr, PSh, PSh1]. \betagin{exa} Let $(X/Z,B)=(\mathbb{P}^1/pt.,0)$ and $P_1, P_2, P_3$ distinct points on $\mathbb{P}^1$. Then $K_X+P_1+P_2$ is a $(0,1)$-complement for $K_X$ but it is not an $(\varepsilonsilon,n)$-complement for any $\varepsilonsilon>0$ since $K_X+P_1+P_2$ is not Klt. On the other hand $K_X+\frac{2}{3}P_1+\frac{2}{3}P_2+\frac{2}{3}P_3$ is a $(\frac{1}{3}, 3)$-complement for $K_X$. \end{exa} \betagin{exa} Let $(X_1/Z_1,B_1)=(\mathbb{P}^2/pt.,0)$ and $(X_2/Z_2,B_2)=(\mathbb{P}^2/\mathbb{P}^2,0)$. Then $K_{X_2}$ is a $(2,1)$-complement/$Z_2$ at any point $P\in Z_2$ but obviously $K_{X_1}$ is not even numerically zero/$Z_1$ though $K_{X_1}=K_{X_2}$. \end{exa} \betagin{exa} Let $(X/Z,B)=(X/X,0)$ where $X$ is a surface with canonical singularities. Then the index of $K_X$ is 1 at any point $P\in X$. So we can take $B^+=0$ and $K_X$ a $(1,1)$-complement$/X$ for $K_X$ at any $P\in X$. 
\end{exa} \subsection{Preliminaries} In this subsection we bring some basic definitions and constructions. \betagin{defn} A set $\mathcal{X}$ of varieties of the same dimension is called bounded if there are schemes $\mathbb{X}$ and $S$ of finite type and a morphism $\phi: \mathbb{X}\longrightarrow S$ such that each element of $\mathcal{X}$ is isomorphic to a geometric fibre of $\phi$. Moreover each fibre should give an element of $\mathcal{X}$. \end{defn} \betagin{defn} Let $\mathcal{X}$ be a set of pairs $(X,B_X)$ of the same dimension. Then the set consisting of all $(X,\Supp B_X)$ where $X\in \mathcal{X}$ is called bounded if there are schemes $\mathbb{X}$ and $S$ of finite type, a divisor $\mathbb{B}$ on $\mathbb{X}$ and a morphism $\phi: \mathbb{X}\longrightarrow S$ such that each $X\in \mathcal{X}$ is isomorphic to a geometric fibre $\mathbb{X}_{s}$ and $\Supp B_X=\Supp \mathbb{B}|\mathbb{X}_{s}$. And $(X,B)$ bounded means that $(X,\Supp B_X)$ is bounded and there are only finitely many possibilities for the coefficients of $B$. \end{defn} \betagin{defn} Let $(X,B)$ be a Klt pair of dim $d$. Let $\phi: Y\longrightarrow X$ be a morphism such that $B_Y\geq 0$ where $K_Y+B_Y={^*(K_X+B)}$. Then $Y$ is called a partial resolution of $(X,B)$. \end{defn} \betagin{lem} Let $\mathcal{X}=\{X\}$ be a bounded set of Klt varieties of dim $d$ such that $-K_X$ is nef and big. Then $\mathcal{Y}$ the set of partial resolutions for all $X\in\mathcal{X}$ is bounded. \end{lem} \betagin{proof} By assumptions for each $Y\in\mathcal{Y}$ there are $X\in\mathcal{X}$ and a boundary $B_Y$ such that $K_Y+B_Y={^*K_X}$. Since $\mathcal{X}$ is bounded hence there are only a finite number of possibilities for the coefficients of $B_Y$ independent of $Y$ thus the index of $K_Y+B_Y$ is bounded. And since $-K_X$ is nef and big so $-(K_Y+B_Y)$ is also nef and big. Now by Borisov-Mckernan [Mc] the set $\mathcal{Y}$ is bounded. 
$\Box$ \end{proof} \begin{defn} A variety $X/Z$ of dim $d$ is called Pseudo-WLF/$Z$ if there exists a boundary $B$ such that $(X/Z,B)$ is WLF. Moreover $X$ is called Klt Pseudo-WLF/$Z$ if there is a Klt WLF $(X/Z,B)$. If $\dim Z=0$ then we usually drop $Z$. \end{defn} \begin{rem} Pseudo-WLF varieties have good properties. For example $\overline{NE}(X/Z)$ is a finite rational polyhedral cone. Moreover each extremal face of the cone is contractible [Sh5, Sh3]. In section 2 we prove that the Klt Pseudo-WLF property is preserved under flip and divisorial contractions. \end{rem} \begin{defn} The set $\Phi_{sm}=\{\frac{k-1}{k}| k\in \mathbb{N}\}\cup \{1\}$ is called the set of standard boundary multiplicities. For a boundary $B$ by $B\in \Phi_{sm}$ we mean that the coefficients of $B$ are in $\Phi_{sm}$. \end{defn} \begin{defn}[Exceptional pairs]\label{exc} Let $(X/Z,B)$ be a pair of dim $d$. If $\dim Z=0$ then $(X/Z,B)$ is called exceptional if there is at least one $(0,\mathbb{Q})$-complement $K_X+B^+$ and any $(0,\mathbb{Q})$-complement $K_X+B^+$ is Klt. If $\dim Z>0$ then $(X/Z,B)$ is called exceptional if there is at least one $(0,\mathbb{Q})$-complement $K_X+B^+$ and any $(0,\mathbb{Q})$-complement $K_X+B^+$ is plt on a log terminal resolution. Otherwise $(X/Z,B)$ is called non-exceptional. \end{defn} \begin{rem}\label{analytic-algebraic} Boundedness of analytic $(\varepsilon,n)$-complements implies the boundedness of algebraic $(\varepsilon,n)$-complements. That is because of the general GAGA principle [Sh1]. \end{rem} \begin{lem}\label{pre-1} Let $Y/X/Z$ and $K_Y+B_Y$ be nef$/X$ and $K_X+B={_*(K_Y+B_Y)}$ be $(\varepsilon,n)$-complementary/$Z$. Moreover assume that each non-exceptional$/X$ component of $B_Y$ that intersects an exceptional divisor$/X$ has a standard coefficient; then $(Y,B_Y)$ will also be $(\varepsilon,n)$-complementary/$Z$. \end{lem} \begin{proof} See [PSh, 6.1]. 
\end{proof} \subsection{The case of curves} In this subsection we prove \ref{weak} for the case of curves. Note that 1-dim global log Fano pairs are just $(\mathbb{P}^{1}, B)$ for a boundary $B=\sum_{i} b_{i}B_{i}$ where $\sum_{i} b_{i}-2<0$. The local case for curves is trivial. \begin{thm}\label{curve} $WC_{\delta, 1,[0,1]}$ holds; more precisely, suppose $\frac{m-1}{m}\leq 1-\delta <\frac{m}{m+1}$ for a natural number $m$; then we have: \begin{description} \item[$\diamond$] $N_{\delta,1, [0,1]}\subseteq \cup_{0<k\leq m}\{k,k+1\}$. \item[$\diamond$] $(\mathbb{P}^{1},B^{+})$ can be taken $\frac{1}{m+1}$-lc. \end{description} \end{thm} \begin{proof} Let $B=\sum_{i} b_{i}B_{i}$ and put $b=b_{h}=\max\{b_{i}\}$ and suppose $\frac{k-1}{k}\leq b <\frac{k}{k+1}$ for a natural number $k$. If $k=1$ then $0\leq b <\frac{1}{2}$ and so we have a 1-complement $B^{+}=0$. Now assume that $k>1$ and define $a_{i,t}=\llcorner (t+1)b_{i}\lrcorner$ and note that by our assumptions $\sum_{i} a_{i,k}\leq 2k+1$ since $\sum_{i}b_{i}<2$. If $K+B$ doesn't have a $k$-complement then $\sum_{i} a_{i,k}=2k+1$. Since $\frac{k-1}{k}\leq b <\frac{k}{k+1}$ we have $\frac{(k+1)(k-1)}{k}=k+1-\frac{k+1}{k}=k-\frac{1}{k}\leq (k+1)b <\frac{(k+1)k}{k+1}=k$. Thus $a_{h,k}=k-1$ and $1-\frac{1}{k}\leq \langle(k+1)b\rangle <1$ where $\langle .\rangle$ stands for the fractional part. Now $a_{i,k+1}=\llcorner (k+2)b_{i}\lrcorner=\llcorner (k+1)b_{i}+b_{i}\lrcorner$. So $a_{i,k+1}$ is $a_{i,k}$ or $a_{i,k}+1$. The latter happens iff $1\leq b_{i}+\langle (k+1)b_{i}\rangle$. By the above $b_{h}+\langle (k+1)b_{h}\rangle \geq \frac{k-1}{k}+1-\frac{1}{k} \geq 1$ so $a_{h,k+1}=a_{h,k}+1$. On the other hand since $\sum_{i}b_{i}<2$ and $\sum_{i}a_{i,k}=2k+1$ then $\sum_{i}\langle (k+1)b_{i}\rangle <1$. And since $ 1-\frac{1}{k}\leq \langle (k+1)b_{h}\rangle$ then $\langle (k+1)b_{i}\rangle <\frac{1}{k}$ if $i\neq h$. 
So under assumption $i\neq h$ if $1 \leq \langle (k+1)b_{i}\rangle +b_{i}$ then $1-\frac{1}{k}<b_{i}$. \\ Thus if $K+B$ has no $k+1$-complement then $1 \leq \langle (k+1)b_{j}\rangle +b_{j}$ should hold at least for some $j\neq h$. So again we have $ 1-\frac{1}{k}\leq \langle (k+1)b_{j}\rangle$ and so $\langle (k+1)b_{j}\rangle + \langle (k+1)b_{h}\rangle \geq 2(1-\frac{1}{k})\geq 1$ and this is a contradiction. Hence $K+B$ should have a $k$ or $k+1$-complement. If $K+B$ has a $k$ complement then we can have a maximum $max\{b_{i}^{+}\}=b^{+}=1-\frac{1}{k}\leq 1-\deltalta$. If it has a $k+1$-complement then again we can have a maximum $b^{+}\leq \frac{k}{k+1}$. Since $0<k\leq m$ then $N_{\deltalta,1}\subseteq \cup_{0<k\leq m}\{k,k+1\}$ and $K+B^{+}$ can be chosen as $\frac{1}{m+1}$-lc. These prove the theorem. $\Box$ \end{proof} \betagin{rem} Above we just proved that $\sum_{i} \frac{\llcorner (n+1)b_{i}\lrcorner}{n}\leq 2$ for a bounded $n$. If the equality doesn't hold then we may add some positive coefficients to get the equality and construct the complement. \end{rem} \subsection{The case of surfaces} We divide the surface case of conjecture \ref{weak} into the following cases: \betagin{description} \item[local isomorphic] $X/Z$ is an isomorphism. \item[local birational] $X/Z$ is birational but may not be an isomorphism. \item[local over curve] $Z$ is a curve. \item[global] $Z$ is a point. \end{description} \subsection{Local isomorphic case} The main theorem in this subsection is theorem \ref{main-local-isom}. We use classification of surface singularities. \betagin{thm}\label{main-local-isom} Conjecture $WC_{\deltalta, 2,\{0\}}$ holds in the local isomorphic case. \end{thm} \betagin{proof} If $\deltalta\geq 1$ then $P$ is smooth and so we are already done. So assume that $\deltalta<1$. 
If the singularity at $P$ is of type $E_6$, $E_7$ or $E_8$ then there are only a finite number of possibilities up to analytic isomorphism because of the $\deltalta$-lc assumption [see Pr 6.1.2]. Otherwise the graph of the resolution will be either of type $A_{r}$: \betagin{displaymath} \xymatrix{O^{-\alphapha_{r}} \ar@{-}[rr]&& \dots &\ar@{-}[rr]& & &O^{-\alphapha_{2 }}\ar@{-}[rr] && O^{-\alphapha_{1}}} \end{displaymath} where $\alphapha_{i}\geq 2$. Or of type $D_{r}$: \betagin{displaymath} \xymatrix{&& &&&& && O^{-2}\ar@{-}[d] \\ O^{-\alphapha_{r}} \ar@{-}[rr]&& \dots &\ar@{-}[rr]& & &O^{-\alphapha_{2}}\ar@{-}[rr] && O^{-\alphapha_{1}}\\ &&&&&&& & O^{-2}\ar@{-}[u] } \end{displaymath} where $\alphapha_{i}\geq 2$. First we work out the case $A_{n}$. Let $K_{W}-\sum_{i}e_{i}E_{i}={^{*}K_{Z}}$ where $e_{i}$ are the discrepancies for a log resolution $W\longrightarrow Z$/$P$. The following lemma is well known and a proof can be found in [AM, 1.2]. \betagin{lem} The numbers $(-E_{i}^{2})$ are bounded from above in terms of $\deltalta$. \end{lem} Intersecting $K_{W}-\sum_{i}e_{i}E_{i}$ with all the exceptional divisors we get a system like the following: \[a_{1}(-E_{1}^{2})-a_{2}-1= 0\] \[a_{2}(-E_{2}^{2})-a_{1}-a_{3}= 0 \] \[a_{3}(-E_{3}^{2})-a_{2}-a_{4}= 0\] \[\vdots \] \[a_{r-1}(-E_{r-1}^{2})-a_{r-2}-a_{r}=0 \] \[a_{r}(-E_{r}^{2})-a_{r-1}-1= 0\] where $a_i$ is the log discrepancy of $E_i$ with respect to $K_Z$. From the equation $a_{i}(-E_{i}^{2})-a_{i-1}-a_{i+1}\leq 0$ we get $a_{i}(-E_{i}^{2}-2)+a_{i}-a_{i-1}\leq a_{i+1}-a_{i}$ which shows that if $ a_{i-1}\leq a_{i}$ then $ a_{i}\leq a_{i+1}$ and moreover if $ a_{i-1}< a_{i}$ then $ a_{i} <a_{i+1}$ . So the solution for the system above should satisfy the following: \betagin{equation}\label{A_r} \end{equation} \[a_{1} \geq \dots \geq a_{i}\leq \dots \leq a_{r}\] for some $i\geq 1$. If $r\leq 2$ (or any fixed number) then the theorem is trivial. 
So we may assume that $r>3$ and also can assume $i\neq r$ unless $a_1=a_2=\dots=a_r$. Now for any $i\leq j<r$ if $-E_{j}^{2}>2$ then $a_{j+1}-a_{j}\geq a_{j}(-E_{j}^{2}-2)\geq \deltalta$. So if we have $l$ members in $\{j: -E_{j}^{2}>2 ~and ~i\leq j<r\}$ then $a_{r}\geq l\deltalta$. Hence $a_{r}(-E_{r}^{2}-1)+a_{r}-a_{r-1}\geq l\deltalta$ which contradicts the last equation in the system if $l$ gets big arbitrarily. In order $l\deltalta \leq 1$ and so $l \leq \frac{1}{\deltalta}$. Similar observation shows that the number $l'$ of $1 \leq j\leq i$ where $-E_{j}^{2}>2$ should be bounded. Then $l+l' \leq \frac{2}{\deltalta}$. Suppose $a_{i_{2}}=\dots =a_{i}=\dots =a_{i_{1}}$, $a_{i_{1}-1}\neq a_{i_{1}}$ (or $i_1=1$) and $a_{i_{2}}\neq a_{i_{2}+1}$ (or $i_2=r$) where $i_2\leq i\leq i_1$. Assume that $i_{1}\neq i$ or $i_{2}\neq i$. Let for example $i_{1}\neq i$. Then if all $a_j$ are not equal ($=1$) then we have \[1=(-E_{r}^{2}-1)a_{r}+a_{r}-a_{r-1}\geq (r-i_{1})(a_{i_{1}+1}-a_{i_{1}})\] \[=(r-i_{1})[(-E_{i_{1}}^{2}-2)a_{i_{1}}+a_{i_{1}}-a_{i_{1}-1}]\] \[=(r-i_{1})(-E_{i_{1}}^{2}-2)a_{i_{1}}\geq (r-i_{1})\deltalta\] because $-E_{i_{1}}^{2}$ can not be equal to $2$. So $(r-i_{1})\deltalta \leq 1$ then $r-i_{1}\leq \frac{1}{\deltalta}$ is bounded. Similarly $i_{2}$ should be bounded. These observation show that, mentioning that $-E_{k}^{2}$ are bounded, the denominators of $a_{k}$ are bounded. And so the index of $K_{Z}$ at $P$ is bounded and so we are done in this case. But if $i_{1}= i=i_{2}$ then the story is different. In this case note that $\deltalta \leq (-E_{i}^{2}-2)a_{i}= a_{i-1}-a_{i}+a_{i+1}-a_{i}$. So $\frac{\deltalta}{2} \leq a_{i-1}-a_{i}$ or $\frac{\deltalta}{2} \leq a_{i+1}-a_{i}$. For example assume that the later holds then similar to the calculations we just carried out above, $r-i$ will be bounded. But it can happen that $a_{i-1}-a_{i}$ is very small so we won't be able to bound $i$. 
In order, we try to find a solution for the following system with bounded denominators: \[u_{1}(-E_{1}^{2})-u_{2}-1\leq 0\] \[u_{2}(-E_{2}^{2})-u_{1}-u_{3}\leq 0 \] \[u_{3}(-E_{3}^{2})-u_{2}-u_{4}\leq 0\] \[\vdots \] \[u_{r-1}(-E_{r-1}^{2})-u_{r-2}-u_{r}\leq 0 \] \[u_{r}(-E_{r}^{2})-u_{r-1}-1\leq 0\] To do this, note that if $-E_{i-1}^{2}>2$ then $\deltalta \leq (-E_{i-1}^{2}-2)a_{i-1}= a_{i-2}-a_{i-1}+a_{i}-a_{i-1}\leq a_{i-2}-a_{i-1}$ then again similar computations to the above shows that $i$ is bounded. Now let $j$ be the smallest number such that $-E_{j}^{2}=\dots = -E_{i-1}^{2}=2$ (remember that we have assumed $\frac{\deltalta}{2} \leq a_{i+1}-a_{i}$. ). Hence $j$ is bounded. Now take $u_{j}=\dots =u_{i}=\frac{1}{2}$ then the following equations are satisfied if $i-j>2$: \[u_{j+1}(-E_{j+1}^{2})-u_{j}-u_{j+2}=2u_{j}-u_{j}-u_{j}=0 \] \[\vdots \] \[u_{i-1}(-E_{i-1}^{2})-u_{i-2}-u_{i}=2u_{i-1}-u_{i-2}-u_{i}= 0 \] Since $r-i$ and $j$ are bounded then the number of remaining equations is bounded and so to satisfy them we just have to divide $u_{i}=\frac{1}{2}$ by a bounded natural number. This finishes the $A_{r}$ type. The $D_{r}$ type will follow. \betagin{rem} In order we have constructed a Klt log divisor $K_W+D$ with bounded index such that $-(K_W+D)$ is nef and big/$P\in Z$. Now we may use remark \ref{analytic-algebraic}. \end{rem} \betagin{rem} All the bounds occurring in the proof are effective and can be calculated in terms of $\deltalta$. \end{rem} \betagin{rem} In Shokurov's case where $\deltalta=\varepsilonsilon=0$ we just take $u_{1}=\dots =u_{r}=0$. \end{rem} The case of $D_{r}$: We have a chain $E_{1}, \dots , E_{r}$ of exceptional divisors plus $E$ and $E'$ where $E$ intersects only $E_{1}$ and the same holds for $E'$. 
In this case we have the following system: \[a(-E^{2})-a_{1}-1= 0\] \[a'(-E'^{2})-a_{1}-1= 0\] \[a_{1}(-E_{1}^{2})-a-a'- a_{2}+1= 0\] \[a_{2}(-E_{2}^{2})-a_{1}-a_{3}= 0 \] \[a_{3}(-E_{3}^{2})-a_{2}-a_{4}= 0\] \[\vdots \] \[a_{r-1}(-E_{r-1}^{2})-a_{r-2}-a_{r}=0 \] \[a_{r}(-E_{r}^{2})-a_{r-1}-1= 0\] Note that $-E^{2}=-E'^{2}=2$ so $2a-a_{1}-1= 0$ and $2a'-a_{1}-1= 0$ hence $a+a'=a_{1}+1$. Replacing this in the third equation and ignoring the two first equations we get the following system: \[a_{1}(-E_{1}^{2}-1)-a_{2}= 0\] \[a_{2}(-E_{2}^{2})-a_{1}-a_{3}= 0 \] \[a_{3}(-E_{3}^{2})-a_{2}-a_{4}= 0\] \[\vdots \] \[a_{r-1}(-E_{r-1}^{2})-a_{r-2}-a_{r}=0 \] \[a_{r}(-E_{r}^{2})-a_{r-1}-1= 0\] From this system we get a solution as following: \[a_{1}=\dots =a_{i}<a_{i+1}<\dots <a_{r}\] (i=r also may happen. In this case $a=a'=a_{1}=\dots =a_{r}=1$). Now $r-i$ should be bounded. In order if $i>1$ then $-E_{1}^{2}=\dots =-E_{i-1}^{2}=2$ but $-E_{i}^{2}>2$ (we have assumed $r>i$). Now $\deltalta (-E_{i}^{2}-2) \leq a_{i}(-E_{i}^{2}-2)+a_{i}-a_{i-1}=a_{i+1}-a_{i}$ (if $i=1$ then $\deltalta (-E_{1}^{2}-2) \leq a_{1}(-E_{1}^{2}-2)=a_{2}-a_{1}$). We also have the fact that $a_{k+1}-a_{k}\leq a_{k+2}-a_{k+1}$ for $i\leq k <r-1$. And on the other hand $\sum_{i\leq k<r} a_{k+1}-a_{k} \leq a_{r}<a_{r}+a_{r}-a_{r-1}<1$. So we conclude that $r-i$ should be bounded. Moreover since $-E_{k}^{2}$ is bounded, this proves that the denominators of all $a_{k}$ in the $D_{r}$ case are bounded and so the index of $K_{Z}$ at $P$. In this case $B^+=0$ and this finishes the proof of theorem \ref{main-local-isom}. $\Box$ \end{proof} \betagin{rem} Essentially the boundedness properties that we proved and used in the proof of theorem \ref{main-local-isom} had been more or less discovered by some other people independently. Shokurov had used these ideas in an unpublished preprint on mlds. 
\end{rem} \betagin{recall}\label{graphs} Here we recall the diagrams for the $E_{6}$, $E_{7}$ and $E_{8}$ types of singularities. The following is a general case of such singularities: \betagin{displaymath} \xymatrix{\mathbb{C}^2/{\mathbb{Z}_{m_{1}}} \ar@{-}[r] & O^{-p}& \ar@{-}[l] \mathbb{C}^2/{\mathbb{Z}_{m_{2}}}\\ &\ar@{-}[u] O^{-2}& } \end{displaymath} where the only possibilities for $(m_{1},m_{2})$ are $(3,3)$, $(3,4)$ and $(3,5)$. So the possible diagrams are as follows: For $(m_{1},m_{2})=(3,3)$ we have { \betagin{description} \item[$1$] \betagin{displaymath} \xymatrix{ O^{-3} \ar@{-}[r] & O^{-p}& \ar@{-}[l] O^{-3}\\ &\ar@{-}[u] O^{-2}& } \end{displaymath} \item[$2$] \betagin{displaymath} \xymatrix{O^{-2} \ar@{-}[r] & O^{-2} \ar@{-}[r] & O^{-p}& \ar@{-}[l] O^{-3}\\ &&\ar@{-}[u] O^{-2}& } \end{displaymath} \item[$3$] \betagin{displaymath} \xymatrix{O^{-2} \ar@{-}[r] & O^{-2} \ar@{-}[r] & O^{-p}& \ar@{-}[l] O^{-2}& \ar@{-}[l] O^{-2}\\ &&\ar@{-}[u] O^{-2}&& } \end{displaymath} \end{description} } For $(m_{1},m_{2})=(3,4)$ we have \betagin{description} \item[$4$] \betagin{displaymath} \xymatrix{ O^{-3} \ar@{-}[r] & O^{-p}& \ar@{-}[l] O^{-4}\\ &\ar@{-}[u] O^{-2}& } \end{displaymath} \item[$5$] \betagin{displaymath} \xymatrix{O^{-2} \ar@{-}[r] & O^{-2} \ar@{-}[r] & O^{-p}& \ar@{-}[l] O^{-4}\\ &&\ar@{-}[u] O^{-2}& } \end{displaymath} \item[$6$] \betagin{displaymath} \xymatrix{ O^{-3} \ar@{-}[r] & O^{-p}& \ar@{-}[l] O^{-2}& \ar@{-}[l] O^{-2}& \ar@{-}[l] O^{-2}\\ &\ar@{-}[u] O^{-2}&&& } \end{displaymath} \item[$7$] \betagin{displaymath} \xymatrix{O^{-2} \ar@{-}[r] & O^{-2} \ar@{-}[r] & O^{-p}& \ar@{-}[l] O^{-2}& \ar@{-}[l] O^{-2}& \ar@{-}[l] O^{-2}\\ &&\ar@{-}[u] O^{-2}&&& } \end{displaymath} \end{description} And for $(m_{1},m_{2})=(3,5)$ we have { \betagin{description} \item[$8$] \betagin{displaymath} \xymatrix{ O^{-3} \ar@{-}[r] & O^{-p}& \ar@{-}[l] O^{-5}\\ &\ar@{-}[u] O^{-2}& } \end{displaymath} \item[$9$] \betagin{displaymath} \xymatrix{O^{-2} \ar@{-}[r] & 
O^{-2} \ar@{-}[r] & O^{-p}& \ar@{-}[l] O^{-5}\\ &&\ar@{-}[u] O^{-2}& } \end{displaymath} \item[$10$] \betagin{displaymath} \xymatrix{ O^{-3} \ar@{-}[r] & O^{-p}& \ar@{-}[l] O^{-2}& \ar@{-}[l] O^{-3}\\ &\ar@{-}[u] O^{-2}&& } \end{displaymath} \item[$11$] \betagin{displaymath} \xymatrix{O^{-2} \ar@{-}[r] & O^{-2} \ar@{-}[r] & O^{-p}& \ar@{-}[l] O^{-2}& \ar@{-}[l] O^{-3} \\ &&\ar@{-}[u] O^{-2}&& } \end{displaymath} \item[$12$] \betagin{displaymath} \xymatrix{O^{-3} \ar@{-}[r] & O^{-p}& \ar@{-}[l] O^{-3}& \ar@{-}[l] O^{-2} \\ &\ar@{-}[u] O^{-2}&& } \end{displaymath} \item[$13$] \betagin{displaymath} \xymatrix{O^{-2} \ar@{-}[r] &O^{-2} \ar@{-}[r]& O^{-p}& \ar@{-}[l] O^{-3}& \ar@{-}[l] O^{-2} \\ &&\ar@{-}[u] O^{-2}&& } \end{displaymath} \item[$14$] \betagin{displaymath} \xymatrix{O^{-3} \ar@{-}[r] & O^{-p}& \ar@{-}[l] O^{-2}& \ar@{-}[l] O^{-2}& \ar@{-}[l] O^{-2} & \ar@{-}[l] O^{-2}\\ &\ar@{-}[u] O^{-2}&& &&} \end{displaymath} \item[$15$] \betagin{displaymath} \xymatrix{O^{-2} \ar@{-}[r] &O^{-2} \ar@{-}[r]& O^{-p}& \ar@{-}[l] O^{-2}& \ar@{-}[l] O^{-2}& \ar@{-}[l] O^{-2}& \ar@{-}[l] O^{-2} \\ &&\ar@{-}[u] O^{-2}&&&& } \end{displaymath} \end{description} } \end{recall} \subsection{Local birational case} In this subsection wherever we write /$Z$ we mean /$P\in Z$ for a fixed point $P$ on $Z$. \betagin{thm}\label{3-I} $WC_{\deltalta, 2,\{0\}}$ holds in the birational case. \end{thm} {\textbf{Strategy of the proof:}} Let $W$ be a minimal resolution of $X$ and $\{E_{i}\}$, $\{F_{j}\}$ be the exceptional divisors /$Z$ on $W$ where the $E_{i}$ are exceptional/$X$ but $ F_{j}$ are not ($E$ will be used for a typical $E_{i}$ and similarly $F$ for $F_{j}$ or its birational transform). We will construct an antinef/$Z$ and Klt log divisor $K_{W}+\Omega=K_{W}+\sum_{i} u_{i}E_{i}+\sum_{j} u_{j}F_{j}$ where $u_{i},u_{j}<1$ are rational numbers with bounded denominators. Then we use remark \ref{analytic-algebraic}. 
\betagin{proof} By contracting those curves where $-K_{X}$ is numerically zero, we can assume that $-K_{X}$ is ample/$Z$ (we can pull back the complement). Let $W$ be the minimal resolution of $X$. Then since $K_{W}$ is nef/$X$ by the negativity lemma we have $K_{W}-\sum_{i}e_{i}E_{i}=K_{W}+\sum_{i}(1-a_{i})E_{i} \equiv {^*K_{X}}$ where $ e_{i}\leq 0$. \betagin{defn} For any smooth model $Y$ where $W/Y/Z$ we define $\overline{exc}(Y/Z)$ to be the graph of the exceptional curves ignoring the birational transform of exceptional divisors of type $F$. For an exceptional/$Z$ divisor $G$ on $Y$ not of type $F$, $\overline{exc}(Y/Z)_{G}$ means the connected component of $\overline{exc}(Y/Z)$ where $G$ belongs to. \end{defn} \betagin{lem} We have the followings on $W$: \betagin{description} \item[$\diamond$] The exceptional divisors/$Z$ on $W$ are with simple normal crossings. \item[$\diamond$] Each $F$ (i.e. each exceptional divisor of type $F$) is a $-1$-curve. \item[$\diamond$] The model $\overline{W}$ obtained by blowing down $-1$-curves/$Z$ is the minimal resolution of $Z$. \item[$\diamond$] Each $F$ cuts at most two exceptional divisors of type $E$. \end{description} \end{lem} \betagin{proof} Let $F$ be an exceptional divisor/$Z$ on $W$ which is not exceptional/$X$. Then $(K_{W}-\sum_{i}e_{i}E_{i}).F= K_{W}.F+\sum_{i}(-e_{i})E_{i}.F=2p_{a}(F)-2-F^{2}+\sum_{i}(-e_{i})E_{i}.F < 0$ where $p_{a}(F)$ stands for the arithmetic genus of the curve $F$. Then $2p_{a}(F)-2-F^{2} < 0$ and so $p_{a}(F)=0$ and $-F^{2}=1$. In other words $F$ is a $-1$-curve.\\ On the other hand by contracting $-1$-curves/$Z$ (i.e. running the classical minimal model theory for smooth surfaces on $W/Z$) we get a model $\overline{W}/Z$ where $K_{\overline{W}}$ is nef/$Z$. Actually $\overline{W}$ is the minimal resolution of $P\in Z$. 
The exceptional divisors/$Z$ on $\overline{W}$ are with simple normal crossings and since $W$ is obtained from $\overline{W}$ by a sequence of blow ups then the exceptional divisors/$Z$ on $W$ also would be with simple normal crossings. This observation gives more information. Since all the $F$, exceptional/$Z$ but not/$X$, are contracted/$\overline{W}$ then they should intersect at most two of $E_{i}$ because $exc(\overline{W}/Z)$ is with simple normal crossings and $F$ is exceptional/$\overline{W}$. Moreover no two exceptional divisors of type $F$ should intersect on $W$ because they are both $-1$-curves. This means that the intersection points of any two exceptional divisor/$Z$ on $X$ should be a singular point of $X$. Also any exceptional divisor/$Z$ on $X$ contains at most two singular points of $X$. $\Box$ \end{proof} Now Suppose $\{Q_{k}\}_{k}$ to be the singular points of $X$. If no one of the points $\{Q_{k}\}$ is of type $A_{r}$ then the proof of theorem \ref{main-local-isom} shows that the discrepancies $e_{i}$ are with bounded denominators so we are already done. But if there is one point of type $A_{r}$ then the proof is more complicated (surprisingly the $A_{r}$ type is the most simple case in the sense of Shokurov i.e. when $\deltalta=0$). Similar to the proof of theorem \ref{main-local-isom} we try to understand the structure of $exc(W/Z)$ and the blow ups $W\rightarrow \overline{W}$. \betagin{defn} A smooth model $\ddot{W}$ where $W/\ddot{W}$ and $\ddot{W}/\overline{W}$ are series of smooth blow ups, is called a $blow~ up~ model$ of $\overline{W}$. Such a model is called $minimal$ if there is $X'$ such that $K_{\ddot{W}}$ is nef/$X'$ and $X/X'/Z$. In other words it is the minimal resolution of $X'$. The connected components of $\overline{exc}(\ddot{W}/Z)$ are either of type $A_{r}$, $D_{r}$, $E_{6}$, $E_{7}$ or $E_{8}$ for a minimal blow up model. 
\end{defn} \betagin{defn}\label{3-N} We call the divisor $K_{W}+\omegaega=K_{W}+\sum_{i} (1-a_{i})E_{i}={^*K_{X}}$ the $primary$ $~log~ divisor$. The pair $(X,B)$ has a log canonical $n$-complement $K_{X}+B^+$ over $Z$ in the sense of Shokurov [Sh2] ($n\leq 6$). From now on we call it a $Shokurov~ complement$. So $K_{W}+\omegaega_{Sh}+C=K_{W}+\sum_{i}(1-a^{Sh}_{i})E_{i}+\sum_{j}(1-a^{Sh}_{j})F_{j}+C={^*(K_{X}+B^+)}$ where $C$ is the birational transform of the non-exceptional part of $B^+$. We call $K_{W}+\omegaega_{Sh}$ a $Shokurov~ log~ divisor$ and the numbers $a^{Sh}_{i}$ and $a^{Sh}_{j}$ Shokurov log discrepancies. \end{defn} \betagin{defn}\label{3-F} In the graph $exc(W/Z)$ if we ignore those $F$ which appear with zero coefficient in $\omegaega_{Sh}$ (i.e. $a^{Sh}=1$) then we get a graph $exc(W/Z)_{>0}$ with some connected components. The connected graph $\mathcal{C}$ consisting of exceptional/$Z$ curves with $a^{Sh}=0$, is in one of the components of the graph $exc(W/Z)_{>0}$ which we show by $\mathcal{G}$ ($\mathcal{C}$ is connected because of the connectedness of the locus of log canonical centres/$P\in Z$). Now contracting all $-1$-curves/$Z$ in $\mathcal{G}$ and continuing the contractions of subsequent $-1$-curves/$Z$ which appear in $\mathcal{G}$, finally we get a model which we show as $W_{\mathcal{G}}$. The transform of $\mathcal{G}$ on $W_{\mathcal{G}}$ is shown by $\mathcal{G}_{1}$ and similarly the transform of $\mathcal{C}$ is $\mathcal{C}_{1}$. \end{defn} \betagin{defn}\label{3-H} A chain of exceptional curves consisting of $G_{\betata_{1}}, \dots, G_{\betata_{r}}$ is called $strictly ~monotonic$ if $r=1$ or if $a_{\betata_{1}}< a_{\betata_{2}}<\dots < a_{\betata_{r}}$ (these are log discrepancies with respect to $K_X$). $G_{\betata_{1}}$ is called the $base~ curve$. \end{defn} \betagin{defn} Let $G\in exc(\ddot{W}/Z)$ for a smooth blow up model $\ddot{W}$. 
Then define the $negativity$ of $G$ on this model as $N_{\ddot{W}}(G)=(K_{\ddot{W}}+{_{*}\omega}).G\leq 0$ ($_{*}\omega$ is the pushdown of $\omega$). We also define the $total~ negativity$ by $N_{\ddot{W}}=\sum_{\alpha} N_{\ddot{W}}(G_{\alpha})$ where $G_{\alpha}$ runs on all exceptional divisors/$Z$ on $\ddot{W}$ and for $G\in \overline{exc}(\ddot{W}/Z)$ define $N_{\ddot{W},G}=\sum_{\alpha} N_{\ddot{W}}(G_{\alpha})$ when the sum runs on all members of $\overline{exc}(\ddot{W}/Z)_{G}$. Similarly define the negativity functions $N^{Sh}$ and $N^+$ replacing $\omega$ with $\omega_{Sh}$ and $\omega_{Sh}+C$ respectively. Note that the latter is always zero, because $K_{W}+\omega_{Sh}+C\equiv 0/Z$. \end{defn} \begin{defn} The (smooth) blow up of a point which belongs to two exceptional divisors/$Z$ on a model is called a $double~ blow~ up$. If this point belongs to just one exceptional divisor/$Z$ then we call it a $single~ blow~ up$. A blow up is called a double$^+$ blow up if the blown up point belongs to two components of $_{*}(\omega_{Sh}+C)$ (this is the pushdown). Similarly define a $single^+$ blow up. \end{defn} \begin{lem}\label{3-A} For any exceptional $G_{\beta}\in \overline{exc}(\ddot{W}/Z)$ on a blow up model $\ddot{W}$ we have: \begin{description} \item[$\diamond$] $-1+\delta \leq N_{\ddot{W},G_{\beta}}$ if $\overline{exc}(\ddot{W}/Z)_{G_{\beta}}$ is of type $D_{r}$, $E_{6}$, $E_{7}$ or $E_{8}$. In particular, in these cases $-1+\delta \leq N_{\ddot{W}}(G_{\beta})$ holds. \item[$\diamond$] $2(-1+\delta) \leq N_{\ddot{W},G_{\beta}}$ and $-1+\delta \leq N_{\ddot{W}}(G_{\beta})$ if $\overline{exc}(\ddot{W}/Z)_{G_{\beta}}$ is of type $A_{r}$ except if it is strictly monotonic.
\end{description} \end{lem} \begin{proof} $D_{r}$ case: Similar to the notation in the proof of theorem \ref{main-local-isom} let $G_{\beta}, G_{\beta'}, G_{\beta_{1}}, \dots , G_{\beta_{r}}$ be the exceptional divisors in $\overline{exc}(\ddot{W}/Z)_{G_{\beta}}$. Then from the equations in the proof of theorem \ref{main-local-isom} for the $D_{r}$ case we get the following system for the log discrepancies: \[2a_{\beta}-a_{\beta_{1}}-1\leq 0\] \[2a_{\beta'}-a_{\beta_{1}}-1\leq 0\] \[2a_{\beta_{1}}-a_{\beta}-a_{\beta'}- a_{\beta_{2}}+1\leq 0\] \[2a_{\beta_{2}}-a_{\beta_{1}}-a_{\beta_{3}}\leq 0 \] \[\vdots \] \[2a_{\beta_{r-1}}-a_{\beta_{r-2}}-a_{\beta_{r}}\leq 0 \] \[2a_{\beta_{r}}-a_{\beta_{r-1}}-1\leq 0\] Adding the first and the second equations gives $2a_{\beta}+2a_{\beta'}-2a_{\beta_{1}}-2\leq 0$ and putting this in the third equation we get $a_{\beta_{1}}\leq a_{\beta_{2}}$ and so $a_{\beta_{1}}\leq a_{\beta_{2}}\leq \dots \leq a_{\beta_{r}}$. So \[N_{\ddot{W},G_{\beta}}\geq a_{\beta}+a_{\beta'}+a_{\beta_{r}}-a_{\beta_{1}}-2\geq a_{\beta}+a_{\beta'}+a_{\beta_{2}}-a_{\beta_{1}}-2\]\[ \geq 2a_{\beta_{1}}+1-a_{\beta_{1}}-2\geq a_{\beta_{1}}-1\geq \delta -1\] because $2a_{\beta_{1}}+1\leq a_{\beta}+a_{\beta'}+a_{\beta_{2}}$ and the fact that $X$ is $\delta$-lc. $A_{r}$ case [non-strictly monotonic]: In this case assume that the exceptional divisors in $\overline{exc}(\ddot{W}/Z)_{G_{\beta}}$ are $G_{\beta_{1}}, \dots , G_{\beta_{r}}$ so we get the system: \[2a_{\beta_{1}}-a_{\beta_{2}}-1\leq 0\] \[2a_{\beta_{2}}-a_{\beta_{1}}-a_{\beta_{3}}\leq 0 \] \[\vdots \] \[2a_{\beta_{r-1}}-a_{\beta_{r-2}}-a_{\beta_{r}}\leq 0 \] \[2a_{\beta_{r}}-a_{\beta_{r-1}}-1\leq 0\] So there will be $k$ such that $a_{\beta_{1}}\geq a_{\beta_{2}}\geq \dots \geq a_{\beta_{k}}\leq a_{\beta_{r}}$.
Thus $N_{\ddot{W}}(G_{\beta_{1}})\geq a_{\beta_{1}}+a_{\beta_{1}}-a_{\beta_{2}}-1\geq a_{\beta_{1}}-1\geq \delta-1$. In this way we get the similar inequalities for all other equations except for $N_{\ddot{W}}(G_{\beta_{k}})$. Suppose $N_{\ddot{W}}(G_{\beta_{k}})<\delta-1$. So we get $2a_{\beta_{k}}-a_{\beta_{k-1}}-a_{\beta_{k+1}}< \delta-1$ and so $1- \delta <a_{\beta_{k-1}}+a_{\beta_{k+1}}-2a_{\beta_{k}}\leq a_{\beta_{1}}+a_{\beta_{r}}-2a_{\beta_{k}}$. On the other hand by adding all the equations in the system above we get $N_{\ddot{W},G_{\beta}}\geq a_{\beta_{1}}+a_{\beta_{r}}-2>1- \delta+2a_{\beta_{k}}-2\geq \delta-1$. This is a contradiction with the fact that $N_{\ddot{W}}(G_{\beta_{k}})\geq N_{\ddot{W},G_{\beta_{k}}}$. To get the inequality for $N_{\ddot{W},G_{\beta_{k}}}$ add all the equations in the system above. Note that if $r=2$ then $a_{\beta_{1}}=a_{\beta_{2}}$ and checking the lemma is easy in this case. $E_{6}$, $E_{7}$, $E_{8}$ {cases:\footnote{I just prove that $-1+\delta \leq N_{\ddot{W}}(G)$ for any exceptional $G$. We won't need the inequality for total negativity.}} In these cases the graph $\overline{exc}(\ddot{W}/Z)_{G_{\beta}}$ is as in the recall \ref{graphs}. It is enough to put $2$ in place of all self-intersection numbers because the negativity becomes smaller. We start from the smallest possible graph i.e. Case 1 in the mentioned recall: \[2a_{\beta}-a_{\beta_{2}}-1\leq 0\] \[2a_{\beta_{2}}-a_{\beta}-a_{\beta_{1}}- a_{\beta_{3}}+1\leq 0\] \[2a_{\beta_{1}}-a_{\beta_{2}}-1\leq 0\] \[2a_{\beta_{3}}-a_{\beta_{2}}-1\leq 0\] Adding all the equations we get $N_{\ddot{W},G_{\beta}}=a_{\beta}+a_{\beta_{1}}+a_{\beta_{3}}-a_{\beta_{2}}-2$. And by the second equation we have $a_{\beta}+a_{\beta_{1}}+a_{\beta_{3}}-a_{\beta_{2}}\geq a_{\beta_{2}}+1$ so $N_{\ddot{W},G_{\beta}}\geq a_{\beta_{2}}+1-2\geq \delta-1$.
In fact this was a special case of the $D_{r}$ type (the similarity of the system not necessarily the graph $\overline{exc}(\ddot{W}/Z)_{G_{\beta}}$). Note that the inequality for the total negativity implies the inequality for the negativity of each exceptional curve.\\ Now we prove other cases by induction on the number of the exceptional curves. The minimum is $4$ exceptional curves and we just proved this case. Suppose we have proved up to $k-1$ and our graph has $k$ members. Suppose the exceptional curves are $G_{\beta}, G_{\beta_{1}}, \dots ,G_{\beta_{k-1}}$ and such that $G_{\beta_{l}}$ cuts $G_{\beta}, G_{\beta_{l-1}}$ and $G_{\beta_{l+1}}$. If $l=2$ or $l=k-2$ then again this will be a system of type $D_{r}$. Assume that this is not the case. Since $-a_{\beta}+1\geq 0$ we get a system as follows \[2a_{\beta_{1}}-a_{\beta_{2}}-1\leq 0\] \[2a_{\beta_{2}}-a_{\beta_{3}}-a_{\beta_{1}}\leq 0\] \[\vdots\] \[2a_{\beta_{k-1}}-a_{\beta_{k-2}}-1\leq 0\] This is a system of type $A_{k-1}$ so either we have $a_{\beta_{1}}\geq a_{\beta_{2}}$ or $a_{\beta_{k-1}}\geq a_{\beta_{k-2}}$. Assume that the first one holds (the other case is similar). Now note that $N_{\ddot{W}}(G_{\beta_{1}})\geq 2a_{\beta_{1}}-a_{\beta_{2}}-1=a_{\beta_{1}}-a_{\beta_{2}}+a_{\beta_{1}}-1\geq \delta-1$. By ignoring $G_{\beta_{1}}$ we get a system for a graph with a smaller number of elements: \[2a_{\beta_{2}}-a_{\beta_{3}}-1\leq 2a_{\beta_{2}}-a_{\beta_{3}}-a_{\beta_{1}} \leq 0\] \[\vdots\] \[2a_{\beta_{l}}-a_{\beta_{l-1}}-a_{\beta_{l+1}}-a_{\beta}+1\leq 0\] \[\vdots\] \[2a_{\beta_{k-1}}-a_{\beta_{k-2}}-1\leq 0\] and the lemma is proved by induction. $\Box$ \end{proof} \begin{lem}\label{3-B} Suppose $\xi \in \ddot{W}/\overline{W}$ ($\ddot{W}$ is a blow up model), $\tilde{W}$ the blow up of $\ddot{W}$ at $\xi$ and $G_{\alpha}$ the exceptional divisor of the blow up.
Then we have the following: If $G_{\alpha}$ is the double blow up of $G_{\beta}$ and $G_{\gamma}$ (i.e. $\xi \in G_{\beta}\cap G_{\gamma}$) then: \begin{description} \item[$\diamond$] $N_{\tilde{W}}(G_{\alpha})=a_{\alpha}-a_{\beta}-a_{\gamma}$ where $a_{\alpha}$ is the log discrepancy of $G_{\alpha}$ for $K_{X}$ and similarly $a_{\beta}$ and $a_{\gamma}$. \item[$\diamond$] $N_{\tilde{W}}(G_{\beta})=N_{\ddot{W}}(G_{\beta})-N_{\tilde{W}}(G_{\alpha})$ and $N_{\tilde{W}}(G_{\gamma})=N_{\ddot{W}}(G_{\gamma})-N_{\tilde{W}}(G_{\alpha})$. \item[$\diamond$] $N_{\tilde{W}}=N_{\ddot{W}}-N_{\tilde{W}}(G_{\alpha})$. \end{description} And if $G_{\alpha}$ is the single blow up of $G_{\beta}$ then \begin{description} \item[$\diamond$] $N_{\tilde{W}}(G_{\beta})=N_{\ddot{W}}(G_{\beta})-N_{\tilde{W}}(G_{\alpha})$, $N_{\tilde{W}}(G_{\alpha})=a_{\alpha}-a_{\beta}-1\leq -\delta$ and $N_{\ddot{W}}(G_{\beta})+\delta\leq 0$. \item[$\diamond$] $N_{\tilde{W}}=N_{\ddot{W}}$. \end{description} \end{lem} \begin{proof} Standard computations and left to the reader. $\Box$ \end{proof} \begin{cor}\label{3-C} If $G_{\alpha}$ is a single blow up of $G_{\beta}$ on $\ddot{W}$, a blow up model of $\overline{W}$, and $N_{\ddot{W}}(G_{\beta})\geq \delta -1$ then $a_{\alpha}\geq a_{\beta}+\delta$. \end{cor} \begin{proof} Since $G_{\alpha}$ is a single blow up of $G_{\beta}$ then $1+a_{\beta}-a_{\alpha}+N_{\ddot{W}}(G_{\beta})\leq 0$ and so $1+a_{\beta}-a_{\alpha}+\delta -1 \leq 0$ then $a_{\beta}+\delta \leq a_{\alpha}$. \end{proof} \begin{defn} Let $\xi$ be a point on a blow up model $\ddot{W}$. Define the $multiplicity~ of~ double~ blow~ ups$ as \[\mu_{db}(\xi)=max \{\#\{~ double~ blow~ ups~/\xi~ before~ having~ a~single~ blow ~up~/\xi\}\}\] the maximum is taken over all sequences of blow ups from $\ddot{W}$ to $W$.
The next lemma shows the boundedness of this number. \end{defn} \begin{lem}\label{3-E} $\mu_{db}(\xi)$ is bounded. \end{lem} \begin{proof} Since by lemma \ref{3-B} each double blow up adds a non-negative number to the total negativity of the system and since the total negativity is {bounded\footnote{because the total negativity on $\overline{W}$ is bounded. This boundedness for the $A_{r}$ and $D_{r}$ cases is shown in lemma \ref{3-A} and for other cases it is obvious}} then except a bounded number of double blow ups we have $\frac{-\delta}{2}\leq N_{\tilde{W}}(G_{\alpha})=a_{\alpha}-a_{\beta}-a_{\gamma}\leq 0$ where $G_{\alpha}$ is the double blow up of some $G_{\beta}$ and $G_{\gamma}$ and $G_{\beta}\cap G_{\gamma}/\xi$. The inequality shows that $a_{\beta}+\frac{\delta}{2}\leq a_{\beta}+a_{\gamma}-\frac{\delta}{2}\leq a_{\alpha}$ and similarly $a_{\gamma}+\frac{\delta}{2}\leq a_{\alpha}$. In other words the log discrepancy is increasing at least by $\frac{\delta}{2}$. Since log discrepancies are in $[\delta,1]$ then the number of these double blow ups has to be bounded. $\Box$ \end{proof} \begin{defn} Let $\xi \in \ddot{W}$ where $\ddot{W}$ is a blow up model. Define the $single~ blow~ up~ multiplicity$ of $\xi$ as: \[\mu_{sb}(\xi)=max\{\#\{G: G ~is ~single~ blown~ up ~and ~ G/\xi\}\}\] The maximum is taken over all sequences of blow ups from $\ddot{W}$ to $W$. In the above definition $G$ is the exceptional divisor of a single blow up/$\xi$. Also define $ \mu_{sb}(G_{\beta})=\sum_{\xi\in G_{\beta}} \mu_{sb}(\xi)$ and $ \mu_{sb}(\ddot{W})=\sum_{\xi\in \ddot{W}} \mu_{sb}(\xi)$ . \end{defn} So if $\xi_{2}/\xi_{1}$ (these points may be on different models) then $\mu_{sb}(\xi_{1})\geq \mu_{sb}(\xi_{2})$. \begin{rem} Usually there is not a unique sequence of blow ups from $\ddot{W}$ to $W$.
In fact if $\xi_{1}\neq \xi_{2}$ are points on $\ddot{W}$ and they are centres of some exceptional divisors on $W$ then it doesn't matter which one we first blow up to get to $W$ i.e. they are independent. \end{rem} \begin{defn} Let $\xi \in \overline{exc}(\ddot{W}/Z)$ be a point on a blow up model $\ddot{W}$. We call such a point a $base~ point$ if there is an exceptional divisor $G_{\alpha}/\xi$ on a blow up model $\tilde{W}$ such that $N_{\tilde{W}}(G_{\alpha})<\delta -1$. \end{defn} \begin{rem}\label{3-K} By lemma \ref{3-A} and lemma \ref{3-B} if $\xi \in \overline{exc}(\ddot{W}/Z)_{G_{\beta}}$ and $\overline{exc}(\ddot{W}/Z)_{G_{\beta}}$ is of type $A_{r}$ (non-strictly monotonic), $D_{r}$, $E_{6}$, $E_{7}$ or $E_{8}$ then $\xi$ cannot be a base point. Moreover again by lemma \ref{3-A} if $\overline{exc}(\ddot{W}/Z)_{G_{\beta}}$ is strictly monotonic then there can be at most one base point in $\overline{exc}(\ddot{W}/Z)_{G_{\beta}}$ and it can only belong to the base curve. \end{rem} \begin{lem}\label{3-M} $\mu_{sb}(\xi)$ is bounded if $\xi\in \ddot{W}$ is not a base point. \end{lem} \begin{proof} If $G_{\alpha}/\xi$ is a single blown up exceptional divisor then since $\xi$ is not a base point we get $\delta-1\leq N_{\tilde{W}}(G_{\alpha})$. So if $G_{\alpha}$ is a single blow up/$\xi$ of $G_{\beta}$ then $a_{\alpha}\geq a_{\beta}+\delta$ i.e. it increases the log discrepancy at least by $\delta$. And as we said in the proof of lemma \ref{3-E}, except a bounded number, any other double blow up/$\xi$ increases the log discrepancy at least by $\frac{\delta}{2}$. Hence, there can be only a bounded number of blow ups/$\xi$ from $\ddot{W}$ to $W$. $\Box$ \end{proof} \begin{cor}\label{3-L} The number of exceptional curves/$\xi$ on $W$ is bounded for any non-base point $\xi\in \ddot{W}$. \end{cor} We now continue to the proof of theorem \ref{3-I}.
If no divisor in $\omega_{Sh}$ has coefficient 1 then this is what we are looking for. Since in this case $K_{W}+\omega_{Sh}$ will be a $1/6$-lc log divisor. If the opposite happens i.e. some divisors appear with coefficient 1 in $\omega_{Sh}$ then these divisors will form a connected chain $\mathcal{C}$ which doesn't intersect any other exceptional divisor/$Z$ with positive coefficient in $\omega_{Sh}$, except in the edges of this chain. Some of the exceptional divisors of type $F$ may appear with positive coefficients and some with zero coefficients in $\omega_{Sh}$. The image of the graph $\mathcal{G}$ on $W_{\mathcal{G}}$, that is $\mathcal{G}_{1}$ (see definition \ref{3-F}), is either of type $A_{r}$, $D_{r}$, $E_{6}$, $E_{7}$ or $E_{8}$ because similar to what we proved above for $\overline{W}$ the model $W_{\mathcal{G}}$ is the minimal resolution of some surface i.e. the minimal resolution of the surface $X_{\mathcal{G}}$ obtained from $X$ by contracting the exceptional/$Z$ curves on $X$ whose birational transform belongs to $\mathcal{G}$. In particular there is no $-1$-curve/$X_{\mathcal{G}}$ on $W_{\mathcal{G}}$. Now suppose $\mathcal{G}_{1}$ is of type $A_{r}$, not strictly monotonic and let the pushdown of the chain $\mathcal{C}$ be $\mathcal{C}_{1}$ on $W_{\mathcal{G}}$. Let the exceptional divisors of $\mathcal{G}_{1}$ be $G_{\beta_{1}}, \dots , G_{\beta_{r}}$ and assume that the chain $\mathcal{C}_{1}$ consists of $G_{\beta_{k}}, \dots , G_{\beta_{l}}$. Hence $N^{Sh}_{ W_{\mathcal{G}}}(G_{\beta_{k}})\leq -\frac{1}{6}$, $N^{Sh}_{ W_{\mathcal{G}}}(G_{\beta_{k+1}})= \dots = N^{Sh}_{ W_{\mathcal{G}}}(G_{\beta_{l-1}})=0$ and $N^{Sh}_{ W_{\mathcal{G}}}(G_{\beta_{l}})\leq -\frac{1}{6}$. Here the superscript $Sh$ means that we compute the negativity according to the Shokurov log divisor not the primary log divisor (we have already defined it above).
Note that if $a^{Sh}_{\beta}>0$ for some $\beta$ then $a^{Sh}_{\beta}\geq \frac{1}{6}$ because the denominator of $a^{Sh}_{\beta}$ is in $\{1,2,3,4,6\}$. The chain $\mathcal{C}_{1}$ is of type $A_{l-k+1}$. From the constructions in the local isomorphism subsection we can replace the Shokurov log numbers $a^{Sh}_{\beta_{k}}=0,\dots , a^{Sh}_{\beta_{l}}=0$ with new log numbers with bounded denominators and preserve all other Shokurov log numbers in the graph $exc(W_{\mathcal{G}}/Z)$ such that we obtain a new log divisor $K_{W_{\mathcal{G}}}+\Omega_{1}$ on $W_{\mathcal{G}}$ which is antinef/$Z$ and is a Klt log divisor. Now put $K_{W}+\Omega={^*(K_{W_{\mathcal{G}}}+\Omega_{1})}$. The only problem with $\Omega$ is that it may have negative coefficients (it is a subboundary). Remark \ref{3-K} and corollary \ref{3-L} assure us that the negativity of these coefficients is bounded from below. Moreover if an exceptional divisor has negative coefficient in $\Omega$ then it should belong to the graph $\mathcal{G}$. But any exceptional divisor in $\mathcal{G}$ appears with positive coefficient in $\omega_{Sh}$. Since $\omega_{Sh}\geq \omega$ and also by the definition of $\mathcal{G}$, any exceptional divisor of type $F$ in $\mathcal{G}$ has positive coefficient at least $\frac{1}{6}$. And if $E$ is not of type $F$ but belongs to $\mathcal{G}$ then since $B^+$ is not zero over $P\in Z$ we get positive coefficients in $\omega_{Sh}$ for all exceptional/$Z$ curves which are not of type $F$. Thus all members of $\mathcal{G}=Exc(W/Q)$ appear with positive coefficient in $\omega_{Sh}$. Now consider the sum \[K_{W}+\Omega+I[K_{W}+\omega_{Sh}]=(1+I)K_{W}+[\Omega+I\omega_{Sh}]\] where $I$ is an integer. Since the negative coefficients appearing in $\Omega$ are bounded from below, this implies that there is a large bounded $I$ such that the sum $\Omega+I\omega_{Sh}$ is an effective divisor.
So by construction the log divisor $K_{W}+\frac{[\Omega+I\omega_{Sh}]}{1+I}$ is $\varepsilon$-lc and antinef/$Z$ for some fixed rational number $0<\varepsilon$ and the denominators of the coefficients in the log divisor are bounded. Now assume that $\mathcal{G}_{1}$ is strictly monotonic and the base curve is $G_{\beta_{1}}$. By corollary \ref{3-L} and remark \ref{3-K} the only place where we may have difficulties is the base point, $\xi$, on the base curve if there is any such point. Now we blow $\xi$ up and get the exceptional divisor $G_{\alpha_{1}}$. The chain $G_{\alpha_{1}}, G_{\beta_{1}}, \dots , G_{\beta_{r}}$ is not exactly of type $A_{r+1}$ because $G_{\alpha_{1}}$ is a $-1$-curve. But still we can claim that there is at most one base point on this chain and it can only be on $G_{\alpha_{1}}$. Obviously a base point cannot be on $G_{\beta_{2}}, \dots , G_{\beta_{r}}$. Now suppose that the intersection point of $G_{\alpha_{1}}$ and $G_{\beta_{r}}$ is a base point. Then the sum of negativities of all $G_{\alpha_{1}}, G_{\beta_{1}}, \dots , G_{\beta_{r}}$ should be less than $2\delta-2$. This is impossible because the sum of negativities of all $G_{\beta_{1}}, \dots , G_{\beta_{r}}$ on $W_{\mathcal{G}}$ is at least $2\delta-2$ (remember that blowing up reduces negativity). Now if on $G_{\alpha_{1}}$ there is a base point $\xi_{1}$ then again we blow this point up to get $G_{\alpha_{2}}$ and so on. This process has to stop after finitely many steps (not after bounded steps!). Let the final model be $W_{\xi}$ and $G_{\alpha_{1}}, \dots , G_{\alpha_{s}}$ the new exceptional divisors. In this way we have constructed a chain (because on each curve there was at most one base point) and by adding the new exceptional divisors to $\mathcal{G}_{1}$ we get a new graph $\mathcal{G}_{2}$. Now there is no base point on $\mathcal{G}_{2}$.
All the divisors $G_{\alpha_{i}}$ have self-intersection equal to $-2$ except $G_{\alpha_{s}}$ which is a $-1$-curve. Now let $\mathcal{C}_{2}$ be the pushdown of $\mathcal{C}$ i.e. the connected chain of curves with coefficient one in $\omega_{Sh}$ on $W_{\xi}$. If $G_{\alpha_{s}}$ is not in $\mathcal{C}_{2}$ then we proceed exactly as in the non-monotonic case above; that is we assign appropriate coefficients to the members of $\mathcal{C}_{2}$ and keep all other coefficients in $\omega_{Sh}$ on $W_{\xi}$. If $G_{\alpha_{s}}$ is in $\mathcal{C}_{2}$ then let $\mathcal{C}'$ be the chain $\mathcal{C}_{2}$ except the member $G_{\alpha_{s}}$. This new chain (i.e. $\mathcal{C}'$) is of type $A_x$ and so we can assign appropriate coefficients to its members and put the coefficient of $G_{\alpha_{s}}$ simply equal to zero and keep all other coefficients in $\omega_{Sh}$ on $W_{\xi}$. In any case we construct a Klt log divisor $K+\Omega$ on $W_{\xi}$ which is antinef/$Z$ and the boundary coefficients are with bounded denominators. The rest is as in the non-monotonic case above. Suppose the graph $\mathcal{G}_{1}$ is of type $D_{r}$ and $\mathcal{C}_{1}\neq \emptyset$ (if it is empty then we already have $\Omega_{1}$). Assume that the members of $\mathcal{G}_{1}$ are $G_{\beta}, G_{\beta'}, G_{\beta_{1}}, \dots , G_{\beta_{r}}$ and the members of $\mathcal{C}_{1}$ are $G_{\beta_{k}}, \dots , G_{\beta_{l}}$. As in the proof of lemma \ref{3-A} for the $D_{r}$ case we have $a^{Sh}_{\beta_{1}}\leq a^{Sh}_{\beta_{2}}\leq \dots$ . So $k=1$ and we have $2a^{Sh}_{\beta}-0-1\leq 0$ and so $a^{Sh}_{\beta}\leq \frac{1}{2}$ and similarly $a^{Sh}_{\beta'}\leq \frac{1}{2}$. The chain $\mathcal{C}_{1}$ is of type $A_l$ and so we can change the coefficients of its members in $\omega_{Sh}$ on $W_{\mathcal{G}}$. The rest of the argument is very similar to the above cases. Just note that there is no base point in this case.
The cases $E_{6}$, $E_{7}$ and $E_{8}$ are safe by remark \ref{3-K} and corollary \ref{3-L}. In these cases the graph $\mathcal{G}$ is bounded so assigning the primary log numbers to the members of $\mathcal{G}_{1}$ and Shokurov log numbers to the rest of the graph $exc(W_{\mathcal{G}}/Z)$ gives a log divisor which can be used as $K_{W_{\mathcal{G}}}+\Omega_{1}$. Here the proof of theorem \ref{3-I} is finished. $\Box$ \end{proof} \subsection{Global case} The main theorem of this subsection is the following theorem. A generalised version of this and the BAB follow as corollaries. \begin{thm}\label{weak-2dim} Conjecture $WC_{\delta, 2,\{0\}}$ holds in the global case i.e. when $Z$ is a point. \end{thm} \begin{proof} We divide the problem into two main cases: exceptional and non-exceptional. $(X,0)$ is non-exceptional if there is a non-Klt $\mathbb{Q}$-complement $K_X+M$. By [Sh2, 2.3.1], under our assumptions on $X$, non-exceptionality is equivalent to the fact that $K_X$ has a non-Klt $(0,n)$-complement for some $n<58$. We prove that the exceptional cases are bounded. But in the non-exceptional case we only prove the existence of an $(\varepsilon,n)$-complement for a bounded $n$. Later we show that this in turn implies the boundedness of $X$. First assume that $(X,0)$ is {\textbf{non-exceptional}}. \begin{enumerate} \item Let us denote the set of accumulation points of the mlds in dim 2 for lc pairs $(T,B)$ where $B\in \Phi_{sm}$, by $Accum_{2,\Phi_{sm}}$. Then $Accum_{2,\Phi_{sm}}\cap [0,1]=\{1-z\}_{z\in \Phi_{sm}}=\{\frac{1}{k}\}_{k\in\mathbb{N}}\cup \{0\}$ [Sh8]. Now if there is a $\tau>0$ such that $\mld(P,T,B)\notin [\frac{1}{k},\frac{1}{k}+\tau]$ for any natural number $k$ and any point $P\in T$ then there will be only a finite number of possibilities for the index of $K_{T}+B$ at $P$ if $(T,B)$ is $\frac{1}{m}$-lc for some $m\in \mathbb{N}$.
Now Borisov--McKernan [Mc, 1.2] implies the boundedness of all such $T$ if $-(K_T+B)$ is nef and big and $\tau$ and $m$ are fixed. Therefore in the following steps we try to reduce our problem to this situation in some cases. \item \begin{defn}\label{D-tau} Let $B=\sum b_{i}B_{i}$ be a boundary on a variety $T$ and $\tau>0$ a real number. Define \[D_{\tau}:=\sum_{b_{i}\notin [\frac{k-1}{k}-\tau, \frac{k-1}{k}]}b_{i}B_{i}+\sum_{b_{i}\in [\frac{k-1}{k}-\tau, \frac{k-1}{k}]} \frac{k-1}{k}B_{i}\] where in the first term $b_{i}\notin [\frac{k-1}{k}-\tau, \frac{k-1}{k}]$ for any natural number $k$ but in the second term $k$ is the smallest natural number satisfying $b_{i}\in [\frac{k-1}{k}-\tau, \frac{k-1}{k}]$. \end{defn} \begin{lem}\label{transform} For any natural number $m$ there is a real number $\tau>0$ such that if $(T,B)$ is a surface log pair, $P\in T$, $K_T+B$ is $\frac{1}{m}$-lc at $P$ and $D_{\tau}\in\Phi_{sm}$ then $K_T+D_{\tau}$ is also $\frac{1}{m}$-lc at $P$. \end{lem} Note that $\tau$ depends only on $m$. \begin{proof} By applying the ACC to all surface pairs with standard boundary, we get a fixed rational number $v>0$ such that if any $K_T+D_{\tau}$ is not $\frac{1}{m}$-lc at $P$ then $\mld(P,T,D_{\tau})<\frac{1}{m}-v$. Now assume that the lemma is not true. So there is a sequence $\tau_{1}>\tau_{2}>\dots$ and a sequence of pairs $\{(T_i,B_i)\}$ where if we take $\tau_i$ for the pair $(T_i,B_i)$ then the lemma doesn't hold at $P_i\in T_i$. In other words $\mld(P_i,T_i,D_{\tau_i})<\frac{1}{m}-v$. Write $B_i:=F_i+C_i$ where $F_i=\sum f_{i,x}F_{i,x}$ and $C_i=\sum c_{i,y}C_{i,y}$ have no common components and the coefficient of any component of $C_i$ is equal to the coefficient of the same component in $D_{\tau_i}$ but the coefficient of any component of $F_i$ is less than the coefficient of the same component in $D_{\tau_i}$.
Now there is a set $\{s_{1,x}\}\subseteq [\frac{m-1}{m}-\tau_{1}, \frac{m-1}{m}]$ of rational numbers such that $\mld(P_1,T_1, \sum s_{1,x}F_{1,x}+C_1)=\frac{1}{m}-v$. There is $i_2$ such that $\max \{s_{1,x}\}<\frac{m-1}{m}-\tau_{i_2}$. So there is also a set $\{s_{2,x}\}\subseteq [\frac{m-1}{m}-\tau_{i_2}, \frac{m-1}{m}]$ such that $\mld(P_{i_2},T_{i_2}, \sum s_{2,x}F_{i_2,x}+C_{i_2})=\frac{1}{m}-\frac{v}{2}$. By continuing this process we find $\{s_{j,x}\}\subseteq [\frac{m-1}{m}-\tau_{i_j}, \frac{m-1}{m}]$ such that $\max \{s_{i_{j-1},x}\}<\frac{m-1}{m}-\tau_{i_j}$. Hence we can find a set $\{s_{j,x}\}\subseteq [\frac{m-1}{m}-\tau_{i_j}, \frac{m-1}{m}]$ such that $\mld(P_{i_j},T_{i_j}, \sum s_{j,x}F_{i_j,x}+C_{i_j})=\frac{1}{m}-\frac{v}{j}$. In this way we have constructed a set $\cup \{s_{j,x}\}$ of rational numbers which satisfies the DCC condition but there is an increasing set of mlds corresponding to boundaries with coefficients in $\cup \{s_{j,x}\}$. This is a contradiction with the ACC for mlds. $\Box$ \end{proof} \item Let $m$ be the smallest number such that $\frac{1}{m}\leq \delta$. Let $h=\min\{\frac{k-1}{k}-\frac{u}{r!}>0\}_{1\leq k\leq m} $ where $u,k$ are natural numbers and $r=\max\{m,57\}$. Now choose a $\tau$ for $m$ as in lemma \ref{transform} such that $\tau <h$. Blow up one exceptional divisor $E$ via $f:Y\longrightarrow X$ such that the log discrepancy satisfies $\frac{1}{k}\leq a(E,X,0)\leq \frac{1}{k}+\tau$ for some $k>1$ (if such $E$ doesn't exist then go to step 1). The crepant log divisor $K_Y+B_Y$ is $\frac{1}{m}$-lc and so by lemma \ref{transform} $K_Y+D_{\tau}$ is also $\frac{1}{m}$-lc ($D_{\tau}$ is constructed for $B_Y$). Let $K_X+B^+$ be a $(0,n)$-complement for some $n<58$ and $K_Y+B_{Y}^+$ be the crepant blow up. Then by the way we chose $\tau$ we have $D_{\tau}\leq B^+$. Now run the anti-LMMP over $K_Y+D_{\tau}$ i.e. contract any birational type extremal ray $R$ such that $(K_Y+D_{\tau}).R>0$.
At the end of this process we get a model $X_1$ and the corresponding map $g:Y\longrightarrow X_1$. After contracting those birational extremal rays where $K_{X_1}+D_{\tau}$ is numerically zero we get a model $S_1$ with one of the following properties: \begin{description} \item[$\diamond$] $\rho(S_1)=1$ and $K_{S_1}+D_{\tau}\equiv K_{S_1}+B_{S_1}^+\equiv 0$ and $\frac{1}{m}$-lc. \item[$\diamond$] $\rho(S_1)=2$ and $(K_{S_1}+D_{\tau}).R= 0$ for a non-birational type extremal ray $R$ on $S_1$ and $K_{S_1}+D_{\tau}$ is $\frac{1}{m}$-lc. \item[$\diamond$] $-(K_{S_1}+D_{\tau})$ is nef and big and $K_{S_1}+D_{\tau}$ is $\frac{1}{m}$-lc. \end{description} where $K_{S_1}+D_{\tau}$ is the birational transform of $K_{Y}+D_{\tau}$. In any case $-(K_{S_1}+D_{\tau})$ is nef because $D_{\tau}\leq B_{S_1}^+$ and so $D_{\tau}$ can not be positive on a non-birational extremal ray. $K_{S_1}+D_{\tau}$ is $\frac{1}{m}$-lc by the way we have chosen $\tau$. \item If the first case occurs in the division in step 3 then we are done. \item If the second case occurs in the division in step 3 then $R$ defines a fibration $\phi: S_1\longrightarrow Z$. Note that $B_{S_1}^+=D_{\tau}+N$ where each component of $N$ is a fibre of $\phi$ and there are only a finite number of possibilities for the coefficients of $N$. Now we can replace $N$ by $N'\equiv N$ where each component of $N'$ is a general fibre of $\phi$, with only a finite number of possibilities for the coefficients of $N'$ and such that $K_{S_1}+D_{\tau}+N'$ is $\frac{1}{m}$-lc. Note that the components of $N'$ are smooth curves and intersect the components of $D_{\tau}$ transversally in smooth points of $S_1$. Now the only problem is that we don't know if the index of $K_{S_1}+D_{\tau}+N'$ is bounded or not. Note that it is enough if we can get the boundedness of the index of $K_{S_1}+D_{\tau}$. \item Now assume that the third case or the second case occurs in the division in step 3.
Let $C$ be a curve contracted by $g:Y\longrightarrow X_1$ constructed in step 3. If $C$ is not a component of $B_Y$ then the log discrepancy of $C$ with respect to $K_{X_1}+B_{X_1}$ is at least 1 where $K_{X_1}+B_{X_1}$ is the birational transform of $K_{Y}+B_{Y}$. Moreover $g(C)\in \Supp B_{X_1}\neq \emptyset$. So the log discrepancy of $C$ with respect to $K_{X_1}$ is more than 1. This means that $C$ is not a divisor on a minimal resolution $W_1\longrightarrow X_1$. Let $W\longrightarrow X$ be a minimal resolution. Then there is a morphism $W\longrightarrow W_1$. Hence $exc(W_1/X_1)\subseteq exc(W/X)$. Now if $C\in exc(W/X)$ is exceptional/$X_1$ then $a(C,X_1,D_{\tau})<a(C,X,0)$. \item Let $(X_1,B_1):=(X_1,D_{\tau})$ and repeat the process. In other words again we blow up one exceptional divisor $E$ via $f_1:Y_1\longrightarrow X_1$ such that the log discrepancy satisfies $\frac{1}{k}\leq a(E,X_1,B_1)\leq \frac{1}{k}+\tau$ for some natural number $k>1$. The crepant log divisor $K_{Y_1}+B_{1,Y_1}$ is $\frac{1}{m}$-lc and so by lemma \ref{transform} $K_{Y_1}+D_{1,\tau}$ is $\frac{1}{m}$-lc. Note that the point which is blown up on $X_1$ can not be smooth since $\tau <h$ as defined in step 3. So according to step 6 the blown up divisor $E$ is a member of $exc(W/X)$. Now we again run the anti-LMMP on $K_{Y_1}+D_{1,\tau}$ and proceed as in step 3. $$ \xymatrix{ W \ar[d]\ar[r] & W_1 \ar[d]\ar[r] & W_2 \ar[d]\ar[r] & \dots \\ Y \ar[d]^{f}\ar[rd]^{g} & Y_1\ar[d]^{f_1}\ar[rd]^{g_1} & Y_2 \ar[d]\ar[rd] &\dots\\ X & X_1\ar[d] & X_2\ar[d] & \dots \\ & S_1 & S_2 & \dots \\ }$$ \item Steps 6,7 show that each time we blow up a member of $exc(W/X)$ say $E$. And if we blow that divisor down in some step then the log discrepancy $a(E,X_j,B_j)$ will decrease. That divisor will not be blown up again unless the log discrepancy drops at least by $\frac{1}{2(m-1)}-\frac{1}{2m}$. 
So after finitely many steps either case one occurs in the division in step 3 or we get a model $X_i$ with a standard boundary $B_i$ such that there is no $E$ where $\frac{1}{k}\leq a(E,X_i,B_i)\leq \frac{1}{k}+\tau$ for any $1<k\leq m$. The latter implies the boundedness of the index of $K_{X_i}+B_i=K_{X_i}+D_{i-1,\tau}$. If $-(K_{X_i}+B_{i})$ is nef and big (case one) then $(X_i,B_i)$ will be bounded by step 1. Otherwise we have the second case in the division above and so by step 5 we are done (the index of $K_{X_i}+D_{i-1,\tau}+N'$ is bounded). Now we treat the {\textbf{exceptional}} case: From now on we assume that $(X,0)$ is exceptional. \item Let $W\longrightarrow X$ be a minimal resolution. Let $0<\tau<\frac{1}{2}$ be a number and the minimal log discrepancy of $(X,0)$ be $a=\mld(X,0)$. If $a\geq \frac{1}{2}+\tau$ then we know that $X$ belongs to a bounded family according to step 1 above. So we assume $a<\frac{1}{2}+\tau$ and then blow up an exceptional/$X$ curve $E_1$ with log discrepancy $a_{E_1}=a(E_1,X,0)\leq \frac{1}{2}+\tau$ to get $Y\longrightarrow X$ and put $K_{Y}+B_{Y}={^*K_{X}}$. Let $t\geq 0$ be a number such that there is an extremal ray $R$ such that $(K_{Y}+B_{Y}+tE_1).R=0$ and $E_1.R>0$ (and s.t. $K_{Y}+B_{Y}+tE_1$ Klt and antinef). Such $R$ exists otherwise there is a $t>0$ such that $K_{Y}+B_{Y}+tE_1$ is lc (and not Klt) and antinef. This is a contradiction by [Sh2, 2.3.1]. Now contract $R: Y\longrightarrow Y_1$ if it is of birational type. Again by increasing $t$ there will be an extremal ray $R_1$ on $Y_1$ such that $(K_{Y_1}+B_{Y_1}+tE_1).R_1=0$ and $E_1.R_1>0$ (preserving the nefness of $-(K_{Y_1}+B_{Y_1}+tE_1)$ ). If it is of birational type then contract it and so on.
After finitely many steps we get a model $(V_1, B_{V_1}+t_1E_1)$ and a number $t_1>0$ with the following possible outcomes: \begin{equation}\label{3-5-J} \end{equation} \begin{description} \item[$\diamond$] $ (V_1, B_{V_1}+t_1 E_1)$ is Klt, $\rho(V_1)=1$ and $K_{V_1}+B_{V_1}+t_1 E_1$ is antinef. \item[$\diamond$] $ (V_1, B_{V_1}+t_1 E_1)$ is Klt and $\rho(V_1)=2$ and there is a non-birational extremal ray $R$ on $V_1$. Moreover $K_{V_1}+B_{V_1}+t_1 E_1$ and $K_{V_1}$ are antinef. \item[$\diamond$] $ (V_1, B_{V_1}+t_1 E_1)$ is Klt and $\rho(V_1)=2$ and there is a non-birational extremal ray $R$ on $V_1$. Moreover $K_{V_1}+B_{V_1}+t_1 E_1$ is antinef but $K_{V_1}$ is not antinef. \end{description} Define $K_{V_1}+D_1=K_{V_1}+B_{V_1}+t_1 E_1$. Note that in all the cases above $E_1$ is a divisor on $V_1$ and the coefficients of $B_{V_1}$ and $D_1$ are $\geq \frac{1}{2}-\tau$. \begin{lem}\label{bound-index} Let $P\in U$ be a $\delta$-lc surface singularity. Moreover suppose that there is at most one exceptional/$U$ divisor such that $a(E, U,0)<\frac{1}{2}+\tau$. Then the index of $K_U$ is bounded at $P$ where the bound only depends on $\delta$ and $\tau$. \end{lem} \begin{proof} We only need to prove this when the singularity is of type $A_r$ (otherwise the index is bounded). If there is no $E/P$ such that $a(E,U,0)<\frac{1}{2}+\frac{\tau}{2}$ then step 1 shows that the index is bounded. But if there is one $E/P$ such that $a(E,U,0)<\frac{1}{2}+\frac{\tau}{2}$ then using the notation as in \ref{A_r} we have $a_{i+1}-a_i\geq \frac{\tau}{2}$ and $a_{i-1}-a_i\geq \frac{\tau}{2}$. This implies the boundedness of $r$ and so the index at $P$. $\Box$ \end{proof} \item Let $U$ be a surface with the following properties: \begin{description} \item[$\diamond$] $\rho(U)=1$. \item[$\diamond$] $K_U+G_U$ antinef, Klt and exceptional. \item[$\diamond$] $K_U$ antiample.
\end{description} Now blow up two divisors $E$ and $E'$ as $f: Y_U\longrightarrow U$ such that $a(E,U,0)<\frac{1}{2}+\tau$ and $a(E',U,0)<\frac{1}{2}+\tau$ (suppose there are such divisors). Choose $t,t'\geq 0$ such that $(f^*(K_{U}+G_U)+tE+t'E').R=0$ for an extremal ray $R$ s.t. $R.E\geq 0$ and $R.E'\geq 0$ and $f^*(K_{U}+G_U)+tE+t'E'$ is antinef and Klt. We contract $R$ to get $g:Y_U\longrightarrow U'$. We call such operation a {\textbf{hat of first type}}. Note that $E$ and $E'$ are divisors on $U'$ and $\rho(U')=2$. Define $K_{U'}+G_{U'}$ to be the pushdown of $f^*(K_{U}+G_U)+tE+t'E'$. If $K_U$ is $\deltalta$-lc and such $E,E'$ don't exist as above then the index of $K_U$ will be bounded by lemma \ref{bound-index}. So $U$ will be bounded. \item Let $U$ be a surface with the following properties: \betagin{description} \item[$\diamond$] $\rho(U)=2$. \item[$\diamond$] $K_U+G_U$ antinef, Klt and exceptional. \item[$\diamond$] $-K_U$ nef and big. \end{description} Now blow up a divisor $E$ to get $f:Y_U\longrightarrow U$ such that $a(E,U,0)<\frac{1}{2}+\tau$ (suppose there is such $E$). Let $t\geq 0$ be such that $(f^*(K_{U}+G_U)+tE).R=0$ for an extremal ray $R$ s.t. $R.E\geq 0$ and $f^*(K_{U}+G_U)+tE$ is antinef and Klt. We contract $R$ to get $g:Y_U\longrightarrow U'$. We call such operation a {\textbf{hat of second type}}. Note that $E$ is a divisor on $U'$ and $\rho(U')=2$. Define $K_{U'}+G_{U'}$ to be the pushdown of $f^*(K_{U}+G_U)+tE$. If $K_U$ is $\deltalta$-lc and such $E$ doesn't exist as above then the index of $K_U$ and so $U$ will be bounded by lemma \ref{bound-index}. \item Let $U$ be a surface with the following properties: \betagin{description} \item[$\diamond$] $\rho(U)=2$ and $U$ is Pseudo-WLF. \item[$\diamond$] There is a birational type extremal ray $R_{bir}$ and the other extremal ray of $U$ is of fibration type. \item[$\diamond$] $K_U+G_U$ antinef, Klt and exceptional. \item[$\diamond$] $K_U.R_{bir}>0$. 
\end{description} Then we say that $U$ is of {\textbf{2-bir}} type. Let $C$ be the divisor that defines $R_{bir}$ on $U$. There is a $c\in (0,1)$ such that $(K_U+cC).C=0$. Now blow up $E$ as $Y_U\longrightarrow U$ such that $a(E,U,cC)<\frac{1}{2}+\tau$ (suppose there is such $E$). Now let $t\geq 0$ such that $f^*(K_{U}+G_U+tC).R=0$ for an extremal ray $R$ s.t. $ R.E\geq 0$, $R.C\geq 0$ and $f^*(K_{U}+G_U+tC)$ is antinef and Klt. We contract $R$ to get $g:Y_U\longrightarrow U'$. We call such operation a {\textbf{hat of third type}}. Define $K_{U'}+G_{U'}$ to be the pushdown of $f^*(K_{U}+G_U+tC)$. Note that in this case $E$ and $C$ are both divisors on $U'$ and $\rho(U')=2$. If $K_U+cC$ is $\deltalta$-lc and such $E$ doesn't exist as above then contract $C: U\longrightarrow U_1$. Thus the index of $K_{U_1}$ will be bounded at each point by lemma \ref{bound-index} and so $U_1$ and consequently $U$ will be bounded. $$ \xymatrix{ Y_U \ar[d]^{f}\ar[rd]^{g} & \\ U & U' \\ }$$ \item Let $U$ be a surface such that $\rho(U)=2$ and $K_U+G_U$ antinef, Klt and exceptional where $G_U\neq 0$. Moreover suppose there are two exceptional curves $H_1$ and $H_2$ on $U$. In this case let $C$ be a component of $G_U$ and let $t\geq 0$ such that $(K_U+G_U+tC).H_i=0$ for $i=1$ or 2 and $K_U+G_U+tC$ Klt and antinef (assume $i=1$). We contract $H_1$ as $U\longrightarrow U_1$ and define $K_{U_1}+G_{U_1}$ to be the pushdown of $K_{U}+G_U+tC$. \betagin{defn} Define $K_U+\Delta_U$ as follows: $K_U+\Delta_U:=K_U$ in step 10 and step 11. $K_U+\Delta_U:=K_U+cC$ in step 12. And $K_{U_1}+\Delta_{U_1}:=K_{U_1}$ in step 13. \end{defn} \item The following lemmas are crucial to our proof. \betagin{lem}\label{bound-1} Let $\mathcal{U}$ be a bounded family of surfaces with Picard number one or two and let $0<x<1$ be a rational number. 
Moreover assume the following for each member $U_i$: \betagin{description} \item[$\diamond$] $-(K_{U_i}+B_i)$ is nef and big for a boundary $B_i$ where each coefficient of $B_i$ is $\geq x$. \item[$\diamond$] $K_{U_i}+B_i$ is Klt. \end{description} Then $(U_i, \Supp B_i)$ is bounded. \end{lem} \betagin{proof} In order we prove that there is a finite set $\Lambda_{f}$ such that for each $U_i$ there is a boundary $M_i\in \Lambda_f$ s.t. $-(K_{U_i}+M_i)$ is nef and big and $M_i\leq B_i$. If $\rho(U_i)=1$ then simply take $M_i= x\sum_{\alphapha}B_{\alphapha}$ where $B_i=\sum_{\alphapha}B_{\alphapha}$. Obviously $-(K_{U_i}+M_i)$ is nef and big and since $U_i$ belongs to a bounded family so $(U_i, \Supp M_i)$ is also bounded. Now suppose $\rho(U_i)=2$. Put $N_i= x\sum_{\alphapha}B_{\alphapha}$. If $-(K_{U_i}+N_i)$ is not nef then there should be an exceptional curve $E$ on $U_i$ where $(K_{U_i}+N_i).E>0$. Let $\theta: U_i\longrightarrow U'_i$ be the contraction of $E$. By our assumptions $K_{U'_i}+B'_i$, the pushdown of $K_{U_i}+B_i$, is antiample. So $K_{U'_i}+N'_i$, the pushdown of $K_{U_i}+N_i$ is also antiample. Boundedness of $U_i$ implies the boundedness of $U'_i$ (since we have a bound for the Picard number of a minimal resolution of $U'_i$). Thus $-(K_{U_i}+M_i):=-\theta^*(K_{U'_i}+N'_i)=-(K_{U_i}+N_i+yE)$ is nef and big and there are only a finite number of possibilities for $y>0$. This proves the boundedness of $(U_i, \Supp(N_i+yE))$. Note that in the arguments above $\Supp B_i=\Supp M_i$. $\Box$ \end{proof} \betagin{lem}[The main lemma]\label{mainlemma} Suppose that $\mathcal{U}=\{(U,\Supp D)\}$ is a bounded family of log pairs of dim $d$ where $K_U+D$ is antinef and $\varepsilonsilon$-lc for a fixed $\varepsilonsilon>0$. Then the set of partial resolutions of all $(U,D)\in\mathcal{U}$ is a bounded family. \end{lem} Note that here we don't assume $(U,D)$ to be bounded i.e. the coefficients of $D$ may not be in a finite set. 
\begin{proof} Let $(U_t,D_t)$ be a member of the family. By our assumptions the number of components of $D_t$ is bounded (independent of $t$) and so we can consider any divisor supported in $D_t$ as a point in a real finite dimensional space. Let $D_t=\sum_{1\leq i \leq q} d_{i,t}D_{i,t}$ and define \[\mathcal{H}_t:=\{(h_1,\dots ,h_q)\in \mathbb{R}^q ~|~K_{U_t}+\sum_{1\leq i \leq q} h_{i}D_{i,t}\text{ is antinef and }\varepsilon\text{-lc} \}\] So $\mathcal{H}_t$ is a subset of the cube $[0,1]^q$ and since being $\varepsilon$-lc and antinef are closed conditions then $\mathcal{H}_t$ is a closed and hence compact subset of $[0,1]^q$. In other words $\{(U_t,\mathcal{H}_t)\}$ is a bounded family. For each $H\in \mathcal{H}_t$ the corresponding pair $(U_t, H)$ is $\varepsilon$-lc. Let $Y_H\longrightarrow U_t$ be a terminal blow up of $(U_t, H)$ and assume that the set of exceptional/$U_t$ divisors on $Y_H$ is $R_H$. For different $H$ we may have different $R_H$ but the union of all $R_H$ is a finite set where $H$ runs through $\mathcal{H}_t$. Suppose otherwise so there is a sequence $\{H_1,\dots, H_m,\dots\}\subseteq\mathcal{H}_t $ such that the union of all $R_{H_i}$ is not finite. Since $\mathcal{H}_t$ is compact then there is at least one accumulation point in $\mathcal{H}_t$, say $\bar{H}$, for the sequence (we can assume that this is the only accumulation point). So $(U_t, \bar{H})$ is $\varepsilon$-lc. Let $v=(1,\dots,1)\in \mathbb{R}^q$. Then there are $\alpha, \beta>0$ such that $K_{U_t}+H_\alpha$ is $(\varepsilon-\beta)$-lc where $\varepsilon-\beta >0$ and $H_\alpha$ is the corresponding divisor of $\bar{H}+\alpha v$. In particular this implies that there is a (with positive radius) $d$-dim disc $\mathbb{B}\subseteq [0,1]^q$ with $\bar{H}$ as its centre such that $K_{U_t}+H$ is $(\varepsilon-\beta)$-lc and $R_H\subseteq R_{H_\alpha}$ for any $H\in \mathbb{B}$.
This is a contradiction with the way we chose the sequence $\{H_1,\dots, H_m,\dots\}$. The function $R:\mathcal{H}_t \longrightarrow \mathbb{N}$ gives a finite decomposition of the set $\mathcal{H}_t$. This means that there are only a finite number of partial resolutions for all $(U_t,H)$ where $H\in \mathcal{H}_t$ for a fixed $t$. Using Noetherian induction completes the proof. $\Box$ \end{proof} Now we prove a statement similar to [Sh2, 4.2]. \betagin{lem} Let $\mathcal{U}=\{(U, \Supp D)\}$ be a bounded family where we assume that each $(U, D)$ is Klt and exceptional and $K+D$ is antinef. Then the singularity is bounded i.e. there is a constant $\gammamma >0$ such that each $(U,D)$ is $\gammamma$-lc. \end{lem} \betagin{proof} For $(U, \Supp B_i)$ a member of the family let \[\mathcal{H}_i = \{H =\sum h_{k,i}D_{k,i} |~ K + H ~is~ log~ canonical~ and~ -(K + H)~ is~ nef\}\] where $D_i=\sum d_{k,i}D_{k,i}$. It is a closed subset of a multi-dimensional cube (with bounded dimension) and so it is compact. Let $a_i=\inf\{\mld(U_i,H)~:~H\in \mathcal{H}_i\}>0$. Since the family is bounded then $\{a_i\}$ is bounded from below. $\Box$ \end{proof} Now we return to the division in \ref{3-5-J} and deal with each case as follows: \item (First case in \ref{3-5-J}) Perform a hat of the first type for $U:=V_1$ and $K_U+G_U:=K_{V_1}+D_1$ (so we blow up $E,E'$). Then we get $V_2:=U'$ and $K_{V_2}+D_2:=K_{U'}+G_{U'}$ as defined above and $Y_1:=Y_U$. Now $V_2$ would be as in step 11, 12 or 13 so we can perform the appropriate operation as explained in each case. If $V_2$ is as in step 11 then $a(E,V_2,\Delta_{V_2})=1$ and $a(E',V_2,\Delta_{V_2})=1$. If $V_2$ is as in step 12 then $E$ or $E'$ is not exceptional so we have $a(E,V_2,\Delta_{V_2})=1$ or $a(E',V_2,\Delta_{V_2})=1$. But if $V_2$ is as in step 13 then we get $U_1$ as defined in step 13 and so $a(E,U_1,\Delta_{U_1})=1$ or $a(E',U_1,\Delta_{U_1})=1$. In the later case we define (replace) $(V_2,D_2):=(U_1,G_{U_1})$. 
So whatever case we have for $V_2$ we have $a(A,V_2,\Delta_{V_2})=1$ at least for one $A\in exc(Y/X)$. \item (Second case in \ref{3-5-J}) Here we perform a hat of second type for $U:=V_1$ and $K_U+G_U:=K_{V_1}+D_1$ to get $V_2:=U'$ and $K_{V_2}+D_2:=K_{U'}+G_{U'}$. If $V_2$ is as in step 11 then $a(E,V_2,\Delta_{V_2})=1$. If $V_2$ is as in step 12 then go to step 17. But if $V_2$ is as in step 13 then we get $U_1$ as defined in step 13 where $K_U+G_U:=K_{V_2}+D_2$ and then continue the process for $U_1$ as in step 15. Here in some cases we may not be able to make the singularities better for $K+\Delta$ immediately on $V_2$ but the algorithm ensures us that we will be able to do that in later steps. \item (Third case in \ref{3-5-J}) In this case $V_1$ is 2-bir. We perform a hat of the third type where $U:=V_1$ and $K_U+G_U:=K_{V_1}+D_1$ so we get $V_2:=U'$ and $Y_1:=Y_U$ and $K_{V_2}+D_2:=K_{U'}+G_{U'}$. If $V_2$ is as in step 11 then $a(E,V_2,\Delta_{V_2})=1$ and $a(C,V_2,\Delta_{V_2})=1$ ($E$ is the blown divisor and $C$ is on $V_1$, as in step 12 for $U:=V_1$). If $V_2$ is as in step 12 then $a(E,V_2,\Delta_{V_2})=1$ or $a(C,V_2,\Delta_{V_2})=1$. Now if $V_2$ is as in step 13 then we get $U_1$ as defined in step 13 and so $a(E,U_1,\Delta_{U_1})=1$ or $a(C,U_1,\Delta_{U_1})=1$. Then we define (replace) $(V_2,D_2):=(U_1,G_{U_1})$. So whatever case we have for $V_2$ we have $a(A,V_2,\Delta_{2})=1$, after the appropriate operations, at least for one $A\in exc(Y/X)$. \item After finitely many steps we get $V_r$ where $W/V_r$ such that $K_W+D:={^*(K_{V_r}+D_r)}$ with effective $D$ where $V_r$ is bounded. Since all the coefficients of $B_{V_r}$ are $\geq \frac{1}{2}-\tau$ ($B_{V_r}$ is the birational transform of $B_W$ where $K_W+B_W={^*K_X}$) then $(V_r, B_{V_r})$ is also bounded by lemma \ref{bound-1}. By construction $\Supp D_r=\Supp B_{V_r}$ and so $(V_r, D_r)$ is bounded. Lemma \ref{mainlemma} implies the boundedness of $W$ and so of $X$. 
$$ \xymatrix{ W \ar[d]\ar[r] & W \ar[d]\ar[r] & W \ar[d]\ar[r] & \dots\ar[r] & W\ar[d]&\\ Y \ar[d]^{f}\ar[rd]^{g} & Y_1\ar[d]^{f_1}\ar[rd]^{g_1} & Y_2 \ar[d]\ar[rd] &\dots & Y_{r-1}\ar[d]\ar[rd]&\\ X & V_1 & V_2 & \dots & V_{r-1}& V_r\\ }$$ \end{enumerate} $\Box$ \end{proof} \begin{cor}\label{BAB'} The BAB Conjecture (\ref{BAB}) holds in dim 2. \end{cor} \begin{proof} {\textbf{Reduction to the case $B=0$:}} We run the anti-LMMP on the divisor $K_X$; if there is an extremal ray $R$ such that $K_X.R>0$ then contract $R$ to get $X\longrightarrow X_1$. Note that $B.R<0$ so the bigness of $-K_X$ will be preserved (so $R$ has to be of birational type). Repeat the same process for $X_1$ i.e. if there is an extremal ray $R_1$ such that $K_{X_1}.R_1>0$ then contract it and so on. Since in each step we get a Pseudo-WLF then the canonical class cannot become nef. Let $\overline{X}$ be the last model in our process, then $-K_{\overline{X}}$ is nef and big. Now the boundedness of $\overline{X}$ implies the boundedness of $X$. So we replace $X$ by $\overline{X}$ i.e. from now on we can assume $B=0$. By theorem \ref{weak-2dim} $(X,0)$ has an $(\varepsilon,n)$-complement $K_X+B^+$ for some $n\in \mathcal{N}_{\delta,2,\{0\}}$. Now let $W\longrightarrow X$ be a minimal resolution and $\phi:W\longrightarrow S$ be the map obtained by running the classical MMP on $W$ i.e. contracting $-1$-curves to get a minimal $S$. As it is well known $S$ is $\mathbb{P}^2$ or a smooth ruled surface with no $-1$-curves. Let $B_{S}^+=\sum b_{i,S}^+B_{i,S}^+$ be the pushdown of $B_{W}^+$ on $S$ where $K_W+B_{W}^+$ is the crepant pullback of $K_X+B^+$. Then define \[A_S:=\frac{b_{1,S}^+}{2}B_{1,S}^++ \sum_{i\neq 1} b_{i,S}^+B_{i,S}^+\] If $S=\mathbb{P}^2$ then $-(K_S+A_S)$ is ample and $\Supp A_S=\Supp B_{S}^+$. By lemma \ref{bound-1} $(S, \Supp A_S=\Supp B_{S}^+)$ is bounded. Then lemma \ref{mainlemma} implies the boundedness of $W$ and so of $X$. Now assume that $S$ is a ruled surface.
If there is no exceptional curve (with negative self-intersection) on $S$ then $-(K_S+A_S)$ is nef and big if we take $B_{1,S}^+$ a non-fibre component of $B_{S}^+$. Since $S$ is smooth then $S$ is bounded and so $(S, \Supp A_S=\Supp B_{S}^+)$ is bounded. But if there is an exceptional divisor $C$ on $S$ then contract $C$ as $S\longrightarrow S'$. So $S$ is a minimal resolution of $S'$. Since $\rho(S)=2$ and $(S',0)$ is $\deltalta$-lc then the index of each integral divisor on $S'$ is bounded. So $S'$ is bounded and then $(S',\Supp B_{S'}^+)$ is also bounded. This implies the boundedness of $S$, $W$ and so of $X$. Note that $B_{S'}^+\neq 0$ as $S'$ is WLF. $\Box$ \end{proof} \betagin{cor}\label{weak-proof} Conjecture \ref{weak} holds for any finite set $\Gammamma_f\subseteq [0,1]$ of rational numbers. \end{cor} \betagin{proof} It follows from corollary \ref{BAB'} $\Box$. \end{proof} \subsection{Second proof of the global case} Remember that all the varieties are algebraic surfaces unless otherwise stated. We first prove the boundedness of varieties and then prove the boundedness of complements. This is somehow the opposite of what we did in the last subsection. However our proof was inspired by the theory of complements. The following proof heavily uses the properties of surfaces. That means that it is not expected to have a higher dimensional generalisation. The method also has some similarity with the proof of Alexeev and Mori [AM] in the sense that we both analyse a series of blow ups, but in different ways. \betagin{thm}\label{3-5-H} The $BAB_{\deltalta, 2,[0,1]}$ holds. \end{thm} \betagin{proof} Now we reduce to the case $B=0$. Run the anti LMMP on the pair $(X, 0)$ i.e. if $-K_{X}$ is not nef then contract an extremal ray $R$ where $K_{X}.R>0$. This obviously contracts a curve in $B$. Repeating this process gives us a model $(X', 0)$ where $-K_{X'}$ is nef and big. Otherwise $X'$ should be with Picard number one and $K_{X'}$ nef. 
But this is impossible by our assumptions. We will prove the boundedness of $\{X'\}$ and so it implies the boundedness of $\{X\}$. Now we replace $(X,B)$ with $(X',0)$ but we denote it as $(X,0)$. We also assume that $\deltalta<1$ otherwise $X$ will be smooth and so with bounded index. Let $W\longrightarrow X$ be a minimal resolution. The main idea is to prove that there are only a bounded number of possibilities for the coefficients in $B_{W}$ where $K_{W}+B_{W}={^*K_{X}}$ i.e. the index of $K_{X}$ is bounded. {\bf{Strategy:}} Here we again have the familiar division into non-exceptional and exceptional cases. First assume that $(X,0)$ is non-exceptional. So there will be a $(0,n)$-complement $K_{X}+B^+$ for $n<58$. If we run the classical MMP on the pair $(W, 0)$ then we end up with $S$ which is either $\mathbb{P}^2$ or a ruled surface. Since $-(K_{S}+B_{S})={-_*(K_{W}+B_{W})}$ is nef and big then $K_{S}$ can not be nef. Let $K_{W}+B_{W}^+={^*(K_{X}+B_{X}^+)}$ \betagin{lem}\label{3-5-D} Let $G$ be a component in the boundary $B_{S}^+$ where $K_{S}+B_{S}^+={_*(K_{W}+B_{W}^+)}$ then $G^2$ is bounded from below and above. Moreover there are only a bounded number of components in $B_{S}^+$. \end{lem} \betagin{proof} The boundedness of $G^2$ follows from the next lemma and the fact that $X$ is $\deltalta$-lc. The boundedness of number of components in $B_{S}^+$ is left to the reader. $\Box$ \end{proof} The more general lemma below will also be needed later. \betagin{lem}\label{3-5-E} Let $(T,B_{T})$ be an $\deltalta$-lc WLF pair where $T$ is either $\mathbb{P}^2$ or a smooth ruled surface (with no $-1$-curves) and suppose $K_{T}+\overline{B}$ is antinef (lc) for a boundary $\overline{B}$. Let $M, B'_{T}$ be effective divisors with no common component such that $\overline{B}=B'_{T}+M$ then $M^2$ is bounded from above. \end{lem} \betagin{proof} First assume that $T=\mathbb{P}^2$. 
In this case the lemma is obvious because if $M^2$ is too big then so is $\deg M$ and so it contradicts the fact that $\deg M \leq 3$. Now assume that $T$ is a ruled surface where $F$ is a general fibre other than those curves in the boundary and $C$ a section. The Mori cone of $T$ is generated by its two edges. $F$ generates one of the edges. If all the components of $M$ are fibres then $M^2=0$ and we are done. So assume otherwise and let $M\equiv aC+bF$; then $0<M.F=(aC+bF).F=a$ so $a$ is positive. Let $C^2=-e$ and consider the following two cases: 1. $e\geq 0$: We know that $K_{T}\equiv -2C+(2g-2-e)F$ where $g$ is a non-negative number [H, V, 2.11]. So we have \[ 0\geq (K_{T}+M+tC).F= -2+a+t \] for some $t\geq 0$ where $B'_{T}\equiv tC+uF$ ($u\geq 0$ since $e\geq 0$). Hence $a+t\leq 2$. Calculations give $M^2=a(2b-ae)$. Since $a$ and $e$ are both non-negative then $M^2$ big implies that $b$ is big. But on the other hand we have: \[ 0\geq (K_{T}+M+tC).C= (-2+a+t)(-e) +2g-2-e+b\] This gives a contradiction if $b$ is too big because $e$ is also bounded. The boundedness of $e$ follows from the fact that $T$ is $\delta$-lc. Indeed, in the local isomorphic subsection we proved that exceptional divisors have bounded self-intersection numbers. 2. $e<0$: In this case by [H, V, 2.12] we have $e+2g\geq 0$ and so: \[ 0\geq (K_{T}+M).C= (-2+a)(-e) +2g-2-e+b=2g+e-2-(ae/2)+ (2b-ae)/2\] Now since $2g+e-(ae/2)\geq 0$ then $(2b-ae)/2\leq 2$. So $M^2$ is bounded because $a$ is also bounded. $\Box$ \end{proof} Let $P\in X$ be a singular point. If $P$ is not in the support of $B^+$ then the index of $K_{X}$ at $P$ is at most $57$ and so bounded. Now suppose that $P$ is in the support of $B^+$. If the singularity of $P$ is of type $E_{6}$, $E_{7}$, $E_{8}$ or $D_{r}$ then again the index of $K_{X}$ at $P$ is bounded. So assume that the singularity at $P$ is of type $A_{r}$. The goal is to prove that the number of curves in $exc(W/P)$ is bounded.
In order we should prove that the number of $-2$-curves are bounded because the number of other curves is bounded by the proof of local isomorphic case. Note that the coefficient of any $E\in exc(W/P)$ in $B_{W}^+$ is positive and there are only a bounded number of possibilities for these coefficients. Let $\mathcal{C}$ be the longest connected subchain of $-2$-curves in $exc(W/P)$. Run the classical MMP on $W$ to get a model $W'$ such that there is a $-1$-curve $F$ on $W'$ s.t. it is the first $-1$-curve that intersects the chain $\mathcal{C}$ (if there is no such $W'$ and $F$ then $\mathcal{C}$ should consist of a single curve). We have two cases: 1. $F$ intersects, transversally and in one point, only one curve in $\mathcal{C}$ say $E$. First suppose that $E$ is a middle curve i.e. there are $E'$ and $E''$ in the chain which both intersect $E$. Now contract $F$ so $E$ becomes a $-1$-curve. Then contract $E$ and then $E'$ and then all those which are on the side of $E'$. In this case by contracting each curve we increase $E''^2$ by one. And so $E''$ will be a divisor on $S$ in $B_{S}^+$ with high self-intersection. By the lemma above there can be only a bounded number of curves in $\mathcal{C}$ on the side of $E'$. Similarly there are only a bounded number of curves on the side of $E''$. So we are done in this case. Now suppose that $E$ is on the edge of the chain and intersects $E'$. Let $t_{E}$ and $t_{F}$ be the coefficients of $E$ and $F$ in $B_{W}^+$ and similarly for other curves. Let $h$ be the intersection number of $F$ with the curves in $B_{W'}^+$ except those in $\mathcal{C}$ and $F$ itself. Now we have \[ 0=(K_{W'}+B_{W'}^+).F= t_{E}+h-1-t_{F}\] So  $h=1+t_{F}-t_{E}$. If $h\neq 0$ then $F$ intersects some other curve not in the chain $\mathcal{C}$. By contracting $F$ then $E$ and then other curves in the chain we get a contradiction again. Now suppose $h=0$ i.e. $t_{E}=1$ and $t_{F}=0$. 
In this case let $x$ be the intersection of $E$ with the curves in $B_{W'}^+$ except those in $\mathcal{C}$. So we have \[ 0=(K_{W'}+B_{W'}^+).E= -2t_{E}+t_{E'}+x\] So $x=2t_{E}-t_{E'}>0$ and similarly we again get a contradiction. 2. Now assume that $F$ intersects the chain in more than one curve or intersects a curve with intersection number more than one. Suppose the chain $\mathcal{C}$ consists of $E_{1}, \dots, E_{s}$ and $F$ intersects $E_{j_{1}}, \dots, E_{j_{l}}$. Note that $l$ is bounded. If $F.E_{j_{k}}>1$ for all $0\leq k \leq l$ then contract $F$. So $E_{j_{k}}^2\geq 0$ after contraction of $F$ and they will not be contracted later and so they appear in the boundary $B_{S}^+$. Now replace $\mathcal{C}$ with longest connected subchain when we disregard all $E_{j_{k}}$. Now go to step one again and if it doesn't hold come back to step two and so on. Now suppose $F.E_{j_{k}}=1$ for some $k$. So $F$ should intersect at least another $E_{j_{t}}$ where $t=k+1$ or $t=k-1$. Now contract $F$ so $E_{j_{k}}$ becomes a $-1$-curve and would intersect $E_{j_{t}}$. Contracting  $E_{j_{k}}$ and possible subsequent $-1$-curves will prove that there are a bounded number of curves between $E_{j_{t}}$ and $E_{j_{k}}$. Now after contracting $E_{j_{k}}$ and all other curves between $E_{j_{t}}$ and $E_{j_{k}}$ we will have $E_{j_{m}}^2\geq 0$ for each $m\neq k$. So again we take the longest connected subchain excluding all $E_{j_{t}}$. And repeat the procedure. It should stop after a bounded number of steps because the number of curves in $B_{S}^+$ is bounded. This boundedness implies that there are only a bounded number of possibilities for the coefficients in $B_{W}$ where $K_{W}+B_{W}={^*K_{X}}$. By Borisov-McKernan $W$ belongs to a bounded family and so complements would be bounded. Here the proof of the non-exceptional case finishes and from now on we assume that $(X,0)$ is exceptional. Let $W\longrightarrow X$ be a minimal resolution. 
Let $0<\tau<\frac{1}{2}$ be a number and the minimal log discrepancy of $(X,0)$ be $a=\mld(X,0)$. If $a\geq \frac{1}{2}+\tau$ then we know that $X$ belongs to a bounded family according to step 1 in the proof of theorem \ref{weak-2dim} above. So we assume $a<\frac{1}{2}+\tau$ and then blow up all exceptional/$X$ curves $E$ with log discrepancy $a_{E}=a(E,X,0)\leq \frac{1}{2}+\tau$ to get $Y\longrightarrow X$ and put $K_{Y}+B_{Y}={^*K_{X}}$. Fix $E_1$, one of these exceptional divisors. Let $t\geq 0$ be a number such that there is an extremal ray $R$ such that $(K_{Y}+B_{Y}+tE_1).R=0$ and $E_1.R>0$ (and s.t. $K_{Y}+B_{Y}+tE_1$ is Klt and antinef). Such $R$ exists, otherwise there is a $t>0$ such that $K_{Y}+B_{Y}+tE_1$ is lc (and not Klt) and antinef. This is a contradiction by [Sh2, 2.3.1]. Now contract $R: Y\longrightarrow Y_1$ if it is of birational type. Again by increasing $t$ there will be an extremal ray $R_1$ on $Y_1$ such that $(K_{Y_1}+B_{Y_1}+tE_1).R_1=0$ and $E_1.R_1>0$ (preserving the nefness of $-(K_{Y_1}+B_{Y_1}+tE_1)$). If it is of birational type then contract it and so on. After finitely many steps we get a model $(V_1, B_{V_1}+t_1E_1)$ and a number $t_1>0$ with the following possible outcomes: \begin{equation}\label{division} \end{equation} \begin{description} \item[$\diamond$] $ (V_1, B_{V_1}+t_1 E_1)$ is Klt, $\rho(V_1)=1$ and $K_{V_1}+B_{V_1}+t_1 E_1\equiv 0$. \item[$\diamond$] $ (V_1, B_{V_1}+t_1 E_1)$ is Klt and $\rho(V_1)=2$ and there is a non-birational extremal ray $R$ on $V_1$ such that $(K_{V_1}+B_{V_1}+t_1 E_1).R=0$. Moreover $K_{V_1}+B_{V_1}+t_1 E_1$ is antinef. \end{description} Note that for each element $E\in exc(Y/X)$, either $E$ is a divisor on $V_1$ or it is contracted to a point in the support of $E_1$.
\betagin{lem}\label{3-5-C} For any $h>0$ there is an $\eta>0$ such that if $(T, B)$ is a $\deltalta$-lc pair ($\deltalta$ is already fixed) with a component $C$ of $B$ passing through $P\in T$, with a coefficient $t \geq h$, then either $K_{T}$ is $\deltalta +\eta$-lc at $P$ or $1-a_{E}>\eta$ for each exceptional divisor $E/P$ on a minimal resolution of $T$ ($a_E=$ log discrepancy of $(T,B)$ at $E$). \end{lem} \betagin{proof} If $P$ is smooth or has $E_{6}, E_{7}$, $E_{8}$ or $D_{r}$ type of singularity then the lemma is clear since the index of $K_T$ at $P$ is bounded in all these cases (see the local isomorphism subsection). In order in all these cases there will be an $\eta>0$ such that $K_T$ is $\deltalta +\eta$-lc at $P$. Now suppose that the singularity at $P$ is of type $A_{r}$. Take a minimal resolution $W_T\longrightarrow T$ with $exc(W_T/P)=\{E_1, \dots, E_r\}$ (notation as in the local isomorphic subsection) and suppose that $j$ is the maximal number such that $\mld(P,T,0)=a'_j$ ( here we show the log discrepancy of $(T,0)$ at $E_{*}$ as $a'_{*}$) for an exceptional divisor $E_{j}/P$. Actually we may assume that $r-j$ is bounded. By the local isomorphic subsection , the distance of $E_{j}$ from one of the edges of $exc(W_T/P)$ is bounded. We denote the birational transform of $C$ on $W_T$ again by $C$. Suppose $C$ intersects $E_k$ in $exc(W_T/P)$. If $k\neq 1$ or $r$ then we have $(-E_{k}^2)a_{k}-a_{k-1}-a_{k+1}+x=0$ where $a_{*}$ shows the log discrepancy of the pair $(T,B)$ at $E_{*}$ and $x\geq h$ a number. So either $a_{k-1}-a_{k}\geq \frac{h}{2}$ or $a_{k+1}-a_{k}\geq \frac{h}{2}$. In either case the distance of $E_{k}$ is bounded from one of the edges of $exc(W_T/P)$. If this edge is the same edge as for $E{j}$ then again the lemma is clear since the coefficients of $E_{k}$ and $E_{j}$ in $^*C$ (now $C$ is on $T$ and $^*C$ on $W_T$) are bounded from below (in other words they are not too small). Now assume the otherwise i.e. 
$E_{k}$ and $E_{j}$ are close to different edges.  In this case we claim that the coefficients of the members of $exc(W_T/P)$ in $\overline{B}_{W_T}$, where $K_{W}+\overline{B}_{W_T}={^*(K_{T}+tC)}$, are bounded from below. Suppose that the smallest coefficient occurs at $E_{m}$. Simple calculation shows that we can assume that $E_{m}$ is one of the edges of $exc(W_T/P)$. Hence $E_{m}$ is with a bounded distance from $E_{j}$ or from $E_{k}$. Suppose that $E_{m}$ is with a bounded distance from $E_{j}$. If $a'_j\geq \frac{1+\deltalta}{2}$ then $K_T$ is $\frac{1+\deltalta}{2}$-lc at $P$. So we can assume that $a'_j<\frac{1+\deltalta}{2}$. We prove that all the numbers $1-a'_j, \dots, 1-a'_r$ are bounded from below. In order, if $1<j<r$ then $(-E_{j}^2)a'_{j}-a'_{j-1}-a'_{j+1}=0$ (note that $-E_{j}^2>2$ in this case). Now if $a'_{j-1}-a'_{j}\geq \frac{\deltalta}{2}$ then the chain will be bounded and so the index of $K_T$ at $P$. But if $a'_{j+1}-a'_{j}\geq \frac{\deltalta}{2}$ then $a'_{r}-a'_{r-1}\geq \frac{\deltalta}{2}$ and so $(-E_{r}^2-1)a'_{r}=1-(a'_{r}-a'_{r-1})\leq 1-\frac{\deltalta}{2}$. Hence if $m=r$ then we are done. But if $m=1$ then again the whole chain is bounded and so the index of $K_T$ at $P$. Now if $j=r$ then again the chain is bounded if $m=1$ and $a'_m=a'_j=a'_r<\frac{1+\deltalta}{2}$ if $m=r$. In the second case i.e. if $E_m$ is with a bounded distance from $E_{k}$ then the coefficient of $E_m$ in $^*C$ on $W$ is bounded from below. $\Box$ \end{proof} \betagin{lem}\label{3-5-I} For any $h>0$ there is a $\gammamma>0$ such that if $(T, B)$ is a $\deltalta$-lc pair ($\deltalta$ is already fixed), WLF with a component $C$ of $B$ passing through $P\in T$ and $t\geq h$ where $t$ is the coefficient of $C$ in $B$, then $K_{T}$ is $\deltalta +\gammamma$-lc. 
\end{lem} \begin{proof} As discussed in lemma \ref{3-5-C} we may assume that the singularity at $P$ is of type $A_r$ and $1-a_{k}>\eta$ for some fixed number $\eta>0$ where $a_k$ is the log discrepancy of the pair $(T,B)$ at any exceptional divisor $E_{k}/P$ on $W_T$ where $W_T\longrightarrow T$ is a minimal resolution (we put $exc(W_T/P)=\{E_1, \dots, E_r\}$). Let $\mathcal{C}$ be the longest connected sub-chain of $-2$-curves in $exc(W_T/P)$ and $W_{1}$ a model where $\mathcal{C}$ is intersected by a $-1$-curve $F$ for the first time i.e. we blow down $-1$-curves on $W_T$ till we get a model $W_1$ and a morphism $W_T\longrightarrow W_{1}$ such that $W_1$ is the first model where there is a $-1$-curve $F$ intersecting $\mathcal{C}$ (on $W_1$). Let $K_{W_T}+{B^+}\equiv 0$ be a (lc) $\mathbb{Q}$-complement of $K_{W_T}+B_{W_T}$. Assume that $F$ intersects $E_{j}$ in $\mathcal{C}$ and let $t_{E_{j}}$ and $t_{F}$ be the coefficients of $E_{j}$ and $F$ in $B^+$ on $W_T$ (similar notation for the coefficients of other exceptional divisors). Then an argument as in the proof of the non-exceptional case gives a contradiction: 1. Suppose $F$ intersects, transversally and in one point, only one curve in $\mathcal{C}$ (which is $E_j$). First suppose that $E_{j}$ is a middle curve i.e. there are $E_{j-1}$ and $E_{j+1}$ in $\mathcal{C}$ which both intersect $E_{j}$. Now contract $F$ so $E_{j}$ becomes a $-1$-curve. Then contract $E_{j}$ and then $E_{j-1}$ and then all those which are on the side of $E_{j-1}$. By contracting each curve we increase $E_{j+1}^2$ by one. If we continue contracting $-1$-curves we get $S$ ($S=\mathbb{P}^2$ or a ruled surface with no $-1$-curve) where $E_{j+1}$ is a component of $B_{S}$. By lemma \ref{3-5-E} there can be only a bounded number of curves in $\mathcal{C}$ on the side of $E_{j-1}$. Similarly there are only a bounded number of curves in $\mathcal{C}$ on the side of $E_{j+1}$. So we are done in this case.
Now suppose that $E_{j}$ is on the edge of the chain $\mathcal{C}$ and it intersects $E_{j-1}$. Let ${B^+}_{W_{1}}=\dot{B^+}+M$ ($M$ and $\dot{B^+}$ with no common component) where each component of $\dot{B^+}$ is either $F$ or an element of $\mathcal{C}$. Now we have \[ 0= (K_{W_{1}}+{B^+}_{W_{1}}).F= t_{E_{j}}-1-t_{F}+(M.F)\] So $M.F= 1+t_{F}-t_{E_{j}}$. Similarly let ${B^+}_{W_{1}}=\ddot{B^+}+N$ ( $N$ and $\ddot{B^+}$ with no common component) where each component of $\ddot{B^+}$ is either $F$ or an element of $\mathcal{C}$. Then we have \[ 0= (K_{W_{1}}+{B^+}_{W_{1}}).E_{j}= -2t_{E_{j}}+t_{E_{j-1}}+t_{F}+(N.E_{j})\] and so $t_{E_{j}}=t_{E_{j-1}}-t_{E_{j}}+t_{F}+(N.E_{j})>\eta$. Hence $ t_{E_{j-1}}-t_{E_{j}}>\frac{\eta}{3}$ or $t_{F}>\frac{\eta}{3}$ or $(N.E_{j})>\frac{\eta}{3}$. If $t_{F}>\frac{\eta}{3}$ then by contracting $F$ we increase $M^2$ at least by $(M.F)^2\geq t_{F}^2> (\frac{\eta}{3})^2$. We have the same increase when we contract $E_{j}$ and then $E_{j-1}$ and so on. So lemma \ref{3-5-E} shows the boundedness of $\mathcal{C}$. If $(N.E_{j})>\frac{\eta}{3}$ then proceed similar to the last paragraph. If $ t_{E_{j-1}}-t_{E_{j}}>\frac{\eta}{3}$ then $ t_{E_{j-1}}>t_{E_{j}}+\frac{\eta}{3}$. This implies that $t_{E_{j}}\leq 1-\frac{\eta}{3}$ then $M.F\geq \frac{\eta}{3}$ and so we continue as above. 2. Now assume that $F$ intersects $\mathcal{C}$ in more than one curve or intersects a curve in $\mathcal{C}$ with intersection number more than one. Suppose the chain $\mathcal{C}$ consists of $E_{s}, \dots, E_{u}$ and $F$ intersects $E_{j_{1}}, \dots, E_{j_{l}}$. Note that $l$ is bounded. If $F.E_{j_{k}}>1$ for all $1\leq k \leq l$ then contract $F$. So $E_{j_{k}}^2\geq 0$ after contraction of $F$ and hence $E_{j_{k}}$ can not be contracted and so it appears in the boundary on a ``minimal'' model $S$ (i.e. $S$ is the projective plane or a smooth ruled surface with no $-1$-curve). 
Replace $\mathcal{C}$ with its longest connected subchain when we disregard all $E_{j_{k}}$. From here we can go back to step one and repeat the argument. Now suppose $F.E_{j_{k}}=1$ for some $k$. So $F$ should intersect at least another $E_{j_{q}}$ where $q=k+1$ or $q=k-1$. Now contract $F$ so $E_{j_{k}}$ becomes a $-1$-curve and would intersect $E_{j_{q}}$. Contracting $E_{j_{k}}$ and possible subsequent $-1$-curves will prove that there are only a bounded number of curves between $E_{j_{q}}$ and $E_{j_{k}}$ in $\mathcal{C}$. Now after contracting $E_{j_{k}}$ and all other curves between $E_{j_{q}}$ and $E_{j_{k}}$ we will have $E_{j_{m}}^2\geq 0$ for each $m\neq k$. So again we take the longest connected subchain excluding $E_{j_{1}}, \dots, E_{j_{l}}$ and go back to step one. This process should stop after a bounded number of steps because the number of curves in $B_{S}^+$ with coefficient $>\eta$ is bounded ($S$ is again a ``minimal'' model). To prove this latter boundedness note that $(K_{S}+{B^+}_{S}).F=0$, where we assume that $S$ is a ruled surface and $F$ a fibre, implies that there are only a bounded number of non-fibre components in $B_{S}^+$ with coefficient $>\eta$. Let $L$ be a section and $t_L$ be its coefficient in $B_{S}^+$ and $F_i$ fibre components of $B_{S}^+$ with $t_{F_i}>\eta$. So \[0 \geq (K_{S}+t_{L}L+\sum_{i}t_{F_i}F_i).L= (-2L+(2g-2-e)F+t_{L}L+\sum_{i}t_{F_i}F_i).L\]\[=-t_Le+e+2g-2+\sum_{i}t_{F_i}\] which proves that there are a bounded number of $F_i$ ($L^2=-e$ and $e+2g\geq 0$ if $e<0$). So the chain $\mathcal{C}$ should have a bounded length. This implies that if we throw $C$ away in the boundary $B$ then the mld at $P$ will increase at least by $\gamma >0$ a fixed number (which depends neither on $P$ nor on $T$). This proves the lemma. $\Box$ \end{proof} Lemma \ref{3-5-I} settles the first case in \ref{division} by deleting the boundary $B_{V_1}$. Now assume the second case in the division above in \ref{division}.
Let $F$ be a general fibre of the contraction defined by the extremal ray $R$. If the other extremal ray of $V_1$ defines a birational map $V_1\longrightarrow Z$ (otherwise delete the boundary and use \ref{3-5-I} ) then let $H$ be the exceptional divisor of this contraction. If $K_{V_1}$ is antinef then again use \ref{3-5-I}. If $K_{V_1}$ is not antinef and if $E_1\neq H$ then apply lemma \ref{3-5-I} to $(Z,B_Z)$. Boundedness of $Z$ implies the boundedness of $V_1$ and so we can apply lemma \ref{bound-1}. But if $K_{V_1}$ is not antinef and $E_1=H$ then perform a hat of the third type as defined in the proof of theorem \ref{weak-2dim} where $(U,G_U):=(V_1, B_{V_1}+t_1E_1)$ and $V_2:=U'$. We can use lemma \ref{3-5-I} on $V_2$ or after contracting a curve on $V_2$, in order, to get the boundedness of $V_2$. Boundedness of $V_2$ implies the boundedness of $V_1$. $\Box$ \end{proof} \betagin{cor} Conjecture $WC_{\deltalta, 2,\Gammamma_f}$ holds in the global case where $\Gammamma_f$ is a finite subset of rational numbers in $[0,1)$. \end{cor} \betagin{proof} Obvious by theorem \ref{3-5-H}. \end{proof} \subsection{An example} \betagin{exa} Let $m$ be a positive natural number. For any $1>\eta >0$ and any $\tau > 0$ there is a model $(X,0)$ (may not be global) satisfying the followings: \betagin{enumerate} \item $X$ is $\frac{1}{m}$-lc. \item Suppose $Y \longrightarrow X$ is a partial resolution such that $K_{Y}+B_{Y}={^*K_{X}}$ is $\frac{1}{m}+\eta$-lc and $b_{i}>\frac{m-1}{m}-\eta$. Put $D=\sum \frac{m-1}{m}B_{i}$. \item $K_{Y}+D$ is not $\frac{1}{m}+\tau$-lc. \end{enumerate} \end{exa} \betagin{proof} Let $P\in X$ and $X$ smooth outside $P$. Suppose the minimal resolution of $P$ has the following diagram: \betagin{displaymath} \xymatrix{O^{-3} \ar@{-}[r]& O^{-2} \ar@{-}[r]& \dots &\ar@{-}[r]& O^{-2}\ar@{-}[r] &O^{-2}\ar@{-}[r] & O^{-4}} \end{displaymath} where the numbers show the self-intersections. 
This diagram has the following corresponding system on a minimal resolution where $a_{i}$ stand for the log discrepancies: \[ 3a_{1}-a_{2}-1=0\] \[2a_{2}-a_{1}-a_{3}=0\] \[\vdots\] \[2a_{r-1}-a_{r-2}-a_{r}=0\] \[4a_{r}-a_{r-1}-1=0\] Now put $a_{r-1}-a_{r}=t$ so $a_{r-2}-a_{r-1}=t$, $\dots$, $a_{1}-a_{2}=t$. So we have: $a_{r}=\frac{1+t}{3}$ and $a_{1}=\frac{1-t}{2}$. The longer the chain is the smaller the $t$ is and the discrepancies vary from $-\frac{1+t}{2}$ to $\frac{t-2}{3}$. Other $a_{i}$ can be calculated as $a_{i}=a_{1}-(i-1)t=\frac{1-t}{2}-(i-1)t=\frac{1-(2i-1)t}{2}$. Suppose $j$ is such that $a_{j}<\frac{1}{m}+\eta$ but $a_{j-1}\geq \frac{1}{m}+\eta$. So the exceptional divisors corresponding to $a_{r}, a_{r-1}, \dots, a_{j}$ will appear on $Y$ but others not. Now we try to compute the log discrepancies of the pair $(Y, D)$. In order the minimal resolution for $P\in X$ is also the minimal resolution for $Y$. But here just $E_{1}, \dots, E_{j-1}$ are exceptional/$Y$. The system for the new log discrepancies (for $(Y, D)$) is as follows: \[3a'_{1}-a'_{2}-1=0\] \[2a'_{2}-a'_{1}-a'_{3}=0\] \[\vdots\] \[2a'_{j-2}-a'_{j-3}-a'_{j-1}=0\] \[2a'_{j-1}-a'_{j-2}-\frac{1}{m}=0\] Again put $a'_{j-2}-a'_{j-1}=s$ so similarly we have $a'_{j-1}=\frac{1}{m}+s$ and $a'_{1}=\frac{1-s}{2}$. If $j$ is big (i.e. if t is small enough) then $s$ would be small and so $a'_{j-1}=\frac{1}{m}+s <\frac{1}{m}+\tau$. Hence $(Y, D)$ is not $\frac{1}{m}+\tau$-lc. $\Box$ \end{proof} \subsection{Local cases revisited} Using the methods in the proof of the global case, we give a new proof of the local cases. Here again by $/Z$ we mean $/P\in Z$ for a fixed $P$. The following is the main theorem in this subsection. \betagin{thm}\label{4-A'} Conjecture $WC_{\deltalta, 2,{\Phi_{sm}}}$ holds in the local case i.e. when we have $\dim Z\geq 1$. \end{thm} \betagin{proof} Our proof is similar to the non-exceptional global case. Here the pair $(X/Z,B)$ is a relative WLF surface log pair (i.e. 
$-(K_X+B)$ is nef and big/$Z$), where $(X,B)$ is $\deltalta$-lc and $B\in \Phi_{sm}$. Fix $P\in Z$. Then there exists a regular $(0,n)$-complement/$P\in Z$, $K+B^+$ for some $n\in\{1,2,3,4,6\}$ by [Sh2]. \betagin{enumerate} \item Remember the first step in the proof of theorem \ref{weak-2dim}. \item Remember definition \ref{D-tau} and lemma \ref{transform}. Let $m$ be the smallest number such that $\frac{1}{m}\leq \deltalta$. Let $h=\min\{\frac{k-1}{k}-\frac{u}{r!}>0\}_{1\leq k\leq m} $ where $u,k$ are natural numbers and $r=\max\{m,6\}$. Now choose a $\tau$ for $m$ as in lemma \ref{transform} such that $\tau <h$. Blow up one exceptional divisor $E/P$ via $f:Y\longrightarrow X$ such that the log discrepancy satisfies $\frac{1}{k}\leq a(E,X,B)\leq \frac{1}{k}+\tau$ for some $k$ (if such $E$ doesn't exist then go to step 1). The crepant log divisor $K_Y+B_Y$ is $\frac{1}{m}$-lc and so by the choice of $\tau$, $K_Y+D_{\tau}$ is also $\frac{1}{m}$-lc ($D_{\tau}$ is constructed for $B_Y$). Let $K_Y+B_{Y}^+$ be the crepant blow up of $K_X+B^+$. Then again by the way we chose $\tau$ we have $D_{\tau}\leq B_{Y}^+$. Now run the anti-LMMP/$P\in Z$ over $K_Y+D_{\tau}$ i.e. contract any birational type extremal ray $R$/$P\in Z$ such that $(K_Y+D_{\tau}).R> 0$. At the end we get a model $X_1$ with one of the following properties: \betagin{description} \item[$\diamond$] $(K_{X_1}+D_{\tau})\equiv 0/P\in Z$ and $K_{X_1}+D_{\tau}$ is $\frac{1}{m}$-lc. \item[$\diamond$] $-(K_{X_1}+D_{\tau})$ is nef and big/$P\in Z$ and $K_{X_1}+D_{\tau}$ is $\frac{1}{m}$-lc. \end{description} where $K_{X_1}+D_{\tau}$ is the birational transform of $K_{Y}+D_{\tau}$ and let $g:Y\longrightarrow X_1$ be the corresponding morphism. The nefness of $-(K_{X_1}+D_{\tau})$ comes from the fact that $D_{\tau}\leq B_{1}^+$. And $K_{X_1}+D_{\tau}$ is $\frac{1}{m}$-lc by applying lemma \ref{transform}. 
\item Whichever case occurs above, to construct a complement, it is enough to bound the index of $K_{X_1}+D_{\tau}/P$. \item Let $C$ be a curve contracted by $g:Y\longrightarrow X_1$. If $C$ is not a component of $B_Y$ then the log discrepancy of $C$ with respect to $K_{X_1}+B_{X_1}$ is at least 1 where $K_{X_1}+B_{X_1}$ is the birational transform of $K_{Y}+B_{Y}$. Moreover $g(C)\in \Supp B_{X_1}\neq \emptyset$. So the log discrepancy of $C$ with respect to $K_{X_1}$ is more than 1. This means that $C$ is not a divisor on a minimal resolution $W_1\longrightarrow X_1$. Let $W\longrightarrow X$ be a minimal resolution. Then there is a morphism $W\longrightarrow W_1$. Hence $exc(W_1/X_1)\subseteq exc(W/X)\cup \Supp (B=B_X)$. Now if $C\in exc(W/X)\cup \Supp B$ is contracted by $g$ then $a(C,X_1,D_{\tau})<a(C,X,B)$. \item Let $(X_1,B_1):=(X_1,D_{\tau})$ and repeat the process. In other words again we blow up one exceptional divisor $E$ via $f_1:Y_1\longrightarrow X_1$ such that the log discrepancy satisfies $\frac{1}{k}\leq a(E,X_1,B_1)\leq \frac{1}{k}+\tau$ for some natural number $k>1$. The crepant log divisor $K_{Y_1}+B_{1,Y_1}$ is $\frac{1}{m}$-lc and so by lemma \ref{transform} $K_{Y_1}+D_{1,\tau}$ is $\frac{1}{m}$-lc. Note that the point which is blown up on $X_1$ can not be smooth since $\tau <h$ as defined above. So according to the last step the blown up divisor $E$ is a member of $exc(W/X)\cup \Supp B$. Now we again run the anti-LMMP on $K_{Y_1}+D_{1,\tau}$ and proceed as in step 2. $$ \xymatrix{ W \ar[d]\ar[r] & W_1 \ar[d]\ar[r] & W_2 \ar[d]\ar[r] & \dots \\ Y \ar[d]^{f}\ar[rd]^{g} & Y_1\ar[d]^{f_1}\ar[rd]^{g_1} & Y_2 \ar[d]\ar[rd] &\dots\\ X \ar[rd]& X_1 \ar[d]& X_2 \ar[ld]& \dots \\ &Z&& \\ }$$ \item Steps 4,5 show that each time we blow up a member of $exc(W/X)\cup \Supp B$ say $E$. And if we blow that divisor down in some step then the log discrepancy $a(E,X_j,B_j)$ will decrease. 
That divisor will not be blown up again unless the log discrepancy drops at least by $\frac{1}{2(m-1)}-\frac{1}{2m}$ (this is not a sharp bound). So after finitely many steps we get a model $X_i$ with a standard boundary $B_i$ such that there is no $E/P$ where $\frac{1}{k}\leq a(E,X_i,B_i)\leq \frac{1}{k}+\tau$ for any $1<k\leq m$. Hence the index of $-(K_{X_i}+B_{i})/P$ is bounded and so we can construct an appropriate complement for $(X_i,B_i)/Z$. This implies the existence of the desired complement for $(X,B)/Z$. \end{enumerate} $\Box$ \end{proof} \section{$\varepsilonsilon$-log canonical complements in higher dimensions} In this section we consider the $(\varepsilonsilon, n)$-lc complements in higher dimensions i.e. in dimensions more than two. This is a joint work in progress with V.V. Shokurov. In subsection 2.1 we try to work out the proof of theorem \ref{weak-2dim} in dim 3 and we point out the problems we have to solve in order to finish the proof of conjecture \ref{weak} in dim 3 (this is the plan of the author). In subsection 2.2 we outline Shokurov's plan on the same problem. These plans have already won an EPSRC three years postdoctoral fellowship by the author. Let $X\longrightarrow Z$ be an extremal $K_X$-negative contraction where $X$ is a 2-dim Pseudo-WLF and $Z$ is a curve. We know that $Z\simeq \mathbb{P}^1$ since $Z$ should be rationally connected as $X$ is. Moreover $\rho(X)=2$. Similar Mori fibre spaces in higher dimensions are not that simple. This makes the boundedness problem of $(\varepsilonsilon, n)$-lc complements more difficult in higher dimensions. We also don't know yet whether the index of $K_X+B$ will be bounded if we fix the mld at a point. In section 1 we first proved the boundedness of $\varepsilonsilon$-lc complements and then the BAB. But in higher dimensions we expect to prove both problems together at once. 
In other words in some cases where it is difficult to prove the boundedness of varieties, it seems easier to prove the boundedness of complements; specially when we deal with a fibre space. Conversely when it is difficult to prove the boundedness of $\varepsilonsilon$-lc complements, it is better to prove the boundedness of pairs; this is usually the case when the pairs are exceptional. \betagin{lem}\label{pre-2} Let $X\dasharrow X'$ be a flip/$Z$ and assume that $(X,B)$ is $(\varepsilonsilon,n)$-complementary/$Z$ then $(X',B')$ is $(\varepsilonsilon,n)$-complementary/$Z$ where $B'$ is the birational transform of $B$. \end{lem} \betagin{proof} Obvious from the definition of $(\varepsilonsilon,n)$-complements. \end{proof} Note that in the previous lemma it doesn't matter that the flipping is with respect to which log divisor. \betagin{lem}\label{pre-3} Let $(Y,B)$ be a pair and $Y\dashrightarrow Y'/Z$ be a composition of divisorial contractions and flips$/Z$ such that in each step we contract an extremal ray $R$ where $(K+B).R\geq 0$. Suppose $B'=\sum {b'}_i{B'}_i$ is the birational transform of $B$, the pair $(Y',B')$ is $(\varepsilonsilon,n)$-complementary/$Z$ and $(n+1){b'}_i\geq n{b'}_i$ for each coefficient ${b'}_i$ then $(Y,B)$ is also $(\varepsilonsilon,n)$-complementary/$Z$. \end{lem} \betagin{proof} Clear by lemmas \ref{pre-1} and \ref{pre-2}. \end{proof} \betagin{lem} The Klt Pseudo-WLF property is preserved under extremal flips and divisorial contractions with respect to any log divisor. \end{lem} \betagin{proof} Let $X$ be a Klt Pseudo-WLF and $B$ a boundary such that $(X,B)$ is a Klt WLF. Now let $X\dasharrow X'$ be an extremal flip corresponding to an extremal ray $R$. Since $(X,B)$ is a Klt WLF then there is a rational boundary $D$ such that $K_X+D$ is antiample and Klt. Now let $H'$ be an ample divisor on $X'$ and $H$ its transform on $X$. There is a rational $t>0$ such that $K_X+D+tH$ is antiample and Klt. 
Now take a Klt $\mathbb{Q}$-complement $K_X+D+tH+A\equiv 0$. So we have $K_{X'}+D'+tH'+A'\equiv 0$ on $X'$. From the assumptions $K_{X'}+D'+A'$ is antiample and Klt. So $X'$ is also a Klt Pseudo-WLF. If $X\longrightarrow X'$ is a divisorial extremal contraction then proceed as in the flip case by taking an ample divisor $H'$ on $X'$. $\Box$ \end{proof} \betagin{defn} Let $(V,B_V)$ and $(U,B_U)$ be lc pairs. $U$ is called a semi-partial resolution of $V$ if there is a partial resolution $(W,B_W)$ of $(V,B_V)$ such that $W$ and $U$ are isomorphic in codim 1. \end{defn} \betagin{defn}[$D$-LMMP]\label{d-lmmp} Let $D$ be an $\mathbb{R}$-Cartier divisor on a normal variety $X$. We say $D$-LMMP holds if the followings hold: \betagin{description} \item[$\diamond$] Any $D$-negative extremal ray $R$ on $X$ can be contracted. And the same holds in the subsequent steps for the birational transform of $D$. \item[$\diamond$] If a $D$-contraction as in the first step is a flipping then the corresponding $D$-flip exists. \item[$\diamond$] Any sequence of $D$-flips terminates. \end{description} \end{defn} If $D:=K+B$ for a lc $\mathbb{R}$-Cartier divisor $K+B$ then we know that $D$-LMMP holds in dim 3 by [Sh5]. \betagin{rem} Let $D$ be an $\mathbb{R}$-Cartier divisor on a variety $X$ of dim $d$ and assume that LMMP holds in dim $d$. Moreover assume that $\betata D\equiv K+B$ for a lc $\mathbb{R}$-Cartier log divisor $K+B$ and $\betata >0$ then the $D$-LMMP holds. Since in this case $D$-LMMP and $K+B$-LMMP are equivalent. \end{rem} \betagin{exa} Let $(X,B)$ be a $d$-dim Klt WLF and suppose LMMP holds in dim $d$ then $-K$-LMMP holds. In order since $(X,B)$ is a Klt WLF then there is a Klt $\mathbb{Q}$-complement $K+B^+\equiv 0$. There is a $t>0$ such that $K+B^++tB^+\equiv tB^+$ is Klt. Since $-K\equiv B^+$ then $-K$-LMMP is equivalent to $B^+$-LMMP and again equivalent to $tB^+$-LMMP. Since $K+B^++tB^+$-LMMP holds so does $-K$-LMMP. 
\end{exa} \subsection{$\varepsilon$-lc complements in dimension 3} In this subsection we propose a plan toward the resolution of conjecture \ref{weak} in dim 3. We repeat the proof of \ref{weak-2dim}, in dim 3, step by step: \begin{enumerate} \item Under the assumptions of conjecture \ref{weak} for $d=3$ and $\Gamma=\{0\}$, first assume that $(X,0)$ is non-exceptional. \item We don't have much information about the accumulation points of mlds in dim 3. Actually we still have not proved ACC in dim 3. As pointed out in the introduction in section 1, only one case of ACC in dim 3 remains to be proved. Remember that Shokurov's program tries to use complements in dim $d-1$ to prove the ACC in dim $d$. So it is reasonable to assume ACC in dim $d-1$. Let us denote the set of accumulation points of mlds of $d$-dim lc pairs $(T,B)$, where $B\in \Gamma$, by $Accum_{d, \Gamma}$. \item We need the inductive version of complements; since $(X,0)$ is not exceptional then it is expected that there is an inductive $(0,n)$-complement $K_X+B^+$ where $n \in \mathcal{N}_{2}$. \item Remember definition \ref{D-tau}. We can similarly define $D_{\tau, A}$ for a boundary $B$, with respect to a real number $\tau\geq 0$ and a set $A\subseteq [0,1]$: \[D_{\tau, A}:=\sum_{b_{i}\notin [a-\tau, a]}b_{i}B_{i}+\sum_{b_{i}\in [a-\tau, a]} aB_{i}\] where in the first term $b_{i}\notin [a-\tau, a]$ for any $a\in A$ but in the second term $a\in A$ is the biggest number satisfying $b_{i}\in [a-\tau, a]$. \begin{defn} Let $A\subseteq [0,1]$ and $(T,B)$ a log pair. We say that $(T,B)$ is $A$-lc if $(T,B)$ is $x$-lc where $x:=1-\sup\{A\}$.
\end{defn} Assuming the ACC in dim 3 a statement similar to lemma \ref{transform} may hold: For any $\gammamma >0$ and finite set $A\subseteq [0,1]$ containing $1-\gammamma$ there is a real number $\tau>0$ such that if $(T,B_T)$ is a 3-fold log pair, $P\in T$, $K_T+B_T$ is $\gammamma$-lc in codim 2 at $P$ and $D_{\tau,A}\in A$ then $K_T+D_{\tau,A}$ is also $\gammamma$-lc in codim 2 at $P$. Moreover we expect to choose a $\tau>0$ such that the followings hold as well: \betagin{itemize} \item If $B_T\in A$ and $E$ the exceptional divisor of a smooth blow up of $T$ then $a(E,T,B_T)\notin [1-a, 1-a+\tau]$ for any $a\in A$. \item If $B_T\in A$ and the pair $(T,B_T)$ non-exceptional then we can refine $\mathcal{N}_2$ such that there is a $(0,n)$-complement $K_T+B_{T}^+$ for some $n\in \mathcal{N}_2$ where $B_T\leq B_{T}^+$. \end{itemize} \item Let $A_1:=\{a_1\}$ where $1-a_1=\max Accum_{3,\{0\}}\cap [0,\deltalta]$. Now blow up all exceptional divisor $E$ such that $a(E,T,B_T)\in [1-a, 1-a+\tau]$ for some $a\in A_1$ to get $f:Y\longrightarrow X$. Construct $D_{\tau, A_1}$ for $B_Y$ where $K_Y+B_Y$ is the crepant pull back. So $(Y,D_{\tau, A_1})$ is $A_1$-lc. Run the $D$-LMMP where $D:=-(K_Y+D_{\tau, A_1})$. At the end we get $Y\dasharrow X_1$ and $X_1\dasharrow S_1$ such that $-(K_{X_1}+D_{\tau,A_1})$ is nef and $\equiv 0/S_1$ and $-(K_{S_1}+D_{\tau,A_1}).R>0$ for any birational type extremal ray $R$. \item There are the following possibilities for the model $S_1$: \betagin{description} \item[$\diamond$] $\rho(S_1)=1$, $-(K_{S_1}+D_{\tau,A_1})=-(K_{S_1}+B^+)\equiv 0$ and $K_{S_1}+D_{\tau,A_1}$ is $A_1$-lc. \item[$\diamond$] There is a fibration type extremal ray $R$ such that , $-(K_{S_1}+D_{\tau,A_1}).R=0$ and $K_{S_1}+D_{\tau,A_1}$ is $A_1$-lc. \item[$\diamond$] $-(K_{S_1}+D_{\tau,A_1})$ is nef and big and $K_{S_1}+D_{\tau,A_1}$ is $A_1$-lc. \end{description} \item In the first case in the division above we are done. 
In the second and third case then replace $(X,0)$ by $(X_1,B_1):=(X_1,D_{\tau, A_1})$ and go back to step one and repeat. By repeating and repeating the process, each time we get new coefficients. In other words we need to replace $A_i$ with $A_{i+1}$ such that $A_i\subseteq A_{i+1}$. We need to prove that $\cup_{i\rightarrow\infty} A_i$ is finite. \item At the end we get a model $(X_r, B_r)$ which is terminal in codim 2. Then we hope to prove the boundedness of the index of $K_{X_r}+B_r$ possibly after some more blow ups and blow downs. This will settle the problem if $-(K_{X_r}+B_r)$ is nef and big. Otherwise we may have a fibration and $K_{X_r}+B_{r}^+=K_{X_r}+B_r+N$ where $N$ is vertical. Then we may replace $N$ by $N'$ and construct a desirable complement $K_{X_r}+B_r+N'$. At the end we need to prove that the boundedness of the complement implies the boundedness of the pairs. \item Now let $(X,0)$ be exceptional. Since $BAB_{1,3,\{0\}}$ holds by [KMMT] then assuming ACC in dim 3, there is a $\tau>0$ such that $BAB_{1-\tau,3,\{0\}}$ also holds. Blow up an exceptional/$X$ divisor $E_1$ with log discrepancy $a_{E_1}=a(E_1,X,0)\leq 1-\tau$ to get $Y\longrightarrow X$ and put $K_{Y}+B_{Y}={^*K_{X}}$. Let $t\geq 0$ be a number such that there is an extremal ray $R$ such that $(K_{Y}+B_{Y}+tE_1).R=0$ and $E_1.R>0$ ( and s.t. $K_{Y}+B_{Y}+tE_1$ Klt and antinef). Such $R$ exists otherwise there is a $t>0$ such that $K_{Y}+B_{Y}+tE_1$ is lc (and not Klt) and antiample. This is a contradiction with the fact that $(X,0)$ is exceptional. Now contract $R: Y\longrightarrow Y_1$ if it is of birational type (and perform the flip if it is a flipping). Again by increasing $t$ there will be an extremal ray $R_1$ on $Y_1$ such that $(K_{Y_1}+B_{Y_1}+tE_1).R_1=0$ and $E_1.R_1>0$ (preserving the nefness of $-(K_{Y_1}+B_{Y_1}+tE_1)$ ). If it is of birational type then contract it and so on. 
After finitely many steps we get a model $(V_1, B_{V_1}+t_1E_1)$ and a number $t_1>0$ with the following possible outcomes: \betagin{description} \item[$\diamond$] $ (V_1, B_{V_1}+t_1 E_1)$ is Klt, $\rho(V_1)=1$ and $K_{V_1}+B_{V_1}+t_1 E_1\equiv 0$. \item[$\diamond$] $ (V_1, B_{V_1}+t_1 E_1)$ is Klt and there is a fibre type extremal ray $R$ on $V_1$ such that $(K_{V_1}+B_{V_1}+t_1 E_1).R=0$ and $K_{V_1}+B_{V_1}+t_1 E_1$ is antinef. \end{description} If the second case occurs then we don't know $\rho(V_1)$ unlike the surface case where $\rho(V_1)=2$. \item In the proof of theorem \ref{weak-2dim} we introduced three types of hat. Here also we can similarly define hats but it is not clear yet how to proceed. \end{enumerate} \subsection{$\varepsilonsilon$-lc complements in dimension 3: Shokurov's approach} Here we explain Shokurov's approach to the problem discussed in 4.1. \betagin{enumerate} \item We know that the $BAB_{1,3,\{0\}}$ holds by [KMMT]. Let $a$ be the smallest positive real number with the following property: $BAB_{a',3,\{0\}}$ holds for any $a'>a$. The idea is to prove that $BAB_{a,3,\{0\}}$ holds and so assuming the ACC in dim 3 we can prove that $a=0$. Now assume that $BAB_{\varepsilonsilon',3,\{0\}}$ holds for any $\varepsilonsilon'>\varepsilonsilon$ where $1>\varepsilonsilon>0$. \item Prove $SC_{\varepsilonsilon,3}$ in the local case. Moreover prove that the local $\varepsilonsilon$-lc complement indexes can be chosen such that there is a $\tau >0$ s.t. if $1-\varepsilonsilon-\tau \leq b\leq 1-\varepsilonsilon$ then $\llcorner (n+1)b\lrcorner \geq n(1-\varepsilonsilon)$ for any local $\varepsilonsilon$-lc complement index $n$. \item Blow up all exceptional divisor $E$ such that $\varepsilonsilon\leq a(E,X,0)\leq \varepsilonsilon+\tau$ to get $f: Y\longrightarrow X$. Then $D_{\tau, \{1-\varepsilonsilon\}}:=\sum_{i}(1-\varepsilonsilon)B_{i}$ where $B_Y=\sum_{i}b_iB_i$ is the crepant pull back boundary. 
Then run the $D$-LMMP for $D:=-(K_Y+D_{\tau, \{1-\varepsilonsilon\}})$. At the end we get $g:Y\dasharrow X_1$ and $X_1\dasharrow S_1$ such that $-(K_{X_1}+D_{\tau, \{1-\varepsilonsilon\}})$ is nef and $\equiv 0/S_1$ and $-(K_{S_1}+D_{\tau, \{1-\varepsilonsilon\}}).R>0$ for any birational type extremal ray $R$. \item There are the following possibilities for the model $S_1$: \betagin{description} \item[$\diamond$] $\rho(S_1)=1$, $K_{S_1}+D_{\tau, \{1-\varepsilonsilon\}}$ is ample and $K_{S_1}+D_{\tau, \{1-\varepsilonsilon\}}$ is $\varepsilonsilon$-lc. \item[$\diamond$] $-(K_{S_1}+D_{\tau, \{1-\varepsilonsilon\}}).R=0$ for a fibre type extremal ray $R$ and the log divisor $K_{S_1}+D_{\tau, \{1-\varepsilonsilon\}}$ is $\varepsilonsilon$-lc. \item[$\diamond$] $-(K_{S_1}+D_{\tau, \{1-\varepsilonsilon\}})$ is nef and big and $K_{S_1}+D_{\tau, \{1-\varepsilonsilon\}}$ is $\varepsilonsilon$-lc. \end{description} \item If the first case happens in the division above then delete the boundary, so $(S_1, 0)$ is $\varepsilonsilon+\tau$-lc and so the pair is bounded by the assumptions. \item \betagin{defn} Let $f: T\longrightarrow Z$ be a contraction and $K_T+B\sim_{\mathbb{R}}0/Z$. Put $D_Z:=\sum_{i}d_iD_i$ where $d_i$ is defined as follows: \[1-d_i=\sup \{c|K_T+B+c{f^*D_i} ~\mbox{is lc over the generic point of $D_i$}\}\] \end{defn} \item If the second case occurs in the division above then we need the following general conjecture, due to Shokurov [PSh1] and Kawamata [K3], which is useful in many situations: \betagin{conj}[Adjunction] Let $(T/Z,B)$ be a lc pair of dim $d$ such that $K_T+B\sim_{\mathbb{R}}0/Z$. Define the unique class $M_Z$ up to $\mathbb{R}$-linear equivalence as $K_T+B\sim_{\mathbb{R}}{^*(K_Z+D_Z+M_Z)}$. Then the followings hold: \betagin{description} \item[Adjunction] We can choose an $M_Z\geq 0$ in its $\mathbb{R}$-linear equivalence class such that $(Z,D_Z+M_Z)$ is lc. \item[Effective adjunction] Fix $\Gammamma_f$. 
Then there is a constant $I\in \mathbb{N}$ depending only on $d$ and $\Gamma_f$ such that $|IM_Z|$ is a free linear system for an appropriate choice of $M_Z$. In addition the following holds \[I(K_T+B)\sim {^*I(K_Z+D_Z+M_Z)}.\] \end{description} \end{conj} It is expected that the effective adjunction implies the boundedness of $S_1$ under our assumptions. \item If the third case occurs in the division above then we need to repeat the process with a bigger $\varepsilon$. We have new coefficients in the boundary. Moreover we need to prove that this process stops after a bounded number of steps. \item If every time the third case happens then at the end we get a pair $(X_r,B_r)$ which is terminal in codim 2 and $-(K_{X_r}+B_r)$ is nef and big. After some more blow ups and blow downs we may prove that the index of $K_{X_r}+B_r$ is bounded. \end{enumerate} \section{{List of notation and terminology}} \begin{tabular*}{12cm}{l l l} ${\mathbf{\mathbb{N}}}$ && \parbox[t]{10cm} {\emph{The set of natural numbers $\{1,2, \dots\}$.}} \\ \\ ${\mathbf{\mathbb{R}^+}}$ && \parbox[t]{10cm} {\emph{The set of positive real numbers. Similar notation for $\mathbb{Q}$.}} \\ \\ {\textbf{dim}} && \parbox[t]{10cm} {\emph{Dimension or dimensional.}} \\ \\ {\textbf{WLF}} && \parbox[t]{10cm}{\emph{Weak log Fano. $(X/Z,B)$ is WLF if $X/Z$ is a projective contraction and $-(K_{X}+B)$ is nef and big/$Z$ and $X$ is $\mathbb{Q}$-factorial.}} \\ \\ {\textbf{Pseudo-WLF}} && \parbox[t]{10cm}{\emph{Pseudo weak log Fano/$Z$ i.e. there is a $B$ where $(X/Z,B)$ is WLF.}}\\ \\ ${\mathbf{\Phi_{sm}}}$ && \parbox[t]{10cm} {\emph{The set of standard boundary coefficients i.e.
$\{\frac{k-1}{k}\}_{k\in \mathbb{N}}\cup \{1\}$.}} \\ \\ ${\mathbf{\Gammamma_f}}$ && \parbox[t]{10cm} {\emph{A finite subset of $[0,1]$.}} \\ \\ ${\mathbf{\mld(\mu,X,B)}}$ && \parbox[t]{10cm}{\emph{The log minimal discrepancy of $(X,B)$ at the centre $\mu$.}} \\ \\ ${\mathbf{index_{P}(D)}}$ && \parbox[t]{10cm}{\emph{The smallest positive natural number $r$ s.t. $rD$ is a Cartier divisor at $P$.}} \\ \\ ${\mathbf{WC_{\deltalta,d,\Gammamma}}}$ &&\parbox[t]{10cm} {\emph{The weak conjecture on the boundedness of $\varepsilonsilon$-lc complements in dim $d$. See \ref{weak}}} \\ \\ $\mathbf{SC_{\deltalta,d}}$ &&\parbox[t]{10cm} {\emph{The strong conjecture on the boundedness of $\varepsilonsilon$-lc complements in dim $d$. See \ref{strong}}} \\ \\ $\mathbf{BAB_{\deltalta,d,\Gammamma}}$ && \parbox[t]{10cm}{\emph{The Alexeev-Borisovs conjecture on the boundedness of $d$-dim $\deltalta$-lc WLF varieties. See \ref{BAB}}} \\ \\ ${\mathbf{LT_{d}}}$ && \parbox[t]{10cm}{\emph{The log termination conjecture in dim $d$. See \ref{lt}}} \\ \\ ${\mathbf{ACC_{d,\Gammamma}}}$ &&\parbox[t]{10cm} {\emph{The ACC conjecture on mlds in dim $d$. See \ref{acc}}} \\ \\ \end{tabular*} \section{{References:}} \betagin{tabular*}{12cm}{l l l} {\textbf{[A1]}} & & \parbox[t]{11cm} {V. Alexeev; {\emph{Boundedness and $K\sp 2$ for log surfaces.}} Internat. J. Math. 5 (1994), no. 6, 779--810.}\\ \\ {\textbf{[A2]}} & & \parbox[t]{11cm} {V. Alexeev; {\emph{Two two dimensional terminations.}} Duke Math. J. 69 (1993), no. 3, 527--545. }\\ \\ {\textbf{[AM]}}& &\parbox[t]{11cm} {V. Alexeev, S. Mori; {\emph{Bounding singular surfaces of general type.}} Algebra, arithmetic and geometry with applications (West Lafayette,IN, 2000), 143--174, Springer, Berlin, 2004.}\\ \\ {\textbf{[Am]}} & & \parbox[t]{11cm} {F. Ambro; {\emph{On minimal log discrepancies.}} Math. Res. Lett. 6 (1999), no. 5-6, 573--580.}\\ \\ {\textbf{[B]}}& & \parbox[t]{11cm} {A.A. 
Borisov; {\emph{Boundedness of Fano threefolds with log-terminal singularities of given index.}} J. Math. Sci. Univ. Tokyo 8 (2001), no. 2, 329--342.}\\ \\ {\textbf{[C]}}& & \parbox[t]{11cm} {A. Corti; {\emph{Recent results in higher-dimensional birational geometry.}} Current topics in complex algebraic geometry (Berkeley, CA, 1992/93), 35--56, Math. Sci. Res. Inst. Publ., 28, Cambridge Univ. Press, Cambridge, 1995. }\\ \\ {\textbf{[C1]}}& & \parbox[t]{11cm} {A. Corti; {\emph{Singularities of linear systems and $3$-fold birational geometry.}} Explicit birational geometry of 3-folds, 259--312, London Math. Soc. Lecture Note Ser., 281, Cambridge Univ. Press, Cambridge, 2000. }\\ \\ {\textbf{[CR]}}& & \parbox[t]{11cm} {A. Corti, M. Reid; {\emph{Explicit birational geometry of 3-folds.}} Edited by Alessio Corti and Miles Reid. London Mathematical Society Lecture Note Series, 281. Cambridge University Press, Cambridge, 2000. }\\ \\ {\textbf{[CPR]}}& & \parbox[t]{11cm} {A. Corti, A. Pukhlikov, M. Reid; {\emph{Fano $3$-fold hypersurfaces.}} Explicit birational geometry of 3-folds, 175--258, London Math. Soc. Lecture Note Ser., 281, Cambridge Univ. Press, Cambridge, 2000.}\\ \end{tabular*} \betagin{tabular*}{12cm}{l l l} {\textbf{[H]}}& & \parbox[t]{11cm} {R. Hartshorne; {\emph{Algebraic geometry.}} Graduate Texts in Mathematics, No. 52. Springer-Verlag, 1977.}\\ \\ {\textbf{[K1]}}& &\parbox[t]{11cm} {Y. Kawamata; {\emph{Boundedness of $\bold Q$-Fano threefolds.}} Proceedings of the International Conference on Algebra, Part 3 (Novosibirsk, 1989), 439--445, Contemp. Math., 131, Part 3, Amer. Math. Soc., Providence, RI, 1992.}\\ \\ {\textbf{[K2]}}&& \parbox[t]{11cm} {Y. Kawamata; {\emph{Termination of log flips for algebraic $3$-folds.}} Internat. J. Math. 3 (1992), no. 5, 653--659.}\\ \\ {\textbf{[K3]}}&& \parbox[t]{11cm} {Y. Kawamata; {\emph{ Subadjunction of log canonical divisors for a subvariety of codimension $2$}}. 
Birational algebraic geometry (Baltimore, MD, 1996), 79--88, Contemp. Math., 207, Amer. Math. Soc., Providence, RI, 1997.}\\ \\ {\textbf{[K4]}}&& \parbox[t]{11cm} {Y. Kawamata; {\emph{ Subadjunction of log canonical divisors. II}}. Amer. J. Math. 120 (1998), no. 5, 893--899. }\\ \\ {\textbf{[K5]}}&& \parbox[t]{11cm} {Y. Kawamata; {\emph{ The number of the minimal models for a $3$-fold of general type is finite}}. Math. Ann. 276 (1987), no. 4, 595--598.}\\ \\ {\textbf{[K6]}}&& \parbox[t]{11cm} {Y. Kawamata; {\emph{ Termination of log flips in dimension 4}}. Preprint. It contained a proof which turned out to be incorrect.}\\ \\ {\textbf{[KMM] }}&& \parbox[t]{11cm} {Y. Kawamata, K. Matsuda, K. Matsuki; {\emph{Introduction to the minimal model problem.}} Algebraic geometry, Sendai, 1985, 283--360, Adv. Stud. Pure Math.,10, North-Holland, Amsterdam, 1987.}\\ \\ {\textbf{[Ko1] }}&& \parbox[t]{11cm} {J. Kollar; {\emph{Singularities of pairs.}} Algebraic geometry---Santa Cruz 1995, 221--287, Proc. Sympos. Pure Math., 62, Part 1, Amer. Math. Soc., Providence, RI, 1997.} \\ \\ \end{tabular*} \begin{tabular*}{12cm}{l l l} {\textbf{[Ko2] }}&& \parbox[t]{11cm} {J. Kollar; {\emph{ Rational curves on algebraic varieties.}} A Series of Modern Surveys in Mathematics [Results in Mathematics and Related Areas. 3rd Series. A Series of Modern Surveys in Mathematics], 32. Springer-Verlag, Berlin, 1996.} \\ \\ {\textbf{[KD] }}&& \parbox[t]{11cm} {J. Demailly, J. Kollar; {\emph{Semi-continuity of complex singularity exponents and Kähler-Einstein metrics on Fano orbifolds.}} Ann. Sci. École Norm. Sup. (4) 34 (2001), no. 4, 525--556. } \\ \\ {\textbf{[KM]}}&& \parbox[t]{11cm} {J. Kollar, S. Mori; {\emph{Birational geometry of algebraic varieties.}} With the collaboration of C. H. Clemens and A. Corti. Translated from the 1998 Japanese original. Cambridge Tracts in Mathematics, 134. Cambridge University Press, Cambridge, 1998.} \\ \\ {\textbf{[KMMT]}}& & \parbox[t]{11cm} {J. Koll\'ar, Y. 
Miyaoka, S. Mori, H. Takagi; {\emph{Boundedness of canonical $\bold Q$-Fano 3-folds.}} Proc. Japan Acad. Ser. A Math. Sci. 76 (2000), no. 5, 73--77.}\\ \\ ${\mathbf{[K^+]}}$& & \parbox[t]{11cm} {J. Koll\'ar and others; {\emph{Flips and abundance for algebraic threefolds.}} Papers from the Second Summer Seminar on Algebraic Geometry held at the University of Utah, Salt Lake City, Utah, August 1991. Astérisque No. 211 (1992). Société Mathématique de France, Paris, 1992. pp. 1--258.}\\ \\ {\textbf{[Mc]}}& & \parbox[t]{11cm} {J. McKernan; {\emph{ Boundedness of log terminal Fano pairs of bounded index.}} ArXiv/math.AG/0205214}\\ \\ {\textbf{[MP]}}& & \parbox[t]{11cm} {J. McKernan, Yu. Prokhorov; {\emph{ Threefold Thresholds.}} ArXiv/math.AG/0205214}\\ \\ {\textbf{[Pr] }}&& \parbox[t]{11cm} { Yu. Prokhorov; {\emph{ Lectures on complements on log surfaces.}} MSJ Memoirs, 10. Mathematical Society of Japan, Tokyo, 2001.}\\ \\ {\textbf{[Pr1] }}&& \parbox[t]{11cm} { Yu. Prokhorov; {\emph{Boundedness of exceptional quotient singularities. }}(Russian) Mat. Zametki 68 (2000), no. 5, 786--789; translation in Math. Notes 68 (2000), no. 5-6, 664--667}\\ \\ \end{tabular*} \begin{tabular*}{12cm}{l l l} {\textbf{[Pr2] }}&& \parbox[t]{11cm} { Yu. Prokhorov; {\emph{Boundedness of nonbirational extremal contractions.}} Internat. J. Math. 11 (2000), no. 3, 393--411. }\\ \\ {\textbf{[PrM] }}&& \parbox[t]{11cm} {D. Markushevich, Yu. Prokhorov; {\emph{Exceptional quotient singularities.}} Amer. J. Math. 121 (1999), no. 6, 1179--1189. }\\ \\ {\textbf{[PrI] }}&& \parbox[t]{11cm} {V.A. Iskovskikh, Yu. Prokhorov; {\emph{ Fano varieties. Algebraic geometry, V, 1--247, Encyclopaedia Math. Sci., 47, Springer, Berlin, 1999.}} }\\ \\ {\textbf{[PSh]}}&& \parbox[t]{11cm} {Yu. Prokhorov; V.V. Shokurov; {\emph{The first fundamental theorem on complements: from global to local.}} (Russian) Izv. Ross. Akad. Nauk Ser. Mat. 65 (2001), no. 6, 99--128; translation in Izv. Math. 65 (2001), no. 
6, 1169--1196.}\\ \\ {\textbf{[PSh1]}}&& \parbox[t]{11cm} {Yu. Prokhorov; V.V. Shokurov; {\emph{Toward the second main theorem on complements: from local to global.}} Preprint 2001.}\\ \\ {\textbf{[R]}}&& \parbox[t]{11cm} {M. Reid; {\emph{Chapters on algebraic surfaces.}} Complex algebraic geometry (Park City, UT, 1993), 3--159, IAS/Park City Math. Ser., 3, Amer. Math. Soc., Providence, RI, 1997.}\\ \\ {\textbf{[R1]}}&& \parbox[t]{11cm} {M. Reid; {\emph{Update on 3-folds.}} Proceedings of the International Congress of Mathematicians, Vol. II (Beijing, 2002), 513--524, Higher Ed. Press, Beijing, 2002. }\\ \\ {\textbf{[R2]}}&& \parbox[t]{11cm} {M. Reid; {\emph{Twenty-five years of $3$-folds---an old person's view.}} Explicit birational geometry of 3-folds, 313--343, London Math. Soc. Lecture Note Ser., 281, Cambridge Univ. Press, Cambridge, 2000. }\\ \\ {\textbf{[R3]}}&& \parbox[t]{11cm} {M. Reid; {\emph{Young person's guide to canonical singularities.}} Algebraic geometry, Bowdoin, 1985 (Brunswick, Maine, 1985), 345--414, Proc. Sympos. Pure Math., 46, Part 1, Amer. Math. Soc., Providence, RI, 1987. }\\ \\ {\textbf{[R4]}}&& \parbox[t]{11cm} {M. Reid; {\emph{The moduli space of $3$-folds with $K=0$ may nevertheless be irreducible.}} Math. Ann. 278 (1987), no. 1-4, 329--334. }\\ \\ \end{tabular*} \begin{tabular*}{12cm}{l l l} {\textbf{[Sh1]}}&& \parbox[t]{11cm} {V.V. Shokurov; {\emph{Three-dimensional log flips.}} With an appendix in English by Yujiro Kawamata. Russian Acad. Sci. Izv. Math. 40 (1993), no. 1, 95--202.}\\ \\ {\textbf{[Sh2]}}&& \parbox[t]{11cm} {V.V. Shokurov; {\emph{Complements on surfaces.}} Algebraic geometry, 10. J. Math. Sci. (New York) 102 (2000), no. 2, 3876--3932.}\\ \\ {\textbf{[Sh3]}}&& \parbox[t]{11cm} {V.V. Shokurov; {\emph{Prelimiting flips.}} Tr. Mat. Inst. Steklova 240 (2003), Biratsion. Geom. Linein. Sist. Konechno Porozhdennye Algebry, 82--219; translation in Proc. Steklov Inst. Math. 2003, no. 
1 (240), 75--213.} \\ \\ {\textbf{[Sh4]}}&& \parbox[t]{11cm} {V.V. Shokurov; {\emph{Letters of a birationalist V: Mld's and termination of log flips}}.} \\ \\ {\textbf{[Sh5]}}&& \parbox[t]{11cm} {V.V. Shokurov; {\emph{$3$-fold log models.}} Algebraic geometry, 4. J. Math. Sci. 81 (1996), no. 3, 2667--2699.}\\ \\ {\textbf{[Sh6]}}&& \parbox[t]{11cm} {V.V. Shokurov; {\emph{Letters of a bi-rationalist. IV. Geometry of log flips.}} Algebraic geometry, 313--328, de Gruyter, Berlin, 2002.}\\ \\ {\textbf{[Sh7]}}&& \parbox[t]{11cm} {V.V. Shokurov; {\emph{A nonvanishing theorem.}} (Russian) Izv. Akad. Nauk SSSR Ser. Mat. 49 (1985), no. 3, 635--651.}\\ \\ {\textbf{[Sh8]}}&& \parbox[t]{11cm} {V.V. Shokurov; {\emph{ACC in codim 2}}. Preprint.}\\ \\ \end{tabular*} \end{document}
\begin{document} \title{Deterministic entanglement of two trapped ions\cite{gov}} \author{ Q.\ A.\ Turchette,\cite{email} C.\ S.\ Wood, B.\ E.\ King, C.\ J.\ Myatt, D.\ Leibfried,\cite{didi} W.\ M.\ Itano, C.\ Monroe, and D.\ J.\ Wineland } \address{ Time and Frequency Division, National Institute of Standards and Technology, Boulder, CO 80303 } \date{\today} \maketitle \begin{abstract} We have prepared the internal states of two trapped ions in both the Bell-like singlet and triplet entangled states. In contrast to all other experiments with entangled states of either massive particles or photons, we do this in a deterministic fashion, producing entangled states {\it on demand} without selection. The deterministic production of entangled states is a crucial prerequisite for large-scale quantum computation. \end{abstract} \pacs{03.65.-w, 03.67.Lx, 3.65.Bz, 42.50.-p} Since the seminal discussions of Einstein, Podolsky, and Rosen, two-particle quantum entanglement has been used to magnify and confirm the peculiarities of quantum mechanics \cite{EPRBohmBell87}. More recently, quantum entanglement has been shown to be not purely of pedagogical interest, but also relevant to computation \cite{Shor97Grover97a}, information transfer \cite{Barenco95}, cryptography \cite{Ekert91} and spectroscopy \cite{Bollinger96,Wineland98}. Quantum computation (QC) exploits the inherent parallelism of quantum superposition and entanglement to perform certain tasks more efficiently than can be achieved classically \cite{Ekert96bGrover98}. Relatively few physical systems are able to approach the severe requirements of QC: controllable coherent interaction between the quantum information carriers (quantum bits or qubits), isolation from the environment, and high-efficiency interrogation of individual qubits. Cirac and Zoller have proposed a {\it scalable} scheme utilizing trapped ions for QC \cite{Cirac95}. 
In it, the qubits are two internal states of an ion; entanglement and computation are achieved by quantum logic operations on pairs of ions involving shared quantized motion. Previously, quantum logic operations were demonstrated between a single ion's motion and its spin \cite{Monroe95}; the requirements of QC have been explored experimentally in related cavity QED systems \cite{Turchette95Maitre97}. In this Letter, we use conditional quantum logic transformations to entangle and manipulate the qubits of two trapped ions. Previous experiments have studied entangled states of photons \cite{Freedman72Fry76Aspect82Aspect82b,Shih88Ou88Ou92Kwiat95Tittel97Bouwmeester97} and of massive particles\cite{Lamehi76,Hagley97,Laflamme97}. These experiments rely in some way on {\it random processes}, either in creation of the entanglement, as in photon cascades \cite{Freedman72Fry76Aspect82Aspect82b}, photon down-conversion \cite{Shih88Ou88Ou92Kwiat95Tittel97Bouwmeester97} and proton scattering \cite{Lamehi76}, or in the random arrival times of atoms in a cavity \cite{Hagley97}. Recent results in NMR of bulk samples have shown entanglement of particle spins \cite{Laflamme97,Chuang98Chuang98aCorey97Corey98} but because pseudo-pure states are selected through averaging over a thermal distribution, the signal is exponentially degraded as the number of qubits is increased. All the above processes are {\it selectable} but are not {\it deterministic} generators of entanglement. By deterministic, we mean that a known and controllable quantum state of (all of) a given set of particles is generated {\it at a specified time} \cite{Law97}. Deterministic entanglement coupled with the ability to store entangled states for future use is crucial for the realization of large-scale quantum computation. 
Ion-trap QC has no fundamental scaling limits; moreover, even the simple two-ion manipulations described here can, in principle, be incorporated into large-scale computing, either by coupling two-ion subsystems via cavities \cite{Cirac97}, or by using accumulators \cite{Wineland98}. In this Letter, we describe the deterministic generation of a state which under ideal conditions is given by \begin{equation} \vert \psi_e(\phi) \rangle = \left[ \frac{3}{5} \du - e^{i\phi} \frac{4}{5} \ud \right] \label{eq:estate} \end{equation} where $\down$ and $\up$ refer to internal electronic states of each ion (in the usual spin-$1/2$ analogy) and $\phi$ is a controllable phase-factor. For $\phi = 0$ or $\pi$, $\ket{\psi_e(\phi)}$ is a good approximation to the usual Bell singlet ($-$) or triplet ($+$) state $\vert \psi_B^\mp \rangle = [\du \mp \ud]/\sqrt{2}$ since $\ns{\psi_B^-}{\psi_e(0)} = \ns{\psi_B^+}{\psi_e(\pi)} = 0.98$ \cite{Hill97}. The fidelity of our experimentally generated state described by density matrix $\rho^\pm$ is $\langle\psi_e(\pi,0)\vert\rho^\pm\vert\psi_e(\pi,0)\rangle\approx \langle\psi_B^\pm\vert\rho^\pm\vert\psi_B^\pm\rangle \approx 0.70$, so that for all practical purposes, we can consider $\rho^\pm$ to be an approximation to the Bell states. We describe a novel means of differentially addressing each ion to generate the entanglement and a state-sensitive detection process to characterize it. The apparatus is described in Ref. \cite{King98}. We confine $^9$Be$^+$ ions in an elliptical rf Paul trap (major axis $\approx 525 \mu$m, aspect ratio 3:2) with a potential applied between ring and end-caps of $ V_0 \cos\Omega_Tt + U_0$ with $\Omega_T/2\pi \approx 238$ MHz, $V_0 \approx 520$ V. The trap is typically operated over the range $12$ V $< U_0 < 17$ V leading to secular frequencies of $(\omega_x,\omega_y,\omega_z)/2\pi = (7.3,16,12.6)$ to $(8.2,17.2,10.1)$ MHz. The ion-ion spacing (along $\hat x$) is $l \approx 2 \mu$m. 
The relevant level structure of $^9$Be$^+$ is shown in Fig.\ \ref{fig:exp}a. The qubit states are the $2s\;^2S_{1/2}\;\vert F = 2, m_F = 2 \rangle \equiv \down$ and $2s\;^2S_{1/2}\;\vert F = 1, m_F = 1 \rangle \equiv \up$ states. Laser beams D1 and D2 provide Doppler precooling and beam D3 prevents optical pumping to the $\vert F=2,m_F = 1 \rangle$ state. The cycling $\down \rightarrow 2p\;^2P_{3/2}\;\vert F = 3, m_F = 3 \rangle$ transition driven by the $\sigma^+$-polarized D2 laser beam allows us to differentiate $\up$ from $\down$ in a single ion with $\approx$90\% detection efficiency by observing the fluorescence rate. Transitions $\down \ket{n} \leftrightarrow \up \ket{n'}$ (where $n,n'$ are vibrational quantum numbers) are driven by stimulated Raman processes from pairs of laser beams in one of two geometries. Two types of transitions are driven: the ``carrier'' with $n'=n$, and the red motional sideband (rsb) with $n'=n-1$ \cite{Meekhof96}. With reference to Fig. \ref{fig:exp}a, the pair of Raman beams R1 $\perp$ R2 has difference-wavevector $\vec{\delta k} \parallel \hat x$ and is used for sideband cooling (to prepare $\dd\ket{0}$), driving the $\hat x$-rsb, and to drive the ``$\hat x$-carrier''. Beam pair R2 $\parallel$ R3 has $\vec{\delta k} \approx 0$ and is not sensitive to motion; this pair drives the ``co-propagating carrier'' transition. Two trapped ions aligned along $\hat x$ have two modes of motion along $\hat x$: the center-of-mass (COM) mode at frequency $\omega_x$ and the stretch mode, at frequency $\omega_{\rm str} = \sqrt{3}\omega_x$ in which the two ions move in opposite directions. We sideband-cool both of these modes to near the ground state, but use the stretch mode on transitions which involve the motion since it is colder (99\% probability of $\ket{n=0}$) than the COM and heats at a significantly reduced rate\cite{King98}. 
The relevant two-ion qubit level structure dressed by the quantized harmonic stretch motion is shown in Fig.\ \ref{fig:exp}b (we leave out the COM for clarity). In general, all four Rabi rates $\Omega_{i\pm}$, $i \in \{1,2\}$ connecting the levels are different and depend on $n$. Fig.\ \ref{fig:exp}b shows the states coupled on the rsb with Rabi frequencies (in the Lamb-Dicke limit) \begin{equation} \Omega_{i+} = \sqrt{n}\;\eta'\Omega_{i} ; \;\; \Omega_{i-} = \sqrt{n+1}\;\eta'\Omega_{i} \label{eq:rsbf} \end{equation} where $\eta' = \eta/\sqrt{2\sqrt{3}}$ is the stretch-mode two-ion Lamb-Dicke parameter (with single-ion $\eta \approx 0.23$ for $\omega_x/2\pi \approx 8$ MHz) and $\Omega_i$ is the carrier Rabi frequency of ion $i$ \cite{Monroe95}. On the carrier, the ions are not coupled and the time evolution is simply that of independent coherent Rabi oscillations with Rabi frequencies $\Omega_i$. On the co-propagating carrier, $\Omega_1 = \Omega_2 \equiv \Omega_c$. In the Cirac-Zoller scheme, each of an array of tightly focused laser beams illuminates one and only one ion for individual state preparation. Here we pursue an alternative technique, based not on $\Omega_i \rightarrow 0$ for all but one ion, but simply on $\Omega_1 \neq \Omega_2$. Differential Rabi frequencies can be used conveniently for individual addressing on the $\hat x$-carrier: for example, if $\Omega_1 = 2\Omega_2$, then ion 1 can be driven for a time $\Omega_1t = \pi$ ($2\pi$-pulse, no spin-flip) while ion 2 is driven for a $\pi$-pulse resulting in a spin-flip. Our technique for differential addressing is to control the ion micro-motion. To a good approximation, we can write \begin{equation} \Omega_i = \Omega_c J_0 (\vert\vec{\delta k}\vert \xi_i) \label{eq:micro} \end{equation} where $J_0$ is the zero-order Bessel function and $\xi_i$ is the amplitude of micro-motion (along $\hat x$) associated with ion $i$, proportional to the ion's mean displacement from trap center. 
The micro-motion is controlled by applying a static electric field to push the ions \cite{Jefferts95} along $\hat x$, moving ion 2 (ion 1) away from (toward) the rf null position, inducing a smaller (larger) Rabi frequency. The range of Rabi frequencies explored experimentally is shown in Fig. \ref{fig:Rabis}a. We determine $\Omega_{1,2}$ by observing the Rabi oscillations of the ions driven on the $\hat x$-carrier. An example with $\Omega_1 = 2\Omega_2$ is shown in Fig.\ \ref{fig:Rabis}b. We detect a fluorescence signal $S(t) = 2P_{\downarrow\downarrow} + (1 + \alpha) P_{\downarrow\uparrow} + (1 - \alpha) P_{\uparrow\downarrow}$ where $P_{kl} = \ns{\psi(t)}{kl}$, $k,l \in \{\uparrow, \downarrow\}$, $\psi (t)$ is the state at time $t$ and $\vert\alpha\vert \ll 1$ describes a small differential detection efficiency due to the induced differential micro-motion. Driving on the $\hat x$-carrier for time $t$ starting from $\dd\ket{0}$, $S(t)$ can be described by \begin{eqnarray} S(t) = 1 &+& (1/2)(1+\alpha) \cos (2\Omega_1t) e^{-\gamma t} \nonumber \\ &+& (1/2)(1-\alpha) \cos (2\Omega_2t) e^{-(\Omega_2/\Omega_1)\gamma t} \label{eq:S} \end{eqnarray} where $\gamma$ allows for decay of the signal \cite{Meekhof96}. The local maximum at $t = 2.4$ $\mu$s on Fig.\ \ref{fig:Rabis}b is the $2\pi:\pi$ point at which ion 1 has undergone a $2\pi$-pulse while ion 2 has undergone a $\pi$-pulse resulting in $\dd\ket{0} \rightarrow \du\ket{0}$. Driving a $\pi:\pi$ pulse on the co-propagating carrier transforms $\du\ket{0}$ to $\ud\ket{0}$ and $\dd\ket{0}$ to $\uu\ket{0}$, completing generation of all four internal basis states of Fig.\ \ref{fig:exp}b. Now consider the levels coupled by the first rsb shown in Fig.\ \ref{fig:exp}b. 
If we start in the state $\ket{\psi(0)} = \du \ket{0}$ and drive on the (stretch mode) rsb for time $t$, \begin{eqnarray} \ket{\psi(t)} &=& -\frac{i\Omega_{2-}}{G} \sin(Gt) \dd \ket{1} \nonumber \\ &+&\left[ \frac{\Omega_{2-}^2}{G^2}\left( \cos Gt - 1 \right) + 1\right] \du \ket{0} \nonumber \\ &+&e^{i\phi}\left[ \frac{\Omega_{2-} \Omega_{1-}}{G^2} \left(\cos Gt - 1 \right)\right] \ud \ket{0} \label{eq:state} \end{eqnarray} where $G = (\Omega_{2-}^2 + \Omega_{1-}^2)^{1/2}$ with $\Omega_{i-}$ from Eq. \ref{eq:rsbf} with $n = 0$. The phase factor $\phi = \vec{\delta k} \cdot \langle \vec x_1 - \vec x_2 \rangle$ depends on the spatial separation of the ions and arises because each ion sees a different phase in the $\hat x$ travelling-wave Raman field. The ion-ion spacing varies by $\delta l\approx 100$ nm over the range of $U_0$ cited above ($\phi = 0$ for $U_0 = 16.3$ V and $\phi = \pi$ for $U_0 = 12.6$ V, with $d\phi/dU_0$ in good agreement with theory). For $Gt = \pi$, the final state is $\psi_e(\phi)$ from Eq.\ \ref{eq:estate}. Note that $\Omega_1 = (\sqrt{2}+1)\Omega_2$ would generate the Bell states (but we would not have access to the initial state $\du$, since $\Omega_i$ are fixed throughout an experiment). We now describe our two-ion state-detection procedure. We first prepare a two-ion basis state $\ket{kl}$, apply the detection beam D2 for a time $\tau_d \approx 500 \mu$s and record the number of photons $m$ detected in time $\tau_d$. We repeat this sequence for $N \approx 10^4$ trials and build a histogram of the photons collected (Fig.\ \ref{fig:refs}). To determine the populations of an unknown state, we fit its histogram to a weighted sum of the four reference histograms with a simple linear least-squares procedure. We observe that the $\uu$ count distribution (Fig.\ \ref{fig:refs}a) is not a single peak at $m = 0$, corresponding to the expected zero scattered photons. Counts at $m = 1$ and $m = 2$ are due to a background of 200-400 photons per second. 
The counts in bins $m > 2$ (which account for $\sim$ 10\% of the area) are due to a depumping process in which D2 off-resonantly drives an ion out of $\up$ ultimately trapping it in the cycling transition. We approximately double the depumping time by applying two additional Raman ``shelving'' pulses ($\up \rightarrow$ $^2S_{1/2}\ket{F=2,m_F=0}\rightarrow$$^2S_{1/2} \vert F = 1, m_F = -1\rangle$; $\down$ unaffected) after every state preparation. Nevertheless, this results in an average difference of only 10-15 detected photons between an initial $\down$ and $\up$ state, as shown in Fig.\ \ref{fig:refs} \cite{dp}. The distributions associated with $\du$ ,$\ud$ and $\dd$ are non-Poissonian due to detection laser intensity and frequency fluctuations, the depumping described above and $\down \rightarrow \up$ transitions from imperfect polarization of D2. One may ask: what is our overall two-ion state-detection efficiency on a {\it per experiment} basis? To address this issue, we distinguish three cases: 1) $\uu$, 2) $\ud$ or $\du$, 3) $\dd$. Now define case 1 to be true when $m \leq 3$, case 2 when $3 < m < 17$, and case 3 when $m \geq 17$. This gives an optimal 80\% probability that the inferred case (1, 2, or 3) from a measured $m$ in a single experiment is the actual case. We have generated states described by density operators $\rho^\pm$ in which the populations (diagonals of $\rho^\pm$) are measured to be $P_{\downarrow\uparrow} \approx P_{\uparrow\downarrow} \approx 0.4, \; P_{\downarrow\downarrow} \approx 0.15, \; P_{\uparrow\uparrow} \approx 0.05$. To establish coherence, consider first the Bell singlet state $\psi_B^-$ which has $P_{\downarrow\uparrow} = P_{\uparrow\downarrow} = 1/2$. 
Since $\psi_B^-$ has total spin $J = 0$, any $J$-preserving transformation, such as an equal rotation on both spins, must leave this state unchanged, whereas such a rotation on a mixed state with populations $P_{\downarrow\uparrow} = P_{\uparrow\downarrow} = 1/2$ and no coherences will evolve quite differently. We rotate both spins through an angle $\theta$ by driving on the co-propagating carrier for a time $t$ such that $\theta = \Omega_c t$. Fig. \ref{fig:evolve}a shows the time evolution of an experimental state which approximates the singlet Bell state. Contrast this with the approximate ``triplet'' state shown in Fig. \ref{fig:evolve}b. More quantitatively, the data show that $\rho^\pm$ is decomposed as $\rho^\pm = C\vert\psi_B^\pm\rangle\langle\psi_B^\pm\vert + (1-C)\rho_m$ in which $\rho_m$ has no coherences which contribute to the measured signal (off diagonal elements connecting $\ud$ with $\du$ and $\uu$ with $\dd$), and $C=0.6$ is the contrast of the curves in Fig.\ \ref{fig:evolve}. This leads to a fidelity of $\langle\psi_B^\pm\vert\rho^\pm\vert\psi_B^\pm\rangle = (P_{\downarrow\uparrow} + P_{\uparrow\downarrow} + C)/2 \approx 0.7$. The non-unit fidelity of our states arises from several technical factors. The first is Raman laser intensity noise which gives rise to a noise-modulated Rabi-frequency. The second is a second-order (in $\eta$) effect on $\Omega_i$ due to the motional state of the COM mode \cite{Wineland98}, which is not in the ground state at all times \cite{King98}. These effects can be seen in Fig.\ \ref{fig:Rabis}b as a decay envelope on the data (modeled by $\gamma$ of Eq.\ \ref{eq:S}) and cause a 10\% infidelity in initial state preparation \cite{infidel}. This initial imperfection in state preparation, the contribution of the above factors on the rsb pulse and a first order effect due to imperfect ground-state preparation of the stretch mode are responsible for the rest of the infidelity. 
The micro-motion-induced selection of Rabi frequencies as here demonstrated is sufficient to implement universal quantum logic with individual addressing \cite{Cirac95}. To isolate ion 1, we arrange the trap strength and static electric field so that ion 1 is at the rf null position ($\Omega_1 = \Omega_c J_0(0) = \Omega_c$) and ion 2 is at a position such that $\Omega_2 = \Omega_c J_0(\vert\vec{\delta k}\vert\xi_2) = 0$. To isolate ion 2, we drive on the first motional sideband of the rf-micro-motion by adding $\Omega_T/2\pi = \pm 238$ MHz to the difference frequency of the Raman beams resulting in $\Omega_1 = \Omega_c J_1(0) = 0$ and $\Omega_2 = \Omega_c J_1(\vert\vec{\delta k}\vert\xi_2)= 0.519 \Omega_c$. This provides a means of individual addressing, with which the Cirac-Zoller scheme \cite{Cirac95} can be implemented for two ions. In conclusion, we have taken a first step in the quantum preparation and manipulation of entangled states of multiple trapped ions--- a step which is crucial for quantum computations with trapped ions. We have {\it engineered} entangled states deterministically, that is, there is no inherent probabilistic nature to our quantum entangling source. We have developed a two-ion state-sensitive detection technique which allows us to measure the diagonal elements of the density matrix $\rho^\pm$ of our states, and have performed transformations which directly measure the relevant off-diagonal coherences of $\rho^\pm$. We acknowledge support from the U.\ S.\ National Security Agency, Office of Naval Research and Army Research Office. We thank Eric Cornell, Tom Heavnor, David Kielpinski, and Matt Young for critical readings of the manuscript. \begin{references} \bibitem[*]{gov} Work of the US government. Not subject to US copyright. \bibitem[\dagger]{email} [email protected]. \bibitem[\ddagger]{didi} Present address: Institut f\"{u}r Experimentalphysik, Univ. Innsbruck, Austria. \bibitem{EPRBohmBell87} A. Einstein, B. Podolsky, and N. 
Rosen, Phys. Rev. {\bf 47}, 777 (1935). J.~S. Bell, {\em Speakable and unspeakable in quantum mechanics} (Cambridge University Press, Cambridge, England, 1987). \bibitem{Shor97Grover97a} P.~W. Shor, SIAM J. Comp. {\bf 26}, 1484 (1997). L.~K. Grover, Phys. Rev. Lett. {\bf 79}, 325 (1997). \bibitem{Barenco95} A. Barenco and A. Ekert, J. Mod. Opt. {\bf 42}, 1253 (1995). C.~H. Bennett, Phys. Today {\bf 48}, 24 (Oct., 1995); \bibitem{Ekert91} A. Ekert, Phys. Rev. Lett. {\bf 67}, 661 (1991). C.~H. Bennett, Sci. Am. {\bf 267}, 50 (Oct., 1992). \bibitem{Bollinger96} J.~J. Bollinger {\it et~al.}, Phys. Rev. {\bf A54}, R4649 (1996); S.~F. Huelga {\it et~al.}, Phys. Rev. Lett. {\bf 79}, 3865 (1997). \bibitem{Wineland98} D.~W. Wineland {\it et~al.}, to appear in NIST Journal of Research; e-print at quant-ph/9803023 (unpublished). \bibitem{Ekert96bGrover98} A. Ekert and R. Jozsa, Rev. Mod. Phys. {\bf 68}, 733 (1996). A. Steane, Rep. Prog. Phys. {\bf 61}, 117 (1998). \bibitem{Cirac95} J.~I. Cirac and P. Zoller, Phys. Rev. Lett. {\bf 74}, 4091 (1995). \bibitem{Monroe95} C. Monroe {\it et~al.}, Phys. Rev. Lett. {\bf 75}, 4714 (1995). \bibitem{Turchette95Maitre97} Q.~A. Turchette {\it et~al.}, Phys. Rev. Lett. {\bf 75}, 4710 (1995), X. Maitre {\it et~al.}, Phys. Rev. Lett. {\bf 79}, 769 (1997). \bibitem{Freedman72Fry76Aspect82Aspect82b} S.~J. Freedman and J.~F. Clauser, Phys. Rev. Lett. {\bf 28}, 938 (1972). E.~S. Fry and R.~C. Thompson, Phys. Rev. Lett. {\bf 37}, 465 (1976). A. Aspect, P. Grangier, and G. Roger, Phys. Rev. Lett. {\bf 49}, 91 (1982). \bibitem{Shih88Ou88Ou92Kwiat95Tittel97Bouwmeester97} Y.~H. Shih and C.~O. Alley, Phys. Rev. Lett. {\bf 68}, 3663 (1992). Z.~Y. Ou and L. Mandel, Phys. Rev. Lett. {\bf 61}, 50 (1988). Z.~Y. Ou {\it et~al.}, Phys. Rev. Lett. {\bf 68}, 3663 (1992). P. Kwiat {\it et~al.}, Phys. Rev. Lett. {\bf 75}, 4337 (1995). W. Tittel {\it et~al.}, Europhys. Lett. {\bf 40}, 595 (1997). D. Bouwmeester {\it et~al.}, Nature {\bf 390}, 575 (1997). 
\bibitem{Lamehi76} M. Lamehi-Rachti and W. Mittig, Phys. Rev. D {\bf 14}, 2543 (1976). \bibitem{Hagley97} E. Hagley {\it et~al.}, Phys. Rev. Lett. {\bf 79}, 1 (1997). \bibitem{Laflamme97} R. Laflamme {\it et~al.}, e-print at quant-ph/9709025 (unpublished). \bibitem{Chuang98Chuang98aCorey97Corey98} I.~L. Chuang, N. Gershenfeld, and M. Kubinec, Phys. Rev. Lett. {\bf 80}, 3408 (1998). I.~L. Chuang {\it et~al.}, e-print at quant-ph/9801037 (unpublished). D.~G. Corey, M.~D. Price, and T.~F. Havel, e-print at quant-ph/9709001 (unpublished). D.~G. Corey {\it et~al.}, e-print at quant-ph/9802018 (unpublished). \bibitem{Law97} C.~K. Law and H.~J. Kimble, J. Mod. Opt. {\bf 44}, 2067 (1997). \bibitem{Cirac97} J.~I. Cirac {\it et~al.}, Phys. Rev. Lett. {\bf 78}, 3221 (1997). \bibitem{Hill97} Note that {$E(\psi_e(\phi)) = 0.94$} where $E$ is the {\it entanglement} defined in C.~H. Bennett {\it et~al.} , Phys. Rev. A {\bf 53}, 2046 (1996). \bibitem{King98} B.~E. King {\it et~al.}, submitted; e-print at quant-ph/9803023. \bibitem{Meekhof96} D.~M. Meekhof {\it et~al.}, Phys. Rev. Lett. {\bf 76}, 1796 (1996); {\bf 77}, 2346 (1996) \bibitem{Jefferts95} S.~R. Jefferts {\it et~al.}, Phys. Rev. {\bf A51}, 3112 (1995). \bibitem{dp} This off-resonant de-pumping can be suppressed by increasing the energy separation between $\down$ and $\up$ with a strong magnetic field or by using a heavier ion with larger hyperfine splitting. \bibitem{infidel} The reference histograms for $\du$ and $\ud$ (Figs.\ \ref{fig:refs}b,c) have had this 10\% contamination from $\uu$ and $\dd$ removed, in order to assure that the references to which we fit are accurate representations of the underlying states. \end{references} \begin{figure} \caption{ (a) Relevant $^9$Be$^+$ energy levels. All optical transitions are near $\lambda = 313$ nm, $\Delta/2\pi = 40$ GHz and $\omega_0/2\pi = 1.25$ GHz. R1-3: Raman beams. D1-3: Doppler cooling, optical pumping and detection beams. 
(b) The internal basis qubit states of two spins shown with the vibrational levels connected on the red motional sideband. The labeled atomic states are as in (a); $n$ is the motional- state quantum number (note that the motional mode frequency $\omega_{\rm str} \label{fig:exp} \end{figure} \begin{figure} \caption{(a) Normalized $\hat x$-carrier Rabi frequencies $\Omega_i/\Omega_c$ of each of two ions as a function of center-of-mass displacement from the rf-null position $d$. The solid curves are Eq.\ \protect\ref{eq:micro} \label{fig:Rabis} \end{figure} \begin{figure} \caption{Photon-number distributions for the four basis qubit states. Plotted in each graph is the probability of occurrence $P(m)$ of $m$ photons detected in 500 $\mu$s {\it vs.} \label{fig:refs} \end{figure} \begin{figure} \caption{Probabilities $P_{\downarrow\uparrow} \label{fig:evolve} \end{figure} \end{document}
\begin{document} \title{Fault-tolerant quantum repeaters with minimal physical resources, and implementations based on single photon emitters} \author{L. Childress} \affiliation{Department of Physics, Harvard University, Cambridge, Massachusetts, 02138} \author{J. M. Taylor} \affiliation{Department of Physics, Harvard University, Cambridge, Massachusetts, 02138} \author{A. S. S\o rensen} \affiliation{Department of Physics, Harvard University, Cambridge, Massachusetts, 02138} \affiliation{ITAMP, Harvard-Smithsonian Center for Astrophysics, Cambridge, Massachusetts, 02138} \affiliation{The Niels Bohr Institute, University of Copenhagen, DK-2100 Copenhagen \O, Denmark} \author{M. D. Lukin} \affiliation{Department of Physics, Harvard University, Cambridge, Massachusetts, 02138} \affiliation{ITAMP, Harvard-Smithsonian Center for Astrophysics, Cambridge, Massachusetts, 02138} \date{\today}\begin{abstract} We analyze a novel method that uses fixed, minimal physical resources to achieve generation and nested purification of quantum entanglement for quantum communication over arbitrarily long distances, and discuss its implementation using realistic photon emitters and photonic channels. In this method, we use single photon emitters with two internal degrees of freedom formed by an electron spin and a nuclear spin to build intermediate nodes in a quantum channel. State-selective fluorescence is used for probabilistic entanglement generation between electron spins in adjacent nodes. We analyze in detail several approaches which are applicable to realistic, homogeneously broadened single photon emitters. Furthermore, the coupled electron and nuclear spins can be used to efficiently implement entanglement swapping and purification. We show that these techniques can be combined to generate high-fidelity entanglement over arbitrarily long distances. 
We present a specific protocol that functions in polynomial time and tolerates percent-level errors in entanglement fidelity and local operations. The scheme has the lowest requirements on physical resources of any current scheme for fully fault-tolerant quantum repeaters. \end{abstract}\pacs{03.67.Hk, 03.67.Mn, 78.67.Hc}\maketitle \section{Introduction} Quantum communication holds promise for transmitting secure messages via quantum cryptography, and for distributing quantum information~\cite{gisin02}. However, exponential attenuation in optical fibers fundamentally limits the range of direct quantum communication techniques~\cite{brassard00}, and extending them to long distances remains a conceptual and technological challenge. In principle, the limit set by photon losses can be overcome by introducing intermediate quantum nodes and utilizing a so-called quantum repeater protocol~\cite{briegel98}. Such a repeater creates quantum entanglement over long distances by building a backbone of entangled pairs between closely-spaced quantum nodes. Performing an entanglement swap at each intermediate node ~\cite{zukowski93} leaves the outer two nodes entangled, and this long-distance entanglement can be used to teleport quantum information ~\cite{bennett93,bouwmeester97} or transmit secret messages via quantum key distribution~\cite{ekert91}. Even though quantum operations are subject to errors, by incorporating entanglement purification ~\cite{Bennett96,Deutsch96} at each step, one can extend entanglement generation to arbitrary distances without loss of fidelity in a time that scales polynomially with distance ~\cite{briegel98}. This should be compared to direct communication, which scales exponentially, making it impractical for long distances. Several approaches for physical implementation of a quantum repeater protocol have been proposed. Early work was based on systems of several atoms trapped in high finesse optical cavities \cite{vanenk98,duan04, blinov04}. 
Such systems can form a quantum network with several quantum bits (qubits) per node, and are particularly suitable for efficient implementation of the pioneering proposal of Ref. \cite{briegel98}. In this approach, quantum communication over thousand kilometer distances requires seven quantum bits per node, which must be coherently coupled to perform local quantum logic operations , i.e. a seven qubit quantum computer. The specific implementation of these early ideas involved the techniques of cavity QED for interfacing stationary and photonic qubits and for performing the necessary quantum logic operations \cite{Bose99, Ye99}. Recent related work pointed out that long-distance entanglement can be implemented via probabilistic techniques without the use of ultra-high finesse cavities \cite{duan04,blinov04}, while local operations can be accomplished via short-range interactions involving e.g. interacting trapped ions. However, few-qubit registers are still technically very difficult to construct, and the difficulty increases drastically with the number of qubits involved. At the same time, a novel approach based on photon storage in atomic ensembles~\cite{Duan01} and probabilistic entanglement is also being actively explored. In comparison with systems based on many-qubit nodes, this approach offers less error tolerance and requires a longer communication time. Realization of a robust, practical system that can tolerate all expected errors remains therefore a challenging task. In a recent paper \cite{shortpaper} we proposed a quantum repeater protocol which could be implemented using the electronic and nuclear degrees of freedom in single photon emitters. Here we present further details of the proposal described in Ref. \cite{shortpaper}, and we compare our methods to alternative strategies. We show that our repeater protocol requires only two effective quantum bits at each node. 
This is the minimum requirement on physical resources which still allows active error correction. As a specific implementation, we consider nodes formed by a single quantum emitter with two internal degrees of freedom. A pair of electronic spin sublevels allows for state-selective optical excitation (see inset in Figure~1a), and a proximal nuclear spin provides an auxiliary memory. State-selective fluorescence is used for probabilistic entanglement generation between electronic spin degrees of freedom. We analyze in detail and compare several approaches for probabilistic entanglement generation, focussing on the feasibility of their implementation using realistic photon emitters. Once electronic spin entanglement is generated, the coupled electron and nuclear spin at each node can be used to efficiently implement entanglement swapping and purification. We show that these techniques can be combined to generate high-fidelity entanglement over arbitrarily long distances. We present a specific protocol that functions in polynomial time and tolerates percent-level errors in entanglement fidelity and local operations. Our approach is stimulated by recent experimental progress in single photon generation by means of single quantum emitters, including atoms and ions as well as impurities and nanostructures in solid state devices. Although our approach is relevant to atomic systems, such as single atoms trapped in a cavity~\cite{mckeever04} or single trapped ions~\cite{blinov04}, it is particularly suitable for implementation with solid-state emitters, for example impurity color centers~\cite{weinfurter00,beveratos02} and quantum dots ~\cite{michler00,santori02}. These devices offer many attractive features including optically accessible electronic and nuclear spin degrees of freedom, potential opto-electronic integrability, and fast operation. The paper is organized as follows. First, we will discuss techniques for entanglement generation. 
For clarity, we will present our results in the context of nitrogen-vacancy (NV) centers in diamond, and discuss alternative implementations at the end. Realistic imperfections, such as homogeneous broadening and limited selection rules, motivate a novel entanglement generation scheme based on state-selective Rayleigh scattering and interferometry. We calculate the success probability and entanglement fidelity for this scheme as implemented in NV centers, and compare this scheme to alternative schemes based on Raman scattering or optical $\pi$ pulses, with success conditioned on detection of one or two photons. Next, we will show how hyperfine coupling between the electron spin and proximal nuclei permits entanglement swapping and purification. Performing these operations in parallel and employing a nesting scheme, we calculate the fidelity obtained and the time required to generate it as a function of distance. In addition, we compare this scheme to pioneering proposals \cite{briegel98, Duan01} for fault-tolerant quantum repeaters. Finally, we quantitatively discuss the feasibility of implementing a quantum repeater using NV centers, and elucidate alternative material systems which satisfy the requirements of our technique. \section{Entanglement generation} The initial step in our scheme is entanglement generation between the electron spins of two emitters separated by a distance $L_0$. In principle, entanglement can be generated probabilistically by a variety of means, e.g., Raman scattering~\cite{cabrillo99,Bose99,browne03} or polarization-dependent fluorescence~\cite{blinov04}. However, for our repeater protocol it is essential that the optical transition be independent of the nuclear spin state, and solid state emitters do not always allow Raman scattering or polarization-dependent fluorescence which fulfills this requirement. We therefore consider an entanglement mechanism based on state-selective elastic light scattering as shown in Figure~1. 
Elastic light scattering places few restrictions on selection rules, and permits nuclear-spin-independent fluorescence as we discuss below. \subsection{Properties of single color centers} Our entanglement generation scheme is applicable to a wide variety of physical systems, requiring only the simple level structure illustrated in Fig. \ref{fig:levels}a. For clarity, we will present it first using a concrete example: the nitrogen-vacancy (NV) center in diamond, which has the specific level structure shown in Fig. \ref{shelf}. This example illustrates many generic features common to other solid-state emitters. NV centers represent a promising physical system because of their strong optical transition around 637 nm and optically accessible electron spin states. In particular, the ground state ($A_1$ symmetry class of the $C_{3v}$ group) has three electronic spin-states which we label $|-1\rangle$, $|0\rangle$ and $|1\rangle$ according to their spin angular momentum along the symmetry axis of the crystal ($M_s$). Spin-orbit and spin-spin effects lead to a splitting of $|0\rangle$ from $|\pm 1\rangle$ by 2.88 GHz. Since we only require two electronic spin states, $|0\rangle$ and $|1\rangle$, we isolate these two states from the $|-1\rangle$ state by either adding a small magnetic field to shift the energy of the $|\pm 1 \rangle$ state, or by using appropriately polarised ESR-pulses. As spin-orbit and spin-spin effects are substantially different for the optically excited state ($E$ symmetry class), the strong transition from the $M_s=0$ sublevel of the ground orbital state can be excited independently of the other $M_s$ states. Although there is evidence for photo-bleaching at low temperatures, current models indicate that crossover into the dark metastable state occurs primarily from the $M_s = \pm 1$ excited states \cite{nizovtzev03}. Furthermore, crossover into the trapping state is a detectable error. 
In the repeater protocol described below we perform a series of measurements on the electronic spin. During these measurements, the dark state will not produce any fluorescence, revealing the error. Shelving into the metastable state will thus influence the time (see Appendix) but not the fidelity associated with the repeater protocol. Consequently, we assume that we are only near resonance with a single state $|e\rangle$ which has $M_s=0$, and neglect photo-bleaching effects. \begin{figure} \caption{\it (a) Generic level structure showing the state-selective optical transitions and electronic spin sublevels required for entanglement generation. (b) Setup used to create entanglement. The two emitters act as state dependent mirrors in an interferometer. The outputs of the cavities ($a_1$ and $a_2$) are combined on a beamsplitter. By proper alignment of the interferometer the photons always exit through the $(a_1+a_2)/\sqrt{2} \end{figure} The electron spin degree of freedom suffices to generate entanglement between adjacent NV centers. To propagate entanglement to longer distances, we will make use of an auxiliary nuclear degree of freedom $\{\ket{\uparrow},\ket{\downarrow}\}$ which will be used for storage of quantum information during the repeater protocol. In NV centers, this nuclear degree of freedom can arise from a nearby carbon-13 impurity or directly from the nitrogen-14 atom that forms the center. The large energy separation between the $|0\rangle$ and $|\pm 1\rangle$ states exceeds the hyperfine interaction by an order of magnitude, decoupling the nuclear and electronic spins. The energy levels can thus be described by product states of the two degrees of freedom. Furthermore, in states with $M_s=0$, the energy is independent of the nuclear state. Finally, a small magnetic field $\sim 10-100$ Gauss allows spectral resolution of the $M_s = \pm 1$ states without producing significant nuclear Zeeman splitting. 
The optical transition between $\ket{0}$ and $\ket{e}$ is thus disentangled from the nuclear spin state. Consequently, the nuclear spin can be used to store entanglement while the $|0\rangle-|e\rangle$ transition is used to generate another entangled pair of electron spins. \begin{figure} \caption{\it{The relevant electronic and nuclear states of the coupled NV center and $^{13} \end{figure} \subsection{Entanglement protocol} To implement the entanglement scheme, each NV center is placed inside a photonic cavity, whose output is coupled to a single-mode photonic fiber (note, however, that cavities are not essential for this proposal, see below). Fibers from adjacent NV centers enter two ports of a beamsplitter, and entangled electron spin pairs are obtained conditional on detection of a photon in an outgoing arm of the beamsplitter. Specifically, our protocol for entanglement generation relies on scattering on the closed optical transition between $|0\rangle$ and $|e\rangle$. This scattering does not change the state of the NV center; the centers essentially act as mirrors reflecting the light only if they are in the state $|0\rangle$. We assume that each of the centers is initially prepared in the state $(|0\rangle+|1\rangle)/\sqrt{2}$, so that the total state is \begin{equation} \label{eq:initstate} |\Psi_{{\rm ini}}\rangle = \frac{1}{2}{\left( |00\rangle+|11\rangle\right)}+\frac{1}{2} {\left(|01\rangle+|10\rangle\right)} . \end{equation} Since there is no light scattering from state $|1\rangle$, we can exclude the $|11\rangle$ component if we detect any scattered photons. In state $|00\rangle$, both centers act as mirrors, so that by balancing the interferometer in Fig.\ \ref{fig:levels} (b) we can arrange for the photons to leave through a specific port $D_+$. A photon detection in the opposite port $D_-$ can thus only arise from the $|01\rangle$ and $|10\rangle$ states and produces an entangled superposition of these two states.
Balancing and stabilizing an interferometer over tens of kilometers as required for the implementation of this protocol represents a considerable challenge. Using a method analogous to the plug-and-play system used in quantum key distribution~\cite{muller97}, we can reduce this requirement to stabilization of a small interferometer locally at each detector. Suppose that we wish to generate entanglement between repeater nodes $R_1$ and $R_2$. Employing fast optical switches, we excite $R_1$ by sending a pulse of light toward $R_2$, where the light is reflected and sent past the detector to $R_1$. Light emitted from $R_1$ follows the same path back to the detector. Similarly, we excite $R_2$ by sending a pulse of light toward a fast switch at $R_1$. The two paths thus cover the same path length between the nodes and we are insensitive to fluctuations in the path lengths as long as these fluctuations happen on a time scale which is longer than the time it takes for the pulses to travel between the stations. Alternatively one could change to a protocol which relies on the detection of two photons instead of one. In such protocols the sensitivity to changes in the path lengths can be reduced considerably \cite{simon,anders98,vanenk97}. \subsection{Entanglement fidelity in the presence of homogeneous broadening} We now describe this process mathematically, calculating the fidelity of the entangled pair produced by our protocol, as well as the probability for it to succeed. Our analysis incorporates dominant sources of error in solid-state systems; in particular, we account for effects of homogeneous broadening on the optical transition. Our model assumes that the NV centers are excited by a weak driving field applied between the states $|0\rangle$ and $|e\rangle$ with Rabi frequency $\Omega$; the excited states then radiatively decays at a rate $\gamma$. 
To describe the effect of homogeneous broadening on the optical transition we assume that the energy of the excited level fluctuates with a characteristic time which is slow compared to the optical frequency and much shorter than the lifetime of the excited state. In this approximation the broadening can be described by including a time-dependent detuning $\Delta(t)$ with white-noise characteristics: $\langle \Delta(t)\rangle = 0, \langle \Delta(t)\Delta(t')\rangle = \Gamma \delta(t-t')$. Below we shall be working in the limit of weak driving $\Omega\ll\gamma+\Gamma$. In this limit the light emitted from a center consists of two contributions: (i) a coherent part centered around the frequency of the driving laser with a width given by the width of the driving laser, and (ii) an incoherent part centered around the frequency of the transition with a frequency width of $\gamma+\Gamma$. The relative weight of these two contributions is $\gamma:\Gamma$. With considerable broadening of the optical transition $\Gamma\gtrsim\gamma$ it is therefore essential to filter out the incoherent scattered light with a frequency filter to get a high fidelity. To filter out the incoherent light and obtain a high collection efficiency we assume that the centers interact with an optical cavity with a coupling constant $g$ and a decay rate $\kappa$. We emphasize, however, that good cavities are not essential for our proposal: we only require sufficient collection efficiency and frequency selectivity, which could also be obtained by collecting the light with a lens and sending it through a narrow frequency filter. In general the weak drive may be detuned from the excited state, which would simplify the filtering of coherent from incoherent light. However, off-resonant excitation would require a stronger driving field, making it harder to avoid stray light reaching the detectors.
For simplicity we only discuss the situation where, on average, the driving field and cavity mode are resonant with the center. The combined NV-center cavity system is then described by the Hamiltonian \begin{equation} \label{eq:ham} H= \Delta(t) |e\rangle\langle e| +\frac{\Omega}{2}{\left( |0\rangle\langle e|+|e\rangle\langle 0|\right)} + g \hat{c}^\dagger |0\rangle\langle e|+g |e\rangle\langle 0| \hat{c}, \end{equation} where $\hat{c}$ is the photon annihilation operator for the field in the cavity. In the Heisenberg picture, decay terms can be included by considering the quantum Langevin equations of motion for the atomic operators $\hat{\sigma}_{ij} = \ket{i}\bra{j}$, \ba \label{eq:lang} \frac{d\hat{c}}{dt} & =& -\frac{\kappa}{2}\hat{c} - i g \hat{\sigma}_{0e}+ {\hat F}_c \\ \label{eq:lang2} \frac{d\hat{\sigma}_{0e}}{ dt} &= &\left(-\frac{\gamma}{2}-i\Delta(t)\right) \hat{\sigma}_{0e} + \nonumber \\ \label{eq:lang3} & & i \left(g \hat{c} +\frac{ \Omega}{2}\right)(\hat{\sigma}_{ee}-\hat{\sigma}_{00})+ {\hat F}_{0e} \\ \frac{d\hat{\sigma}_{ee}}{dt} & =& -\gamma \hat{\sigma}_{ee} + \left( i (\frac{\Omega}{2}+ g \hat{c}^{\dagger})\hat{\sigma}_{0e} + {\rm h.c.}\right) + {\hat F}_{ee} \ea where the noise ${\hat F}_c$ is the incoming vacuum noise leading to cavity decay at rate $\kappa$ and the other noise operators ${\hat F}_{0e},{\hat F}_{ee}$ represent the effect of other optical modes that lead to decay. We obtain an appropriate solution to the quantum Langevin equations by noting that, in the limit of weak driving, $\Omega\ll \gamma$, there is virtually no population of the excited state, $\hat{\sigma}_{00}-\hat{\sigma}_{ee}\approx \hat{\sigma}_{00}$.
The solution can then be written in the form $\hat{c}=\alpha \hat{\sigma}_{00}+$noise and $\hat{\sigma}_{0e}=\beta \hat{\sigma}_{00}+$noise, but the equations for $\alpha$ and $\beta$ are complicated due to the noise $\Delta(t)$. By averaging the Langevin equations over the noise one can, however, find simple equations for various moments of $\alpha$ and $\beta$, and by taking steady state solutions of the averaged equations we find \begin{eqnarray} \label{eq:alphabeta} \overline{\alpha}&=&\frac{-2g\Omega}{\kappa(\gamma+\Gamma) (1+4g^2/\kappa(\gamma+\Gamma))}\\ \overline{|\alpha|^2}&=&\frac{\frac{4g^2\Omega^2} {\kappa^2(\gamma+\Gamma)^2}} {{\left (1+\frac{4g^2}{\kappa(\gamma+\Gamma)}\right)}{\left( 1-\frac{\Gamma\kappa}{(\gamma+\Gamma)(\gamma+\kappa)} +\frac{4g^2}{\kappa(\gamma+\Gamma)}\right)} }\\ \overline{\beta}&=&\frac{-i\Omega} {(\gamma+\Gamma) (1+4g^2/\kappa(\gamma+\Gamma))}. \end{eqnarray} Note that in the presence of homogeneous broadening $\Gamma \neq 0$, the moments do not factor, $\overline{|\alpha|^2}\neq |\overline{\alpha}|^2$, signifying incoherent scattering of light into the cavity. We now apply the entanglement generation protocol, and use our mathematical model to predict the average density matrix components of the NV center electron spins. In our scheme, we combine the output of the two cavities on a beamsplitter and select the desired entangled state by conditioning on a click in detector $D_-$, described by the photon annihilation operator $\hat{d}_-=\sqrt{\zeta \kappa/2}( \hat{c}_1-\hat{c}_2)$. Here, subscripts one and two refer to the two NV-centers we are trying to entangle, $\zeta$ is the total collection and detection efficiency for photons leaving the cavity, and we have omitted the contribution from vacuum noise operators. To describe the effect of the detection, we use the quantum jump formalism\cite{jump}.
If the system starts out in state $\ket{\Psi_{\rm init}}$, the density matrix element $\rho_{i,j}$ at time $t$ can be found by \begin{equation} \label{eq:jump} \rho_{i,j}(t)=\langle \Psi_{{\rm init}}| \hat{d}_-(t)^\dagger |j\rangle\langle i| \hat{d}_-(t) |\Psi_{{\rm init}}\rangle \delta t / \delta P, \end{equation} where the time argument $t$ is included to emphasise the time dependent Heisenberg operators, and where $\delta P$ is the probability to have a click during a time $\delta t$, \begin{equation} \label{eq:dp} \delta P =\langle \Psi_{{\rm init}}| \hat{d}_-(t)^\dagger \hat{d}_-(t) |\Psi_{{\rm init}}\rangle \delta t. \end{equation} Our entanglement generation scheme relies on interference to eliminate $D_-$ detection events coming from the initial state $\ket{00}$. However, according to our formalism, if we start out in an initial state $|00\rangle$ the probability to have a click is given by \begin{eqnarray} \label{eq:start00} \delta P&=& \delta t\langle 00| \overline{\hat{d}_-^\dagger\hat{d}_-}|00\rangle\nonumber \\ &=&\kappa\zeta\delta t {\left( \overline{|\alpha|^2}- |\overline{\alpha}|^2\right)}, \end{eqnarray} where we assume the noise is independent for the two centers. This expression vanishes only for coherent scattering of light into the cavity, i.e.~$\overline{|\alpha|^2}=|\overline{\alpha}|^2$ or $\Gamma = 0$. In the presence of broadening, there is a finite probability that light will be detected from the $|00\rangle$ state. Similarly, $\Gamma >0$ leads to a finite probability for incoherent scattering from $|01\rangle$ and $|10\rangle$.
Homogeneous broadening thus reduces the fidelity ($F=\langle \Psi_{{\rm ideal}}|\rho|\Psi_{{\rm ideal}}\rangle$, where $|\Psi_{{\rm ideal}}\rangle$ denotes the ideal entangled state) by \begin{eqnarray} \label{eq:finitefiltering} 1-F&=&\frac{3}{2} {\left(1- \frac{|\overline{\alpha}|^2} {\overline{|\alpha|^2}} \right)}\nonumber \\ &=&\frac{3}{2} \frac{\Gamma}{\gamma+\Gamma} \frac{\kappa}{\gamma+\kappa} \frac{1}{1+4g^2/\kappa(\gamma+\Gamma)}. \end{eqnarray} Here we are interested in the limit where the fidelity is close to unity and we shall therefore assume $\overline{|\alpha|^2}\approx |\overline{\alpha}|^2$ in the calculation of other noise sources below. \subsection{Other errors} In addition to the error caused by homogeneous broadening, there is also a reduction in fidelity caused by multiple spontaneous emission events from the centers. This fidelity can conveniently be expressed in terms of the total emission probability \beq P_{em}=\frac{t_0\Omega^2}{(\gamma+\Gamma) [1+4g^2/\kappa(\gamma+\Gamma)] }, \label{Pem} \eeq where $t_0$ is the duration of the applied laser pulse. In the absence of homogeneous broadening, multiple excitations result in a fidelity \begin{equation} \label{eq:fidelnodestinguish} F=\frac{1}{2}+\frac{{\rm e}^{-P_{em}(1-\epsilon/2)}}{2} \end{equation} and success probability \beq P=(1-\exp(-\epsilon P_{em}/2))/2. \eeq The total collection efficiency can be expressed as $\epsilon=\zeta P_{{\rm cav}}$ with the probability to emit into the cavity given by \begin{equation} \label{eq:pcav} P_{{\rm cav}}=\frac{4g^2/\kappa(\gamma+\Gamma)} {1+4g^2/\kappa(\gamma+\Gamma)}. \end{equation} This treatment has neglected the possibility of distinguishing multiple photon detection events. If our detector can resolve photon number, we can use the information to improve our protocol.
In particular, a detection in the mode described by $\hat{d}_+=\sqrt{\zeta \kappa/2}( \hat{c}_1+\hat{c}_2)$+noise has no effect on the component of Eq.\ (\ref{eq:initstate}) that we are interested in, since $\hat{d}_+(|01\rangle+|10\rangle) \propto (|01\rangle+|10\rangle)$. Furthermore, a detection in this plus mode contains contributions from $|00\rangle$, so it yields no useful information. On the other hand, detection events in the mode described by $\hat{d}_-$ change the sign of the superposition state, since $\hat{d}_-(|01\rangle+|10\rangle)\propto (|01\rangle-|10\rangle)$, and $\hat{d}_-(|01\rangle- |10\rangle)\propto (|01\rangle+|10\rangle)$. Consequently, the optimal strategy is to change the phase of the entangled state when an even number of photons is detected. The resulting fidelity is \begin{equation} \label{eq:fidfinite} F=\frac{1}{2}+\frac{{\rm e}^{-P_{em}(1-\epsilon)}}{2}. \end{equation} Finally, we must include the effect of two other sources of noise: dark counts and electron spin dephasing. In the limit of small success probability $P$ the dark count introduces an incoherent admixture of the initial state and thus leads to a reduction in fidelity $P_{{\rm dark}}/P$ where $P_{{\rm dark}} = \gamma_{dc} t_0$ is the dark count probability. Electron spin dephasing makes the state decay towards a state with fidelity 1/2 at a rate $2\gamma_e$, yielding a reduction in the fidelity of $\gamma_e$ times the total time of the experiments. Typically, this total time will be dominated by the classical communication time between nodes, $t_c$.
Putting these considerations together, we find that the entanglement scheme succeeds with probability $P = (1/2)\left(1-e^{-P_{\rm{em}}\epsilon/2}\right)\approx \epsilon P_{\rm{em}}/4$, producing the state $|\Psi_-\rangle$ in time $T_0 \approx (t_0+t_c) /P$ with fidelity \ba F_0 &=& \frac{1}{2}\left(1+e^{-P_{\rm{em}}\left(1-\epsilon\right)}\right) - \gamma_e(t_0 + t_c)\nonumber \\ && - \gamma_{dc}\frac{t_0}{P} - \frac{3}{2}\frac{\Gamma}{\Gamma + \gamma}\frac{\kappa}{\kappa + \gamma}\frac{1}{1+4g^2/\kappa(\gamma + \Gamma)}. \label{fidelity} \ea For realistic emitters placed into a cavity with either a narrow linewidth $\kappa\ll\gamma$ or a large Purcell factor $4g^2/(\kappa(\gamma + \Gamma))\gg 1$, the first two terms should dominate the error. \section{Comparison to other entanglement generation schemes} The entanglement generation scheme that we have presented so far is the scheme that we believe to be best suited to NV centers. For other systems, this may not be the case. In particular, the presented scheme has two primary drawbacks: (1) it relies on resonant scattering, making it difficult to filter fluorescence photons from the applied laser field; (2) to avoid loss of fidelity from incoherent scattering, one must detect only a narrow frequency interval in the scattered light. Other entanglement methods present different problems which may prove easier to resolve or other methods may be better suited for different physical systems. Consequently, we now briefly compare the resonant scattering scheme presented above to alternate techniques. \subsection{Raman transitions} One of the first schemes considered for probabilistic entanglement generation \cite{cabrillo99,Bose99,browne03} used Raman transitions in three level atoms. In such schemes, an electron spin flip between non-degenerate ground states $\ket{0}$ and $\ket{1}$ is associated with absorption of a laser photon and emission of a frequency shifted Raman photon.
After interfering the emission from two atoms, detection of a Raman photon projects the two-atom state onto a state sharing at least one flipped spin. To avoid the possibility that both atoms emitted a Raman photon, the emission probability must be quite small $P_{{\rm em}}\ll 1$. In this limit, a photon detection event in detector $D_\pm$ results in an entangled spin state $|\Psi_\pm\rangle$. The Raman scheme can be implemented using either a weak drive between states $|1\rangle$ and $|e\rangle$ or with a short strong pulse which puts a small fraction of the population into $|e\rangle$. Since the latter is equivalent to the single detection $\pi$-pulse scheme discussed below, here we consider only weak driving. The system can now be treated using the quantum Langevin-quantum jump approach formulated above. As before, homogeneous broadening on the optical transition leads to an incoherent contribution to the Raman scattered light, which reduces the entanglement fidelity in a manner similar to Eq.~(\ref{eq:finitefiltering}). Again, for optimal fidelity the coherent part should be isolated with a narrow frequency filter. If we assume perfect filtering and a small collection efficiency $\epsilon\ll 1$ the fidelity conditioned on a click is given by \begin{equation} \label{eq:fidelityraman} F=1-P_{{\rm em}} \end{equation} with success probability $P=P_{{\rm em}}\epsilon.$ In the limit of large fidelity $F\approx 1$, the Raman scheme has a success probability which is a factor of 4 higher than for our interferometric scheme. Furthermore, the Raman scheme has the advantage that stray light may be spectrally filtered from the Raman photons. Nevertheless, because of the hyperfine interaction in state $|1\rangle$, the transition frequency from $|1\rangle$ to $|0\rangle$ depends on the nuclear spin state.
The associated detrimental effect on the nuclear coherence could potentially be avoided by using simultaneous transitions from $|1\rangle|\uparrow\rangle$ and $|-1\rangle|\downarrow\rangle$, which are degenerate. To our knowledge, however, fluorescence between $|e\rangle$ and $|1\rangle$ has not been observed, making it uncertain whether the Raman scheme can be implemented for the NV-centers \endnote{Raman transitions have been observed in a strong magnetic field by using the hyperfine interaction to mix the $|0\rangle$ and $|1\rangle$ states, but since such mixing involves the nuclear degree of freedom it is not applicable in the present context.}. \subsection{$\pi$ pulses} Time-gated detection offers an alternate method for distinguishing scattered photons from stray incident light. If an atom or NV center is excited by a sufficiently short, strong laser pulse, its population is coherently driven into the excited state. The excited state $|e\rangle$ then decays on a time scale $1/\gamma$. When the decay time is much longer than the incident pulse length, the excitation light and the photon emitted from the atom are separated in time, and can thus be distinguished. Entanglement is then generated conditional on the detection of one or two photons, as elucidated below. \subsubsection{Single detection} One particularly simple method for generating entanglement using $\pi$-pulses begins with each atom in a state \begin{equation} \label{eq:smallphi} \cos{\left(\phi\right)} |1\rangle+ \sin{\left(\phi\right)} |0\rangle. \end{equation} The incident $\pi$-pulse excites the optically active state $\ket{0}$ to $\ket{e}$ with unit probability, and the spontaneously emitted photons are interfered on a beamsplitter and subsequently measured (as for the Raman scheme above). Provided $\phi\ll 1$ we can ignore the possibility that both atoms are in state $|0\rangle$. A photon detection in $D_\pm$ excludes the state $|11\rangle$, preparing the system in $|\Psi_\pm\rangle$.
As with other entanglement schemes, high-fidelity entanglement generation requires filtering the incoherent scattering caused by homogeneous broadening of the optical transition. In previous sections, we have proposed to use a frequency filter to separate the narrow peak (in frequency) of coherent scattered light from the broad incoherent background. In the present case, filtering can be done in the time domain. In the excitation process a coherence is established between $|1\rangle$ and $|e\rangle$, and following the excitation this coherence (the off-diagonal density matrix element) decays at a rate $\Gamma/2$. By only conditioning on photons emitted a very short time after the excitation, during which the coherence has not had time to decay, a high quality entangled pair is produced. To describe this process mathematically, we again assume that the atom is placed inside an optical cavity. In contrast to our previous calculations, we assume that the cavity has a broad linewidth to ensure that generated photons leave the system as fast as possible. In the limit $\kappa\gg g$, $\gamma$, and $\Gamma$ we can adiabatically eliminate the cavity by setting $d\hat{c}/dt=0$ in Eq.~(\ref{eq:lang}) so that if we omit the noise we obtain \begin{equation} \label{eq:hcadiabat} \hat{c}(t)=\frac{2ig}{\kappa}\hat{\sigma}_{0e}. \end{equation} Inserting this expression into Eqs.~(\ref{eq:lang2}) and (\ref{eq:lang3}) we find \ba \overline{\hat{\sigma}_{0e}}(t)&=& \hat{\sigma}_{0e}(t=0) \exp{\left(-\frac {\gamma_{{\rm eff}}+\Gamma}{2} t \right)} \\ \overline{\hat{\sigma}_{ee}}(t)&=& \hat{\sigma}_{ee}(t=0) \exp{\left(- \gamma_{{\rm eff}} t\right)}, \ea where the effective decay rate $\gamma_{{\rm eff}}$ is the decay rate enhanced by the Purcell effect \begin{equation} \label{eq:purcell} \gamma_{{\rm eff}}=\gamma{\left( 1+\frac{4 g^2} {\kappa\gamma}\right)}. 
\end{equation} To find the fidelity of the entangled state created with this method we again use Eqs.~(\ref{eq:jump}) and (\ref{eq:dp}). For simplicity we only work in the limit of small collection efficiency $\epsilon\ll 1$. Conditioned on a click at time $t$ after the excitation, the fidelity of the entangled state is \begin{equation} \label{eq:fidelitypit} F=\cos^2(\phi){\left(\frac{1}{2}+\frac{1}{2}{\rm e}^{-\Gamma t}\right)} \end{equation} and the probability to have a click during the short time interval from $t$ to $t+\delta t$ is \begin{equation} \label{eq:pt} \delta P=2\epsilon \gamma_{{\rm eff}} \delta t \sin^2(\phi) {\rm e}^{-\gamma_{{\rm eff}}t} , \end{equation} where the collection efficiency $\epsilon=\zeta P_{{\rm cav}}$ is again given by the collection efficiency for the light leaving the cavity $\zeta$, and the probability to emit into the cavity is now given by \begin{equation} P_{{\rm cav}}=\frac{4g^2/\kappa\gamma} {1+4g^2/\kappa\gamma}. \end{equation} The success probability for a given fidelity now depends on the ratio between the broadening and the effective decay rate $\Gamma/\gamma_{{\rm eff}}$. For $\Gamma=0$, the procedure of initially transferring population from $|1\rangle$ to $|0\rangle$ and then applying a $\pi$-pulse between $|0\rangle$ and $|e\rangle$ is equivalent to a Raman transition, and Eqs.~(\ref{eq:fidelitypit},\ref{eq:pt}) indeed reproduce the same relation between success probability and fidelity given in Eq.~(\ref{eq:fidelityraman}). In the limit of small broadening, $\Gamma\ll\gamma_{{\rm eff}}$, the $\pi$-pulse scheme is advantageous over the interferometric scheme presented first. In particular, for a fixed fidelity $F\approx 1$ the success probability is a factor of 4 higher. In the presence of broadening, however, the situation is different. To obtain a high fidelity we should detect only photons emitted within a short time $T$ following the excitation. 
The average fidelity will then depend on two parameters $\phi$ and $T$. By optimizing these two parameters we find that for $F\approx 1$ the fidelity is \begin{equation} \label{eq:fidelityonpi} F=1-\sqrt{\frac{\Gamma}{8\gamma_{{\rm eff}}}}\sqrt{\frac{P}{\epsilon}}. \end{equation} Since previous expressions (\ref{eq:fidelnodestinguish}) and (\ref{eq:fidelityraman}) for $1-F$ depended linearly on $P$, this represents a much faster decrease in the fidelity. The $\pi$-pulse scheme is thus less attractive for homogeneously broadened emitters. \subsubsection{Double detection} If the collection efficiency is very high, it may be an advantage to rely on the detection of two photons instead of one \cite{anders02, barrett04,simon,anders98}. In this scheme, both atoms are initially prepared in $(|0\rangle+|1\rangle)/\sqrt{2}$ and a $\pi$-pulse is applied between $|0\rangle$ and $|e\rangle$. Following a detection in $D_\pm$ the populations in states $|0\rangle$ and $|1\rangle$ are interchanged and another $\pi$-pulse is applied between $|0\rangle$ and $|e\rangle$. Conditioned on clicks following both $\pi$ pulses we can exclude the possibility that the atoms were initially in the same state and we are left with $|\Psi_\pm\rangle$ conditioned on appropriate detector clicks. In the absence of homogeneous broadening, this protocol produces an entangled state with fidelity $F=1$ with probability $P=\epsilon^2/2$. The double-detection scheme thus avoids the multiple photon emission errors inherent in the single-detection schemes. With broadening of the optical transition, this is no longer the case. For $F\approx 1$, the relation between fidelity and success probability is now given by \begin{equation} \label{eq:fidelitypitwo} F=1-\frac{\Gamma}{\gamma_{{\rm eff}}\epsilon}\sqrt{2P}. \end{equation} Again the fidelity decreases more rapidly with the success probability than for the Raman and resonant scattering scheme, making it less useful for our purpose. 
\subsection{Summary} The best choice of scheme depends on the specific physical situation. The two $\pi$-pulse schemes are advantageous if the broadening is negligible. In particular, in the limit where we can ignore all errors except the photon attenuation, the double detection scheme results in the highest fidelity entangled pair. With low collection efficiency or large distances between emitters, the double detection will have a very small success probability because of the $\epsilon^2$ factor, and it may be advantageous to rely on a single detection scheme. The $\pi$-pulse schemes are less attractive if we are limited by homogeneous broadening of the optical transition because the fidelity decreases rapidly with the success probability. Better results are obtained for the resonant scattering or Raman schemes. When possible, the Raman scheme offers the best solution. The frequency-shifted Raman scattering allows frequency filtering of the incoming light; in addition the success probability is four times higher than for the resonant scattering scheme. But, as mentioned above, it is not always possible to drive Raman transitions, and it may be hard to achieve Raman transitions which are independent of the nuclear spin state. For this reason we believe that the resonant scattering scheme is most promising in the particular case of NV-centers. Finally we wish to add that the calculations we have performed here assume a specific model for the broadening (short correlation time for the noise). With other broadening mechanisms, e.g. slowly varying noise, these considerations will be different. \section{Entanglement swapping and purification} Using one of the procedures outlined above, electron spin entanglement can be generated between adjacent pairs of nodes. We now discuss a means to extend the entanglement to longer distances. 
\subsection{Swapping} After entangling nearest-neighbor electron spins, the electron spin state is mapped onto the auxiliary nuclear spin qubit for long-term storage using the hyperfine interaction. This operation leaves the electronic degree of freedom available to generate entanglement between unconnected nodes, as illustrated in Figure~\ref{scaling}. By combining optical detection of individual electron spin states~\cite{jelezko04} and effective two-qubit operations associated with hyperfine coupling of electronic and nuclear spins~\cite{jelezko04b}, we may projectively measure all four Bell states in the electronic/nuclear manifold associated with each emitter. The outcomes of the Bell state measurements reveal the appropriate local rotations to obtain a singlet state in the remaining pair of nuclear spins, implementing a deterministic entanglement swap~\cite{bennett93,zukowski93}. By performing this procedure in parallel, and iterating the process for $N \propto \log_2{(L/L_0)}$ layers, we obtain the desired nuclear spin entanglement over distance $L$ in a time $\propto L\log_2{(L/L_0)}$. \begin{figure} \caption{\it{Entanglement propagation by swapping. To generate an entangled nuclear spin pair (black) over the distance $L_{n+1}$.}} \label{scaling} \end{figure} \subsection{Purification} To extend entanglement to long distances in the presence of errors, active purification is required at each level of the repeater scheme. By performing local operations and measurements, it is possible to distill multiple entangled pairs with fidelity above some threshold $F_{min}$ into a single entangled pair with higher purity~\cite{Bennett96, Deutsch96}. The purification algorithm we use is described in detail in Refs.~\cite{Deutsch96, briegel98, Dur99, DurThesis}. For clarity we will present it in a form appropriate to the system under consideration, which uses repeated generation of electron spin entangled pairs to purify a stored nuclear spin entangled pair. 
Specifically, an electron spin entangled pair between stations $i$ and $j$ is described by the density matrix diagonal components $\{a_e, b_e, c_e, d_e\}$ in the Bell state basis $\{\ket{\Psi_-}, \ket{\Phi_+}, \ket{\Phi_-}, \ket{\Psi_+}\}$, where \ba \ket{\Psi_\pm}^{(e)}_{ij} &=& \frac{1}{\sqrt{2}}\left( \ket{0_i1_j} \pm \ket{1_i0_j}\right)\\ \ket{\Phi_\pm}^{(e)}_{ij} &=& \frac{1}{\sqrt{2}}\left( \ket{0_i0_j} \pm \ket{1_i1_j}\right). \ea We will refer to these diagonal elements as the ``vector fidelity" $\mathcal{F}_e = \{a_e, b_e, c_e, d_e\}$, noting that the first element $(\mathcal{F}_e)_1 = a_e$ encodes the fidelity with respect to the desired singlet state. A nuclear spin entangled pair between those stations is described by a similar vector fidelity $\mathcal{F}_n =\{a_n, b_n, c_n, d_n\}$ in the nuclear Bell basis \ba \ket{\Psi_\pm}_{ij}^{(n)} &=& \frac{1}{\sqrt{2}}\left( \ket{\downarrow_i\uparrow_j} \pm \ket{\uparrow_i\downarrow_j}\right)\\ \ket{\Phi_\pm}_{ij}^{(n)} &=& \frac{1}{\sqrt{2}}\left( \ket{\downarrow_i\downarrow_j} \pm \ket{\uparrow_i\uparrow_j}\right). 
\ea The purification protocol calls for a local rotation of each spin system at both locations: \ba \ket{0}_{i,j} \rightarrow \frac{1}{\sqrt{2}}\left(\ket{0}_{i,j}+i\ket{1}_{i,j}\right)\\ \ket{1}_{i,j} \rightarrow \frac{1}{\sqrt{2}}\left(\ket{1}_{i,j}+i\ket{0}_{i,j}\right)\\ \ket{\downarrow}_{i,j} \rightarrow \frac{1}{\sqrt{2}}\left(\ket{\downarrow}_{i,j}+i\ket{\uparrow}_{i,j}\right)\\ \ket{\uparrow}_{i,j} \rightarrow \frac{1}{\sqrt{2}}\left(\ket{\uparrow}_{i,j}+i\ket{\downarrow}_{i,j}\right), \ea followed by a two-qubit gate at each location: \beq \begin{array}{clcr} \ket{\downarrow 0}_i &\rightarrow& \ket{\downarrow 0}_i\\ \ket{\downarrow 1}_i &\rightarrow& \ket{\downarrow 1}_i\\ \ket{\uparrow 0}_i &\rightarrow& -\ket{\uparrow 1}_i\\ \ket{\uparrow 1}_i &\rightarrow& -\ket{\uparrow 0}_i\\ \end{array}~~~~~ \begin{array}{clcr} \ket{\downarrow 0}_j &\rightarrow& \ket{\downarrow 1}_j\\ \ket{\downarrow 1}_j &\rightarrow& \ket{\downarrow 0}_j\\ \ket{\uparrow 0}_j &\rightarrow& \ket{\uparrow 0}_j\\ \ket{\uparrow 1}_j &\rightarrow& \ket{\uparrow 1}_j. \end{array} \eeq After these operations, the electron spin is projectively measured at both locations. When the two electron spins are in the opposite state, the purification step succeeds, mapping the remaining nuclear spins to a diagonal state $\{a_n', b_n', c_n', d_n'\}$ with $a_n'>a_n$. This purification protocol can correct any type of error, but it functions best for phase errors, which correspond to diagonal elements of the form $\{f, 0, 0, 1-f\}$. To quantify the type of error associated with our entanglement generation scheme, we define a shape parameter $\upsilon$ such that the vector fidelity for entangled spin pairs between adjacent nodes is \beq \mathcal{F}_0 = \{F_0, (1-F_0)\upsilon, (1-F_0)\upsilon, (1-F_0 )(1-2\upsilon)\}. 
\label{shape} \eeq Note that $\upsilon \rightarrow 0$ corresponds to phase errors while $\upsilon \rightarrow 1/3$ corresponds to a Werner state with equal distribution of all error types. Note also that the assumption of diagonality imposes no restriction on the entangled states we generate, as any off-diagonal elements in their density matrices can be eliminated by performing random rotations (similar to the procedure for creating Werner states but without the symmetrization step)~\cite{DurThesis}. Furthermore, even without a randomization step, the average fidelity is determined by the diagonal elements \cite{Deutsch96}. \subsection{Errors} In the presence of local errors in measurements and operations, the purification and swap procedures deviate from their ideal effect. To describe this we use the error model described in \cite{Dur99}. Measurement errors are quantified using a parameter $\eta$ such that measurement projects the system into the desired state with probability $\eta$ and into the incorrect state with probability $1-\eta$. For example, a projective measurement of state $\ket{0}$ would be \beq P_0 = \eta\ket{0}\bra{0} + (1-\eta)\ket{1}\bra{1}. \eeq Errors in local operations are accounted for in a similar manner. With some probability $p$, the correct operation is performed; otherwise one traces over the relevant degrees of freedom in the density matrix and multiplies by the identity matrix (for further details see~\cite{Dur99} and references therein). For example, the action of a two qubit operation $U_{ij}$ would become \beq U_{ij}\rho U_{ij}^{\dagger} \rightarrow p ~ U_{ij}\rho U_{ij}^{\dagger} + \frac{1-p}{4} {\rm Tr}_{ij}(\rho)\otimes \mathcal{I}_{ij}. \eeq In our calculations, we neglect errors in single qubit operations and focus on two-qubit errors, which are likely to yield the dominant contribution. These errors determine the level of purification which is possible given infinitely many purification steps. 
They also determine how much the fidelity degrades during the entanglement swap procedure. Below we describe a repeater protocol which, compared to the original proposal \cite{briegel98}, reduces the required number of qubits at each repeater station at the expense of extra connection steps. Owing to these extra connection steps, our protocol is slightly more sensitive to local errors than the original scheme. \subsection{Nesting Scheme} Previous proposals for fault tolerant long distance quantum communication have required larger and larger numbers of qubits at each node as the communication distance is increased. Here we describe a nesting scheme which can be used to communicate over arbitrarily long distances while maintaining a constant requirement of only two qubits per node. \begin{figure} \caption{\it{Nesting scheme for generation and purification of entangled nuclear and electron spin pairs. In each node, the nuclear spin degree of freedom is represented by the upper (black) circle, while the electron degree of freedom is represented by the lower (red) circle. Entanglement between different nodes is represented by a line connecting them. Ovals represent entanglement swap steps, and rectangles represent entanglement purification steps. For $n=2$ the B and C pairs may be directly generated. For $n\geq3$, the first step illustrates how the B pair is generated, while the remaining two steps illustrate how the C pair is generated while storing the B pair. The arbitrary distance algorithm works for $n\geq 6$.}} \label{first} \end{figure} The scheme for nested entanglement purification is illustrated in Figure~\ref{first}. For clarity, we will label purified pairs by ``A'', pairs to be purified by ``B'', and auxiliary pairs used to perform purification by ``C''. Briefly, an entangled pair (``$B$'') is stored in the nuclear spins while an auxiliary entangled pair (``$C$'') is generated in the electron spins. 
The purification protocol described in~\cite{Deutsch96,Dur99} is then performed by entangling the electron and nuclear spins via the hyperfine interaction, and subsequently measuring the electron spins. Comparison of the measurement outcomes reveals whether the purification step was successful, resulting in a new stored pair $B$ with higher fidelity. After successfully repeating the procedure for $m$ purification steps (a technique sometimes referred to as ``entanglement pumping''), the stored pair becomes a purified (``A'') pair, which can then be used to create B and C pairs over longer distances. We may thus generate and purify entanglement to arbitrary distances. This procedure is analogous to the scheme in Ref.~\cite{briegel98}, but avoids the increase in the number of qubits required for that proposal. Mathematically, the scheme can be explained most easily using inductive arguments. Suppose that we have a means to create and purify entanglement over $k=2, 3, \cdots, n/2$ repeater stations ($(n+1)/2$ if $n$ is odd), and that we know the vector fidelity $\mathcal{F}_A(k)$ and the time $\mathcal{T}_A(k)$ required for each distance. We can then determine the time required and the vector fidelity possible after purification over $n$ repeater stations. We begin by creating two purified nuclear spin A pairs over half the distance and connecting them via a central electron spin pair of vector fidelity $\mathcal{F}_0$. In the presence of local errors, this yields a nuclear spin B pair with vector fidelity \beq \mathcal{F}_B(n) = \mathcal{C}\left(\{\mathcal{F}_A(\frac{n}{2}), \mathcal{F}_0, \mathcal{F}_A(\frac{n'}{2})\}, \eta, p \right). \eeq Here, $\mathcal{C}$ gives the vector fidelity obtained upon connecting the entangled pairs in the presence of local errors~\cite{DurThesis}, and $n/2$ and $n'/2$ are understood to represent $(n-1)/2$ and $(n+1)/2$ when $n$ is odd. 
The B pair is created in a time \beq \mathcal{T}_B(n) = \mathcal{T}_A(\frac{n'}{2}) + T_0 + \frac{n'}{2}t_c , \eeq where $T_0$ is the time required to generate nearest-neighbour entanglement and $t_c$ is the classical communication time between adjacent stations. We neglect the time required for local operations and measurement since these times are short compared to $T_0$ and $n t_c$. Similarly, we can find the vector fidelity and time for the electron spin C pair \ba \mathcal{F}_C(n) &=& \mathcal{C}(\{\mathcal{F}_0, \mathcal{F}_A(\frac{n}{2}-1), \mathcal{F}_0, \mathcal{F}_A(\frac{n'}{2} -1), \mathcal{F}_0\}, \eta, p)\nonumber \\ \mathcal{T}_C(n) &=& \mathcal{T}_A(\frac{n'}{2} -1) + T_0 + (n-2)t_c . \ea After performing one purification step, we obtain a nuclear spin pair $A_1$, with vector fidelity determined by the purification function $\mathcal{P}$ \beq \mathcal{F}_{A_1}(n) = \mathcal{P}(\mathcal{F}_B(n), \mathcal{F}_C(n), \eta, p). \eeq On average, the time required to perform this single step is \beq \mathcal{T}_{A_1}(n) = \frac{\left(\mathcal{T}_B(n)+\mathcal{T}_C(n)+(n-1)t_c\right)}{P_S(\mathcal{F}_B(n), \mathcal{F}_C(n))}, \eeq where $P_S$ is the probability that the purification step succeeds. After $m$ successful purification steps, the vector fidelity of the nuclear spin $A_m$ pair is \beq \mathcal{F}_{A_m}(n) = \mathcal{P}(\mathcal{F}_{A_{m-1}}(n), \mathcal{F}_C(n), \eta, p), \eeq and the average time required for its creation is \ba \mathcal{T}_{A_m}(n) &=& \frac{\left(\mathcal{T}_{A_{m-1}}(n)+\mathcal{T}_C(n)+(n-1)t_c\right)}{P_S(m)} \\ &=& \left(\mathcal{T}_C(n) + (n-1)t_c\right)\sum_{j = 1}^m \prod_{k=j}^m\left(\frac{1}{P_S(k)}\right) \nonumber \\ && + \mathcal{T}_B(n)\prod_{k=1}^m\left(\frac{1}{P_S(k)}\right), \ea where $P_S(m) = P_S(\mathcal{F}_{A_{m-1}}(n), \mathcal{F}_C(n))$. 
If we stop purifying at some fixed number $M$ of purification steps, then the desired vector fidelity and time over distance $n$ are given by \ba \mathcal{F}_A(n) &=& \mathcal{F}_{A_M}(n) \\ \mathcal{T}_A(n) &=& \mathcal{T}_{A_M}(n). \ea To complete the inductive argument, we must show that the protocol works for small distances. There are many schemes one can use to generate and purify entanglement over shorter distances, and one possibility is illustrated in Figure~\ref{first}. In fact, once the physical parameters for an implementation are determined, it should be possible to optimise the few-node scheme to minimise the required time or maximise the resulting fidelity. \subsection{Fixed point analysis} As the number of purification steps increases $m\rightarrow\infty$, the fidelity of the resulting entangled pair saturates. This saturation value can be found using a fixed point analysis (as described in~\cite{Dur99}) by solving for the vector fidelity $\mathcal{F}_A$ which is unchanged by further purification steps \beq \mathcal{F}_A = \mathcal{P}\left(\mathcal{F}_A, \mathcal{F}_C, \eta, p \right), \label{fp} \eeq where we have explicitly included the local errors in the purification function $\mathcal{P}$. This yields a fixed point fidelity $\mathcal{F}_{FP}(\mathcal{F}_C, \eta, p)$ which is independent of $\mathcal{F}_A$. Since the vector fidelity $\mathcal{F}_A$ has three independent parameters characterising the diagonal elements of the density matrix, one might miss the fixed point. However, as the number of purification steps increases our simulations do indeed approach the calculated fixed point. We therefore calculate the fixed point as a function of distance to find the upper bound on the fidelity which can be attained for given $\mathcal{F}_0, L/L_0, p$, and $\eta$. \subsection{Asymptotic Fidelity} As the distance increases $L\rightarrow \infty$, the fixed point fidelity can approach an asymptotic value $F_\infty$. 
We can understand the existence of $F_{\infty}$ and its value by examining the protocol as a function of nesting level. In particular, to generate entanglement over $n$ repeater stations we operate at nesting level $i \sim \log_2{n} $, where we obtain a purified pair \beq \mathcal{F}_A^{(i)} = \mathcal{F}_{FP}(\mathcal{F}_C^{(i)},\eta, p), \label{fa} \eeq where $\mathcal{F}_{FP}$ is the fixed point solution to Eq.~(\ref{fp}), and $(\mathcal{F}_{FP})_1 = F_{FP}$ is the fixed point fidelity. We will then use this purified pair $\mathcal{F}_A^{(i)}$ to build up an auxiliary C pair on the next nesting level $i+1$. Since the fidelity over distance $n-1$ is greater than that over distance $n$, i.e. $(\mathcal{F}_A(n-1))_1 \gtrsim (\mathcal{F}_A(n))_1$, the auxiliary pair fidelity we obtain will be greater than or equal to the first component of \beq \mathcal{F}_C^{(i+1)} \sim \mathcal{C}(\{\mathcal{F}_A^{(i)}, \mathcal{F}_A^{(i)}, \mathcal{F}_0, \mathcal{F}_0, \mathcal{F}_0\},\eta, p), \label{fc} \eeq where $\mathcal{C}$ is again the connection function. This auxiliary pair will then determine $\mathcal{F}_A^{(i+1)} = \mathcal{F}_{FP}(\mathcal{F}_C^{(i+1)},\eta, p)$. When $\mathcal{F}_A^{(i+1)} = \mathcal{F}_A^{(i)}$, we have reached the asymptotic fidelity $F_{\infty}=(\mathcal{F}_A)_1$ (see Figure~\ref{intersect}), which is given by the intersection of the purification curve Eq.~(\ref{fa}) and the auxiliary pair creation curve Eq.~(\ref{fc}). As was the case for the fixed point analysis, we must account for all diagonal components of the density matrix in the Bell state basis (not just the fidelity $a$). Consequently the asymptotic fidelity represents an upper bound to which the system may converge in the manner indicated by our simulations. Finally, we should stress that our calculations have not incorporated loss due to the long but finite memory time in the nuclear spins. 
This loss increases with the total time required for repeater operation, and sets the upper limit on the distance over which our scheme could operate. \begin{figure} \caption{\it{Approach to asymptotic fidelity. The solid curve shows the purified fidelity obtained from the auxiliary pair, while the dotted curve corresponds to the auxiliary pair (constructed from two smaller purified pairs) on the next nesting level. The system moves between the curves at each nesting step, and the upper intercept of the two curves gives the asymptotic fidelity. For this calculation $F_0 = p = \eta = 0.99$, and $\upsilon = 0$.}} \label{intersect} \end{figure} \subsection{Results} The discussion of final fidelity may be summarized as follows: the fidelity obtained at the end of this nested purification procedure, $F(m, L, F_0, p, \eta)$, depends on the number of purification steps $m$, the distance $L$ between the outer nodes, the initial fidelity $F_0$ between adjacent nodes, and the reliability of measurements $\eta\leq1$ and local two-qubit operations $p\leq1$ required for entanglement purification and connection~\cite{Dur99}. As the number of purification steps increases $m\rightarrow \infty$, the fidelity at a given distance $L$ approaches a fixed point $F\rightarrow F_{FP}(L, F_0, p, \eta)$ at which additional purification steps yield no further benefit~\cite{Dur99}. Finally, as $L$ increases, the fidelity may approach an asymptotic value $F_{FP}\rightarrow F_{\infty}(F_0, p, \eta)$. Figure~\ref{asymp}a illustrates the efficiency of the purification protocol: for initial fidelities $F_0\gtrsim 97\%$, three purification steps suffice to produce entanglement at large distances. \begin{figure} \caption{ \it{ (a) Fidelity scaling with distance. 
Points show results using 3 purification steps at each nesting level; dashed lines show the fixed point $F_{FP}$.}} \label{asymp} \end{figure} Figure~\ref{asymp}b demonstrates that our scheme permits generation of high-fidelity, long distance entangled pairs in the presence of percent-level errors in polynomial time. Because solid-state devices allow fast operations and measurements, the overall time scale is set by the classical communication time between nodes. As an example, using a photon loss rate of $\sim 0.2$ dB/km and inter-node separation $L_0 \sim 20$ km (so that in the limit of good detectors the collection efficiency is $ 10^{-0.4} \sim 1/e$), a fidelity set by an emission probability $P_{\rm{em}} \sim 8\%$, local errors $\eta = p = 0.5\%$, and just one purification step at each nesting level, our scheme could potentially produce entangled pairs with fidelity $F\sim 0.8$ sufficient to violate Bell's inequalities over 1000 km in a few seconds. For comparison, under the same set of assumptions direct entanglement schemes would require $\sim10^{10}$ years. \begin{figure} \caption{ \it{(a) Long-distance asymptote dependence on initial fidelity $F_0$ of (i) 100\% (ii) 99\% (iii) 98\% (iv) 97\% (v) 96\% with phase errors only. (b) Long-distance asymptote dependence on error type. For the calculations shown, $F_0 = 0.99$, and the shape parameter ranges from $\upsilon = 0$ to $\upsilon = 0.3$. In both (a) and (b) measurement errors are set equal to operational errors, $\eta = p$. }} \label{asymp2} \end{figure} Fig.~\ref{asymp2}a shows that our scheme will operate in the presence of $1-p \lesssim 1\%$ errors in local operations and percent-level phase errors in initial entanglement fidelity. Other types of error are in principle possible, and we consider nonzero shape parameters $\upsilon$ for the initial fidelity $\mathcal{F}_0$ in Eq.~(\ref{shape}). 
The asymptotic fidelity shown in Fig.~\ref{asymp2}b indicates that, although the protocol we use is most effective for purifying phase errors, it also tolerates arbitrary errors. \subsection{Optimization} Once the parameters of the system are established, the protocol can be optimised to minimise the time required to generate some minimum fidelity $F_{min}$ over a distance $L$. We can vary the number of repeater stations $\sim L/L_0$ and the number of purification steps $m$ (which need not be constant). We can also tailor the entanglement generation procedure by changing the emission probability $P_{em}$ to find the optimum balance between initial infidelity $1-F_0\sim P_{em}$ and entanglement generation time $T_0 \propto 1/P_{em}$. Finally, one could use more advanced optimal control techniques to vary the details of the protocol itself. In particular, it should be possible to speed up the algorithm by working simultaneously on multiple nesting levels, beginning entanglement generation and connection on the next nesting level as soon as the interior nodes are free. Further speed-up may also be possible in the case when collection efficiency is very high by using coincidence detection in combination with e.g. time-bin encoding~\cite{gisin02}. As noted previously such coincidence detection could also be advantageous for interferometric stability \cite{simon,anders98,vanenk97}. Ultimately, the speed of this protocol is limited by three factors: classical communication time between nodes, probabilistic entanglement generation, and sequential purification. Faster techniques will require more efficient entanglement generation or larger numbers of qubits at each node to allow simultaneous purification steps. \subsection{Comparison to other quantum repeater schemes} This scheme combines the advantages of two pioneering proposals for quantum repeaters \cite{briegel98, Duan01}. 
Early work showed that entanglement purification and swapping could be combined to permit efficient, fault-tolerant quantum communication over distances longer than an attenuation length \cite{briegel98}. This scheme incorporated error correction at the cost of increased physical resources, requiring nodes containing a number of qubits scaling at minimum logarithmically with distance \cite{Dur99}. Owing to the difficulty of implementing even few-qubit quantum computation, implementation of this scheme remains a challenging goal. Our scheme is closely related to the original proposal with one key difference: by spatially rearranging the required physical resources, we can efficiently simulate their protocol while maintaining a constant requirement on qubits per node. This makes our scheme amenable to realistic physical implementation. Another physical implementation for quantum repeaters uses atomic ensembles as a long-lived memory for photons\cite{Duan01}. Entanglement is generated by interfering Raman scattered light from two ensembles. The entanglement is probabilistically swapped using an EIT readout technique. This scheme elegantly avoids effects of the dominant photon loss error by conditioning success on photon detection. Our scheme primarily differs from this proposal in two ways: first, access to two-qubit operations between electron and nuclear spin permits deterministic entanglement swapping; second, the two-qubit nodes allow active correction of arbitrary errors. \section{Physical systems} We conclude with three specific examples for potential implementation of the presented method. \subsection{Implementation with NV centers} The NV center level structure illustrated in Fig.~\ref{shelf} allows implementation of all steps in the repeater protocol. The cycling transition from $\ket{0}$ to $\ket{e}$ is used for electron spin initialization by measurement, entanglement generation, and electron spin state measurement. 
A series of ESR and NMR pulses can be used to perform arbitrary gates between the electron spin and an adjacent $^{13}$C spin \cite{jelezko04b}. Consequently, nuclear spin state initialization and measurement is achieved by initializing the electron spin, mapping the nuclear spin state onto the electron spin, and subsequently measuring the electron spin. Entanglement propagation and purification can be implemented in NV centers by driving ESR and NMR transitions and using optical detection of the electron spin states. Once electron spin entanglement is established between nodes $R_i$ and $R_{i-1}$, it can be transferred to the nuclear spins, leaving the electron degree of freedom free to generate entanglement between station $R_i$ and $R_{i+1}$. Provided that we can reinitialize the electron spin without affecting the nuclear entanglement, we can perform the same probabilistic entanglement procedure. Note that ESR multiplexing is required to perform a $\pi/2$ pulse independent of the nuclear spin; this can be accomplished simply by applying two ESR pulses at the two transition frequencies. We now consider the feasibility of implementing our repeater protocol using NV centers in diamond. Owing to the overlap of electron wavefunctions in the ground and excited states, most of the NV center optical emission goes into the phonon sidebands. Other color centers in diamond, for example the NE8 center \cite{Gaebel04, Kilin} may suffer less from this drawback. To enhance the relative strength of the zero-phonon line, it will be necessary to couple the NV center to a cavity. For NV centers coupled to cavities with Purcell factors $\sim 10$~\cite{santori02}, we find that the dominant source of error is electron spin decoherence during the classical communication period. 
Using an emission probability $P_{\rm{em}}\sim 5\%$, a collection efficiency $\epsilon \sim 0.2$, and a classical communication time of $t_c \sim 70\mu$s over $L_0 \sim 20$ km, we find the fidelity of directly entangled pairs can reach $F_0 \sim 97\%$ for electron spin coherence times in the range of a few milliseconds. Electron spin coherence times in the range of $100\mu$s have been observed at room temperature and significant improvements are expected for high purity samples at low temperatures~\cite{Kennedy03}. The large hyperfine splitting allows fast local operations between electron and nuclear spin degrees of freedom on a timescale $\sim 100$ns~\cite{jelezko04b} much shorter than the decoherence time, allowing $1-p < 1\%$. Finally, cavity enhanced collection should significantly improve observed measurement efficiencies of $\eta \sim 80\%$~\cite{jelezko04b}. \subsection{Alternative implementation: quantum dots} Our discussion thus far has attempted to remain general while exemplifying our proposal using NV centers. The basic idea of using two-qubit repeater stations should be applicable to a wide variety of systems featuring coupled electron and nuclear spins. To illustrate an alternative implementation, we consider doped self-assembled quantum dots whose electron spin is coupled to collective nuclear states in the lattice. Compared to NV centers, this system offers large oscillator strengths and the potential for Raman manipulation. Doped semiconductor quantum dots have been considered in a variety of quantum computing proposals and related technologies~\cite{imamoglu99,pazy03}. The spin state of the dopant electron provides a natural qubit with relatively long coherence times. Assuming a high degree of nuclear spin polarization ($P_{\rm n} \gtrsim 0.95$)~\cite{bracker04} and active ESR pulse correction, the electron spin dephasing time is expected to be $1$ ms~\cite{golovach03}. 
The spins of lattice nuclei in the quantum dot provide an additional, quasi-bosonic degree of freedom with extremely long coherence times ($\sim 1$ s with active correction~\cite{Ramanathan04}). Such ensembles of nuclear spins have been considered for use as a quantum memory~\cite{taylor03} and, by taking advantage of the non-linearity of the Jaynes-Cummings Hamiltonian, as a fundamental qubit for a quantum computer~\cite{taylor04}. Unlike the spin triplet state of the NV centers, the conduction band electron has two states, $\ket{\uparrow}$ and $\ket{\downarrow}$, corresponding to spin aligned and anti-aligned with an external magnetic field $B_{\mathrm{ext}} \parallel \hat{z}$. The quantum dot system also differs from NV centers in that it can be manipulated using Raman transitions: when the external field and growth direction are perpendicular (Voigt geometry), two allowed optical transitions to a trion state produce a lambda system; moving towards aligned field and growth directions (Faraday geometry) suppresses the ``forbidden'' transitions, as shown in Fig.~\ref{qd}a. Electron spin coherence can thus be prepared via Raman transitions or by standard ESR setups, and changes in effective magnetic field can be accomplished by off-resonant, spin-dependent AC Stark shifts with $\sigma_+$ light. Although optical transitions in doped quantum dots can exhibit homogeneous broadening $\Gamma \sim 100$~GHz $\sim 10$--$100\gamma$~\cite{kiraz02}, the corresponding error can be made negligible by sending the output from the cavity through a frequency filter with a linewidth of a few hundred MHz. \endnote{For our entanglement generation scheme, such a filter will allow the desired narrow band of coherent light to pass through while rejecting the broad incoherent background.
Consequently the filter will not decrease collection efficiency in the desired mode.} Moreover, we note that InAs quantum dots have been successfully coupled to microcavities with Purcell factors $\sim 10$~\cite{santori02}. \begin{figure} \caption{\it{(a) Level structure for single electron to trion transition in a single-electron doped III-V or II-VI quantum dot with external magnetic field in close to a Faraday geometry with Zeeman splitting $\omega_z$, heavy-hole splitting $\omega^h_z$ and up to four optical fields of different frequency and polarization. Dashed lines indicate weak dipole moments due to small magnetic field mixing. The triplet two-electron states are not included due to a large ($> 1$~meV) exchange energy allowing for complete suppression of their effects. (b) Electronic ($\ket{\uparrow}$, $\ket{\downarrow}$) and collective nuclear ($\ket{0}$, $\ket{1}$) levels coupled by the hyperfine interaction.}}\label{qd} \end{figure} Whereas the NV center electron spin was coupled to a single nuclear impurity, the electron in a quantum dot couples to collective excitations of many thousands of nuclei. We briefly discuss this system; further details are given in Refs.~\cite{taylor03, taylor03b}. The Hamiltonian governing this interaction is \beq H_{qd} = \hbar \omega_z \hat{S}_z + \hbar \sum_k \gamma_k \hat{I}_z^k + \hbar \Omega \sum_k \lambda_k \hat{\vec{S}} \cdot \hat{\vec{I}}^k, \eeq where $\gamma_k$ is the gyromagnetic ratio for nuclear spin $\hat{\vec{I}}^k$, the nuclear spin coupling amplitudes satisfy $\lambda_k \propto |\psi(r_k)|^2$, $\sum_k \lambda_k^2 = 1$, and $\Omega = A / \hbar \sum_k \lambda_k$ ($A$ is the hyperfine interaction constant). By identifying collective nuclear spin operators, $\hat{\vec{A}} = \sum_k \lambda_k \hat{\vec{I}}^k$, the hyperfine term may be written $\hbar \Omega \hat{\vec{S}} \cdot \hat{\vec{A}}$.
For simplicity we restrict the following discussion to the case of perfect nuclear polarization (so the initial state of all $N_n$ nuclear spins in the quantum dot is $\ket{0} = \ket{-I}\otimes \ldots \otimes \ket{-I}$ for $I$-spins). Then an effective Jaynes-Cummings type Hamiltonian describes the system: \beq H_{qd}^{eff} = \hbar \omega_z^{eff} \hat{S}_z + \hbar \Omega/2 (\hat{A}_+ \hat{S}_- + \hat{A}_- \hat{S}_+) , \eeq with corrections of order $\Omega / \sum_k \lambda_k \sim A/N_n$. The effective Zeeman splitting $\omega_z^{eff}=\omega_z - I A/\hbar$ is dominated by the field associated with the polarized nuclear spins; for example, in GaAs quantum dots, this Overhauser shift is $I A/\hbar \sim 33$~GHz. The large detuning $\omega_z^{eff}$ suppresses interactions which exchange energy between the electron and nuclear spins. By changing the effective magnetic field, we can shift the system into resonance $\omega_z^{eff}\rightarrow 0$ to drive Rabi oscillations between the electron spin and the collective nuclear state, see Fig.~\ref{qd}b. Pulsing the appropriate effective field permits a controllable map between the electron spin state and the collective nuclear degrees of freedom spanned by $\ket{0}$ and $\ket{1} = \hat{A}_+ \ket{0}$. In addition, more complicated sequences of electron-nuclear spin interaction and electron spin manipulation allow for arbitrary two qubit operations on $\ket{\hspace{-0.2 pt}\uparrow},\ket{\hspace{-0.2 pt}\downarrow}$ and $\ket{0},\ket{1}$ (see Ref.~\cite{taylor04} and commentary therein). Measurement and initialization proceed in much the same manner as described for NV centers. The state of the electron spin system can be read out by exciting a cycling transition with resonant $\sigma_+$ light, and measurement and ESR (or Raman transitions) can be employed to initialize the system in the desired state.
As the effective Knight shift of the electron spin is negligible on the time scales of entanglement preparation, the collective nuclear state's coherence is unaffected by this process. Due to the improved selection rules and possibility of Raman transitions, it may be more effective to use the Raman entanglement generation scheme. The nuclear state of the quantum dot can be prepared by cooling the nuclear spins using preparation of electron spin and manipulation of the effective magnetic field~\cite{imamoglu03}. In practice, this leaves the nuclear system in a state $\ket{\mathcal D}$ with the same symmetry properties as the state $\ket{0}$ described above~\cite{taylor03b}. To date, 60\% nuclear spin polarization has been achieved by optical pumping in GaAs quantum dots~\cite{bracker04}. As was the case with NV centers, the nuclear spin state can be read out by preparing the electron spin in the $\ket{\downarrow}$ state, mapping the nuclear state to the electron spin state, and measuring the electron spin state. \subsection{Atomic Physics Implementation} Compared to the solid state implementations we have considered so far, implementations in single trapped atoms or ions have the advantage that they typically have very little broadening of the optical transitions. Because atomic systems do not reside in a complicated many-body environment, their internal degrees of freedom can have very long coherence times. For most atomic systems, however, it is hard to identify a mechanism which allows one degree of freedom, e.g., the nuclear spin, to be decoupled while we probe some other degree of freedom, e.g., the electron spin. Below we describe a system which does fulfill this requirement, although practical considerations indicate implementation may be challenging. We consider alkaline-earth atoms, such as neutral magnesium, and choose an isotope with non-vanishing nuclear spin ($^{25}$Mg).
The lowest lying states of magnesium are shown in Fig.~\ref{fig:mg} (electronic structure only). Instead of the electronic spin states we have considered so far, i.e., for NV centers and quantum dots, we will use states which differ both in spin and orbital angular momentum. The stable ground state $^1S_0$ will serve as state $|0\rangle$. In this state, the electronic degrees of freedom have neither spin nor orbital angular momentum and the nuclear spin is thus decoupled from the electronic state. The excited state $^3P_0^o$ (whose hyperfine interactions also vanish to leading order) will provide state $|1\rangle$. Note that the triplet-singlet transition from $^3P_0^o$ to the ground state is highly forbidden and this state has an extremely long lifetime, but transitions between the two states can still be induced with a strong laser. To create entanglement we couple the ground state to the excited state $^1P_1$ with a laser field and collect the scattered light. From this excited state the atom essentially always decays back into the ground state. If the driving is detuned much further than the hyperfine splitting in the excited state, the nuclear spin is also decoupled during this process. The nuclear spin can therefore be used to store information while we entangle the electronic state with another atom. Finally, to implement gates between the electronic and nuclear states one should, for instance, couple the $|0\rangle$ state to another state in the atom where there is a hyperfine interaction, for example using resonant excitation of the $^1P_1$ state. \begin{figure} \caption{\it{Electronic level structure of atomic magnesium. The electronic ground state $^1S_0$ has vanishing spin and orbital angular momentum. In this state the nuclear spin therefore decouples from the electronic degrees of freedom.
An entangling operation which is also insensitive to the nuclear degree of freedom can be achieved with lasers which are detuned much further than the hyperfine splitting in the $^1P_1$ level.}}\label{fig:mg} \end{figure} Finally, we note that all three physical implementations we suggest operate in the visible or near-IR, and will likely require high-efficiency frequency conversion to telecom wavelengths for low-loss photon transmission. \section{Conclusion} In conclusion, we propose a method for fault tolerant quantum communication over long distances requiring only probabilistic nearest-neighbor entanglement generation, two qubits per node, and two-qubit operations. We compare several schemes for entanglement generation and discuss two solid-state systems and an atomic system which might be used to implement them. Potential applications include secure transmission of secret messages over intercontinental distances. The authors wish to thank Phillip Hemmer, Aryesh Mukherjee, Klaus M\o lmer, Alexander Zibrov, and Gurudev Dutt. This work is supported by DARPA, NSF, ARO-MURI, and the Packard, Sloan and Hertz Foundations and the Danish Natural Science Research Council. \section{Appendix} Photo-bleaching is a detectable error, so it does not affect the fidelity of entanglement generation or measurement, as we described above. However, it can increase the time required for these operations. Fluorescence correlation experiments are consistent with assigning a metastable singlet structure to the shelving state, which is coupled strongly to the $M_s = \pm 1$ excited states but only weakly to the $M_s = 0$ excited state (see Figure~\ref{shelf})~\cite{nizovtzev03}. We need to account for the possibility that our NV center bleaches during entanglement generation, requiring us to start over. During each attempt we resonantly excite the $M_s = 0$ transition with some probability $P_{{\rm em}}$ (see Eq.~\ref{Pem}).
To quantify the population lost to $\ket{W}$ we will consider a model where the $M_s = \pm1$ excited states decay to the shelving state at rate $\Gamma_S$. The oscillator strength for the $M_s = \pm1$ optical transitions are unknown, so we will assume that the Rabi frequencies on the $M_s = \pm 1$ transitions are $\Omega' $. During one attempt at entanglement generation, the probability to end up in the shelving state is \beq P_W \sim \Gamma_S\frac{\Omega'^2}{\delta^2} t_0, \eeq where $\delta$ is the detuning from the $M_s = \pm1$ optical transition. (The excited state energies are strongly inhomogeneously broadened, so $\delta$ is not precisely known; this detuning should be controllable using strain or applied electric fields.) On average, a large number of attempts $\sim 4/P_{{\rm em}}\epsilon^2$ are required for successful entanglement generation. Consequently the total probability for the system to end up in the shelving state during entanglement generation is \beq P_W \sim 4 \Gamma_S\frac{\Omega'^2}{\delta^2} \frac{\gamma}{\Omega^2 \epsilon^2} \sim 4 \frac{\Gamma_S \gamma}{\delta^2 \epsilon^2}\frac{\mu'^2}{\mu^2}, \eeq where $\mu (\mu')$ is the oscillator strength for the $M_s = 0(\pm1)$ transition. The precise values of these parameters are unknown, but we can estimate their order of magnitude: $\Gamma_S \sim 1-10$ MHz, $\gamma +\Gamma \sim 100$ MHz, $\delta \sim 1$ GHz, $\mu'\sim\mu$, yielding \beq P_W \sim \frac{4 (10^{-3}-10^{-4})}{\epsilon^2}. \eeq If this error rate is too large, we can also check for photo-bleaching at intervals during the entanglement procedure. The shelving state poses a similar problem during measurement. In this case, the $M_s = 0$ transition is strongly illuminated so that at least one photon reaches the detectors: $P_{{\rm em}}\sim 1/\epsilon^2$. 
Under the same illumination, any population in $\ket{1}$ will end up in the shelving state with probability \beq P_W \sim P_{{\rm ex}} \frac{\Gamma \gamma}{\delta^2}\frac{\mu'^2}{\mu^2} \sim \frac{\Gamma \gamma}{\delta^2 \epsilon^2}\frac{\mu'^2}{\mu^2} \sim \frac{(10^{-3}-10^{-4})}{\epsilon^2}, \eeq Note that the measurement fidelity is unaffected by photo-bleaching if we verify that the center is optically active by observing fluorescence either directly from the $\ket{0}$ state or after applying a multiplexed ESR pulse to the $\ket{1}$ states. Ultimately, in this model the only effect of the shelving state is to reduce the success rate for entanglement generation and measurement by of order a few percent. Finally, we should note that the effect of the shelving state on the nuclear spin state is currently not known, and could potentially complicate the sequence of operations necessary upon detection of a shelving event. \end{document}
\begin{document} \title{Input-Output Formalism for Few-Photon Transport: A Systematic Treatment Beyond Two Photons} \author{Shanshan Xu} \affiliation{Department of Physics, Stanford University, Stanford, California 94305} \author{Shanhui Fan} \email{[email protected]} \affiliation{Department of Electrical Engineering, Ginzton Laboratory, Stanford University, Stanford, California 94305} \begin{abstract} We provide a systematic treatment of $N$-photon transport in a waveguide coupled to a local system, using the input-output formalism. The main result of the paper is a general connection between the $N$-photon S matrix and the Green functions of the local system. We also show that the computation can be significantly simplified, by exploiting the connectedness structure of both the S matrix and the Green function, and by computing the Green function using an effective Hamiltonian that involves only the degrees of freedom of the local system. We illustrate our formalism by computing $N$-photon transport through a cavity containing a medium with Kerr nonlinearity, with $N$ up to 3. \end{abstract} \maketitle \section{\label{sec:level1} I. Introduction} The capability to create strong photon-photon interaction at a few-photon level in integrated photonic systems is of central importance for quantum information processing. To achieve such a capability, an important approach is to use the so-called waveguide quantum electrodynamics (QED) system, where one confines the photons to a waveguide that is strongly coupled to a local quantum system. Experimentally, the waveguides that have been used for this purpose include optical fibers \cite{adwb}, metallic plasmonic nanowires \cite{amy}, photonic crystal waveguides \cite{lhsj}, and microwave transmission line \cite{wsbf}. 
The local quantum system typically incorporates a variety of quantum multi-level systems such as actual atoms \cite{adwb}, quantum dots~\cite{amy,lhsj}, or microwave qubits \cite{wsbf}, where the strong nonlinearity of these multi-level systems forms the basis for strong photon-photon interactions. These multi-level systems moreover can be embedded in cavity structures to further control their nonlinear properties \cite{bbmbnk,dpa,kp,emf,hbw,lbes}. The rapid experimental developments, in turn, have motivated significant theoretical efforts. From a fundamental physics perspective, the photon-photon interaction is characterized by the multi-photon scattering matrix (S matrix). Therefore, a natural objective for theoretical works is to compute such a multi-photon S matrix. Moreover, from an engineering perspective, the systems considered here are envisioned as devices that process quantum states. To describe these systems as a device one naturally has to specify its input-output relation. The S matrix, which relates the input and output states, therefore provides a natural basis for device engineering as well. Motivated by both the physics and engineering considerations as discussed above, a large body of theoretical works has therefore been devoted to the computation of the S matrix of various waveguide QED systems \cite{crdl,szgm,gmmtg,sf,sfA,fks,lsb,eks,zb,r,dr,koz,zgb,ll,ll2,sfs,rf,jg,ss,zgb2,rf2,sf2}. These computations, however, are limited in two important aspects: (1) All of these computations are carried out for a specific local quantum system. In most of these cases, the methods that were used were tailored to the properties of the specific system. In the wavefunction approach for S matrix calculation \cite{sf,sfA,lsb,r,dr,koz,ll,ll2,jg,zgb2,szgm}, for example, the ansatz for the wavefunction used is specifically related to the local quantum system.
As a result, it has been difficult, from these calculations, to identify the general features of S matrices for waveguide QED systems. (2) With a few exceptions \cite{ss,zgb2}, almost all previous calculations have been carried out for either single or two-photon S matrix. On the other hand, in quantum information processing, there is a strong effort to create and understand highly entangled states with more than two photons \cite{bpd,pdg, zczy}. It is important to understand whether waveguide QED system can be used for such a purpose. Thus, computation of $N$-photon S matrix with $N>2$ is essential. In this paper, we extend the input-output formalism \cite{fks,eks,rf,rf2,gc} to provide a systematic computation of $N$-photon S matrix for waveguide QED system. The main result of the paper is the relation between the $N$-photon scattering matrix, and the Green function of the local system. We prove this result using only a quantum causality condition, without the need of knowing the specific details of the Hamiltonian of the local system. The main result is therefore generally applicable for a large number of waveguide QED systems with different local quantum systems. We also discuss the general connectedness structure of both the S matrix and the Green function, which arise from the local nature of the interaction, and show that such connectedness structure can be used to significantly simplify the computation. Our work represents a significant step forward in the understanding of waveguide QED system. Our results here highlight some of the universal nature of the properties of these strongly correlated systems that has not been emphasized before. As a computational method, our work here leads to an approach for systematic computation for $N$-photon scattering matrix that is directly applicable to a large number of different systems. Our work utilizes the input-output formalism developed in standard quantum optics literature. However, our focus here is different. 
Whereas much of the standard quantum optics literature have focused on computing properties related to an input state that is a coherent state, a thermal state, or a squeezed state, here we focus exclusively on computations for Fock state input. In general, the transport property of Fock states is qualitatively different from that of the coherent state. As a prominent recent example as developed in the context of Boson sampling problem \cite{bs1,bs2}, it has now been recognized that the $N$-photon Fock state transport in a linear waveguide network is computationally hard \cite{bs3}, even though the transport properties of the same network for coherent states are well known. Similarly, in the system that we are discussing here, while many properties of the system in the presence of a coherent state input can be and have been computed with standard quantum optics tools, much less is known about how to compute transport properties of the same system with a $N$-photon Fock state input. The paper is organized as follows. In Section II we briefly review the input-output formalism and derive the quantum causality condition. In Section III we prove certain time-ordering relations, which are the key to compute the S matrix of waveguide photons. In Section IV we prove the connection between the $N$-photon S matrix and the time-ordered local system's Green function. This derivation represents the main result of the paper. To further simplify the calculation, we study the connectedness structure of S matrix in Section V. We also show in Section VI that the system's Green function can be computed with an effective Hamiltonian approach. Finally, in Section VII as an example of the application of this formalism, we calculate the exact $N$-photon S matrix up to $N=3$, when the local system is a cavity containing a medium with Kerr nonlinearity. \section{II. 
A brief review of the input-output formalism} We start with a brief review of the input-output formalism, highlighting only those aspects that will be required for the paper here. More details can be found in \cite{fks,gc}. Following \cite{fks,gc}, we consider the Hamiltonian of a one-mode waveguide coupled to a local system with finite degrees of freedom ($\hbar=1$): \begin{equation}\label{H} H=\int dk\,k\,c_{k}^{\dag}c_k+\xi\int dk\left(c_k^{\dag}a+a^{\dag}c_k\right)+H_{\text{sys}}\,, \end{equation} where $\xi$ is the coupling constant between the waveguide and the system, and is assumed to be frequency independent. $c_k\, (c_k^{\dag})$ is the annihilation (creation) operator of the photon state in the waveguide satisfying the standard commutation relation $[c_k, c_{k'}^{\dag}]=\delta(k-k')$. We consider only a narrow range of frequencies, in which the waveguide dispersion relation can be linearized, and the group velocity of the waveguide is taken to be 1. $a\,(a^{\dag})$ is one of several possible system operators that are assumed to commute with $c_k,\, c_k^{\dag}$. In this section we assume $a$ to be arbitrary. In practice $a$ can either be a bosonic operator describing a cavity mode \cite{ll,ll2,sfs,rf,sf2}, or a spin operator for atom-waveguide interaction \cite{sf,fks,eks,r,zgb}. The aim of the paper is to develop a systematic approach to compute the \textit{N}-photon scattering matrix. 
In general, the $N$-photon S matrix is related to the input and output operators by \cite{fks} \begin{equation}\label{NS} S_{p_1\cdots p_N; k_1 \cdots k_N}= \left( \prod_{i=1}^{N} \int \frac{d t'_i}{\sqrt{2\pi}} e^{i p_i t'_i} \prod_{j=1}^{N} \int \frac{d t_j}{\sqrt{2\pi}} e^{-i k_j t_j}\right) \langle 0| \prod_{i=1}^{N} c_{\text{out}}(t'_i)\prod_{j=1}^{N} c^{\dag}_{\text{in}}(t_j)|0\rangle\,, \end{equation} in which we define the input and output operators as: \begin{eqnarray}\label{dio} c_{\text{in}}(t)&=&\frac{1}{\sqrt{2\pi}}\int\,dk\, c_k(t_0)\,e^{-ik(t-t_0)}\,,\nonumber\\ c_{\text{out}}(t)&=&\frac{1}{\sqrt{2\pi}}\int\,dk\, c_k(t_1)\,e^{-ik(t-t_1)}\,, \end{eqnarray} with $t_0\rightarrow -\infty\,,t_1\rightarrow +\infty$. We note that $c_{\text{in}}(t)$ and $c_{\text{out}}(t)$ consist of Heisenberg operators of waveguide photons at time $-\infty$ and $+\infty$, respectively. They also satisfy the commutation relations \begin{equation}\label{cccc} \left[c_{\text{in}}(t), c_{\text{in}}(t')\right]=\left[c_{\text{out}}(t), c_{\text{out}}(t')\right]=0\,,\,\,\,\,\, \left[c_{\text{in}}(t), c^{\dag}_{\text{in}}(t')\right]=\left[c_{\text{out}}(t), c^{\dag}_{\text{out}}(t')\right]=\delta(t-t')\,. \end{equation} Following the standard procedure \cite{fks,gc}, one can develop the standard input-output formalism that relates $c_{\text{in}}$, $c_{\text{out}}$ and $a$, as: \begin{equation}\label{ior} c_{\text{out}}(t)=c_{\text{in}}(t)-i\sqrt{\gamma}\,a(t)\,, \end{equation} \begin{equation}\label{ain} \frac{d a}{dt}=-i\,\sqrt{\gamma}\,\left[a,a^{\dag}\right]\, c_{\text{in}} -i\,\left[a,\,H_{\text{sys}}-i\frac{\gamma}{2}a^{\dag}a\right]\,, \end{equation} or \begin{equation}\label{gain} \frac{d a}{dt}=-i\,\sqrt{\gamma}\left[a,a^{\dag}\right]\, c_{\text{out}} -i\,\left[a,\,H_{\text{sys}}+i\frac{\gamma}{2}a^{\dag}a\right]\,, \end{equation} where $\gamma=2\pi\xi^2$. 
Integrating (\ref{ain}) and (\ref{gain}) from $t = -\infty$ and $t = \infty$, respectively, result in: \begin{equation}\label{ie1} a(t)=a(-\infty) -i\int_{-\infty}^t d\tau\left[a,\,H_{\text{sys}}-i\frac{\gamma}{2}a^{\dag}a\right]-i\,\sqrt{\gamma}\int_{-\infty}^t d\tau \left[a,a^{\dag}\right]\, c_{\text{in}}\,, \end{equation} \begin{equation}\label{ie2} a(t)=a(+\infty) -i\int_{+\infty}^t d\tau\left[a,\,H_{\text{sys}}+i\frac{\gamma}{2}a^{\dag}a\right]-i\,\sqrt{\gamma}\int_{+\infty}^t d\tau \left[a,a^{\dag}\right]\, c_{\text{out}}\,, \end{equation} where the integrands in (\ref{ie1}) and (\ref{ie2}) are operators at time $\tau$. (\ref{ie1}) and (\ref{ie2}) can be used to prove a quantum causality relation. When using (\ref{ie1}) to evaluate $a(t)$ or $a^{\dag}(t)$, the integral in (\ref{ie1}) should result in an expression that involves only $c_{\text{in}}(\tau)$ and $c^{\dag}_{\text{in}}(\tau)$ with $\tau < t$. Therefore, by the commutation relation (\ref{cccc}) above, one concludes from (\ref{ie1}) that for $t\leqslant t'$, \begin{equation} \left[a(t),\, I(t')\right]=\left[a(-\infty),\,I(t')\right]\,,\,\,\,\,\,\,\,\left[a^{\dag}(t), \,I(t')\right]=\left[a^{\dag}(-\infty),\,I(t')\right]\,, \end{equation} where $I(t')$ is a shorthand notation for the input operators that represent either $c_{\text{in}}(t')$ or $c_{\text{in}}^{\dag}(t')$. On the other hand, the operator $I$ is really a Heisenberg operator at time $-\infty$ as can be seen in (\ref{dio}) above, and hence commute with $a(-\infty)$ and $a^{\dag}(-\infty)$. 
Therefore, we have \begin{equation}\label{cau1} \left[a(t), \,I(t')\right] =\left[a^{\dag}(t), I(t')\right] =0\, \,,\,\,\,\,\,\,\,\text{for}\,\,t\leqslant t'\,. \end{equation} Similarly, one can prove \begin{equation}\label{cau2} \left[a(t),\,O(t')\right]=\left[a^{\dag}(t),\,O(t')\right]=0\,, \,\,\,\,\,\,\,\text{for}\,\,t\geqslant t'\,, \end{equation} where $O(t')$ is a shorthand notation for the output operators that represent either $c_{\text{out}}(t')$ or $c_{\text{out}}^{\dag}(t')$, by utilizing (\ref{ie2}) and the fact that the output operators are really Heisenberg operators at time $+\infty$. Following \cite{gc}, we refer to (\ref{cau1}) and (\ref{cau2}) as the \textit{quantum causality condition}. The operator $a(t)$, which characterizes the physical field in the local system, depends only on the input field $c_{\text{in}}(\tau)$ with $\tau<t$, and generates only the output field $c_{\text{out}}(\tau)$ with $\tau>t$. \section{III Relations involving time-ordered products} Having reviewed some of the basic aspects of the input-output formalism, we now proceed to compute the \textit{N}-photon S matrix as defined in (\ref{NS}). For this purpose, we first consider some of the properties of a time-ordered product involving $a$ and the input or output operators. We note that: \begin{eqnarray}\label{tor} {\cal{T}} a(t)I(t')&=&a(t)I(t')\,,\\ {\cal{T}} a(t)O(t')&=&O(t')a(t)\label{tor2}\,. \end{eqnarray} Take (\ref{tor}) as an example, by definition, ${\cal{T}} a(t)I(t')=a(t)I(t')$ for $t\geqslant t'$; when $t<t'$, by the quantum causality condition of (\ref{cau1}), we have ${\cal{T}} a(t)I(t')=I(t')a(t)=a(t)I(t')$, completing the proof. (\ref{tor2}) can be proved similarly.
More generally, we have the following relation regarding the time-ordered product: \begin{eqnarray}\label{timeorder} {\cal{T}}\prod_{i,j} a(t_i)I(t'_j)&=&\left[{\cal{T}}\prod_{i} a(t_i)\right]\cdot\left[{\cal{T}}\prod_{j}I(t'_j)\right]\,,\\ {\cal{T}}\prod_{i,j} a(t_i)O(t'_j)&=&\left[{\cal{T}}\prod_{j}O(t'_j)\right]\cdot\left[{\cal{T}}\prod_{i} a(t_i)\right]\label{timeorder2}\,, \end{eqnarray} where bracket is used to indicate the range over which the time-ordering is being applied. (\ref{timeorder}) and (\ref{timeorder2}) can be proved in a similar way. Here we show only the proof of (\ref{timeorder}). The proof of (\ref{timeorder}) can be constructed from induction with respect to the number of operators. The base case is already proved in (\ref{tor}). Now suppose (\ref{timeorder}) holds for all cases involving a total number of $N$ operators of $a$ and $I$. Consider a time-ordered product involving $N+1$ operators, if the operator with the largest time label is $a(t_{\text{max}})$, \begin{eqnarray} {\cal{T}}\prod_{I,j} a(t_I)I(t'_j)=a(t_{\text{max}})\left[{\cal{T}}\prod_{i,j} a(t_i)I(t'_j)\right] =a(t_{\text{max}})\left[{\cal{T}}\prod_{i} a(t_i)\right]\cdot\left[{\cal{T}}\prod_{j}I(t'_j)\right] =\left[{\cal{T}}\prod_{I} a(t_I)\right]\cdot\left[{\cal{T}}\prod_{j}I(t'_j)\right]\,,\nonumber\\ \end{eqnarray} where the definition of time-ordered product is used in the first and last steps and the induction hypothesis is used in the second step. 
On the other hand, if the operator with the largest time label is $I(t_{\text{max}})$, we have \begin{eqnarray} {\cal{T}}\prod_{i,J} a(t_i)I(t'_J)&=&I(t_{\text{max}})\left[{\cal{T}}\prod_{i,j} a(t_i)I(t'_j)\right] = I(t_{\text{max}})\left[{\cal{T}}\prod_{i} a(t_i)\right]\cdot\left[{\cal{T}}\prod_{j}I(t'_j)\right]\nonumber\\ &=& \left[{\cal{T}}\prod_{i} a(t_i)\right]\cdot I(t_{\text{max}})\left[{\cal{T}}\prod_{j}I(t'_j)\right] =\left[{\cal{T}}\prod_{i} a(t_i)\right]\cdot\left[{\cal{T}}\prod_{J}I(t'_J)\right]\,,\nonumber\\ \end{eqnarray} where we use the induction hypothesis in the second step and the commutation relation (\ref{cau1}) in the third step. Therefore, (\ref{timeorder}) holds for $N+1$ operators, completing the proof. \section{IV \textit{N}-photon S matrix} Using (\ref{timeorder}) and (\ref{timeorder2}), we now compute the $N$-photon S matrix defined in (2). We first evaluate its Fourier transformation in the time domain \begin{equation}\label{NSt} S_{t'_1\cdots t'_N; t_1\cdots t_N}\equiv\langle 0| \prod_{i=1}^{N} c_{\text{out}}(t'_i)\prod_{j=1}^{N} c^{\dag}_{\text{in}}(t_j)|0\rangle\,. \end{equation} From now on, we refer both (\ref{NS}) and (\ref{NSt}) as S matrix. 
The computation is as follows: \begin{eqnarray} &&S_{t'_1\cdots t'_N; t_1\cdots t_N} \nonumber\\ &=& \langle 0|\left[ {\cal{T}} \prod_{i=1}^N c_{\text{out}}(t_i')\right] \cdot \prod_{j=1}^N c_\text{{in}}^{\dag}(t_j) |0\rangle \nonumber\\ &=& \langle 0| \left[{\cal{T}} \prod_{i=1}^N \left(c_\text{{in}}(t_i')-i\sqrt{\gamma}\,a(t_i') \right)\right] \cdot \prod_{j=1}^N c_\text{{in}}^{\dag}(t_j) |0\rangle\nonumber\\ &=& \sum_{M=0}^N \sum_{B_M} (-i\sqrt{\gamma})^M \langle 0| \left[{\cal{T}} \prod_{s=1}^{M} a(t_{B_M(s)}')\right] \cdot \left[{\cal{T}} \prod_{m=1}^{N-M} c_\text{{in}}(t_{B_M^c(m)}')\right] \cdot \prod_{j=1}^N c_\text{{in}}^{\dag}(t_j) |0\rangle\nonumber\\ &=& \sum_{M=0}^N (-i\sqrt{\gamma})^M \sum_{B_M,D_M} \langle 0| {\cal{T}}\prod_{s=1}^{M} a(t_{B_M(s)}') \prod_{r=1}^{M} c_\text{{in}}^{\dag}(t_{D_M(r)}) |0\rangle \sum_P \prod_{m=1}^{N-M} \delta\left(t_{B_M^c(m)}'-t_{PD_M^c(m)}\right) \nonumber\\ &=& \sum_{M=0}^N (-i\sqrt{\gamma})^M \sum_{B_M,D_M} \langle 0| {\cal{T}} \prod_{s=1}^{M} a(t_{B_M(s)}')\prod_{r=1}^{M} \left(c_{\text{out}}^{\dag}(t_{D_M(r)}) - i\sqrt{\gamma}\, a^{\dag}(t_{D_M(r)}) \right) |0\rangle\sum_P \prod_{m=1}^{N-M} \delta\left(t_{B_M^c(m)}'-t_{PD_M^c(m)}\right) \nonumber\\ &=& \sum_{M=0}^N (-\gamma)^{M} \sum_{B_M,D_M} \langle 0| {\cal{T}} \prod_{s=1}^{M} a(t_{B_M(s)}') \prod_{r=1}^{M} a^{\dag}(t_{D_M(r)})|0\rangle \sum_P \prod_{m=1}^{N-M} \delta\left(t_{B_M^c(m)}'-t_{PD_M^c(m)}\right). \end{eqnarray} In the first step above, we introduce the time-ordering operation for the product of $c_\text{{out}}$ operators since these operators commute with one another. In the second step, we use the input-output relation (\ref{ior}). In the third step, we expand the product in the bracket. In this derivation, for a given subset $B$ of $\{1...N\}$, we use $B^c$ to represent its corresponding complementary subset, and $B(s)$ to represent its $s$-th element. The summation is over all subsets of $\{1...N\}$. $B_M$ is a subset with $M$ elements. 
We use the time-ordering relation (\ref{timeorder}) to place all $c_\text{{in}}$ operators to the right of the $a$ operators. In the fourth step, we first remove the time-ordering operation for the product of $c_\text{{in}}$ and then contract the $c_\text{{in}}$ operators with the $c_{\text{in}}^{\dag}$ operators. Since there are $N-M$ $c_\text{{in}}$ and $N$ $c_\text{{in}}^{\dag}$ operators, in each term in the resulting summation we select $M$ $c_\text{{in}}^{\dag}$ operators as indexed by a $M$-element subset $D_M$, and perform a full contraction using the remaining $N-M$ $c_\text{{in}}^{\dag}$ operators. Such contraction results in a summation over all possible permutations $P$ of $D_M^c$. Finally, we restore the time-ordering operation on the product of $M$ $c_\text{{in}}^{\dag}$ operators. The use of (\ref{timeorder}) then results in a time-ordered product of all the $a$ and $c_\text{{in}}^{\dag}$ operators. In the fifth step, we again use the input-output relation (\ref{ior}). In the last step, we expand the product involving $a^{\dag}$, and then apply (\ref{timeorder2}) to each term in the product expansion. For every term that contains at least one $c_{\text{out}}^{\dag}$ operators, the use of (\ref{timeorder2}) resulting in such output operators being placed on the left-most positions of the operator product, and hence such a term vanishes. Therefore, we obtain the first main result of the paper: \begin{equation}\label{central} S_{t'_1\cdots t'_N; t_1\cdots t_N} =\sum_{M=0}^N (-\gamma)^{M} \sum_{B_M,D_M} \langle 0| {\cal{T}} \prod_{s=1}^{M} a(t_{B_M(s)}') \prod_{r=1}^{M} a^{\dag}(t_{D_M(r)})|0\rangle \sum_P \prod_{m=1}^{N-M} \delta\left(t_{B_M^c(m)}'-t_{PD_M^c(m)}\right). 
\end{equation} We define the time-ordered 2M-point Green function \begin{equation}\label{Gs} G(t'_1\cdots t'_M; t_1\cdots t_M)\equiv (-\gamma)^{M} \langle 0| {\cal{T}}a(t'_1)\cdots a(t'_M)a^{\dag}(t_1)\cdots a^{\dag}(t_M)|0\rangle\,, \end{equation} and its Fourier transformation \begin{equation}\label{NGP} G(p_1\cdots p_M; k_1\cdots k_M)\equiv \left( \prod_{i=1}^M \int \frac{d \,t'_i}{\sqrt{2\pi}} e^{i p_i t'_i} \prod_{j=1}^M \int \frac{d\, t_j}{\sqrt{2\pi}} e^{-i k_j t_j}\right) G(t'_1\cdots t'_M; t_1\cdots t_M)\,. \end{equation} From (\ref{central}), the \textit{N}-photon S matrix (\ref{NS}) in the frequency domain is \begin{equation}\label{cenm} S_{p_1\cdots p_N; k_1\cdots k_N}=\sum_{M=0}^N \sum_{B_M,D_M} G\left(p_{B_M}; k_{D_M}\right) \sum_P \prod_{m=1}^{N-M} \delta\left(p_{B_M^c(m)}-k_{PD_M^c(m)}\right)\,, \end{equation} where we use the shorthand notation $p_{B_M}\equiv \{p_i|i\in {B_M}\}$ and $k_{D_M}\equiv \{k_i|i\in {D_M}\}$. In (\ref{cenm}), $ k_{PD_M^c(m)}$ represents the frequencies of the incoming photons that bypass the local system. These photons do not change their frequencies, as signified by the $\delta$ functions in (\ref{cenm}). Whereas $k_{D_M}$ represents the frequencies of the incoming photons that enter and are scattered by the local system. Each term in (\ref{cenm}) can be represented diagrammatically. For the case with $N=5$, for example, we plot all different classes of diagrams in Fig.\ref{fig1}. (We summarize the definitions of all diagrams used in the paper in Fig.\ref{fig2}). The summation in (\ref{cenm}) then represents the summation of all such diagrams as shown in Fig.\ref{fig1}, each diagram containing a single Green function part, and the rest are $\delta$ functions. \begin{figure} \caption{ The diagrammatic representation of all six classes of terms that arise in the computation of S matrix for $N=5$ photons. } \label{fig1} \end{figure} \begin{figure} \caption{ Various diagrams used in the paper. 
The corresponding mathematical definition is shown on the right. The legs to the left (right) represent input (output) momenta. There is no distinction in the ordering among the input or output momenta. } \label{fig2} \end{figure} Our main result (\ref{cenm}) directly reduces the computation of the $N$-photon S matrix to the calculation of the Green function of the \textit{local system}. The result here is related to, but not identical to, the LSZ reduction approach as discussed in \cite{sfs}. In \cite{sfs}, the $N$-photon S matrix is first reduced with the LSZ reduction to the calculation of the Green function of the \textit{waveguide} photons, which is then related to the Green function of the local system after integrating out of the waveguide photon fields. In contrast, in our derivation the reduction to the Green function of the local system is obtained directly with input-output formalism, which represents a more direct approach. Also, the proof here is very general. It uses only the quantum causality relation without any need for knowing the details of the Hamiltonian of the local system, which again points to the power of the input-output formalism. \section{V The connectedness structure of \textit{N}-photon S matrix} Our main result (\ref{central}) and (\ref{cenm}) reduce the problem of computing $N$-photon S matrix, to the calculation of Green functions of the local system. Such a calculation, moreover, can be significantly simplified exploiting the connectedness structure of the S matrix and the Green function. Since the system is time-translation invariant, $G(p_1,\cdots,p_N;k_1,\cdots,k_N)$ must be proportional to $\delta\left(\sum_{i=1}^N p_i - \sum_{i=1}^N k_i\right)$. In general, $G(p_1,\cdots,p_N;k_1,\cdots,k_N)$ can be expressed as a sum of various terms containing products of several $\delta$ functions \cite{vpn}. 
Among all these terms, we define the term that contains only $\delta\left(\sum_{i=1}^N p_i - \sum_{i=1}^N k_i\right) $ and no other $\delta$ functions as the \textit{connected} Green function, $G^C(p_1,\cdots,p_N;k_1,\cdots,k_N)$. Similarly, as can be seen in (\ref{cenm}), the S matrix can also be organized as summing over various terms containing products of several $\delta$ functions. Among all these terms, we can again define the term that contains only $\delta \left(\sum_{i=1}^N p_i - \sum_{i=1}^N k_i\right)$ and no other $\delta$ function as the \textit{connected} part of the S matrix, $S^C_{p_1\cdots p_N;k_1\cdots k_N}$. Our main result (\ref{cenm}) then immediately implies that for $N>1$ \begin{equation}\label{SCGC} S^C_{p_1\cdots p_N;k_1\cdots k_N} = G^C(p_1,\cdots,p_N;k_1,\cdots,k_N)\,, \end{equation} and for $N=1$ \begin{equation}\label{S1G1} S_{p_1;k_1} = \delta(p_1-k_1)+G^C(p_1;k_1)\,. \end{equation} Therefore, for $N>1$, we use the same diagrammatic representation for $S^C$ and $G^C$ (Fig.\ref{fig2}). For $N=1$, the diagrammatic representation of $S^C$ is included in Fig.\ref{fig2} and the result of (\ref{S1G1}) can then be represented in Fig.\ref{fig3}. \begin{figure} \caption{ The single-photon S matrix is the sum of a $\delta$ function and a two-point Green function. } \label{fig3} \end{figure} Moreover, it is known that a Green function in general can be cluster-decomposed as \cite{vpn, QFT2}: \begin{equation}\label{dGC} G(p_1,\cdots, p_N;k_1,\cdots, k_N) = \sum_{\cal{B}} \sum_P \prod_{i=1}^{M_{\cal{B}}} G^C\left(p_{{\cal{B}}_i};k_{{\cal{B}}P_i}\right) \end{equation} where $\cal{B}$ is a partition of an ordered list $\{1,2,\cdots, N\}$ into a collection of subsets. (For example, for a list $\{1,\cdots, 5\}$, a partition results in $\{1\}, \{2,4\}, \{3,5\}$.) ${\cal{B}}P$ is the same partition as $\cal{B}$ but acts on the permutated ordered list $\{P(1), P(2), \cdots, P(N)\}$. 
(For example, the same $\cal{B}$ as in the example above, acting on the permutation $\{5,3,4,2,1\}$, would result in $\{5\}, \{3,2\}, \{4,1\}$.)
The results here thus reduce the computation of the $N$-photon S matrix, to the evaluation of the connected $2N$-point Green function of the local system. \begin{figure} \caption{ The cluster decomposition of two- and three-photon S matrices. } \label{fig4} \end{figure} \section{VI Compute system's Green function with the effective hamiltonian for the system} In this section, we will prove that the Green function of (\ref{Gs}) can be computed using the effective Hamiltonian of the local system. The main result in this section is \begin{equation} \label{id} G({t'_1\cdots t'_N; t_1\cdots t_N})=\widetilde{G}({t'_1\cdots t'_N; t_1\cdots t_N})\,, \end{equation} where \begin{equation}\label{aGs} \widetilde{G}({t'_1\cdots t'_N; t_1\cdots t_N})\equiv (-\gamma)^N \langle 0| {\cal{T}}\,\widetilde{a}(t'_1) \cdots \widetilde{a}(t'_N)\widetilde{a^{\dag}}(t_1)\cdots \widetilde{a^{\dag}}(t_N) |0\rangle\,, \end{equation} with operators \begin{equation}\label{effa} \widetilde{a}(t)=e^{i H_{\text{eff}} t}\, a\, e^{-i H_{\text{eff}} t}\,,\,\,\,\,\,\,\,\,\widetilde{a^{\dag}}(t)=e^{i H_{\text{eff}} t} \,a^{\dag}\, e^{-i H_{\text{eff}} t}\,, \end{equation} where \begin{equation}\label{eff} H_{\text{eff}}\equiv H_{\text{sys}}-i\frac{\gamma}{2}a^{\dag}a\, \end{equation} is the effective Hamiltonian for the system. With the identity (\ref{id}), the computation of the Green function is simplified since no operators of waveguide photons are involved. We only need to solve a system which has a finite, and typically small, number of degrees of freedom. (\ref{id}) can be proved in the path integral formulation. 
The proof is summarized as: \begin{eqnarray}\label{GpG}\label{gpi} G({t'_1\cdots t'_N; t_1\cdots t_N})&=&(-\gamma)^N\frac{\int {\cal{D}}\left[c_k,c^{*}_k, a,a^*\right]a(t'_1) \cdots a(t'_N)a^{*}(t_1)\cdots a^{*}(t_N) e^{i\int dt L}}{\int {\cal{D}}\left[c_k,c^{*}_k,a, a^* \right] e^{i\int dt L}}\nonumber\\ &=&(-\gamma)^N\frac{\int {\cal{D}}\left[a,a^*\right]a(t'_1) \cdots a(t'_N)a^{*}(t_1)\cdots a^{*}(t_N) e^{i\int dt L_{\text{eff}}}}{\int {\cal{D}}\left[a, a^* \right] e^{i\int dt L_{\text{eff}}}}\nonumber\\ &=&\widetilde{G}({t'_1\cdots t'_N; t_1\cdots t_N})\,. \end{eqnarray} The first step in (\ref{gpi}) follows \cite{ps} to express Green function (\ref{Gs}) by path integral. Here for simplicity, we assume the local system is characterized only by $a$ and $a^*$. $L$ is the Lagrangian associated with the full Hamiltonian (\ref{H}): \begin{equation} L=\int dk \,c_k^{*}(i\partial_t-k)c_k-\xi\int dk\left(c_k^{*}a+a^{*}c_k\right)+L_{\text{sys}}\,, \end{equation} in which $L_{\text{sys}}$ is the local system's Lagrangian obtained by Legendre transformation on the system's Hamiltonian $H_{\text{sys}}$. The second step in (\ref{gpi}) is the key of the proof. 
As was done in the standard approach involving generating functional, we introduce the propagator of free waveguide photon \begin{equation} G^{(0)}_k(t-t')\equiv\int \frac{d \omega}{2\pi}\,e^{-i\omega (t-t')}\frac{i}{\omega-k+i 0^+} \end{equation} and integrate out the waveguide degrees of freedom: \begin{eqnarray} \int {\cal{D}}\left[c_k, c_k^* \right] e^{i\int dt L}&=& e^{i\int dt L_{\text{sys}}}\int {\cal{D}}\left[c_k, c_k^* \right] e^{i\int dt \int dk \left[c_k^{*}(t)(i\partial_t-k)c_k(t)-\xi c_k^{*}(t)a(t)-\xi a^{*}(t)c_k(t)\right]}\nonumber\\ &=&e^{i\int dt L_{\text{sys}}}\int {\cal{D}}\left[c_k, c_k^* \right] e^{-\xi^2\int dt \int dt' a^*(t)W(t-t')a(t')}\,, \end{eqnarray} where \begin{eqnarray} W(t-t')&=&\int dk\, G^{(0)}_k(t-t')=i\int \frac{d \omega}{2\pi}\,e^{-i\omega (t-t')}\int dk\left[\frac{{\cal{P}}}{\omega-k}-i\pi\delta(\omega-k)\right]\nonumber\\ &=&\pi \int \frac{d \omega}{2\pi}\,e^{-i\omega (t-t')}=\pi\delta(t-t')\,. \end{eqnarray} As a result, we obtain the effective Lagrangian \begin{equation} \label{Leff} L_{\text{eff}}= L_{\text{sys}}+i\pi\xi^2 a^{*}a=L_{\text{sys}}+i\frac{\gamma}{2} a^{*}a\,. \end{equation} Here, the imaginary part of the effective Lagrangian arises since the waveguide degrees of freedom that we are integrating out forms a continuum. In the last step of (\ref{gpi}), the path integral to the system's degrees of freedom with the effective Lagrangian (\ref{Leff}) corresponds exactly to the alternative Green function (\ref{aGs}) in the Heisenberg picture \cite{ps}. The effective Hamiltonian (\ref{eff}) is obtained from the effective Lagrangian (\ref{Leff}) by Legendre transformation. Since in practice, all evaluations of the Green function will be carried out using the effective Hamiltonian (\ref{eff}), in the following we will no longer make the distinction between the Green function of system defined in the full coupled Hamiltonian versus the effective Hamiltonian, i.e. 
we will no longer make distinction between the left and right hand side of (\ref{id}). In what follows, the time evolution of all system operators is considered as that in (\ref{effa}). (\ref{id}) allows us to compute the full Green function of the local system. To compute the S matrix, we only need the connected part of the Green function. In practice, the connected part of the Green function may actually be obtained in a simpler fashion without the need to evaluate and perform a cluster decomposition of the full Green function. This will be illustrated in the example below. \section{VII Example: three-photon S matrix with Kerr nonlinear cavity} As an example of the application of the formalism developed in this paper, we compute the S matrix of three-photon transport in a single-mode waveguide side-coupled to a ring resonator incorporating Kerr nonlinear media. The full Hamiltonian has the same form as (\ref{H}) with the specific form of $H_{\text{sys}}$: \begin{equation} H_{\text{sys}}=\omega_c \,a^{\dag}a+\frac{\chi}{2}\,a^{\dag}a^{\dag}aa\, \end{equation} where $a$ is the annihilation operator of cavity photon satisfying the standard commutator relation $\left[a, a^{\dag}\right]=1$. Let $\alpha\equiv \omega_c-i\frac{\gamma}{2}$, the effective Hamiltonian (\ref{eff}) in this case is \begin{equation} H_{\text{eff}}=\alpha \,a^{\dag}a+\frac{\chi}{2}\,a^{\dag}a^{\dag}aa\,, \end{equation} and can be diagonalized as \begin{equation}\label{eigen} H_{\text{eff}}|n\rangle=\left[\alpha n+\frac{\chi}{2}n(n-1)\right]|n\rangle\,. \end{equation} Use the formalism as described above, we now compute the S matrix for up to three photons. From (\ref{1t})-(\ref{3tC}) we only need the connected S matrix for up to three photons, which is the focus of the calculation here. We consider single photon S matrix first. 
Using (\ref{id}), the two-point Green function is computed as \begin{eqnarray}\label{G1} G(t'; t)=-\gamma\sum_{n}\langle 0|{a}(t')|n\rangle\langle n|{a^{\dag}}(t)|0\rangle\theta(t'-t) =-\gamma\,e^{-i \alpha(t'-t)}\theta(t'-t)\,, \end{eqnarray} where a completeness set of states of the system is inserted. To obtain the final result, we note that only the single photon state contributes to the summation. The single photon S matrix is then: \begin{equation}\label{sS} S_{p;k}=\frac{k-\omega_c-i\gamma/2}{k-\omega_c+i\gamma/2}\delta(p-k)=\left[1+s_k\right]\delta(p-k)\,, \end{equation} where, for later convenience, we defined \begin{equation} s_k\equiv -\gamma\frac{i}{k-\alpha}\,. \end{equation} To compute the connected two-photon S matrix, we first compute the four-point Green function $G_2(t'_1,t'_2;t_1,t_2)$. Depending on the values of the four time labels, the time ordering operation would give rise non-zero terms that can be classified into two types: $\langle {a}{a^{\dag}}{a}{a^{\dag}}\rangle$ and $\langle {a}{a}{a^{\dag}}{a^{\dag}}\rangle$, i.e. \begin{equation} G(t'_1,t'_2; t_1,t_2)\equiv \sum_{j=1}^2 G^{(j)}(t'_1,t'_2; t_1,t_2)\,, \end{equation} with \begin{eqnarray} G^{(1)}(t'_1,t'_2; t_1,t_2)&=&(-\gamma)^2\sum_{P,Q}\langle 0| {a}(t'_{Q_1}){a^{\dag}}(t_{P_1}){a}(t'_{Q_2}){a^{\dag}}(t_{P_2})|0\rangle\theta(t'_{Q_1}-t_{P_1})\theta(t_{P_1}-t'_{Q_2})\theta(t'_{Q_2}-t_{P_2})\,,\label{cor21}\\ G^{(2)}(t'_1,t'_2; t_1,t_2)&=&(-\gamma)^2\sum_{P,Q}\langle 0| {a}(t'_{Q_1}){a}(t'_{Q_2}){a^{\dag}}(t_{P_1}){a^{\dag}}(t_{P_2})|0\rangle\theta(t'_{Q_1}-t'_{Q_2})\theta(t'_{Q_2}-t_{P_1})\theta(t_{P_1}-t_{P_2})\,,\label{cor22} \end{eqnarray} where both $P$ and $Q$ are permutations over indices $\{1,2\}$. 
We calculate each term by inserting the complete sets of eigenstates, which results in: \begin{eqnarray} \langle 0| {a}(t'_1){a^{\dag}}(t_1){a}(t'_2){a^{\dag}}(t_2)|0\rangle &=&\sum_{m, n,l}\langle 0| {a}(t'_1)|m\rangle\langle m|{a^{\dag}}(t_1)|n\rangle \langle n | {a}(t'_2)| l\rangle\langle l |{a^{\dag}}(t_2)|0\rangle \nonumber\\ &=&\langle 0| {a}(t'_1)|1\rangle\langle 1|{a^{\dag}}(t_1)|0\rangle \langle 0 | {a}(t'_2)| 1\rangle\langle 1 |{a^{\dag}}(t_2)|0\rangle =e^{-i\alpha (t'_1-t_1)}e^{-i\alpha(t'_2-t_2)}\,, \end{eqnarray} and \begin{eqnarray} \langle 0| {a}(t'_1){a}(t'_2){a^{\dag}}(t_1){a^{\dag}}(t_2)|0\rangle &=&\langle 0| {a}(t'_1)|1\rangle\langle 1|{a}(t'_2)|2\rangle\langle 2 |{a^{\dag}}(t_1)| 1\rangle\langle 1 |{a^{\dag}}(t_2)|0\rangle \nonumber\\ &=&2e^{-i\alpha(t'_1-t_2)}e^{-i(\alpha+\chi)(t'_2-t_1)}\,. \end{eqnarray} Then by (\ref{NGP}), the Fourier transformation of (\ref{cor21}) and (\ref{cor22}) are \begin{eqnarray}\label{G2K} G^{(1)}(p_1,p_2; k_1,k_2)&=&-\frac{i}{2\pi}\sum_{P,Q}s_{p_{Q_1}}s_{k_{P_2}}\frac{1}{p_{Q_2}-k_{P_2}-i\epsilon}\delta(p_1+p_2-k_1-k_2)\,,\\ G^{(2)}(p_1,p_2; k_1,k_2)&=&\frac{i}{\pi}\sum_{P,Q}s_{p_{Q_1}}s_{k_{P_2}}\frac{1}{k_1+k_2-2\alpha-\chi}\delta(p_1+p_2-k_1-k_2)\,, \end{eqnarray} where in (\ref{G2K}) an infinitesimal imaginary part in the denominator arises due to the Fourier transform of the $\theta$ function. Moreover, we note that \begin{equation}\label{dirac} \frac{1}{p-k-i\epsilon}=\frac{{\cal{P}}}{p-k}+i\pi\delta(p-k)\,. \end{equation} On the other hand, since the connected two-photon S matrix contains only a single $\delta$ function, when we apply (\ref{dirac}) to (\ref{G2K}), only the principal part contributes to $S^C_{p_1,p_2; k_1,k_2}$. 
Therefore, we have \begin{equation} S^C_{p_1p_2;k_1k_2}=\sum_{j=1}^2 i{\cal{M}}^{(j)}_{p_1p_2;k_1k_2}\delta(p_1+p_2-k_1-k_2)\,, \end{equation} with \begin{eqnarray} i{\cal{M}}^{(1)}_{p_1p_2;k_1k_2}&=&-\frac{i}{2\pi}\sum_{P,Q}s_{p_{Q_1}}s_{k_{P_2}}\frac{{\cal{P}}}{p_{Q_2}-k_{P_2}}\,,\\ i{\cal{M}}^{(2)}_{p_1p_2;k_1k_2}&=&\frac{i}{\pi}\sum_{P,Q}s_{p_{Q_1}}s_{k_{P_2}}\frac{1}{k_1+k_2-2\alpha-\chi}\,. \end{eqnarray} For the connected two-photon S matrix, we can sum over all the permutation terms and obtain a compact form : \begin{equation}\label{2T} S^C_{p_1p_2;k_1k_2}=-\frac{\chi}{\pi \gamma} s_{p_1}s_{p_2}\left(s_{k_1}+s_{k_2}\right)\frac{1}{k_1+k_2-2\alpha-\chi}\delta(p_1+p_2-k_1-k_2)\,. \end{equation} The final result (\ref{2T}) indeed has the exact analytical structure constrained by the cluster decomposition principle \cite{xf}. The only singularities are isolated poles corresponding to one and two-photon excitations in the local system. All principle parts cancel. Finally, we sketch the computation for the connected three-photon S matrix. we start by computing the six-point Green function $G_3(t'_1,t'_2,t'_3;t_1,t_2,t_3)$. Similar to the previous calculation on four-point Green function, terms that have non-zero contributions to the six-point Green function can be classified into five types: $\langle {a}{a^{\dag}}{a}{a^{\dag}}{a}{a^{\dag}}\rangle$, $\langle {a}{a}{a^{\dag}}{a^{\dag}}{a}{a^{\dag}}\rangle$, $\langle {a}{a^{\dag}}{a}{a}{a^{\dag}}{a^{\dag}}\rangle$, $\langle {a}{a}{a^{\dag}}{a}{a^{\dag}}{a^{\dag}}\rangle$, and $\langle {a}{a}{a}{a^{\dag}}{a^{\dag}}{a^{\dag}}\rangle$. 
Taking the last type as an example, we compute its contribution to the six-point Green function as
The final result is summarized as: \begin{equation} S^C_{p_1p_2p_3;k_1k_2k_3}=\sum^5_{j=1}i{\cal{M}}^{(j)}_{p_1p_2p_3;k_1k_2k_3}\delta(p_1+p_2+p_3-k_1-k_2-k_3)\,, \end{equation} where \begin{equation}\label{M1} i{\cal{M}}^{(1)}_{p_1p_2p_3;k_1k_2k_3}=\frac{1}{4\pi^2}\sum_{P,Q}s_{p_{Q_1}}s_{k_{P_2}+k_{P_3}-p_{Q_3}}s_{k_{P_3}} \frac{{\cal{P}}}{p_{Q_3}-k_{P_3}}\frac{{\cal{P}}}{p_{Q_1}-k_{P_1}}\,, \end{equation} \begin{equation} i{\cal{M}}^{(2)}_{p_1p_2p_3;k_1k_2k_3}=\frac{1}{2\pi^2}\sum_{P,Q}s_{p_{Q_1}}s_{k_{P_2}+k_{P_3}-p_{Q_3}}s_{k_{P_3}} \frac{{\cal{P}}}{p_{Q_3}-k_{P_3}}\frac{1}{p_{Q_1}+p_{Q_2}-2\alpha-\chi}\,, \end{equation} \begin{equation} i{\cal{M}}^{(3)}_{p_1p_2p_3;k_1k_2k_3}=-\frac{1}{2\pi^2}\sum_{P,Q}s_{p_{Q_1}}s_{k_{P_2}+k_{P_3}-p_{Q_3}}s_{k_{P_3}} \frac{{\cal{P}}}{p_{Q_1}-k_{P_1}}\frac{1}{k_{P_2}+k_{P_3}-2\alpha-\chi}\,, \end{equation} \begin{equation} i{\cal{M}}^{(4)}_{p_1p_2p_3;k_1k_2k_3}=-\frac{1}{\pi^2}\sum_{P,Q}s_{p_{Q_1}}s_{k_{P_2}+k_{P_3}-p_{Q_3}}s_{k_{P_3}} \frac{1}{p_{Q_1}+p_{Q_2}-2\alpha-\chi}\frac{1}{k_{P_2}+k_{P_3}-2\alpha-\chi}\,, \end{equation} \begin{equation}\label{M5} i{\cal{M}}^{(5)}_{p_1p_2p_3;k_1k_2k_3}=\frac{3i\gamma}{2\pi^2}\sum_{P,Q}s_{p_{Q_1}}s_{k_{P_3}}\frac{1}{k_1+k_2+k_3-3\alpha-3\chi} \frac{1}{p_{Q_1}+p_{Q_2}-2\alpha-\chi}\frac{1}{k_{P_2}+k_{P_3}-2\alpha-\chi}\,. \end{equation} As a check, let $\chi\rightarrow \infty$, only (\ref{M1}) contributes to the connected three-photon S matrix, which agrees with the result in the case of single two-level atom \cite{ss}, as expected. Also, according to the cluster decomposition principle, the connected three-photon S matrix should only contain poles corresponding to the single, two- and three-photon excitations \cite{xf}. In (\ref{M1})-(\ref{M5}), in addition to various poles corresponds to single, two- and three-photon excitations, there are also various singularities associated with the principal parts. 
One can actually prove that these principal parts cancel each other when all permutations are summed together. A systematic treatment of such analytic properties of the connected $N$-photon S matrix is beyond the scope of this paper and will be carried out in future works. \section{Summary and Final Remarks} To summarize, in this paper, using the input-output formalism, we provide a computation of $N$-photon S matrix in waveguide QED systems. The main result here is the connection between the $N$-photon S matrix and the Green function of the local system. We also discuss the connectedness structure of the S matrix and the Green function, and how such structure can be used to simplify the computation. Our results are applicable independent of the details of the local system's Hamiltonian, and therefore point to some universal aspects of the properties of waveguide QED systems. As a computational tool, the results here lead to a powerful scheme for $N$-photon S matrix calculation. Aside from waveguide QED systems, understanding the scattering property of a local quantum system coupled to a continuum has been a problem of fundamental importance in many other branches of physics. For example, in condensed matter physics, the transport properties of a quantum dot can also be formulated in a similar fashion \cite{ma}. We therefore expect our development here to be useful beyond waveguide QED systems. \section{Appendix} Our aim here is to prove the cluster decomposition property of the $N$-photon S matrix (\ref{dSC}). We start from (\ref{cenm}), and use the cluster decomposition property of the Green function (\ref{dGC}) to expand every term in the summation in terms of sum over products of connected Green functions and "bare" $\delta$ functions (i.e. the $\delta$ functions that arise explicitly in (\ref{cenm})). Among all these terms resulting from the expansion, we consider the term as represented by the diagram in the Fig.\ref{fig5} (a). 
This diagram contains a sub-piece with $M$ legs. Within the sub-piece all connected parts are 2$n$-point Green functions with $n \ge 2$. By (\ref{SCGC}) this sub-piece is already in the form of the products of connected S matrices. The rest of this term contains only bare $\delta$ functions. This term obviously arises from the expansion of a term in (\ref{cenm}) containing a $2M$-point Green function. \begin{figure} \caption{ (a) A term in (\ref{cenm} \label{fig5} \end{figure} \begin{figure} \caption{ All the terms that will be summed over with the term represented in Fig.\ref{fig5} \label{fig6} \end{figure} We combine the term as shown in Fig.\ref{fig5} (a), with the terms shown in Fig.\ref{fig6}. These terms were chosen from the expansion of the terms in (\ref{cenm}) containing $2M'$-point Green function with $M'>M$. In selecting these terms, we keep the $M$-leg sub-piece identical in all the terms, and choose only terms where the remaining sub-pieces are either bare $\delta$ functions or two-point Green functions. Repeatedly using (\ref{S1G1}), the summation of all these terms then results in a diagram that has the same structure as Fig.\ref{fig5} (a), but with the bare $\delta$ functions all replaced by the single-photon S matrices, as shown in Fig.\ref{fig5}(b). The summation therefore results in a product of the connected S matrices. Since after the expansion of (\ref{cenm}) in terms of the product of the connected Green's function, every term shows up once and only once in a summation of the form shown in Fig.\ref{fig5} (a) and Fig.\ref{fig6}. We have therefore proved that (\ref{cenm}) can be summed to give the cluster decomposition property of (\ref{dSC}). \end{document}
\begin{document} \baselineskip = 5,2mm \newcommand \ZZ {{\mathbb Z}} \newcommand \NN {{\mathbb N}} \newcommand \QQ {{\mathbb Q}} \newcommand \RR {{\mathbb R}} \newcommand \CC {{\mathbb C}} \newcommand \PR {{\mathbb P}} \newcommand \AF {{\mathbb A}} \newcommand \bcA {{\mathscr A}} \newcommand \bcB {{\mathscr B}} \newcommand \bcC {{\mathscr C}} \newcommand \bcF {{\mathscr F}} \newcommand \bcG {{\mathscr G}} \newcommand \bcK {{\mathscr K}} \newcommand \bcN {{\mathscr N}} \newcommand \bcO {{\mathscr O}} \newcommand \bcP {{\mathscr P}} \newcommand \bcR {{\mathscr R}} \newcommand \bcS {{\mathscr S}} \newcommand \bcT {{\mathscr T}} \newcommand \bcU {{\mathscr U}} \newcommand \bcX {{\mathscr X}} \newcommand \bcY {{\mathscr Y}} \newcommand \bcZ {{\mathscr Z}} \newcommand \catC {{\sf C}} \newcommand \catD {{\sf D}} \newcommand \catF {{\sf F}} \newcommand \catG {{\sf G}} \newcommand \catE {{\sf E}} \newcommand \catS {{\sf S}} \newcommand \catW {{\sf W}} \newcommand \catX {{\sf X}} \newcommand \catY {{\sf Y}} \newcommand \catZ {{\sf Z}} \newcommand \goa {{\mathfrak a}} \newcommand \gob {{\mathfrak b}} \newcommand \goc {{\mathfrak c}} \newcommand \gom {{\mathfrak m}} \newcommand \gop {{\mathfrak p}} \newcommand \goT {{\mathfrak T}} \newcommand \goC {{\mathfrak C}} \newcommand \goD {{\mathfrak D}} \newcommand \goM {{\mathfrak M}} \newcommand \goN {{\mathfrak N}} \newcommand \goP {{\mathfrak P}} \newcommand \goS {{\mathfrak S}} \newcommand \goH {{\mathfrak H}} \newcommand \uno {{\mathbbm 1}} \newcommand \Le {{\mathbbm L}} \newcommand \Ta {{\mathbbm T}} \newcommand \Spec {{\rm {Spec}}} \newcommand \bSpec {{\bf {Spec}}} \newcommand \Proj {{\rm {Proj}}} \newcommand \bProj {{\bf {Proj}}} \newcommand \Div {{\rm {Div}}} \newcommand \Pic {{\rm {Pic}}} \newcommand \Jac {{{J}}} \newcommand \Alb {{\rm {Alb}}} \newcommand \NS {{{NS}}} \newcommand \Corr {{Corr}} \newcommand \Chow {{\mathscr C}} \newcommand \Sym {{\rm {Sym}}} \newcommand \Alt {{\rm {Alt}}} \newcommand \Prym {{\rm {Prym}}} 
\newcommand \cone {{\rm {cone}}} \newcommand \eq {{\rm {eq}}} \newcommand \length {{\rm {length}}} \newcommand \cha {{\rm {char}}} \newcommand \ord {{\rm {ord}}} \newcommand \eff {{\rm {eff}}} \newcommand \shf {{\rm {a}}} \newcommand \spd {{\rm {s}}} \newcommand \glue {{\rm {g}}} \newcommand \equi {{\rm {equi}}} \newcommand \tr {{\rm {tr}}} \newcommand \ab {{\rm {ab}}} \newcommand \add {{\rm {ad}}} \newcommand \Fix {{\rm {Fix}}} \newcommand \pty {{\mathbf P}} \newcommand \type {{\mathbf T}} \newcommand \prim {{\rm {prim}}} \newcommand \trp {{\rm {t}}} \newcommand \cat {{\rm {cat}}} \newcommand \deop {{\Delta \! }^{op}\, } \newcommand \pr {{\rm {pr}}} \newcommand \ev {{\it {ev}}} \newcommand \defect {{\rm {def}}} \newcommand \aff {{\rm {aff}}} \newcommand \Const {{\rm {Const}}} \newcommand \interior {{\rm {Int}}} \newcommand \sep {{\rm {sep}}} \newcommand \td {{\rm {tdeg}}} \newcommand \tdf {{\mathbf {t}}} \newcommand \num {{\rm {num}}} \newcommand \conv {{\it {cv}}} \newcommand \alg {{\rm {alg}}} \newcommand \im {{\rm im}} \newcommand \rat {{\rm rat}} \newcommand \stalk {{\rm st}} \newcommand \SG {{\rm SG}} \newcommand \term {{*}} \newcommand \Pre {{\mathscr P}} \newcommand \Funct {{\rm Funct}} \newcommand \Sets {{\sf Set}} \newcommand \op {{\rm op}} \newcommand \Hom {{\rm Hom}} \newcommand \uHom {{\underline {\rm Hom}}} \newcommand \HilbF {{\it Hilb}} \newcommand \HilbS {{\rm Hilb}} \newcommand \Sch {{\sf Sch}} \newcommand \cHilb {{\mathscr H\! }{\it ilb}} \newcommand \cHom {{\mathscr H\! }{\it om}} \newcommand \cExt {{\mathscr E\! 
}{\it xt}} \newcommand \colim {{{\rm colim}\, }} \newcommand \End {{\rm {End}}} \newcommand \coker {{\rm {coker}}} \newcommand \id {{\rm {id}}} \newcommand \van {{\rm {van}}} \newcommand \spc {{\rm {sp}}} \newcommand \Ob {{\rm Ob}} \newcommand \Aut {{\rm Aut}} \newcommand \cor {{\rm {cor}}} \newcommand \res {{\rm {res}}} \newcommand \tors {{\rm {tors}}} \newcommand \coeq {{{\rm coeq}\, }} \newcommand \Gal {{\rm {Gal}}} \newcommand \PGL {{\rm {PGL}}} \newcommand \Gr {{\rm {Gr}}} \newcommand \Bl {{\rm {Bl}}} \newcommand \supp {{\rm Supp}} \newcommand \Sing {{\rm {Sing}}} \newcommand \spn {{\rm {span}}} \newcommand \Nm {{\rm {Nm}}} \newcommand \PShv {{\sf PShv}} \newcommand \Shv {{\sf Shv}} \newcommand \Stk {{\sf Stk}} \newcommand \sm {{\rm sm}} \newcommand \reg {{\rm reg}} \newcommand \nor {{\rm nor}} \newcommand \noe {{\rm Noe}} \newcommand \Sm {{\sf Sm}} \newcommand \Reg {{\sf Reg}} \newcommand \Nor {{\sf Nor}} \newcommand \Seminor {{\sf sNor}} \newcommand \Noe {{\sf Noe}} \newcommand \inv {{\rm {inv}}} \newcommand \hc {{\rm {hc}}} \newcommand \codim {{\rm {codim}}} \newcommand \ptr {{\pi _2^{\rm tr}}} \newcommand \Vect {{\mathscr V\! 
ect}} \newcommand \ind {{\rm {ind}}} \newcommand \Ind {{\sf {Ind}}} \newcommand \Gm {{{\mathbb G}_{\rm m}}} \newcommand \trdeg {{\rm {tr.deg}}} \newcommand \seminorm {{\rm {sn}}} \newcommand \norm {{\rm {norm}}} \newcommand \Mon {{\sf Mon }} \newcommand \Mod {{\sf Mod}} \newcommand \Ab {{\sf Ab }} \newcommand \tame {\rm {tame }} \newcommand \prym {\tiny {\Bowtie }} \newcommand \znak {{\natural }} \newcommand \et {\rm {\acute e t}} \newcommand \Zar {\rm {Zar}} \newcommand \Nis {\rm {Nis}} \newcommand \Nen {\rm {N\acute en}} \newcommand \cdh {\rm {cdh}} \newcommand \h {\rm {h}} \newcommand \con {\rm {conn}} \newcommand \sing {{\rm {sing}}} \newcommand \Top {{\sf {Top}}} \newcommand \Ringspace {{\sf {Ringspace}}} \newcommand \qand {{\quad \hbox{and}\quad }} \newcommand \qqand {{\quad \hbox{and}\quad }} \newcommand \heither {{\hbox{either}\quad }} \newcommand \qor {{\quad \hbox{or}\quad }} \newcommand \Cycl {{\it Cycl }} \newcommand \PropCycl {{\it PropCycl }} \newcommand \cycl {{\it cycl }} \newcommand \PrimeCycl {{\it PrimeCycl }} \newcommand \PrimePropCycl {{\it PrimePropCycl }} \mathchardef\mhyphen="2D \newtheorem{theorem}{Theorem} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{corollary}[theorem]{Corollary} \newtheorem{proposition}[theorem]{Proposition} \newtheorem{remark}[theorem]{Remark} \newtheorem{definition}[theorem]{Definition} \newtheorem{conjecture}[theorem]{Conjecture} \newtheorem{example}[theorem]{Example} \newtheorem{question}[theorem]{Question} \newtheorem{warning}[theorem]{Warning} \newtheorem{assumption}[theorem]{Assumption} \newtheorem{fact}[theorem]{Fact} \newtheorem{crucialquestion}[theorem]{Crucial Question} \newcommand \lra {\longrightarrow} \newcommand \hra {\hookrightarrow} \def\color{blue}{\color{blue}} \def\color{red}{\color{red}} \def\color{green}{\color{green}} \newenvironment{pf}{\par\noindent{\em Proof}.}{ \framebox(6,6) \par } \title[Tangent spaces to zero-cycles] {\bf The tangent space to the space of 0-cycles} \author{Vladimir 
Guletski\u \i } \date{07 March 2018} \begin{abstract} \noindent Let $S$ be a Noetherian scheme, and let $X$ be a scheme over $S$, such that all relative symmetric powers of $X$ over $S$ exist. Assume that either $S$ is of pure characteristic $0$ or $X$ is flat over $S$. Assume also that the structural morphism from $X$ to $S$ admits a section, and use it to construct the connected infinite symmetric power $\Sym ^{\infty }(X/S)$ of the scheme $X$ over $S$. This is a commutative monoid whose group completion $\Sym ^{\infty }(X/S)^+$ is an abelian group object in the category of set valued sheaves on the Nisnevich site over $S$, which is known to be isomorphic, as a Nisnevich sheaf, to the sheaf of relative $0$-cycles in Rydh's sense. Being restricted on seminormal schemes over $\QQ $, it is also isomorphic to the sheaf of relative $0$-cycles in the sense of Suslin-Voevodsky and Koll\'ar. In the paper we construct a locally ringed Nisnevich-\'etale site of $0$-cycles $\Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et }$, such that the category of \'etale neighbourhoods, at each point $P$ on it, is cofiltered. This yields the sheaf of K\"ahler differentials $\Omega ^1_{\Sym ^{\infty }(X/S)^+}$ and its dual, the tangent sheaf $T_{\Sym ^{\infty }(X/S)^+}$ on the space $\Sym ^{\infty }(X/S)^+$. Applying the stalk functor, we obtain the stalk $T_{\Sym ^{\infty }(X/S)^+,P}$ of the tangent sheaf at $P$, whose tensor product with the residue field $\kappa (P)$ is our tangent space to the space of $0$-cycles at $P$. 
\end{abstract} \subjclass[2010]{14A20, 14C25, 14D23, 14J29} \keywords{Sheaves, atlases, ringed sites, K\"ahler differentials, tangent sheaf, tangent space, \'etale neighbourhood, cofiltered categories, stalk functor, toposes, locally Noetherian schemes, Nisnevich topology, symmetric powers, free monoids, group completions, relative algebraic cycles, fat points, pullback of relative cycles, relative $0$-cycles, rational equivalence, free rational curve, Bloch's conjecture, surfaces of general type} \maketitle \tableofcontents \section{Introduction} \label{intro} The aim of this paper is to make precise the intuitive feeling that rational equivalence of $0$-cycles on an algebraic variety is the same as rational connectedness of the corresponding points on the group completed infinite symmetric power of that variety. To be more precise, let $X$ be a smooth projective variety over a field $k$, and assume for simplicity that $k$ is algebraically closed of zero characteristic. Fix a point on $X$ and use it to embed the $d$-th symmetric power into the $(d+1)$-th symmetric power of $X$. Passing to the colimit, we obtain the infinite connective symmetric power $\Sym ^{\infty }(X)$ of the variety $X$ over $k$. Looking at this infinite symmetric power as a commutative monoid, we can consider its group completion $\Sym ^{\infty }(X)^+$ in the category of groups. If now $P$ and $Q$ are closed points on $X$, they can be also considered as elements of the group completed symmetric power $\Sym ^{\infty }(X)^+$. Then $P$ is rationally equivalent to $Q$ on $X$ if and only if one can draw a rational curve through $P$ and $Q$ on $\Sym ^{\infty }(X)^+$.
This philosophy tracks back through the cult paper by Mumford, \cite{Mumford}, to Francesco Severi and possibly earlier, but it does not give us too much, as the object $\Sym ^{\infty }(X)^+$ is not a variety, and it is not clear what could be a rational curve on it and, more importantly, an appropriate deformation theory of rational curves on the object $\Sym ^{\infty }(X)^+$ in the style of Koll\'ar's book \cite{KollarRatCurvesOnVar}. Though Roitman had managed working with the group $\Sym ^{\infty }(X)^+$ as a geometrical object replacing it by the products $\Sym ^d(X)\times \Sym ^d(X)$, see \cite{Roitman1} and \cite{Roitman2}, his approach seems to be a compromise, which is not amazing as the necessary technique to deform weird objects was not developed in the early seventies. So, this is our aim in this paper to develop a technical foundation of deformation theory of rational curves on $\Sym ^{\infty }(X)^+$, as we see it, and now we are going to explain and justify the concepts promoted in the paper. First of all, we should ask ourselves what is the broadest notion of a geometrical object nowadays? One possible answer might be that a geometrical object is a locally ringed site whose Grothendieck topology is of some geometric nature. On the other hand, whereas the monoid $\Sym ^{\infty }(X)$ is an ind-scheme, so can be managed in terms of schemes, the group completion $\Sym ^{\infty }(X)^+$ clearly requires a spacewalk in the category of sheaves on schemes with an appropriate topology, such as \'etale topology or maybe the better Nisnevich one. Therefore, we choose that our initial environment is the category of set valued Nisnevich sheaves on locally Noetherian schemes over a base scheme $S$, and the latter will be always Noetherian. But sheaves on a site are still not geometrical enough. 
To produce geometry on a sheaf $\bcX $ we suggest to use the notion of an {\it atlas}, which roughly means that we have a collection of schemes $X_i$ and morphisms of sheaves $X_i\to \bcX $, such that the induced morphism from the coproduct $\coprod _iX_i$ to $\bcX $ is an effective epimorphism (see {\footnotesize{\tt nLab}}). Sheaves with atlases will be called {\it spaces}. The idea of an atlas gives us a possibility to speak about whether a morphism from a scheme to a Nisnevich sheaf $\bcX $ is \'etale with regard to a given atlas on $\bcX $. A Nisnevich-\'etale site $\bcX _{\Nis \mhyphen \et }$ is then the site whose underlying category is the category of morphisms from schemes to $\bcX $, which are \'etale with regard to the atlas on $\bcX $, and whose topology is the restriction of the Nisnevich topology on schemes. For the local study, let $P$ be a point on $\bcX $, i.e. a morphism from the spectrum of a field to $\bcX $, and let $\bcN _P$ be the category of \'etale neighbourhoods of the point $P$ on the site $\bcX _{\Nis \mhyphen \et }$. If the category $\bcN _P$ is cofiltered, we obtain an honest stalk functor at $P$, which yields the corresponding point of the topos of sheaves on the site $\bcX _{\Nis \mhyphen \et }$. If now $\bcO _{\bcX }$ is the sheaf of rings on the site $\bcX _{\Nis \mhyphen \et }$, inherited from the regular functions on schemes, its stalk $\bcO _{\bcX \! ,\, P}$ is a local ring, for each point $P$ on $\bcX $. Then $(\bcX _{\Nis \mhyphen \et },\bcO _{\bcX })$ is a locally ringed site. The standard procedure then gives us the sheaf of K\"ahler differentials $\Omega ^1_{\bcX /S}$ and its dual, the tangent sheaf $T_{\bcX /S}$ to the space $\bcX $. Applying the stalk at $P$ functor to the latter, we obtain the stalk $T_{\bcX \! ,\, P}$, and tensoring by the residue field $\kappa (P)$ of the local ring $\bcO _{\bcX \! ,\, P}$ we obtain the tangent space $$ T_{\bcX }(P)=T_{\bcX \! 
,\, P}\otimes \kappa (P) $$ to the space $\bcX $ at $P$, with regard to the atlas on $\bcX $. Thus, a geometrical object to us is a sheaf $\bcX $ with an atlas, such that $\bcN _P$ is cofiltered for each point $P$ on $\bcX $, and hence the site $\bcX _{\Nis \mhyphen \et }$ is locally ringed by the ring $\bcO _{\bcX }$. This approach works very well when we want to geometrize groups of $0$-cycles. Indeed, let $X$ be a locally Noetherian scheme over $S$, such that the relative symmetric power $\Sym ^d(X/S)$ exists for each $d$ (this is always the case if, say, $X$ is quasi-affine or quasi-projective over $S$). Assume, moreover, that the structural morphism from $X$ to $S$ admits a section. Use this section to construct the monoid $\Sym ^{\infty }(X/S)$, which is an ind-scheme over $S$. Then we look at the group completion $\Sym ^{\infty }(X/S)^+$ in the category of Nisnevich sheaves on locally Noetherian schemes over $S$. The point here is that if either $S$ is of pure characteristic $0$ or $X$ is flat over $S$, then $\Sym ^{\infty }(X/S)^+$ is isomorphic to the sheaf of relative $0$-cycles in the sense of Rydh, see \cite{RydhThesis}. If, moreover, $S$ is seminormal over $\Spec (\QQ )$, then the restriction of the sheaf $\Sym ^{\infty }(X/S)^+$ on schemes seminormal over $S$ gives us a sheaf isomorphic to the sheaves of relative $0$-cycles constructed by Suslin and Voevodsky, \cite{SV-ChowSheaves}, and by Koll\'ar, \cite{KollarRatCurvesOnVar}. This is why the sheaf $\Sym ^{\infty }(X/S)^+$ is really the best reincarnation of a sheaf of relative $0$-cycles on $X$ over $S$. Now, the fibred squares $\Sym ^d(X/S)\times _S\Sym ^d(X/S)$ yield a natural atlas, the {\it Chow atlas}, on the sheaf $\Sym ^{\infty }(X/S)^+$. The problem, however, is that we do not know a priori whether the category $\bcN _P$ of \'etale neighbourhoods of a point $P$ on $\Sym ^{\infty }(X/S)^+$, constructed with regard to the Chow atlas, is cofiltered.
This is our main technical result in the paper (Theorem \ref{cofilter}) which asserts that $\bcN _P$ is cofiltered indeed, for every point $P$ on $\Sym ^{\infty }(X/S)^+$. It follows that we obtain the locally ringed site $\Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et }$ with the structural sheaf $\bcO _{\Sym ^{\infty }(X/S)^+}$ on it. As a consequence of that, we also obtain the sheaf of K\"ahler differentials $\Omega ^1_{\Sym ^{\infty }(X/S)^+}$ and the tangent sheaf $T_{\Sym ^{\infty }(X/S)^+}$ on $\Sym ^{\infty }(X/S)^+$, as well as the tangent space $$ T_{\Sym ^{\infty }(X/S)^+}(P) $$ to the space $\Sym ^{\infty }(X/S)^+$ at a point $P$. Assume now for simplicity that $S$ is the spectrum of an algebraically closed field $k$ of zero characteristic, such as $\CC $ or $\bar \QQ $, for example. Any $k$-rational point $P$ on $\Sym ^{\infty }(X/S)^+$ corresponds to a $0$-cycle on $X$, which we denote by the same symbol $P$. Two points $P$ and $Q$ are rationally equivalent, as two $0$-cycles on $X$, if and only if there exists a rational curve $$ f:\PR ^1\to \Sym ^{\infty }(X/S)^+ $$ on the space of $0$-cycles passing through $P$ and $Q$. Suppose, for example, that $X$ is a smooth projective surface of general type with trivial transcendental part in the second \'etale $l$-adic cohomology group $H^2_{\et }(X)$. Bloch's conjecture predicts that any two closed points on $X$ are rationally equivalent to each other. Reformulating, the space of $0$-cycles $\Sym ^{\infty }(X)^+$ is rationally connected. The usual way of proving that a variety is rationally connected is that we first find a rational curve on it, and then prove that this curve is sufficiently free. As we have now K\"ahler differentials and the tangent sheaf with tangent spaces at points on the space of $0$-cycles, one can try to do the same on $\Sym ^{\infty }(X)^+$. The pullback of the tangent sheaf on $0$-cycles to $\PR ^1$ by $f$ is a coherent sheaf. 
Therefore, $$ f^*T_{\Sym ^{\infty }(X)^+}= \bcO _{\PR ^1}(a_1)\oplus \ldots \oplus \bcO _{\PR ^1}(a_n) \oplus \bcT \; , $$ where $\bcT $ is a torsion sheaf, and $\bcO (a_i)$ are Serre's twists. If the deformation theory of rational curves on $\Sym ^{\infty }(X/S)^+$ would be properly developed, we could apply the ``same" arguments as in deforming curves on varieties, to prove that $\Sym ^{\infty }(X)^+$ is rationally connected, in case when $X$ is a surface of general type with no transcendental part in the second cohomology group. Another approach to the same subject had been developed by Green and Griffiths in the book \cite{GreenGriffiths}, which contains a lot of new deep ideas, supported by masterly computations, towards infinitesimal study of $0$-cycles on algebraic varieties. The problem to us with Green-Griffiths' approach is, however, that their tangent space is the stalk of a sheaf on the variety itself, but not on a space of $0$-cycles, see, for example, the definition on page 90, or formula (8.1) on page 105 in \cite{GreenGriffiths}, and, moreover, the space of $0$-cycles, as a geometrical object, is missing in the book. Our standpoint here is that the concept of a space of $0$-cycles should be taken seriously, and we believe that many of our constructions are implicitly there, in the Green-Griffiths' book. In a sense, the present paper can be also considered as an attempt to prepare a technical basis to rethink the approach by Green and Griffiths, and then try to put a ``functorial order" upon the heuristic discoveries in \cite{GreenGriffiths}. {\sc Acknowledgements.} The main ideas of this manuscript were thought out in Grumbinenty village in Belarus in the summer 2017, and I am grateful to its inhabitants for the meditative environment and hospitality. I am also grateful to Lucas das Dores who spotted a few omissions in the first version of the manuscript. 
\section{K\"ahler differentials on spaces with atlases} \label{kaehler} Throughout the paper we will systematically choose and fix Grothendieck universes, and then work with categories that are small with regard to these universes, without mentioning this in the text explicitly. A discussion of the foundational aspects of category theory can be found, for example, in \cite{Shulman} or \cite{therisingsea}. Let $\catS $ be a topos, and let $\catC $ be a full subcategory in $\catS $, which is closed under finite fibred products. For the purposes which will be clear later, objects in the smaller category $\catC $ will be denoted by Latin letters $X$, $Y$, $Z$ etc, whereas objects in the topos $\catS $ will be denoted by the calligraphic letters, such as $\bcX $, $\bcY $, $\bcZ $ etc. Let $\tau $ be a topology on $\catC $, and let $\bcO $ be a sheaf of commutative rings on the site $\catC _{\tau }$, which will be considered as the structural sheaf of the ringed site $\catC _{\tau }$. Then $\bcO $ is an object of the topos $\Shv (\catC _{\tau })$ of set valued sheaves on $\catC _{\tau }$, so that the latter is a ringed topos with the structural sheaf $\bcO $. Given an object $\bcX $ in $\catS $ consider the category $\catC /\bcX $ whose objects are morphisms $X\to \bcX $ in $\catS $, where $X$ are objects of $\catC $, and morphisms are morphisms $f:X\to Y$ in $\catC $ over the object $\bcX $. Let $(\catC /\bcX )_{\tau }$ be the big site whose underlying category is $\catC /\bcX $ and the topology on $\catC /\bcX $ is induced by the topology $\tau $ on $\catC $. For brevity of notation, we denote this site by $\bcX _{\tau }$. Let also $\bcO _{\bcX }$ be the restriction of the structural sheaf $\bcO $ on the site $\bcX _{\tau }$. We shall look at $\bcO _{\bcX }$ as the structural sheaf of the site $\bcX _{\tau }$. Naturally, $\bcO _{\bcX }$ is an object of the topos $\Shv (\bcX _{\tau })$. The following definitions are slightly extended versions of the definitions in stack theory.
An {\it atlas} $A$ on $\bcX $ is a collection of morphisms $$ A=\{ X_i\to \bcX \} _{i\in I}\; , $$ indexed by a set $I$, such that all the objects $X_i$ are objects of the category $\catC $, the induced morphism $$ e_A:\coprod _{i\in I}X_i\to \bcX $$ is an epimorphism in $\catS $, and if $$ X\to \bcX $$ is in $A$ and $$ X'\to X $$ is a morphism in $\catC $, the composition $$ X'\to X\to \bcX $$ is again in $A$. The epimorphism $e_A$ will be called the {\it atlas epimorphism} of the atlas $A$. Notice that since the category $\catS $ is a topos, and in a topos every epimorphism is regular, for any atlas $A$ on an object $\bcX $ in $\catS $ the atlas epimorphism $e_A$ is a regular epimorphism. Moreover, since every topos is a regular category, and in a regular category regular epimorphisms are preserved by pullbacks, every pullback of $e_A$ is again an epimorphism. If $A$ is an atlas on $\bcX $ and $B$ is a subset in $A$, such that $B$ is an atlas on $\bcX $, then we will say that $B$ is a {\it subatlas} on $\bcX $. If $A_0$ is a collection of morphisms from objects of $\catC $ whose coproduct gives an epimorphism onto $\bcX $, the set $A$ of all possible precompositions of morphisms from $A_0$ with morphisms from $\catC $ is an atlas on $\bcX $. We will say that $A$ is generated by the collection $A_0$, and write $$ A=\langle A_0\rangle \; . $$ If $A$ consists of all morphisms from objects of $\catC $ to $\bcX $, then we will say that the atlas $A$ is {\it complete}. In contrast, if $A$ is generated by $A_0$ and the latter collection consists of one morphism only, then we will be saying that $A$ is a {\it monoatlas} on the object $\bcX $. Let $$ f:\bcX \to \bcY $$ be a morphism in $\catS $, and assume that the object $\bcY $ has an atlas $B$ on it. We will be saying that $f$ is {\it representable}, with regard to the atlas $B$, if for any morphism $$ Y\to \bcY $$ from $B$ the fibred product $$ \bcX \times _{\bcY }Y $$ is an object in $\catC $.
Let $\pty $ be a property of morphisms in $\catC $ which is $\tau $-local on the source and target, with regard to the topology $\tau $ and in the sense of Definitions 34.19.1 and 34.23.1 in \cite{StacksProject}. We will say that the morphism $f:\bcX \to \bcY $ possesses the property $\pty $, with regard to the atlas $B$ on $\bcY $, if (i) $f$ is representable with regard to $B$, and (ii) for any morphism $Y\to \bcY $ from $B$ the base change $$ \bcX \times _{\bcY }Y\to Y $$ possesses $\pty $. The stability of $\pty $ under base change and compositions is then straightforward. Let $\bcX $ and $\bcY $ be objects in $\catS $ and assume that $\bcX $ is endowed with an atlas $A$ and $\bcY $ with an atlas $B$ on them. In such a case the product $\bcX \times \bcY $ also admits an atlas $A\times B$ which consists of products of morphisms from the atlases on $\bcX $ and $\bcY $. We will say the $A\times B$ is the {\it product atlas} on $\bcX \times \bcY $. For example, if $\bcX $ admits an atlas $A$, the product $\bcX \times \bcX $ admits the square $A\times A$ of the atlas $A$, which is an atlas on $\bcX \times \bcX $. For short, we will write $A^2$ instead of $A\times A$. The diagonal morphism $$ \Delta :\bcX \to \bcX \times \bcX $$ is representable with regard to $A^2$ if and only if for any two morphisms $$ X\to \bcX \qqand Y\to \bcX $$ from $A$ the fibred product $$ X\times _{\bcX }Y $$ is an object in $\catC $. In other words, $\Delta $ is representable with regard to $A^2$ if and only if any morphism from $A$ is representable with regard to $A$. If $\Delta $ is representable with regard to $A^2$ then, for short, we will say that $\bcX $ is {\it $\Delta $-representable} with regard to $A$. Let $\bcX $ be an object in $\catS $ with an atlas $A$ on it. Let $(\catC /\bcX )_{\pty }$ be the subcategory in $\catC /\bcX $ generated by morphisms $X\to \bcX $ which are representable and possess the property $\pty $ with regard to the atlas $A$ on $\bcX $. 
Since the property $\pty $ is $\tau $-local on the source and target, the subcategory $(\catC /\bcX )_{\pty }$ is closed under fibred products, and therefore we can restrict the topology $\tau $ from $\catC /\bcX $ to $(\catC /\bcX )_{\pty }$ to obtain a small site $\bcX _{\tau \mhyphen \pty }$. This site depends on the atlas on $\bcX $. The site $\bcX _{\tau \mhyphen \pty }$ can be further tuned as follows. Let $\type $ be a type of objects in $\catC $, and let $\catC _{\type }$ be the corresponding full subcategory in $\catC $. Assume that $\type $ is closed under fibred products in $\catC $, i.e. for any two morphisms $X\to Z$ and $Y\to Z$ in $\catC _{\type }$ the fibred product $X\times _ZY$ in $\catC $ is again an object of type $\type $. Let $(\catC _{\type }/\bcX )_{\pty }$ be the full subcategory in the category $(\catC /\bcX )_{\pty }$ generated by morphisms $X\to \bcX $ possessing the property $\pty $ and such that $X$ is of type $\type $. Since $\pty $ is $\tau $-local on source and target and type $\type $ is closed under fibred products in $\catC $, the category $(\catC _{\type }/\bcX )_{\pty }$ is closed under fibred products. Then we restrict the topology $\tau $ from the category $(\catC /\bcX )_{\pty }$ to the category $(\catC _{\type }/\bcX )_{\pty }$ and obtain a smaller site $\bcX _{\tau \mhyphen \pty \mhyphen \type }$. Let $\bcX $ and $\bcY $ be two objects in $\catS $ with atlases $A$ and $B$ respectively, and let $$ f:\bcX \to \bcY $$ be a morphism in $\catS $. For any morphism $$ X\to \bcX $$ from $\bcX _{\tau \mhyphen \pty \mhyphen \type }$ consider the category $$ X/(\catC _{\type }/\bcY )_{\pty } $$ of morphisms $$ X\to Y\to \bcY $$ such that the square \begin{equation} \label{moh} \xymatrix{ X\ar[rr]^-{} \ar[dd]_-{} & & Y \ar[dd]^-{} \\ \\ \bcX \ar[rr]^-{f} & & \bcY } \end{equation} commutes, and the morphism $Y\to \bcY $ is in $\bcY _{\tau \mhyphen \pty \mhyphen \type }$. 
If the category $X/(\catC _{\type }/\bcY )_{\pty }$ is nonempty, for any morphism $X\to \bcX $ from $\bcX _{\tau \mhyphen \pty \mhyphen \type }$, the morphism $f$ creates a functor $$ f^{-1}:\Shv (\bcY _{\tau \mhyphen \pty \mhyphen \type })\to \Shv (\bcX _{\tau \mhyphen \pty \mhyphen \type }) $$ which associates, to any sheaf $\bcF $ on $\bcY _{\tau \mhyphen \pty \mhyphen \type }$, the sheaf $f^{-1}\bcF $ on $\bcX _{\tau \mhyphen \pty \mhyphen \type }$, such that, by definition $$ f^{-1}\bcF (X\to \bcX )=\colim \bcF (Y\to \bcY )\; , $$ where the colimit is taken over the category $X/(\catC _{\type }/\bcY )_{\pty }$. If $\bcF $ is a sheaf of rings\footnote{in the paper all rings are commutative rings, if otherwise is not mentioned explicitly} on $\bcY _{\tau }$, it is {\it not} true in general that $f^{-1}\bcF $ is a sheaf of rings on $\bcX _{\tau }$. The reason for that is that the forgetful functor from rings to sets commutes only with filtered colimits, whereas the category $X/(\catC _{\type }/\bcY )_{\pty }$ might well not be filtered. But whenever the category $X/(\catC _{\type }/\bcY )_{\pty }$ is nonempty and filtered, the set $f^{-1}\bcF (X\to \bcX )$ inherits the structure of a ring, and if, moreover, this category is nonempty and filtered for any morphism $X\to \bcX $ from $\bcX _{\tau \mhyphen \pty \mhyphen \type }$ the sheaf $f^{-1}\bcF $ is a sheaf of rings on the site $\bcX _{\tau \mhyphen \pty \mhyphen \type }$. Let us apply the pullback functor $f^{-1}$ to the structural sheaf of rings $\bcO _{\bcY }$. For each pair of two morphisms $$ X\stackrel{g}{\lra }Y\to \bcY \; , $$ such that the square (\ref{moh}) commutes and the second morphism possesses $\pty $, we have a homomorphism of rings $$ \bcO _{\bcY }(Y\to \bcY )= \bcO (Y)\stackrel{\bcO (g)}{\lra }\bcO (X)= \bcO _{\bcX }(X\to \bcX )\; .
$$ Such homomorphisms induce a morphism $$ f^{-1}\bcO _{\bcY }(X\to \bcX )\to \bcO _{\bcX }(X\to \bcX )\; , $$ for all morphisms $X\to \bcX $, and hence a morphism of set valued sheaves \begin{equation} \label{lisichki} f^{-1}\bcO _{\bcY }\to \bcO _{\bcX }\; . \end{equation} If we assume that the category $X/(\catC _{\type }/\bcY )_{\pty }$ is nonempty and filtered for every $X\to \bcX $ from $\bcX _{\tau \mhyphen \pty \mhyphen \type }$, the morphism (\ref{lisichki}) is a morphism of ring valued sheaves on the site $\bcX _{\tau \mhyphen \pty \mhyphen \type }$. In such a case, though $f$ does not in general give us a morphism of ring topoi, still we can define the sheaf of K\"ahler differentials on $\bcX _{\tau \mhyphen \pty \mhyphen \type }$ of the morphism $f$ as $$ \Omega ^1_{\bcX /\bcY }= \Omega ^1_{\bcO _{\bcX }/f^{-1}\bcO _{\bcY }}\; , $$ in terms of page 115 in the first part of \cite{Illusie} (see also the earlier book \cite{GrothCotang}). Any Grothendieck topos is a cartesian closed category. In particular, the topos $\Shv (\bcX _{\tau \mhyphen \pty \mhyphen \type })$ is a cartesian closed category, for each object $\bcX $ in $\catS $. The internal Hom-objects are given by the following formula. For any two set valued sheaves $\bcF $ and $\bcG $ on the site $\bcX _{\tau \mhyphen \pty \mhyphen \type }$, $$ \cHom (\bcF ,\bcG )(X\to \bcX )= \Hom _{\Shv (\bcX _{\tau \mhyphen \pty \mhyphen \type })} (\bcF \times X,\bcG )\; , $$ where $X$ is considered as a sheaf on $\bcX _{\tau \mhyphen \pty \mhyphen \type }$ via the Yoneda embedding. Notice also that, if $$ \Hom _X(\bcF \times X,\bcG \times X) $$ is a subset of morphisms from $\bcF $ to $\bcG $ over $X$, i.e.\ the set of morphisms in the slice category $\Shv (\bcX _{\tau \mhyphen \pty \mhyphen \type })/X$, then $$ \Hom _X(\bcF \times X,\bcG \times X)= \Hom _{\Shv (\bcX _{\tau \mhyphen \pty \mhyphen \type })}(\bcF \times X,\bcG ) $$ for elementary categorical reasons.
Then the internal Hom can be equivalently defined by setting $$ \cHom (\bcF ,\bcG )(X\to \bcX )= \Hom _X(\bcF \times X,\bcG \times X)\; . $$ Now, if the category $X/(\catC _{\type }/\bcY )_{\pty }$ is nonempty and filtered, for every $X\to \bcX $ in $\bcX _{\tau \mhyphen \pty \mhyphen \type }$, so that we have the sheaf of K\"ahler differentials $\Omega ^1_{\bcX /\bcY }$, then we can also define the tangent sheaf on $\bcX _{\tau \mhyphen \pty \mhyphen \type }$ to be the dual sheaf $$ T_{\bcX /\bcY }= \cHom (\Omega ^1_{\bcX /\bcY },\bcO _{\bcX })\; . $$ If $$ \bcY =Z\in \Ob (\catC _{\type })\; , $$ the category $X/(\catC _{\type }/\bcY )_{\pty }$ has a terminal object $$ \xymatrix{ X\ar[rr]^-{} \ar[dd]_-{} & & Z \ar[dd]^-{\id } \\ \\ \bcX \ar[rr]^-{f} & & Z } $$ And since every category with a terminal object is nonempty and filtered, the morphism (\ref{lisichki}) is a morphism of ring valued sheaves, and we obtain the sheaf of K\"ahler differentials $$ \Omega ^1_{\bcX /Z}\in \Ob (\Shv (\bcX _{\tau \mhyphen \pty \mhyphen \type })) $$ and the tangent sheaf $$ T_{\bcX /Z}\in \Ob (\Shv (\bcX _{\tau \mhyphen \pty \mhyphen \type })) $$ The above constructions of K\"ahler differentials and tangent sheaves apply to all kinds of geometric setups, embracing smooth and complex-analytic manifolds in terms of synthetic differential geometry, algebraic varieties, schemes, algebraic spaces, stacks, etc. All we need is to choose an appropriate category $\catC $, a topology $\tau $ on $\catC $, a sheaf of rings $\bcO $ and then take $\catS $ to be the category $\PShv (\catC )$ of set valued presheaves on $\catC $ or, when the topology $\tau $ is subcanonical, the category $\Shv (\catC _{\tau })$ of sheaves on the site $\catC _{\tau }$. If a set valued sheaf $\bcX $ on $\catC _{\tau }$ is endowed with an atlas $A$ of morphisms from objects of the category $\catC $ to $\bcX $, then we will say that $\bcX $ is a {\it space}, with regard to the atlas $A$. 
In other words, a space to us is a sheaf with a fixed atlas on it. For the purposes of the present paper we need to work in terms of schemes. All schemes in this paper will be separated by default. If $X$ is a scheme and $P$ is a point of $X$ then $\varkappa (P)$ will be the residue field of the scheme $X$ at $P$. Let $\Sch $ be the category of schemes. If $S$ is a scheme, let $\Sch /S$ be the category of schemes over $S$. We will always assume that the base scheme $S$ is Noetherian. Let $\Noe /S$ be the full subcategory in $\Sch /S$ generated by locally Noetherian schemes over $S$. We will also need the full subcategory $\Nor /S$ in $\Noe /S$ generated by locally Noetherian schemes which are locally of finite type over $S$ whose structural morphism is normal in the sense of Definition 36.18.1 in \cite{StacksProject}, the full subcategory $\Reg /S$ in $\Nor /S$ generated by locally Noetherian schemes locally of finite type over $S$ whose structural morphism is regular, in the sense of Definition 36.19.1 in \cite{StacksProject} (since every regular local ring is integrally closed, every regular scheme is normal). Finally, let $\Sm /S$ be the full subcategory in $\Reg /S$ generated by locally Noetherian schemes locally of finite type over $S$ whose structural morphism is smooth. Recall that every smooth scheme over a field is regular, this is why $\Sm /S$ is indeed a full subcategory in $\Reg /S$. Since every regular scheme over a perfect field is smooth, if the residue fields of points on the base scheme $S$ are perfect, the categories $\Sm /S$ and $\Reg /S$ coincide. Thus, we obtain the following chain of full embeddings \begin{equation} \label{chainofcats3} \Sm /S\subset \Reg /S\subset \Nor /S\subset \Noe /S \subset \Sch /S\; . 
\end{equation} The category $\Sch $ possesses the following well-known topologies: the Zariski topology $\Zar $, $\h $-topology, the \'etale topology $\et $, the Nisnevich topology $\Nis $ and the completely decomposed $\h $-topology denoted by $\cdh $. Notice that only the topologies $\Zar $, $\Nis $ and $\et $ are subcanonical, the topologies $\cdh $ and $\h $ are not subcanonical. The relation between these topologies is given by the chains of inclusions \begin{equation} \label{chainoftops1} \Zar \subset \Nis \subset \et \subset \h \end{equation} and \begin{equation} \label{chainoftops2} \Nis \subset \cdh \subset \h \; . \end{equation} The categories $\Sch /S$ and $\Noe /S$ are obviously closed under fibred products. Moreover, the categories $\Nor /S$, $\Reg /S$ and $\Sm /S$ are also closed under fibred products by Propositions 6.8.2 and 6.8.3 in \cite{EGAIV(2)}. For simplicity of notation, the restrictions of all five topologies from (\ref{chainoftops1}) and (\ref{chainoftops2}) on the categories from (\ref{chainofcats3}) will be denoted by the same symbols. For our purposes the most convenient setup is this: $$ \catC =\Noe /S\; ,\; \; \tau =\Nis \, ,\; \; \pty =\et $$ and $$ \type \in \{ \sm \, ,\; \reg \, ,\; \nor \, ,\; \noe \} \; , $$ i.e. $$ \catC _{\type }\in \{ \Sm /S\, ,\; \Reg /S\, ,\; \Nor /S\, ,\; \Noe /S\} \; . $$ Since the Nisnevich topology is subcanonical, we can choose $$ \catS =\Shv ((\Noe /S)_{\Nis }) $$ to be the category of set valued sheaves on the Nisnevich site $(\Noe /S)_{\Nis }$. If a Nisnevich sheaf $\bcX $ is endowed with an atlas $A$ on it, then we will say that $\bcX $ is a {\it Nisnevich space}, with regard to the atlas $A$. Accordingly, for any Nisnevich space $\bcX $ we have the site $$ \bcX _{\Nis \mhyphen \et \mhyphen \type } $$ of morphisms from locally Noetherian schemes of type $\type $ over $S$ to $\bcX $, \'etale with regard to the atlas on $\bcX $, endowed with the induced Nisnevich topology on it. 
If $$ \type =\noe \; , $$ i.e. $$ \catC _{\type }=\Noe /S\; , $$ then, for brevity of notation, we will write $$ \bcX _{\Nis \mhyphen \et } $$ instead of $\bcX _{\Nis \mhyphen \et \mhyphen \noe }$. Notice also that $S$ is a terminal object in the category $\Noe /S$, and, since any sheaf in $\Shv ((\Noe /S)_{\et })$ is the colimit of representable sheaves, $S$ is also a terminal object in the category $\Shv ((\Noe /S)_{\et })$. Let $\bcX $ be a Nisnevich sheaf on $\Noe /S$. A {\it point} $P$ on $\bcX $ is an equivalence class of morphisms $$ \Spec (K)\to \bcX $$ from spectra of fields to $\bcX $ in the category $\Shv _{\Nis }(\Noe /S)$. Two morphisms $$ \Spec (K)\to \bcX \qqand \Spec (K')\to \bcX $$ are said to be equivalent if there exists a third field $K''$, containing the fields $K$ and $K'$, such that the diagram $$ \diagram \Spec (K'') \ar[dd]_-{} \ar[rr]^-{} & & \Spec (K') \ar[dd]^-{} \\ \\ \Spec (K) \ar[rr]^-{} & & \bcX \enddiagram $$ commutes. If a morphism from $\Spec (K)$ to $\bcX $ represents $P$ then, by abuse of notation, we will write $$ P:\Spec (K)\to \bcX \; . $$ The set of points on $\bcX $ will be denoted by $|\bcX |$. Certainly, if $\bcX $ is represented by a locally Noetherian scheme $X$ over $S$, then $|\bcX |$ is the set of points of the scheme $X$. A geometric point on $\bcX $ is a morphism from $\Spec (K)$ to $\bcX $, where $K$ is algebraically closed. Any geometric point on $\bcX $ represents a point on $\bcX $, and any point on $\bcX $ is represented by a geometric point. Fix an atlas $A$ on the sheaf $\bcX $. If a point $P$ on $\bcX $ has a representative $$ \Spec (K)\to \bcX \; , $$ and the latter factors through a morphism from $A$, then we will say that $P$ factors through $A$. Let $P$ be a point of $\bcX $ which factors through $A$. Choose a representative $$ \Spec (K)\to \bcX $$ of the point $P$ with $K$ being algebraically closed. 
Define a functor $$ u_P:\bcX _{\Nis \mhyphen \et \mhyphen \type }\to \Sets $$ sending an \'etale morphism $$ X\to \bcX \; , $$ where $X$ is of type $\type $ over $S$, to the set $$ u_P(X\to \bcX )=|X_P| $$ of points on the fibre $$ X_P=X\times _{\bcX }\Spec (K) $$ of the morphism $X\to \bcX $ at $P$. Notice that since the morphism $X\to \bcX $ is \'etale, it is representable with regard to the atlas $A$ on the sheaf $\bcX $. And since $P$ factorizes through $A$, the fibre $X_P$ is a locally Noetherian scheme over $S$. If $X$ and $X'$ are two schemes of type $\type $ over $S$ and endowed with two \'etale morphisms $X\to \bcX $ and $X'\to \bcX $, and if $$ f:X\to X' $$ is a morphism of schemes over $S$ and over $\bcX $, i.e. a morphism in $\bcX _{\Nis \mhyphen \et \mhyphen \type }$, then $$ u_P(f):u_P(X)\to u_P(X') $$ is the map of sets $$ |X_P|\to |X'_P| $$ induced by the scheme-theoretical morphism $$ X_P\to X'_P\; , $$ which is, in turn, induced by the morphism $X\to X'$. Let $X$ be a locally Noetherian scheme of type $\type $ over $S$, let $$ \{ X_i\to X\} _{i\in I} $$ be a Nisnevich covering in $\Noe /S$, and let $$ X\to \bcX $$ be a morphism in $\Shv ((\Noe /S)_{\et })$, \'etale with regard to the atlas $A$ on $\bcX $. Since every morphism $X_i\to X$ is smooth, and therefore of type $\type $ over $X$, the cover $\{ X_i\to X\} $ is also a Nisnevich cover of the site $\bcX _{\Nis \mhyphen \et \mhyphen \type }$. Applying the functor $u_P$ we obtain the morphism $$ \coprod _{i\in I}u_P(X_i)\to u_P(X)\; , $$ which is nothing else but the set-theoretical map $$ \coprod _{i\in I}|(X_i)_P|\to |X_P|\; . $$ Since $P$ factors through $A$, the latter map is surjective. 
If $X'$ is another locally Noetherian scheme of type $\type $ over $S$ and $$ X'\to X $$ is a morphism of schemes over $S$, such that the composition $$ X'\to X\to \bcX $$ is \'etale with regard to $A$, then we look at the morphism $$ u_P(X_i\times _XX')\to u_P(X_i)\times _{u_P(X)}u_P(X')\; , $$ that is the map $$ |(X_i\times _XX')_P|\to |(X_i)_P|\times _{|X_P|}|X'_P|\; . $$ Now again, since $P$ factors through $A$, the latter map is bijective. In other words, the functor $u_P$ satisfies the items (1) and (2) of Definition 7.31.2 in \cite{StacksProject}. The last item (3) of the same definition is satisfied when, for example, the category of neighbourhoods of the point $P$ is cofiltered. Let us discuss item (3) in some more detail. An \'etale neighbourhood of $P$, in the sense of the site $\bcX _{\Nis \mhyphen \et \mhyphen \type }$, is a pair $$ N=(X\to \bcX ,T\in u_P(X\to \bcX )=|X_P|)\; , $$ where $X$ is of type $\type $ over $S$, $X\to \bcX $ is a morphism over $S$, \'etale with regard to the atlas $A$ on $\bcX $, and $T$ is a point of the scheme $X_P$, represented by, say, the morphism $$ \Spec (\kappa (T))\to X_P\; . $$ Equivalently, an \'etale neighbourhood of $P$ is just a commutative diagram of type $$ \diagram \Spec (K) \ar[dd]_-{} \ar[rrdd]^-{} & & \\ \\ X \ar[rr]^-{} & & \bcX \enddiagram $$ \iffalse $$ \diagram \Spec (K) \ar[dd]_-{} \ar[rrdd]^-{} & & \\ \\ X \ar[rr]^-{} \ar[dd]^-{} & & \bcX \ar[lldd]^-{} \\ \\ S \enddiagram $$ \fi where the morphism $X\to \bcX $ is \'etale with regard to the atlas $A$ on $\bcX $, the morphism $\Spec (K)\to \bcX $ represents the point $P$, and all morphisms are over the base scheme $S$. If $$ N'=(X'\to \bcX ,T'\in |X'_P|) $$ is another neighbourhood of $P$, a morphism $$ N\to N' $$ is a morphism $$ X\to X' $$ over $\bcX $, and hence over $S$, such that, if $$ X_P\to X'_P $$ is the morphism induced on fibres, the composition $$ \Spec (\kappa (T))\to X_P\to X'_P $$ represents the point $T'$. 
Equivalently, if $$ \diagram \Spec (K') \ar[dd]_-{} \ar[rrdd]^-{P} & & \\ \\ X' \ar[rr]^-{} & & \bcX \enddiagram $$ is another neighbourhood of $P$, a morphism of neighbourhoods is a morphism $$ X\to X' $$ over $\bcX $, and hence over $S$, such that, there is a common field extension $K''$ of $K$ and $K'$, such that $\Spec (K'')\to \bcX $ represents $P$, and the diagram $$ \diagram \Spec (K'') \ar[dd]_-{} \ar[rr]_-{} & & X' \ar[dd]_-{} \\ \\ X \ar[rruu]^-{} \ar[rr]^-{} & & \bcX \enddiagram $$ \iffalse $$ \xymatrix{ & & \Spec (K'') \ar[lldd]_-{} \ar[rrdd]^-{} & & \\ \\ X \ar[rrdddd]^-{} \ar[rrrr]^-{} \ar[rrdd]_-{} & & & & X' \ar[lldddd]^-{} \ar[lldd]^-{} \\ \\ & & \bcX \ar[dd]^-{} & & \\ \\ & & S & & } $$ \fi commutes. Notice that the above definition of a neighbourhood of a point $P$ on $\bcX $ depends on the functor $u_P$, sending $X\to \bcX $ to $|X_P|$. If we change the functor $u_P$, the notion of neighbourhood will be different, see Section 7.31 in \cite{StacksProject}. Let $\bcN _P$ be the category of neighbourhoods of the point $P$ on $\bcX $, in the sense of the site $\bcX _{\Nis \mhyphen \et \mhyphen \type }$. If $\bcF $ is a set valued sheaf on $\bcX _{\Nis \mhyphen \et \mhyphen \type }$, it is, in particular, a set valued presheaf on the same category, and, as such, it induces a functor $$ \bcF |_{\bcN _P^{\op }}:\bcN _P^{\op }\to \Sets $$ sending $N=(X\to \bcX ,T\in |X_P|)$ to $\bcF (X)$ and a morphism $N\to N'$ to the obvious map $$ \bcF (X')\to \bcF (X)\; . $$ The stalk functor $$ \stalk _P: \Shv (\bcX _{\Nis \mhyphen \et \mhyphen \type }) \to \Sets $$ sends a sheaf $\bcF $ on $\bcX _{\Nis \mhyphen \et \mhyphen \type }$ to the colimit $$ \colim (\bcF |_{\bcN _P^{\op }}) $$ of the functor $\bcF |_{\bcN _P^{\op }}$. Once again, we should not forget here that the stalk functor $\stalk _P$ depends on the definition of a neighbourhood, and the latter depends on the choice of the functor $u_P$, see Section 7.31 in \cite{StacksProject}. 
Now, as finite limits commute with filtered colimits, if the category $\bcN _P$ is cofiltered, the stalk functor $\stalk _P$ is left exact, and item (3) of Definition 7.31.2 in \cite{StacksProject} holds true as well, and the stalk functor $\stalk _P$ gives rise to a point of the topos $\Shv (\bcX _{\Nis \mhyphen \et \mhyphen \type })$, see Lemma 7.31.7 in \cite{StacksProject}. If this is the case, it gives us the well-behaved stalks $$ \bcO _{\bcX ,\, P}=\stalk _P(\bcO _{\bcX })\; , $$ $$ \Omega ^1_{\bcX /S,\, P}=\stalk _P(\Omega ^1_{\bcX /S}) $$ and $$ T_{\bcX /S,\, P}=\stalk _P(T_{\bcX /S}) $$ at the point $P$. The latter stalk is not, however, a tangent space to $\bcX $ at $P$. To achieve an honest tangent space we need to observe that, whenever $\bcN _P$ is cofiltered for each $P$, the site $\bcX _{\Nis \mhyphen \et \mhyphen \type }$ is locally ringed in the sense of the definition appearing in Exercise 13.9 on page 512 in \cite{SGA4-1} (see page 313 in the newly typeset version), as well as in the sense of a slightly different Definition 18.39.4 in \cite{StacksProject}. Indeed, any scheme $U$ is a locally ringed site with enough points. Applying Lemma 18.39.2 in loc.cit we see that for any Zariski open subset $V$ in $U$ and for any function $f\in \bcO _U(V)$ there exists an open covering $V=\cup V_i$ of the set $V$ such that for each index $i$ either $f|_{V_i}$ is invertible or $(1-f)|_{V_i}$ is invertible. If now $U\to \bcX $ is an \'etale morphism from a scheme $U$ to $\bcX $ over $S$, with regard to the atlas on $\bcX $, since $$ \Gamma (U,\bcO _{\bcX })=\Gamma (U,\bcO _U)\; , $$ we obtain item (1) of Lemma 18.39.1 in \cite{StacksProject}, and the condition (18.39.2.1) in loc.cit. is obvious. Now, since the site $\bcX _{\Nis \mhyphen \et \mhyphen \type }$ is locally ringed, we consider the maximal ideal $$ \gom _{\bcX \! ,\, P}\subset \bcO _{\bcX \! ,\, P} $$ and let $$ \kappa (P)=\bcO _{\bcX \! ,\, P}/\gom _{\bcX \! 
,\, P} $$ be the residue field of the locally ringed site at the point $P$. Then we also have two vector spaces $$ \Omega ^1_{\bcX /S}(P)= \Omega ^1_{\bcX /S\!,\, P} \otimes _{\bcO _P}\kappa (P) $$ and $$ T_{\bcX /S}(P)= T_{\bcX /S\!,\, P} \otimes _{\bcO _P}\kappa (P) $$ over the residue field $\kappa (P)$. The latter is our {\it tangent space} to the space $\bcX $ at the point $P$. \section{Categorical monoids and group completions} \label{freemongrcompl} Let $\catS $ be a cartesian monoidal category, so that the terminal object $\! \term \! $ is the monoidal unit in $\catS $. Denote by $\Mon (\catS )$ the full subcategory of monoids\footnote{all monoids in this paper will be commutative by default}, and by $\Ab (\catS )$ the full subcategory of abelian group objects in the category $\catS $. Assume that $\catS $ is closed under finite colimits and countable coproducts which are distributive with regard to the cartesian product in $\catS $. Then the forgetful functor from $\Mon (\catS )$ to $\catS $ has a left adjoint which can be constructed as follows. For any object $\bcX $ in $\catS $ and for any natural number $d$ let $\bcX ^d$ be the $d$-fold monoidal product of $\bcX $. Consider the $d$-th symmetric power $$ \Sym ^d(\bcX )\; , $$ i.e. the quotient of the object $\bcX ^d$ by the natural action of the $d$-th symmetric group $\Sigma _d$ in the category $\catS $. In particular, $$ \Sym ^0(\bcX )=\term \qqand \Sym ^1(\bcX )=\bcX \; . $$ The coproduct $$ \coprod _{d=0}^{\infty }\Sym ^d(\bcX ) $$ is a monoid, whose concatenation product $$ \coprod _{d=0}^{\infty }\Sym ^d(\bcX )\times \coprod _{d=0}^{\infty }\Sym ^d(\bcX )\to \coprod _{d=0}^{\infty }\Sym ^d(\bcX ) $$ is induced by the obvious morphism $$ \coprod _{d=0}^{\infty }\bcX ^{d}\times \coprod _{d=0}^{\infty }\bcX ^{d}\to \coprod _{d=0}^{\infty }\bcX ^{d} $$ and the embeddings of $\Sigma _i\times \Sigma _j$ in to $\Sigma _{i+j}$. 
The unit $$ \term \to \coprod _{d=0}^{\infty }\Sym ^d(\bcX ) $$ identifies $\term $ with $\bcX ^{(0)}$. This monoid will be called the {\it free monoid} generated by $\bcX $ and denoted by $\NN (\bcX )$. Thus, $$ \NN (\bcX )=\coprod _{d=0}^{\infty }\Sym ^d(\bcX )\; . $$ For example, $$ \NN (\term )=\NN \; . $$ It is easy to verify that the functor $$ \NN :\catS \to \Mon (\catS ) $$ is left adjoint to the forgetful functor from $\Mon (\catS )$ to $\catS $. The full embedding of $\Ab (\catS )$ in to $\Mon (\catS )$ admits left adjoint, if we impose some extra assumption on the category $\catS $. Namely, let $\bcX $ be a monoid in $\catS $, and look at the obvious diagonal morphism \begin{equation} \label{diag} \Delta :\bcX \to \bcX \times \bcX \end{equation} in the category $\catS $, which is also a morphism in the category $\Mon (\catS )$. The terminal object $*$ in the category $\catS $ is a trivial monoid, i.e. a terminal object in the category $\Mon (\catS )$. Assume there exists a co-Cartesian square \begin{equation} \label{completiondiagr} \diagram \bcX \ar[rr]^-{\Delta } \ar[dd]^-{} & & \bcX \times \bcX \ar[dd]^-{} \\ \\ \term \ar[rr]^-{} & & \bcX ^+ \enddiagram \end{equation} in the category of monoids $\Mon (\catS )$. Then $\bcX ^+$ is an abelian group object in the category $\catS $. Let $$ \iota _{\bcX }:\bcX \to \bcX ^+ $$ be the composition of the canonical embedding $$ \iota _1:\bcX \to \bcX \times \bcX \; , $$ $$ x\mapsto (x,0) $$ with the projection $$ \pi _{\bcX }:\bcX \times \bcX \to \bcX ^+\; . $$ If $$ f:\bcX \to \bcY $$ is a morphism of monoids and $\bcY $ is an abelian group object in $\catS $, the precomposition of the homomorphism $$ (f,-f):\bcX \times \bcX \to \bcY \; , $$ sending $(x_1,x_2)$ to $f(x_1)-f(x_2)$ with the diagonal embedding is $0$, whence there exists a unique group homomorphism $h$ making the diagram $$ \xymatrix{ \bcX \ar[rr]^-{\iota _{\bcX }} \ar[ddrr]_-{f} & & \bcX ^+ \ar@{.>}[dd]^-{\hspace{+1mm}\exists ! 
h} \\ \\ & & \bcY } $$ commutative. This all shows that $\bcX ^+$ is nothing else but the {\it group completion} of the monoid $\bcX $, and the group completion functor $$ -^+:\Mon (\catS )\to \Ab (\catS ) $$ is left adjoint to the forgetful functor from $\Ab (\catS )$ to $\Mon (\catS )$. For example, $$ \ZZ =\NN ^+ $$ is the group completion of the free monoid $\NN $, generated by the terminal object $\term $ in the category $\catS $. Notice that, as the categories $\Mon (\catS )$ and $\Ab (\catS )$ are pointed, one can show the existence of the canonical isomorphism of monoids $$ (\bcX \times \bcX )^+\stackrel{\sim }{\to } \bcX ^+\times \bcX ^+\; . $$ In other words, the group completion functor is monoidal. It is useful to understand how all these constructions work for set-theoretical monoids. Since monoids are not groups, some care is in place here. Let $M$ be a monoid in the category of sets $\Sets $, written additively, and assume first that we are given a submonoid $N$ in $M$. To understand what would be the quotient monoid of $M$ by $N$, we define a relation $$ R\subset M\times M $$ saying that, for any two elements $m,m'\in M$, \begin{equation} \label{defofeq} mRm'\; \Leftrightarrow \; \exists n,n'\in N\; \hbox{with}\; m+n=m'+n'\; . \end{equation} Then $R$ is a congruence relation on $M$, i.e. an equivalence relation compatible with the operation in $M$. Indeed, the reflexivity and symmetry are obvious. Suppose that we have three elements $$ m,m',m''\in M\; , $$ and $$ \exists n,n'\in N,\; \hbox{such that}\; m+n=m'+n'\; . $$ and $$ \exists l',l''\in N,\; \hbox{such that}\; m'+l'=m''+l''\; . $$ Then $$ m+n+l'=m'+n'+l'=m''+l''+n'\; . $$ Clearly, $$ n+l',\; l''+n'\in N\; , $$ and we get transitivity. Thus, $R$ is an equivalence relation. Let $M/N$ be the corresponding quotient set, and let $$ \pi :M\to M/N $$ $$ m\mapsto [m] $$ be the quotient map. 
The structure of a monoid on $M/N$ is obvious, $$ [m]+[\tilde m]=[m+\tilde m]\; , $$ and since $M$ is a commutative monoid\footnote{recall that, within this paper, all monoids are commutative by default}, it follows easily that the map $\pi $ is a homomorphism of monoids. In other terms, the above relation $R$ on $M$ is a congruence relation. Moreover, the quotient homomorphism $$ M\to M/N $$ enjoys the standard universal property, loc.cit. To be more precise, for any homomorphism of monoids $$ f:M\to T\; , $$ such that $$ N\subset \ker (f)=\{ m\in M\; |\; f(m)=0\} \; , $$ there exists a commutative diagram of type $$ \xymatrix{ M \ar[rr]^-{} \ar[ddrr]_-{} & & M/N \ar@{.>}[dd]^-{\hspace{+1mm}\exists !} \\ \\ & & T } $$ Now, let $M\times M$ be the product monoid, let $$ \Delta :M\to M\times M $$ be the diagonal homomorphism, and let $$ \Delta (M) $$ be the set-theoretical image of the homomorphism $\Delta $. Trivially, $\Delta (M)$ is a submonoid in the product monoid $M\times M$, and we can construct the quotient monoid $$ M^+=(M\times M)/\Delta (M)\; , $$ using the procedure explained above. The universal property of the quotient monoid gives us that the diagram \begin{equation} \label{mohoviki} \diagram M\ar[rr]^-{\Delta } \ar[dd]^-{} & & M\times M \ar[dd]^-{} \\ \\ \term \ar[rr]^-{} & & M^+ \enddiagram \end{equation} is a pushout in the category $\Mon (\Sets )$. It follows that $M^+$ is the group completion of $M$ in the sense of our definition given for the general category $\catS $. Clearly, the composition $$ \xymatrix{ M \ar[rr]^-{m\mapsto (m,0)} \ar[ddrr]_-{\iota _M} & & M\times M \ar[dd]^-{} \\ \\ & & M^+ } $$ is a homomorphism of monoids. 
If $$ f:M\to A $$ is a homomorphism from the monoid $M$ to an abelian group $A$, then we define a homomorphism of monoids $$ M\times M\to A $$ sending $$ (m_1,m_2)\mapsto f(m_1)-f(m_2)\; , $$ and the universal property of the diagram (\ref{mohoviki}) gives us the needed commutative diagram $$ \xymatrix{ M \ar[rr]^-{\iota _M} \ar[ddrr]_-{} & & M^+ \ar@{.>}[dd]^-{\hspace{+1mm}\exists !} \\ \\ & & A } $$ Moreover, if $M$ is cancellative, the diagram (\ref{mohoviki}) is not only a pushout square in $\Mon (\Sets )$ but also a pullback square in $\Sets $. Indeed, if $$ (m_1,m_2),\; (m_1',m_2')\in M\times M\; , $$ then, according to (\ref{defofeq}), $$ \exists n,n'\in M $$ such that $$ (m_1,m_2)+(n,n)=(m_1',m_2')+(n',n') $$ in $M\times M$, or, equivalently, \begin{equation} \label{kott} m_1+n=m_1'+n' \qqand m_2+n=m_2'+n'\; . \end{equation} Now, suppose we want to find $h$ completing a commutative diagram of type \begin{equation} \label{ryzhiikot} \xymatrix{ T\ar@/_/[dddr] \ar@/^/[drrr]^-{f} \ar@{.>}[dr]^-{\hspace{-1mm}\exists ! h} \\ & M \ar[dd] \ar[rr] & & M\times M \ar[dd]^-{\pi } \\ \\ & \term \ar[rr] & & M^+} \end{equation} in the category $\Sets $. If $(m_1,m_2)$ is an element of $M\times M$, the equivalence class $[m_1,m_2]$ is $0$ in $M^+$, i.e. the ordered pair $(m_1,m_2)$ is equivalent to $(0,0)$ in $M\times M$ modulo the subtractive submonoid $\Delta (M)$, if and only if, by (\ref{kott}), $$ m_1+n=n' \qqand m_2+n=n'\; , $$ whence $$ m_1+n=m_2+n\; . $$ Since $M$ is a cancellation monoid, the latter equality gives us that $m_1=m_2$, i.e. $(m_1,m_2)$ is in $\Delta (M)$. In other words, $[m_1,m_2]=0$ in $M^+$ if and only if $(m_1,m_2)$ is in $\Delta (M)$. And as the diagram (\ref{ryzhiikot}) is commutative without $h$, it follows that the set-theoretical image of the map $f$ is in $\Delta (M)$. It follows that $f$ factorizes through $\Delta $, i.e. the needed map $h$ exists. 
Thus, we see that the abstract constructions relevant to group completions are generalizations of the standard constructions in terms of set-theoretical monoids. All the same arguments apply when $\catS $ is the category $\PShv (\catC )$ of set valued presheaves on a category $\catC $, as all limits and colimits in $\PShv (\catC )$ are sectionwise. Thus, for any monoid $\bcX $ in $\PShv (\catC )$ the group completion $\bcX ^+$ exists and it is a section wise group completion. If $\bcX $ is cancellative, and this is equivalent to saying that $\bcX $ is section wise cancellative, then the diagram (\ref{completiondiagr}) is Cartesian in $\PShv (\catC )$. Now let us come back to the general setting. Let again $\bcX $ be a monoid in $\catS $. The notion of a cancellative monoid can be categorified as follows. A morphism $$ \iota :\NN \to \bcX $$ in the category $\Mon (\catS )$, that is a homomorphism of monoids from $\NN $ to $\bcX $, is uniquely defined by the restriction $$ \iota (1):\term \to \bcX $$ of $\iota $ on to the subobject $\term =\Sym ^1(\term )$ of the object $\NN =\coprod _{d=0}^{\infty }\Sym ^d(\term )$ in $\catS $. Vice versa, as soon as we have a morphism $\term \to \bcX $ in the category $\catS $, it uniquely defines the obvious morphism $\iota :\NN \to \bcX $ in the category $\Mon (\catS )$. The homomorphism of monoids $\iota $ will be said to be {\it cancellative} if the composition $$ \add _{\iota (1)}:\bcX \simeq \bcX \times *\stackrel{\id \times \iota (1)}{\lra }\bcX \times \bcX \to \bcX \; , $$ that is the addition of $\iota (1)$ on $\bcX $, is a monomorphism in $\catS $. The monoid $\bcX $ is a {\it cancellation monoid} if any homomorphism $\iota :\NN \to \bcX $ is cancellative. Clearly, if $\bcX $ is a monoid in $\PShv (\catC )$, then $\bcX $ is cancellative if and only if it is section wise cancellative. 
A {\it pointed monoid} in $\catS $ is a pair $(\bcX ,\iota )$, where $\bcX $ is a monoid in $\catS $ and $\iota $ is a morphism of monoids from $\NN $ to $\bcX $. A {\it graded pointed monoid} is a triple $(\bcX ,\iota ,\sigma )$, where $(\bcX ,\iota )$ is a pointed monoid and $\sigma $ is a morphism of monoids from $\bcX $ to $\NN $, such that $$ \sigma \circ \iota =\id _{\NN }\; . $$ If $\bcX $ is a pointed graded monoid in $\catS $, for any natural number $d\in \NN $ one can consider the cartesian square $$ \diagram \bcX _d\ar[rr]^-{} \ar[dd]^-{} & & * \ar[dd]^-{d} \\ \\ \bcX \ar[rr]^-{\sigma } & & \NN \enddiagram $$ in the category $\catS $. The addition of $\iota (1)$ in $\bcX $ induces morphisms $$ \bcX _d\to \bcX _{d+1} $$ for all $d\geq 0$. Let $\bcX _{\infty }$ be the colimit $$ \bcX _{\infty }= \colim (\bcX _0\to \bcX _1\to \bcX _2\to \dots ) $$ in $\catS $. Equivalently, $\bcX _{\infty }$ is the coequalizer of the addition of $\iota (1)$ in $\bcX $ and the identity automorphism of $\bcX $. Since filtered colimits commute with finite products, there is a canonical isomorphism between the colimit of the obvious diagram composed by the objects $\bcX _d\times \bcX _{d'}$, for all $d,d'\geq 0$, and the product $\bcX _{\infty }\times \bcX _{\infty }$. Since the colimit of that diagram is the colimit of its diagonal, this gives the canonical morphism from $\bcX _{\infty }\times \bcX _{\infty }$ to $\bcX _{\infty }$. The latter defines the structure of a monoid on $\bcX _{\infty }$, such that the canonical morphism $$ \pi :\bcX =\coprod _{d\geq 0}\bcX _d\to \bcX _{\infty } $$ is a homomorphism of monoids in $\catS $. We call $\bcX _{\infty }$ the {\it connective} monoid associated to the pointed graded monoid $\bcX $. 
Notice that if the category $\catS $ is exhaustive\footnote{see {{\tt https://ncatlab.org/nlab/show/exhaustive+category}}}, monomorphicity of the morphisms $\bcX _d\to \bcX _{d+1}$ yields that the transfinite compositions $\bcX _d\to \bcX _{\infty }$ are monomorphisms too. The morphisms $\bcX _d\to \bcX _{d+1}$ are monomorphic, for example, if $\bcX $ is a cancellation monoid. Now assume that the colimit $\bcX _{\infty }^+$ exists in the category $\Mon (\catS )$. Since $\bcX _{\infty }$ is the coequalizer of $\add _{\iota (1)}$ and $\id _{\bcX }$, the group completion $\bcX _{\infty }^+$ is the coequalizer of the corresponding homomorphism $\add _{\iota (1)}^+:\bcX ^+\to \bcX ^+$ and $\id _{\bcX ^+}$. It follows that the sequence $$ 0\to \ZZ \stackrel{\iota ^+}{\lra }\bcX ^+\to \bcX _{\infty }^+\to 0 $$ is short exact. Moreover, this sequence splits via the morphism $\sigma ^+$. This gives us that $$ \bcX ^+=\ZZ \oplus \bcX _{\infty }^+ $$ in the abelian category $\Ab (\catS )$. A typical example of a pointed graded monoid in $\catS $ is the free monoid $$ \NN (\bcX )=\coprod _{d=0}^{\infty }\Sym ^d(\bcX )\; , $$ where $\bcX $ is a pointed object in $\catS $, i.e. the morphism from $\bcX $ to the terminal object $*$ has a section. For this pointed graded monoid we have that $$ \NN (\bcX )_d=\Sym ^d(\bcX )\; , $$ for all natural numbers $d$, and the pointing of each symmetric power $\Sym ^d(\bcX )$ is induced by the pointing of $\bcX $ in the obvious way. The section gives embeddings $$ \Sym ^d(\bcX )\to \Sym ^{d+1}(\bcX )\; , $$ and the corresponding connective monoid $$ \NN (\bcX )_{\infty }=\colim _d\; \Sym ^d(\bcX ) $$ will be denoted by $\Sym ^{\infty }(\bcX )$ and called the {\it free connective monoid} of the object $\bcX $. Then, of course, $$ \Sym ^{\infty }(\bcX )^+=\NN (\bcX )_{\infty }^+\; . $$ Moreover, both free monoids $\NN (\bcX )$ and $\NN (\bcX )_{\infty }$ are cancellative monoids in $\catS $. 
Now, let $\catC $ be a cartesian monoidal category with a terminal object $\! \term \, $, closed under finite fibred products and equipped with a subcanonical topology $\tau $. Let $\PShv (\catC )$ be the category of set valued presheaves on $\catC $, and let $\Shv (\catC _{\tau })$ be the full subcategory in $\PShv (\catC )$ of sheaves on $\catC $ with regard to the topology $\tau $. Since the category $\catC $ is cartesian, so are the categories $\PShv (\catC )$ and $\Shv (\catC _{\tau })$, and therefore we can consider the monoids in the categories of sheaves and pre-sheaves. Our aim is now to apply the constructions above in the case when $$ \catS =\PShv (\catC ) \qor \catS =\Shv (\catC _{\tau })\; . $$ The Yoneda embedding $$ h:\catC \to \PShv (\catC ) $$ is a continuous functor, i.e. it preserves limits. It follows that, if $\bcX $ is a monoid in $\PShv (\catC )$, then it is equivalent to saying that $\bcX $ is a section wise monoid, and the two diagrams $$ \diagram \Mon (\catC ) \ar[dd]^-{h} \ar[rr]^-{} & & \catC \ar[dd]^-{h} \\ \\ \Mon (\PShv (\catC )) \ar[rr]^-{} & & \PShv (\catC ) \enddiagram $$ and $$ \diagram \Ab (\catC ) \ar[dd]_-{} \ar[rr]^-{} & & \Mon (\catC ) \ar[dd]^-{} \\ \\ \Ab (\PShv (\catC )) \ar[rr]^-{} & & \Mon (\PShv (\catC )) \enddiagram $$ are commutative. Moreover, the diagonal morphism (\ref{diag}) for a presheaf $\bcX $ is diagonal section-wise. It follows that the colimit diagram (\ref{completiondiagr}) exists in $\Mon (\PShv (\catC ))$ and, accordingly, the group completion $\bcX ^+$ is then the section wise group completion of $\bcX $. In particular, the group completion $\bcX ^+$ of the presheaf monoid $\bcX $ is topology free. 
Let $\PShv (\catC )_{\spd }$ be the full subcategory of separated presheaves in $\PShv (\catC )$, and let $$ -^{\spd }:\PShv (\catC )\to \PShv (\catC )_{\spd } $$ $$ \bcF \mapsto \bcF ^{\spd } $$ be the left adjoint to the forgetful functor from $\PShv (\catC )_{\spd }$ to $\PShv (\catC )$, as constructed on page 40 in \cite{FGAexplained}. Let also $$ -^{\glue }:\PShv (\catC )_{\spd }\to \Shv (\catC _{\tau }) $$ $$ \bcF \mapsto \bcF ^{\glue } $$ be the second stage of sheafification, i.e. the gluing of sections as described on the same page of the same book, or, in other words, the left adjoint to the forgetful functor from $\Shv (\catC _{\tau })$ to $\PShv (\catC )_{\spd }$. The composition $$ -^{\shf }:\PShv (\catC )\to \Shv (\catC _{\tau }) $$ of these two functors $-^{\spd }$ and $-^{\glue }$ is the left adjoint to the forgetful functor from $\Shv (\catC _{\tau })$ to $\PShv (\catC )$, i.e. the functor which associates to any presheaf the corresponding associated sheaf in the topology $\tau $, see pp.~39--40 in \cite{FGAexplained}. Now, since the sheafification functor $-^{\shf }$ is left adjoint to the forgetful functor from sheaves to presheaves, the latter is right adjoint, and hence it commutes with limits. In particular, the forgetful functor from sheaves to presheaves commutes with products. It follows that the diagrams $$ \xymatrix{ \Mon (\PShv (\catC )) \ar[r]^-{} & \PShv (\catC ) \\ \\ \Mon (\Shv (\catC _{\tau })) \ar[r]^-{} \ar[uu]_-{} & \Shv (\catC _{\tau }) \ar[uu]_-{} } $$ and $$ \xymatrix{ \Ab (\PShv (\catC )) \ar[r]^-{} & \Mon (\PShv (\catC )) \\ \\ \Ab (\Shv (\catC _{\tau })) \ar[r]^-{} \ar[uu]_-{} & \Mon (\Shv (\catC _{\tau })) \ar[uu]_-{} } $$ are commutative. Next, it is well-known that the functor $-^{\shf }$ is exact too, and hence it commutes with products. 
It follows that $-^{\shf }$ takes monoids to monoids, and abelian groups to abelian groups, and therefore we have the commutative diagrams $$ \xymatrix{ \Mon (\PShv (\catC )) \ar[r]^-{} \ar[dd]_-{-^{\shf }} & \PShv (\catC ) \ar[dd]_-{-^{\shf }} \\ \\ \Mon (\Shv (\catC _{\tau })) \ar[r]^-{} & \Shv (\catC _{\tau }) } $$ and $$ \xymatrix{ \Ab (\PShv (\catC )) \ar[r]^-{} \ar[dd]_-{-^{\shf }} & \Mon (\PShv (\catC )) \ar[dd]_-{-^{\shf }} \\ \\ \Ab (\Shv (\catC _{\tau })) \ar[r]^-{} & \Mon (\Shv (\catC _{\tau })) } $$ Now, the functor $\NN $ exists for set valued presheaf monoids and it is given section wise. Moreover, as we mentioned above, the group completion functor exists for set valued presheaf monoids, and it is also given section wise. It follows that the functors $\NN $ and $-^+$ exist also for sheaves on the site $\catC _{\tau }$, and can be constructed by means of composing the corresponding functors for presheaves with the sheafification functor. To be more precise, since the sheafification $-^{\shf }$ is left adjoint, it also commutes with all colimits. And as the functors $\NN $ and $-^+$ are constructed merely by means of products and colimits, we conclude that these two functors are preserved by sheafification. In other words, the diagrams $$ \xymatrix{ \Mon (\PShv (\catC )) \ar[dd]_-{-^{\shf }} & \PShv (\catC ) \ar[l]_-{\NN } \ar[dd]_-{-^{\shf }} \\ \\ \Mon (\Shv (\catC _{\tau })) & \Shv (\catC _{\tau }) \ar[l]_-{\NN } } $$ and $$ \xymatrix{ \Ab (\PShv (\catC )) \ar[dd]_-{-^{\shf }} & \Mon (\PShv (\catC )) \ar[l]_-{-^+} \ar[dd]_-{-^{\shf }} \\ \\ \Ab (\Shv (\catC _{\tau })) & \Mon (\Shv (\catC _{\tau })) \ar[l]_-{-^+} } $$ both commute. 
As a consequence of that, the diagrams $$ \xymatrix{ \Mon (\PShv (\catC )) \ar[dd]_-{-^{\shf }} & \PShv (\catC ) \ar[l]_-{\NN } \\ \\ \Mon (\Shv (\catC _{\tau })) & \Shv (\catC _{\tau }) \ar[l]_-{\NN } \ar[uu]_-{} } $$ and $$ \xymatrix{ \Ab (\PShv (\catC )) \ar[dd]_-{-^{\shf }} & \Mon (\PShv (\catC )) \ar[l]_-{-^+} \\ \\ \Ab (\Shv (\catC _{\tau })) & \Mon (\Shv (\catC _{\tau })) \ar[l]_-{-^+} \ar[uu]_-{} } $$ are also both commutative. The latter two commutative diagrams mean the following. If $\bcX $ is a set valued sheaf on $\catC _{\tau }$, then, in order to construct the free monoid $\NN (\bcX )$ in the category $\Mon (\Shv (\catC _{\tau }))$ we first forget the sheaf property on $\bcX $ and construct $\NN (\bcX )$ in the category $\Mon (\PShv (\catC ))$, looking at $\bcX $ as a presheaf, and then sheafify to get an object in $\Mon (\Shv (\catC _{\tau }))$. Similarly, if $\bcX $ is a set valued sheaf monoid, i.e. an object of the category $\Mon (\Shv (\catC _{\tau }))$, then, in order to construct its group completion in the category $\Ab (\Shv (\catC _{\tau }))$ we forget the sheaf property on $\bcX $ and construct $\bcX ^+$ in the category $\Ab (\PShv (\catC ))$, looking at $\bcX $ as a presheaf monoid, and then sheafify to get an object in $\Ab (\Shv (\catC _{\tau }))$. Similarly, if $\bcX $ is a pointed graded monoid in presheaves, then it is a pointed graded monoid section wise. The construction of the connective monoid $\bcX _{\infty }$, as an object in the category $\Mon (\PShv (\catC ))$, is then section wise and topology free. But if $\bcX $ is a pointed graded monoid in sheaves, the construction of $\bcX _{\infty }$ follows the rule above. Namely, we first forget the sheaf property of $\bcX $ and construct $\bcX _{\infty }$ section wise, i.e. in the category $\Mon (\PShv (\catC ))$, and then sheafify to get the object $\bcX _{\infty }$ in the category $\Mon (\Shv (\catC _{\tau }))$. 
As in the previous section, for simplicity of notation, we will write $X$ instead of the sheaf $h_X$, for any object $X$ in $\catC $, and denote objects in $\PShv (\catC )$ and $\Shv (\catC _{\tau })$ by calligraphic letters $\bcX $, $\bcY $, etc. Notice also that if $X$ is a pointed object of $\catC $ and for any $d$ the $d$-th symmetric power $\Sym ^d(X)$ exists already in $\catC $, then $\NN (X)_{\infty }$ is an $\ind $-object of $\catC $. Recall that an $\ind $-object in $\catC $ is the colimit of the composition of a functor $$ I\to \catC $$ with the embedding of $\catC $ in to $\PShv (\catC )$, taken in the category $\PShv (\catC )$, such that the category $I$ is filtered. Such a colimit is section-wise. Since $\catC $ is equipped with a topology, one can also give the definition of a sheaf-theoretical $\ind $-object. An $\ind $-object in $\catC _{\tau }$ is the colimit of the same composition, but now taken in the category $\Shv (\catC _{\tau })$. The latter is obviously the sheafification of the previous $\ind $-object, and therefore it depends on the topology $\tau $. Let $\Ind (\catC )$ be the full subcategory in $\PShv (\catC )$ of $\ind $-objects in $\catC $, and let $\Ind (\catC _{\tau })$ be the full subcategory in $\Shv (\catC _{\tau })$ of $\ind $-objects of $\catC _{\tau }$. Our aim will be to apply these abstract constructions to the case when $$ \catC =\Noe /S $$ and $$ \tau =\Nis \; . $$ The choice of the topology will be explained in the next section. Now we need to recall relative symmetric powers of locally Noetherian schemes $X$ over $S$. 
Assume that the structural morphism $$ X\to S $$ satisfies the following property: \begin{itemize} \item[(AF)]{} for any point $s\in S$ and for any finite collection $\{ x_1,\ldots ,x_l\} $ of points in the fibre $X_s$ of the structural morphism $X\to S$ at $s$ there exists a Zariski open subset $U$ in $X$, such that $$ \{ x_1,\ldots ,x_l\} \subset U $$ and the composition $$ U\to X\to S $$ is a quasi-affine morphism of schemes. \end{itemize} Quasi-affine morphisms possess various nice properties, see Section 28.12 in \cite{StacksProject}, which can be used to prove that if $U\to S$ is a morphism of locally Noetherian schemes and $X$ is AF over $S$ then $X\times _SU$ is AF over $U$. If, moreover, $U$ is AF over $S$ then $X\times _SU$ is AF over $S$. The property AF is satisfied if, for example, $X\to S$ is a quasi-affine or quasi-projective morphism of schemes, see Prop. (A.1.3) in Paper I in \cite{RydhThesis}. As we now assume that AF holds true for $X$ over $S$, the $d$-th symmetric group $\Sigma _d$ acts admissibly on the $d$-th fibred product $$ (X/S)^d=X\times _S\ldots \times _SX $$ over $S$ in the sense of \cite{SGA1}, Expos\'e V, and the relative symmetric power $$ \Sym ^d(X/S) $$ exists in the category $\Noe /S$. Then, according to the abstract constructions above, we obtain the free monoid $\NN (X/S)$ generated by the scheme $X$ over $S$ in the category $\Shv ((\Noe /S)_{\Nis })$. For every integer $d\geq 0$ the object $\NN (X/S)_d$ is the relative $d$-th symmetric power $\Sym ^d(X/S)$ of $X$ over $S$, and as such it is an object of the category $\Noe /S$. The free monoid of the scheme $X$ over $S$ is nothing else but the coproduct $$ \NN (X/S)=\coprod _{d=0}^{\infty }\Sym ^d(X/S) $$ taken in the category $\Shv ((\Noe /S)_{\Nis })$. Assume, in addition, that the structural morphism $X\to S$ has a section $$ S\to X\; . 
$$ Notice that the terminal object $*$ in the category $\Noe /S$ is the identity morphism of the scheme $S$, and therefore the splitting of the structural morphism $X\to S$ by the section $S\to X$ induces the splitting $$ \xymatrix{ & \NN (X/S) \ar[rdd]^-{\sigma } & & \\ \\ \NN \ar[ruu]^-{\iota } \ar[rr]^-{\id } & & \NN & } $$ in the category $\Shv ((\Noe /S)_{\Nis })$. The corresponding connective monoid $$ \Sym ^{\infty }(X/S)=\NN (X/S)_{\infty }= \colim _d\, \Sym ^d(X/S) $$ is an $\ind $-scheme over $S$. As such it can be considered as an object of the category $\Ind ((\Noe /S)_{\Nis })$. The colimit $$ \Sym ^{\infty }(X/S)^+ $$ in the category $\Mon (\Shv ((\Noe /S)_{\Nis }))$ is the group completion of the monoid $\Sym ^{\infty }(X/S)$, and, according to what we discussed above, this colimit is nothing else but the Nisnevich sheafification of the corresponding section wise colimit. \section{Nisnevich spaces of $0$-cycles over locally Noetherian schemes} \label{relcycles} The purpose of this section is to define what exactly we mean when we speak about spaces of $0$-cycles. First we will discuss the latest approach presented in \cite{RydhThesis}. Rydh's construction of a sheaf of relative $0$-cycles is compatible with the earlier approaches due to Suslin-Voevodsky, \cite{SV-ChowSheaves}, and Koll\'ar, \cite{KollarRatCurvesOnVar}, if we restrict all the sheaves to seminormal schemes. We think it is important to understand these two earlier approaches, but for the purpose of not enlarging the manuscript unreasonably, we will discuss the necessary definitions and results from Suslin-Voevodsky's paper \cite{SV-ChowSheaves} only. \noindent {\it Rydh's approach} \noindent So let again $X$ be AF over $S$, and for any nonnegative integer $d$ let $\Gamma ^d(X/S)$ be the $d$-divided power of $X$ over $S$, as explained in Paper I in \cite{RydhThesis}. The infinite coproduct $$ \coprod _{d=0}^{\infty }\Gamma ^d(X/S) $$ is a monoid in $\Shv ((\Noe /S)_{\Nis })$. 
The canonical morphism $$ (X/S)^d\to \Gamma ^d(X/S) $$ is $\Sigma _d$-equivariant on the source, see Prop. 4.1.5 in loc.cit, so that there exists also a canonical morphism $$ \Sym ^d(X/S)\to \Gamma ^d(X/S)\; . $$ If the base scheme $S$ is of pure characteristic $0$, or if $X$ is flat over $S$, the latter morphism is an isomorphism of schemes by Corollary 4.2.5 in Paper I in \cite{RydhThesis}. In other words, the divided power $\Gamma ^d(X/S)$ differs from the symmetric power $\Sym ^d(X/S)$ only if the residue fields $\kappa (s)$ can have positive characteristic for points $s\in S$ and, at the same time, $X$ is not flat over $S$. From the point of view of the applications which we have in mind, this is quite a bizarre situations, so that the difference between divided and symmetric powers can be ignored in practice, and we introduce it merely for completeness of the theory. Now, let $U$ be a locally Noetherian scheme over $S$. According to Paper IV in \cite{RydhThesis}, a {\it relative $0$-cycle of degree $d$} on $X\times _SU$ over $U$ is the equivalence class of ordered pairs $(Z,\alpha )$, where $Z$ is a closed subscheme in $X\times _SU$, such that the composition $$ Z\to X\times _SU\to U $$ is a finite, and $$ \alpha :U\to \Gamma ^d(Z/U) $$ is a morphism of schemes over $U$. Notice that since the morphism $Z\to U$ is finite, it is AF, and therefore the scheme $\Gamma ^d(Z/U)$ does exist. Two such pairs $(Z_1,\alpha _1)$ and $(Z_2,\alpha _2)$ are said to be equivalent if there is a scheme $Z$ and two closed embeddings $Z\to Z_1$ and $Z\to Z_2$, and a morphism of schemes $\alpha :U\to \Gamma ^d(Z/U)$ over $U$, such that the obvious composition $$ U\stackrel{\alpha }{\lra }\Gamma ^d(Z/U) \to \Gamma ^d(Z_i/U) $$ is $\alpha _i$ for $i=1,2$, see page 9 in Paper IV in \cite{RydhThesis}. If a relative cycle is represented by a pair $(Z,\alpha )$, we will denote it by $[Z,\alpha ]$. 
An important property of divided powers is that if $$ g:U'\to U $$ is a morphism of locally Noetherian schemes over $S$, the natural map \begin{equation} \label{dozhdvlesu} \Hom _{U'}(U',\Gamma ^d(X\times _SU/U)\times _UU')\to \Hom _{U'}(U',\Gamma ^d(X\times _SU'/U')) \end{equation} is a bijection, see page 12 in paper I in \cite{RydhThesis}. This allows us to define pullbacks of relative $0$-cycles. Indeed, let $[Z,\alpha ]$ be a relative cycle on $X\times _SU$ over $U$. Define $Z'$ and a closed embedding of $Z'$ in to $X\times _SU'$ by the Cartesian square $$ \diagram Z' \ar[dd]_-{} \ar[rr]^-{} & & X\times _SU' \ar[dd]^-{} \\ \\ Z \ar[rr]^-{} & & X\times _SU \enddiagram $$ The composition $$ U'\to U\to \Gamma ^d(Z/U) $$ induces the unique morphism \begin{equation} \label{paporotnik} U'\to \Gamma ^d(Z/U)\times _UU' \end{equation} over $U'$ whose composition with the projection onto $\Gamma ^d(Z/U)$ is the initial composition. A particular case of the bijection (\ref{dozhdvlesu}) is the bijection \begin{equation} \label{dozhdvlesu2} \Hom _{U'}(U',\Gamma ^d(Z/U)\times _UU') \stackrel{\sim }{\to } \Hom _{U'}(U',\Gamma ^d(Z'/U')) \end{equation} Applying (\ref{dozhdvlesu2}) to (\ref{paporotnik}) we obtain the uniquely defined morphism $$ \alpha ':U'\to \Gamma ^d(Z'/U')\; . $$ Then $$ g^*[Z,\alpha ]=[Z',\alpha '] $$ is, by definition, the pullback of the relative $0$-cycle $[Z,\alpha ]$ along the morphism $g$. It is easy to verify that such defined pullback is functorial, and we obtain the corresponding set valued presheaf $$ \bcY _{0,d}(X/S):(\Noe /S)^{\op }\to \Sets $$ sending any locally Noetherian scheme $U$ over $S$ to the set of all relative $0$-cycles of degree $d$ on $X\times _SU$ over $U$. Let also $$ \bcY _0(X/S)=\coprod _{d=0}^{\infty }\bcY _{0,d}(X/S) $$ be the total presheaf of relative $0$-cycles of all degrees. 
An important thing here is that the presheaf $\bcY _{0,d}(X/S)$ is represented by the scheme $\Gamma ^d(X/S)$, see Paper I and Paper II in \cite{RydhThesis}. And as the Nisnevich topology is subcanonical, it follows that $\bcY _{0,d}(X/S)$ is a sheaf in Nisnevich topology, i.e. an object of the category $\Shv ((\Noe /S)_{\Nis })$, and the same is true with regard to the presheaf $\bcY _0(X/S)$. Since each sheaf $\bcY _{0,d}(X/S)$ is represented by the divided power $\Gamma ^d(X/S)$, the sheaf $\bcY _0(X/S)$ is represented by the infinite coproduct $\coprod _{d=0}^{\infty }\Gamma ^d(X/S)$, the sheaf $\bcY _0(X/S)$ is a graded monoid in $\Shv ((\Noe /S)_{\Nis })$, and hence we also have its group completion $$ \bcZ _0(X/S)=\bcY _0(X/S)^+\; . $$ Moreover, if the structural morphism $X\to S$ admits a section, the graded monoid $\bcY _0(X/S)$ is pointed, and we can also construct the connective monoid $$ \bcY _0^{\infty }(X/S)=\colim _d\, \bcY _{0,d}(X/S) $$ and its group completion $$ \bcZ _0^{\infty }(X/S)=\bcY _0^{\infty }(X/S)^+\; . $$ \noindent {\it Suslin-Voevodsky's approach} \noindent For any scheme $X$ let $t(X)$ be the topological space of the scheme $X$, and let $c(X)$ be the set of closed subschemes in $X$. Then we have a map $$ t(X)\to c(X) $$ sending any point $\zeta \in X$ to its closure $\overline {\{ \zeta \} }$ with the induced reduced structure of a closed subscheme on it. Let $$ \Cycl ^{\eff }(X)=\NN (t(X)) $$ be the free monoid generated by points on $X$. Elements of $\Cycl ^{\eff }(X)$ are the {\it effective algebraic cycles}, or simply {\it effective cycles} on the scheme $X$. Let also $$ C^{\eff }(X)=\NN (c(X)) $$ free monoid generated by closed subschemes of $X$. 
For any closed subscheme $$ Z\to X\; \in \; c(X) $$ let $\zeta _1,\ldots ,\zeta _n$ be the generic points of the irreducible components of the scheme $Z$, let $$ m_i=\length (\bcO _{\zeta _i,Z}) $$ be the multiplicity of the component $Z_i=\overline {\zeta _i}$ in $Z$, and let $$ \cycl _X(Z)=\sum _im_iZ_i $$ be the fundamental class of the closed subscheme $Z$ of the scheme $X$. Then we obtain the standard map $$ \cycl _X:c(X)\to \Cycl ^{\eff }(X)\; , $$ $$ Z\mapsto \cycl _X(Z)\; . $$ The map $\cycl _X$ extends to the homomorphism of monoids $$ \cycl _X:C^{\eff }(X)\to \Cycl ^{\eff }(X)\; , $$ If $$ C(X)=C^{\eff }(X)^+ $$ and $$ \Cycl (X)=\Cycl ^{\eff }(X)^+ $$ then we also have the corresponding homomorphism of abelian groups $$ \cycl _X:C(X)\to \Cycl (X)\; . $$ Elements of the free abelian group $\Cycl (X)$ will be called {\it algebraic cycles}, or simply {\it cycles} on the scheme $X$. Points $$ \zeta \in t(X)\; , $$ or, equivalently, their closures $$ Z=\overline {\{ \zeta \} }\; , $$ considered as closed subschemes in $X$ with the induced reduced closed subscheme structure, can be also considered as {\it prime cycles} on $X$. If $$ Z=\sum _im_iZ_i\in \Cycl (X) $$ is a cycle on $X$, where $Z_i$ are prime cycles, define its support $\supp (Z)$ to be the union $$ \supp (Z)=\cup _iZ_i\in c(X) $$ with the induced reduced structure of a closed subscheme of $X$. Let $S$ be a Noetherian scheme. A point on $S$ can be understood as a morphism $$ P:\Spec (k)\to S $$ from the spectrum of a field $k$ to $S$. A {\it fat point} of $S$ over $P$ is then two morphisms of schemes $$ P_0:\Spec (k)\to \Spec (R)\quad \hbox{and}\quad P_1:\Spec (R)\to S\; , $$ where $R$ is a DVR whose residue field is $k$, such that $$ P_1\circ P_0=P\; , $$ the image of $P_0$ is the closed point of $\Spec (R)$, and $P_1$ sends the generic point $\Spec (R_{(0)})$ to the generic point of the scheme $S$. 
Let now $$ f:X\to S $$ be a scheme of finite type over $S$, and let $$ Z\to X $$ be a closed subscheme in $X$. Let $R$ be a discrete valuation ring, $$ D=\Spec (R)\; , $$ and let $$ g:D\to S $$ be a morphism of schemes from $D$ to $S$. Let also $$ \eta =\Spec (R_{(0)}) $$ be the generic point of $D$, $$ X_D=X\times _SD\; ,\quad Z_D=Z\times _SD\qand Z_{\eta }=Z\times _S\eta \; . $$ Then there exists a unique closed embedding $$ Z'_D\to Z_D\; , $$ such that its pull-back $$ Z'_{\eta }\to Z_{\eta } $$ along the morphism $Z_{\eta }\to Z_D$, is an isomorphism, and the composition $$ Z'_D\to Z_D\to D $$ is a flat morphism of schemes, see Proposition 2.8.5 in \cite{EGAIV(2)}. In particular, one can apply this ``platification" process to a fat point $(P_0,P_1)$ over a point $P\in S$ with $g=P_1$. Let $X_P$ be the fibre of the morphism $X_D\to D$ over the point $P_0$, $$ Z_P=Z_D\times _{X_D}X_P\qand Z'_P=Z'_D\times _{Z_D}Z_P\; . $$ Since the closed subscheme $Z'_D$ of $X_D$ is flat over $D$, we define the pull-back $(P_0,P_1)^*(Z)$ of the closed subscheme $Z$ to the fibre $X_P$ by the formula $$ (P_0,P_1)^*(Z)=\cycl _{X_P}(Z'_P)\; . $$ This gives the definition of a pullback along $(P_0,P_1)$ for primes cycles and, by linearity, extends to a homomorphism $$ (P_0,P_1)^*:\Cycl (X)\to \Cycl (X_P)\; . $$ The following definition of Suslin and Voevodsky is of crucial importance, see pp 23 - 24 in \cite{SV-ChowSheaves}. Let $$ Z=\sum m_iZ_i\in \Cycl (X) $$ be a cycle on $X$, and let $\zeta _i$ be the generic point of the prime cycle $Z_i$ for each index $i$. Then $Z$ is said to be a {\it relative cycle} on $X$ over $S$ if: \begin{itemize} \item{} for any generic point $\eta $ of the scheme $S$ there exists $i$, such that $$ f(\zeta _i)=\eta \; , $$ \item{} for any point $P$ on $S$, and for any two fat points $(P_0,P_1)$ and $(P_0',P_1')$ over $P$, $$ (P_0,P_1)^*(Z)=(P_0',P_1')^*(Z) $$ in $\Cycl (X_P)$. 
\end{itemize} The sum of relative cycles is a relative cycle again, and the same for taking the opposite cycle in $\Cycl (X)$. The $0$ in $\Cycl (X)$ is relative by convention. Then we see that relative cycles form a subgroup $$ \Cycl (X/S)= \{ Z\in \Cycl (X)\; |\; \hbox{$Z$ is relative over $S$} \} \; . $$ in $\Cycl (X)$. Let also $$ \Cycl ^{\eff }(X/S)= \{ Z=\sum m_iZ_i\in \Cycl (X/S)\; |\; m_i\geq 0\; \forall i\; \} $$ be a monoid of effective relative cycles in $X$ over $S$. In general the monoid $\Cycl (X/S)$ is {\it not} a free monoid generated by prime relative cycles, and the group $\Cycl (X/S)$ is {\it not} a free abelian group generated by prime relative cycles. If $\zeta \in t(X)$, the dimension of $\zeta $ in $X$, $$ \dim (\zeta ,X)\; , $$ is, by definition, the dimension of the closure $$ Z=\overline {\{ \zeta \} } $$ inside $X$. A relative cycle $$ Z=\sum m_iZ_i\in \Cycl (X/S) $$ is said to be of {\it relative dimension} $r$ if the generic point $\zeta _i$ of each prime cycle $Z_i$ has dimension $r$ in its fibre over $S$. In other words, if $$ \eta _i=f(\zeta _i)\; , $$ we look at the fibre $X_{\eta _i}$ of the morphism $f$ at $\eta _i$. The cycle $Z$ is of relative dimension $r$ over $S$ if $$ \dim (\zeta _i,X_{\eta _i})=r $$ for each index $i$. If $Z$ is a relative cycle of relative dimension $r$ on $X$, then we write $$ \dim _S(Z)=r\; . $$ Following \cite{SV-ChowSheaves}, p 24, we define $$ \Cycl (X/S,r)=\{ Z\in \Cycl (X/S)\; |\; \dim _S(Z)=r\} $$ to be the subset of relative algebraic cycles of relative dimension $r$ on $X$, which is obviously a subgroup in $\Cycl (X/S)$. The definition of $$ \Cycl ^{\eff }(X/S,r)= \{ Z=\sum m_iZ_i\in \Cycl (X/S,r)\; |\; m_i\geq 0\; \forall i\; \} $$ is straightforward. Notice that if $Z$ is a relative cycle of relative dimension $r$, it does not mean that all the components $Z_i$ are of the same dimension $r$. To pick up equidimensional cycles, we need the following definition. 
For any point $\zeta \in t(X)$ let $$ \dim (X/S)(\zeta )=\dim _{\zeta }(f^{-1}(f(\zeta ))) $$ be the dimension of the fibre $f^{-1}(f(\zeta ))$ of the morphism $f$ at $\zeta $. The morphism $f$ is said to be {\it equidimensional} of dimension $r$ if every irreducible component of $X$ dominates an irreducible component of $S$ and the function $$ \dim (X/S):t(X)\to \ZZ $$ is constant and equals $r$ for every point $\zeta $ on the scheme $X$. A cycle $Z\in \Cycl (X/S)$ is equidimensional of dimension $r$ over $S$ if so is the composition $$ \supp (Z)\to X\to S\; . $$ Let then $$ \Cycl _{\equi }(X/S,r)= \{ Z\in \Cycl (X/S,r)\; |\; \hbox{$Z$ is equidim. of dim. $r$}\} \; . $$ Accordingly, $$ \Cycl _{\equi }^{\eff }(X/S,r)= \{ Z=\sum _im_iZ_i\in \Cycl _{\equi }(X/S,r)\; |\; m_i\geq 0\; \forall i\; \} \; . $$ Next, let $$ U\to S $$ be a locally Noetherian scheme over $S$ (not necessarily of finite type over $S$). In \cite{SV-ChowSheaves}, for any cycle $$ Z\in \Cycl (X/S,r) $$ Suslin and Voevodsky constructed a uniquely defined cycle $$ Z_U\in \Cycl (X\times _SU/U,r)_{\QQ }\; , $$ a pullback of $Z$ along $U\to S$, such that it is compatible with pullbacks along fat points. Here and below, for any abelian group $A$ we denote by $A_{\QQ }$ the tensor product $A\otimes _{\ZZ }\QQ $. Thus, following Suslin and Voevodsky, we obtain the obvious presheaf $$ \Cycl (X/S,r)_{\QQ } $$ on the category $\Noe /S$, such that for any morphism $$ U\to S $$ in $\Noe /S$, $$ \Cycl (X/S,r)_{\QQ }(U)=\Cycl (X\times _SU/U,r)_{\QQ }\; , $$ and the restriction morphisms are induced by the Suslin-Voevodsky's pullbacks of relative cycles. Following \cite{SV-ChowSheaves}, we will say that the pullback $Z_U$ of a cycle $Z\in \Cycl (X/S,r)$ is {\it integral} if it lies in the image of the canonical homomorphism $$ \Cycl (X\times _SU/U,r)\to \Cycl (X\times _SU/U,r)_{\QQ } $$ for all schemes $U$ in $\Noe /S$, and define the subgroup $$ z(X/S,r)=\{ Z\in \Cycl (X/S,r)\; |\; \hbox{$Z_U$ is integral}\} \; . 
$$ Then $z(X/S,r)$ is an abelian subpresheaf in the presheaf $\Cycl (X/S,r)_{\QQ }$ on the category $\Noe /S$. Let also $$ z^{\eff }(X/S,r)= \{ Z=\sum m_iZ_i\in z(X/S,r)\; |\; m_i\geq 0\; \forall i\} $$ and $$ z_{\equi }(X/S,r)= \{ Z\in z(X/S,r)\; |\; \hbox{$Z$ is equidim. of dim. $r$ over $S$}\} \; . $$ Clearly, $z^{\eff }(X/S,r)$ is a subpresheaf of monoids and $z^{\equi }(X/S,r)$ is a presheaf of abelian groups in $z(X/S,r)$. For any morphism $$ U\to S\; , $$ which is an object of $\Noe /S$, set $$ \PrimeCycl (X\times _SU/U,r)= \{ Z\in \Cycl (X\times _SU/U,r)\; |\; \hbox{$Z$ is prime}\} $$ and $$ \PrimeCycl _{\equi }(X\times _SU/U,r)= \{ Z\in \PrimeCycl (X\times _SU/U,r)\; |\; \hbox{$Z$ is equidim.}\} $$ If $S$ is regular, and if the morphism $U\to S$ is an object of $\Reg /S$, then $$ z^{\eff }(X/S,r)= \NN (\PrimeCycl _{\equi }(X\times _SU/U,r))\; , $$ and $$ z_{\equi }(X/S,r)= \NN (\PrimeCycl _{\equi }(X\times _SU/U,r))^+\; , $$ see Corollary 3.4.5 in \cite{SV-ChowSheaves}. It does not mean, however, that $z^{\eff }(X/S,r)$ is a free monoid in the category of set valued presheaves freely generated by a set valued ``presheaf of relative prime cycles of dimension $r$" on the category $\Reg /S$, as the Suslin-Voevodsky pullback of a relative prime cycle is not necessarily a prime cycle, so that the needed set valued presheaf does not exist. But $z_{\equi }(X/S,r)$ is certainly the group completion of $z^{\eff }(X/S,r)$ as a presheaf on $\Reg /S$. \begin{theorem} \label{susvoe_sheaf} Let $S$ be a Noetherian scheme, and let $X$ be a scheme of finite type over $S$. Then the presheaves $z(X/S,r)$ and $z^{\eff }(X/S,r)$ are sheaves in $\cdh $-topology and, as a consequence, in the Nisnevich topology on the category $\Noe /S$. \end{theorem} \begin{pf} See Theorem 4.2.9(1) on page 65 in \cite{SV-ChowSheaves}. \end{pf} Relative cycles can be classified by their degrees, provided there exists a projective embedding of $X$ over $S$. 
Indeed, assume that $X$ is projective over $S$, i.e. there is a closed embedding $$ i:X\to \PR ^n_S $$ over $S$. For each cycle $$ Z=\sum m_jZ_j\in \Cycl (X/S) $$ one can define its degree $$ \deg (Z,i)=\sum \deg (i(Z_j)) $$ with regard to the embedding $i$. Let also $$ z^{\eff }_d((X,i)/S,r)=\{ Z\in z_{\equi }(X/S,r)\; |\; \deg (Z,i)=d\} \; . $$ The set valued presheaf $$ z^{\eff }_d((X,i)/S,r):\Noe /S\to \Sets $$ is given by the formula $$ z^{\eff }_d((X,i)/S,r)(U)= \{ Z\in z_{\equi }(X\times _SU/U,r)\; |\; \deg (Z,i\times _S\id _U)=d\} \; , $$ for any locally Noetherian scheme $U$ over $S$. Now recall that if $\bcF $ is a set-valued presheaf on $\Noe /S$ then $\bcF $ is said to be $\h $-representable if there is a scheme $Y$ over $S$, such that the $\h $-sheafification $\bcF _{\h }$ of the presheaf $\bcF $ is isomorphic to the $\h $-sheafification $\Hom _S(-,Y)_{\h }$ of the representable presheaf $\Hom _S(-,Y)$, see Definition 4.4.1 in \cite{SV-ChowSheaves}. \begin{theorem} \label{hrep} Let $X$ be a projective scheme of finite type over $S$ and fix a projective embedding $i:X\to \PR ^n_S$ over $S$. Then, for any two nonnegative integers $r$ and $d$, the presheaf $z^{\eff }_d((X,i)/S,r)$ is $\h $-representable by a scheme $C_{r,d}(X/S,i)$ projective over $S$, i.e. there is an isomorphism $$ z^{\eff }_d((X,i)/S,r)_{\h }\simeq \Hom _S(-,C_{r,d}(X/S,i))_{\h } $$ of set valued sheaves in $\h $-topology on $\Noe /S$. Moreover, $$ z^{\eff }(X/S,r)= \coprod _{d=0}^{\infty }z^{\eff }_d((X,i)/S,r)\; , $$ and then $z^{\eff }(X/S,r)$ is $\h $-representable by the scheme $$ C_r(X/S)=\coprod _{d=0}^{\infty }C_{r,d}(X/S,i)\; . $$ \end{theorem} \begin{pf} See Section 4.2 in \cite{SV-ChowSheaves}. \end{pf} A disadvantage of Theorem \ref{hrep} is in the presence of $\h $-sheafification. The latter is the price to pay for the generality of the representability result. For relative $0$-cycles this obstacle can be avoided as follows. 
Recall that we have already defined the category $\Nor /S$, a full subcategory in $\Sch /S$ generated by schemes over $S$ whose structural morphism is normal, i.e. the fibre at every point is a normal scheme, see Definition 36.18.1 in \cite{StacksProject}. Similarly, one can define the notion of a seminormal morphism and introduce a full subcategory $\Seminor /S$ generated by locally Noetherian schemes over $S$ whose structural morphisms are seminormal, so that we have a chain of subcategories $$ \Nor /S\subset \Seminor /S\subset \Noe /S\; . $$ For any presheaf $\bcF $ on $\Noe /S$ let $\bcF |_{\Seminor /S}$ be the restriction of $\bcF $ on the subcategory $\Seminor /S$. To avoid divided powers, suppose that either the base scheme $S$ is of pure characteristic $0$ or $X$ is flat over $S$. Recall that it follows that $$ \Gamma ^d(X/S)=\Sym ^d(X/S) $$ by Corollary 4.2.5 in Paper I in \cite{RydhThesis}, and hence one can work with symmetric powers instead of divided ones. By Theorem 3.1.11 on page 30 of the same paper, we have the canonical identifications \begin{equation} \label{maincanonical1*} \bcY _{0,d}(X/S)=\Sym ^d(X/S)\; , \end{equation} $$ \bcY _0(X/S)=\left( \coprod _{d=0}^{\infty } \Sym ^d(X/S)\right)\; , $$ $$ \bcY _0^{\infty }(X/S)=\Sym ^{\infty }(X/S)\; , $$ $$ \bcZ _0(X/S)= \left( \coprod _{d=0}^{\infty }\Sym ^d(X/S)\right)^+ $$ and $$ \bcZ _0^{\infty }(X/S)= \Sym ^{\infty }(X/S)^+\; . $$ In other words, we do not need $\h $-sheafification to prove representability of sheaves of $0$-cycles in Rydh's terms. 
The point here is that, assuming that $S$ is semi-normal over $\Spec (\QQ )$, after restricting of these five sheaves on the category $\Seminor /S$, we also have the corresponding canonical isomorphisms \begin{equation} \label{maincanonical1'} \bcY _{0,d}(X/S)|_{\Seminor /S}\simeq z^{\eff }_d((X,i)/S,0)|_{\Seminor /S}\; , \end{equation} \begin{equation} \label{maincanonical2'} \bcY _0(X/S)|_{\Seminor /S}\simeq z^{\eff }(X/S,0)|_{\Seminor /S}\; , \end{equation} \begin{equation} \label{maincanonical3'} \bcY _0^{\infty }(X/S)|_{\Seminor /S}\simeq z^{\eff }(X/S,0)_{\infty }|_{\Seminor /S}\; , \end{equation} \begin{equation} \label{maincanonical3.5'} \bcZ _0(X/S)|_{\Seminor /S}\simeq z(X/S,0)|_{\Seminor /S} \end{equation} and \begin{equation} \label{maincanonical4'} \bcZ _0^{\infty }(X/S)|_{\Seminor /S}\simeq z(X/S,0)_{\infty }|_{\Seminor /S}\; . \end{equation} Moreover, the same result holds true when we compare Rydh's sheaves of $0$-cycles with Koll\'ar's sheaves constructed in Chapter I of the book \cite{KollarRatCurvesOnVar}. These important comparison results are proven in Section 10 of Paper IV in \cite{RydhThesis}. Thus, since now we will always assume that either the base scheme $S$ is of pure characteristic $0$ or $X$ is flat over $S$, to work with symmetric powers, and in all cases when $S$ will be semi-normal over $\QQ $, we will systematically identify the restrictions of Suslin-Voevodsky's and Rydh's sheaves of $0$-cycles on semi-normal schemes via the isomorphisms (\ref{maincanonical1'}), (\ref{maincanonical2'}), (\ref{maincanonical3'}), (\ref{maincanonical3.5'}) and (\ref{maincanonical4'}). The Nisnevich sheaf $\Sym ^{\infty }(X/S)^+$ will be now used to construct what then will be the most preferable reincarnation of the space of $0$-cycles on $X$ over the base scheme $S$. 
\section{Chow atlases on the Nisnevich spaces of $0$-cycles} To consider the sheaf $\Sym ^{\infty }(X/S)^+$ as a geometrical object, we need to endow it with an atlas, in the line of the definitions in Section \ref{kaehler}. The aim of this section is to present a natural atlas, the Chow atlas, on the sheaf of $0$-cycles $\Sym ^{\infty }(X/S)^+$. First of all, the sheaf of $0$-cycles possesses a natural inductive structure on it. For each non-negative integer $d$ let $$ \iota _d:\Sym ^d(X/S)\to \Sym ^{\infty }(X/S) $$ be the canonical morphism into the colimit. For short of notation, let also $$ \Sym ^{d,d}(X/S)= \Sym ^d(X/S)\times _S\Sym ^d(X/S)\; , $$ $$ \Sym ^{\infty ,\infty }(X/S)= \Sym ^{\infty }(X/S)\times _S\Sym ^{\infty }(X/S) $$ and let $$ \iota _{d,d}:\Sym ^{d,d}(X/S)\to \Sym ^{\infty ,\infty }(X/S) $$ be the fibred product of $\iota _d$ with itself over $S$. Recall that $\Sym ^{\infty } (X/S)^+$ is the group completion of the monoid $\Sym ^{\infty }(X/S)$ in the category $\Shv ((\Noe /S)_{\Nis })$. It means that we have a pushout square $$ \diagram \Sym ^{\infty }(X/S)\ar[rr]^-{\Delta } \ar[dd]^-{} & & \Sym ^{\infty ,\infty }(X/S) \ar[dd]^-{\sigma _{\infty }} \\ \\ S \ar[rr]^-{} & & \Sym ^{\infty }(X/S)^+ \enddiagram $$ in the category $\Mon (\Shv ((\Noe /S)_{\Nis }))$. In particular, the quotient morphism $\sigma _{\infty }$ is a morphism of monoids, i.e. it respects the monoidal operations in the source and target. Let $$ \sigma _d:\Sym ^{d,d}(X/S)\to \Sym ^{\infty }(X/S)^+ $$ be the composition of the morphisms $\iota _{d,d}$ and $\sigma _{\infty }$ in the category $\Shv ((\Noe /S)_{\Nis })$, and let $$ \Sym ^d(X/S)^+ $$ be the sheaf-theoretical image of the morphism $\sigma _d$, i.e. the image of $\sigma _d$ in the category $\Shv ((\Noe /S)_{\Nis })$. Some explanation is in place here. 
A priori, for any nonnegative integer $d$, one can compute the $d$-th symmetric power $$ S^d(X/S) $$ in the category of presheaves $\PShv (\Noe /S)$, and the $d$-th symmetric power $$ \Sym ^d(X/S)\; , $$ computed in the category of sheaves $\Shv ((\Noe /S)_{\Nis })$, is the Nisnevich sheafification of the presheaf $S^d(X/S)$. But since the symmetric power $S^d(X/S)$ exists already as a scheme in the category $\Noe /S$, and since the Nisnevich topology is subcanonical, we have that $$ S^d(X/S)=\Sym ^d(X/S)\; , $$ for any $d\geq 0$. Let $$ \coprod _{d=0}^{\infty }S^d(X/S) $$ be the free monoid $\NN (X/S)$ of $X$ over $S$ computed in the category of presheaves $\PShv (\Noe /S)$. Since the category $\Noe /S$ is a Noetherian category, one can show that this infinite coproduct is a Nisnevich sheaf, and hence it coincides with the free monoid $\NN (X/S)$ of $X$ over $S$ computed in the category of sheaves $\Shv ((\Noe /S)_{\Nis })$. In other words, there is no difference between $\NN (X/S)$ in $\PShv (\Noe /S)$ and $\NN (X/S)$ in $\Shv ((\Noe /S)_{\Nis })$, and we write $$ \NN (X/S)=\coprod _{d=0}^{\infty }\Sym ^d(X/S)= \coprod _{d=0}^{\infty }S^d(X/S)\; . $$ Similarly, let $$ S^{\infty }(X/S) $$ be the free connective monoid $\NN (X/S)_{\infty }$ of $X$ over $S$ computed in the category of presheaves $\PShv (\Noe /S)$, so that the free connective monoid $\Sym ^{\infty }(X/S)$ of $X$ over $S$, computed in the category of sheaves $\Shv ((\Noe /S)_{\Nis })$, is nothing else but the Nisnevich sheafification of $S^{\infty }(X/S)$. Again, as the category $\Noe /S$ is a Noetherian category, one can show that $S^{\infty }(X/S)$ is a sheaf in Nisnevich topology, and hence $$ S^{\infty }(X/S)=\Sym ^{\infty }(X/S)\; . $$ This gives us that, if $$ S^{\infty }(X/S)^+ $$ is the group completion of the presheaf free monoid $S^{\infty }(X/S)$ in the category $\Mon (\PShv (\Noe /S))$, i.e. 
the square \begin{equation} \label{completiondiagr*} \diagram S^{\infty }(X/S)\ar[rr]^-{\Delta } \ar[dd]^-{} & & S^{\infty ,\infty }(X/S) \ar[dd]^-{\sigma _{\infty }} \\ \\ S \ar[rr]^-{} & & S^{\infty }(X/S)^+ \enddiagram \end{equation} is co-Cartesian, the sheaf group completion $\Sym ^{\infty }(X/S)^+$ of $\Sym ^{\infty }(X/S)$ in the category $\Mon (\Shv ((\Noe /S)_{\Nis }))$ is the sheafification of $S^{\infty }(X/S)^+$, i.e. $$ \Sym ^{\infty }(X/S)^+=S^{\infty }(X/S)^+_{\shf }\; . $$ \begin{lemma} \label{smallbutimportant} The presheaf $S^{\infty }(X/S)^+$ is separated. Equivalently, the canonical morphism $$ S^{\infty }(X/S)^+\to \Sym ^{\infty }(X/S)^+ $$ is a monomorphism in $\PShv (\Noe /S)$. \end{lemma} \begin{pf} Since $S^{\infty }(X/S)^+$ is an abelian group object in the category $\PShv (\Noe /S)$, to prove the lemma it is enough to show that, if $$ F\in S^{\infty }(X/S)^+(U) $$ is a section of the presheaf $S^{\infty }(X/S)^+$ on some locally Noetherian scheme $U$ over $S$, and if there exists a Nisnevich covering $$ \{ f_i:U_i\to U\} _{i\in I}\; , $$ such that the pullback $F_i$ of the section $F$ to $U_i$ along each morphism $U_i\to U$ is $0$ in the abelian group $S^{\infty }(X/S)^+(U_i)$, then $F$ is $0$ in the abelian group $S^{\infty }(X/S)^+(U)$. The section $F$ can be interpreted as a morphism $$ F:U\to S^{\infty }(X/S)^+\; . $$ For short of notation, let $$ S^{\infty ,\infty }(X/S)= S^{\infty }(X/S)\times _SS^{\infty }(X/S)\; , $$ and, for any nonnegative integer $d$ let $$ S^{d,d}(X/S)=S^d(X/S)\times _SS^d(X/S)\; . $$ In these terms, the morphism $F$ is the composition of a certain morphism $$ (f_1,f_2):U\to S^{\infty ,\infty }(X/S)\; , $$ induced by two morphisms of presheaves $$ f_1:U\to S^{\infty }(X/S) \qqand f_2:U\to S^{\infty }(X/S)\; , $$ and the quotient morphism $$ \sigma _{\infty }:S^{\infty ,\infty }(X/S)\to S^{\infty }(X/S)^+\; . 
$$ Moreover, there exists $d$, such that both morphisms $f_1$ and $f_2$ factorize through $S^d(X/S)$, and then $F$ is the composition \begin{equation} \label{zimodry} U\stackrel{(f_1,f_2)}{\lra }S^{d,d}(X/S) \stackrel{\iota _{d,d}}{\lra }S^{\infty ,\infty }(X/S) \stackrel{\sigma _{\infty }}{\lra }S^{\infty }(X/S)^+\; . \end{equation} and the morphisms $f_1$ and $f_2$ are morphisms of locally Noetherian schemes over the base scheme $S$. Now, since $S^{\infty }(X/S)$ is a cancellative monoid in $\PShv (\Noe /S)$, the commutative square (\ref{completiondiagr*}) is a Cartesian square in $\PShv (\Noe /S)$. It follows that, since $F_i=0$ for all $i\in I$, the images of the compositions $$ U_i\to U\stackrel{(f_1,f_2)}{\lra } S^{d,d}(X/S)\stackrel{\iota _{d,d}}{\lra } S^{\infty ,\infty }(X/S) \stackrel{\sigma _{\infty }}{\lra }S^{\infty }(X/S)^+ $$ are all in the image of the diagonal morphism $$ \Delta :S^{\infty }(X/S)\to S^{\infty ,\infty }(X/S)\; . $$ And since the morphism $$ \coprod _{i\in I}U_i\to U $$ is a scheme-theoretical epimorphism, we see that the image of the morphism (\ref{zimodry}) is also in the image of the diagonal morphism $\Delta $. The latter means that the section $F$ equals $0$. \end{pf} Let $$ \sigma _d:S^{d,d}(X/S)\to S^{\infty }(X/S)^+ $$ be the composition of the morphisms $\iota _{d,d}$ and $\sigma _{\infty }$ in the category $\PShv (\Noe /S)$, and let $$ S^d(X/S)^+ $$ be the image of the morphism $\sigma _d$ in the category $\PShv (\Noe /S)$. Then $S^d(X/S)^+$ is a sub-presheaf in $\Sym ^{\infty }(X/S)^+$. As the sheafification functor is exact, it preserves monomorphisms. It follows that $$ \Sym ^d(X/S)^+=(S^d(X/S)^+)^{\shf }\; , $$ i.e. $\Sym ^d(X/S)^+$ is the Nisnevich sheafification of the preshaef $S^d(X/S)^+$. 
And, once again, the sheaf-theoretical image $\Sym ^d(X/S)^+$ of the morphism $\sigma _d$ comes together with the epimorphism \begin{equation} \label{vechervgrumbi_z} \sigma _d:\Sym ^{d,d}(X/S)\to \Sym ^d(X/S)^+ \end{equation} in the category $\Shv ((\Noe /S)_{\Nis })$. Next, the section $S\to X$ of the structural morphism $X\to S$ induces the closed embeddings $$ \Sym ^d(X/S)\to \Sym ^{d+1}(X/S)\; , $$ which, in turn, induce the closed embeddings $$ \Sym ^{d,d}(X/S)\to \Sym ^{d+1,d+1}(X/S)\; . $$ The latter morphisms induce the corresponding morphisms $$ \Sym ^d(X/S)^+\to \Sym ^{d+1}(X/S)^+ $$ in the category $\Shv ((\Noe /S)_{\Nis })$. Then \begin{equation} \label{indstructure1} \Sym ^d(X/S)^+=\colim _d\; \Sym ^d(X/S)^+\; , \end{equation} i.e. the space $\Sym ^d(X/S)^+$ is naturally the colimit of the spaces $\Sym ^d(X/S)^+$. \begin{remark} {\rm The sheaf $\Sym ^d(X/S)^+$ is {\it not} a group completion of any monoid. } \end{remark} The constructions above allow us to consider a natural atlas for the $$ CA_0(X/S,0)=\{ \sigma _d\; |\; d\in \ZZ \; ,\; d\geq 0\} $$ be the set of all morphisms $\sigma _d$, and let $$ CA(X/S,0)=\langle CA_0(X/S,0)\rangle $$ be the {\it Chow atlas} on the Nisnevich connective space $\Sym ^{\infty }(X/S)^+$. According to Section \ref{kaehler}, the sheaf $\Sym ^{\infty }(X/S)^+$ is now the Nisnevich space of relative $0$-cycles on $X$ over $S$, with regard to the Chow atlas $$ CA=CA(X/S,0)\; . $$ For short, we will say that $\Sym ^{\infty }(X/S)^+$ is the {\it space of $0$-cycles} on $X$ over $S$ Hilbert schemes allow us to consider a natural subatlas in the Chow atlas $CA$. Indeed, let $U$ be a locally Noetherian scheme over $S$, and let $$ Z\to X\times _SU $$ be a closed subscheme in $X\times _SU$. Suppose the composition $$ g:Z\to U $$ of the closed embedding of $Z$ into $X\times _SU$ with the projection onto $U$ is flat. 
Then, if $V$ is an irreducible component of $Z$, the closure $\overline {g(V)}$ is an irreducible component of $U$. Therefore, if $U$ is irreducible, $\overline {g(V)}=U$. If, moreover, $g$ is proper, then $\overline {g(V)}=g(V)$, and hence $g$ is a surjection. Since $X$ is embedded in to $\PR ^n_S$ over $S$ via the closed embedding $i$, the scheme $X\times _SU$ embeds into $\PR ^m_U$ over $U$, and the morphism $g:Z\to U$ factorizes through the embedding of $Z$ into $\PR ^m_U$ followed by the projection from $\PR ^m_U$ onto $U$. Therefore, if $u\in U$ and $Z_u$ is the fibre of $g$ at $u$, the Hilbert polynomial of the structural sheaf $\bcO _{Z_u}$ does not depend on $u$, see Theorem 9.9 on page 261 in \cite{Hartshorne}. This fact allows us to consider, for every polynomial $$ P\in \QQ [x] $$ the standard Hilbert set valued presheaf $$ \HilbF _P(X/S):\Noe /S\to \Sets $$ sending a locally Noetherian $S$-scheme $U$ to the set of closed subschemes $Z$ in the product $X\times _SU$, which are flat and proper over $U$, and such that the Hilbert polynomial of $\bcO _{Z_u}$ is $P$. Let also $$ \HilbF(X/S)=\coprod _{P\in \QQ [x]}\HilbF _P(X/S): \Noe /S\to \Sets $$ be the total Hilbert functor on locally Noetherian schemes over $S$. Since $X$ is projective over $S$, the Hilbert functors $\HilbF _P(X/S)$ are representable. This result is due to Grothendieck, see Chapter 5 in \cite{FGAexplained} or Chapter I.1 in \cite{KollarRatCurvesOnVar}. For each polynomial $P$ in $\QQ [x]$ there exists a scheme, called the Hilbert scheme, $$ \HilbS _P(X/S) $$ over $S$ representing the functor $\HilbF _P(X/S)$. Moreover, this scheme is projective over $S$. Within this paper we are interested in the case when $P=d$ is a non-negative integer. 
In that case the Hilbert scheme $$ \HilbS ^d(X/S)=\HilbS _P(X/S)|_{P=d} $$ is a scheme over the $d$-th relative symmetric power, and we have the so-called Hilbert-Chow morphism of schemes \begin{equation} \label{enot_d} \hc _d:\HilbS ^d(X/S)\to \Sym ^d(X/S)\; . \end{equation} For any nonnegative integer $d$ let $$ \HilbS ^{d,d}(X/S)= \HilbS ^d(X/S)\times _S\HilbS ^d(X/S)\; , $$ and let $$ HA_0(X/S,0)=\{ a_d\circ (\hc _{d,d})\; |\; d\in \ZZ \; ,\; d\geq 0\} \; , $$ where $$ \hc _{d,d}:\HilbS ^{d,d}(X/S)\to \Sym ^{d,d}(X/S) $$ is the fibred self-product over $S$ of the $d$-th Hilbert-Chow morphism $\hc _d$. Let also $$ HA(X/S,0)=\langle HA_0(X/S,0)\rangle $$ be the {\it Hilbert atlas} on the space $\Sym ^{\infty }(X/S)^+$. Obviously, the Hilbert atlas is a subatlas of the Chow atlas on $\Sym ^{\infty }(X/S)^+$. Now, let $$ \bcO _{\Sym ^{\infty }(X/S)^+} $$ be the sheaf of regular functions on the site $\Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et }$, constructed with regard to the Chow atlas $CA$ on the sheaf $\Sym ^{\infty }(X/S)^+$, as explained in Section \ref{kaehler}. In particular, if $U\to \Sym ^{\infty }(X/S)^+$ is a morphism from a scheme $U$ to $\Sym ^{\infty }(X/S)^+$ over $S$, which is \'etale with regard to the Chow atlas on $\Sym ^{\infty }(X/S)^+$, then since $$ \Gamma (U\to \Sym ^{\infty }(X/S)^+, \bcO _{\Sym ^{\infty }(X/S)^+})= \Gamma (U,\bcO _U)\; . $$ As soon as the sheaf $\bcO _{\Sym ^{\infty }(X/S)^+}$ is defined, we can also define the sheaf of K\"ahler differentials $$ \Omega ^1_{\Sym ^{\infty }(X/S)^+}= \Omega ^1_{\Sym ^{\infty }(X/S)^+/S} $$ on the site $\Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et }$, see Section \ref{kaehler}. Let also $$ T_{\Sym ^{\infty }(X/S)^+}= T_{\Sym ^{\infty }(X/S)^+/S} $$ be the tangent sheaf, i.e. the dual to the sheaf of K\"ahler differentials on the site $\Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et }$. 
From now on, the sheaf of K\"ahler differentials and the tangent sheaf on the site $\Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et }$ will be considered as the sheaf of K\"ahler differentials and the tangent sheaf on the space of $0$-cycles $\Sym ^{\infty }(X/S)^+$.
Or, equivalently, an \'etale neighbourhood of $P$ is an \'etale morphism $$ U\to \Sym ^{\infty }(X/S)^+ $$ over $S$ such that the point $$ P:\Spec (K)\to \Sym ^{\infty }(X/S)^+ $$ factorizes through $U$. As in Section \ref{kaehler}, all \'etale neighbourhoods form the category of \'etale neighbourhoods of $P$ on $\Sym ^{\infty }(X/S)^+$ denoted by $\bcN _P$. Now, Lemma 7.31.7 \cite{StacksProject} gives us that in order to show that the corresponding stalk functor $$ \stalk _P: \Shv (\Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et })\to \Sets $$ induces a point of the topos $\Shv (\Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et })$, we need to show that the functor $u_P$ satisfies all the three items of Definition 7.31.2 in loc.cit. The items (1) and (2) are satisfied in general, see Section \ref{kaehler}. The last item (3) of Definition 7.31.2 in \cite{StacksProject} is satisfied when the category $\bcN _P$ is cofiltered. Therefore, our aim is now to show that, in case of the space of $0$-cycles $\Sym ^{\infty }(X/S)^+$ the category $\bcN _P$ is cofiltered. \section{\'Etale neigbourhoods of a point on $\Sym ^{\infty }(X/S)^+$} We start with the following representability lemma, which will be necessary for the study of the category $\bcN _P$. \begin{lemma} \label{keylemma} For any nonnegative integer $d$ and for any two morphisms $$ U\to S^{\infty }(X/S)^+ \qqand V\to S^{\infty }(X/S)^+\; , $$ where $U$ and $V$ are locally Noetherian schemes over $S$, the fibred product $$ U\times _{S^{\infty }(F/S)^+}V\; , $$ in the category of presheaves $\PShv (X/S)$, is represented by a locally Noetherian scheme over $S$. \end{lemma} \begin{pf} We need to find a locally Noetherian scheme over $S$ representing the fibred product $$ U\times _{S^{\infty }(X/S)^+}V $$ in the category $\PShv (\Noe /S)$. Denote the morphism from $U$ to $S^{\infty }(X/S)^+$ by $F$, and the morphism from $V$ to $S^{\infty }(X/S)^+$ by $G$. 
\iffalse $$ \diagram U\times _SV \ar@<0.5ex>[dddrrr]^-{G\circ \pr _V} \ar@<-0.5ex>[dddrrr]_-{F\circ \pr _U} \ar[dddd]_-{\pr _U} \ar[rrrr]^-{\pr _V} & & & & V \ar[dddl]_-{G}\ar[dddd]^-{} \\ \\ \\ & & & S^{\infty }(X/S)^+ \ar[rd]^-{} & \\ U \ar[rrru]^-{F} \ar[rrrr]^-{} & & & & S \enddiagram $$ \fi Clearly, the object $U\times _{S^{\infty }(F/S)^+}V$ is the coequalizer of the compositions of the projections from $U\times _SV$ on to $U$ and $V$ with the morphisms $F$ and $G$ respectively. \iffalse $$ U\times _{\Sym ^{\infty }(F/S)^+}V= \coeq (\xymatrix{ U\times _SV \ar@<+0.5ex>[r]^-{} \ar@<-0.5ex>[r]^-{} & S^{\infty }(X/S)^+}) $$ \fi Since $S^{\infty }(X/S)^+$ is an abelian group object, one can consider the difference $$ H=F\circ \pr _U-G\circ \pr _V:U\times _SV\to S^{\infty }(X/S)^+ $$ between these two compositions in the category $\PShv (\Noe /S)$. Then the coequalizer $U\times _{\Sym ^{\infty }(F/S)^+}V$ fits in to the Cartesian square $$ \diagram U\times _{\Sym ^{\infty }(F/S)^+}V \ar[dd]_-{} \ar[rr]^-{} & & U\times _SV \ar[dd]^-{H} \\ \\ S \ar[rr]^-{} & & S^{\infty }(X/S)^+ \enddiagram $$ and the lemma reduces to the case when $U=S$, and $F$ is a section of the structural morphism from $S^{\infty }(X/S)^+$ to $S$. Next, the morphism of presheaves $$ G:V\to S^{\infty }(X/S)^+ $$ is uniquely determined by sending the identity morphism $\id _V$ to some element in the abelian group $S^{\infty }(X/S)^+(V)$, which is the equivalence class $$ [(g_1,g_2)] $$ of two morphisms $$ g_1:V\to S^{\infty }(X/S) \qqand g_2:V\to S^{\infty }(X/S) $$ of presheaves over $S$. In particular, the morphism $G$ factorized through the product $$ S^{\infty ,\infty }(X/S)= S^{\infty }(X/S)\times _SS^{\infty }(X/S)\; . $$ As we mentioned already, since $S^{\infty }(X/S)$ is a cancellation monoid, the commutative square (\ref{completiondiagr*}) is a Cartesian square in $\PShv (\Noe /S)$. 
Let $$ V_S=S^{\infty }(X/S)\times _{S^{\infty ,\infty }(X/S)}V $$ be the fibred product of $S^{\infty }(X/S)$ and $V$ over $S^{\infty ,\infty }(X/S)$. The composition of the two Cartesian squares $$ \xymatrix{ V_S \ar[dd]_-{} \ar[rr]^-{} & & V \ar[dd]^-{} \\ \\ S^{\infty }(X/S) \ar[rr]^-{\Delta } & & S^{\infty ,\infty }(X/S) } $$ and $$ \xymatrix{ S^{\infty }(X/S) \ar[rr]^-{} \ar[dd]^-{} & & S^{\infty ,\infty }(X/S) \ar[dd]^-{} \\ \\ S \ar[rr]^-{} & & S^{\infty }(X/S)^+ } $$ shows that the object $V_S$ fits in to the Cartesian square $$ \xymatrix{ V_S \ar[dd]_-{} \ar[rr]^-{} & & V \ar[dd]^-{G} \\ \\ S \ar[rr]^-{} & & S^{\infty }(X/S)^+ } $$ In other words, $$ V_S=S\times _{S^{\infty }(X/S)^+}V $$ is, at the same time, the fibred product of $S$ and $V$ over $S^{\infty }(X/S)^+$. Choose $d$ such that the image of the morphism $$ V\to S^{\infty ,\infty }(X/S) $$ is in $$ S^{d,d}(X/S)=S^d(X/S)\times _SS^d(X/S)\; . $$ Since the morphism $$ S^{d,d}(X/S)\to S^{\infty ,\infty }(X/S) $$ is a monomorphism in $\PShv (\Noe /S)$, it follows that the object $V_S$ fits also in to the Cartesian square $$ \xymatrix{ V_S \ar[dd]_-{} \ar[rr]^-{} & & V \ar[dd]^-{} \\ \\ S^d(X/S) \ar[rr]^-{} & & S^{d,d}(X/S) } $$ In other words, $V_S$ is the fibred product of the schemes $S^d(X/S)$ and $V$ over the scheme $S^{d,d}(X/S)$. In particular, $V_S$ is a scheme itself. \end{pf} We need one easy but useful technical notion. Suppose we are given with a locally Noetherian scheme $U$ over $S$ and a morphism $$ F:U\to \Sym ^{\infty }(X/S)^+ $$ in the category of sheaves $\Shv ((\Noe /S)_{\Nis })$. Any such a morphism is uniquely determined by sending $\id _U:U\to U$ to a section $$ s_F\in \Sym ^{\infty }(X/S)^+(U)\; . 
$$ Since $\Sym ^{\infty }(X/S)^+$ is the Nisnevich sheafification of the presheaf $S^{\infty }(X/S)^+$ and the morphism $$ S^{\infty }(X/S)^+\to \Sym ^{\infty }(X/S)^+ $$ is a monomorphism in $\PShv (\Noe /S)$ by Lemma \ref{smallbutimportant}, we obtain that the section $s_F$ is the equivalence class of pairs, each of which consists of a Nisnevich cover $$ \{ U_i\to U\} _{i\in I} $$ and a collection of sections $$ s_i\in S^{\infty }(X/S)^+(U_i)\; , $$ such that the restrictions of $s_i$ and $s_j$ on $U_i\times _UU_j$ coincide for all indices $i$ and $j$ in $I$. Therefore, if $$ \hat U=\coprod _{i\in I}U_i\; , $$ we obtain two morphisms $$ \hat U\to U $$ and $$ \hat F:\hat U\to S^{\infty }(X/S)^+ $$ such that the square $$ \diagram \hat U\ar[dd]_-{} \ar[rr]^-{\hat F} & & S^{\infty }(X/S)^+ \ar[dd]^-{} \\ \\ U \ar[rr]^-{F} & & \Sym ^{\infty }(X/S)^+ \enddiagram $$ is commutative. For short, we will say that $\hat U$ (respectively, $\hat F$) is an extension of $U$ (resp., $F$) by the pair $(\{ U_i\to U\} _{i\in I},\{ s_i\} _{i\in I})$ representing the section $s_F$. \begin{theorem} \label{cofilter} Let $P$ be a point of the space $\Sym ^{\infty }(X/S)^+$, and let $\bcN _P$ be the category of \'etale neighbourhoods of the point $P$ on $\Sym ^{\infty }(X/S)^+$. Then $\bcN _P$ is cofiltered. \end{theorem} \begin{pf} The proof follows a pretty standard way of argumentation, see, for example, Lemma 57.18.3. First of all, Lemma \ref{keylemma} gives us that the category $\bcN _P$ is nonempty, so that the first axiom of a cofiltered category is satisfied. 
Let $$ F:U\to \Sym ^{\infty }(X/S)^+ \qqand G:V\to \Sym ^{\infty }(X/S)^+ $$ \iffalse $$ \xymatrix{ \Spec (K) \ar[dd]^-{} \ar[ddrr]^-{P} & & & \Spec (K) \ar[dd]^-{} \ar[ddrr]^-{P} & & \\ \\ U \ar[rr]^-{F} & & \Sym ^{\infty }(X/S)^+ & V \ar[rr]^-{G} & & \Sym ^{\infty }(X/S)^+} $$ \fi be two \'etale neighbourhoods of the point $P$, and look at the fibred product $$ \diagram U\times _{\Sym ^{\infty }(X/S)^+}V \ar[dd]_-{} \ar[rr]^-{} & & V \ar[dd]^-{G} \\ \\ U \ar[rr]^-{F} & & \Sym ^{\infty }(X/S)^+ \enddiagram $$ Let $s_F$ be the section of the sheaf $\Sym ^{\infty }(X/S)^+$ on $U$ which determines the morphism $F$, and let $$ \hat F:\hat U\to S^{\infty }(X/S)^+ $$ be the extension of the morphism $F$ given by a pair $(\{ U_i\to U\} _{i\in I},\{ s_i\} _{i\in I})$ representing $s_F$. Similarly, one can construct an extension $\hat G$ of the morphism $G$ induced by a pair representing the section $s_G$. By Lemma \ref{keylemma}, the fibred product $\hat U\times _{S^{\infty }(X/S)^+}\hat V$ is a locally Noetherian scheme over $S$. Consider the universal morphism $$ \hat U\times _{S^{\infty }(X/S)^+}\hat V\to U\times _{\Sym ^{\infty }(X/S)^+}V\; , $$ commuting with the extensions of $U$ and $V$. \iffalse $$ \diagram \hat U\times _{S^{\infty }(X/S)^+}\hat V \ar[dddd]^-{} \ar[rr]^-{} \ar[ddr]^-{} & & \hat V \ar[dddd]^-{} \ar[ddr]^-{} & & \\ \\ & U\times _{\Sym ^{\infty }(X/S)^+}V \ar[dddd]^-{} \ar[rr]^-{} & & V \ar[dddd]^-{G} & \\ \\ \hat U \ar[rr]^-{} \ar[ddr]^-{} & & S^{\infty }(X/S)^+ \ar[ddr]^-{} & & \\ \\ & U \ar[rr]^-{F} & & \Sym ^{\infty }(X/S)^+ & \enddiagram $$ \fi Let us show that the composition $$ H:\hat U\times _{S^{\infty }(X/S)^+}\hat V\to U\times _{\Sym ^{\infty }(X/S)^+}V\to \Sym ^{\infty }(X/S)^+ $$ is \'etale, with regard to the Chow atlas on $\Sym ^{\infty }(X/S)^+$. 
Indeed, since the morphism $$ S^{\infty }(X/S)^+\to \Sym ^{\infty }(X/S)^+ $$ is a monomorphism in $\PShv (\Noe /S)$ by Lemma \ref{smallbutimportant}, the square $$ \diagram \hat U\times _{S^{\infty }(X/S)^+}\hat V \ar[dd]_-{\id } \ar[rr]^-{} & & S^{\infty }(X/S)^+ \ar[dd]^-{} \\ \\ \hat U\times _{S^{\infty }(X/S)^+}\hat V \ar[rr]^-{} & & \Sym ^{\infty }(X/S)^+ \enddiagram $$ is Cartesian, so that the obvious morphism $$ h:\hat U\times _{S^{\infty }(X/S)^+}\hat V\to S^{\infty }(X/S)^+ $$ is the pullback of the morphism $H$. For short, let $$ \hat U_{d,d}= \hat U\times _{S^{\infty }(X/S)^+}S^{d,d}(X/S)\; , $$ and $$ \hat V_{d,d}= \hat V\times _{S^{\infty }(X/S)^+}S^{d,d}(X/S)\; . $$ Then $$ h_0:\hat U_{d,d}\times _{S^{d,d}(X/S)}\hat V_{d,d} \to S^{d,d}(X/S) $$ is the pullback of the morphism $h$, and since $h$ is the pullback of $H$, we obtain the Cartesian square $$ \diagram \hat U_{d,d}\times _{S^{d,d}(X/S)}\hat V_{d,d} \ar[dd]_-{} \ar[rr]^-{h_0} & & S^{d,d}(X/S) \ar[dd]^-{} \\ \\ \hat U\times _{S^{\infty }(X/S)^+}\hat V \ar[rr]^-{H} & & \Sym ^{\infty }(X/S)^+ \enddiagram $$ \iffalse $$ \diagram \hat U_{d,d}\times _{S^{d,d}(X/S)}\hat V_{d,d} \ar[dddd]^-{} \ar[rr]^-{} \ar[ddr]^-{} & & \hat V_{d,d} \ar[dddd]^-{} \ar[ddr]^-{} & & \\ \\ & \hat U\times _{S^{\infty }(X/S)^+}\hat V \ar[dddd]^-{} \ar[rr]^-{} & & \hat V \ar[dddd]^-{\hat G} & \\ \\ \hat U_{d,d} \ar[rr]^-{} \ar[ddr]^-{} & & S^{d,d}(X/S) \ar[ddr]^-{} & & \\ \\ & \hat U \ar[rr]^-{\hat F} & & S^{\infty }(X/S)^+ & \enddiagram $$ \fi Therefore, in order to prove that $H$ is \'etale, we need only to show that $h_0$ is \'etale. Now again, since the morphism from $S^{\infty }(X/S)^+$ to $\Sym ^{\infty }(X/S)^+$ is a monomorphism in $\PShv (\Noe /S)$ by Lemma \ref{smallbutimportant}, we see that the commutative square $$ \diagram \hat U \ar[dd]_-{\id } \ar[rr]^-{} & & S^{\infty }(X/S)^+ \ar[dd]^-{} \\ \\ \hat U \ar[rr]^-{} & & \Sym ^{\infty }(X/S)^+ \enddiagram $$ is Cartesian. 
Composing it with the Cartesian square $$ \diagram \hat U_{d,d} \ar[dd]_-{} \ar[rr]^-{} & & S^{d,d}(X/S) \ar[dd]^-{} \\ \\ \hat U \ar[rr]^-{} & & S^{\infty }(X/S)^+ \enddiagram $$ we obtain the Cartesian square $$ \diagram \hat U_{d,d} \ar[dd]_-{} \ar[rr]^-{} & & S^{d,d}(X/S) \ar[dd]^-{} \\ \\ \hat U \ar[rr]^-{} & & \Sym ^{\infty }(X/S)^+ \enddiagram $$ The bottom horizontal morphism is the composition of two \'etale morphisms, and hence it is \'etale. Since \'etale morphisms are stable under pullbacks, the top horizontal morphism $$ \hat U_{d,d}\to S^{d,d}(X/S) $$ in the latter square is \'etale as well. Similarly, the morphism $$ \hat V_{d,d}\to S^{d,d}(X/S) $$ is \'etale. Thus, the bottom horizontal and the right vertical morphisms in then Cartesian square $$ \diagram \hat U_{d,d}\times _{S^{d,d}(X/S)}\hat V_{d,d} \ar[dd]_-{} \ar[rr]^-{} & & \hat V_{d,d} \ar[dd]^-{} \\ \\ \hat U_{d,d} \ar[rr]^-{} & & S^{d,d}(X/S)^+ \enddiagram $$ are \'etale. Since \'etale morphisms are stable under pullbacks and compositions, the diagonal composition $h_0$ of this square is \'etale as well. As this is true for any $d$, we see that the morphism $$ \hat U\times _{S^{\infty }(X/S)^+}\hat V\to \Sym ^{\infty }(X/S)^+ $$ is \'etale. The fact that the point $P:\Spec (K)\to \Sym ^{\infty }(X/S)^+$ factorizes through $\hat U\times _{S^{\infty }(X/S)^+}\hat V$ is obvious. Now we need to prove the last axiom of a cofiltered category. Assume again that we have two \'etale neighbourhoods $U$ and $V$ of $P$ as above, and assume also that we have two morphisms $$ \xymatrix{a,b:U \ar@<+0.7ex>[r]^-{} \ar@<-0.1ex>[r]^-{} & V} $$ \iffalse \begin{equation} \label{reppa1} \xymatrix{ & & \Spec (K) \ar[lldd]_-{} \ar[rrdd]^-{} & & \\ \\ U \ar@<+0.5ex>[rrrr]^-{a} \ar@<-0.5ex>[rrrr]_-{b} \ar[rrdd]^-{F} & & & & V \ar[lldd]_-{G} \\ \\ & & \Sym ^{\infty }(X/S)^+ & & } \end{equation} \fi between these neighbourhoods. 
Let $$ s_G\in \Sym ^{\infty }(X/S)^+(V) $$ be the section determined by the morphism $G$, and choose a representative in $s_G$. Such a representative consists of a Nisnevich covering $$ \{ V_i\to V\} _{i\in I} $$ and a collection of sections $$ s_i\in S^{\infty }(X/S)^+(V_i)\; , $$ such that the restrictions of $s_i$ and $s_j$ on $V_i\times _VV_j$ coincide for all indices $i$ and $j$ in $I$. Construct the corresponding extension $$ \hat G:\hat V\to S^{\infty }(X/S)^+ $$ of the morphism $G$ getting the commutative square $$ \diagram \hat V \ar[dd]_-{} \ar[rr]^-{\hat G} & & S^{\infty }(X/S) \ar[dd]^-{} \\ \\ V \ar[rr]^-{G} & & \Sym ^{\infty }(X/S)^+ \enddiagram $$ Pulling back the \'etale covering $\{ V_i\to V\} _{i\in I}$ along the morphisms $a$ and $b$, and taking the unification $$ \{ U_{ij}\to U\} _{(i,j)\in I\times I} $$ of these two pullback coverings in to one, one can construct the extension $$ \hat F:\hat U\to S^{\infty }(X/S)^+\; , $$ such that the diagram \begin{equation} \label{reppa2} \diagram \Spec (K)^{\hat {\; }} \ar[dd]_-{} \ar[rr]_-{} & & \hat V \ar[dd]^-{\hat G} \\ \\ \hat U \ar@<+0.5ex>[rruu]^-{\hat a} \ar@<-0.5ex>[rruu]_-{\hat b} \ar[rr]^-{\hat F} & & S^{\infty }(X/S)^+ \enddiagram \end{equation} is commutative, where $\Spec (K)^{\hat {\; }}$ is an extension over $\Spec (K)$. Moreover, the squares $$ \xymatrix{ \hat U \ar[rr]^-{\hat a} \ar[dd]^-{} & & \hat V \ar[dd]^-{} & & \hat U \ar[dd]^-{} \ar[rr]^-{\hat b} & & \hat V \ar[dd]^-{} \\ \\ U \ar[rr]^-{a} & & V & & U \ar[rr]^-{b} & & V} $$ $$ \xymatrix{ \Spec (K)^{\hat {\; }} \ar[rr]^-{} \ar[dd]^-{} & & \Spec (K) \ar[dd]^-{} & \Spec (K)^{\hat {\; }} \ar[dd]^-{} \ar[rr]^-{} & & \Spec (K) \ar[dd]^-{} \\ \\ \hat U \ar[rr]^-{} & & U & \hat V \ar[rr]^-{} & & V} $$ are commutative. 
Now, let $W$ be the fibred product of $U$ and $V$ over $V\times _{\Sym ^{\infty }(X/S)^+}V$, with regard to the morphisms $(a,b)$ and $\Delta $, and let $h$ be the corresponding universal morphism, as it is shown in the commutative diagram \begin{equation} \label{muhomory1a} \xymatrix{ \Spec (K)\ar@/_/[dddr]^-{} \ar@/^/[drrr]^-{} \ar@{.>}[dr]^-{\hspace{-1mm}\exists ! h} \\ & W \ar[dd] \ar[rr] & & V \ar[dd]^-{\Delta } \\ \\ & U \ar[rr]^-{(a,b)} & & V\times _{\Sym ^{\infty }(X/S)^+}V} \end{equation} Notice that the external commutativity is guaranteed by the fact that $a$ and $b$ are two morphisms from the neigbourhood $U$ to the neigbourhood $V$ of the same point $P$. The diagram (\ref{muhomory1a}) can be also extended by the commutative diagram \begin{equation} \label{muhomory1b} \diagram V\times _{\Sym ^{\infty }(X/S)^+}V \ar[dd]^-{} \ar[rr]^-{} & & V \ar[dd]^-{G} \\ \\ V \ar[rr]^-{G} & & \Sym ^{\infty }(X/S)^+ \enddiagram \end{equation} \iffalse Summarizing, we obtain the commutative diagram \begin{equation} \label{muhomory1} \diagram \Spec (K)\ar[rrrd]^-{} \ar@{.>}[rd]_-{\hspace{+10mm}\exists ! h} \ar[rddd]^-{} & & & & & \\ & W \ar[dd]_-{} \ar[rr]^-{} & & V \ar[rrdd]^-{\id } \ar[dd]^-{\Delta } & & \\ \\ & U \ar[rrdd]^-{b} \ar[rr]^-{(a,b)} & & V\times _{\Sym ^{\infty }(X/S)^+}V \ar[dd]^-{} \ar[rr]^-{} & & V \ar[dd]^-{G} \\ \\ & & & V \ar[rr]^-{G} & & \Sym ^{\infty }(X/S)^+ \enddiagram \end{equation} \fi Consider also the corresponding ``underlying" commutative diagrams \begin{equation} \label{muhomory2a} \xymatrix{ \Spec (K)^{\hat {\; }}\ar@/_/[dddr]^-{} \ar@/^/[drrr]^-{} \ar@{.>}[dr]^-{\hspace{-1mm}\exists ! 
\hat h} \\ & \hat W \ar[dd] \ar[rr] & & \hat V \ar[dd]^-{\Delta } \\ \\ & \hat U \ar[rr]^-{(\hat a,\hat b)} & & \hat V\times _{S^{\infty }(X/S)^+}\hat V} \end{equation} and \begin{equation} \label{muhomory2b} \diagram \hat V\times _{S^{\infty }(X/S)^+}\hat V \ar[dd]^-{} \ar[rr]^-{} & & \hat V \ar[dd]^-{\hat G} \\ \\ \hat V \ar[rr]^-{\hat G} & & S^{\infty }(X/S)^+ \enddiagram \end{equation} where $\hat h$ exists and unique due to the commutativities coming from the commutativities in the diagram (\ref{reppa2}). \iffalse Summarizing, we obtain the commutative diagram \begin{equation} \label{muhomory2} \diagram \Spec (K)^{\hat {\; }} \ar[rrrd]^-{} \ar@{.>}[rd]_-{\hspace{+10mm}\exists ! \hat h} \ar[rddd]^-{} & & & & & \\ & \hat W \ar[dd]_-{} \ar[rr]^-{} & & \hat V \ar[rrdd]^-{\id } \ar[dd]^-{\Delta } & & \\ \\ & \hat U \ar[rrdd]^-{\hat b} \ar[rr]^-{(\hat a,\hat b)} & & \hat V\times _{S^{\infty }(X/S)^+}\hat V \ar[dd]^-{} \ar[rr]^-{} & & \hat V \ar[dd]^-{\hat G} \\ \\ & & & \hat V \ar[rr]^-{\hat G} & & S^{\infty }(X/S)^+ \enddiagram \end{equation} \fi Clearly, the commutative diagrams (\ref{muhomory1a}), (\ref{muhomory1b}), (\ref{muhomory2a}) and (\ref{muhomory2b}) can be joined in to one large commutative diagram by means of the morphisms $$ \hat U\to U\; ,\; \; \hat V\to V\; ,\; \; \hbox{etc} $$ One of the subdiagrams of that join is the commutative square $$ \diagram \hat V\times _{S^{\infty }(X/S)^+}\hat V \ar[dd]_-{} \ar[rr]^-{} & & V \ar[dd]^-{} \\ \\ \hat V \ar[rr]^-{} & & \Sym ^{\infty }(X/S)^+ \enddiagram $$ As we know from the first part of the proof, applied to the case when $U=V$, the diagonal composition $$ \hat V\times _{S^{\infty }(X/S)^+}\hat V \to \Sym ^{\infty }(X/S)^+ $$ is an \'etale neighbourhood of the point $P$. 
Since the diagrams $$ \xymatrix{ \hat U \ar[rr]^-{} \ar[ddrr]^-{\hat b} & & \hat V\times _{S^{\infty }(X/S)^+}\hat V \ar[dd]^-{} \\ \\ & & \hat V } $$ and $$ \xymatrix{ \hat U \ar[dd]^-{} \ar[rr]^-{\hat b} & & \hat V \ar[dd]^-{} \\ \\ U \ar[rr]^-{b} & & V} $$ are commutative, we see that the square $$ \xymatrix{ \hat U \ar[dd]_-{} \ar[rr]^-{} & & \hat V\times _{S^{\infty }(X/S)^+}\hat V \ar[dd]^-{} \\ \\ U \ar[rr]^-{F} & & \Sym ^{\infty }(X/S)^+ } $$ is commutative. The left vertical arrow in the latter square is \'etale, and the morphism $F$ is \'etale by assumption. Therefore, their composition is \'etale, and we obtain the commutative diagram \begin{equation} \label{ehma} \xymatrix{ \hat U \ar[rr]^-{} \ar[ddrr]^-{} & & \hat V\times _{S^{\infty }(X/S)^+}\hat V \ar[dd]^-{} \\ \\ & & \Sym ^{\infty }(X/S)^+ } \end{equation} in which the morphisms targeted at $\Sym ^{\infty }(X/S)^+$ are \'etale. Now, if $f:Y\to Y'$ is a morphism between locally Noetherian schemes over a space $\bcZ $, if the structural morphisms $Y\to \bcZ $ and $Y'\to \bcZ $ are \'etale, with regard to the atlas on $\bcZ $, then $f$ is also \'etale. This is an obvious modification of Lemma 57.15.6 in \cite{StacksProject}. Applying this property to the diagram (\ref{ehma}), we obtain that the morphism $$ \hat U\to \hat V\times _{S^{\infty }(X/S)^+}\hat V $$ is \'etale. As \'etale morphisms are stable under base change, the Cartesian square from the diagram (\ref{muhomory2a}) then shows that the morphism $$ \hat W\to \hat V $$ is \'etale. And since the morphisms $\hat V \to V$ is \'etale, the composition $$ \hat W\to \hat V\to V $$ is \'etale. Since $G$ is \'etale by assumption, we see that the composition \begin{equation} \label{vottak} \hat W\to \hat V\to V\stackrel{G}{\lra }\Sym ^{\infty }(X/S)^+ \end{equation} is also \'etale. 
Finally, analyzing the above join of the commutative diagrams (\ref{muhomory1a}), (\ref{muhomory1b}), (\ref{muhomory2a}) and (\ref{muhomory2b}) by means of the extension morphisms, we see that the composition (\ref{vottak}) is the same as the composition $$ \hat W\to W\to U\stackrel{b}{\lra } V\stackrel{G}{\lra }\Sym ^{\infty }(X/S)^+\; . $$ Thus, we have obtained the commutative diagram $$ \diagram \hat W \ar[dd]_-{} \ar[rr]^-{} & & V \ar[dd]^-{} \\ \\ U \ar[rr]^-{} & & \Sym ^{\infty }(X/S)^+ \enddiagram $$ \iffalse $$ \xymatrix{ & & \hat W \ar[lldd]_-{} \ar[rrdd]^-{} & & \\ \\ U \ar[rrdd]_-{} & & & & V \ar[lldd]^-{} \\ \\ & & \Sym ^{\infty }(X/S)^+ & & } $$ \fi whose diagonal composition \begin{equation} \label{neuzhtokonez} \hat W\to \Sym ^{\infty }(X/S)^+ \end{equation} is \'etale. Analyzing the commutative diagrams above, it is easy to see that $P$ factorizes through (\ref{neuzhtokonez}), so that the latter morphism is an \'etale neighbourhood of $P$. \end{pf} \section{Rational curves on the locally ringed site of $0$-cycles} Theorem \ref{cofilter} has the following important implication. Namely, since all the items of Definition 7.31.2 in \cite{StacksProject} are now satisfied, the stack functor $$ \stalk _P: \Shv (\Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et })\to \Sets $$ induces a point of the topos $\Shv (\Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et })$ by Lemma 7.31.7 in loc.cit. In particular, we obtain the full-fledged stalk $$ \bcO _{\Sym ^{\infty }(X/S)^+\! ,\, P}= \stalk _P\, (\bcO _{\Sym ^{\infty }(X/S)^+}) $$ Moreover, the ringed site $\Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et }$ is a locally ringed site in the sense of the definition appearing in Exercise 13.9 on page 512 in \cite{SGA4-1} (see page 313 in the newly typeset version), as well as in the sense of a sightly different Definition 18.39.4 in \cite{StacksProject}. This is explained in Section \ref{kaehler}. For short of notation, let us write $$ \bcO _P=\bcO _{\Sym ^{\infty }(X/S)^+\! 
,\, P}\; . $$ This should not lead to a confusion, as the point $P$ is a point on $\Sym ^{\infty }(X/S)^+$. Since the site $\Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et }$ is locally ringed, for each point $P$ on this site the stalk $\bcO _P$ is a local ring by the same Lemma 18.39.2 in \cite{StacksProject}. Then we also have the maximal ideal $$ \gom _P\subset \bcO _P $$ and the residue field $$ \kappa (P)=\bcO _P/\gom _P $$ at the point $P$. The stalk functor also gives us the stalks $$ \Omega ^1_{\Sym ^{\infty }(X/S)^+\!,\, P}= \stalk _P\, (\Omega ^1_{\Sym ^{\infty }(X/S)^+}) $$ and $$ T_{\Sym ^{\infty }(X/S)^+\! ,\, P}= \stalk _P\, (T_{\Sym ^{\infty }(X/S)^+}) $$ at $P$. Tensoring by $\kappa (P)$ we obtain the vector spaces $$ \Omega ^1(P)=\Omega ^1_{\Sym ^{\infty }(X/S)^+}(P)= \Omega ^1_{\Sym ^{\infty }(X/S)^+\!,\, P} \otimes _{\bcO _P}\kappa (P) $$ and $$ T(P)=T_{\Sym ^{\infty }(X/S)^+}(P)= T_{\Sym ^{\infty }(X/S)^+\! ,\, P} \otimes _{\bcO _P}\kappa (P) $$ over the residue field $\kappa (P)$. The second vector space $T(P)$ is then our {\it tangent space} to the space of $0$-cycles $\Sym ^{\infty }(X/S)^+$ at the point $P$. Notice that, since $\Sym ^{\infty }(X/S)^+$ is an abelian group object in the category of Nisnevich sheaves on locally Noetherian schemes over $S$, whenever $S$ is the spectrum of a field $k$, all tangent spaces $T(P)$ at $k$-rational points $P$ are uniquely determined by the tangent space $T(0)$ at the zero point $0$ on $\Sym ^{\infty }(X/S)^+$ provided by the section of the structural morphism from $X$ to $S$. In other words, one can develop a Lie theory on $\Sym ^{\infty }(X/S)^+$. Now we are fully equipped to promote the idea of understanding of rational equivalence of $0$-cycles as rational connectivity on the space $\Sym ^{\infty }(X/S)^+$. First of all, looking at any scheme $U$ over $S$ as a representable sheaf, we have the corresponding locally ringed site $U_{\Nis \mhyphen \et }$. 
Then a {\it regular morphism} from $U$ to $\Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et }$ is just a morphism of locally ringed sites $$ U_{\Nis \mhyphen \et }\to \Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et } $$ in the sense of Definition 18.39.9 in \cite{StacksProject}. Notice that since \'etale morphisms are stable under base change, if $U\to \Sym ^{\infty }(X/S)^+$ is a morphism of sheaves, then it induces the corresponding morphism of locally ringed sites.\label{proverit' !!!} A rational curve on $\Sym ^{\infty }(X/S)^+$ is a morphism of sheaves $$ f:\PR ^1\to \Sym ^{\infty }(X/S)^+\; . $$ If $$ P:\Spec (K)\to \Sym ^{\infty }(X/S)^+ $$ is a point on the sheaf $\Sym ^{\infty }(X/S)^+$, then we will be saying that $f$ passes through the point $P$ if $P$, as a morphism to $\Sym ^{\infty }(X/S)^+$, factorizes through the morphism $f:\PR ^1\to \Sym ^{\infty }(X/S)^+$. Now, two points $P$ and $Q$ on $\Sym ^{\infty }(X/S)^+$ are {\it elementary rationally connected} if there exists a rational curve on $\Sym ^{\infty }(X/S)^+$ passing through $P$ and $Q$. The points $P$ and $Q$ are said to be {\it rationally connected} if there exists a finite set of points $R_1,\ldots ,R_n$ on $\Sym ^{\infty }(X/S)^+$, such that $R_1=P$, $R_n=Q$ and $R_i$ is elementary rationally connected to $R_{i+1}$ for each $i\in \{ 1,\ldots ,n-1\} $. If any two points on $\Sym ^{\infty }(X/S)^+$ are rationally connected, then we will say that this space is rationally connected. Let $$ P:\Spec (K)\to \Sym ^{\infty }(X/S)^+ \qqand Q:\Spec (L)\to \Sym ^{\infty }(X/S)^+ $$ be two points on $\Sym ^{\infty }(X/S)^+$, represented by morphisms from the spectra of two fields $K$ and $L$ respectively. Suppose, in addition, that the fields $K$ and $L$ are embedded in to a common field, in which case we can replace both $K$ and $L$ by their composite $KL$. Then we can assume, without loss of generality, that $K=L$. 
In such a case, the points $P$ and $Q$, as morphisms from the scheme $\Spec (K)$ to the sheaf $\Sym ^{\infty }(X/S)^+$ induce two sections $s_P$ and $s_Q$ in $$ \Sym ^{\infty }(X/S)^+(\Spec (K))= \bcZ _0^{\infty }(X/S)(\Spec (K))= $$ $$ z(X/S,0)_{\infty }(\Spec (K))\; . $$ Assume, in addition, that $$ S=\Spec (K)\; . $$ Then $s_P$ and $s_Q$, as elements of the group $$ z(X/\Spec (K),0)_{\infty }(\Spec (K))\; , $$ are two $0$-cycles on the scheme $X$ over $\Spec (K)$. And since relative $0$-cycles are representable, see Section \ref{relcycles}, rational connectivity of the points $P$ and $Q$ on $\Sym ^{\infty }(X/S)^+$ is equivalent to rational equivalence of the $0$-cycles $s_P$ and $s_Q$ on the scheme $X$. This all means that we can look at rational connectedness between points on $\Sym ^{\infty }(X/S)^+$ as the generalized rational equivalence in the relative setting. Let, for example, $X$ be a smooth projective surface over an algebraically closed field $k$, and assume that $X$ is of general type, i.e. the Kodaira dimension is $2$, and that the transcendental part $H^2_{\tr }(X)$ in the second \'etale $l$-adic cohomology group $H^2_{\et }(X,\QQ _l)$ is trivial, where $l$ is different from the characteristic of $k$. Bloch's conjecture predicts that any two closed points $P$ and $Q$ on $X$ are rationally equivalent as $0$-cycles on $X$. This is equivalent to saying that the space $\Sym ^{\infty }(X/k)^+$ is rationally connected in the sense above. Let $V$ be an arbitrary smooth projective variety over $k$. According to Koll\'ar, \cite{KollarRatCurvesOnVar}, if we wish to show that $V$ is rationally connected, we should do two steps. The first one is that we need to find a rational curve $$ f:\PR ^1\to V $$ on $V$.
If the first step is done, then we need to show that the rational curve $f$ is free on $V$, i.e.\ that the numbers $$ a_1\geq \ldots \geq a_n $$ in the decomposition $$ f^*T_V= \bcO _{\PR ^1}(a_1)\oplus \ldots \oplus \bcO _{\PR ^1}(a_n) $$ have appropriate positivity, where $T_V$ is the tangent sheaf on the variety $V$, see Section II.3 in the canonical book \cite{KollarRatCurvesOnVar}, or many other sources about free curves on varieties. Now, since we have the tangent sheaf $T_{\Sym ^{\infty }(X/k)^+}$ for our surface $X$ over $k$, we can try to do the same on the space $\Sym ^{\infty }(X/k)^+$. Namely, we should first find a rational curve $$ f:\PR ^1\to \Sym ^{\infty }(X/k)^+ $$ on the space of $0$-cycles. Of course, we do not know (at the moment) whether the tangent sheaf $T_{\Sym ^{\infty }(X/k)^+}$ is locally free on the site $\Sym ^{\infty }(X/S)^+_{\Nis \mhyphen \et }$, and, accordingly, we do not know whether the pullback $f^*T_{\Sym ^{\infty }(X/k)^+}$ decomposes into the direct sum of Serre twists. But it is not hard to show that $f^*T_{\Sym ^{\infty }(X/k)^+}$ is a coherent sheaf on the projective line $\PR ^1$ over $k$. Being a coherent sheaf, it decomposes uniquely into a direct sum of a torsion sheaf and a locally free sheaf, see, for example, Proposition 5.4.2 in \cite{ChenKrause}. Then $$ f^*T_{\Sym ^{\infty }(X/k)^+}= \bcO _{\PR ^1}(a_1)\oplus \ldots \oplus \bcO _{\PR ^1}(a_n) \oplus \bcT \; , $$ where $\bcT $ is a torsion sheaf on $\PR ^1$. Though the sheaf $\bcT $ is possibly non-zero, we still can apply the same line of arguments as in the proof of Theorem 3.7 in \cite{KollarRatCurvesOnVar} or Proposition 4.8 in \cite{Debarre}. \section{Appendix: representability of $0$-cycles} It is important to understand the action of the isomorphism obtained by composing the isomorphisms (\ref{maincanonical1*}) and (\ref{maincanonical1'}) after the restriction to semi-normal schemes. The aim of the appendix is to describe this action in detail.
Actually all we need is to slightly extend the arguments from \cite{SuslinVoevodsky}. Recall that symmetric powers can also be defined for objects in an arbitrary symmetric monoidal category with finite colimits. Let, for example, $R$ be a commutative ring, and let $M$ be a module over $R$. The $d$-th symmetric power $\Sym ^d(M)$ of the module $M$ in the category of modules over $R$ can be defined as the quotient of $M^{\otimes d}$ by the submodule generated over $R$ by the differences $$ m_1\otimes \dots \otimes m_d-m_{\sigma (1)}\otimes \dots \otimes m_{\sigma (d)}\; , $$ where $\sigma \in \Sigma _d$. For any collection $\{ m_1,\dots ,m_d\} $ in $M$, let $(m_1,\dots ,m_d)$ be the same collection as an element of the $d$-th symmetric power $\Sym ^d(M)$ of the module $M$, i.e. the image of the tensor $m_1\otimes \dots \otimes m_d$ under the quotient homomorphism $$ M^{\otimes d}\to \Sym ^d(M)\; . $$ The image of the injective homomorphism $$ \Sym ^d(M)\to M^{\otimes d}\; , $$ sending $(m_1,\dots ,m_d)$ to the sum $$ \sum _{\sigma \in \Sigma _d}m_{\sigma (1)}\otimes \dots \otimes m_{\sigma (d)}\; , $$ coincides with the submodule of invariants $(M^{\otimes d})^{\Sigma _d}$ of the action of $\Sigma _d$ on $M^{\otimes d}$. Therefore, one can identify $\Sym ^d(M)$ with the submodule of invariants $(M^{\otimes d})^{\Sigma _d}$. A similar but Koszul dual theory applies to wedge powers, where the wedge power $\wedge ^dM$ can be initially constructed as the quotient of $M^{\otimes d}$ by the submodule $E(M^{\otimes d})$ in $M^{\otimes d}$ generated by the tensors $v_1\otimes \dots \otimes v_d$ in which at least two vectors $v_i$ and $v_j$ are equal. All of this is folklore and can be found in, for example, $\S $B.2 in \cite{FultonHarris}. In schematic terms, let $B$ be an algebra over a ring $A$, i.e. one has a ring homomorphism $$ \phi :A\to B\; , $$ and let $$ f:X=\Spec (B)\to \Spec (A)=Y $$ be the affine morphism induced by the homomorphism $\phi $.
Then one has the diagonal ring homomorphism $$ \phi _d:A\to B^{\otimes d}\; , $$ where the $d$-fold tensor product $B^{\otimes d}$ is taken over $A$. The homomorphism $\phi _d$ gives the structural morphism $$ (X/Y)^d\to Y\; , $$ where $(X/Y)^d$ is the $d$-fold product of $X$ over $Y$. Let $(B^{\otimes d})^{\Sigma _d}$ be the subring of invariants of the action of the symmetric group. Since the image of $\phi _d$ is obviously in $(B^{\otimes d})^{\Sigma _d}$, we obtain the surjective homomorphism $$ \phi _d':A\to (B^{\otimes d})^{\Sigma _d} $$ induced by $\phi _d$. This gives us the decomposition $$ (X/Y)^d\to \Sym ^d(X/Y)\to Y\; , $$ where the second morphism is $\Spec(\phi _d')$. The multiplication in the $A$-algebra $B$ induces the multiplication in $B^{\otimes d}$ by the formula $$ (b_1\otimes \dots \otimes b_d) \cdot (b'_1\otimes \dots \otimes b'_d)= (b_1b'_1\otimes \dots \otimes b_db'_d)\; . $$ It is easy to see that if $(b_1\otimes \dots \otimes b_d)$ is in $(B^{\otimes d})^{\Sigma _d}$ and $(b'_1\otimes \dots \otimes b'_d)$ is in $E(B^{\otimes d})$, then the product $(b_1b'_1\otimes \dots \otimes b_db'_d)$ is again in $E(B^{\otimes d})$. This is why the above product induces the product $$ (B^{\otimes d})^{\Sigma _d}\otimes \wedge ^dB\to \wedge ^dB\; . $$ If $B$ is, moreover, freely generated of dimension $d$, as an $A$-module, then the determinant $$ \det : \wedge ^dB\stackrel{\sim }{\to }A $$ is an isomorphism, and we obtain a homomorphism $$ \psi _d:(B^{\otimes d})^{\Sigma _d}\lra A\; , $$ such that $\phi _d$ is a section for $\psi _d$, thus yielding the section $$ s_{X/Y,d}:Y\to \Sym ^d(X/Y) $$ of the above morphism $\Sym ^d(X/Y)\to Y$. Let us now explore the same situation globally. Let $$ f:X\to Y $$ be a morphism of schemes over a field $k$.
Recall that $f$ is said to be affine if and only if $Y$ can be covered by affine open subsets $$ V_i=\Spec (A_i)\; , $$ such that $$ U_i=f^{-1}(V_i) $$ is affine for each $i$, so $$ U_i=\Spec (B_i)\; , $$ and $$ f|_{U_i}:U_i\to V_i $$ is induced by the homomorphism $$ A_i\to B_i\; , $$ see page 128 in \cite{Hartshorne}. If $f$ is affine, then $$ \bcB =f_*\bcO _X $$ is a quasi-coherent sheaf of $\bcO _Y$-algebras on $Y$, and $$ X=\bSpec (\bcB ) $$ in the sense of loc.cit. The $d$-fold fibred product of $X$ over $Y$ is $$ \bSpec (\bcB ^{\otimes d})\; , $$ and the structural morphism from $(X/Y)^{\times d}$ to $Y$ is induced by the homomorphism $$ \phi _d:\bcO _Y\to \bcB ^{\otimes d}\; , $$ where the tensor product $\bcB ^{\otimes d}$ is over $\bcO _Y$. The image of $\phi _d$ is $\Sigma _d$-invariant, so that we obtain the homomorphism $$ \phi _d:\bcO _Y\to (\bcB ^{\otimes d})^{\Sigma _d}\; . $$ Then the relative $d$-th symmetric power $\Sym ^d(X/Y)$ exists and in fact $$ \Sym ^d(X/Y)= \bSpec ((\bcB ^{\otimes d})^{\Sigma _d})\; . $$ The structural morphism $$ \Sym ^d(X/Y)\to Y $$ is induced by the homomorphism $\phi _d$ above. Following \cite{SuslinVoevodsky}, let us now show that there exists also a section of the structural morphism $\Sym ^d(X/Y)\to Y$, provided $X$ is finite surjective of degree $d$ over $Y$. Assume first that $f$ is finite and flat. The finiteness of $f$ means, by definition, that $f$ is affine and $B_i$ is a finitely generated $A_i$-module for each $i$, see page 84 in \cite{Hartshorne}. Then $\bcB $ is a coherent flat $\bcO _Y$-module, with respect to the morphism $$ \bcO _Y\to \bcB =f_*\bcO _X\; , $$ and so $\bcB $ is a locally free $\bcO _Y$-module by Proposition 9.2 (e) on page 254 in \cite{Hartshorne}. Let $W$ be an irreducible component of the scheme $X$, and let $V$ be the closure of $f(W)$ in $Y$. Since $f$ is flat, $V$ is an irreducible component of $Y$. 
Moreover, if $\xi $ is the generic point of $W$ in $X$, then $f(\xi )$ is the generic point of $V$ in $Y$. Let $d_{\xi }$ be the degree $[R(W):R(V)]$, where $R(W)$ and $R(V)$ stand for the fields of rational functions on $W$ and $V$ respectively, endowed with the induced reduced closed subscheme structures on them. We will say that $f:X\to Y$ is of constant degree $d$ if the degrees $d_{\xi }$ are equal to $d$ for all irreducible components of the scheme $X$. If $f$ is finite flat of constant degree $d$, then $\bcB $ is a locally free sheaf of rank $d$ over $\bcO _Y$, so that one has the determinantal isomorphism $$ \det :\wedge ^d\bcB \stackrel{\sim }{\lra }\bcO _Y\; . $$ Applying the sheaf-theoretical version of the above local construction, we get the morphism of $\bcO _Y$-modules $$ (\bcB ^{\otimes d})^{\Sigma _d}\otimes _{\bcO _Y} \wedge ^d\bcB \to \wedge ^d\bcB \; , $$ where the tensor power $\bcB ^{\otimes d}$ is taken over $\bcO _Y$. In turn, this gives the morphism $$ (\bcB ^{\otimes d})^{\Sigma _d}\to \End _{\bcO _Y}(\wedge ^d\bcB )\; . $$ Composing it with the above determinantal isomorphism we get the homomorphism of $\bcO _Y$-algebras $$ \psi _d:(\bcB ^{\otimes d})^{\Sigma _d}\to \bcO _Y\; . $$ Since $\psi _d\circ \phi _d=\id _{\bcO _Y}$, we see that $\psi _d$ induces the canonical section $$ s_{X/Y,d}:Y\to \Sym ^d(X/Y) $$ of the structural morphism $$ \Sym ^d(X/Y)\to Y\; . $$ Following \cite{SuslinVoevodsky}, assume now that $f$ is a finite and surjective (but maybe not flat) morphism of schemes over $k$. For our interests in this paper, it is sufficient to assume that the scheme $X$ is integral and the scheme $Y$ is normal and connected. Since $X$ is integral, it is irreducible. As $f$ is surjective, $Y$ is irreducible too. Moreover, since $f$ is finite, it is affine. As $f$ is surjective, locally $f$ is a collection of morphisms $$ \phi ^*:\Spec (B)\to \Spec (A)\; , $$ such that $\phi :A\to B$ is injective.
Since $X$ is integral, it is reduced, so that there are no nilpotents in $B$. Then there are also no nilpotents in $A$. Therefore, $Y$ is reduced as well. Collecting these small observations we conclude that $Y$ is integral. Now, take any affine open $$ V=\Spec (A) $$ in $Y$ with the preimage $$ f^{-1}(V)=\Spec (B) $$ in $X$, so that $A$ is a subring in $B$, as $f|_U$ is surjective and both $A$ and $B$ are integral domains. Since $B$ is a finitely generated $A$-module, it follows that $B$ is integral over $A$ by Proposition 5.1 in \cite{AM}. Then, for any non-zero element $b$ in $B$, there exists a monic polynomial $$ x^n+a_{n-1}x^{n-1}+\dots +a_1x+a_0 $$ with coefficients in $A$, such that $b$ is a root of it. Without loss of generality, one can assume that $a_0\neq 0$. Then $$ 1/b=1/a_0\cdot (-b^{n-1}-a_{n-1}b^{n-2}-\dots -a_1)\; . $$ This means that the localization $B_{(0)}$ is a finitely generated $A_{(0)}$-module, i.e. $R(X)$ is a finite field extension of $R(Y)$. Let $d$ be the degree $[R(X):R(Y)]$. Let $U$ be the set of points $x\in X$, such that $f$ is flat at $x$. Then $U$ is open in $X$, see 9.4 on page 266 in \cite{Hartshorne}. Since both $X$ and $Y$ are integral, $f$ is flat at the generic point of $X$. Therefore, the set $U$ is non-empty. Next, shrink $U$ if necessary and assume that it is affine, $$ U=\Spec (B)\; , $$ which is surjectively mapped onto the affine set $V=\Spec (A)$ in $Y$. Then $$ f|_U:U\to V $$ is a finite surjective flat morphism of schemes over the ground field $k$. Since $R(X)$ is a flat algebra over $R(Y)$, by the above local construction, we get the homomorphism $$ \psi _d:(R(X)^{\otimes d})^{\Sigma _d}\lra R(Y)\; . $$ Let now again $\bcB $ be the quasi-coherent sheaf $f_*\bcO _X$ of $\bcO _Y$-algebras on $Y$, so that $$ X=\bSpec (\bcB )\; . $$ Let $y$ be a point on $Y$. Locally, $$ y\in V\subset Y\; , $$ where $$ V=\Spec (A) $$ and $y$ is a prime ideal $\gop $ in $A$. Let $$ U=f^{-1}(V)=\Spec (B)\; .
$$ By Propositions 5.1 and 5.2 on pages 110 - 111 in \cite{Hartshorne}, we have that the stalk $\bcB _y$ is $$ ((f|_U)_*\bcO _U)_y=((f|_U)_*B)_{\gop }=B_{\gop } $$ and $$ B_{\gop }\subset B_{(0)}\; , $$ i.e. $\bcB _y$ is canonically embedded into $R(X)$. Respectively, $\bcB _y^{\otimes d}$ is canonically embedded into $R(X)^{\otimes d}$. The homomorphism $\psi _d$ is nothing but the homomorphism $$ \psi _{(0),d}:(B_{(0)}^{\otimes d})^{\Sigma _d}\to A_{(0)}\; , $$ where the tensor product is taken over $A_{(0)}$. As above, $\psi _{(0),d}$ has the section $$ \phi _{(0),d}:A_{(0)}\to (B_{(0)}^{\otimes d})^{\Sigma _d}\; , $$ induced by the canonical homomorphism $A_{(0)}\to B_{(0)}^{\otimes d}$. Since $A_{\gop }$ is embedded into $A_{(0)}$ and $B_{\gop }$ is embedded into $B_{(0)}$, we have the homomorphism from $(B_{\gop }^{\otimes d})^{\Sigma _d}$, where the tensor product is taken over $A_{\gop }$, to $(B_{(0)}^{\otimes d})^{\Sigma _d}$. Certainly, the canonical homomorphism $\phi _{\gop ,d}:A_{\gop }\to B_{\gop }^{\otimes d}$ induces the homomorphism $\phi _{\gop ,d}:A_{\gop }\to (B_{\gop }^{\otimes d})^{\Sigma _d}$, so that we have the obvious commutative diagram $$ \diagram (B_{\gop }^{\otimes d})^{\Sigma _d} \ar[dd]_-{} & & A_{\gop } \ar[ll]_-{\phi _{\gop ,d}} \ar[dd]^-{} \\ \\ (B_{(0)}^{\otimes d})^{\Sigma _d} & & A_{(0)} \ar[ll]_-{\phi _{(0),d}} \enddiagram $$ The bottom horizontal homomorphism is the canonical section of the homomorphism $\psi _{(0),d}$. One can construct a suitable homomorphism $\psi _{\gop ,d}$ from $(B_{\gop }^{\otimes d})^{\Sigma _d}$ to $A_{\gop }$, such that $\phi _{\gop ,d}$ would be a section for it, and the diagram $$ \diagram (B_{\gop }^{\otimes d})^{\Sigma _d} \ar[rr]^-{\psi _{\gop ,d}} \ar[dd]_-{} & & A_{\gop } \ar[dd]^-{} \\ \\ (B_{(0)}^{\otimes d})^{\Sigma _d} \ar[rr]^-{\psi _{(0),d}} & & A_{(0)} \enddiagram $$ would be commutative. This is due to the normality of $Y$ and the finiteness of the morphism $f$. 
Indeed, let $\alpha $ be an element in $(B_{\gop }^{\otimes d})^{\Sigma _d}$. Considering it as an element in $(B_{(0)}^{\otimes d})^{\Sigma _d}$ and applying $\psi _{(0),d}$ we get the element $\beta =\psi _{(0),d}(\alpha )$ in $A_{(0)}$. Since $f$ is finite, so that $B$ is a finitely generated module over $A$, the algebra $B$ is integral over $A$. Then $B_{\gop }$ is integral over $A_{\gop }$. Hence, $(B_{\gop }^{\otimes d})^{\Sigma _d}$ is integral over $A_{\gop }$, see Exercise 3 on page 67 in \cite{AM}. Then $\alpha $ is an integral element over $A_{\gop }$. Since the bottom horizontal homomorphism $\phi _{(0),d}$ is the canonical section of the homomorphism $\psi _{(0),d}$, we see that the integrality of $\alpha $ implies the integrality of $\beta $ over $A_{\gop }$. Since $Y$ is a normal scheme, it means that $A_{\gop }$ is integrally closed in the fraction field $A_{(0)}$. Therefore, $\beta $ belongs to $A_{\gop }$. Thus, we obtain the desired homomorphism $\psi _{\gop ,d}$ from $(B_{\gop }^{\otimes d})^{\Sigma _d}$ to $A_{\gop }$. The local homomorphism $\psi _{\gop ,d}$ can be also denoted as $$ \psi _{y,d}:(\bcB _y^{\otimes d})^{\Sigma _d}\to \bcO _{Y,y}\; . $$ Using the fact that $(\bcB ^{\otimes d})^{\Sigma _d}$ and $\bcO _Y$ are sheaves, we can patch all the local homomorphisms $\psi _{y,d}$ into the global one, $$ \psi _d:(\bcB ^{\otimes d})^{\Sigma _d}\to \bcO _Y\; . $$ Since locally $\phi _{y,d}$ is a section of $\psi _{y,d}$, the same holds globally. As in the case of finite flat morphisms, since $\phi _d$ is a section of $\psi _d$ globally, the homomorphism $\psi _d$ gives the induced section $$ s_{X/Y,d}:Y\to \Sym ^d(X/Y) $$ of the structural morphism $$ \Sym ^d(X/Y)\to Y\; . $$ \begin{remark} {\rm The section $s_{X/Y,d}$ has been achieved specifically for the $d$-th symmetric power of $X$ over $Y$, where $d$ is the degree of the morphism from $X$ onto $Y$.
In other circumstances, the existence of the section $s_{X/Y,d}$ is not guaranteed at all. } \end{remark} \begin{example} {\rm Let $X$ be the affine plane $\AF ^2$ and $Y$ be the cone. The morphism from $\AF ^2$ onto $Y$ is given by the embedding of the ring of symmetric polynomials $$ k[x^2,xy,y^2] $$ into the ring $k[x,y]$. In other words, the morphism $X\to Y$ glues any two antipodal points into one. Then $s_{X/Y,d}$ does not exist for $d=1$, as there is no way to send the vertex of the cone to the plane. But $s_{X/Y,2}$ does exist, as we can send the vertex to the doubled origin of coordinates as a point of the symmetric square. } \end{example} Now, let $S$ be a scheme of finite type over a field $k$, let $X$ be a scheme projective over $S$, and fix a closed embedding $$ i:X\to \PR ^n_S $$ over $S$. In particular, $X$ is AF over $S$ and all relative symmetric powers $\Sym ^d(X/S)$ exist in $\Noe /S$. Notice that since $X$ is projective over $S$, so is the scheme $\Sym ^d(X/S)$, for every nonnegative integer $d$. Let $U$ be a noetherian scheme of finite type over $S$ and let $Z$ be a prime cycle in $z^{\eff }_d(X/S,0)(U)$, considered with the induced reduced closed subscheme structure on it. Let $$ f_Z:Z\to X\times _SU\to U $$ be the composition of the closed embedding of $Z$ into $X\times _SU$ with the projection onto $U$. Since the morphism $f_Z$ is finite, $f_Z$ is affine, and hence the relative symmetric powers of $Z/U$ exist. Then, as above, we have the canonical section $$ s_{Z/U,d}:U\to \Sym ^d(Z/U) $$ of the structural morphism $$ \Sym ^d(Z/U)\to U\; . $$ The closed embedding $$ Z\to X\times _SU $$ induces the morphism $$ \Sym ^d(Z/U)\to \Sym ^d(X\times _SU/U)\; , $$ and we also have the obvious morphism $$ \Sym ^d(X\times _SU/U)\to \Sym ^d(X/S)\; . $$ Composing all these morphisms, we obtain the morphism $$ \theta _{X/S}(U,Z):U\to \Sym ^d(X/S) $$ over $S$.
The morphisms $\theta _{X/S}(U,Z)$ for degrees $d'\leq d$ extend by linearity and induce a map $$ \theta _{X/S,d}(U):z^{\eff }_d((X,i)/S,0)(U)\to \Hom _S(U,\Sym ^d(X/S))\; . $$ The latter maps for all schemes $U$ yield a morphism of set-valued presheaves $$ \theta _{X/S,d}:z^{\eff }_d((X,i)/S,0)\to \Hom _S(-,\Sym ^d(X/S)) $$ on $\Noe /S$. Assume now that $S$ is semi-normal over $\QQ $. We claim that the restriction of the morphism $\theta _{X/S,d}$ to semi-normal schemes is exactly the isomorphism obtained by composing the isomorphisms (\ref{maincanonical1*}) and (\ref{maincanonical1'}) considered in Section \ref{relcycles}. \begin{small} \end{small} \begin{small} \end{small} \begin{small} {\sc Department of Mathematical Sciences, University of Liverpool, Peach Street, Liverpool L69 7ZL, England, UK} \end{small} \begin{footnotesize} {\it E-mail address}: {\tt [email protected]} \end{footnotesize} \end{document}
\begin{document} \def\spacingset#1{\renewcommand{\baselinestretch} {#1}\small\normalsize} \spacingset{1} \title{R-NL: Covariance Matrix Estimation for Elliptical Distributions based on Nonlinear Shrinkage} \thispagestyle{empty} \begin{abstract} We combine Tyler's robust estimator of the dispersion matrix with nonlinear shrinkage. This approach delivers a simple and fast estimator of the dispersion matrix in elliptical models that is robust against both heavy tails and high dimensions. We prove convergence of the iterative part of our algorithm and demonstrate the favorable performance of the estimator in a wide range of simulation scenarios. Finally, an empirical application demonstrates its state-of-the-art performance on real data. \end{abstract} \noindent \textbf{Keywords}: Heavy Tails, Nonlinear Shrinkage, Portfolio Optimization \section{Introduction} Many statistical applications rely on covariance matrix estimation. Two common challenges are (1)~the presence of heavy tails and (2)~the high-dimensional nature of the data. Both problems lead to suboptimal performance or even inconsistency of the usual sample covariance estimator $\ensuremath{ \mathbf{\hat{S}} }$. Consequently, there is a vast literature on addressing these problems. Two prominent ways to address (1) are (Maronna's) \mbox{$M$-estimators} of scatter (\cite{MestimatorofScatter}), as well as truncation of the sample covariance matrix; for example, see \cite{user_friendly_cov}. There also appear to be two main approaches to solving problem (2). The first is to assume a specific structure on the covariance matrix to reduce the number of parameters. One example of this is the ``spiked covariance model'', as explored e.g., in \cite{spikemodel1, spikemodel2, optimalshrinkageinspikedcovariancemodel}, a second is to assume (approximate) sparsity and to use thresholding estimators (\cite{sparsity1,sparsity2,sparsity3,sparsity4}). 
We also refer to \cite{user_friendly_cov} who present a range of general estimators under heavy tails and extend to the case $p > n$, by assuming specific structures on the covariance matrix. If one is not willing to assume such structure, a second approach is to leave the eigenvectors of the sample covariance matrix unchanged and to only adapt the eigenvalues. This leads to the class of estimators of \cite{stein:1975,stein:1986}. Linear shrinkage (\cite{Wolflinear}) as well as nonlinear shrinkage developed in \cite{WolfNL, Wolf2015, Analytical_Shrinkage, QIS2020} are part of this class. One promising line of research to address both problems at once is to extend (Maronna's) \mbox{$M$-estimators} of scatter \citep{MestimatorofScatter} with a form of shrinkage for high dimensions. This approach is in particular popular with a specific example of $M$-estimators called ``Tyler's estimator'' (\cite{Tyler}), which is derived in the context of elliptical distributions. Several papers have studied this approach, using a convex combination of the base estimator and a target matrix, usually the (scaled) identity matrix. We generally refer to such approaches as robust linear shrinkage estimators. For instance, \cite{possiblecompet_2014, M_estimatortheory_2016, Shrinkage_2021, Hall_Cov} combine the linear shrinkage with Maronna's $M$-estimators, whereas \cite{firstshrinkage, linearshrinkageinheavytails, yang2014minimum, zhang2016automatic} do so with Tyler's estimator. Since this approach of combining linear shrinkage with a robust estimator entails choosing a hyperparameter determining the amount of shrinkage, the second step often consists of deriving some (asymptotically) optimal parameter that then can be estimated from data. The approach results in estimation methods that are generally computationally inexpensive and it also enables strong theoretical results on the convergence of the underlying iterative algorithms. Despite these advantages, several problems remain.
First, the performance of these robust methods sometimes does not exceed the performance of the basic linear shrinkage estimator of \cite{Wolflinear} in heavy-tailed models, except for small sample sizes $n$ (say $n < 100$). In fact, the theoretical analysis of \cite{couillet2014large,M_estimatortheory_2016} shows that robust $M$-estimators using linear shrinkage are asymptotically equivalent to scaled versions of the linear shrinkage estimator of \cite{Wolflinear}. Depending on how the data-adaptive hyperparameter is chosen, the performance can even deteriorate quickly as the tails get lighter, as we demonstrate in our simulation study in Section~\ref{sec:mc}. Second, some robust methods cannot handle the case when the dimension $p$ is larger than the sample size $n$, such as \cite{Shrinkage_2021}. Third, some methods propose a choice of hyperparameter(s) through cross-validation, such as \cite{Shrinkage_2017, convexpenalties}, which can be computationally expensive. In this paper, we address these problems by developing a simple algorithm based on \emph{nonlinear} shrinkage (\cite{WolfNL, Wolf2015, Analytical_Shrinkage, QIS2020}), inspired by the above robust approaches and the work of \cite{comfortNL}. In essence, the algorithm applies the quadratic inverse shrinkage (QIS) method of \cite{QIS2020} to appropriately standardized data, thereby greatly increasing its finite-sample performance in heavy-tailed models. Thus, we refer to the new method as ``Robust Nonlinear Shrinkage'' (\mbox{R-NL}); in particular, we extend the proposal of \cite{comfortNL} from a parametric model to general elliptical distributions. This approach includes an iteration over the space of orthogonal matrices, which we prove converges to a stationary point. 
We motivate our approach using properties of elliptical distributions along the lines of \cite{linearshrinkageinheavytails, zhang2016automatic, Hall_Cov} and demonstrate the favorable performance of our method in a wide range of settings. Notably, our approach (i) greatly improves the performance of (standard) nonlinear shrinkage in heavy-tailed settings; (ii) does not deteriorate when moving from heavy to Gaussian tails; (iii) can handle the case $p>n$; and (iv) does not require the choice of a tuning parameter. The remainder of the article is organized as follows. Section \ref{sec: cont} lists our contributions. Section \ref{motivatingexample} presents an example to motivate our methodology. Section \ref{sec:method} describes the proposed new methodology and provides results concerning the convergence of the new algorithm. Section \ref{sec:mc} showcases the performance of our method in a simulation study using various settings for both $p < n$ and $p > n$. Section \ref{sec:empirics} applies our method to financial data, illustrating the performance of the method on real data. \subsection{Contributions} \label{sec: cont} To the best of our knowledge, no paper has so far attempted to combine nonlinear shrinkage of \cite{WolfNL, Wolf2015, Analytical_Shrinkage, QIS2020} with Tyler's method. As such, our approach differs markedly from previous ones. It is partly based on an $M$-estimator interpretation, but also adds the nonparametric nonlinear shrinkage approach. A downside of this approach is that theoretical convergence results are harder to come by. Nonetheless, we are able to show that the iterative part of our algorithm converges to a stationary point, a crucial result for the practical usefulness of the algorithm. Maybe the closest paper to our method is \cite{ourclosestcompetitor}, where the eigenvalues of Tyler's estimator are iteratively shrunken towards predetermined target eigenvalues, with a parameter $\alpha$ determining the shrinkage strength.
Through different objectives, they arrive at an algorithm from which the iterative part of our Algorithm \ref{RNL} can be recovered when setting $\alpha= \infty$. Additionally, using the eigenvalues from nonlinear shrinkage as the target eigenvalues, their method presents an alternative way of combining Tyler's estimator with nonlinear shrinkage. Though they did not originally propose this, it was suggested by an anonymous reviewer. However, while there is an overlap in the two algorithms for the corner case of $\alpha=\infty$, they arrive at their Algorithm 1 from a different angle than we do. Consequently, their theoretical results cannot be applied in our analysis. Moreover, they do not suggest how to choose the tuning parameter $\alpha$. In Appendix \ref{sec:further_results}, simulations indicate that when the target eigenvalues are obtained from nonlinear shrinkage, setting $\alpha=\infty$, and thus maximally shrinking towards the nonlinear shrinkage eigenvalues, is usually beneficial. In addition, these simulations show that the updating of eigenvalues we propose after the iterations have converged can lead to an additional boost in performance over their method. Whereas many of the aforementioned robust linear shrinkage papers have important theoretical results, the empirical examination of their estimators in simulations and real data applications is often limited. We attempt to give a more comprehensive empirical overview in this paper. Contrary to most of the previous papers, we also consider a comparatively large sample size of $n=300$ in our simulation study. Compared to 6 competing methods, our new approach displays a superior performance over a wide range of scenarios. We also provide a Matlab implementation of our method, as well as the code to replicate all simulations on \url{https://github.com/hedigers/RNL_Code}.
\begin{table*} \caption{Notation} \centering \begin{tabular}{l|l} Symbol & Description \\ \hline $n$ & Sample size\\ $p$ & Dimensionality\\ $\boldsymbol{\Sigma}\defeq \Var(\mathbf{Y})$ & The covariance matrix of the random vector $\mathbf{Y}$.\\ $\Tr(\mathbf{A})$ & Trace of a square matrix $\mathbf{A}$\\ $\|\mathbf{A}\|_F$ & Frobenius norm $\sqrt{\Tr(\mathbf{A}^{\top}\mathbf{A})}$ of a square matrix $\mathbf{A}$\\ $\mathbf{H}$ & Dispersion matrix \\ $\ensuremath{\mathcal{O}}$ & The orthogonal group\\ $\ensuremath{\mathcal{O}}eq$ & Equivalence class in $\ensuremath{\mathcal{O}}$\\ $\mathbf{U}$ & Arbitrary element of $\ensuremath{\mathcal{O}}$\\ $\ensuremath{\mathbf{V}}$ & Eigenvectors of $\mathbf{H}=\ensuremath{\mathbf{V}} \ensuremath{\boldsymbol{\Lambda}} \ensuremath{\mathbf{V}}^{\top}$\\ $\Vit{\ell}$ & $\ell$th iteration of the algorithm\\ $\ensuremath{\mathbf{\hat{V}}}$ & Critical point/solution/estimate\\ $\ensuremath{\mathcal{V}}$ & Subset of critical points of $\ensuremath{\mathcal{O}}$\\ $\ensuremath{\boldsymbol{\Lambda}}$ & True ordered eigenvalues of $\mathbf{H}$, up to scaling\\ $\ensuremath{\boldsymbol{\Lambda}_{0}}$ & Initial (shrunken) estimate of $\ensuremath{\boldsymbol{\Lambda}}$\\ $\ensuremath{\boldsymbol{\Lambda}_{R}}$ & Final \mbox{R-NL} (shrunken) estimate of $\ensuremath{\boldsymbol{\Lambda}}$\\ $\ensuremath{{\mathbb E}}igFVhat$ & Eigenvalues of $\f{\ensuremath{\mathbf{\hat{V}}}}$\\ $\ensuremath{{\mathbb E}}igFl{\ell+1}$ & Eigenvalues of $\f{\Vit{\ell}}$\\ $\ensuremath{\mbox{diag}}()$ & Transforms a vector $\mathbf{a} \in \ensuremath{{\mathbb R}}^p$ into a $p \times p$ diagonal matrix $\ensuremath{\mbox{diag}}(\mathbf{a})$ \end{tabular} \label{tab:notation} \end{table*} \section{Motivational Example}\label{motivatingexample} For a collection of $n$ independent and identically distributed (i.i.d.)
random vectors with values in $\ensuremath{{\mathbb R}}^p$, let $\ensuremath{\mathbf{\hat{V}}}=\begin{pmatrix} \mathbf{\hat{v}}_{1}, \ldots, \mathbf{\hat{v}}_{p} \end{pmatrix}$ be the matrix of eigenvectors of the sample covariance matrix $\ensuremath{ \mathbf{\hat{S}} }$. Nonlinear shrinkage, just as the linear shrinkage of \cite{Wolflinear}, only changes the eigenvalues of the sample covariance matrix, while keeping the eigenvectors $\ensuremath{\mathbf{\hat{V}}}$. That is, nonlinear shrinkage is also in the class of estimators of the form $\ensuremath{\mathbf{\hat{V}}} \Delta \ensuremath{\mathbf{\hat{V}}}^{\top}$, with $\Delta$ diagonal, a class that goes back to \cite{stein:1975,stein:1986}. It is well known that \begin{align*} &\argmin_{\Delta \text{ diagonal }} \| \boldsymbol{\Sigma} - \ensuremath{\mathbf{\hat{V}}} \Delta \ensuremath{\mathbf{\hat{V}}}^{\top} \|_{F} = \ensuremath{\mbox{diag}} \Bigl ( \begin{pmatrix} \delta_1 & \ldots& \delta_p \end{pmatrix}^{\top} \Bigr ) \\ &\mbox{with} \quad \delta_j:=\mathbf{\hat{v}}_{j}^{\top} \boldsymbol{\Sigma}\mathbf{\hat{v}}_{j}~; \end{align*} for example, see \cite[Section 3.1]{ledoit:wolf:power}. Nonlinear shrinkage takes the sample covariance matrix $\ensuremath{ \mathbf{\hat{S}} }$ as an input and outputs a shrunken estimate of $\boldsymbol{\Sigma}$ of the form $\ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}} \ensuremath{\mathbf{\hat{V}}}^{\top}$, where $\ensuremath{\boldsymbol{\Lambda}_{0}}=\mbox{diag}(\hat{\delta}_1, \ldots, \hat{\delta}_p)$ is a diagonal matrix. Although there are different schemes to come up with estimates $\{\hat \delta_j\}$, each scheme uses as the only inputs $p$, $n$, and the set of eigenvalues of $\ensuremath{ \mathbf{\hat{S}} }$. In this paper we derive a new estimator that is not in the class of \cite{stein:1975,stein:1986} but applies nonlinear shrinkage to a transformation of the data.
It thereby implicitly uses more information than just the sample covariance matrix (together with $p$ and $n$). Since we focus in the following on the class of elliptical distributions, we will differentiate between the dispersion matrix $\mathbf{H}$ and the covariance matrix $\boldsymbol{\Sigma}$. The former will be defined in Section \ref{sec:method}, but the main difference between the two population quantities is that $\boldsymbol{\Sigma}$ might not exist. If it does exist, $\boldsymbol{\Sigma}$ is simply given by $c \mathbf{H}$, with $c > 0$ depending on the underlying distribution. To illustrate the advantage of our method, we now present a motivational toy example before moving on to the general methodology. We first consider a multivariate Gaussian distribution in dimension $p=200$ with mean $\boldsymbol{\mu}=\mathbf{0}$ and covariance matrix $\boldsymbol{\Sigma}=\mathbf{H}$, where the $(i,j)$ element of $\mathbf{H}$ is $0.7^{\lvert i - j\rvert}$, as in \cite{linearshrinkageinheavytails}. We simulate $n=300$ i.i.d.\ observations from this distribution. For $j=1,\ldots,p$, the left panel of Figure \ref{fig:Eig} displays the theoretical optimum $\delta_j$, $\mathbf{\hat{v}}_{j}^{\top}\ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}} \ensuremath{\mathbf{\hat{V}}}^{\top}\mathbf{\hat{v}}_{j}\eqdef \hat{\delta}_j$, as well as $\mathbf{\hat{v}}_{j}^{\top}\mathbf{\hat{H}}\mathbf{\hat{v}}_{j}$, where $\mathbf{\hat{H}}$ is the proposed R-NL estimator. Importantly, the estimated values are very close to the theoretical optimum $\delta_j$, $j=1,\ldots,p$, for both nonlinear shrinkage and our proposed method. We next consider the same setting, but instead simulate from a multivariate $t$ distribution with $4$ degrees of freedom and dispersion matrix $\mathbf{H}$, such that the covariance matrix~$\boldsymbol{\Sigma}$ is $4/(4-2) \cdot \mathbf{H}$. 
In particular the $\mathbf{\hat{v}}_{j}^{\top}\mathbf{\hat{H}}\mathbf{\hat{v}}_{j}$ are multiplied by $c=2$ in this case to obtain an estimate of $\mathbf{\hat{v}}_{j}^{\top}\boldsymbol{\Sigma}\mathbf{\hat{v}}_{j}$. (The value $c=2$ would not be known in practice, but it is `fair' to use it in this toy example, since doing so does not favor one estimation method over the other). The right panel of Figure \ref{fig:Eig} displays the results. It can be seen that nonlinear shrinkage overestimates large values of $\delta_j$ (by a lot) and underestimates small values of $\delta_j$; on the other hand, our new method does not have this problem and its performance (almost) matches the one from the Gaussian case. \begin{figure} \caption{Comparison of the estimated values of nonlinear shrinkage and \mbox{R-NL}. The theoretical optimal eigenvalues $\mathbf{\hat{v}}_{j}^{\top}\boldsymbol{\Sigma}\mathbf{\hat{v}}_{j}$, $j=1,\ldots,p$, are shown for comparison. Left panel: multivariate Gaussian distribution. Right panel: multivariate $t$ distribution with $4$ degrees of freedom.} \label{fig:Eig} \end{figure} \section{Methodology} \label{sec:method} We assume that we observe an i.i.d.\ sample $\mathbb{Y}\defeq \{\mathbf{Y}_1, \ldots, \mathbf{Y}_n\} $ from a $p$-dimensional elliptical distribution. If $\mathbf{Y}$ has an elliptical distribution it can be represented as \begin{align}\label{distform} \mathbf{Y} \stackrel{D}{=} \boldsymbol{\mu} + R \mathbf{H}^{1/2} \boldsymbol{\xi}~, \end{align} where $R$ is a positive random variable, and $\boldsymbol{\xi}$ is uniformly distributed on the $p$-dimensional unit sphere, independently of~$R$, and $\stackrel{D}{=}$ denotes equality in distribution \citep{ellipticaldisttheory}. The dispersion matrix $\mathbf{H}$ is assumed to be symmetric positive-definite (pd), with eigendecomposition $\mathbf{H}=\ensuremath{\mathbf{V}} \ensuremath{\boldsymbol{\Lambda}} \ensuremath{\mathbf{V}}^{\top} $. If $\mathbf{Y}$ meets \eqref{distform}, we write $\mathbf{Y} \sim E_{p}(\boldsymbol{\mu}, \mathbf{H}, g )$, where $g$ is the ``generator'' that identifies the distribution of $R$; for example, see \cite{Kotzelliptical}. 
We assume this generator to exist, which is equivalent to $R$ having a density \citep{Kotzelliptical}. In the following we restrict ourselves to distributions of the form \eqref{distform} with $\boldsymbol{\mu}=\mathbf{0}$ and such that second moments exist. The assumption $\boldsymbol{\mu}=\mathbf{0}$ is used for simplicity, though it is not necessarily restrictive in the context of elliptical distributions. We refer to the discussion in \cite[Section 2]{thresholdingandTyler}. Then $\Var(\mathbf{Y})=c \mathbf{H}$ for some $c > 0$. Following \cite{linearshrinkageinheavytails}, we will normalize our estimators of $\mathbf{H}$ to have trace $p$. We note however that, to obtain an estimator of $\boldsymbol{\Sigma}$, one could instead normalize the estimator to have the same trace as $\hat{\mathbf{S}}$. As an illustration, in the example from the right panel of Figure~\ref{fig:Eig}, $\Tr(\mathbf{\hat{S}}) \approx 428$ whereas $\Tr(\boldsymbol{\Sigma}) = 400$. \subsection{Robust Nonlinear Shrinkage} We start by outlining our main idea. Let $\|\cdot \|$ be the Euclidean norm on $\ensuremath{{\mathbb R}}^p$. As shown in \cite{centralangulargaussian}, if $\mathbf{Y} \sim E_{p}(\boldsymbol{\mu}, \mathbf{H}, g )$, $\mathbf{Z}\defeq\frac{\mathbf{Y}}{\|\mathbf{Y} \|}$ has a central angular Gaussian distribution with density \begin{align}\label{angularGaussiandist} p(\mathbf{z}; \mathbf{H} ) \propto |\mathbf{H}|^{-1/2} \cdot \left(\mathbf{z}^{\top} \mathbf{H}^{-1}\mathbf{z} \right)^{-p/2}~, \end{align} where for $a,b \in \ensuremath{{\mathbb R}}$, $a \propto b$ means there exists $c > 0$ with $a=cb$. We will also write $\mathbf{A} \propto \mathbf{B}$, for two $p \times p$ matrices $\mathbf{A}, \mathbf{B}$ if $\mathbf{A} =c \mathbf{B}$. The likelihood in \eqref{angularGaussiandist} is the starting point of the original Tyler's method. 
Taking the derivative of \eqref{angularGaussiandist}, Tyler's estimator $ \ensuremath{\mathbf{\hat{H}}_{T}}$ is implicitly given by the following condition: \begin{align}\label{Tylerestimation} \ensuremath{\mathbf{\hat{H}}_{T}} = \frac{p}{n} \sum_{t=1}^n \frac{\mathbf{Z}_t \mathbf{Z}_t^{\top}}{\mathbf{Z}_t^{\top} \ensuremath{\mathbf{\hat{H}}_{T}}^{-1} \mathbf{Z}_t}~. \end{align} This estimator is obtained as the limit of the iterations \begin{align}\label{Tyleriteration} \mathbf{\hat{H}}^{[\ell+1]} \ \propto \ \frac{p}{n} \sum_{t=1}^n \frac{\mathbf{Z}_t \mathbf{Z}_t^{\top}}{\mathbf{Z}_t^{\top} (\mathbf{\hat{H}}^{[\ell]})^{-1} \mathbf{Z}_t}~, \end{align} where $\propto$ indicates that $\mathbf{\hat{H}}^{[\ell+1]}$ is actually obtained after an additional trace-normalization step; for example, see \cite{Tyler} or~\cite{linearshrinkageinheavytails}. Robust linear shrinkage methods such as the method of~\cite{linearshrinkageinheavytails} augment~\eqref{Tyleriteration} by shrinking towards the identity matrix in each iteration. That is, if for an $p \times p$ matrix $\mathbf{A}$ and $\rho \in [0,1]$, we define $\LS{\mathbf{A}, \rho} \defeq (1-\rho) \mathbf{A} + \rho \mathbf{I} $, then the robust linear shrinkage estimator is obtained from the iterations \begin{align}\label{LSTyleriteration} \mathbf{\hat{H}}^{[\ell+1]} \ \propto \ \LS{ \frac{p}{n} \sum_{t=1}^n \frac{\mathbf{Z}_t \mathbf{Z}_t^{\top}}{\mathbf{Z}_t^{\top} (\mathbf{\hat{H}}^{[\ell]})^{-1} \mathbf{Z}_t}, \rho}~, \end{align} where again $\propto$ indicates a trace-normalization step. Similarly, denote for any symmetric pd matrix $\mathbf{A}$ by $\ensuremath{{\mathbb N}}L{\mathbf{A}}$ the matrix that is obtained when using nonlinear shrinkage on $\mathbf{A}$. A few clarifications are in order at this point. First, in the existing literature on nonlinear shrinkage, $\mathbf{A}$ is always the sample covariance matrix; but the `algorithm' of nonlinear shrinkage allows for a more general input instead. 
Second, there are (at least) three different nonlinear shrinkage schemes by now: the numerical scheme called QuEST of \cite{Wolf2015}, the analytical scheme of \cite{Analytical_Shrinkage}, and the QIS method of \cite{QIS2020}, which is also of analytical nature; our methodology allows for the use of any such scheme, with our personal choice being the QIS method. Third, any `algorithm' of nonlinear shrinkage needs as an additional input to $\mathbf{A}$, which of course determines the dimension $p$, also the sample size $n$, which we may treat as fixed and known in our methodology. Applying nonlinear shrinkage to the matrix $\mathbf{A}$ leaves its eigenvectors unchanged and only changes its eigenvalues. The way the eigenvalues are changed depends on the particular nonlinear shrinkage scheme; for example, see \cite[Section 4.5]{QIS2020} for the details concerning the QIS method. In analogy to the case of linear shrinkage, we could now apply nonlinear shrinkage each time in the above iteration. That is, we could iterate \begin{align}\label{QISTyleriteration} \mathbf{\hat{H}}^{[\ell+1]} \ \propto \ \ensuremath{{\mathbb N}}L{ \frac{p}{n} \sum_{t=1}^n \frac{\mathbf{Z}_t \mathbf{Z}_t^{\top}}{\mathbf{Z}_t^{\top} (\mathbf{\hat{H}}^{[\ell]})^{-1} \mathbf{Z}_t}}~, \end{align} where the input to NL corresponds to the sample covariance matrix of the scaled data $\mathbf{Z}_t/(\mathbf{Z}_t^{\top} (\mathbf{\hat{H}}^{[\ell]})^{-1} \mathbf{Z}_t/p)^{1/2}.$ Unfortunately, contrary to the case of linear shrinkage, it is not clear how to ensure convergence for such an approach. However, we note that iteration~\eqref{QISTyleriteration} can be seen as a simultaneous iteration over the eigenvalues and eigenvectors, whereby only the former is changed by nonlinear shrinkage. Following the ideas in \cite{comfortNL}, we instead aim to iterate over the eigenvectors for fixed (shrunken) eigenvalues. 
That is, after the first iteration, we fix the eigenvalues obtained by nonlinear shrinkage, denoted $\ensuremath{\boldsymbol{\Lambda}_{0}}$. Choosing $\mathbf{\hat{H}}^{[0]}=\mathbf{I}$, this corresponds to using nonlinear shrinkage on the sample covariance matrix of $\mathbb{Z}\defeq \{\mathbf{Z}_1, \ldots, \mathbf{Z}_n \}$, with $\mathbf{Z}_t\defeq \mathbf{Y}_t/\| \mathbf{Y}_t \|$. It should be mentioned here that any nonlinear shrinkage scheme ensures that the elements on the diagonal of $\ensuremath{\boldsymbol{\Lambda}_{0}}$, denoted $\hat{\delta}_j$, $j=1,\ldots,p$, are all strictly positive. We then optimize the likelihood of the central angular Gaussian distribution only with respect to the orthogonal matrix $\ensuremath{\mathbf{V}}$. That is, we solve \begin{align}\label{problem} \ensuremath{\mathbf{\hat{V}}} &\defeq \argmax_{ \mathbf{U} \in \ensuremath{\mathcal{O}}} \sum_{t=1}^{n} \ln(p(\mathbf{Z}_t; \mathbf{U}, \ensuremath{\boldsymbol{\Lambda}_{0}})) \nonumber \\ &= \argmin_{ \mathbf{U} \in \ensuremath{\mathcal{O}}} \frac{1}{n} \sum_{t=1}^{n} \ln\left( \mathbf{Z}_t^{\top} \mathbf{U} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \mathbf{U}^{\top} \mathbf{Z}_t \right)~, \end{align} where $\ensuremath{\mathcal{O}}\defeq \{\mathbf{U}: \mathbf{U}^{\top}\mathbf{U}=\mathbf{U}\mathbf{U}^{\top}=\mathbf{I}\}$ is the orthogonal group. Finally, once $\ensuremath{\mathbf{\hat{V}}}$ is obtained, $\ensuremath{\boldsymbol{\Lambda}_{0}}$ is updated. That is, we apply nonlinear shrinkage to the covariance matrix of the standardized data \begin{align}\label{standardized} \tilde{\mathbf{Z}}_t\defeq\frac{\mathbf{Z}_t }{\sqrt{\mathbf{Z}_t^{\top} \ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \ensuremath{\mathbf{\hat{V}}}^{\top} \mathbf{Z}_t/p}}~, t=1,\ldots,n, \end{align} to obtain $\ensuremath{\boldsymbol{\Lambda}_{R}}$. 
The final estimate is then given as \begin{align}\label{Hestimate} \mathbf{\hat{H}}\defeq p\cdot \ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{R}} \ensuremath{\mathbf{\hat{V}}}^{\top}/\Tr( \ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{R}} \ensuremath{\mathbf{\hat{V}}}^{\top})~. \end{align} Since, as we will show below, the eigenvectors of the sample covariance matrix of $\{\mathbf{\tilde{Z}}_1, \ldots, \mathbf{\tilde{Z}}_n\}$ are again given by $\ensuremath{\mathbf{\hat{V}}}$, it holds that \begin{align*} \mathbf{\hat{H}} \ \propto \ \ensuremath{{\mathbb N}}L{ \frac{1}{n} \sum_{t=1}^{n}\frac{\mathbf{Z}_t \mathbf{Z}_t^{\top} }{\mathbf{Z}_t^{\top} \ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \ensuremath{\mathbf{\hat{V}}}^{\top} \mathbf{Z}_t/p}}~. \end{align*} The whole procedure is summarized in Algorithm \ref{RNL}. We now detail how to solve \eqref{problem}. For a $p \times p$ symmetric pd matrix $\mathbf{A}$, let \[ \mathbf{A}=\mathbf{U}_A \boldsymbol{\Lambda }_A\mathbf{U}_A^{\top}\ , \] be its eigendecomposition, where we assume the elements of $\boldsymbol{\Lambda }_A$ to be ordered from smallest to largest. We define $\mathcal{E}$ to be the operator that returns all possible matrices of eigenvectors. That is, $\mathcal{E}(\mathbf{A})$ is a subset of $\ensuremath{\mathcal{O}}$ and for any $\mathbf{U} \in \mathcal{E}(\mathbf{A})$, $\mathbf{U}^{\top} \mathbf{A} \mathbf{U}$ is a diagonal matrix with elements ordered from smallest to largest. We also define in the following for $\mathbf{U} \in \ensuremath{\mathcal{O}}$, \begin{align}\label{fdef} \f{\mathbf{U}}\defeq\sum_{t=1}^{n}\frac{\mathbf{Z}_t \mathbf{Z}_t^{\top} }{ \mathbf{Z}_t^{\top} \mathbf{U} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \mathbf{U}^{\top} \mathbf{Z}_t }~, \end{align} where the dependence on $\mathbf{Z}_1,\dots,\mathbf{Z}_n$ and $\ensuremath{\boldsymbol{\Lambda}_{0}}$ is suppressed to keep notation compact. 
\begin{restatable}{lemma}{lone}\label{existence} A minimizer $\ensuremath{\mathbf{\hat{V}}}$ of \eqref{problem} exists and meets the condition \begin{align}\label{critical_conditions} \ensuremath{\mathbf{\hat{V}}}^{\top} \f{\ensuremath{\mathbf{\hat{V}}}} \ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} = \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \ensuremath{\mathbf{\hat{V}}}^{\top} \f{\ensuremath{\mathbf{\hat{V}}}} \ensuremath{\mathbf{\hat{V}}}~. \end{align} \end{restatable} \begin{proof} Since the orthogonal group is compact \citep[Ch. 3]{MahonyBook} and \begin{align} \mathbf{U} \mapsto f(\mathbf{U}):= \frac{1}{n}\sum_{t=1}^{n} \ln\left( \mathbf{Z}_t^{\top} \mathbf{U} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \mathbf{U}^{\top} \mathbf{Z}_t \right)~, \end{align} is continuous, $f(\mathbf{U})$ takes its minimal and maximal value on $\ensuremath{\mathcal{O}}$. Thus there exists $\ensuremath{\mathbf{\hat{V}}} \in \ensuremath{\mathcal{O}}$ such that $\ensuremath{\mathbf{\hat{V}}}$ minimizes $f$. On the other hand, according to \cite{Wen2013}, if $\ensuremath{\mathbf{\hat{V}}}$ is a minimizer of $f$, it must satisfy the following first-order conditions: \begin{align*} &G\ensuremath{\mathbf{\hat{V}}}^{\top} - \ensuremath{\mathbf{\hat{V}}} G^{\top} = \mathbf{0}~, \end{align*} where $G$ is the unconstrained gradient of problem \eqref{problem}, \begin{align*} G:=\frac{1}{n}\sum_{t=1}^{n} \frac{1}{ \mathbf{Z}_t^{\top} \ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \ensuremath{\mathbf{\hat{V}}}^{\top} \mathbf{Z}_t } \mathbf{Z}_t \mathbf{Z}_t^{\top} \ensuremath{\mathbf{\hat{V}}}\ensuremath{\boldsymbol{\Lambda}_{0}}^{-1}~. 
\end{align*} Thus \begin{align*} &\sum_{t=1}^{n} \left( \frac{\mathbf{Z}_t \mathbf{Z}_t^{\top} \ensuremath{\mathbf{\hat{V}}}\ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \ensuremath{\mathbf{\hat{V}}}^{\top}}{ \mathbf{Z}_t^{\top} \ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \ensuremath{\mathbf{\hat{V}}}^{\top} \mathbf{Z}_t } - \frac{\ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \ensuremath{\mathbf{\hat{V}}}^{\top} \mathbf{Z}_t \mathbf{Z}_t^{\top} }{ \mathbf{Z}_t^{\top} \ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \ensuremath{\mathbf{\hat{V}}}^{\top} \mathbf{Z}_t } \right) = \mathbf{0}\\ & \ensuremath{\mathbf{\hat{V}}}^{\top} \sum_{t=1}^{n}\frac{\mathbf{Z}_t \mathbf{Z}_t^{\top} }{ \mathbf{Z}_t^{\top} \ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \ensuremath{\mathbf{\hat{V}}}^{\top} \mathbf{Z}_t }\ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \\ &- \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \ensuremath{\mathbf{\hat{V}}}^{\top} \sum_{t=1}^{n}\frac{\mathbf{Z}_t \mathbf{Z}_t^{\top} }{ \mathbf{Z}_t^{\top} \ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \ensuremath{\mathbf{\hat{V}}}^{\top} \mathbf{Z}_t } \ensuremath{\mathbf{\hat{V}}} = \mathbf{0}~. 
\end{align*} Hence, any minimizer $\ensuremath{\mathbf{\hat{V}}} \in \ensuremath{\mathcal{O}}$ meets \eqref{critical_conditions}.\\ \null \ensuremath{\Box} \end{proof} The necessary condition in \eqref{critical_conditions} is true in particular if $\ensuremath{\mathbf{\hat{V}}}$ diagonalizes $\f{\ensuremath{\mathbf{\hat{V}}}}$, or \begin{align}\label{recursion} \ensuremath{\mathbf{\hat{V}}} \in \mathcal{E} \left( \frac{1}{n} \sum_{t=1}^n \frac{\mathbf{Z}_t \mathbf{Z}_t^{\top} }{\mathbf{Z}_t^{\top} \ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \ensuremath{\mathbf{\hat{V}}}^{\top} \mathbf{Z}_t} \right)~, \end{align} in analogy to \eqref{Tylerestimation}. Thus given $\ensuremath{\boldsymbol{\Lambda}_{0}}$, we propose the following iterations \begin{align} \Vit{\ell + 1}& \in \mathcal{E} \left( \frac{1}{n} \sum_{t=1}^n \frac{\mathbf{Z}_t \mathbf{Z}_t^{\top} }{\mathbf{Z}_t^{\top} \Vit{\ell} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\Vit{\ell})^{\top} \mathbf{Z}_t} \right)~, \label{iteration1} \end{align} starting with \begin{align} \Vit{1} \in \mathcal{E} \left( \frac{1}{n} \sum_{t=1}^n \frac{\mathbf{Z}_t \mathbf{Z}_t^{\top} }{\| \mathbf{Z}_t \|} \right)=\mathcal{E} \left( \frac{1}{n} \sum_{t=1}^n \mathbf{Z}_t \mathbf{Z}_t^{\top} \right)~. \end{align} As noted above, this corresponds to the iterations in \cite[Algorithm 1]{ourclosestcompetitor}, for $\alpha=\infty$. We also note that the time complexity in each iteration is the same as for the iterations of the original Tyler's method in \eqref{Tyleriteration}, namely $O(np^2 + p^3)$; for example, see \cite{timecomplexityTylerest}. We now proceed by showing that any sequence generated by these iterations has a limit $\Vit{\infty}$ such that \eqref{critical_conditions} holds. 
To this end we adapt the approach taken in \cite{Wieselhottheory, majorizationpaper, existence_uniqueness_algorithms} and define the surrogate function \begin{align}\label{surrogate} g(\mathbf{U}\mid \Vit{\ell} ) \defeq &\frac{1}{n}\sum_{t=1}^{n} \ln\left( \mathbf{Z}_t^{\top} \Vit{\ell} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\Vit{\ell})^{\top} \mathbf{Z}_t \right) \nonumber \\ &+\frac{1}{n}\sum_{t=1}^{n} \frac{\mathbf{Z}_t^{\top} \mathbf{U} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \mathbf{U}^{\top} \mathbf{Z}_t}{\mathbf{Z}_t^{\top} \Vit{\ell} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\Vit{\ell})^{\top} \mathbf{Z}_t} - 1~. \end{align} Then for $f(\mathbf{U}) \defeq \frac{1}{n}\sum_{t=1}^{n} \ln\left( \mathbf{Z}_t^{\top} \mathbf{U} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \mathbf{U}^{\top} \mathbf{Z}_t \right)$, \begin{restatable}{lemma}{lthree}\label{surrogateproperites} The surrogate function $g$ satisfies: \begin{align} f(\mathbf{U}) &\leq g(\mathbf{U}\mid \Vit{\ell} ) \text{ for all } \mathbf{U}, \Vit{\ell} \in \ensuremath{\mathcal{O}}\\ f(\Vit{\ell}) &= g(\Vit{\ell}\mid \Vit{\ell} )~, \end{align} and for $\Vit{\ell + 1}$ as in \eqref{iteration1}, \begin{align} g(\Vit{\ell + 1}\mid \Vit{\ell} ) \leq g(\mathbf{U}\mid \Vit{\ell} ) \text{ for all } \mathbf{U} \in \ensuremath{\mathcal{O}}~. \end{align} \end{restatable} \begin{proof} The first inequality follows from the fact that, for~any \mbox{$a > 0$}, $\log(x) \leq \log(a) + (\frac{x}{a} - 1)$ (\cite{Wieselhottheory}), whereas the second equality is trivial. For the last claim, we can write \begin{align*} &\argmin_{\mathbf{U} \in \ensuremath{\mathcal{O}}} g(\mathbf{U}\mid \Vit{\ell} )\\ &= \argmin_{\mathbf{U} \in \ensuremath{\mathcal{O}}} \mbox{Tr}\left(\frac{1}{n}\sum_{t=1}^{n} \frac{\mathbf{Z}_t \mathbf{Z}_t^{\top}}{\mathbf{Z}_t^{\top} \Vit{\ell} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\Vit{\ell})^{\top} \mathbf{Z}_t} \mathbf{U} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \mathbf{U}^{\top}\right)~. 
\end{align*} Since we assume $\ensuremath{\boldsymbol{\Lambda}_{0}}$ has ordered values, this is globally minimized when $\mathbf{U}$ is chosen to diagonalize $\frac{1}{n}\sum_{t=1}^{n} \frac{\mathbf{Z}_t \mathbf{Z}_t^{\top}}{\mathbf{Z}_t^{\top} \Vit{\ell} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\Vit{\ell})^{\top} \mathbf{Z}_t}$; for example, see \cite{comfortNL}.\\ \null \ensuremath{\Box} \end{proof} Define now the set of critical points as $\ensuremath{\mathcal{V}} \subset \ensuremath{\mathcal{O}}$, that is, \[ \ensuremath{\mathcal{V}}:=\{\ensuremath{\mathbf{\hat{V}}} \in \ensuremath{\mathcal{O}} \text{ such that } \eqref{critical_conditions} \text{ holds}\}~, \] and let for all $\mathbf{U} \in \ensuremath{\mathcal{O}}$, \begin{align*} d(\mathbf{U},\ensuremath{\mathcal{V}}) := \inf_{\ensuremath{\mathbf{\hat{V}}} \in \ensuremath{\mathcal{V}}} \|\ensuremath{\mathbf{\hat{V}}}-\mathbf{U} \|_{F}~, \end{align*} as in \cite{majorizationpaper}. Using Lemma \ref{surrogateproperites} the following convergence result can be obtained. \begin{restatable}{theorem}{thmone}\label{awesometheorem} For any sequence $(\Vit{\ell})_{\ell=1}^{\infty}$ generated by the above iterations, \begin{align}\label{monotonicity} f(\Vit{\ell + 1}) \leq f(\Vit{\ell})~, \text{ for all } \ell, \end{align} and \begin{align}\label{convergence} \lim_{\ell \to \infty }d(\Vit{\ell},\ensuremath{\mathcal{V}}) = 0~. \end{align} \end{restatable} \begin{proof} The proof closely follows the argument in \cite[Theorem 1, Corollary 1]{majorizationpaper}. Using Lemma \ref{surrogateproperites}, we have that for all $\ell$, \begin{align*} f(\Vit{\ell + 1}) \leq g(\Vit{\ell + 1} \mid \Vit{\ell}) \leq g(\Vit{\ell} \mid \Vit{\ell}) =f(\Vit{\ell})~, \end{align*} proving the first part. 
For the second, since the orthogonal group $\ensuremath{\mathcal{O}}$ is compact, there exists a subsequence $(\Vit{\ell_k})_{k=1}^{\infty}$ of $(\Vit{\ell})_{\ell=1}^{\infty}$ that converges to $\Vit{\infty} \in \ensuremath{\mathcal{O}}$. Additionally, for all $\mathbf{U} \in \ensuremath{\mathcal{O}}$, \begin{align*} &g(\mathbf{U} \mid \Vit{\ell_k}) \geq g(\Vit{\ell_{k} + 1 } \mid \Vit{\ell_k}) \geq f(\Vit{\ell_{k} + 1 })\\ &\geq f(\Vit{\ell_{k+1}}) =g(\Vit{\ell_{k+1}} \mid \Vit{\ell_{k+1}})~, \end{align*} since by the properties of subsequences $\ell_{k+1} \geq \ell_k +1 $. Letting $k \to \infty$, thanks to the joint continuity of $(\mathbf{U}_1, \mathbf{U}_2) \mapsto g(\mathbf{U}_1 \mid \mathbf{U}_2)$, this implies \begin{align*} g(\mathbf{U} \mid \Vit{\infty}) \geq g(\Vit{\infty} \mid \Vit{\infty})~, \text{for all } \mathbf{U} \in \ensuremath{\mathcal{O}} . \end{align*} Thus $\Vit{\infty}$ is the global minimizer of the function $\mathbf{U} \mapsto g(\mathbf{U} \mid \Vit{\infty})$. In particular, the first-order conditions must hold: Thus \begin{align*} &G(\Vit{\infty})^{\top} - \Vit{\infty} G^{\top} = \mathbf{0}~, \end{align*} where $G$ is the unconstrained derivative at $\Vit{\infty}$: \begin{align*} G:=\frac{1}{n}\sum_{t=1}^{n} \frac{1}{ \mathbf{Z}_t^{\top} \Vit{\infty} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\Vit{\infty})^{\top} \mathbf{Z}_t } \mathbf{Z}_t \mathbf{Z}_t^{\top} \Vit{\infty}\ensuremath{\boldsymbol{\Lambda}_{0}}^{-1}~. 
\end{align*} Thus it holds that \begin{align*} &(\Vit{\infty})^{\top} \sum_{t=1}^{n}\frac{\mathbf{Z}_t \mathbf{Z}_t^{\top} }{ \mathbf{Z}_t^{\top} \Vit{\infty} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\Vit{\infty})^{\top} \mathbf{Z}_t }\Vit{\infty} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \\ &- \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\Vit{\infty})^{\top} \sum_{t=1}^{n}\frac{\mathbf{Z}_t \mathbf{Z}_t^{\top} }{ \mathbf{Z}_t^{\top} \Vit{\infty} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\Vit{\infty})^{\top} \mathbf{Z}_t } \Vit{\infty} = \mathbf{0}~, \end{align*} which corresponds to the desired first-order conditions for the minimization of $f$ and thus $\Vit{\infty} \in \ensuremath{\mathcal{V}}$, or $d(\Vit{\infty}, \ensuremath{\mathcal{V}})=0$. Repeating this argument, it follows that any subsequence of $(\Vit{\ell})_{\ell=1}^{\infty}$ has a further subsequence converging to some $\Vit{\infty}$ (depending on the subsequence) with $d(\Vit{\infty}, \ensuremath{\mathcal{V}})=0$. Now assume the overall sequence does not converge to a point in $\ensuremath{\mathcal{V}}$. Then there is a subsequence $(\Vit{\ell_k})_{k=1}^{\infty}$ such that for all $k$ \begin{align*} d(\Vit{\ell_k}, \ensuremath{\mathcal{V}}) \geq \varepsilon ~, \end{align*} for some $\varepsilon>0$. But then this would be true also for any subsequence, a contradiction.\\ \null \ensuremath{\Box} \end{proof} Thus, as $\ell \to \infty$, $\Vit{\ell}$ gets arbitrarily close to a critical point. This leads to the following convergence criterion: \begin{align*} &\|(\Vit{\ell-1})^{\top} \f{\Vit{\ell-1}} \Vit{\ell-1} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} \\ &- \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\Vit{\ell})^{\top} \f{\Vit{\ell}} \Vit{\ell}\|_F\leq \epsilon~, \end{align*} where $\epsilon>0$ is some convergence tolerance. This criterion is used in Algorithm \ref{VIteration} and we set $\epsilon=10^{-10}$ in Sections \ref{sec:mc} and~\ref{sec:empirics}. 
Although \mbox{R-NL} is no longer in the same class of estimators as nonlinear shrinkage, namely the class of \cite{stein:1975,stein:1986}, an interesting question is whether it is still \emph{rotation-equivariant}. An estimator $\mathbf{\hat{H}}$ applied to $\mathbb{Y}=\{\mathbf{Y}_1, \ldots, \mathbf{Y}_n\}$ is rotation-equivariant if, for any rotation $\mathbf{R}$ and rotated data $\mathbf{\bar{Y}}_t\defeq \mathbf{R} \mathbf{Y}_t$, $t=1,\ldots,n$, the estimate of the rotated data, $\mathbf{\hat{H}}_{\mathbf{R}}$, satisfies \begin{align}\label{H_R} \mathbf{\hat{H}}_{\mathbf{R}}= \mathbf{R} \mathbf{\hat{H}} \mathbf{R}^{\top}~. \end{align} This is true for any estimator in the class of \cite{stein:1975,stein:1986} and, therefore, in particular for nonlinear shrinkage. We now show that this is true for \mbox{R-NL} as well, using the following lemma: \begin{restatable}{lemma}{lfour}\label{rotationeq} Let $\mathbf{R}$ be an arbitrary rotation, $\mathbf{\bar{Z}}_t\defeq \mathbf{R} \mathbf{Z}_t$ and $\Vit{\ell}_r$ be the $\ell$th iteration of Algorithm \ref{VIteration} applied to $\{\mathbf{\bar{Z}}_1, \ldots, \mathbf{\bar{Z}}_n \}$. Then \begin{align} &\mathcal{E} \left( \frac{1}{n} \sum_{t=1}^n \frac{\mathbf{\bar{Z}}_t \mathbf{\bar{Z}}_t^{\top} }{\mathbf{\bar{Z}}_t^{\top} \Vit{\ell}_r \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\Vit{\ell}_r)^{\top} \mathbf{\bar{Z}}_t} \right)=\left\{\mathbf{R} \Vit{\ell +1 }:\right. \nonumber \\ &\left. \Vit{\ell + 1} \in \mathcal{E} \left( \frac{1}{n} \sum_{t=1}^n \frac{\mathbf{Z}_t \mathbf{Z}_t^{\top} }{\mathbf{Z}_t^{\top} \Vit{\ell} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\Vit{\ell})^{\top} \mathbf{Z}_t} \right) \right\}~. \label{rotationsolution} \end{align} \end{restatable} \begin{proof} It clearly holds that \eqref{rotationsolution} is true for $\ell=0$. Assume \eqref{rotationsolution} holds for $\ell$, we show that it holds for $\ell + 1$: By assumption we can write $\Vit{\ell}_r=\mathbf{R} \Vit{\ell}$. 
Thus \begin{align*} &\mathcal{E} \left( \frac{1}{n} \sum_{t=1}^n \frac{\mathbf{\bar{Z}}_t \mathbf{\bar{Z}}_t^{\top} }{\mathbf{\bar{Z}}_t^{\top} \Vit{\ell}_r \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\Vit{\ell}_r)^{\top} \mathbf{\bar{Z}}_t} \right)\\ &=\mathcal{E} \left(\mathbf{R} \frac{1}{n} \sum_{t=1}^n \frac{\mathbf{Z}_t \mathbf{Z}_t^{\top} }{\mathbf{Z}_t^{\top} \Vit{\ell} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} ( \Vit{\ell})^{\top} \mathbf{Z}_t} \mathbf{R}^{\top} \right)\\ &=\mathbf{R}\mathcal{E} \left( \frac{1}{n} \sum_{t=1}^n \frac{\mathbf{Z}_t \mathbf{Z}_t^{\top} }{\mathbf{Z}_t^{\top} \Vit{\ell} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} ( \Vit{\ell})^{\top} \mathbf{Z}_t} \right)~, \end{align*} and thus \eqref{rotationsolution} holds true.\\ \null \ensuremath{\Box} \end{proof} We note that the rotation of the original data in $\mathbb{Y}$ corresponds to a rotation of $\mathbb{Z}$, since for all $t$, $\mathbf{R} \mathbf{Y}_t/\|\mathbf{R} \mathbf{Y}_t \|=\mathbf{R} \mathbf{Y}_t/\|\mathbf{Y}_t \|=\mathbf{R} \mathbf{Z}_t$. Thus if $(\Vit{\ell})_{\ell=1}^{\infty}$ is a sequence generated by Algorithm \ref{VIteration} for the data $\{\mathbf{Y}_1, \ldots, \mathbf{Y}_n \}$, then $(\mathbf{R}\Vit{\ell})_{\ell=1}^{\infty}$ is a sequence generated for the rotated data. Consequently, the corresponding estimate of $\mathbf{H}_{\mathbf{R}}$ for the rotated data will be of the form \eqref{H_R}. \subsection{Uniqueness}\label{practiceimprovsec} The matrix $\ensuremath{\mathbf{\hat{V}}}$ in \eqref{recursion} (and consequently in \eqref{critical_conditions}) is not unique in general. However, we are ultimately not interested in $\ensuremath{\mathbf{\hat{V}}}$, but in $\ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}} \ensuremath{\mathbf{\hat{V}}}^{\top}$. A natural question is thus whether $\ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}} \ensuremath{\mathbf{\hat{V}}}^{\top}$ is unique even if $\ensuremath{\mathbf{\hat{V}}}$ is not. 
This turns out to be true with probability one, as we detail now. Let in the following $\ensuremath{{\mathbb E}}igFVhat$ be the diagonal matrix of (ordered) eigenvalues of $F \bigl (\ensuremath{\mathbf{\hat{V}}} \bigr )$. It has the form $\ensuremath{{\mathbb E}}igFVhat=\ensuremath{{\mathbb E}}igFVhat_{+}$, if $p < n$ and \begin{align*} \ensuremath{{\mathbb E}}igFVhat= \begin{pmatrix} \mathbf{0}_{p-n+1, p-n+1} & \mathbf{0}_{p-n+1, n-1}\\ \mathbf{0}_{n-1, p-n+1} & \ensuremath{{\mathbb E}}igFVhat_{+}\end{pmatrix} , & \text{ if } p \geq n~, \end{align*} where $\ensuremath{{\mathbb E}}igFVhat_{+}$ is a diagonal matrix with the largest $\min(n-1,p)$ eigenvalues and $\mathbf{0}_{n, p}$ is an $n \times p$ matrix of zeros. We denote the diagonal elements of $\ensuremath{{\mathbb E}}igFVhat$ as $\hat{\lambda}_1, \ldots \hat{\lambda}_p$. Define similarly the matrix of eigenvalues of $\f{\Vit{\ell-1}}$ as $\ensuremath{{\mathbb E}}igFl{\ell}$, with elements $\hat{\lambda}_1^{[\ell]}, \ldots \hat{\lambda}_p^{[\ell]}$, and $\ensuremath{{\mathbb E}}igFVhat_{+}^{[\ell]}$ as the largest $\min(n-1,p)$ eigenvalues of $\ensuremath{{\mathbb E}}igFl{\ell}$. \begin{restatable}{lemma}{ltwo}\label{unique} Assume that whenever $\hat{\lambda}_i=\hat{\lambda}_j$, also $\hat{\delta}_i=\hat{\delta}_j$, for $j,i \in \{1,\ldots,p \}$. Then if $\ensuremath{\mathbf{\hat{V}}}_1$ and $\ensuremath{\mathbf{\hat{V}}}_2$ meet \eqref{recursion}, \[ \ensuremath{\mathbf{\hat{V}}}_1\ensuremath{\boldsymbol{\Lambda}_{0}} (\ensuremath{\mathbf{\hat{V}}}_1)^{\top} =\ensuremath{\mathbf{\hat{V}}}_2 \ensuremath{\boldsymbol{\Lambda}_{0}} (\ensuremath{\mathbf{\hat{V}}}_2)^{\top}. \] \end{restatable} \begin{proof} If $\hat{\lambda}_i$ is unique, the corresponding eigenvector $\mathbf{v}_i$ is the basis of the one-dimensional space $\{\mathbf{u}: (F(\ensuremath{\mathbf{\hat{V}}}) - \hat{\lambda}_i \mathbf{I})\mathbf{u}=\mathbf{0} \}$. 
As such if $\mathbf{v}_i^1$, $\mathbf{v}_i^2$ are the $i$th column of $\ensuremath{\mathbf{\hat{V}}}_1 $ and $\ensuremath{\mathbf{\hat{V}}}_2$ respectively, it must hold that $\mathbf{v}_i^1=\mathbf{v}_i^2$ or $\mathbf{v}_i^1=-\mathbf{v}_i^2$. However as $\ensuremath{\mathbf{\hat{V}}}_1 \ensuremath{\boldsymbol{\Lambda}_{0}} \ensuremath{\mathbf{\hat{V}}}_1^{\top}= \sum_{i=1}^{p} \hat{\delta}_i \mathbf{v}_i^1 (\mathbf{v}_i^1)^{\top}$ this does not affect the overall matrix. This holds true whether or not $\hat{\delta}_i$ in $\ensuremath{\boldsymbol{\Lambda}_{0}}$ is unique. Now assume there is $\hat{\lambda}_i$ with multiplicity $p_0$, whereas all other $\hat{\lambda}_j$ are unique. By assumption, $\ensuremath{\boldsymbol{\Lambda}_{0}}$ mimics this pattern and we can reorder their values such that: \[ \ensuremath{{\mathbb E}}igFVhat=\begin{pmatrix} \ensuremath{{\mathbb E}}igFVhat_1 & \mathbf{0}_{p-p_0, p_0}\\ \mathbf{0}_{p_0, p- p_0} & \ensuremath{{\mathbb E}}igFVhat_2\end{pmatrix} \text{ and } \ensuremath{\boldsymbol{\Lambda}_{0}}=\begin{pmatrix} \ensuremath{\boldsymbol{\Lambda}_{0}}^1 & \mathbf{0}_{p-p_0, p_0}\\ \mathbf{0}_{p_0, p- p_0} & \ensuremath{\boldsymbol{\Lambda}_{0}}^2\end{pmatrix}~, \] where $\ensuremath{{\mathbb E}}igFVhat_1$ contains unique ordered values and $\ensuremath{{\mathbb E}}igFVhat_2$ of size \mbox{$p_0 \times p_0$} contains one value with multiplicity. By assumption, $\ensuremath{\boldsymbol{\Lambda}_{0}}^1$ might have values with multiplicity larger one, but $\ensuremath{\boldsymbol{\Lambda}_{0}}^2$ also contains only copies of one value. We similarly decompose the newly ordered $\ensuremath{\mathbf{\hat{V}}}_{1}, \ensuremath{\mathbf{\hat{V}}}_{2}$: \[ \ensuremath{\mathbf{\hat{V}}}_{1}=[\ensuremath{\mathbf{\hat{V}}}_{11}, \ensuremath{\mathbf{\hat{V}}}_{12}] \text{ and } \ensuremath{\mathbf{\hat{V}}}_{2}=[\ensuremath{\mathbf{\hat{V}}}_{21}, \ensuremath{\mathbf{\hat{V}}}_{22}]~. 
\] The columns of $ \ensuremath{\mathbf{\hat{V}}}_{12}, \ensuremath{\mathbf{\hat{V}}}_{22}$ now form an orthogonal basis of the $p_0$-dimensional eigenvector space. As such, we can express each column of $\ensuremath{\mathbf{\hat{V}}}_{12}$ as a linear combination of columns in $ \ensuremath{\mathbf{\hat{V}}}_{22}$, that is, there exists $\mathbf{A} \in \ensuremath{{\mathbb R}}^{p_0 \times p_0}$, such that $\ensuremath{\mathbf{\hat{V}}}_{12}=\ensuremath{\mathbf{\hat{V}}}_{22} \mathbf{A}$. Moreover \begin{align*} \mathbf{I}=\ensuremath{\mathbf{\hat{V}}}_{12}^{\top} \ensuremath{\mathbf{\hat{V}}}_{12} = \mathbf{A}^{\top} \ensuremath{\mathbf{\hat{V}}}_{22}^\top \ensuremath{\mathbf{\hat{V}}}_{22} \mathbf{A}=\mathbf{A}^{\top}\mathbf{A}~. \end{align*} Thus the columns of $\mathbf{A}$ are orthogonal and since it is square, it has full rank and $\mathbf{A}\mathbf{A}^{\top}=\mathbf{I}$ holds as well. Finally, \begin{align*} \ensuremath{\mathbf{\hat{V}}}_1 \ensuremath{\boldsymbol{\Lambda}_{0}} \ensuremath{\mathbf{\hat{V}}}_1^{\top} &= \ensuremath{\mathbf{\hat{V}}}_{11} \ensuremath{\boldsymbol{\Lambda}_{0}}^1 \ensuremath{\mathbf{\hat{V}}}_{11} ^{\top} + \ensuremath{\mathbf{\hat{V}}}_{12} \ensuremath{\boldsymbol{\Lambda}_{0}}^2 \ensuremath{\mathbf{\hat{V}}}_{12}^{\top} \\ &= \ensuremath{\mathbf{\hat{V}}}_{21} \ensuremath{\boldsymbol{\Lambda}_{0}}^1 \ensuremath{\mathbf{\hat{V}}}_{21}^{\top} + \hat{\delta}_i \ensuremath{\mathbf{\hat{V}}}_{12} \ensuremath{\mathbf{\hat{V}}}_{12}^{\top}\\ &= \ensuremath{\mathbf{\hat{V}}}_{21} \ensuremath{\boldsymbol{\Lambda}_{0}}^1 \ensuremath{\mathbf{\hat{V}}}_{21}^{\top} + \hat{\delta}_i \ensuremath{\mathbf{\hat{V}}}_{22} \mathbf{A} \mathbf{A}^{\top} \ensuremath{\mathbf{\hat{V}}}_{22}^{\top}\\ &= \ensuremath{\mathbf{\hat{V}}}_2 \ensuremath{\boldsymbol{\Lambda}_{0}} \ensuremath{\mathbf{\hat{V}}}_2^{\top}~.
\end{align*} A similar approach can be used to show that the equality holds if several $\hat{\lambda}_i$ have multiplicity larger than one.\\ \null \ensuremath{\Box} \end{proof} Thus if the multiplicity of eigenvalues of $\ensuremath{{\mathbb E}}igFVhat$ implies the multiplicity of the corresponding eigenvalue in $\ensuremath{\boldsymbol{\Lambda}_{0}}$, the resulting matrix will also be the same. This is true in particular if $\hat{\lambda}_i \neq \hat{\lambda}_j$ for all $i,j$. Consequently, under the conditions of Lemma \ref{unique}, $\ensuremath{\mathbf{\hat{V}}}$ in \eqref{recursion} is unique under the equivalence relation $\sim_{\boldsymbol{\Lambda}_{0}}$ with \[ \mathbf{U}_1 \sim_{\boldsymbol{\Lambda}_{0}} \mathbf{U}_2 \iff \mathbf{U}_1 \ensuremath{\boldsymbol{\Lambda}_{0}} (\mathbf{U}_1)^{\top} =\mathbf{U}_2 \ensuremath{\boldsymbol{\Lambda}_{0}} (\mathbf{U}_2)^{\top}. \] More generally if we consider the space of equivalence classes $\ensuremath{\mathcal{O}}eq:=\ensuremath{\mathcal{O}}\setminus \sim_{\boldsymbol{\Lambda}_{0}} $ and define the metric \[ \tilde{d}_{\ensuremath{\boldsymbol{\Lambda}_{0}}}([\mathbf{U}_1], [\mathbf{U}_2]):=\| \mathbf{U}_1 \ensuremath{\boldsymbol{\Lambda}_{0}} \mathbf{U}_1^{\top} - \mathbf{U}_2 \ensuremath{\boldsymbol{\Lambda}_{0}} \mathbf{U}_2^{\top} \|_F, \] where $[\mathbf{U}]:=\{\mathbf{U}_0 \in \ensuremath{\mathcal{O}}: \mathbf{U}_0\sim_{\boldsymbol{\Lambda}_{0}} \mathbf{U}\} $, we obtain the following lemma. \begin{restatable}{lemma}{lfive}\label{uniqueoverell} Assume that \begin{align}\label{cond} \forall \ell \ \ \hat{\lambda}_i^{[\ell]}=\hat{\lambda}_j^{[\ell]} \implies \hat{\delta}_i=\hat{\delta}_j. \end{align} Then we can write iteration \eqref{iteration1} in terms of equivalence classes: \begin{align}\label{iterationwithequiv} [\Vit{\ell + 1}] = \mathcal{E}( F([\Vit{\ell}]))~. 
\end{align} Moreover there exists $[\ensuremath{\mathbf{\hat{V}}}] \in \ensuremath{\mathcal{O}}eq$ such that \eqref{recursion} holds and the generated sequence $\left([\Vit{\ell}]\right)_{\ell=1}^{\infty}$ satisfies \begin{align*} \tilde{d}_{\ensuremath{\boldsymbol{\Lambda}_{0}}}([\Vit{\ell}], [\ensuremath{\mathbf{\hat{V}}}]) \to 0~. \end{align*} \end{restatable} \begin{proof} First we note that, since the values in $\ensuremath{\boldsymbol{\Lambda}_{0}}$ are all strictly larger than zero, \[ \mathbf{U}_1 \sim_{\boldsymbol{\Lambda}_{0}} \mathbf{U}_2 \iff \mathbf{U}_1 \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\mathbf{U}_1)^{\top} =\mathbf{U}_2 \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\mathbf{U}_2)^{\top}. \] Thus for any two $\mathbf{U}_1 \in [\mathbf{U}]$, $\mathbf{U}_2 \in [\mathbf{U}]$, $F(\mathbf{U}_1)=F(\mathbf{U}_2)$, such that we may write $F$ directly as a function of the equivalence class, $F([\mathbf{U}])$. Moreover, since the eigenvalues of $F([\mathbf{V}]^{[\ell]})$ meet the multiplicity condition, the same proof as in Lemma \ref{unique} gives that any $\mathbf{U}_1 \in \mathcal{E}(F(\Vit{\ell}))$, $\mathbf{U}_2 \in \mathcal{E}(F(\Vit{\ell}))$ have $\mathbf{U}_1 \sim_{\boldsymbol{\Lambda}_{0}} \mathbf{U}_2 $. Thus \eqref{iterationwithequiv} holds and we can write $[\mathbf{V}]^{[\ell]}=[\Vit{\ell}]$. Moreover, by the same argument as above, the function value $f(\mathbf{U})$ of any member of an equivalence class is the same, such that we may again write $f([\mathbf{U}])$. Finally $\ensuremath{\mathcal{O}}eq$ is still compact with the metric $\tilde{d}_{\ensuremath{\boldsymbol{\Lambda}_{0}}}$. Indeed consider a sequence $([\mathbf{U}_n])_n$ in $\ensuremath{\mathcal{O}}eq$. For each $n$ we choose an arbtriary representative $\mathbf{U}_n \in \ensuremath{\mathcal{O}}$, to form the sequence $\left( \mathbf{U}_n \right)_n$. Since $\ensuremath{\mathcal{O}}$ is compact, this sequence will have a convergent subsequence $\left( \mathbf{U}_{n_k} \right)_k$. 
We now show that the corresponding subsequence in $\ensuremath{\mathcal{O}}eq$, $\left( [\mathbf{U}]_{n_k} \right)_k$, converges in $\ensuremath{\mathcal{O}}eq$. Indeed notice that for any convergent sequence, that is, $\left( \mathbf{U}_n \right)_n$ such that $\mathbf{U}_n \to \mathbf{U}$, it follows by the continuity of the matrix product that \begin{align*} \mathbf{U}_n \ensuremath{\boldsymbol{\Lambda}_{0}} \mathbf{U}_n ^{\top} \to \mathbf{U} \ensuremath{\boldsymbol{\Lambda}_{0}} \mathbf{U}^{\top}~, \end{align*} or \begin{align*} \tilde{d}_{\ensuremath{\boldsymbol{\Lambda}_{0}}}([\mathbf{U}_n], [\mathbf{U}]) = \| \mathbf{U}_n \ensuremath{\boldsymbol{\Lambda}_{0}} \mathbf{U}_n ^{\top} - \mathbf{U} \ensuremath{\boldsymbol{\Lambda}_{0}} \mathbf{U}^{\top} \|_F \to 0~. \end{align*} Applying this to $\left( [\mathbf{U}]_{n_k} \right)_k$, $ \tilde{d}_{\ensuremath{\boldsymbol{\Lambda}_{0}}}([\mathbf{U}_{n_k}], [\mathbf{U}]) \to 0$. Since the sequence was arbitrary, every sequence in $\ensuremath{\mathcal{O}}eq$ has a convergent subsequence in $\ensuremath{\mathcal{O}}eq$ and thus $(\ensuremath{\mathcal{O}}eq, \tilde{d}_{\ensuremath{\boldsymbol{\Lambda}_{0}}})$ is compact. Finally we can trace the same steps as in Theorem \ref{awesometheorem} to show that \begin{align*} \inf_{[\ensuremath{\mathbf{\hat{V}}}] \in \ensuremath{\mathcal{V}}_0} \tilde{d}_{\ensuremath{\boldsymbol{\Lambda}_{0}}}([\Vit{\ell}],[\ensuremath{\mathbf{\hat{V}}}] ) \to 0~, \end{align*} where now the set $\ensuremath{\mathcal{V}}_0 \subset \ensuremath{\mathcal{O}}eq$ such that \eqref{recursion} holds has only one member $[\ensuremath{\mathbf{\hat{V}}}]$.\\ \null \ensuremath{\Box} \end{proof} At first, it might seem unclear how to enforce the eigenvalue condition in Lemma \ref{uniqueoverell}. However, since $\hat{\lambda}_i^{[\ell]}$, $i=1,\ldots,p$ are eigenvalues of the sample covariance matrix of the standardized sample, Theorem 1 of \cite{Amazingsampleeigenvalueresult} applies.
This implies that the eigenvalues of $\ensuremath{{\mathbb E}}igFVhat_{+}^{[\ell]}$ are all nonzero and distinct with probability one. Thus we only need to ensure that, for $p \geq n$, the smallest $p-n+1$ eigenvalues in $\ensuremath{\boldsymbol{\Lambda}_{0}}$ are all the same. This is enforced in Algorithm \ref{VIteration} by simply setting the smallest $p-n+1$ values of $\ensuremath{\boldsymbol{\Lambda}_{0}}$ to the value with the highest multiplicity. The following lemma now obtains. \begin{restatable}{lemma}{none} Condition \eqref{cond} holds with probability one. \end{restatable} We thus obtain uniqueness of $\ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{0}} \ensuremath{\mathbf{\hat{V}}}^{\top}$ and of $\mathbf{\hat{H}}$, up to scaling. \begin{algorithm} \normalsize \textbf{Inputs}: centered data $\mathbb{Z}$, eigenvalue matrix $\ensuremath{\boldsymbol{\Lambda}_{0}}$\; \textbf{Output}: $\ensuremath{\mathbf{\hat{V}}}$\; \textbf{Hyper-parameters}: Convergence Tolerance $\epsilon$ \; Initiate $\ell=0$, $c(0)=-\infty$, $c(1)=0$, $\Vit{0}=\mathbf{I}$\; \While{$c(\ell+1)-c(\ell) > \epsilon $}{ - Calculate $\f{\Vit{\ell}}$ as in \eqref{fdef} and its eigendecomposition $\mathbf{U} \ensuremath{{\mathbb E}}igFl{\ell+1} \mathbf{U}^{\top}$, with $\ensuremath{{\mathbb E}}igFl{\ell+1}$ ordered\; - Take $\Vit{\ell + 1}=\mathbf{U}$\; - $\ell=\ell+1$\; - $c(\ell+1)= \|(\Vit{\ell-1})^{\top} \f{\Vit{\ell-1}} \Vit{\ell-1} \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} - \ensuremath{\boldsymbol{\Lambda}_{0}}^{-1} (\Vit{\ell})^{\top} \f{\Vit{\ell}} \Vit{\ell}\|_F$\; } \ensuremath{{\mathbb R}}eturn{$\ensuremath{\mathbf{\hat{V}}}=\Vit{\ell}$} \caption{VIteration($\mathbb{Z}$, $\ensuremath{\boldsymbol{\Lambda}_{0}}$)} \label{VIteration} \end{algorithm} \begin{algorithm} \normalsize \textbf{Inputs}: centered data $\mathbb{Y}$\; \textbf{Output}: $\mathbf{\hat{H}}$\; - Calculate $\mathbb{Z}\defeq \{\mathbf{Z}_1, \ldots \mathbf{Z}_T \}$ with $\mathbf{Z}_t\defeq 
\mathbf{Y}_t/\| \mathbf{Y}_t\|$, $t=1,\ldots,n$\; - Obtain the sorted eigenvalues $\ensuremath{\boldsymbol{\Lambda}_{0}}$ by applying NL to $\mathbb{Z}$\; - If $p \geq n$: Ensure that the last $p-n+1$ elements of $\ensuremath{\boldsymbol{\Lambda}_{0}}$ are equal\; - Obtain $\ensuremath{\mathbf{\hat{V}}}\defeq $ VIteration($\mathbb{Z}, \ensuremath{\boldsymbol{\Lambda}_{0}}$) using Algorithm \ref{VIteration}\; - Calculate $\tilde{\mathbf{Z}}_t$, $t=1,\ldots,n$, as in \eqref{standardized}\; - Apply NL to the sample $\tilde{\mathbf{Z}}_1, \ldots, \tilde{\mathbf{Z}}_n$ to obtain $\ensuremath{\boldsymbol{\Lambda}_{R}}$\; - Calculate $\mathbf{\hat{H}}$ as in \eqref{Hestimate}\; \ensuremath{{\mathbb R}}eturn{$\mathbf{\hat{H}}$} \caption{R-NL($\mathbb{Y}$)} \label{RNL} \end{algorithm} \begin{algorithm} \normalsize \textbf{Inputs}: centered data $\mathbb{Y}$\; \textbf{Output}: $\mathbf{\hat{H}}$\; - Calculate the diagonal matrix of sample standard deviations $\boldsymbol{\hat{\sigma}}$ of $\mathbb{Y}$\; - Calculate $\mathbb{X}\defeq \{\mathbf{X}_1, \ldots, \mathbf{X}_n\}$ as in \eqref{firststand}\; - Obtain $\mathbf{\hat{H}}_0\defeq$ R-NL($\mathbb{X}$) using Algorithm \ref{RNL}\; - Calculate $\mathbf{\hat{H}}\defeq p \cdot \boldsymbol{\hat{\sigma}}\mathbf{\hat{H}}_0 \boldsymbol{\hat{\sigma}}/\Tr(\boldsymbol{\hat{\sigma}}\mathbf{\hat{H}}_0 \boldsymbol{\hat{\sigma}})$\; \ensuremath{{\mathbb R}}eturn{$\mathbf{\hat{H}}$} \caption{R-C-NL($\mathbb{Y}$)} \label{RCNL} \end{algorithm} \subsection{Robust Correlation-Based Nonlinear Shrinkage} In the context of covariance matrix estimation, an alternative approach is to use shrinkage estimation for the correlation matrix and to estimate the vector of variances separately, after which one combines the two estimators to obtain a `final' estimator of the covariance matrix itself.
Such an approach is used by \cite{comfortNL} in a static setting (that~is, for i.i.d.\ data) and by \cite{engle:ledoit:wolf:2019,denard:engle:ledoit:wolf:2022} in a dynamic setting (that~is, for time series data). It turns out that by adapting this approach for our method, a considerable boost in performance can be achieved in some settings. In particular, we first calculate the sample variances $\hat{\sigma}_1^2, \ldots, \hat{\sigma}_p^2$ and obtain the scaled data as \begin{align}\label{firststand} \mathbf{X}_t\defeq \boldsymbol{\hat{\sigma}}^{-1} \mathbf{Y}_t~, \end{align} where $\boldsymbol{\hat{\sigma}} \defeq \ensuremath{\mbox{diag}}(\hat{\sigma}_1, \ldots, \hat{\sigma}_p)$. Then \mbox{R-NL} is applied to $\mathbf{Z}_t \defeq \boldsymbol{\hat{\sigma}}^{-1} \mathbf{Y}_t/\|\boldsymbol{\hat{\sigma}}^{-1} \mathbf{Y}_t \|$ to obtain $\hat{\mathbf{H}}_0\defeq p \ensuremath{\mathbf{\hat{V}}} \ensuremath{\boldsymbol{\Lambda}_{R}}\ensuremath{\mathbf{\hat{V}}}^{\top}/\Tr(\ensuremath{\boldsymbol{\Lambda}_{R}})$. From these two inputs, we calculate the `final' estimator of~$\mathbf{H}$~as \begin{align}\label{Hestimatewithsig} \mathbf{\hat{H}}\defeq p\cdot\boldsymbol{\hat{\sigma}} \hat{\mathbf{H}}_0 \boldsymbol{\hat{\sigma}}/\Tr(\boldsymbol{\hat{\sigma}} \hat{\mathbf{H}}_0 \boldsymbol{\hat{\sigma}})~. \end{align} The approach is called ``R-C-NL'' and is summarized in Algorithm \ref{RCNL}. As we demonstrate in Section \ref{sec:mc}, this `variation' on our methodology can have a substantial (beneficial) effect on the performance of the estimator. However, a potential disadvantage of this approach is that \mbox{R-C-NL} is no longer rotation-equivariant. \section{Simulation Study} \label{sec:mc} We compare our two proposed methods to several competitors in various simulation scenarios.
From the collection of approaches that use Tyler's method together with (linear) shrinkage, we tried to pick the ones most appropriate for our analysis, without handpicking them to showcase the performance of our method. In particular, we did not pick methods that require the choice of a tuning parameter (such as \cite{existence_uniqueness_algorithms, Shrinkage_2017, convexpenalties}) or require $p < n$ such as \cite{Shrinkage_2021}. This leads us to the following benchmarks: \begin{itemize} \item LS: the linear shrinkage estimator of \cite{Wolflinear}. \item NL: the quadratic inverse shrinkage (QIS) estimator of \cite{QIS2020}. \item R-LS: the robust linear shrinkage estimator of \cite{linearshrinkageinheavytails}. An estimator that is ``widely used and performs well in practice'' \cite{existence_uniqueness_algorithms}. \item R-GMV-LS: the robust linear shrinkage estimator of \cite{yang2014minimum}. This estimator is designed for global-minimum-variance portfolios. \item R-A-LS: the regularized estimator of \cite{zhang2016automatic}, a variation of the robust linear shrinkage approach. \item R-TH: the robust estimator of \cite{thresholdingandTyler}, based on thresholding Tyler's M-estimator. \end{itemize} \noindent Moreover, we consider the following six structures for the true dispersion matrix $\mathbf{H}$: \begin{itemize} \item (I)\ Identity: The identity matrix $\mathbf{I}$. \item (A)\ AR: the $(i,j)$ element of $\mathbf{H}$ is $0.7^{\mid i - j\mid}$, as in \cite{linearshrinkageinheavytails}. \item (F)\ Full matrix: $1$ on the diagonal and $0.5$ on the off-diagonal. \item (I)sig\ Base: diagonal matrix, where $20\%$ of the diagonal elements are equal to $1$, $40\%$ of the diagonal elements are equal to $3$, and $40\%$ of the diagonal elements are equal to $10$, as in \cite{WolfNL, Analytical_Shrinkage, QIS2020}. 
\item (A)sig\ AR (non-constant diag): start with $\mathbf{H}$ as in (A) and then pre- and post-multiply with the square-root of the diagonal matrix as in (I)sig. \item (F)sig\ Full Matrix (non-constant diag): start with $\mathbf{H}$ as in (F) and then pre- and post-multiply with the square-root of the diagonal matrix as in (I)sig. \end{itemize} Together these settings cover a wide range of structures for the dispersion matrix $\mathbf{H}$, from sparse to a ``full'' matrix with nonzero elements everywhere. Most papers related to our method simulate from a multivariate $t$-distribution with $3$ or $4$ degrees of freedom. In contrast, we let the degrees of freedom vary on a grid from $3$ to ``infinity'', that is, to~the \mbox{Gaussian} case. It appears the actual sample size does not matter as much as the concentration ratio in the relative performance of the methods. As such we choose two concentration ratios, $2/3$ and $4/3$, with a fixed sample size of $n=300$. In Appendix \ref{sec:further_results} the same analysis is done for $n=150$ and $p \in \{100, 200\}$. In addition, it contains an analysis comparing \mbox{R-NL} to the methodology of \cite{ourclosestcompetitor} over various shrinkage parameters $\alpha$ and using $\boldsymbol{\Lambda}_0$ as target eigenvalues. Finally, as a measure for comparing the different methods we consider the Percentage Relative Improvement in Average Loss (PRIAL) defined as $$\text{PRIAL}(\hat{\mathbf{H}}_*) \defeq 100 \times \left( 1- \frac{\ensuremath{{\mathbb E}}[\Vert\hat{\mathbf{H}}_* - \mathbf{H}\Vert^2]}{\ensuremath{{\mathbb E}}[\Vert\ensuremath{ \mathbf{\hat{S}} } - \mathbf{H}\Vert^2]}\right) \%~,$$ where $\hat{\mathbf{H}}_*$ denotes a generic estimator of $\mathbf{H}$.
Note that our definition of PRIAL differs from the one in \cite{QIS2020}: For both definitions, the value of 0 corresponds to the (scaled) sample covariance matrix; but the value of 100 corresponds to the true matrix in our definition whereas it corresponds to the `oracle' estimator in the class of \cite{stein:1975,stein:1986} in the definition of \cite{QIS2020}. It does not make sense for us to use the definition of \cite{QIS2020}, since our estimator, unlike their QIS estimator, is not in the Steinian class. We also note that all matrices in the above PRIAL are scaled to have trace $p$. Figures \ref{fig:SimN200} and \ref{fig:SimN400} show the results. It is immediately visible that in the majority of considered settings both \mbox{R-NL} and \mbox{R-C-NL} outperform the other estimators. One major exception is the AR case, where the thresholding algorithm R-TH dominates all other methods. Also, in the setting (I), for both $p=200$ and $p=400$, LS and R-A-LS recognize that shrinking maximally towards a multiple of the identity matrix is optimal, reaching a PRIAL of almost $100\%$ through all $\nu$. Although \mbox{R-NL} and \mbox{R-C-NL} are close, they cannot quite match this strong performance. For the cases (F) \ and (F)sig, when $p=200$, there is a performance drop of our methods compared to LS and NL for $\nu \geq 30$. However, the values of the methods again stay close. As one would expect, the PRIAL values of \mbox{R-NL} and \mbox{R-C-NL} are similar in the first row, where the true matrix~$\mathbf{H}$ has constant diagonal elements. Moreover, although NL is greatly improved upon with both \mbox{R-NL} and \mbox{R-C-NL} for small to moderate $\nu$, both converge to the performance of NL as the degrees of freedom increase. In the case (I)sig, where the diagonal elements are non-constant, \mbox{R-C-NL} attains a strong boost compared to \mbox{R-NL} and NL, such that it outperforms all other benchmarks by a considerable margin.
The improvement is smaller, but consistent, for (A)sig \ for both $p$ and for (F)sig \ in the case $p=400$. We note that the consistently high performance of both methods through most settings is quite remarkable. In Appendix \ref{sec:further_results} further simulations with similar findings are presented. Given that other methods, such as that of \cite{zhang2016automatic}, perform quite well on balance, but can collapse in some cases, this consistency across settings is particularly noteworthy. It is also worth noting how well the linear shrinkage methods perform in the setting (I)sig\ with non-constant diagonal elements. This may seem counterintuitive, since the shrinkage target is a multiple of the identity matrix. Indeed, the diagonal elements of the linear shrinkage estimates are close to constant in these cases. However, at least for heavy tails, the errors the sample matrix admits on the off-diagonal elements far outweigh the errors of constant diagonal elements. Additionally, LS, to which most other papers compare their methods, is extremely competitive with the robust methods (even if $\nu$ is very small). This is especially true for $\mathbf{H}$ with non-constant diagonal elements, which most of the previous papers do not consider. The good performance of LS might also be due to the relatively high sample size used in this paper compared to others.
\begin{figure*} \caption{Percentage Relative Improvement in Average Loss (PRIAL) for various dispersion matrix structures, $\nu \in \{3,4,\dots,14,15,30,60,90,120,240,500,\infty\}$.} \label{fig:SimN200} \end{figure*} \begin{figure*} \caption{Percentage Relative Improvement in Average Loss (PRIAL) for various dispersion matrix structures, $\nu \in \{3,4,\dots,14,15,30,60,90,120,240,500,\infty\}$.} \label{fig:SimN400} \end{figure*} \section{Empirical Study} \label{sec:empirics} From the Center for Research in Security Prices (CRSP) we download daily simple percentage returns of the NYSE, AMEX, and NASDAQ stock exchanges; see \url{https://www.crsp.org/node/1/activetab\%3Ddocs} for documentation. The historical data ranges from 02.01.1976 until 31.12.2020 and contains $p^* = 23'131$ stocks in total. The outline of the empirical section is inspired by \cite{Gian2019}. We conduct a rolling window type exercise, where we consider an estimation window of one year (252 days) and another one with five years (1260 days). In each rolling window we estimate the covariance matrix and perform a minimum variance portfolio optimization with no short selling limits. In the unconstrained case, the global minimum variance portfolio problem is formulated as $$\min\limits_{\tilde{w}} \tilde{w}^{\top} \boldsymbol{\hat{\Sigma}} \tilde{w}$$ $$\text{subject to} \quad \tilde{w}^{\top}\mathbbm{1} = 1~,$$ where $\mathbbm{1}$ denotes a conformable vector of ones. In the absence of any short-sales constraints the problem has the analytical solution $$w \defeq \frac{\boldsymbol{\hat{\Sigma}}^{-1}\mathbbm{1}}{\mathbbm{1}^{\top}\boldsymbol{\hat{\Sigma}}^{-1}\mathbbm{1}}~.$$ The resulting number of shares (rather than portfolio weights) is then kept fixed for the following 21~days (out-of-sample window). Afterwards the rolling window moves forward by 21~days, the covariance matrix is re-estimated and the weights are updated accordingly.
In short, we rebalance the portfolio once a `month', and there are no transaction costs during the `month', where our definition of a `month' corresponds to 21 consecutive trading days rather than a calendar month. Depending on the size of the estimation window, the out-of-sample period starts on 14-Jan-1977 and 13-Jan-1981, respectively. This results in 528 and 480 out-of-sample months, respectively. In each month we only consider stocks which have no more than 32 days of missing values during the estimation window and a complete return in the out-of-sample window. The missing values in the remaining universe are set to $0$. Further, every month, only the $p$ stocks with the highest market capitalization are considered, where $p~\in~\{100,500,1000\}$. The solution of the minimum variance portfolio only depends on the second moment, that is, the covariance matrix. Therefore, as a portfolio evaluation criterion, the out-of-sample standard deviation is the leading criterion of interest. Hence, in the main text we consider the following two portfolio performance measures: \begin{itemize} \item SD: annualized standard deviation of portfolio returns. \item TO: average monthly turnover given by $$\text{TO} \defeq \frac{1}{(\tau-1)}\sum_{h=1}^{\tau-1}\sum_{j=1}^{p^{*}}\mid w_{j,h+1} - w_{j,h}^\text{hold}\mid~.$$ Here $p^*$ denotes the size of the `combined' investment universe over both months, $h$ and $h+1$; in general some stocks leave the universe, while the same number of new stocks enter the universe, as one advances from month $h$ to month $h+1$ such that $p^* \ge p$. Furthermore, $\tau$ denotes the number of out-of-sample months (528 and 480, respectively) and $$w_{j,h}^\text{hold} \defeq \frac{w_{j,h}\alpha_{j,h}}{\sum_{j=1}^{p^{*}}w_{j,h}\alpha_{j,h}}~,$$ with $$\alpha_{j,h} \defeq \prod_{s=0}^{20}(1+r_{j,t_h+s})$$ representing the return evolution in the days of month $h$.
Note: if stock $j$ is not contained in the universe during month $h$, we set $w_{j,h} = 0$ and $r_{j,t_h+s} = 0 \ \forall \ s\in\{0,1,\dots,20\}$. \end{itemize} \noindent We consider the same competitors as in Section \ref{sec:mc}, with one exception: We remove the thresholding method ``R-TH'', since the matrix inverse necessary for the portfolio optimization cannot always be computed. We also add the (scaled) sample covariance matrix $\ensuremath{ \mathbf{\hat{S}} }$, which we denote with $S$. Additionally, in order to test whether the difference of the out-of-sample standard deviation between NL and \mbox{R-C-NL} is significantly different from zero, we apply the HAC inference of \cite{ledoit2011robust}. Table~\ref{table:Tab:Performance_CRSP} presents the main results; for additional results pertaining to alternative performance measures, see Appendix \ref{sec:further_results}. The findings are as follows: \begin{itemize} \item \mbox{R-C-NL} has the lowest SD in every scenario. \item \mbox{R-C-NL} has a significantly lower SD than NL in every scenario. \item \mbox{R-NL} has a lower SD than NL, except for $p=1000$, where they have a similar SD. \item The difference in SD between \mbox{R-NL} and \mbox{R-C-NL} increases as $p$ increases. \item For both $n=252$ and $n=1260$, the robust estimators have a lower SD than the non-robust estimators when $p$ is small. For large $p$ the non-robust estimators perform similarly or better than the robust estimators. This holds for linear shrinkage and nonlinear shrinkage estimators; an exception is R-GMV-LS in the case $n=1260$. \item For $p=500$ and $p=1000$, NL always outperforms the robust linear shrinkage estimators. \item For $p=100$, NL and the robust linear shrinkage estimators perform similarly. \item Except for the case $p=100$, \mbox{R-C-NL} has the lowest TO in every scenario. \item \mbox{R-NL} always has lower TO than NL. 
\end{itemize} We note that the improvement of \mbox{R-NL} over NL in terms of SD is relatively small, and indeed substantial improvements in SD only occur for \mbox{R-C-NL}. In other words, the strongest improvement appears to stem from using NL on the correlation matrix. However, NL is known to be a very strong benchmark in unconstrained PF optimization. Consequently, already the comparatively small gain of \mbox{R-NL} over NL leads to the lowest SD in five out of six considered scenarios. \begin{table*}[!ht] \footnotesize \caption{Out-of-sample portfolio statistics for the largest $p$ stocks on CRSP and an estimation window of $n=252$ and $n=1260$ days, respectively. Significant outperformance of the introduced R-C-NL estimator over NL in terms of SD is indicated by asterisks: **~indicates significance at the 0.05 level and ***~indicates significance at the 0.01 level.} \begin{adjustwidth}{-1.25cm}{} \begingroup \setlength{\tabcolsep}{6pt} \renewcommand{1.25}{1.25} \begin{tabular}{L{0.5cm}C{1.8cm}C{1.8cm}C{1.8cm}C{1.8cm}C{1.8cm}C{1.8cm}C{1.8cm}C{1.8cm}} \hline & S & LS & NL & R-LS & R-GMV-LS & R-A-LS & R-NL & R-C-NL \\ \midrule \hline \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$n=252$}} \\ \hline \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$p=100$}} \\ \hline SD & $13.41$ & $12.36$ & $11.91$ & $12.29$ & $11.91$ & $12.20$ & $11.86$ & $\textcolor{blue}{11.60}\text{***}$ \\ \hline TO & $2.23$ & $1.40$ & $1.02$ & $1.38$ & $\textcolor{blue}{0.79}$ & $1.25$ & $0.91$ & $0.86$ \\ \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$p=500$}} \\ \hline SD & $-$ & $9.65$ & $9.10$ & $10.01$ & $9.41$ & $9.65$ & $9.08$ & $\textcolor{blue}{8.49}\text{***}$ \\ \hline TO & $-$ & $2.29$ & $1.16$ & $2.72$ & $1.59$ & $2.18$ & $1.04$ & $\textcolor{blue}{1.03}$ \\ \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$p=1000$}} \\ \hline SD & $-$ & $8.33$ & $8.23$ & $8.38$ & $8.45$ & $8.34$ & $8.23$ & $\textcolor{blue}{7.04}\text{***}$ \\ \hline TO & 
$-$ & $1.59$ & $1.01$ & $1.76$ & $0.93$ & $1.56$ & $0.92$ & $\textcolor{blue}{0.90}$ \\ \midrule \hline \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$n=1260$}} \\ \hline \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$p=100$}} \\ \hline SD & $12.83$ & $12.78$ & $12.68$ & $12.67$ & $12.69$ & $12.68$ & $12.59$ & $\textcolor{blue}{12.55}\text{**}$ \\ \hline TO & $0.65$ & $0.60$ & $0.57$ & $0.54$ & $\textcolor{blue}{0.38}$ & $0.53$ & $0.49$ & $0.48$ \\ \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$p=500$}} \\ \hline SD & $10.25$ & $9.82$ & $9.35$ & $9.72$ & $9.44$ & $9.69$ & $9.32$ & $\textcolor{blue}{9.12}\text{***}$ \\ \hline TO & $1.97$ & $1.53$ & $0.92$ & $1.45$ & $0.72$ & $1.41$ & $0.79$ & $\textcolor{blue}{0.67}$ \\ \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$p=1000$}} \\ \hline SD & $12.55$ & $8.91$ & $8.04$ & $9.04$ & $8.12$ & $8.91$ & $8.05$ & $\textcolor{blue}{7.46}\text{***}$ \\ \hline TO & $6.56$ & $2.38$ & $0.94$ & $2.52$ & $0.84$ & $2.35$ & $0.80$ & $\textcolor{blue}{0.64}$ \\ \hline \end{tabular} \endgroup \label{table:Tab:Performance_CRSP} \end{adjustwidth} \end{table*} \normalsize \section{Conclusion} \label{sec:conclusion} This paper combines nonlinear shrinkage with Tyler's method, thereby creating a fast and stable algorithm to estimate the dispersion matrix in elliptical models; the resulting estimator is robust against both heavy tails and high dimensions. We developed the algorithm by separating calculation of the eigenvalues and the eigenvectors and showed that eigenvectors could be obtained by an iterative procedure. We also showed that the resulting \mbox{R-NL} estimator is still rotation-equivariant, although it no longer is contained in the Steinian class of rotation-equivariant estimators that keeps the vectors of the sample covariance matrix and only shrinks the sample eigenvalues. 
We also compared our approach to existing methods from the literature using both extensive simulations and an application to real data, showcasing its favorable performance. Last but not least, it turns out that a further performance boost can be obtained by using our method on scaled data, which basically amounts to separating the problem of estimating a covariance matrix into estimation of individual variances and estimation of the correlation matrix; the resulting estimator is called \mbox{R-C-NL}. \appendix \section{Further Empirical and Simulation Results} \label{sec:further_results} Figures \ref{fig:SimN100T150} and \ref{fig:SimN200T150} show the simulation results for $n=150$ and $p=100$ and $p=200$, respectively. Figure \ref{fig:SimN200T300_alpha} compares with the ``SRTy'' estimator of \cite{ourclosestcompetitor}, by using the NL shrinkage eigenvalues as target eigenvalues. That is, the target matrix of eigenvalues is given as $\boldsymbol{\Lambda}_0$ as in the main paper. The shrinkage strength $\beta$ is given by \[ \beta \defeq \frac{\alpha}{1+\alpha}, \] in \cite{ourclosestcompetitor}, so that $\beta=0$ corresponds to no shrinkage, whereas $\beta=1$ corresponds to maximal shrinkage (or setting $\alpha=\infty$). Interestingly, in all but the settings (F) and (F)sig, setting $\beta=1$, and thus maximally shrinking towards $\boldsymbol{\Lambda}_0$, appears beneficial. In fact, in these settings the performance of $\beta=1$ is about the same as \mbox{R-NL}. However, in the settings (F) and (F)sig, the situation is reversed and performance gets worse, the higher $\beta$ is chosen. With the additional updating step performed in \mbox{R-NL}, performance is about the same as choosing $\beta=0$, showing that \mbox{R-NL} can have a substantial benefit over using SRTy with $\beta=1$.
\begin{figure*} \caption{Percentage Relative Improvement in Average Loss (PRIAL) for various dispersion matrix structures, $\nu \in \{3,4,\dots,14,15,30,60,90,120,240,500,\infty\}$.} \label{fig:SimN100T150} \end{figure*} \begin{figure*} \caption{Percentage Relative Improvement in Average Loss (PRIAL) for various dispersion matrix structures, $\nu \in \{3,4,\dots,14,15,30,60,90,120,240,500,\infty\}$.} \label{fig:SimN200T150} \end{figure*} \begin{figure*} \caption{Percentage Relative Improvement in Average Loss (PRIAL) for various dispersion matrix structures, $\nu \in \{3,4,\dots,14,15,30,60,90,120,240,500,\infty\}$.} \label{fig:SimN200T300_alpha} \end{figure*} For our empirical application in Section~\ref{sec:empirics} of the main paper, the following four additional performance measures are reported: \begin{itemize} \item AV: annualized average simple percentage portfolio return. \item TR: final cumulative simple percentage portfolio return. \item MD: percentage maximum drawdown given by $$\text{MD} \defeq \max_{\bar{t}\in (0,n)}\left(\max_{t\in (0,\bar{t})}\left[\frac{\tilde{R}_t-\tilde{R}_{\bar{t}}}{\tilde{R}_t}\right]\right)~,$$ where $\tilde{R}_t$ is the cumulative simple percentage portfolio return at day $t$.
\item IR: annualized information ratio given by \[\text{IR} \defeq \frac{\text{AV}}{\text{SD}}~.\] \end{itemize} \begin{table*}[!ht] \footnotesize \caption{Additional out-of-sample portfolio statistics for the daily $p$ largest stocks on CRSP and an estimation window of $n=252$ and $n=1260$ days, respectively.} \begin{adjustwidth}{-1.25cm}{} \begingroup \setlength{\tabcolsep}{6pt} \renewcommand{\arraystretch}{1.25} \begin{tabular}{L{0.5cm}C{1.8cm}C{1.8cm}C{1.8cm}C{1.8cm}C{1.8cm}C{1.8cm}C{1.8cm}C{1.8cm}} \hline & S & LS & NL & R-LS & R-GMV-LS & R-A-LS & R-NL & R-C-NL \\ \midrule \hline \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$n=252$}} \\ \hline \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$p=100$}} \\ \hline IR & $0.69$ & $0.80$ & $0.87$ & $0.81$ & $0.87$ & $0.82$ & $0.87$ & $0.91$ \\ \hline AV & $9.24$ & $9.94$ & $10.31$ & $9.97$ & $10.37$ & $9.97$ & $10.32$ & $10.55$ \\ \hline TR & $3,813.45$ & $5,548.60$ & $6,710.69$ & $5,641.21$ & $6,902.58$ & $5,677.55$ & $6,767.45$ & $7,607.62$ \\ \hline MD & $46.56$ & $35.20$ & $38.48$ & $37.94$ & $33.40$ & $37.05$ & $35.48$ & $33.26$ \\ \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$p=500$}} \\ \hline IR & $-$ & $1.17$ & $1.21$ & $1.16$ & $1.20$ & $1.20$ & $1.22$ & $1.35$ \\ \hline AV & $-$ & $11.27$ & $11.01$ & $11.57$ & $11.26$ & $11.56$ & $11.07$ & $11.50$ \\ \hline TR & $-$ & $11,493.55$ & $10,447.08$ & $12,921.46$ & $11,521.84$ & $13,053.61$ & $10,776.80$ & $13,303.94$ \\ \hline MD & $-$ & $29.41$ & $29.24$ & $29.92$ & $30.33$ & $28.56$ & $30.61$ & $28.22$ \\ \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$p=1000$}} \\ \hline IR & $-$ & $1.47$ & $1.44$ & $1.46$ & $1.42$ & $1.46$ & $1.43$ & $1.74$ \\ \hline AV & $-$ & $12.21$ & $11.87$ & $12.23$ & $12.04$ & $12.21$ & $11.76$ & $12.22$ \\ \hline TR & $-$ & $18,342.65$ & $15,877.34$ & $18,502.15$ & $16,942.34$ & $18,347.06$ & $15,080.18$ & $19,254.29$ \\ \hline MD & $-$ & $33.88$ & $34.39$ & $34.52$ & $34.82$ & $33.63$ & $35.19$ & $26.75$ \\ \midrule
\hline \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$n=1260$}} \\ \hline \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$p=100$}} \\ \hline IR & $0.93$ & $0.94$ & $0.94$ & $0.94$ & $0.95$ & $0.94$ & $0.93$ & $0.95$ \\ \hline AV & $11.95$ & $12.06$ & $11.97$ & $11.89$ & $12.01$ & $11.90$ & $11.74$ & $11.86$ \\ \hline TR & $8,428.17$ & $8,863.69$ & $8,585.57$ & $8,323.75$ & $8,709.89$ & $8,341.21$ & $7,862.18$ & $8,267.73$ \\ \hline MD & $39.08$ & $37.83$ & $38.08$ & $36.21$ & $34.00$ & $36.12$ & $35.31$ & $34.98$ \\ \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$p=500$}} \\ \hline IR & $1.08$ & $1.14$ & $1.21$ & $1.16$ & $1.24$ & $1.17$ & $1.23$ & $1.30$ \\ \hline AV & $11.04$ & $11.23$ & $11.33$ & $11.23$ & $11.67$ & $11.30$ & $11.47$ & $11.82$ \\ \hline TR & $6,599.24$ & $7,256.96$ & $7,703.75$ & $7,271.52$ & $8,779.64$ & $7,487.65$ & $8,162.71$ & $9,462.95$ \\ \hline MD & $33.16$ & $31.64$ & $32.92$ & $30.85$ & $31.07$ & $30.79$ & $31.48$ & $31.56$ \\ \hline & \multicolumn{8}{c}{\rule[-2.5mm]{0mm}{8mm}\emph{$p=1000$}} \\ \hline IR & $0.93$ & $1.37$ & $1.52$ & $1.37$ & $1.53$ & $1.39$ & $1.53$ & $1.67$ \\ \hline AV & $11.71$ & $12.23$ & $12.26$ & $12.38$ & $12.46$ & $12.38$ & $12.29$ & $12.50$ \\ \hline TR & $7,799.17$ & $11,257.77$ & $11,731.10$ & $11,893.95$ & $12,676.34$ & $11,951.51$ & $11,857.04$ & $13,137.98$ \\ \hline MD & $37.38$ & $33.98$ & $32.18$ & $33.42$ & $32.07$ & $33.58$ & $32.28$ & $27.65$ \\ \hline \end{tabular} \endgroup \label{table:Tab:Performance_CRSP_appendix} \end{adjustwidth} \end{table*} \end{document}
\begin{document} \date{} \title{Variable dispersion beta regressions with parametric link functions } \author{ Diego Ramos Canterle\thanks{Bacharelado em Estatística and LACESM, Universidade Federal de Santa Maria, Santa Maria, RS, Brazil, e-mail: \texttt{[email protected]}} \and F\'abio Mariano Bayer\thanks{Departamento de Estatística and LACESM, Universidade Federal de Santa Maria, Santa Maria, RS, Brazil, e-mail: \texttt{[email protected]}} } \maketitle \begin{abstract} This paper presents a new class of regression models for continuous data restricted to the interval $(0,1)$, such as rates and proportions. The proposed class of models assumes a beta distribution for the variable of interest with regression structures for the mean and dispersion parameters. These structures consider covariates, unknown regression parameters, and parametric link functions. Link functions depend on parameters that model the relationship between the random component and the linear predictors. The symmetric and asymmetric Aranda-Ordaz link functions are considered in detail. Depending on the parameter values, these link functions refer to particular cases of fixed links such as logit and complementary log-log functions. Joint estimation of the regression and link function parameters is performed by maximum likelihood. Closed-form expressions for the score function and Fisher's information matrix are presented. Aspects of large sample inferences are discussed, and some diagnostic measures are proposed. A Monte Carlo simulation study is used to evaluate the finite sample performance of point estimators. Finally, a practical application that employs real data is presented and discussed. \textbf{Keywords}: Aranda-Ordaz link function, maximum likelihood estimator, parametric link functions, variable dispersion beta regression. \textbf{Mathematics Subject Classification (2000)}: MSC 62J99, MSC 62-07.
\end{abstract} \section{Introduction} The beta regression model introduced by \cite{Ferrari2004} has broad practicality for modeling variables belonging to the continuous interval $(0,1)$. In this model, it is assumed that the dependent variable $Y$ has a beta distribution, where the mean of $Y$ is modeled by a regression structure involving unknown parameters, covariates, and a link function. An extension of this model is the beta regression with varying dispersion, which has been discussed by \cite{Paolino2001}, \cite{smithson2006}, \cite{simas2010}, \cite{Pinheiro2011} and \cite{Bayer2015}. In this broader model, the dispersion parameter of $Y$ is modeled by a regression structure in the same way as the conditional mean. The manner in which the dispersion parameter is modeled has direct implications on the efficiency of the estimators of the mean regression structure parameters~\citep{Smyth1999, Bayer2015}. In addition to improving the inferences about the mean structure parameters, many applications are directly interested in modeling the dispersion to identify the sources of data variability~\citep{Smyth1999}. In the variable dispersion beta regression model, the relationship between the mean and dispersion parameters of the random component $Y$ and its linear predictors are established through link functions. In this model, considering the beta density parameterization with mean $\mu \in (0,1)$ and dispersion $\sigma \in (0,1)$, as in \cite{Souza2012} and \cite{Bayer2015}, it is possible to use link functions $g(\cdot)$, such that $g(x):(0,1) \rightarrow \mathbb R$. Typical fixed link functions in these cases include the logit, probit, log-log (loglog), complementary log-log (cloglog), and Cauchy functions \citep{Koenker2009}. The fact that the possible values of $\mu$ and $\sigma$ belong to the same standard unit interval $(0,1)$ means that these link functions can be considered for both the mean and the dispersion structure. 
In practice, in addition to the selection of important covariates in the mean and dispersion regression structures, as broadly discussed by \cite{Zhao2014} and \cite{Bayer2015}, the correct specification of the link functions deserves special attention. An incorrect specification of these functions may distort the inferences of the model parameters \citep[Pag. 401]{McCullagh1989} leading to misinterpretations and errors in the model predictions. To circumvent the problem of selecting an appropriate link function, a parametric link function can be considered \citep{Guerrero1982,Scallan1984,Stukel1988,Czado1994,Kaiser1997, Smith2003, Czado2006,Koenker2009, Adewale2010, Ramalho2011, Gomes2013, Dehbi2014, Taneichi2014, Geraci2015, Dehbi2016}. Such functions involve an unknown parameter that must be estimated. In general, depending on the value of this parameter, some known link functions arise as special cases. The link functions proposed by Aranda-Ordaz \citep{Ordaz1981} are the parametric type most widely used in cases where the parameters of interest lie in the interval $(0,1)$. Special cases of the Aranda-Ordaz link functions include the logit and cloglog functions. Some regression models with parametric link functions have been described in the literature. \cite{Guerrero1982} used a transformation of the Box-Cox link function in binary response models. \cite{Scallan1984} proposed generalized linear models (GLM) \citep{McCullagh1989} with general parametric link functions by presenting certain estimation aspects and identifying some special cases. \cite{Stukel1988} adjusted the binary response models to consider a two-parameter link function. \cite{Czado1994} developed a two-parameter link function that modifies the two tails of the function. \cite{Kaiser1997} considered the likelihood inferences of link function parameters in GLM. \cite{Czado2006} chose the link function in GLM using Bayes factors. 
\cite{Koenker2009} studied the selection of the link function in binary data using the parametric link functions of Gosset and Pregibon. Quantile regression with Aranda-Ordaz link function is considered by \cite{Dehbi2016}. According to \cite{Czado1997}, the maximum likelihood fit in GLM is improved by using parametric link functions in place of canonical link functions. Regarding the beta regression model, some problems associated with the correct specification of the link function have been investigated. \cite{Oliveira2013} evaluated the performance of the RESET test by checking the misspecification of the link function in the beta regression model, and \cite{Pereira2013} evaluated the RESET test in the inflated beta regression model. \cite{Andrade2007} generalized the seminal model proposed by \cite{Ferrari2004} by considering the Aranda-Ordaz link function for the regression structure of the mean; however, this approach still considered constant dispersion. Nevertheless, there is a lack of studies focusing on the specification of the link function in the dispersion submodel. Based on the above discussion, we propose a generalization of the variable dispersion beta regression model, considering parametric link functions for the structures of both $\mu$ and $\sigma$. The parametric estimators of the link functions for the mean and dispersion submodels are proposed together with other parameters for the regression structures. The estimation of these parameters is performed using maximum likelihood estimation. Diagnostic measures and tools for model selection are also proposed. This paper unfolds as follows. Section \ref{s:model} presents the beta regression model with parametric link functions. In Section \ref{s:emv}, we discuss all aspects of maximum likelihood estimation. Section \ref{s:diag} introduces some diagnostic measures to check the goodness-of-fit in the resulting model. 
Section \ref{S:AO} presents two special cases of the parametric link functions based on the symmetric and asymmetric Aranda-Ordaz \citep{Ordaz1981} families of link functions. The finite sample performance of the estimators is assessed in Section \ref{s:simu}. Section \ref{s:apli} presents and discusses an application to real data on religious disbelief. Our concluding remarks are given in Section \ref{s:conclu}. \section{The model} \label{s:model} The beta regression model proposed by \cite{Ferrari2004} considers a constant precision parameter $\phi$ throughout the observations. Nevertheless, by erroneously assuming a constant $\phi$, the losses in efficiency for the estimators can be substantial, as discussed by \cite{Bayer2015}. In beta regression with varying dispersion, the precision parameter is assumed to be variable throughout the observations and modeled by covariates, unknown parameters, and one link function, in the same way as the mean. In this work, as in that reported by \cite{Souza2012} and \cite{Bayer2015}, a beta density reparameterization is considered. Rather than focusing on the precision parameter $\phi$, a dispersion parameter $\sigma$ is considered. With such parameterization, the beta density is written as follows: \begin{align}\label{E:densidadenova} f(y;\mu,\sigma)&=\frac{\Gamma\left(\frac{1-\sigma^2}{\sigma^2}\right)}{\Gamma\left(\mu\left(\frac{1-\sigma^2}{\sigma^2}\right)\right)\Gamma\left((1-\mu)\left(\frac{1-\sigma^2}{\sigma^2}\right)\right)} y^{\mu\left(\frac{1-\sigma^2}{\sigma^2}\right)-1}(1-y)^{(1-\mu)\left(\frac{1-\sigma^2}{\sigma^2}\right)-1}, \end{align} where $0<\mu<1$, $0<\sigma<1$, and $\Gamma(u)=\int_0^\infty t^{u-1} e^{-t}\rm{d}t$ is the gamma function, for $u>0$. The two parameters indexing the density assume values in the standard unit interval $(0,1)$, which enables the same link function to be used in the two regression structures. 
The expectation and variance of $Y$ are given by $\Bbb{E}(Y)=\mu$ and ${\rm Var}(Y)=V(\mu)\sigma^2$, respectively, where $V(\mu)=\mu(1-\mu)$ is the variance function. However, the proposed model is still useful for response variable restricted to the double bounded interval $(a,b)$, where $a$ and $b$ are known scalars, $a<b$. In this case, we would model $(Y-a)/(b-a)$ instead of modeling $Y$ directly \citep{Ferrari2004,smithson2006,Zimprich2010}. Let $Y_1, \ldots, Y_n$ be independent random variables, where each $Y_t$, $t=1,\ldots, n$, has a density given by \eqref{E:densidadenova} with mean $\mu_t$ and dispersion $\sigma_t$. The variable dispersion beta regression model with parametric link functions is defined by \begin{align*} g_1(\mu_{t},\lambda_1)=\sum\limits_{i=1}^{r}x_{ti}\beta_{i}=\eta_{1t}, \\ g_2(\sigma_{t},\lambda_2)=\sum\limits_{j=1}^{s}z_{tj}\gamma_{j}=\eta_{2t}, \end{align*} where $\boldsymbol{\beta}=(\beta_{1}, \ldots ,\beta_{r})^{\top} \in \mathbb R^{r}$ and $ \boldsymbol{\gamma}=(\gamma_{1}, \ldots ,\gamma_{s})^{\top}\in \mathbb R^{s}$ are the vectors of unknown regression parameters ($r+s+2=q<n$), $\boldsymbol{x}^{\top}_{t}=(x_{t1}, \ldots, x_{tr})$ and $\boldsymbol{z}^{\top}_{t}=(z_{t1}, \ldots, z_{ts})$ represent the $t$th observations of the explanatory variables, which are assumed to be fixed and known, and $\eta_{1t}=\boldsymbol{x}^{\top}_{t}\boldsymbol{\beta}$ and $\eta_{2t}=\boldsymbol{z}^{\top}_{t}\boldsymbol{\gamma}$ are the linear predictors for the mean and dispersion, respectively. Finally, $g_1(\cdot,\cdot)$ and $g_2(\cdot,\cdot)$ are strictly monotonic in the first argument and twice differentiable in both arguments, such that $g_\delta :(0,1)\rightarrow\mathbb R$, for $\delta=1,2$. The second arguments of $g_\delta(\cdot,\cdot)$, $\lambda_1\in \Lambda_1$ and $\lambda_2\in \Lambda_2$, are the link function parameters. 
Further, note that \begin{align} \mu_t=g^{-1}_1(\eta_{1t},\lambda_1),\label{E:invmu}\\ \sigma_t=g^{-1}_2(\eta_{2t},\lambda_2).\label{E:invsigma} \end{align} The parameters $\lambda_1$ and $\lambda_2$ are shape parameters that generally influence the symmetry and heaviness of tails of the fitted curves for $\mu$ and $\sigma$ \citep{Stukel1988}. Unlike models that consider fixed link functions, the proposed model captures different relationships between the linear predictors $\eta_{\delta t}$, $\delta=1,2$, and their respective parameters $\mu_t$ and $\sigma_t$. Depending on the parametric value $\lambda$ for a given function $g(\cdot, \lambda )$, there is a particular family of link functions given by \begin{align*} \mathcal{G} = \left\lbrace g(\cdot,\lambda): \lambda \in \Lambda \right\rbrace. \end{align*} Different link function families can be considered. When the parameters of interest are in the continuous interval $(0,1)$, such as $\mu_t$ and $\sigma_t$ in the proposed model, possibilities include the symmetric and asymmetric link functions proposed by \cite{Ordaz1981}, Box-Cox transformation link function~\citep{Guerrero1982}, Gosset link function~\citep{Koenker2009}, Pregibon link function~\citep{Pregibon1980}, and generalized logit function considered by \cite{Ramalho2011}. In particular, the Pregibon link function has two parameters, and is not contextualized in this work. Gosset link function fails to consider the possible asymmetric relationship between the random component and the linear predictors. In this regard, and in addition to the overall results of any one-parametric links, this work presents results for the symmetric and asymmetric Aranda-Ordaz link functions. \section{Likelihood inference} \label{s:emv} The maximum likelihood estimation of the parametric vector $\boldsymbol{\theta}=(\boldsymbol{\beta}^\top \!, \boldsymbol{\gamma}^\top \!, \lambda_1, \lambda_2)^\top$ is given by maximizing the logarithm of the likelihood function. 
Given a sample size $n$ and considering the form of the density in \eqref{E:densidadenova}, the log-likelihood is given by \begin{align}\label{E:logvero} \ell(\boldsymbol{\theta})=\sum\limits_{t=1}^{n} \ell_{t}(\mu_{t},\sigma_{t}), \end{align} where \begin{align*} \ell_{t}(\mu_{t},\sigma_t)&=\log\Gamma\left(\frac{1-\sigma_t^2}{\sigma_t^2}\right)-\log\Gamma\left(\mu_{t}\frac{1-\sigma_t^2}{\sigma_t^2}\right)-\log \Gamma\left((1 - \mu_{t})\frac{1-\sigma_t^2}{\sigma_t^2}\right) \\ &+ \left(\mu_{t}\frac{1-\sigma_t^2}{\sigma_t^2}-1\right) \log y_{t}+\left((1-\mu_{t})\frac{1-\sigma_t^2}{\sigma_t^2}-1\right)\log(1-y_{t}), \end{align*} in which $\mu_t$ and $\sigma_t$ are given by the regression structures in \eqref{E:invmu} and \eqref{E:invsigma}, respectively. By deriving the log-likelihood function in \eqref{E:logvero} with respect to the parametric vector $\boldsymbol{\theta}$, we obtain the score vector $U(\boldsymbol{\theta})= \left( U_{\boldsymbol{\beta}}(\boldsymbol{\theta})^\top, U_{\boldsymbol{\gamma}}(\boldsymbol{\theta})^\top, U_{\lambda_1}(\boldsymbol{\theta}), U_{\lambda_2}(\boldsymbol{\theta}) \right)^\top $. Details of the analytical derivations are given in detail in the Appendix. The score function with respect to $\beta$ is given by \begin{align*} U_{\boldsymbol{\beta}}(\boldsymbol{\theta})= \boldsymbol{X}^{\top}\boldsymbol{\Sigma} \boldsymbol{T}(\boldsymbol{y}^*-\boldsymbol{\mu}^*), \end{align*} where $\boldsymbol{X}$ is the $n\times r$ matrix in which the $t$th row is $\boldsymbol{x}_t$, $\boldsymbol{\Sigma}\!=\!{\rm diag}\!\left(\!\frac{1-\sigma_1^2}{\sigma_1^2}, \ldots ,\! 
\frac{1-\sigma_n^2}{\sigma_n^2}\!\right)$, $\boldsymbol{T} = {\rm diag} \bigg( \left[\frac{\partial g_1(\mu_{1},\lambda_1)}{\partial\mu_1}\right]^{-1},$ $ \ldots , \left[\frac{\partial g_1(\mu_{n},\lambda_1)}{\partial\mu_n}\right]^{-1}\bigg)$, $\boldsymbol{y}^*=(y^*_1,\ldots,y^*_n)^{\top}$, $\boldsymbol{\mu}^*=(\mu^*_1,\ldots,\mu^*_n)^{\top}$, with $y_t^*=\log (y_t/(1-y_t))$, $\mu_t^*=\psi\left(\mu_t\frac{1-\sigma_t^2}{\sigma_t^2}\right)-\psi\left((1-\mu_t)\frac{1-\sigma_t^2}{\sigma_t^2}\right)$, and $\psi(\cdot)$ is the digamma function, i.e., $\psi(u)=\frac{d \log \Gamma(u)}{d u}$. The score function with respect to $\boldsymbol{\gamma}$ is given by \begin{align*} U_{\boldsymbol{\gamma}}(\boldsymbol{\theta})= \boldsymbol{Z}^{\top}\boldsymbol{H}\boldsymbol{a}, \end{align*} where $\boldsymbol{Z}$ is the $n\times s$ matrix whose $t$th row is $\boldsymbol{z}_t$, $\boldsymbol{H}= {\rm diag} \left( \left[\frac{\partial g_2(\sigma_{1},\lambda_2)}{\partial\sigma_1}\right]^{-1},\ldots , \left[\frac{\partial g_2(\sigma_{n},\lambda_2)}{\partial\sigma_n}\right]^{-1}\right)$, $\boldsymbol{a}=(a_1,\ldots,a_n)^{\top}$, with \begin{align*} a_t=-\dfrac{2}{\sigma^3_t}\bigg[\mu_t(y_t^*-\mu_t^*)+\psi\left(\frac{1-\sigma_t^2}{\sigma_t^2}\right) -\psi\left((1-\mu_t)\frac{1-\sigma_t^2}{\sigma_t^2}\right)+\log (1-y_t)\bigg]. \end{align*} The score functions with respect to $\lambda_1$ and $\lambda_2$ are given by \begin{align*} U_{\lambda_1}(\boldsymbol{\theta})&=\sum_{t=1}^{n}\frac{1-\sigma_t^2}{\sigma_t^2}(y^*_t-\mu^*_t)\rho_t,\\ U_{\lambda_2}(\boldsymbol{\theta})&=\sum_{t=1}^{n}a_t\varrho_t, \end{align*} respectively, where $\rho_t = \dfrac{\partial \mu_t}{\partial \lambda_1}$ depends on the parametric link function to be used in the mean submodel and $\varrho_t = \dfrac{\partial\sigma_t}{\partial \lambda_2}$ depends on the link function considered in the dispersion submodel. 
In Section \ref{S:AO}, the quantities $\rho_t = \dfrac{\partial \mu_t}{\partial \lambda_1}$ and $\varrho_t = \dfrac{\partial\sigma_t}{\partial \lambda_2}$ are presented for the symmetric and asymmetric Aranda-Ordaz link functions. The maximum likelihood estimators (MLEs) for the beta regression model with parametric link functions are obtained by solving the following nonlinear system: \begin{align}\label{E:vescore} \left\{ \begin{array}{ll} U_{\boldsymbol{\beta}}(\boldsymbol{\theta})&= 0 \\ U_{\boldsymbol{\gamma}}(\boldsymbol{\theta})&= 0 \\ U_{\lambda_1}(\boldsymbol{\theta})&= 0 \\ U_{\lambda_2}(\boldsymbol{\theta})&= 0 \end{array} \right. . \end{align} Solving Equation \eqref{E:vescore} requires the use of nonlinear optimization algorithms. In this work, the quasi-Newton BFGS method~\citep{press} was used for the computational implementations. Fisher's information matrix, which is useful for large sample inferences, requires the expectations of the second derivatives of the log-likelihood function. Details of the analytical derivation of these quantities are given in the Appendix. 
The joint information matrix for the parametric vector $\boldsymbol{\theta}$ is given by \begin{align}\label{E:MIF} \boldsymbol{K}=K(\boldsymbol{\theta})= \begin{pmatrix} K_{(\beta,\beta)} & K_{(\beta, \gamma)} & K_{(\beta,\lambda_1)} & K_{(\beta, \lambda_2)} \\ K_{(\gamma,\beta)} & K_{(\gamma, \gamma)} & K_{(\gamma,\lambda_1)} & K_{(\gamma, \lambda_2)} \\ K_{(\lambda_1,\beta)} & K_{(\lambda_1, \gamma)} & K_{(\lambda_1,\lambda_1)} & K_{(\lambda_1, \lambda_2)} \\ K_{(\lambda_2,\beta)} & K_{(\lambda_2, \gamma)} & K_{(\lambda_2,\lambda_1)} & K_{(\lambda_2, \lambda_2)} \end{pmatrix}, \end{align} where $K_{(\beta ,\beta)}= \boldsymbol{X}^\top \boldsymbol{\Sigma} \boldsymbol{W} \boldsymbol{X}$, $K_{(\beta,\gamma)}=K_{(\gamma ,\beta )}^{\top }=\boldsymbol{X}^{\top }\boldsymbol{C} \boldsymbol{T} \boldsymbol{H} \boldsymbol{Z}$, $K_{(\beta,\lambda_1)}=K_{(\lambda_1,\beta )}^{\top }=\boldsymbol{X}^{\top }\boldsymbol{V} \boldsymbol{T} \boldsymbol{\rho}$, $K_{(\beta,\lambda_2)}=K_{(\lambda_2,\beta )}^{\top }=\boldsymbol{X}^{\top }\boldsymbol{C} \boldsymbol{T} \boldsymbol{\varrho}$, $K_{(\gamma,\gamma)} = \boldsymbol{Z}^{\top}\boldsymbol{D}^*\boldsymbol{H}\boldsymbol{H}^{\top}\boldsymbol{Z}$, $K_{(\gamma,\lambda_1)} = K_{(\lambda_1,\gamma)}^{\top} = \boldsymbol{Z}^{\top}\boldsymbol{C}\boldsymbol{H}\boldsymbol{\rho}$, $K_{(\gamma,\lambda_2)} = K_{(\lambda_2,\gamma)}^{\top} = \boldsymbol{Z}^{\top}\boldsymbol{D}^*\boldsymbol{H}\boldsymbol{\varrho}$, $K_{(\lambda_1,\lambda_1)} = \boldsymbol{\rho}^{\top}\boldsymbol{V}\boldsymbol{\rho}$, $K_{(\lambda_1,\lambda_2)} = K_{(\lambda_2,\lambda_1)}^{\top} = \boldsymbol{\rho}^{\top}\boldsymbol{C}\boldsymbol{\varrho}$, and $K_{(\lambda_2,\lambda_2)} = \boldsymbol{\varrho}^{\top}\boldsymbol{D}^*\boldsymbol{\varrho}$, with $\boldsymbol{\rho}=(\rho_1,\ldots,\rho_n)^{\top}$, $\boldsymbol{\varrho}=(\varrho_1,\ldots,\varrho_n)^{\top}$, $\boldsymbol{W} = {\rm diag}(w_1,\ldots,w_n)$, $\boldsymbol{C} = {\rm diag}(c_1,\ldots,c_n)$, $\boldsymbol{V} = 
{\rm diag}(\nu_1,\ldots,\nu_n)$, and $\boldsymbol{D}^* = {\rm diag}(d^*_1,\ldots,$\\$d^*_n)$. Finally, \begin{align*} w_t &= \dfrac{1-\sigma_t^2}{\sigma_t^2} \left[ \psi'\left( \mu_t \dfrac{1-\sigma_t^2}{\sigma_t^2} \right) + \psi'\left( (1-\mu_t) \dfrac{1-\sigma_t^2}{\sigma_t^2} \right) \right] \left( \dfrac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t} \right)^{-2},\\ c_t &= \dfrac{1-\sigma^2_t}{\sigma^2_t}\dfrac{2}{\sigma_t^3}\bigg[ (1-\mu_t)\psi'\left( (1-\mu_t) \dfrac{1-\sigma_t^2}{\sigma_t^2} \right) -\mu_t\psi'\left( \mu_t \dfrac{1-\sigma_t^2}{\sigma_t^2} \right) \bigg],\\ \nu_t &= \left(\dfrac{1-\sigma_t^2}{\sigma_t^2}\right)^2 \bigg[ \psi'\left( \mu_t \dfrac{1-\sigma_t^2}{\sigma_t^2} \right) + \psi'\left( (1-\mu_t) \dfrac{1-\sigma_t^2}{\sigma_t^2} \right) \bigg],\\ d_t^* &= \dfrac{4}{\sigma^6_t}\bigg[\!-\!\psi'\left(\dfrac{1\!-\!\sigma_t^2}{\sigma_t^2}\right) + \mu_t^2\psi'\left(\mu_t\dfrac{1\!-\!\sigma_t^2}{\sigma_t^2}\right) + (1\!-\!\mu_t)^2\psi'\left((1\!-\!\mu_t)\dfrac{1\!-\!\sigma_t^2}{\sigma_t^2}\right)\bigg], \end{align*} where $\psi'(\cdot)$ is the trigamma function, i.e., $\psi'(u)=\frac{d \psi(u)}{d u}$, for $u>0$. According to the concept of orthogonality by \cite{Cox1987}, \eqref{E:MIF} can be used to ascertain that the model parameters are not orthogonal because the information matrix is not a diagonal block matrix. 
\subsection{Large sample inference} \label{S:IGA} Under the usual regularity conditions for MLE \citep{Pawitan2001}, the joint distribution of the MLEs is approximately $q$-multivariate normal when the sample size is large, i.e., \begin{align*} \left(\begin{array}{llll} \widehat{\boldsymbol{\beta}}\\ \widehat{\boldsymbol{\gamma}}\\ \widehat{\lambda}_1\\ \widehat{\lambda}_2 \end{array} \right ) \sim \mathcal{N}_{q} \left(\begin{array}{llll} \left(\begin{array}{llll} \boldsymbol{\beta} \\ \boldsymbol{\gamma} \\ \lambda_1\\ \lambda_2 \end{array} \right ), \boldsymbol{K}^{-1} \end{array} \right ) , \end{align*} where $\widehat{\boldsymbol{\beta}}$, $\widehat{\boldsymbol{\gamma}}$, $\widehat{\lambda}_1$, and $\widehat{\lambda}_2$ are the MLEs of $\boldsymbol{\beta}$, $\boldsymbol{\gamma}$, $\lambda_1$, and $\lambda_2$, respectively, and $\boldsymbol{K}^{-1}$ is the inverse of Fisher's information matrix. The Wald confidence intervals for the model parameters $\theta_m$, $m=1,\ldots,q$, are defined by \citep{Pawitan2001,Ferrari2004}: \begin{align*} [\widehat{\theta}_m - \Phi^{-1}(1-\alpha/2)\widehat{{\rm se}}(\widehat{\theta}_m) ; \widehat{\theta}_m + \Phi^{-1}(1-\alpha/2)\widehat{{\rm se}}(\widehat{\theta}_m)], \end{align*} where $\widehat{\theta}_m$ represents the MLE of $\theta_m$, the standard error of $\widehat{\theta}_m$ is given by $\widehat{{\rm se}}(\widehat{\theta}_m)=[{\rm diag}({\rm \widehat{cov}}(\widehat{\boldsymbol{\theta}}))]_m^{1/2}$, in which ${\rm \widehat{cov}}(\widehat{\boldsymbol{\theta}}) = \boldsymbol{K}^{-1}(\widehat{\boldsymbol{\theta}})$ is the asymptotic variance and covariance matrix of $\widehat{\boldsymbol{\theta}}$, $\Phi^{-1}$ is the quantile function of the standard normal distribution, and $\alpha$ is the nominal level of the confidence interval.
Similar to \cite{Ferrari2004}, for $\mu_t$ and $\sigma_t$, for $\delta=1,2$ respectively, we have the following confidence intervals: \begin{align*} [g_\delta^{-1}(\widehat{\eta}_{\delta t}-\Phi^{-1}(1-\alpha/2)\widehat{{\rm se}}(\widehat{\eta}_{\delta t}),\widehat{\lambda}_\delta); g_\delta^{-1}(\widehat{\eta}_{\delta t}+\Phi^{-1}(1-\alpha/2)\widehat{{\rm se}}(\widehat{\eta}_{\delta t}),\widehat{\lambda}_\delta)], \end{align*} where the standard errors of $\widehat{\eta}_{\delta t}$, for $\delta=1,2$, are estimated by $\widehat{{\rm se}}(\widehat{\eta}_{1t})=(x_t \widehat{{\rm cov}}(\widehat{\beta}) x_t^\top)^{1/2}$ and $\widehat{{\rm se}}(\widehat{\eta}_{2t})=(z_t \widehat{{\rm cov}}(\widehat{\gamma}) z_t^\top)^{1/2}$. To test the hypotheses on the parameters, we consider the null hypothesis $\mathcal{H}_0:\theta_m=\theta_{m}^0$ versus $\mathcal{H}_1:\theta_m \neq \theta_{m}^0$. The Wald test can be considered by using the following statistic \citep{Pawitan2001}: \begin{align*} z = \dfrac{\widehat{\theta}_m-\theta_{m}^0}{\widehat{{\rm se}}(\widehat{\theta}_m)}. \end{align*} Because the $z$ statistic has an asymptotically standard normal distribution under $\mathcal{H}_0$, the test is performed by comparing the calculated $z$ statistic with the usual quantiles of the standard normal distribution. For more general hypotheses, $\mathcal{H}_0:\boldsymbol{\theta}_I=\boldsymbol{\theta}_I^0$ versus $\mathcal{H}_1:\boldsymbol{\theta}_I\neq\boldsymbol{\theta}_I^0$, where $\boldsymbol{\theta}=(\boldsymbol{\theta}_I^\top,\boldsymbol{\theta}_N^\top)^\top$ has dimension $q$, $\boldsymbol{\theta}_I$ is the vector of parameters of interest with dimension $\iota$, and $\boldsymbol{\theta}_N$ is the vector of nuisance parameters with dimension $q-\iota$, four test statistics can be considered, namely: the likelihood ratio (LR)~\citep{Person1928}, Wald (W)~\citep{Wald1943}, score (S)~\citep{Rao1948}, and gradient (G)~\citep{Terrell2002}. 
Under $\mathcal{H}_0$ and the usual conditions of regularity, the four test statistics have the asymptotic chi-squared distribution with $\iota$ degrees of freedom ($\chi_{\iota}^{2}$), where $\iota$ is the number of restrictions imposed by the null hypothesis \citep{Vargas2014}. The test can be performed by comparing the calculated value of the statistic considered, i.e., LR, W, S, or G, with the usual quantile of $\chi_{\iota}^{2}$. \section{Diagnostics} \label{s:diag} After estimating the model, it is necessary to evaluate possible departures from the model assumptions, as well as the detection of unadjusted or aberrant points. This section introduces some diagnostic measures to determine the correct adjustment of the proposed model. Residuals are an important measure in checking for deviations from the unknown population model, disparate observations, and adjustment quality. Initially, the standardized ordinary residual is proposed. This is given by \begin{align*} r_t = \dfrac{y_t-\widehat{\mu}_t}{\sqrt{\widehat{\rm Var}(Y_t)}}, \end{align*} where ${\rm \widehat{Var}}(Y_t)=\widehat{\mu}_t(1-\widehat{\mu}_t)\widehat{\sigma}_t^2$. Additionally, the standardized weighted residual 2 can be used, as proposed by \cite{Ferrari2011} for the varying dispersion beta regression model. This is given by \begin{align*} r_t^{pp} = \dfrac{y_t^*-\widehat{\mu}_t^*}{\sqrt{{\rm \widehat{Var}}(y_t^*)(1-h_{tt})}}, \end{align*} where ${\rm \widehat{Var}}(y_t^*) = \psi'\left(\widehat{\mu}_t\frac{1-\widehat{\sigma}_t^2}{\widehat{\sigma}_t^2}\right)+\psi'\left((1-\widehat{\mu}_t)\frac{1-\widehat{\sigma}_t^2}{\widehat{\sigma}_t^2}\right)$, and $h_{tt}$ is the $t$th diagonal element of the `hat matrix' $\mathbf{H}=(\widehat{\boldsymbol{W}}\widehat{\boldsymbol{\Sigma}})^{1/2}\boldsymbol{X}(\boldsymbol{X}^\top \widehat{\boldsymbol{\Sigma}}\widehat{\boldsymbol{W}}\boldsymbol{X})^{-1}\boldsymbol{X}^\top (\widehat{\boldsymbol{\Sigma}}\widehat{\boldsymbol{W}})^{1/2}$.
This residual provides an improved approximation of the standard normal distribution when the model is correctly adjusted and when a model with fixed links is considered \citep{espinheira2008b}. In prior simulations and analyses, the performance of the $r_t^{pp}$ residuals was found to be good in the proposed model considering parametric links. A residual chart is typically used to analyze the residuals against their respective indices. In this chart, the residuals are expected to be randomly distributed around zero, and no more than $5\%$ of the values can occur outside of the $[-2,2]$ interval. To verify that the distribution assumed for the dependent variable is adequate, we can examine half-normal plots with simulated envelopes by evaluating the quality of the fitted model \citep{Atkinson1981}. The simulated envelope can be built as follows \citep{Atkinson1985,Ferrari2004}: \begin{enumerate}[(i)] \item \label{i} fit the model and generate a simulated sample set of $n$ independent observations using the fitted model as if it were the true model; \item \label{ii} fit the model from the generated sample, calculate the absolute values of the residuals and arrange them in order; \item repeat steps \eqref{i} and \eqref{ii} $k$ times; \item consider the $n$ sets of the $k$ order statistics; for each set, calculate the quantile $\alpha/2$, the mean, and the quantile $1-\alpha/2$; \item plot these values and the ordered residuals of the original sample set against the $\Phi^{-1}((t+n-1/8)/(2n+1/2))$ scores. \end{enumerate} No more than $\alpha\times 100\%$ of the observations are expected to occur outside the envelope bands. A very large proportion of points lying outside the bands suggests that the model is inadequate. The overall influence measures of each observation under the estimates of the model parameters can be considered using Cook's distance \citep{Cook1977}.
In this study, we use the Cook-like distance proposed by \cite{espinheira2008} for the beta regression model. This distance combines leverage measures and the model residuals, and is defined by \begin{align*} C_t = \dfrac{h_{tt}}{1-h_{tt}}(r_t^{pp})^2. \end{align*} To check for possible points of influence, it is common to produce a chart of $C_t$ against their respective $t$ indices. Candidate models can be selected using information criteria, such as the generalized Akaike information criterion (GAIC)~\citep{Akaike1983,Rigby2005}, which is given by \begin{align*} {\rm GAIC} = -2\ell(\widehat{\boldsymbol{\theta}})+\mathcal{P}q, \end{align*} where $\mathcal{P}$ can take different real values. Values of $\mathcal{P}=2$ and $\mathcal{P}={\rm log}(n)$, give the Akaike information criterion (AIC) \citep{Akaike1974} and the Schwarz information criterion (SIC) \citep{Schwarz1978}, respectively. These criteria take into account the maximized log-likelihood penalized by the number of parameters in the adjusted model. For the selection of competitive models, that with the lowest GAIC value should be chosen. To ascertain the correct model specification, the RESET tests \citep{Ramsey1969} are recommended. \cite{McCullagh1989} suggested using a RESET-type test in GLM, whereas \cite{Pereira2013} and \cite{Oliveira2013} argued they are suitable for the beta regressions. To run the RESET-type test for the proposed model, $\widehat{\boldsymbol{\eta}}_1^2$ should be added as a covariate in both the mean and dispersion submodels. This new model should be fitted with $\lambda_1$ and $\lambda_2$ fixed to their previously estimated values. 
The parameters of the artificial covariates $\widehat{\boldsymbol{\eta}}_1^2$ should then be tested according to the $\mathcal{H}_0:(\boldsymbol{\beta}_{r+1},\boldsymbol{\gamma}_{s+1})=(0,0)$ null hypothesis, where $\boldsymbol{\beta}_{r+1}$ and $\boldsymbol{\gamma}_{s+1}$ are the parameters pertaining to the artificial covariates in the mean and dispersion submodels, respectively. If $\mathcal{H}_0$ is not rejected, the model is specified correctly; otherwise, the model is specified incorrectly. To run the RESET-type test, any one of the four test statistics cited in Subsection \ref{S:IGA} can be used. We can use the LR, W, S, and G statistics to test the incorrect specification of some fixed link function. Considering the asymmetric Aranda-Ordaz link function, we can test $H_0: (\lambda_1, \lambda_2) = (1,1)$ to check whether the logit link function for mean and dispersion submodels is appropriate. If $\mathcal{H}_0$ is not rejected, the fixed logit links are correctly specified. As a global measure of the goodness-of-fit, we consider the generalized coefficient of determination \citep{Nagelkerke1991}. This is given by \begin{align*} R^2_{G} = 1 - \left( \dfrac{L_{null}}{L_{fit}} \right)^{(2/n)} = 1 - {\rm exp}\left( -\dfrac{2}{n}\left[ \ell(\widehat{\boldsymbol{\theta}})-\ell(0) \right] \right), \end{align*} where $\ell(0)$ is the maximized log-likelihood of the null model, i.e., under constant mean and dispersion\footnote{When constant mean and dispersion are considered, no regression structures are considered; thus, there are no estimates for $\lambda_\delta$.}, $\ell(\widehat{\boldsymbol{\theta}})$ is the maximized log-likelihood of the fitted model, $\ell(0)={\rm log}L_{null}$, and $\ell(\widehat{\boldsymbol{\theta}})={\rm log}L_{fit}$. $R^2_{G}$ measures the proportion of the variability of $Y$ that can be explained by the fitted model; this lies in the interval $[0,1]$. A higher value of $R^2_{G}$ implies that the model predictions are more accurate. 
\section{Aranda-Ordaz link functions - two particular cases} \label{S:AO} As mentioned earlier in this paper, the Aranda-Ordaz link function families \citep{Ordaz1981} can be used to relate the mean and dispersion parameters with their respective linear predictors. We considered these link functions because they are two one-parameter families of symmetric and asymmetric links that include several well-known links as particular cases \citep{Dehbi2016}. They have also been considered in a multitude of regression models \citep{Morgan1992, Colosimo2000, Smith2003, Adewale2010, Gomes2013, Dehbi2014, Taneichi2014, Geraci2015, Dehbi2016}. Because the two parameters $\mu$ and $\sigma$ of the proposed model assume values in the same interval $(0,1)$, the relationships established immediately below are valid for both of these parameters. The symmetric Aranda-Ordaz link function is given by: \begin{align*} \eta = {\rm g}(\mu,\lambda) = \frac{2 \left(\mu^{\lambda }-(1-\mu)^{\lambda }\right)}{\lambda \left(\mu^{\lambda }+(1-\mu)^{\lambda }\right)}, \end{align*} where $\lambda \neq 0$ and $\mu \in (0,1)$. The symmetry refers to the fact that ${\rm g}(\mu,\lambda) = -{\rm g}(1-\mu,\lambda)$ and ${\rm g}(\mu,\lambda) = {\rm g}(\mu,-\lambda)$ \citep{Dehbi2016}. This link function family reduces to the linear link function if $\lambda = 1$, to the logit if $\lambda \rightarrow 0$, close to the probit link if $\lambda = 0.39$, and close to the arc sine link function if $\lambda = 0.67$ \citep{Ordaz1981, Dehbi2016}. Figure~\ref{F:ao-sym} shows some different forms of the symmetric Aranda-Ordaz link function considering different values of the link function parameter $\lambda$. 
For this symmetric link function, the inverse function can be written as follows: \begin{align*} \mu = {\rm g}^{-1}(\eta,\lambda) = \frac{\left(\frac{\lambda \eta}{2}+1\right)^{\frac{1}{\lambda }}}{\left(1-\frac{\lambda \eta}{2}\right)^{\frac{1}{\lambda }}+\left(\frac{\lambda \eta}{2}+1\right)^{\frac{1}{\lambda }}}. \end{align*} \begin{figure} \caption{Aranda-Ordaz link functions for different values of $\lambda$.} \label{F:ao-sym} \label{F:ao-asym} \label{F:f1} \end{figure} In the general formulation of the proposed model presented in Section \ref{s:emv}, the score vector and Fisher's information matrix involve the quantities $\left(\frac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t}\right)^{-1}$, $\left(\frac{\partial g_2(\sigma_t,\lambda_2)}{\partial\sigma_t}\right)^{-1}$, $\boldsymbol{\rho}$, and $\boldsymbol{\varrho}$, which depend on the considered parametric link functions. Considering the symmetric Aranda-Ordaz link function in both regression structures, we have: \begin{align*} &\dfrac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t} = \frac{4 (\mu_t (1-\mu_t))^{\lambda_1 -1}}{\left(\mu_t^{\lambda_1 }+(1-\mu_t)^{\lambda_1 }\right)^2},\\ &\dfrac{\partial g_2(\sigma_t,\lambda_2)}{\partial\sigma_t} = \frac{4 (\sigma_t (1-\sigma_t))^{\lambda_2 -1}}{\left(\sigma_t^{\lambda_2 }+(1-\sigma_t)^{\lambda_2 }\right)^2},\\ &\dfrac{\partial \mu_t}{\partial \eta_{1t}} =\frac{4 \left(4-\lambda_1^2 \eta_{1t}^2\right)^{\frac{1}{\lambda_1}-1}}{\left((2-\lambda_1 \eta_{1t})^{\frac{1}{\lambda_1}}+(\lambda_1 \eta_{1t}+2)^{\frac{1}{\lambda_1}}\right)^2},\\ &\dfrac{\partial \sigma_t}{\partial \eta_{2t}} =\frac{4 \left(4-\lambda_2^2 \eta_{2t}^2\right)^{\frac{1}{\lambda_2}-1}}{\left((2-\lambda_2 \eta_{2t})^{\frac{1}{\lambda_2}}+(\lambda_2 \eta_{2t}+2)^{\frac{1}{\lambda_2}}\right)^2},\\ &\rho_t =\dfrac{\partial\mu_t}{\partial\lambda_1} = \frac{2 \left(4-\lambda_1 ^2 \eta_{1t}^2\right)^{\frac{1}{\lambda_1 }-1} \left(\left(\lambda_1 ^2 \eta_{1t}^2-4\right) \tanh 
^{-1}\left(\frac{\lambda_1 \eta_{1t}}{2}\right)+2 \lambda_1 \eta_{1t}\right)}{\lambda_1 ^2 \left((2-\lambda_1 \eta_{1t})^{\frac{1}{\lambda_1 }}+(\lambda_1 \eta_{1t}+2)^{\frac{1}{\lambda_1 }}\right)^2}, \end{align*} and \begin{align*} \varrho_t&= \dfrac{\partial\sigma_t}{\partial\lambda_2} = \frac{2 \left(4-\lambda_2 ^2 \eta_{2t}^2\right)^{\frac{1}{\lambda_2 }-1} \left(\left(\lambda_2 ^2 \eta_{2t}^2-4\right) \tanh ^{-1}\left(\frac{\lambda_2 \eta_{2t}}{2}\right)+2 \lambda_2 \eta_{2t}\right)}{\lambda_2 ^2 \left((2-\lambda_2 \eta_{2t})^{\frac{1}{\lambda_2 }}+(\lambda_2 \eta_{2t}+2)^{\frac{1}{\lambda_2 }}\right)^2}. \end{align*} The asymmetric Aranda-Ordaz link function is given by~\citep{Ordaz1981}: \begin{align*} \eta = g(\mu,\lambda) = {\rm log} \left( \dfrac{(1-\mu)^{-\lambda}-1}{\lambda} \right) , \end{align*} where $\lambda > -1/e^{\eta}$, $\mu \in (0,1)$, and its inverse can be written as follows: \begin{align*} \mu = g^{-1}(\eta,\lambda) = 1-\left[ 1+\lambda {\rm exp}(\eta) \right]^{-\frac{1}{\lambda}}. \end{align*} The asymmetric Aranda-Ordaz function is more flexible than the symmetric version and it captures the possible asymmetry between the linear predictors and the parameters $\mu$ and $\sigma$. In Figure~\ref{F:ao-asym}, this relationship can be seen for different values of the parameter $\lambda$. The logit and cloglog link functions are special cases for $\lambda = 1$ and $\lambda \rightarrow 0$, respectively. Compared with the usual logit function, $\mu$ or $\sigma$ tends to 1 more quickly as $\eta_\delta$ increases when $\lambda<1$; and for $\lambda>1$, the parameters $\mu$ or $\sigma$ tend more slowly to 1 as $\eta_\delta$ increases. It is notable that a link function with a lower parameter value results in a greater variation in $\mu$ and/or $\sigma$ in relation to $\eta_\delta$. 
In contrast, very high values for the link function parameter might indicate that the parameters $\mu$ and/or $\sigma$ are not variable and should be estimated without independent variables, i.e., as constants. Considering the asymmetric Aranda-Ordaz link function the quantities needed for score vector and Fisher's information matrix are given by: \begin{align*} &\dfrac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t} = \dfrac{\lambda_1(1-\mu_t)^{-(\lambda_1+1)}}{(1-\mu_t)^{-\lambda_1}-1},\\ &\dfrac{\partial g_2(\sigma_t,\lambda_2)}{\partial\sigma_t} = \dfrac{\lambda_2(1-\sigma_t)^{-(\lambda_2+1)}}{(1-\sigma_t)^{-\lambda_2}-1},\\ &\dfrac{\partial \mu_t}{\partial \eta_{1t}} = {\rm exp}(\eta_{1t})(1+\lambda_1{\rm exp}(\eta_{1t}))^{\frac{-(1+\lambda_1)}{\lambda_1}},\\ &\dfrac{\partial \sigma_t}{\partial \eta_{2t}} = {\rm exp}(\eta_{2t})(1+\lambda_2{\rm exp}(\eta_{2t}))^{\frac{-(1+\lambda_2)}{\lambda_2}},\\ &\rho_t=\dfrac{\partial\mu_t}{\partial\lambda_1} = \dfrac{1}{\lambda_1}\left[ \dfrac{1}{({\rm exp}(-\eta_{1t})+\lambda_1)} - \dfrac{{\rm log}(1+\lambda_1{\rm exp}(\eta_{1t}))}{\lambda_1} \right] (1+\lambda_1 {\rm exp}(\eta_{1t}))^{-\frac{1}{\lambda_1}}, \end{align*} and \begin{align*} \varrho_t=\dfrac{\partial\sigma_t}{\partial\lambda_2} = \dfrac{1}{\lambda_2}\left[ \dfrac{1}{({\rm exp}(-\eta_{2t})+\lambda_2)} - \dfrac{{\rm log}(1+\lambda_2{\rm exp}(\eta_{2t}))}{\lambda_2} \right] (1+\lambda_2 {\rm exp}(\eta_{2t}))^{-\frac{1}{\lambda_2}}. \end{align*} From these quantities, we can obtain the score vector and Fisher's information matrix given in Section \ref{s:emv}. These quantities assume that $\mu$ depends on $\lambda_1$ and $\sigma$ depends on $\lambda_2$. \section{Numerical evaluation} \label{s:simu} To assess the finite sample performance of the point estimators, this section provides a numerical evaluation using Monte Carlo simulations. 
This assessment considers the mean, bias, relative bias (RB), standard deviation (SD), and mean squared error (MSE) of the point estimates. We used $R=50,000$ Monte Carlo replications in each scenario, and considered sample sizes of $n=100$ and $n=500$. For each Monte Carlo replication, $n$ instances of the random variable $Y_t$ were generated with the density function in \eqref{E:densidadenova}, where the mean and dispersion parameters are given by $\mu_t=g_1^{-1}(\eta_{1t},\lambda_1)$ and $\sigma_t=g_2^{-1}(\eta_{2t},\lambda_2)$, respectively. As discussed in Section~\ref{S:AO}, we considered two families of Aranda-Ordaz link functions, namely: symmetric and asymmetric. The values of $\boldsymbol{\beta}$, $\boldsymbol{\gamma}$, $\lambda_1$, and $\lambda_2$ are listed in Tables \ref{T:sym} and \ref{T:asym}, respectively, along with the numerical results. The covariates for the mean and dispersion submodels were generated from the uniform distribution $(0,1)$, and were considered to be constant for all Monte Carlo replications. Computational implementations were conducted using the {\tt R} language~\citep{R2012}. An {\tt R} function for fitting the proposed model with asymmetric Aranda-Ordaz link function, along with the diagnostic measures, is available at \url{http://www.ufsm.br/bayer/betareglink.zip}. 
\begin{table}[] \setlength{\tabcolsep}{1.2pt} \tablesize \caption{Monte Carlo simulation results of point estimation evaluation for symmetric Aranda-Ordaz link functions.} \label{T:sym} \begin{center} \begin{tabular}{lrrrrrrrr} \hline \multicolumn{9}{c}{Scenario 1}\\ \hline &$\beta_0$ & $\beta_1$& $\beta_2$& $\gamma_0$ & $\gamma_1$ & $\gamma_2$& $\lambda_1$ & $\lambda_2$ \\ \hline parameters & $ 1.500 $ & $ -1.000 $ & $ -1.500 $ & $ -1.700 $ & $ 1.000 $ & $ -2.000 $ & $ 0.500 $ & $ 0.500 $ \\ \hline \multicolumn{9}{c}{$n=100$}\\ \hline mean & $ 1.502 $ & $ -1.001 $ & $ -1.504 $ & $ -1.786 $ & $ 0.780 $ & $ -1.126 $ & $ 0.301 $ & $ 0.688 $ \\ bias & $ 0.002 $ & $ -0.001 $ & $ -0.004 $ & $ -0.086 $ & $ -0.220 $ & $ 0.874 $ & $ -0.199 $ & $ 0.188 $ \\ RB & $ 0.151 $ & $ 0.059 $ & $ 0.251 $ & $ 5.012 $ & $ -22.036 $ & $ -43.721 $ & $ -39.765 $ & $ 37.686 $ \\ SD & $ 0.102 $ & $ 0.185 $ & $ 0.105 $ & $ 0.190 $ & $ 0.331 $ & $ 0.553$ & $ 0.308 $ & $ 0.206 $ \\ MSE & $ 0.010 $ & $ 0.034 $ & $ 0.011 $ & $ 0.043 $ & $ 0.158 $ & $ 1.070 $ & $ 0.135 $ & $ 0.078 $ \\ \hline \multicolumn{9}{c}{$n=500$}\\ \hline mean & $ 1.508$ & $ -1.006 $ & $ -1.509 $ & $ -1.769 $ & $ 0.871 $ & $ -1.376 $ & $ 0.411 $ & $ 0.626 $ \\ bias & $ 0.008 $ & $ -0.006 $ & $ -0.009 $ & $ -0.069 $ & $ -0.129 $ & $ 0.626 $ & $ -0.089 $ & $ 0.126 $ \\ RB & $ 0.509 $ & $ 0.557 $ & $ 0.612 $ & $ 4.048 $ & $ -12.851 $ & $ -31.308 $ & $ -17.859 $ & $ 25.229 $ \\ SD & $ 0.018 $ & $ 0.017 $ & $ 0.022 $ & $ 0.146 $ & $ 0.291 $ & $ 0.623 $ & $ 0.161 $ & $ 0.219 $ \\ MSE & $ 0.000 $ & $ 0.000 $ & $ 0.001 $ & $ 0.026 $ & $ 0.101 $ & $ 0.780 $ & $ 0.034 $ & $ 0.064 $ \\ \hline \multicolumn{9}{c}{Scenario 2}\\ \hline &$\beta_0$ & $\beta_1$& $\beta_2$& $\gamma_0$ & $\gamma_1$ & $\gamma_2$& $\lambda_1$ & $\lambda_2$ \\ \hline parameters & $ 1.500 $ & $ -2.000 $ & $ 1.000 $ & $ -2.000 $ & $ 1.000 $ & $ -1.000 $ & $ 0.250 $ & $ 0.850$ \\ \hline \multicolumn{9}{c}{$n=100$}\\ \hline mean & $ 1.507 $ & $ -2.008 $ & $ 1.000 $ 
& $ -2.264 $ & $ 0.699 $ & $ -0.713 $ & $ 0.200 $ & $ 0.614 $ \\ bias & $ 0.008 $ & $ -0.008 $ & $ -0.000 $ & $ -0.264 $ & $ -0.301 $ & $ 0.287 $ & $ -0.050 $ & $ -0.237 $ \\ RB & $ 0.472 $ & $ 0.389 $ & $ -0.027 $ & $ 13.190 $ & $ -30.084 $ & $ -28.740 $ & $ -19.878 $ & $ -27.750 $ \\ SD & $ 0.053 $ & $ 0.058 $ & $ 0.048 $ & $ 0.325 $ & $ 0.494 $ & $ 0.552 $ & $ 0.125 $ & $ 0.235 $ \\ MSE & $ 0.003 $ & $ 0.003 $ & $ 0.002 $ & $ 0.175 $ & $ 0.334 $ & $ 0.387 $ & $ 0.018 $ & $ 0.111 $ \\ \hline \multicolumn{9}{c}{$n=500$}\\ \hline mean & $ 1.503 $ & $ -2.004 $ & $ 1.002 $ & $ -2.337 $ & $ 0.880 $ & $ -0.876 $ & $ 0.224 $ & $ 0.612 $ \\ bias & $ 0.003 $ & $ -0.004 $ & $ 0.002 $ & $ -0.337 $ & $ -0.120 $ & $ 0.124 $ & $ -0.025 $ & $ -0.238 $ \\ RB & $ 0.171 $ & $ 0.219 $ & $ 0.231 $ & $ 16.859 $ & $ -11.964 $ & $ -12.367 $ & $ -10.191 $ & $ -28.007 $ \\ SD & $ 0.015$ & $ 0.026 $ & $ 0.018$ & $ 0.268 $ & $ 0.496 $ & $ 0.630 $ & $ 0.079 $ & $ 0.214 $ \\ MSE & $ 0.000 $ & $ 0.001 $ & $ 0.000 $ & $ 0.185 $ & $ 0.261 $ & $ 0.412 $ & $ 0.007 $ & $ 0.102 $ \\ \hline \end{tabular} \end{center} \end{table} \begin{table}[] \setlength{\tabcolsep}{1.2pt} \tablesize \caption{Monte Carlo simulation results of point estimation evaluation for asymmetric Aranda-Ordaz link functions.} \label{T:asym} \begin{center} \begin{tabular}{lrrrrrrrr} \hline \multicolumn{9}{c}{Scenario 1}\\ \hline &$\beta_0$ & $\beta_1$& $\beta_2$& $\gamma_0$ & $\gamma_1$ & $\gamma_2$& $\lambda_1$ & $\lambda_2$ \\ \hline parameters & $ 1.000 $ & $ 6.000 $ & $ -4.000 $ & $ -1.000 $ & $ -5.000 $ & $ 3.000 $ & $ 5.000 $ & $ 10.000 $ \\ \hline \multicolumn{9}{c}{$n=100$}\\ \hline mean & $ 1.019 $ & $ 6.029 $ & $ -4.017 $ & $ 0.415 $ & $ -7.122 $ & $ 4.056 $ & $ 5.025 $ & $ 20.697 $ \\ bias & $ 0.019 $ & $ 0.029 $ & $ -0.017 $ & $ 1.415 $ & $ -2.122 $ & $ 1.056 $ & $ 0.025 $ & $ 10.697 $ \\ RB & $ 1.090 $ & $ 0.478 $ & $ 0.434 $ & $ -141.521 $ & $ 42.444 $ & $ 35.207 $ & $ 0.509 $ & $ 106.971 $ \\ SD & $ 0.159 $ & $ 
0.415 $ & $ 0.258 $ & $ 5.459 $ & $ 7.111 $ & $ 3.392 $ & $ 0.423 $ & $ 36.193 $ \\ MSE & $ 0.0256 $ & $ 0.173 $ & $ 0.067 $ & $ 31.805 $ & $ 55.068 $ & $ 12.618 $ & $ 0.180 $ & $ 1424.330 $ \\ \hline \multicolumn{9}{c}{$n=500$}\\ \hline mean & $ 1.007 $ & $ 6.002 $ & $ -4.001 $ & $ -0.918 $ & $ -5.166 $ & $ 3.103 $ & $ 5.001 $ & $ 10.829 $ \\ bias & $ 0.001 $ & $ 0.002 $ & $ -0.001 $ & $ 0.082 $ & $ -0.166 $ & $ 0.103 $ & $ 0.001 $ & $ 0.829 $ \\ RB & $ 0.069 $ & $ 0.025 $ & $ 0.025 $ & $ -8.150 $ & $ 3.330 $ & $ 3.437 $ & $ 0.029 $ & $ 8.290 $ \\ SD & $ 0.046 $ & $ 0.108 $ & $ 0.072 $ & $ 0.299 $ & $ 0.442 $ & $ 0.279 $ & $ 0.112 $ & $ 2.632 $ \\ MSE & $ 0.002 $ & $ 0.012 $ & $ 0.005 $ & $ 0.096 $ & $ 0.223 $ & $ 0.089 $ & $ 0.012 $ & $ 7.616 $ \\ \hline \multicolumn{9}{c}{Scenario 2}\\ \hline &$\beta_0$ & $\beta_1$& $\beta_2$& $\gamma_0$ & $\gamma_1$ & $\gamma_2$& $\lambda_1$ & $\lambda_2$ \\ \hline parameters & $ 1.000 $ & $ 3.000 $ & $ -4.000 $ & $ -1.000 $ & $ -8.000 $ & $ 1.000 $ & $ 1.000 $ & $ 1.000 $ \\ \hline \multicolumn{9}{c}{$n=100$}\\ \hline mean & $ 1.000 $ & $ 3.000 $ & $ -4.000 $ & $ -0.863 $ & $ -8.301 $ & $ 1.032 $ & $ 1.000 $ & $ 2.449$ \\ bias & $ -0.000 $ & $ -0.000 $ & $ 0.000 $ & $ 0.137 $ & $ -0.301 $ & $ 0.032 $ & $ -0.000 $ & $ 1.449 $ \\ RB & $ -0.001 $ & $ -0.000 $ & $ -0.000 $ & $ -13.685 $ & $ 3.766 $ & $ 3.241 $ & $ -0.001 $ & $ 144.888 $ \\ SD & $ 0.002 $ & $ 0.003 $ & $ 0.003 $ & $ 0.336 $ & $ 0.491 $ & $ 0.295 $ & $ 0.002 $ & $ 2.931 $ \\ MSE & $ 0.000 $ & $ 0.000 $ & $ 0.000 $ & $ 0.136 $ & $ 0.332 $ & $ 0.088 $ & $ 0.000 $ & $ 10.692 $ \\ \hline \multicolumn{9}{c}{$n=500$ }\\ \hline mean & $ 1.000 $ & $ 3.000 $ & $ -4.000 $ & $ -0.976 $ & $ -8.059 $ & $ 1.010 $ & $ 1.000 $ & $ 1.270 $ \\ bias & $ -0.000 $ & $ -0.000 $ & $ 0.000 $ & $ 0.024 $ & $ -0.059 $ & $ 0.010 $ & $ -0.000 $ & $ 0.270 $ \\ RB & $ -0.001 $ & $ -0.000 $ & $ -0.000 $ & $ -2.425 $ & $ 0.736 $ & $ 0.962 $ & $ -0.001 $ & $ 26.986859 $ \\ SD & $ 0.001 $ & $ 0.001 
$ & $ 0.001 $ & $ 0.127 $ & $ 0.185 $ & $ 0.120 $ & $ 0.001 $ & $ 1.016 $ \\ MSE & $ 0.000 $ & $ 0.000 $ & $ 0.000 $ & $ 0.017 $ & $ 0.038 $ & $ 0.015 $ & $ 0.000 $ & $ 1.105 $ \\ \hline \end{tabular} \end{center} \end{table} In general, according to Tables~\ref{T:sym} and \ref{T:asym}, the parameter estimates related to the mean submodel are not biased, unlike those for the dispersion submodel. This bias in the dispersion parameter estimators has been verified in other variations of the beta regression model that consider fixed links \citep{Ospina2006,simas2010,Ospina2012}. Considering the symmetric family of Aranda-Ordaz link functions, Table~\ref{T:sym} shows that the estimators for the dispersion submodel parameters are more biased than when we consider the asymmetric family (Table~\ref{T:asym}). We also note that the estimator of $\lambda_2$ was biased even in moderate sample sizes. These results can be justified by numerical problems in the log-likelihood maximization. The symmetric Aranda-Ordaz link function is numerically more unstable than the asymmetric one, due to the fact that it fails to be differentiable at some points for some values of $\lambda$ (cf. Figure~\ref{F:ao-sym}). For the results regarding the asymmetric family in Table~\ref{T:asym}, it can be observed that the bias in the dispersion structure estimators is concentrated at the intercept and for higher values of $\lambda_2$. The estimator of the link function parameter in the dispersion submodel also produced a considerable value of RB in small samples. For example, in Scenario 1, with $n=100$ and $\lambda_2=10$, ${\rm RB}=-141.521\%$ for the intercept of the dispersion submodel. As for $\lambda_2=10$, considering $n=100$, ${\rm RB}=106.971\%$ was observed for $\widehat{\lambda}_2$. This bias considerably decreases as the sample size increases; for $n=500$, the biases for the same estimators are reduced to $-8.150\%$ and $8.290\%$, respectively. 
In all cases, it is possible to verify that the MSE values tend quickly toward zero as the size of the sample increases, as was expected because of the consistency of the MLEs. The simulation results indicate that the MLE in the proposed model performs well. The bias in the dispersion submodel parameter estimators is in accordance with previous results \citep{Ospina2006, Andrade2007, simas2010, Ospina2012}. However, when the symmetric link was considered, the numerical maximization of the log-likelihood function presented some drawbacks. In addition, the asymmetric family of Aranda-Ordaz link functions is more flexible than the symmetric version, because it considers the possible asymmetry between the random component and the linear predictors. Thus, we suggest the asymmetric family for empirical applications. It is noteworthy that adequate link functions must be selected when using the usual models with fixed link functions (logit, probit, etc.) in actual data applications, in addition to the selection of the covariates. This model selection procedure can be time-consuming and inconclusive. When considering the proposed model, the selection of link functions is no longer a practical problem. Furthermore, the possible relationships between the parameters of interest, $\mu$ and $\sigma$, and their respective linear predictors, become more flexible. \section{Application}\label{s:apli} In this section, the proposed model is employed with actual data to demonstrate its practical applicability. For parametric link functions we choose the asymmetric Aranda-Ordaz family, because it is much more flexible than the symmetric function and its computational implementation is more stable. We considered the data used by \cite{Cribari2013} about religious belief in 124 countries. The proportion of nonbelievers in each country is the dependent variable, $Y$. 
The covariates considered are the average intelligence quotient of the population in each country ($IQ$), $IQ$ squared ($IQ^2$), a dummy variable that equals 1 if the percentage of Muslims is greater than $50\%$ and 0 otherwise ($MUSL$), the per capita income adjusted by the purchasing power parity in 2008 in thousands of dollars ($INCOME$), the logarithm of the ratio between the sum of imports and exports and the Gross Domestic Product in 2008 (${\rm log}OPEN$), and the interaction between $MUSL$ and $INCOME$ ($M\times I$). After some adjustments and diagnostic analyses, the model presented in Table~\ref{t:par} was selected. The RESET-type test considering the LR statistic suggests this model was correctly specified ($p\text{-value}=0.153$). It can also be verified that all covariates were significant at the nominal $10\%$ level. Comparatively, using the usual logit link function for the mean and dispersion, with the fitted model covariates given in Table~\ref{t:par}, the RESET-type test indicated that the model was not correctly specified ($p\text{-value}=0.008 $) at the usual nominal levels. We also tested the hypothesis $H_0: (\lambda_1, \lambda_2) = (1,1)$ by LR statistic. With $p\text{-value}=0.024$ we reject the hypothesis that the logit is the correct link function in both submodels. \begin{table}[t] \tablesize \caption{Fitted model for religious belief data.} \label{t:par} \begin{center} \begin{tabular}{lcccc} \hline & Estimate & Std. 
error & $z$ stat & $p\text{-value}$ \\ \hline \multicolumn{5}{c}{Mean submodel}\\ \hline Intercept & $25.183$ & $7.041$ & $3.576$ & $0.000$\\ $IQ$ & $-0.881$ & $0.190$ & $4.623$ & $0.000$\\ $IQ^2$ & $0.006$ & $0.001$ & $4.861$ & $0.000$\\ $INCOME$ & $0.029$ & $0.017$ & $1.690$ & $0.091$\\ $MUSL$ & $-0.761$ & $0.142$ & $5.354$ & $0.000$\\ ${\rm log}OPEN$ & $0.481$ & $0.162$ & $2.967$ & $0.003$\\ $\lambda_1$ & $9.255$ & $3.892$ & &\\ \hline \multicolumn{5}{c}{Dispersion submodel}\\ \hline Intercept & $-8.817$ & $1.354$ & $6.510$ & $0.000$\\ $IQ$ & $0.059$ & $0.011$ & $5.250$ & $0.000$\\ $MUSL$ & $-1.608$ & $0.256$ & $6.281$ & $0.000$\\ ${\rm log}OPEN$ & $0.548$ & $0.213$ & $2.580$ & $0.010$\\ $M\times I$ & $0.118$ & $0.036$ & $3.308$ & $0.001$\\ $\lambda_2$ & $0.853$ & $1.605$ & &\\ \hline \multicolumn{5}{c}{$R^2_{G}=0.841$}\\ \multicolumn{5}{c}{${\rm AIC}=-560.271$.}\\ \hline \end{tabular} \end{center} \end{table} \begin{figure*} \caption{Diagnostic charts.} \label{F:f2} \label{F:f2.2} \label{F:f2.3} \label{F:f2.4} \label{F:f2.5} \label{F:f2} \end{figure*} Figure \ref{F:f2} presents a diagnostic analysis of the fitted model. The residual analysis in Figures \ref{F:f2.2} and \ref{F:f2.4}, and the observed values ($y_t$) versus the predicted values ($\widehat{\mu}_t$) in Figure \ref{F:f2.3}, indicates that the model was correctly adjusted. The Cook-like distance shown in Figure \ref{F:f2.5}, highlights four observations ($C_t>0.5$), namely: 17, 77, 97, and 118, corresponding to Burkina Faso, Mozambique, Sierra Leone, and the United States of America (USA), respectively. In Burkina Faso and Sierra Leone, just $0.5\%$ of the population are atheists, which is the smallest percentage of nonbelievers. Mozambique and Sierra Leone present the smallest average $IQ$ among the considered countries. In addition, Mozambique has a large proportion of atheists compared to other countries with similar $IQ$. 
Finally, the USA has very high $IQ$ and $INCOME$ values, as well as small $OPEN$ values compared with countries that present a similar percentage of nonbelievers (just $10.5\%$). Although the influence measures described by \cite{Cribari2013} did not highlight the USA, the authors did discuss this atypical religious characteristic for a country with high $IQ$. Conclusions regarding the mean submodel parameter estimates (Table~\ref{t:par}) corroborate those of \cite{Cribari2013}. The variables $IQ$ and $MUSL$ have a negative influence on the mean submodel, whereas $IQ^2$, $INCOME$ and ${\rm log}OPEN$ have a positive influence. In the dispersion submodel, the variable $MUSL$ has a negative influence, whereas the variables $IQ$, ${\rm log}OPEN$, and $M\times I$ have a positive influence. It is easy to see that the per capita income adjusted by the purchasing power parity ($INCOME$) is directly proportional to religious disbelief. To assess the impact of $IQ$ on the mean proportion of nonbelievers, the following measure of impact was considered \citep{Cribari2013}: \begin{align*} \dfrac{\partial \mathbb{E}(y_t)}{\partial IQ_t} = \dfrac{\partial g_1^{-1}(\eta_{1t},\lambda_1)}{\partial IQ_t} = \dfrac{\partial \mu_t}{\partial IQ_t} = \dfrac{\partial \mu_t}{\partial \eta_{1t}} \dfrac{\partial \eta_{1t}}{\partial IQ_t}. \end{align*} This measures the average impact on the proportion of nonbelievers resulting from changes in the $IQ$ covariate when the other covariates remain constant. Figure~\ref{F:f3.1} shows the impact of variations in $IQ$ on the average percentage of nonbelievers, with the other covariates set to their mean values. The impact is not constant and varies according to $IQ$. Up to $IQ=100$, the impact first increases before decreasing. Figure~\ref{F:f3.2} shows the relationship between the estimated mean proportion of nonbelievers and intelligence. 
This chart suggests that higher values of $IQ$ are related to larger proportions of nonbelievers, with greater impact for $IQ$ values above 85. \begin{figure*} \caption{Relationship between religious disbelief and intelligence.} \label{F:f3} \label{F:f3.1} \label{F:f3.2} \end{figure*} In order to compare our proposed model adjusted for religious belief data with the model in \cite{Cribari2013}, we selected some goodness-of-fit measures. The generalized coefficient of determination ($R^2_{G}$), the maximized log-likelihood function ($\ell(\widehat{\boldsymbol{\theta}})$), the Akaike information criterion (AIC) and the mean square error (MSE) between the observed ($y$) and predicted ($\widehat{\mu}$) values of the two fitted models are shown in Table~\ref{t:compare}. We note that our proposed model outperforms the model with fixed link functions in all measures. In particular, regarding $R^2_{G}$, our fitted model explains the variability of $y$ about $8\%$ more than the model with fixed links. It is worth noting that the proposed model considers the dispersion parameter $\sigma$, unlike the model used by \cite{Cribari2013}, which considered the precision parameter $\phi$. Note that \cite{Cribari2013} selected the loglog link function for the mean and the log link function for the precision. \begin{table} \tablesize \caption{ A comparison between the proposed fitted model for religious belief data and the model in \cite{Cribari2013}. 
} \label{t:compare} \begin{center} \begin{tabular}{ccccc} \hline Model & $R^2_{G}$ & $\ell(\widehat{\boldsymbol{\theta}})$ & AIC & MSE($y$,$\widehat{\mu}$) \\ \hline Model with fixed links & \multirow{2}{*}{$0.760$} & \multirow{2}{*}{$267.489$} & \multirow{2}{*}{$-518.979$} & \multirow{2}{*}{$0.015$} \\ \citep{Cribari2013} & & & & \\ \hline Model with parametric links & \multirow{2}{*}{$0.841$} & \multirow{2}{*}{$293.135$} & \multirow{2}{*}{$-560.271$} & \multirow{2}{*}{$0.013$} \\ (proposed) & & & & \\ \hline \end{tabular} \end{center} \end{table} \section{Conclusion} \label{s:conclu} In this paper, we have proposed a beta regression model with parametric link functions, that is useful for modeling variables contained in the interval $(0,1)$, such as rates and proportions. The vector score and Fisher's information matrix were derived analytically, and aspects of large sample inference were presented. Diagnostic measures that allow researchers to identify influential points, outlier observations, or shortcomings of the fitted model were also proposed. A simulation study highlighted the accurate finite sample performance of the point estimators. An application to actual data was presented and discussed to demonstrate the practical usefulness of the proposed model. Moreover, the use of parametric link functions enables problems arising from the incorrect specification of link functions to be circumvented, thereby facilitating the construction of an adequate model. Finally, all of the evidence from this study suggests that the proposed model is both useful and adequate for modeling rate and proportion variables. \section*{Appendix}\label{S:apA} In this appendix we obtain the score function and the Fisher's information matrix for all parameters ($\boldsymbol{\beta}$,$\boldsymbol{\gamma}$,$\lambda_1$,$\lambda_2$). 
The elements of the score vector are given by: \begin{align*} U_{\beta_i}(\boldsymbol{\theta})=\frac{\partial\ell(\boldsymbol{\theta})}{\partial\beta_i}=&\sum\limits_{t=1}^{n}\dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t}\frac{\partial\mu_t}{\partial\eta_{1t}}\frac{\partial\eta_{1t}}{\partial\beta_{i}},\\ U_{\gamma_j}(\boldsymbol{\theta})=\frac{\partial\ell(\boldsymbol{\theta})}{\partial\gamma_j}=&\sum_{t=1}^{n}\dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t}\dfrac{\partial\sigma_t}{\partial\eta_{2t}}\dfrac{\partial\eta_{2t}}{\partial\gamma_{j}},\\ U_{\lambda_1}(\boldsymbol{\theta})=\frac{\partial\ell(\boldsymbol{\theta})}{\partial\lambda_1} =& \sum_{t=1}^{n}\dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t}\dfrac{\partial\mu_t}{\partial\lambda_1},\\ U_{\lambda_2}(\boldsymbol{\theta})=\frac{\partial\ell(\boldsymbol{\theta})}{\partial\lambda_2} =& \sum_{t=1}^{n}\dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t}\dfrac{\partial\sigma_t}{\partial\lambda_2}, \end{align*} for $i=1,\ldots,r$ and $j=1, \ldots, s$, where $\dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t} = \dfrac{1-\sigma^2_t}{\sigma^2_t}(y^*_t-\mu^*_t)$, $\dfrac{\partial\mu_t}{\partial\eta_{1t}} = \left[\dfrac{\partial g_1(\mu_{t},\lambda_1)}{\partial\mu_t}\right]^{-1}$, $\dfrac{\partial\eta_{1t}}{\partial\beta_{i}}=x_{ti}$, $\dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t}=a_t$, $\dfrac{\partial\sigma_t}{\partial\eta_{2t}} = \left[\dfrac{\partial g_2(\sigma_{t},\lambda_2)}{\partial\sigma_t}\right]^{-1}$ and $\dfrac{\partial\eta_{2t}}{\partial\gamma_{i}}=z_{tj}$. 
The second order derivatives of the log-likelihood function are given by: \begin{align*} \dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\beta_i\partial\beta_p} &= \sum_{t=1}^{n}\dfrac{\partial}{\partial\mu_t} \left( \dfrac{\partial\ell_t(\mu_t,\sigma_t)}{\partial\mu_t}\dfrac{\partial\mu_t}{\partial\eta_{1t}} \right) \dfrac{\partial\mu_t}{\partial\eta_{1t}} \dfrac{\partial\eta_{1t}}{\partial\beta_p} \dfrac{\partial\eta_{1t}}{\partial\beta_i} \\ &= \sum_{t=1}^{n} \left( \dfrac{\partial^2\ell_t(\mu_t,\sigma_t)}{\partial\mu_t^2}\dfrac{\partial\mu_t}{\partial\eta_{1t}} + \dfrac{\partial\ell_t(\mu_t,\sigma_t)}{\partial\mu_t} \dfrac{\partial}{\partial\mu_t} \left(\dfrac{\partial\mu_t}{\partial\eta_{1t}}\right) \right) \\ &\times \left( \dfrac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t} \right)^{-1} x_{ti}x_{tp}, \; p=1,\ldots,r,\\ \dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\beta_i\partial\gamma_j} &= \sum_{t=1}^{n} \left(\dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t\partial\sigma_t}\dfrac{\partial\sigma_t}{\partial\eta_{2t}}\dfrac{\partial\eta_{2t}}{\partial\gamma_{j}} \right)\frac{\partial\mu_t}{\partial\eta_{1t}} \frac{\partial\eta_{1t}}{\partial\beta_{i}} \\ &=\sum_{t=1}^{n} \left(\dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t\partial\sigma_t}\left( \dfrac{\partial g_2(\sigma_t,\lambda_2)}{\partial\sigma_t} \right)^{-1} z_{tj} \right) \left(\dfrac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t} \right)^{-1} x_{ti},\\ \dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\beta_i\partial\lambda_1} &= \sum_{t=1}^{n} \dfrac{\partial}{\partial\lambda_1} \left(\dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t}\dfrac{\partial\mu_t}{\partial\eta_{1t}} \right)\frac{\partial\eta_{1t}}{\partial\beta_{i}}\\ &=\sum_{t=1}^{n} \bigg[ \dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t^2} \dfrac{\partial\mu_t}{\partial\lambda_1} \dfrac{\partial\mu_t}{\partial\eta_{1t}} + 
\dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t} \dfrac{\partial}{\partial\lambda_1} \left( \dfrac{\partial\mu_t}{\partial\eta_{1t}} \right) \bigg] x_{ti},\\ \dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\beta_i\partial\lambda_2} &= \sum_{t=1}^{n} \dfrac{\partial}{\partial\lambda_2} \left(\dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t}\dfrac{\partial\mu_t}{\partial\eta_{1t}} \right)\frac{\partial\eta_{1t}}{\partial\beta_{i}}\\ &=\sum_{t=1}^{n} \bigg[ \dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t\partial\sigma_t} \dfrac{\partial\sigma_t}{\partial\lambda_2} \dfrac{\partial\mu_t}{\partial\eta_{1t}} + \dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t} \dfrac{\partial}{\partial\lambda_2} \left( \dfrac{\partial\mu_t}{\partial\eta_{1t}} \right) \bigg] x_{ti}\\ & = \sum_{t=1}^{n} \dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t\partial\sigma_t} \varrho_t \left(\dfrac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t} \right)^{-1} x_{ti},\\ \dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\gamma_j\partial\gamma_l} &= \sum_{t=1}^{n} \dfrac{\partial}{\partial\gamma_l}\left( \dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t}\dfrac{\partial\sigma_t}{\partial\eta_{2t}}\dfrac{\partial\eta_{2t}}{\partial\gamma_{j}} \right) \\ &= \sum_{t=1}^{n} \bigg( \dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t^2} \dfrac{\partial\sigma_t}{\partial\eta_{2t}}\dfrac{\partial\eta_{2t}}{\partial\gamma_{l}} \dfrac{\partial\sigma_t}{\partial\eta_{2t}} + \dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t} \dfrac{\partial}{\partial\gamma_l} \left( \dfrac{\partial\sigma_t}{\partial\eta_{2t}} \right) \bigg)z_{tj},\\ l=1,\ldots,s,\\ \dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\gamma_j\partial\lambda_1} &= \sum_{t=1}^{n} \dfrac{\partial}{\partial\lambda_1}\left( 
\dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t}\dfrac{\partial\sigma_t}{\partial\eta_{2t}}\dfrac{\partial\eta_{2t}}{\partial\gamma_{j}} \right)\\ & = \sum_{t=1}^{n} \dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t\partial\mu_t}\dfrac{\partial\mu_t}{\partial\lambda_1} \left( \dfrac{\partial g_2(\sigma_t,\lambda_2)}{\partial\sigma_t} \right)^{-1} z_{tj},\\ \dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\gamma_j\partial\lambda_2} &= \sum_{t=1}^{n} \dfrac{\partial}{\partial\lambda_2}\left( \dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t}\dfrac{\partial\sigma_t}{\partial\eta_{2t}}\dfrac{\partial\eta_{2t}}{\partial\gamma_{j}} \right)\\ &= \sum_{t=1}^{n} \bigg( \dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t^2}\dfrac{\partial\sigma_t}{\partial\lambda_2} \dfrac{\partial\sigma_t}{\partial\eta_{2t}} + \dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t} \dfrac{\partial}{\partial\lambda_2}\left( \dfrac{\partial\sigma_t}{\partial\eta_{2t}} \right) \bigg) z_{tj},\\ \dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\lambda_1^2} &= \sum_{t=1}^{n} \dfrac{\partial}{\partial\lambda_1}\left( \dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t}\dfrac{\partial\mu_t}{\partial\lambda_1}\right) \\ &= \sum_{t=1}^{n} \bigg( \dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t^2}\dfrac{\partial\mu_t}{\partial\lambda_1} \dfrac{\partial\mu_t}{\partial\lambda_1} + \dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t} \dfrac{\partial^2\mu_t}{\partial\lambda_1^2} \bigg),\\ \dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\lambda_1\partial\lambda_2} &= \sum_{t=1}^{n} \dfrac{\partial}{\partial\lambda_2}\left( \dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t}\dfrac{\partial\mu_t}{\partial\lambda_1}\right) = \sum_{t=1}^{n} \dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t\partial\sigma_t}\dfrac{\partial\sigma_t}{\partial\lambda_2} \dfrac{\partial\mu_t}{\partial\lambda_1},\\ 
\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\lambda_2^2} &= \sum_{t=1}^{n} \dfrac{\partial}{\partial\lambda_2}\left( \dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t}\dfrac{\partial\sigma_t}{\partial\lambda_2}\right) \\ &= \sum_{t=1}^{n} \bigg( \dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t^2}\dfrac{\partial\sigma_t}{\partial\lambda_2} \dfrac{\partial\sigma_t}{\partial\lambda_2} + \dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t} \dfrac{\partial^2\sigma_t}{\partial\lambda_2^2} \bigg), \end{align*} where $\dfrac{\partial}{\partial\lambda_2} \left( \dfrac{\partial\mu_t}{\partial\eta_{1t}} \right)=0$, \begin{align*} \dfrac{\partial^2\ell_t(\mu_t,\sigma_t)}{\partial\mu_t^2} &= - \left(\dfrac{1-\sigma_t^2}{\sigma_t^2}\right)^2 \bigg[ \psi'\left( \mu_t \dfrac{1-\sigma_t^2}{\sigma_t^2} \right) + \psi'\left( (1-\mu_t) \dfrac{1-\sigma_t^2}{\sigma_t^2} \right) \bigg],\\ \dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t\partial\sigma_t}&=-\dfrac{2}{\sigma_t^3}(y^*_t-\mu^*_t) - \dfrac{1-\sigma^2_t}{\sigma^2_t}\dfrac{2}{\sigma_t^3} \bigg[ (1-\mu_t)\psi'\left( (1-\mu_t) \dfrac{1-\sigma_t^2}{\sigma_t^2} \right) \\ &-\mu_t\psi'\left( \mu_t \dfrac{1-\sigma_t^2}{\sigma_t^2} \right) \bigg],\\ \dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t^2} &= -\dfrac{4}{\sigma^6_t}\bigg[-\psi'\left(\dfrac{1-\sigma_t^2}{\sigma_t^2}\right) + \mu_t^2\psi'\left(\mu_t\dfrac{1-\sigma_t^2}{\sigma_t^2}\right) + (1-\mu_t)^2\\ &\times\psi'\left((1-\mu_t)\dfrac{1-\sigma_t^2}{\sigma_t^2}\right)\bigg] +\dfrac{3}{\sigma_t}\dfrac{2}{\sigma_t^3} \bigg[ \mu_t(y_t^*-\mu_t^*)+\psi\left(\frac{1-\sigma_t^2}{\sigma_t^2}\right) \\ &-\psi\left((1-\mu_t)\frac{1-\sigma_t^2}{\sigma_t^2}\right)+\log (1-y_t) \bigg]. 
\end{align*} Taking the expected value of the second order derivatives given above, since $\Bbb{E}\left( \dfrac{\partial\ell_t(\mu_t,\sigma_t)}{\partial\mu_t} \right) = 0$, we have: \begin{align*} \Bbb{E}\left( \dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\beta_i\partial\beta_p} \right) &= \sum_{t=1}^{n} \Bbb{E} \Bigg[ \left( \dfrac{\partial^2\ell_t(\mu_t,\sigma_t)}{\partial\mu_t^2}\left( \dfrac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t} \right)^{-1} \right) \left( \dfrac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t} \right)^{-1}\!\!\!\! x_{ti}x_{tp} \Bigg]\\ &= \sum_{t=1}^{n} \Bbb{E} \left[ \dfrac{\partial^2\ell_t(\mu_t,\sigma_t)}{\partial\mu_t^2}\left( \dfrac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t} \right)^{-2} x_{ti}x_{tp} \right]\\ &= - \sum_{t=1}^{n} \Bbb{E} \Bigg[ \left(\dfrac{1-\sigma_t^2}{\sigma_t^2}\right) \left(\dfrac{1-\sigma_t^2}{\sigma_t^2}\right) \bigg[ \psi'\left( \mu_t \dfrac{1-\sigma_t^2}{\sigma_t^2} \right) \\ &+ \psi'\left( (1-\mu_t) \dfrac{1-\sigma_t^2}{\sigma_t^2} \right) \bigg]\left( \dfrac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t} \right)^{-2} x_{ti}x_{tp} \Bigg]\\ &= - \sum_{t=1}^{n} \left(\dfrac{1-\sigma_t^2}{\sigma_t^2}\right) w_t x_{ti} x_{tp}. \end{align*} Since \begin{align*} \Bbb{E}\left(\dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t\partial\sigma_t}\right)&=- \dfrac{1-\sigma^2_t}{\sigma^2_t}\dfrac{2}{\sigma_t^3}\bigg[ (1-\mu_t)\psi'\bigg( (1-\mu_t) \dfrac{1-\sigma_t^2}{\sigma_t^2} \bigg)\\ &-\mu_t\psi'\left( \mu_t \dfrac{1-\sigma_t^2}{\sigma_t^2} \right) \bigg], \end{align*} we arrive at the conclusion that \begin{align*} \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\beta_i\partial\gamma_j}\right)&=-\sum_{t=1}^{n}c_t \left( \dfrac{\partial g_2(\sigma_t,\lambda_2)}{\partial\sigma_t} \right)^{-1} \left(\dfrac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t} \right)^{-1} z_{tj} x_{ti}. 
\end{align*} In relation to $\beta_i$ and $\lambda_1$, we have: \begin{align*} \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\beta_i\partial\lambda_1}\right) &= \sum_{t=1}^{n} \Bbb{E}\left( \dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t^2} \dfrac{\partial\mu_t}{\partial\lambda_1} \dfrac{\partial\mu_t}{\partial\eta_{1t}} \right) = \sum_{t=1}^{n} \dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t^2} \dfrac{\partial\mu_t}{\partial\lambda_1} \dfrac{\partial\mu_t}{\partial\eta_{1t}} \\ &= -\sum_{t=1}^{n} \nu_t \rho_t \left(\dfrac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t} \right)^{-1} x_{ti}. \end{align*} The expected value of the second order derivative with respect to $\beta_i$ and $\lambda_2$ is given by: \begin{align*} \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\beta_i\partial\lambda_2}\right)\! &= \sum_{t=1}^{n} \Bbb{E}\left(\!\dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\mu_t\partial\sigma_t}\right)\! \varrho_t \!\left(\! \dfrac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t} \right)^{\!\!-1} \!\!\!\!x_{ti} \\ &= -\sum_{t=1}^{n} c_t \varrho_t \left(\dfrac{\partial g_1(\mu_t,\lambda_1)}{\partial\mu_t} \right)^{-1} x_{ti}. \end{align*} Since $\Bbb{E}\left(\dfrac{\partial\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t}\right)=0$, we have \begin{align*} &\Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\gamma_j\partial\gamma_l}\right) = \sum_{t=1}^{n} \Bbb{E}\left(\dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t^2}\right) \!\!\left( \frac{\partial g_2(\sigma_t,\lambda_2)}{\partial\sigma_t} \right)^{\!\!-2}\!\!\!\! 
z_{tl} z_{tj}, \end{align*} where \begin{align*} \Bbb{E}\left(\dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t^2}\right) &= -\dfrac{4}{\sigma^6_t}\bigg[-\psi'\left(\dfrac{1-\sigma_t^2}{\sigma_t^2}\right) + \mu_t^2\psi'\left(\mu_t\dfrac{1-\sigma_t^2}{\sigma_t^2}\right) \\ &+ (1-\mu_t)^2\psi'\left((1-\mu_t)\dfrac{1-\sigma_t^2}{\sigma_t^2}\right)\bigg]. \end{align*} With respect to $\gamma_j$ and $\lambda_1$, we have: \begin{align*} \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\gamma_j\partial\lambda_1}\right) \! &= \sum_{t=1}^{n} \Bbb{E}\left(\! \dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t\partial\mu_t} \right) \! \rho_t \!\left(\! \dfrac{\partial g_2(\sigma_t,\lambda_2)}{\partial\sigma_t} \right)^{\!\!-1} \!\!\!\! z_{tj} \\ &= - \sum_{t=1}^{n} c_t \rho_t \left( \dfrac{\partial g_2(\sigma_t,\lambda_2)}{\partial\sigma_t} \right)^{-1} z_{tj}. \end{align*} For $\gamma_j$ and $\lambda_2$, we have: \begin{align*} \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\gamma_j\partial\lambda_2}\right)\! &= \sum_{t=1}^{n} \Bbb{E}\left( \!\dfrac{\partial^2\ell_{t}(\mu_{t},\sigma_{t})}{\partial\sigma_t^2} \right) \! \varrho_t \!\left( \!\dfrac{\partial g_2(\sigma_t,\lambda_2)}{\partial\sigma_t} \right)^{\!-1} \!\!\!\! z_{tj} \\ &= - \sum_{t=1}^{n} d_t^* \varrho_t \left( \dfrac{\partial g_2(\sigma_t,\lambda_2)}{\partial\sigma_t} \right)^{-1} z_{tj}. \end{align*} Finally, we have: \begin{align*} \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\lambda_1^2}\right) = -\sum_{t=1}^{n}\nu_t\rho_t\rho_t, \end{align*} \begin{align*} &\Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\lambda_1\partial\lambda_2}\right)\! = \sum_{t=1}^{n} \Bbb{E}\left(\!\dfrac{\partial^2\ell_t(\mu_t\sigma_t)}{\partial\mu_t\partial\sigma_t}\right)\! \varrho_t\rho_t=\! 
-\!\sum_{t=1}^{n} c_t\varrho_t\rho_t, \end{align*} and \begin{align*} \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\lambda_2^2}\right)\! =\sum_{t=1}^{n} \Bbb{E}\left( \!\dfrac{\partial^2\ell_t(\mu_t,\sigma_t)}{\partial\sigma_t^2} \right) \! \varrho_t\varrho_t = \!-\!\sum_{t=1}^{n}d_t^*\varrho_t\varrho_t. \end{align*} In matrix form, we have: \begin{align*} \Bbb{E}\left( \dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\beta_i\partial\beta_p} \right) &= -\boldsymbol{X}^\top \boldsymbol{\Sigma} \boldsymbol{W}\boldsymbol{X},\\ \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\beta_i\partial\gamma_j}\right)&=-\boldsymbol{X}^\top \boldsymbol{C}\boldsymbol{T}\boldsymbol{H}\boldsymbol{Z},\\ \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\beta_i\partial\lambda_1}\right) &= -\boldsymbol{X}^\top \boldsymbol{V}\boldsymbol{T}\boldsymbol{\rho},\\ \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\beta_i\partial\lambda_2}\right) &= -\boldsymbol{X}^\top \boldsymbol{C}\boldsymbol{T}\boldsymbol{\varrho},\\ \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\gamma_j\partial\gamma_l}\right) &= -\boldsymbol{Z}^\top \boldsymbol{D}^*\boldsymbol{H}\boldsymbol{H}^\top \boldsymbol{Z},\\ \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\gamma_j\partial\lambda_1}\right) &= -\boldsymbol{Z}^\top \boldsymbol{C}\boldsymbol{H}\boldsymbol{\rho},\\ \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\gamma_j\partial\lambda_2}\right) &= -\boldsymbol{Z}^\top \boldsymbol{D}^*\boldsymbol{H}\boldsymbol{\varrho},\\ \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\lambda_1^2}\right) &= -\boldsymbol{\rho}^\top \boldsymbol{V}\boldsymbol{\rho},\\ \Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\lambda_1\partial\lambda_2}\right) &= -\boldsymbol{\rho}^\top \boldsymbol{C}\boldsymbol{\varrho},\\ 
\Bbb{E}\left(\dfrac{\partial^2\ell(\boldsymbol{\theta})}{\partial\lambda_2^2}\right) &= -\boldsymbol{\varrho}^\top \boldsymbol{D}^*\boldsymbol{\varrho}. \end{align*} \end{document}
\begin{document} \title{Fast algorithm for border bases of Artinian Gorenstein algebras} \author{ Bernard Mourrain\thanks{UCA, Inria M\'editerran\'ee, \textsc{aromath}, Sophia Antipolis, France, \email{[email protected]}} } \date{April 25, 2017} \maketitle \begin{abstract} Given a multi-index sequence $\sigma$, we present a new efficient algorithm to compute generators of the linear recurrence relations between the terms of $\sigma$. We transform this problem into an algebraic one, by identifying multi-index sequences, multivariate formal power series and linear functionals on the ring of multivariate polynomials. In this setting, the recurrence relations are the elements of the kernel $I_{\sigma}$ of the Hankel operator $H_{\sigma}$ associated to $\sigma$. We describe the correspondence between multi-index sequences with a Hankel operator of finite rank and Artinian Gorenstein Algebras. We show how the algebraic structure of the Artinian Gorenstein algebra $\mathcal{A}_{\sigma}$ associated to the sequence $\sigma$ yields the structure of the terms $\sigma_{\alpha}$ for all $\alpha \in \mathbbm{N}^n$. This structure is explicitly given by a border basis of $\mathcal{A}_{\sigma}$, which is presented as a quotient of the polynomial ring $\mathbbm{K} [x_1, \ldots, x_n]$ by the kernel $I_{\sigma}$ of the Hankel operator $H_{\sigma}$. The algorithm provides generators of $I_{\sigma}$ constituting a border basis, pairwise orthogonal bases of $\mathcal{A}_{\sigma}$ and the tables of multiplication by the variables in these bases. It is an extension of Berlekamp-Massey-Sakata (BMS) algorithm, with improved complexity bounds. We present applications of the method to different problems such as the decomposition of functions into weighted sums of exponential functions, sparse interpolation, fast decoding of algebraic codes, computing the vanishing ideal of points, and tensor decomposition. Some benchmarks illustrate the practical behavior of the algorithm. 
\end{abstract} \section{Introduction} Discovering hidden structures from probing or sampling is a problem which appears in many contexts and in many applications. An interesting instance of this general problem is recovering the structure of a sequence of values, from the knowledge of some of its terms. It consists in guessing any term of the sequence from the first known terms. A classical way to tackle this problem, which goes back to Bernoulli, is to find linear recurrence relations between the first terms of a sequence, to compute the roots of the associated characteristic polynomial and to deduce the expression of any term of the sequence from these roots. In this paper, we consider the structure discovering problem for multi-index sequences $\sigma = (\sigma_{\alpha})_{\alpha \in \mathbbm{N}^n} \in \mathbbm{K}^{\mathbbm{N}^n}$ of values in a field $\mathbbm{K}$. Given a finite set of values $\sigma_{\alpha}$ for $\alpha \in \tmmathbf{a} \subset \mathbbm{N}^n$, we want to guess a formula for the general terms of the sequence $\sigma$. An important step of this approach is to compute characteristic polynomials of the sequence $\sigma = (\sigma_{\alpha})_{\alpha \in \mathbbm{N}^n}$. They correspond to multi-index {\tmem{recurrence relations}} with constant coefficients between the terms of $\sigma$. The ideal of these recurrence relation polynomials define an Artinian Gorenstein algebra. We present a fast algorithm to compute a border basis of this ideal from the first terms of the sequence $\sigma$. This method also yields a basis of the Artinian Gorenstein algebra as well as its multiplicative structure. \paragraph{Related works} The approach that we present is related to Prony's method in the univariate case and to its variants {\cite{swindlehurst_performance_1992}}, {\cite{roy_esprit-estimation_1989}}, {\cite{golub_separable_2003}}, {\cite{beylkin_approximation_2005}}, ... 
and to the more recent extensions in the multivariate case {\cite{andersson_nonlinear_2010}}, {\cite{potts_parameter_2013}}, {\cite{kunis_multivariate_2016}}, {\cite{sauer_pronys_2016-1}}. Linear algebra tools are used to determine a basis of the quotient algebra $\mathcal{A}_{\sigma}$ or to compute an $H$-basis for the presentation of $\mathcal{A}_{\sigma}$. An analysis of the complexity of these approaches yields bounds in $\mathcal{O} (\tilde{s}^3)$ (or $\mathcal{O} (\tilde{s}^{\omega})$), where $\tilde{s}$ is the size of the Hankel matrices involved in these methods, typically the number $\binom{d' + n}{n}$ of monomials in degree at most $d' < \frac{d}{2}$ if the terms of the sequence are known up to the degree $d$. The problem is also related to Pad{\'e} approximants, well investigated in the univariate case {\cite{brent_fast_1980}}, {\cite{beckermann_uniform_1994}}, {\cite{gathen_modern_2013}}, but much less developed in the multivariate case {\cite{power_finite_1982}}, {\cite{cuyt_how_1999}}. Finding recurrence relations is a problem which is also well developed in the univariate case. Berlekamp {\cite{berlekamp_nonbinary_1968}} and Massey {\cite{massey_shift-register_1969}} proposed an efficient algorithm to compute such recurrence relations, with a complexity in $\mathcal{O} (r^2)$ where $r$ is the size of the minimal recurrence relation. Exploiting further the properties of Hankel matrices, the complexity of computing recurrence relations can be reduced in the univariate case to $\tilde{\mathcal{O}}(r)$. Sakata extended Berlekamp-Massey algorithm to multi-index sequences, computing a Gr{\"o}bner basis of the polynomials in the kernel of a multi-index Hankel matrix {\cite{sakata_finding_1988}}. See also {\cite{saints_algebraic-geometric_1995}} for an analysis and overview of the algorithm. The computation of multivariate linear recurrence relations have been further investigated, e.g. 
in {\cite{fitzpatrick_finding_1990}} and more recently in {\cite{berthomieu_linear_2015}}, where the coefficients of the Gr{\"o}bner basis are computed by solving multi-index Hankel systems. \paragraph{Contributions} We translate the structure discovering problem into an algebraic setting, by identifying multi-index sequences of values, generating formal power series and linear functionals on the ring of polynomials. Through this identification, we associate to a multi-index sequence ${\sigma}$, a Hankel operator $H_{\sigma}$ whose kernel $I_{\sigma}$ defines an Artinian Gorenstein Algebra $\mathcal{A}_{\sigma}$ when $H_{\sigma}$ is of finite rank. We present a new efficient algorithm to compute the algebraic structure of $\mathcal{A}_{\sigma}$, using the first terms $\sigma_{\alpha}$ for $\alpha \in \tmmathbf{a} \subset \mathbbm{N}^n$. The structure $\mathcal{A}_{\sigma}$ is described by a border basis of the ideal $I_{\sigma}$. This algorithm is an extension of the Berlekamp-Massey-Sakata (BMS) algorithm. It computes border bases of the recurrence relations, which are more general than Gr{\"o}bner bases. They also offer a better numerical stability \cite{mourrain_stable_2008} in the solving steps required to address the decomposition problem. The algorithm, based on a Gram-Schmidt orthogonalisation process, is simplified. The complexity bound also improves the previously known bounds for computing such recurrence relations. We show that the arithmetic complexity of computing a border basis is in $\mathcal{O} ((r + \delta) r s)$ where $r$ is the number of roots of $I_{\sigma}$ (counted with multiplicities), $\delta$ is the size of the border of the monomial basis and $s$ is the number of known terms of the sequence $\sigma$. The algorithm outputs generators of the recurrence relations, a monomial basis, an orthogonal basis and the tables of multiplication by the variables in this basis of $\mathcal{A}_{\sigma}$. 
The structure of the terms of the sequence $\sigma$ can be deduced from this output, by applying classical techniques for solving polynomial systems from tables of multiplication. We show how the algorithm can be applied to different problems such as the decomposition of functions into weighted sums of exponential functions, sparse interpolation, fast decoding of algebraic codes, vanishing ideal of points, and tensor decomposition. \paragraph{Notation} Let $\mathbbm{K}$ be a field, $\bar{\mathbbm{K}}$ its algebraic closure, $\mathbbm{K} [x_1, \ldots, x_n] =\mathbbm{K} [\tmmathbf{x}]$ be the ring of polynomials in the variables $x_1, \ldots, x_n$ with coefficients in the field $\mathbbm{K}$, $\mathbbm{K} [[y_1, $ $\ldots, y_n]] =\mathbbm{K} [\tmmathbf{y}]$ be the ring of formal power series in the variables $y_1, \ldots, y_n$ with coefficients in $\mathbbm{K}$. We denote by $\mathbbm{K}^{\mathbbm{N}^n}$ the set of sequences $\sigma = (\sigma_{\alpha})_{\alpha \in \mathbbm{N}^n}$ \ of numbers $\sigma_{\alpha} \in \mathbbm{K}$, indexed by $\mathbbm{N}^n$. $\forall \alpha = (\alpha_1, \ldots, \alpha_n) \in \mathbbm{N}^n,$ $\alpha ! = \prod_{i = 1}^n \alpha_i !$, $\tmmathbf{x}^{\alpha} = \prod_{i = 1}^n x_i^{\alpha_i} $. The monomials in $\mathbbm{K} [\tmmathbf{x}]$ are the elements of the form $\tmmathbf{x}^{\alpha}$ for $\alpha \in \mathbbm{N}^n$. For a set $B \subset \mathbbm{K} [\tmmathbf{x}]$, $B^+ = \cup_{i = 1}^n x_i B \cup B$, $\partial B = B^+ \setminus B$. A set $B$ of monomials of $\mathbbm{K} [\tmmathbf{x}]$ is connected to $1$, if $1 \in B$ and for $\tmmathbf{x}^{\beta} \in B$ different from $1$, there exists $\tmmathbf{x}^{\beta'} \in B$ and $i \in [1, n]$ such that $\tmmathbf{x}^{\beta} = x_i \tmmathbf{x}^{\beta'}$. For $F \subset \mathbbm{K} [\tmmathbf{x}]$, $\langle F \rangle$ is the vector space of $\mathbbm{K} [\tmmathbf{x}]$ spanned by $F$ and $(F)$ is the ideal generated by $F$. 
For $V,V'\subset \mathbbm{K} [\tmmathbf{x}]$, $V\cdot V'$ is the set of products of an element of $V$ by an element of $V'$. \section{ Polynomial-Exponential series} In this section, we recall the correspondence between sequences $\sigma = (\sigma_{\alpha})_{\alpha \in \mathbbm{N}^n} \in \mathbbm{K}^{\mathbbm{N}^n}$ associated to polynomial-exponential series and Artinian Gorenstein Algebras. \subsection{Duality} A sequence $\sigma = (\sigma_{\alpha})_{\alpha \in \mathbbm{N}^n} \in \mathbbm{K}^{\mathbbm{N}^n}$ is naturally associated to a linear form operating on polynomials, that is, an element of $\tmop{Hom}_{\mathbbm{K}} (\mathbbm{K} [\tmmathbf{x}], \mathbbm{K}) = \ensuremath{\mathbbm{K}[\tmmathbf{x}]}^{\ast}$, as follows: \[ p = \sum_{\alpha \in A \subset \mathbbm{N}^n} p_{\alpha} \tmmathbf{x}^{\alpha} \in \mathbbm{K} [\tmmathbf{x}] \mapsto \langle \sigma \mid p \rangle = \sum_{\alpha \in A \subset \mathbbm{N}^n} p_{\alpha} \sigma_{\alpha} . \] This correspondence is bijective since a linear form $\sigma \in \ensuremath{\mathbbm{K}[\tmmathbf{x}]}^{\ast}$ is uniquely defined by the sequence $\sigma_{\alpha} = \langle \sigma \mid \tmmathbf{x}^{\alpha} \rangle$ for $\alpha \in \mathbbm{N}^n$. The coefficients\tmtextbf{} $\sigma_{\alpha} = \langle \sigma \mid \tmmathbf{x}^{\alpha} \rangle$ for $\alpha \in \mathbbm{N}^n$ are also called the {\tmem{moments}} of $\sigma$. Hereafter, we will identify $\mathbbm{K}^{\mathbbm{N}^n}$ with $\ensuremath{\mathbbm{K}[\tmmathbf{x}]}^{\ast} = \tmop{Hom}_{\mathbbm{K}} (\mathbbm{K} [\tmmathbf{x}], \mathbbm{K})$. The dual space $\ensuremath{\mathbbm{K}[\tmmathbf{x}]}^{\ast}$ has a natural structure of $\ensuremath{\mathbbm{K}[\tmmathbf{x}]}$-module, defined as follows: $\forall \sigma \in \ensuremath{\mathbbm{K}[\tmmathbf{x}]}^{\ast}, \forall p, q \in \ensuremath{\mathbbm{K}[\tmmathbf{x}]}$, \begin{eqnarray*} \langle p \star \sigma \mid q \rangle & = & \langle \sigma \mid p q \rangle . 
\end{eqnarray*} We check that $\forall \sigma \in \ensuremath{\mathbbm{K}[\tmmathbf{x}]}^{\ast}, \forall p \nocomma, q \in \ensuremath{\mathbbm{K}[\tmmathbf{x}]}$, $(p q) \star \sigma = p \star (q \star \sigma)$. See e.g. {\cite{emsalem_geometrie_1978}}, {\cite{mourrain_isolated_1996}} for more details. For any $\sigma \in \ensuremath{\mathbbm{K}[\tmmathbf{x}]}^{\ast}$, the inner product associated to $\sigma$ on $\mathbbm{K} [\tmmathbf{x}]$ is defined as follows: \begin{eqnarray*} \ensuremath{\mathbbm{K}[\tmmathbf{x}]} \times \ensuremath{\mathbbm{K}[\tmmathbf{x}]} & \rightarrow & \mathbbm{K}\\ (p, q) & \mapsto & \langle p, \nosymbol \nosymbol q \rangle_{\sigma} := \langle \sigma | \nobracket p \nosymbol \nosymbol q \rangle . \end{eqnarray*} Sequences in $\mathbbm{K}^{\mathbbm{N}^n}$ are also in correspondence with series in $\mathbbm{K} [[\tmmathbf{z}]]$, via the so-called $\tmmathbf{z}$-transform: \[ \sigma = (\sigma_{\alpha})_{\alpha \in \mathbbm{N}^n} \in \mathbbm{K}^{\mathbbm{N}^n} \mapsto \sigma (\tmmathbf{z}) = \sum_{\alpha \in \mathbbm{N}^n} \sigma_{\alpha} \tmmathbf{z}^{\alpha} \in \mathbbm{K} [[\tmmathbf{z}]] . \] If $\mathbbm{K}$ is a field of characteristic $0$, we can identify the sequence $\sigma = (\sigma_{\alpha})_{\alpha \in \mathbbm{N}^n} \in \mathbbm{K}^{\mathbbm{N}^n}$with the series $\sigma (\tmmathbf{y}) = \sum_{\alpha \in \mathbbm{N}^n} \sigma_{\alpha} \frac{\tmmathbf{y}^{\alpha}}{\alpha !} \in \mathbbm{K} [[\tmmathbf{y}]] .$ Using this identification, we have \ $\forall p \in \mathbbm{K} [\tmmathbf{x}], \forall \sigma \in \mathbbm{K} [[\tmmathbf{y}]]$, $p \star \sigma (\tmmathbf{y} \nosymbol \nosymbol) = p (\partial_{y_1}, \ldots, \partial_{y_n}) (\sigma (\tmmathbf{y}))$. 
Through these identifications, the dual basis of the monomial basis $(\tmmathbf{x}^{\alpha})_{\alpha \in \mathbbm{N}^n}$ is $(\tmmathbf{z}^{\alpha})_{\alpha \in \mathbbm{N}^n}$ in $\mathbbm{K} [[\tmmathbf{z}]]$ and $\left( \frac{\tmmathbf{y}^{\alpha}}{\alpha !} \right)_{\alpha \in \mathbbm{N}^n}$ in $\mathbbm{K} [[\tmmathbf{y}]]$. Among the elements of $\tmop{Hom} (\mathbbm{K} [\tmmathbf{x}], \mathbbm{K})$, we have the evaluation $\tmmathbf{e}_{_{\xi}} : p (\tmmathbf{x}) \in \mathbbm{K} [\tmmathbf{x}] \mapsto p (\xi) \in \mathbbm{K}$ at a point $\xi \in \mathbbm{K}^n$, which corresponds to the sequence $(\xi^{\alpha})_{\alpha \in \mathbbm{N}^n}$ or to the series $\tmmathbf{e}_{_{\xi}} (\tmmathbf{z}) = \sum_{\alpha \in \mathbbm{N}^n} \xi^{\alpha} \tmmathbf{z}^{\alpha} = \hspace{0.25em} \frac{1 \hspace{0.25em}}{\prod_{i = 1}^n (1 - \xi_i z_i)} \in \mathbbm{K} [[\tmmathbf{z}]]$, or to the series $\tmmathbf{e}_{_{\xi}} (\tmmathbf{y}) = \sum_{\alpha \in \mathbbm{N}^n} \xi^{\alpha} \frac{\tmmathbf{y}^{\alpha}}{\alpha !} = \hspace{0.25em} e^{\xi_1 y_1 + \cdots + \xi_n y_n} = e^{\langle \xi, \mathbf{y} \rangle}$ in $\mathbbm{K} [[\tmmathbf{y}]] $. These series belong to the more general family $\mathcal{{POLYEXP}}$ of polynomial-exponential series $\sigma = \sum_{i = 1}^r \omega_i \tmmathbf{e}_{\xi_i} \in \mathbbm{K} [[\tmmathbf{y}]]$ with $\xi_i \in \mathbbm{K}^n, \omega_i \in \mathbbm{K} [\tmmathbf{y}]$. This set corresponds in $\mathbbm{K} [[\tmmathbf{z}]]$ to the set of series of the form \[ \sigma = \sum_{i = 1}^r \sum_{\alpha \in A_i} \frac{\omega_{i, \alpha} \tmmathbf{z}^{\alpha} \hspace{0.25em}}{\prod_{j = 1}^n (1 - \xi_{i, j} z_j)^{1 + \alpha_j}} \] with $\xi_i \in \mathbbm{K}^n, \omega_{i, \alpha} \in \mathbbm{K} \nocomma, \alpha \in A_i \subset \mathbbm{N}^n$ and $A_i$ finite. 
\begin{definition} For a subset $D \subset \mathbbm{K} [[\tmmathbf{y}]]$, the {\tmem{inverse system}} generated by $D$ is the vector space spanned by the elements $p \star \delta$ for $\delta \in D$, $p \in \mathbbm{K} [\tmmathbf{x}]$, that is, by the elements in $D$ and all their derivatives. \ For $\omega \in \mathbbm{K} [\tmmathbf{y}]$, we denote by ${\mu} (\omega)$ the dimension of the inverse system of $\omega$, generated by $\omega$ and all its derivatives. For $\sigma = \sum_{i = 1}^r \omega_i \tmmathbf{e}_{\xi_i} \in \mathcal{{POLYEXP}} (\tmmathbf{y})$, ${\mu} (\sigma) = \sum_{i = 1}^r {\mu} (\omega_i)$. \end{definition} \subsection{Hankel operators} The external product $\star$ allows us to define a Hankel operator as a multiplication operator by a dual element $\in \mathbbm{K} [\tmmathbf{x}]^{\ast}$: \begin{definition} The Hankel operator associated to an element $\sigma \in \mathbbm{K} [\tmmathbf{x}]^{\ast} =\mathbbm{K}^{\mathbbm{N}^n}$ is \begin{eqnarray*} H_{\sigma} : \mathbbm{K} [\tmmathbf{x}] & \rightarrow & \mathbbm{K} [\tmmathbf{x}]^{\ast}\\ p = \sum_{\beta \in B} p_{\beta} \tmmathbf{x}^{\beta} & \mapsto & p \star \sigma = \left( \sum_{\beta \in B} p_{\beta} \sigma_{\alpha + \beta} \right)_{\alpha \in \mathbbm{N}^n} . \end{eqnarray*} Its kernel is denoted $I_{\sigma}$. We say that the series $\sigma$ has finite rank $r \in \mathbbm{N}$ if $\tmop{rank} H_{\sigma} = r < \infty$. \end{definition} As $\forall p, q \in \mathbbm{K} [\tmmathbf{x}]$, \ $p q \star \sigma = p \star (q \star \sigma)$, we easily check that $I_{\sigma} = \ker H_{\sigma}$ is an ideal of $\mathbbm{K} [\tmmathbf{x} \mathbf{}]$ and that $\mathcal{A}_{\sigma} =\mathbbm{K} [\tmmathbf{x}] / I_{\sigma}$ is an algebra. 
Given a sequence $\sigma = (\sigma_{\alpha})_{\alpha \in \mathbbm{N}^n} \in \mathbbm{K}^{\mathbbm{N}^n}$, the kernel of $H_{\sigma}$ is the set of polynomials $p = \sum_{\beta \in B} p_{\beta} \tmmathbf{x}^{\beta}$ such that $\sum_{\beta \in B} p_{\beta} \sigma_{\alpha + \beta}=0$ for all $\alpha \in \mathbbm{N}^n$. This kernel is the set of {\tmem{linear recurrence relations}} of the sequence $\sigma = (\sigma_{\alpha})_{\alpha \in \mathbbm{N}^n}$. \begin{remark} The matrix of the operator $H_{\sigma}$ in the bases $(\tmmathbf{x}^{\alpha})_{\alpha \in \mathbbm{N}^n}$ and its dual basis $(\tmmathbf{z}^{\alpha})_{\alpha \in \mathbbm{N}^n}$ is \begin{eqnarray*} {}[H_{\sigma}] & = & (\sigma_{\alpha + \beta})_{\alpha, \beta \in \mathbbm{N}^n} = (\langle \sigma | \nobracket \tmmathbf{x}^{\alpha + \beta}_{\nocomma} \rangle)_{\alpha, \beta \in \mathbbm{N}^n} . \end{eqnarray*} \end{remark} The coefficients of $[H_{\sigma}]$ depend only on the sum of the multi-indices indexing the rows and columns, which explains why it is called a {\tmem{Hankel}} operator. \ In the reconstruction problem, we are dealing with truncated series with known coefficients $\sigma_{\alpha}$ for $\alpha$ in a subset $\tmmathbf{a}$ of $\mathbbm{N}^n$. This leads to the definition of truncated Hankel operators. \begin{definition} For \ two vector spaces $V, V' \subset \mathbbm{K} [\tmmathbf{x}]$ and \ $\sigma \in \langle V \cdot V' \rangle^{\ast} \subset \mathbbm{K} [\tmmathbf{x}]^{\ast}$, the {\tmem{truncated Hankel operator}} on $(V, V')$, denoted by $H_{\sigma}^{V, V'}$, is the following map: \begin{eqnarray*} H^{V, V'}_{\sigma} : V & \rightarrow & V'^{\ast} = \tmop{Hom}_{\mathbbm{K}} (V', \mathbbm{K})\\ p (\tmmathbf{x}) & \mapsto & p \star \sigma_{| V' \nobracket} . \end{eqnarray*} \end{definition} If $B = \{ b_1, \ldots, b_r \}$ (resp. $B' = \{ b_1', \ldots \nocomma, b_r' \}$) is a basis of $V$ (resp. 
$V'$), then the matrix of the operator $H_{\sigma}^{V, V'}$ in $B$ and the dual basis of $B'$ is \[ [H_{\sigma}^{B, B'}] = (\langle \sigma | \nobracket b_j b_i' \rangle)_{1 \leqslant i, j \leqslant r} . \] If $B$ and $B'$ are monomial sets, we obtain the so-called {\tmem{truncated moment matrix}} of $\sigma$: \begin{eqnarray*} {}[H_{\sigma}^{B, B'}] & = & (\sigma_{\beta + \beta'})_{\beta' \in B', \beta \in B} \end{eqnarray*} (identifying a monomial $\tmmathbf{x}^{\beta}$ with its exponent $\beta$). These structured matrices share with the classical univariate Hankel matrices many interesting properties (see e.g. in {\cite{mourrain_multivariate_2000}}). \subsection{Artinian Gorenstein algebra} A $\mathbbm{K}$-algebra $\mathcal{A}$ is {\tmem{Artinian}} if $\dim_{\mathbbm{K}} (\mathcal{A}) < \infty$. It can be represented as the quotient $\mathcal{A}=\mathbbm{K} [\tmmathbf{x}] / I$ of a polynomial ring $\mathbbm{K} [\tmmathbf{x}]$ by a (zero-dimension) ideal $I \subset \mathbbm{K} [\tmmathbf{x}]$. A classical result states that the quotient algebra $\mathcal{A} =\mathbbm{K} [\tmmathbf{x}] / I$ is finite dimensional, i.e. Artinian, iff $\mathcal{V}_{\bar{\mathbbm{K}}} (I)$ is finite, that is, $I$ defines a finite number of (isolated) points in $\bar{\mathbbm{K}}^n$ (see e.g. {\cite{cox_ideals_2015}}[Theorem 6] or {\cite{elkadi_introduction_2007}}[Theorem 4.3]). The dual $\mathcal{A}^{\ast} = \tmop{Hom}_{\mathbbm{K}} (\mathcal{A}, \mathbbm{K})$ of $\mathcal{A}=\mathbbm{K} [\tmmathbf{x}] / I$ is naturally identified with the sub-space \[ I^{\bot} = \{ \sigma \in \mathbbm{K} [\tmmathbf{x}]^{\ast} \mid \forall p \in I, \langle \sigma | \nobracket p \rangle = 0 \} . 
\] A {\tmem{Gorenstein}} algebra is defined as follows: \begin{definition} A $\mathbbm{K}$-algebra $\mathcal{A}$ is Gorenstein if $\exists \sigma \in \mathcal{A}^{\ast} = \tmop{Hom}_{\mathbbm{K}} (\mathcal{A}, \mathbbm{K})$ such that $\forall \rho \in \mathcal{A}^{\ast}, \exists a \in \mathcal{A}$ with $\rho = a \star \sigma$ and $a \star \sigma = 0$ implies $a = 0$. \end{definition} In other words, $\mathcal{A}=\mathbbm{K} [\tmmathbf{x}] / I$ is Gorenstein iff $\mathcal{A}^{\ast} = \{ p \star \sigma \mid p \in \mathbbm{K} [\tmmathbf{x}] \} = \tmop{im} H_{\sigma}$ and $p \star \sigma = 0$ implies $p \in I$. Equivalently, $\mathcal{A}=\mathbbm{K} [\tmmathbf{x}] / I$ is Gorenstein iff there exists $\sigma \in \mathbbm{K} [\tmmathbf{x}]^{\ast}$ such that we have the exact sequence: \begin{equation} 0 \rightarrow I \rightarrow \mathbbm{K} [\tmmathbf{x}] \xrightarrow{H_{\sigma}} \mathcal{A}^{\ast} \rightarrow 0 \label{eq:seq} \end{equation} so that $H_{\sigma}$ induces an isomorphism between $\mathcal{A}=\mathbbm{K} [\tmmathbf{x}] / I$ and $\mathcal{A}^{\ast}$. In other words, a Gorenstein algebra $\mathcal{A}$ is the quotient of a polynomial ring by the kernel of a Hankel operator, or equivalently by an ideal of recurrence relations of a multi-index sequence. An Artinian Gorenstein can thus be described by an element $\sigma \in \mathbbm{K} [\tmmathbf{x}]^{\ast}$, such that $\tmop{rank} H_{\sigma} = \dim \mathcal{A}^{\ast} = \dim \mathcal{A}$ is finite. In the following, we will assume that the Artinian Gorenstein algebra is given by such an element $\sigma \in \mathbbm{K} [\tmmathbf{x}]^{\ast} \equiv \mathbbm{K}^{\mathbbm{N}^n}$. The corresponding algebra will be $\mathcal{A}_{\sigma} =\mathbbm{K} [\tmmathbf{x}] / I_{\sigma}$ where $I_{\sigma} = \ker H_{\sigma}$. 
By a multivariate generalization of Kronecker's theorem {\cite{mourrain_polynomial-exponential_2016}}[Theorem 3.1], the sequences $\sigma$ such that $\tmop{rank} H_{\sigma} = r < \infty$ are the polynomial-exponential series $\sigma \in \mathcal{{POLYEXP}}$ with ${\mu} (\sigma) = r$. The aim of the method we are presenting is to compute the structure of the Artinian Gorenstein algebra $\mathcal{A}_{\sigma}$ from the first terms of the sequence $\sigma = (\sigma_{\alpha})_{\alpha \in \mathbbm{N}^n}$. We are going to determine bases of $\mathcal{A}_{\sigma}$ and generators of the ideal $I_{\sigma}$, from which we can deduce directly the multiplicative structure of $\mathcal{A}_{\sigma}$. The following lemma gives a simple way to test the linear independence in $\mathcal{A}_{\sigma}$ using truncated Hankel matrices (see {\cite{mourrain_polynomial-exponential_2016}}[Lemma 3.3]): \begin{lemma} \label{lem:basis} Let $\sigma\in \mathbbm{K} [\tmmathbf{x}]^{\ast}$, $B = \{ b_1, \ldots, b_r \}$, $B' = \{ b_1',\ldots,$ $ b_r' \} \subset \mathbbm{K} [\tmmathbf{x}]$. If the matrix $H_{\sigma}^{B, B'} = (\langle \sigma | \nobracket b_i b_j' \rangle)_{1 \leqslant i, j \leqslant r}$ is invertible, then $B$ (resp. $B'$) is linearly independent in $\mathcal{A}_{\sigma}$. \end{lemma} This lemma implies that if $\dim \mathcal{A}_{\sigma} = r < + \infty$, $| B | = | B' | = r = \dim \mathcal{A}_{\sigma}$ and $H_{\sigma}^{B, B'}$ is invertible, then $(\tmmathbf{x}^{\beta})_{\beta \in B}$ and $(\tmmathbf{x}^{\beta'})_{\beta' \in B'}$ are bases of $\mathcal{A}_{\sigma}$. Given a Hankel operator $H_{\sigma}$ of finite rank $r$, it is clear that the truncated operators will have at most rank $r$. 
We are going to use the so-called {\tmem{flat extension}} property, which gives conditions under which a truncated Hankel operator of rank $r$ can be extended to a Hankel operator of the same rank (see {\cite{laurent_generalized_2009}} and extensions {\cite{brachat_symmetric_2010}}, {\cite{bernardi_general_2013}}, {\cite{mourrain_polynomial-exponential_2016}}). \begin{theorem} \label{thm:flatext}Let $V, V' \subset \mathbbm{K} [\tmmathbf{x}]$ be vector spaces connected to $1$, such that $x_1, \ldots, x_n \in V$ and let $\sigma \in \langle V \cdot V' \rangle^{\ast}$. Let $B \subset V$, $B' \subset V'$ such that $B^+ \subset V, B'^+ \subset V'$. If $\tmop{rank} H_{\sigma}^{V, V'} = \tmop{rank} H_{\sigma}^{B, B'} = r$, then there is a unique extension $\tilde{\sigma} \in \mathbbm{K} [[\tmmathbf{y}]]$ such that $\tilde{\sigma}$ coincides with $\sigma$ on $\langle V \cdot V' \rangle$ and $\tmop{rank} H_{\tilde{\sigma}} = r$. In this case, $\tilde{\sigma} \in \mathcal{{POLYEXP}}$ with $r ={\mu} (\tilde{\sigma})$ and $I_{\tilde{\sigma}} = (\ker H_{\sigma}^{B^+, B'})$. \end{theorem} \subsection{Border bases} We recall briefly the definition of border basis and the main properties that we will need. Let $B$ be a monomial set of $\mathbbm{K} [\tmmathbf{x}]$. \begin{definition} A rewriting family $F$ for a (monomial) set $B$ is a set of polynomials $F = \{ f_i \}_{i \in \tmmathbf{i}} \subset \mathbbm{K} [\tmmathbf{x}]$ such that $f_i =\tmmathbf{x}^{\alpha_i} + b_i$ with $b_i \in \langle B \rangle$, $\alpha_i \in \partial B$, $\alpha_i \neq \alpha_j$ if $i \neq j$. The rewriting family $F$ is complete if $(\tmmathbf{x}^{\alpha_i})_{i \in \tmmathbf{i}} = \partial B$. 
\end{definition} The monomial $\tmmathbf{x}^{\alpha_i}$ is called the leading monomial of $f_i$ and denoted $\gamma (f_i) .$ \begin{definition} A family $F \subset \mathbbm{K} [\tmmathbf{x}]$ is a border basis with respect to $B$ if it is a complete rewriting family for $B$ such that $\mathbbm{K} [\tmmathbf{x}] = \langle B \rangle \oplus (F)$. \end{definition} This means that any element of $\mathbbm{K} [\tmmathbf{x}]$ can be projected along the ideal $I=(F)$ onto a unique element of $\langle B \rangle$. In other words, $B$ is a basis of the quotient algebra $\mathcal{A}=\mathbbm{K} [\tmmathbf{x}] / I$. Let $B^{[0]} = B$ and for $k \in \mathbbm{N}$, $B^{[k + 1]} = (B^{[k]})^+$. If $1 \in B$, then for any $p \in \mathbbm{K} [\tmmathbf{x}]$, there exists $k \in \mathbbm{N}$ such that $p \in \langle B^{[k]} \rangle$. For a complete rewriting family $F$ with respect to a monomial set $B$ containing $1$, a projection $\pi_F$ of $\mathbbm{K} [\tmmathbf{x}]$ on $\langle B \rangle$ can be defined recursively on the set of monomials $m$ of $\mathbbm{K} [\tmmathbf{x}]$ by \begin{itemize} \item if $m \in B$, $\pi_F (m) = m$; \item if $m \in \partial B$, $\pi_F (m) = m - f$ where $f$ is the (unique) polynomial in $F$ for which $\gamma (f) = m$; \item if $m \in B^{[k + 1]} - B^{[k]}$ for $k \geqslant 1$, there exists $m' \in B^{[k]}$ and $i_0 \in [1, n]$ such that $m = x_{i_0} m'$. Let $\pi_F (m) = \pi_F (x_{i_0} \pi_F (m'))$. \end{itemize} This map defines a projector from $\mathbbm{K} [\tmmathbf{x}]$ onto $\langle B \rangle$. The kernel of $\pi_F$ is contained in the ideal $(F)$. The family $F$ is a border basis iff $\ker (\pi_F) = (F)$. Checking that a complete rewriting family is a border basis reduces to checking commutation properties. This leads to efficient algorithms to compute a border basis. For more details, see {\cite{mourrain_generalized_2005}}, {\cite{mourrain_stable_2008}}, {\cite{mourrain_border_2012}}. 
A special case of border basis is when the leading term $\gamma (f)$ of $f \in F$ is the maximal monomial of $f$ for a monomial ordering $\succ$. Then $F$ is a Gr{\"o}bner basis of $I$ for this monomial ordering $\succ$. A border basis $F$ with respect to a monomial set $B$ gives directly the tables of multiplication $M_i$ by the variables $x_i$ in the basis $B$. For a monomial $b \in B$, $M_i (b) = \pi_F (x_i b) = x_i b - f$ with$f \in F$ such that $\gamma (f) = x_i b$ if $x_i b \in \partial B$ and $f = 0$ otherwise. \section{Border bases of series}\label{sec:5} Given the first terms $\sigma_{\alpha}$ for $\alpha \in \tmmathbf{a}$ of the sequence $\sigma = (\sigma_{\alpha})_{\alpha \in \mathbbm{N}^n} \in \mathbbm{K}^{\mathbbm{N}^n}$, where $\tmmathbf{a} \subset \mathbbm{N}^n$ is a finite set of exponents, we are going to compute a basis of $\mathcal{A}_{\sigma}$ and generators of $I_{\sigma}$. We assume that the monomial set $\tmmathbf{x}^{\tmmathbf{a}}=\{\tmmathbf{x}^{\alpha}, \alpha \in \tmmathbf{a}\}$ is connected to 1. \subsection{Orthogonal bases of $\mathcal{A}_{\sigma}$} An important step in the decomposition method consists in computing a basis $B$ of $\mathcal{A}_{\sigma}$. In this section, we describe how to compute a monomial basis $B = \{ \tmmathbf{x}^{\beta} \}$ and two other bases $\tmmathbf{p}= (p_{\beta})$ and $\tmmathbf{q}= (q_{\beta})$, which are pairwise orthogonal for the inner product $\langle \cdot, \cdot \rangle_{\sigma}$: \[ \langle p_{\beta}, q_{\beta'} \rangle_{\sigma} = \left\{ \begin{array}{ll} 1 & \tmop{if} \beta = \beta'\\ 0 & \tmop{otherwise} . \end{array} \right. \] To compute these pairwise orthogonal bases, we will use a projection process, similar to Gram-Schmidt orthogonalization process. The main difference is that we compute pairs $p_{\beta}, q_{\beta}$ of orthogonal polynomials. 
As the inner product $\langle \cdot, \cdot \rangle_{\sigma}$ may be isotropic, the two polynomials $p_{\beta}, q_{\beta}$ may not be equal, up to a scalar. For a polynomial $f$ and two families of polynomials $\tmmathbf{p}= [p_1, \ldots, p_l]$, $\tmmathbf{m}= [m_1, \ldots, m_l]$, we will use the following procedure $\tmop{proj} (f, \tmmathbf{p}, \tmmathbf{m})$. {\begin{algorithm}[H]\caption{\label{algo:proj}Orthogonal projection}{\tmstrong{Input:} $f \in \mathbbm{K} [\tmmathbf{x}]$, $\tmmathbf{p}= [p_1, \ldots, p_l]$ and $\tmmathbf{m}= [m_1, \ldots, m_l]$ such that $\langle p_i, m_j \rangle_{\sigma} = 0$ if $j < i$ \ and $\langle p_i, m_i \rangle_{\sigma} = 1$. \begin{itemizeminus} \item $g = f$; \item for $i$ in $1 \ldots l$ do $g \,-\!\!= \langle g, m_i \rangle_{\sigma} p_i$; \end{itemizeminus} {\tmstrong{Output:}} $g := \tmop{proj} (f, \tmmathbf{p}, \tmmathbf{m})$} \end{algorithm}} Algorithm \ref{algo:proj} corresponds to the Modified Gram-\-Schmidt algorithm, when the scalar product is definite positive. It is known to have a better numerical behavior than the direct Gram-Schmidt orthogonalization process {\cite{trefethen_numerical_1997}}[Lecture 8]. It computes the polynomial $\tmop{proj} (f, \tmmathbf{p}, \tmmathbf{m})$ characterized by the following lemma. \begin{lemma} \label{lem:proj}If $\langle p_i, m_j \rangle_{\sigma} = 0$ if $j < i$ \ and $\langle p_i, m_i \rangle_{\sigma} = 1$, there is a unique polynomial $g$ such that $g = f - \sum_{i = 1}^l \lambda_i p_i$ with $\lambda_i \in \mathbbm{K}$ and $\langle g, m_i \rangle_{\sigma} = 0$ for $i = 1, \ldots, l$. \end{lemma} \begin{proof} We prove by induction on the index $i$ of the loop that $g$ is orthogonal to $[m_1, \ldots, m_i]$. For $i = 1$, $g = f - \langle f, m_1 \rangle_{\sigma} p_1$ is such that $\langle g, m_1 \rangle_{\sigma} = \langle f, m_1 \rangle_{\sigma} - \langle f, m_1 \rangle_{\sigma} \langle p_1, m_1 \rangle_{\sigma}$ $ = 0$. If the property is true at step $k \leqslant l$, i.e. 
$\langle g, m_i \rangle_{\sigma} = 0$ for $i < k$, then $g' = g - \langle g, m_k \rangle_{\sigma} p_k$ is such that $\langle g', m_i \rangle_{\sigma} = \langle g, m_i \rangle_{\sigma} - \langle g, m_k \rangle_{\sigma} \langle p_k, m_i \rangle_{\sigma} = \langle g, m_i \rangle_{\sigma} = 0$ by induction hypothesis. By construction, $\langle g', m_k \rangle_{\sigma} = \langle g, m_k \rangle_{\sigma} - \langle g, m_k \rangle_{\sigma} \langle p_k, m_k \rangle_{\sigma} = 0$ and the induction hypothesis is true for $k$. As the matrix $(\langle p_j, m_i \rangle_{\sigma})_{1 \leqslant i, j \leqslant l}$ is invertible, there exists a unique polynomial of the form $g = f - \sum_{j = 1}^l \lambda_j p_j$, such that $\langle g, m_i \rangle_{\sigma} = 0$ for $i = 1, \ldots, l$, which concludes the proof of the lemma. \end{proof} Algorithm \ref{algo:1} for computing a border basis of $\mathcal{A}_{\sigma}$ proceeds inductively, starting from $\tmmathbf{p}= [], \tmmathbf{m}= [], \tmmathbf{b}= []$, extending the basis $\tmmathbf{p}$ with a new polynomial $p_{\alpha}$, orthogonal to the vector space spanned by $\tmmathbf{m}$ for the inner product $\langle \cdot, \cdot \rangle_{\sigma}$, extending $\tmmathbf{m}$ with a new monomial $m_{\alpha}$, such that $\langle p_{\alpha}, m_{\alpha} \rangle_{\sigma} = 1$ and $\langle p_{\beta}, m_{\alpha} \rangle_{\sigma} = 0$ for $\beta \in \tmmathbf{b}$ and extending $\tmmathbf{b}$ with $\alpha$. {\begin{algorithm}[ht]\caption{\label{algo:1}Artinian Gorenstein border basis }{\tmstrong{Input:} the coefficients $\sigma_{\alpha}$ of a series $\sigma \in \mathbbm{K} [[\tmmathbf{y}]]$ for $\alpha \in \tmmathbf{a} \subset \mathbbm{N}^n$ with $\tmmathbf{a}$ a finite set of exponents connected to $\mathbf{0}$. 
\begin{itemizeminus} \item Let $\tmmathbf{b} := []$; $\tmmathbf{c} := []$; $\tmmathbf{d}= []$; $\tmmathbf{n} := [\tmmathbf{0}]$; $\tmmathbf{s} := \tmmathbf{a}$; $\tmmathbf{t} := \tmmathbf{a}$; \item while $\tmmathbf{n} \neq \emptyset$ do \begin{itemizeminus} \item for each $\alpha \in \tmmathbf{n}$, \begin{enumeratealpha} \item $p_{\alpha} := \tmop{proj} (\tmmathbf{x}^{\alpha}, [p_{\beta}]_{\beta \in \tmmathbf{b}}, [m_{\beta}]_{\beta \in \tmmathbf{b}})$; \item find the first $\gamma \in \tmmathbf{t}$ such that $\tmmathbf{x}^{\gamma} p_{\alpha} \in \langle \tmmathbf{x}^{\tmmathbf{a}} \rangle$ and $\langle p_{\alpha}, \tmmathbf{x}^{\gamma} \rangle_{\sigma} \neq 0$; \item if such a $\gamma$ exists then \ \ let $m_{\alpha} := \frac{1}{\langle p_{\alpha}, \tmmathbf{x}^{\gamma} \rangle_{\sigma}} \tmmathbf{x}^{\gamma} ;$ \hspace{-2cm} {\small\tmverbatim{[optional]}} \ \ \ \ \,let $q_{\alpha} := \tmop{proj} (m_{\alpha}, [q_{\beta}]_{\beta \in \tmmathbf{b}}, [p_{\beta}]_{\beta \in \tmmathbf{b}})$; \ \ add $\alpha$ to $\tmmathbf{b}$; remove $\alpha$ from $\tmmathbf{s}$; \ \ add $\gamma$ to $\tmmathbf{c}$; remove $\gamma$ from $\tmmathbf{t}$; else \ \ let $k_{\alpha} = p_{\alpha}$; \ \ add {\tmem{}}$\alpha$ to $\tmmathbf{d}$; remove $\alpha$ from $\tmmathbf{s}$; end; \end{enumeratealpha} \item $\tmmathbf{n} := \tmop{next} (\tmmathbf{b}, \tmmathbf{d}, \tmmathbf{c}, \tmmathbf{s}) ;$ \end{itemizeminus} \end{itemizeminus} {\tmstrong{Output:}} \begin{itemizeminus} \item exponent sets $\tmmathbf{b}= [\beta_1, \ldots, \beta_r]$, $\tmmathbf{c}= [\gamma_1, \ldots, \gamma_r]$. \item bases $\tmmathbf{p}= [p_{\beta_i}]$, {\small\tmverbatim{[optional]}} $\tmmathbf{q}= [q_{\beta_i}]$. \item the relations $\tmmathbf{k}= [p_{\alpha}]_{\alpha \in \tmmathbf{d}}$ where $p_{\alpha} =\tmmathbf{x}^{\alpha} - \sum_{i = 1}^{r} \lambda_{\beta_i} p_{\beta_i}$ for $\alpha \in \tmmathbf{d}$. 
\end{itemizeminus}} \end{algorithm}} The main difference with Algorithm 4.1 in {\cite{mourrain_polynomial-exponential_2016}} is the projection procedure and the list of monomials $\tmmathbf{s}$, $\tmmathbf{t}$ used to generate new monomials and to perform the projections. The lists $\tmmathbf{b}, \tmmathbf{d}, \tmmathbf{c}, \tmmathbf{s}, \tmmathbf{t}$ are lists of exponents, identified with monomials. We verify that at each loop of the algorithm, the lists $\tmmathbf{b}$, $\tmmathbf{d}$ and $\tmmathbf{s}$ are disjoint and $\tmmathbf{b} \cup \tmmathbf{d} \cup \tmmathbf{s}=\tmmathbf{a}$. We also verify that $m_{\alpha}$ are monomials up to a scalar, that the set of their exponents is $\tmmathbf{c}$, that $\tmmathbf{c}$ and $\tmmathbf{t}$ are disjoint and that $\tmmathbf{c} \cup \tmmathbf{t}=\tmmathbf{a}$. The algorithm uses the function $\tmop{next} (\tmmathbf{b}, \tmmathbf{d}, \mathbf{c}, \tmmathbf{s})$, which computes the set of monomials $\ensuremath{\mathfrak{n}}$ in $\partial \tmmathbf{b} \cap \tmmathbf{s}$, which are not in $\tmmathbf{d}$ and such $\ensuremath{\mathfrak{n}}\cdot \mathbf{c}\subset \mathbf{a} =\tmmathbf{b} \cup \tmmathbf{d} \cup \tmmathbf{s}$. We denote by $\prec$ the order induced by the treatment of the monomials of $\tmmathbf{a}$ in the loops of the algorithm, so that the monomials treated at the $l^{\tmop{th}}$ loop are smaller than the monomials in $\tmmathbf{n}$ at the $(l + 1)^{\tmop{th}} $ loop. For $\alpha \in \tmmathbf{a}$, we denote by $\tmmathbf{b}_{\prec \alpha}$ the list of monomial exponents $\beta \in \tmmathbf{b}$ with $\beta \prec \alpha$ and by $B_{\prec \alpha}$ the vector space spanned by these monomials. For $\alpha \in \tmmathbf{b}$, let $\tmmathbf{b}_{\preccurlyeq \alpha} =\tmmathbf{b}_{\prec \alpha} \cup [\alpha]$. 
The following properties are also satisfied during this algorithm: \begin{lemma} \label{lem:semiortho}For $\alpha \in \tmmathbf{b}$, we have $\forall \beta \in \tmmathbf{b}_{\prec \alpha}$, $\langle p_{\alpha}, m_{\beta} \rangle_{\sigma} = 0$ and $\langle p_{\alpha}, m_{\alpha} \rangle_{\sigma} = 1$. For $\alpha \in \tmmathbf{d}$, $\langle p_{\alpha}, \tmmathbf{x}^{\gamma} \rangle_{\sigma} = 0$ for all $\gamma \in \tmmathbf{a}$ such that $\tmmathbf{x}^{\gamma} p_{\alpha} \in \langle \tmmathbf{x}^{\tmmathbf{a}} \rangle$. \end{lemma} \begin{proof} By construction, \[ p_{\alpha} = \tmop{proj} (\tmmathbf{x}^{\alpha}, [p_{_{\beta}}]_{\beta \in \tmmathbf{b}_{\prec \alpha}}, [m_{\beta}]_{\beta \in \tmmathbf{b}_{\prec \alpha}}) \] is orthogonal to $m_{\beta}$ for $\beta \in \tmmathbf{b}_{\prec \alpha}$. We consider two exclusive cases: $\alpha \in \tmmathbf{b}$ and $\alpha \in \tmmathbf{d}$. \begin{itemize} \item If $\alpha \in \tmmathbf{b}$, then there exists $\tmmathbf{x}^{\gamma} \in \tmmathbf{s}$ such that $\langle p_{\alpha}, \tmmathbf{x}^{\gamma} \rangle_{\sigma} \neq 0$. Thus $m_{\alpha} = \frac{1}{\langle p_{\alpha}, \tmmathbf{x}^{\gamma} \rangle_{\sigma}} \tmmathbf{x}^{\gamma}$ is such that $\langle p_{\alpha}, m_{\alpha} \rangle_{\sigma} = 1$. By construction, $\langle p_{\alpha}, m_{\beta} \rangle_{\sigma} = 0$ for $\beta \in \tmmathbf{b}_{\prec \alpha}$. \ \item If $\alpha \in \tmmathbf{d}$, then there is no $\tmmathbf{x}^{\gamma} \in \tmmathbf{s}$ such that $\langle p_{\alpha}, \tmmathbf{x}^{\gamma} \rangle_{\sigma} \neq 0$ and $\tmmathbf{x}^{\gamma} p_{\alpha} \in \langle \tmmathbf{x}^{\tmmathbf{a}} \rangle$. Thus $p_{\alpha}$ is orthogonal to $\tmmathbf{x}^{\gamma}$ for all $\gamma \in \tmmathbf{s}$ with $\tmmathbf{x}^{\gamma} p_{\alpha} \in \langle \tmmathbf{x}^{\tmmathbf{a}} \rangle$. By construction, $p_{\alpha}$ is orthogonal to $m_{\beta}$ for $\beta \in \tmmathbf{b}$. 
As $\tmmathbf{b} \cup \tmmathbf{s}=\tmmathbf{a}$, \ $\langle p_{\alpha}, \tmmathbf{x}^{\gamma} \rangle_{\sigma} = 0$ for all $\gamma \in \tmmathbf{a}$ such that $\tmmathbf{x}^{\gamma} p_{\alpha} \in \langle \tmmathbf{x}^{\tmmathbf{a}} \rangle$. \end{itemize} This concludes the proof of this lemma. \end{proof} \begin{lemma} \label{lem:ortho}For $\alpha \in \tmmathbf{b}$, $\langle m_{\beta} \rangle_{\beta \in \tmmathbf{b}_{\preccurlyeq \alpha}} = \langle q_{\beta} \rangle_{\beta \in \tmmathbf{b}_{\preccurlyeq \alpha}}$ and the bases $\tmmathbf{p}= [p_{\beta}]_{\beta \in \tmmathbf{b}_{\preccurlyeq \alpha}}, \tmmathbf{q}= [q_{\beta}]_{\beta \in \tmmathbf{b}_{\preccurlyeq \alpha}}$ are pairwise orthogonal. \end{lemma} \begin{proof} We prove it by induction on $\alpha$. If $\alpha =\tmmathbf{0}$ is not in {\tmem{{\tmstrong{b}}}}, then $\sigma_{\alpha} = 0$ for all $\alpha \in \tmmathbf{a}$, $\tmmathbf{p}$ and $\tmmathbf{q}$ are empty and the property is satisfied. If $\alpha =\tmmathbf{0}$ is in $\tmmathbf{b}$, then $p_{\alpha} = 1$ and $q_{\alpha} = m_{\alpha}$ is such that $\langle p_{\alpha}, m_{\alpha} \rangle_{\sigma} = 1$. The property is true for $\alpha =\tmmathbf{0}$. Suppose that it is true for all $\beta \in \tmmathbf{b}_{\prec \alpha}$. By construction, the polynomial $q_{\alpha} = \tmop{proj} (m_{\alpha}, [q_{_{\beta}}]_{\beta \in \tmmathbf{b}_{\prec \alpha}},$ $[p_{\beta}]_{\beta \in \tmmathbf{b}_{\prec \alpha}})$ is orthogonal to $p_{\beta}$ for $\beta \prec \alpha$. By induction hypothesis, $[p_{\beta}]_{\beta \in \tmmathbf{b}_{\prec \alpha}}, \tmmathbf{q}= [q_{\beta}]_{\beta \in \tmmathbf{b}_{\prec \alpha}}$ are pairwise orthogonal, thus \[ q_{\alpha} = m_{\alpha} - \sum_{\beta \in \tmmathbf{b}_{\prec \alpha}} \langle p_{\beta}, m_{\alpha} \rangle_{\sigma} q_{\beta} . 
\] By the induction hypothesis, we deduce that \begin{eqnarray*} \langle m_{\beta} \rangle_{\beta \in \tmmathbf{b}_{\preccurlyeq \alpha}} & = & \langle m_{\beta} \rangle_{\beta \in \tmmathbf{b}_{\prec \alpha}} + \langle m_{\alpha} \rangle = \langle q_{\beta} \rangle_{\beta \in \tmmathbf{b}_{\prec \alpha}} + \langle m_{\alpha} \rangle\\ & = & \langle q_{\beta} \rangle_{\beta \in \tmmathbf{b}_{\prec \alpha}} + \langle q_{\alpha} \rangle = \langle q_{\beta} \rangle_{\tmmathbf{b}_{\preccurlyeq \alpha}} . \end{eqnarray*} By Lemma \ref{lem:semiortho}, $p_{\alpha}$ is orthogonal to $m_{\beta}$ for $\beta \in \tmmathbf{b}_{\prec \alpha}$ and thus to $q_{\beta}$ for $\beta \in \tmmathbf{b}_{\prec \alpha}$. We deduce that \begin{eqnarray*} \langle p_{\alpha}, q_{\alpha} \rangle_{\sigma} &=& \langle p_{\alpha}, m_{\alpha} \rangle_{\sigma} - \sum_{\beta \in \tmmathbf{b}_{\prec \alpha}} \langle p_{\beta}, m_{\alpha} \rangle_{\sigma} \langle p_{\alpha}, q_{\beta} \rangle_{\sigma} \\&=& \langle p_{\alpha}, m_{\alpha} \rangle_{\sigma} = 1. \end{eqnarray*} This shows that $[p_{\beta}]_{\beta \in \tmmathbf{b}_{\preccurlyeq \alpha}}$ and $\tmmathbf{q}= [q_{\beta}]_{\beta \in \tmmathbf{b}_{\preccurlyeq \alpha}}$ are pairwise orthogonal and concludes the proof by induction. \end{proof} \begin{lemma} \label{lem:leadingterm}At the $l^{\tmop{th}}$ loop of the algorithm, the polynomials $p_{\alpha}$ for $\alpha \in \tmmathbf{n}$ are of the form $p_{\alpha} =\tmmathbf{x}^{\alpha} + b_{\alpha}$ with $b_{\alpha} \in B_{\prec \alpha}$. \end{lemma} \begin{proof} We prove by induction on the loop index $l$ that we have \ $p_{\alpha} =\tmmathbf{x}^{\alpha} + b_{\alpha}$ with $b_{\alpha} \in B_{\prec \alpha}$. The property is clearly true for $l = 0$, $\alpha =\tmmathbf{0}$ and $p_{\alpha} = 1 =\tmmathbf{x}^{\tmmathbf{0}}$. Suppose that it is true for any $l' < l$ and consider the $l^{\tmop{th}}$ loop of the algorithm. 
The polynomial $p_{\alpha}$ is constructed by projection of $\tmmathbf{x}^{\alpha}$ on $\langle p_{\beta} \rangle_{\beta \in \tmmathbf{b}}$ orthogonally to $\langle m_{\beta} \rangle_{\beta \in \tmmathbf{b}}$ where $\tmmathbf{b}=\tmmathbf{b}_{\prec \alpha}$. By induction hypothesis, $p_{\beta} =\tmmathbf{x}^{\beta} + b_{\beta}$ with $b_{\beta} \in B_{\prec \beta} \subset B_{\prec \alpha}$. Then by Lemma \ref{lem:proj}, we have \[ p_{\alpha} =\tmmathbf{x}^{\alpha} + \sum_{\beta \prec \alpha} \lambda_{\beta} p_{\beta} =\tmmathbf{x}^{\alpha} + b_{\alpha} \] with $\lambda_{\beta} \in \mathbbm{K}$, $b_{\alpha} \in B_{\prec \alpha}$. Thus, the induction hypothesis is true for $l$, which concludes the proof. \end{proof} \subsection{Quotient algebra structure} We now show that the algorithm outputs a border basis of an Artinian Gorenstein algebra $\mathcal{A}_{\tilde{\sigma}}$ for an extension $\tilde{\sigma}$ of $\sigma$, when all the border relations are computed, that is, when $\tmmathbf{d}= \partial \tmmathbf{b}$. \begin{theorem} \label{thm:flatextalgo}Let $\tmmathbf{b}= [\beta_1, \ldots, \beta_r]$, $\tmmathbf{c}= [\gamma_1, \ldots, \gamma_r]$, $\tmmathbf{p}= [p_{\beta_1}, \ldots, p_{\beta_r}]$, $\tmmathbf{q}= [q_{\beta_1}, \ldots, q_{\beta_r}]$ and $\tmmathbf{k}= [p_{\alpha_1}, \ldots, p_{\alpha_s}]$ be the output of Algorithm \ref{algo:1}. Let $V = \langle \tmmathbf{x}^{\tmmathbf{b}^+} \rangle$. 
If $\tmmathbf{d}= \partial \tmmathbf{b}$ and $\tmmathbf{c}^+ \subset \tmmathbf{b}'$ connected to $1$ such that $\tmmathbf{x}^{\tmmathbf{b}^+} \cdot \tmmathbf{x}^{\tmmathbf{b}'} =\tmmathbf{x}^{\tmmathbf{a}}$ then $\sigma$ coincides on $\langle \tmmathbf{x}^{\tmmathbf{a}} \rangle$ with a series $\tilde{\sigma} \in \mathbbm{K} [[\tmmathbf{y}]]$ such that \begin{itemize} \item $\tmop{rank} H_{\tilde{\sigma}} = r$, \item $(\tmmathbf{p}, \tmmathbf{q})$ are pairwise orthogonal bases of $\mathcal{A}_{\tilde{\sigma}}$ for the inner product $\langle \cdot, \cdot \rangle_{\tilde{\sigma}}$, \item The family $\tmmathbf{k}= \{ p_{\alpha}, \alpha \in \partial \tmmathbf{b} \}$ is a border basis of the ideal $I_{\tilde{\sigma}}$, with respect to $\tmmathbf{x}^{\tmmathbf{b}}$. \item The matrix of multiplication by $x_k$ in the basis $\tmmathbf{p}$ (resp. {\tmem{{\tmstrong{q}}}}) of $\mathcal{A}_{\tilde{\sigma}}$ is $M_k := (\langle \sigma | x_k p_{\beta_j} q_{\beta_i} \rangle)_{1 \leqslant i, j \leqslant r} \nobracket$ (resp. $M_k^t$). \end{itemize} \end{theorem} \begin{proof} By construction, $\tmmathbf{x}^{\tmmathbf{b}^+}$ is connected to $1$. Let $V = \langle \tmmathbf{x}^{\tmmathbf{b}^+} \rangle$ and $V' = \langle \tmmathbf{x}^{\tmmathbf{b}'} \rangle$. As $\tmmathbf{b}^+ =\tmmathbf{b} \cup \tmmathbf{d}$, a basis of $V$ is formed by the monomials $\tmmathbf{x}^{\tmmathbf{b}}$ and the polynomials $p_{\alpha} =\tmmathbf{x}^{\alpha} + b_{\alpha}$ with $b_{\alpha} \in \langle \tmmathbf{x}^{\tmmathbf{b}} \rangle$ for $\alpha \in \tmmathbf{d}$. The matrix of $H_{\sigma}^{V, V'}$ in this basis of $V$ and a basis of $V',$ which first elements are $m_{\beta_1}, \ldots, m_{\beta_r}$, is of the form \[ H_{\sigma}^{V, V'} = \left(\begin{array}{cc} L_r & 0\\ \ast & 0 \end{array}\right) \] where $L_r$ is a lower triangular invertible matrix of size $r$. The kernel of $H_{\sigma}^{V, V'}$ is generated by the polynomials $p_{\alpha}$ for $\alpha \in \tmmathbf{d}$. 
By Theorem \ref{thm:flatext}, $\sigma$ coincides on $V \cdot V' = \langle \tmmathbf{x}^{\tmmathbf{a}} \rangle$ with a series $\tilde{\sigma}$ such that $\tmmathbf{x}^{\tmmathbf{b}}$ is a basis of $\mathcal{A}_{\tilde{\sigma}} =\mathbbm{K} [\tmmathbf{x}] / I_{\tilde{\sigma}}$ and $I_{\tilde{\sigma}} = (\ker H_{\tilde{\sigma}}^{V, V'}) = (p_{\alpha})_{\alpha \in \tmmathbf{d}}$. By Lemma \ref{lem:leadingterm}, $p_{\alpha} =\tmmathbf{x}^{\alpha} + b_{\alpha}$ with $\alpha \in \partial \tmmathbf{b}$ and $b_{\alpha} \in \langle \tmmathbf{x}^{\tmmathbf{b}} \rangle$. Thus $(p_{\alpha})_{\alpha \in \partial \tmmathbf{b}}$ is a border basis with respect to $\tmmathbf{x}^{\tmmathbf{b}}$ for the ideal $I_{\tilde{\sigma}}$, since $\tmmathbf{x}^{\tmmathbf{b}}$ is a basis of $\mathcal{A}_{\tilde{\sigma}}$. This shows that $\tmop{rank} H_{\tilde{\sigma}} = \dim \mathcal{A}_{\tilde{\sigma}} = | \tmmathbf{b} | = r$. By Lemma \ref{lem:ortho}, $(\tmmathbf{p}, \tmmathbf{q})$ are pairwise orthogonal for the inner product $\langle \cdot, \cdot \rangle_{\sigma}$, which coincides with $\langle \cdot, \cdot \rangle_{\tilde{\sigma}}$ on $\langle \tmmathbf{x}^{\tmmathbf{a}} \rangle$. Thus they are pairwise orthogonal bases of $\mathcal{A}_{\tilde{\sigma}}$ for the inner product $\langle \cdot, \cdot \rangle_{\tilde{\sigma}}$. As we have $x_k p_{\beta_j} \equiv \sum_{i = 1}^{r} \langle x_k p_{\beta_j} \nobracket, q_{\beta_i} \rangle_{\sigma} \nobracket p_{\beta_i}$, the matrix of multiplication by $x_k$ in the basis $\tmmathbf{p}$ of $\mathcal{A}_{\tilde{\sigma}}$ is $$M_k := (\langle x_k p_{\beta_j} \nobracket, q_{\beta_i} \rangle_{\sigma} \nobracket)_{1 \leqslant i, j \leqslant r} = (\langle \sigma | x_k p_{\beta_j} q_{\beta_i} \rangle)_{1 \leqslant i, j \leqslant r}. 
$$ Exchanging the role of {\tmstrong{p}} and {\tmstrong{q}}, we obtain $M_k^t$ for the matrix of multiplication by $x_k$ in the basis {\tmem{{\tmstrong{q}}}}. \end{proof} \begin{lemma} If $\prec$ is a monomial ordering and if at the end of the algorithm $\tmmathbf{d}= \partial \tmmathbf{b}$ and $\tmmathbf{c}^+ \subset \tmmathbf{b}'$ connected to $1$ with $\tmmathbf{x}^{\tmmathbf{b}^+} \cdot \tmmathbf{x}^{\tmmathbf{b}'} =\tmmathbf{x}^{\tmmathbf{a}}$, then $\tmmathbf{b}=\tmmathbf{c}$ and $\tmmathbf{k}$ is a Gr{\"o}bner basis of the ideal $I_{\sigma}$ for the monomial ordering $\prec$. \end{lemma} \begin{proof} If $\prec$ is a monomial ordering, then the polynomials $p_{\alpha} =\tmmathbf{x}^{\alpha} + b_{\alpha}$, $\alpha \in \partial \tmmathbf{b}$ are constructed in such a way that their leading term is $\tmmathbf{x}^{\alpha}$. Therefore the border basis $\tmmathbf{k}= (p_{\alpha})_{\alpha \in \partial \tmmathbf{b}}$ is also a Gr{\"o}bner basis. By construction, $\tmmathbf{c}$ is the set of monomials $\gamma \in \tmmathbf{a}$ such that $\langle p_{\beta}, \tmmathbf{x}^{\gamma} \rangle_{\sigma} \neq 0$ for some $\beta \in \tmmathbf{b}$. Suppose that $\gamma \in \tmmathbf{c}$ is not in $\tmmathbf{b}$. Then $\tmmathbf{x}^{\gamma} \in (\tmmathbf{x}^{\tmmathbf{d}})$ and there is $\delta \in \tmmathbf{d}$ and $\gamma' \in \tmmathbf{a}$ such that $\gamma = \delta + \gamma'$. As $p_{\delta} \in \tmmathbf{k}$, we have $\langle p_{\delta}, \tmmathbf{x}^{\alpha} \rangle_{\sigma} = 0$ for $\alpha \in \tmmathbf{a}$ such that $p_{\delta} \tmmathbf{x}^{\alpha} \in \langle \tmmathbf{x}^{\tmmathbf{a}} \rangle$. By Lemma \ref{lem:leadingterm}, $p_{\delta} =\tmmathbf{x}^{\delta} + b_{\delta}$ with $b_{\delta} \in B_{\prec \delta}$ and $\tmmathbf{x}^{\delta} \succ b_{\delta}$. 
\ \[ \langle p_{\beta}, \tmmathbf{x}^{\gamma} \rangle_{\sigma} = \langle p_{\beta}, \tmmathbf{x}^{\delta} \tmmathbf{x}^{\gamma'} \rangle_{\sigma} = \langle p_{\beta}, p_{\delta} \tmmathbf{x}^{\gamma'} \rangle_{\sigma} - \langle p_{\beta}, b_{\delta} \tmmathbf{x}^{\gamma'} \rangle_{\sigma} . \] We have $\langle p_{\beta}, p_{\delta} \tmmathbf{x}^{\gamma'} \rangle_{\sigma} = \langle p_{\delta}, p_{\beta} \tmmathbf{x}^{\gamma'} \rangle_{\sigma} = 0$ since $p_{\delta} \in \tmmathbf{k}$ and $p_{\delta} p_{\beta} \tmmathbf{x}^{\gamma'} \in \langle \tmmathbf{x}^{\tmmathbf{a}} \rangle$. As $\gamma$ is the first monomial of $\tmmathbf{a}$ such that $\langle p_{\beta}, \tmmathbf{x}^{\gamma} \rangle_{\sigma} \neq 0$ and $b_{\delta} \tmmathbf{x}^{\gamma'} \prec \tmmathbf{x}^{\delta + \gamma'} =\tmmathbf{x}^{\gamma}$, we have $\langle p_{\beta}, b_{\delta} \tmmathbf{x}^{\gamma'} \rangle_{\sigma} = 0$, which implies that $\langle p_{\beta}, \tmmathbf{x}^{\gamma} \rangle_{\sigma} = 0$. This is in contradiction with the hypothesis $\langle p_{\beta}, \tmmathbf{x}^{\gamma} \rangle_{\sigma} \neq 0$, therefore $\gamma \in \tmmathbf{b}$. We deduce that $\tmmathbf{c} \subset \tmmathbf{b}$ and the equality holds since the two sets have the same cardinality. \end{proof} Notice that to construct a minimal reduced Gr{\"o}bner basis of $I_{\tilde{\sigma}}$ for the monomial ordering $\prec$, it suffices to keep the elements $p_{\alpha} \in \tmmathbf{k}$ with $\alpha$ minimal for the component-wise partial ordering. \subsection{Complexity} Let $s = | \tmmathbf{a} |$ and $r = | \tmmathbf{b} |$, $\delta = | \partial \tmmathbf{b} |$. As $\tmmathbf{b} \subset \tmmathbf{a}$ and the monomials in $\partial \tmmathbf{b}$ are the product of a monomial in $\tmmathbf{b}$ by one of the variables $x_1, \ldots, x_n$, we have $r \leqslant s$ and $\delta \leqslant n r$. 
\begin{proposition} \label{prop:complexity}The complexity of the algorithm to compute the bases $\tmmathbf{p}$ and $\tmmathbf{q}$ is $\mathcal{O} ((r + \delta) r s)$. \end{proposition} \begin{proof} At each step, the computation of $p_{\alpha}$ (resp. $q_{\alpha}$) requires $\mathcal{O} (r^2)$ arithmetic operations, since the support of the polynomials $p_{\beta}$, $q_{\beta}$ $(\beta \in \tmmathbf{b})$ is in $\tmmathbf{b}$ and $| \tmmathbf{b} | \leqslant r$. Computing $\langle \tmmathbf{x}^{\gamma}, p_{\alpha} \rangle_{\sigma}$ for all $\gamma \in \tmmathbf{t}$ requires $\mathcal{O} (r s)$ arithmetic operations. As the number of polynomials $p_{\alpha}$ is at most $| \tmmathbf{b}^+ | = r + \delta$, the total cost for computing $\tmmathbf{p}$ and $\tmmathbf{q}$ is thus in $\mathcal{O} ((r + \delta) (r^2 + r s)) =\mathcal{O} ((r + \delta) \noplus r s)$. \end{proof} As $\delta \leqslant n r$, the complexity of this algorithm is in $\mathcal{O} (n r^2 s)$. The algorithm is connected to the {\tmem{Berlekamp-Massey-\-Sakata}} algorithm, which computes a Gr{\"o}bner basis for a monomial ordering $\prec$. In the BMS algorithm, a minimal set $\mathcal{F}$ of recurrence polynomials valid for the monomials smaller that a given monomial $m$ is computed. A monomial basis $\tmmathbf{b}^{\ast}$ generated by all the divisors of some corner elements is constructed. The successor $m^+$ of the monomial $m$ for the monomial ordering $\prec$ is considered and the family $\mathcal{F}$ of valid recurrence polynomials is updated by computing their discrepancy at the monomial $m^+$ and by cancelling this discrepancy, if necessary, by combination with one lower polynomial {\cite{saints_algebraic-geometric_1995}}. Let $\delta$ be the size of the border $\partial \tmmathbf{b}^{\ast}$ of the monomial basis $\tmmathbf{b}^{\ast}$computed by BMS algorithm. At each update, there are at most $\delta$ polynomials in $\mathcal{F}$. Let $s'$ be the maximum number of their non-zero terms. 
Then the update of $\mathcal{F}$ requires $\mathcal{O} (\delta s')$ arithmetic operations. The number of updates is bounded by the number $r + \delta$ of monomials in $\tmmathbf{b}^+$. Checking the discrepancy of a polynomial in $\mathcal{F}$ for all the monomials in $\tmmathbf{x}^{\tmmathbf{a}}$ requires $\mathcal{O} (s' s)$ arithmetic operations. Thus, the total cost of the BMS algorithm is in $\mathcal{O} ((r + \delta) \delta s' + \delta s' s)$. As the output polynomials in the Gr{\"o}bner basis are not necessarily reduced, the maximal number of terms $s' \leqslant s$ can be of the same order as $s$. Thus the complete complexity of the BMS algorithm is in $\mathcal{O} (\delta s^2) =\mathcal{O} (n r s^2)$, which is an order of magnitude larger than the bound of Proposition \ref{prop:complexity}, assuming that $r\ll s$. The method presented in {\cite{berthomieu_linear_2015}} for computing a Gr{\"o}bner basis of the recurrence polynomials computes the rank of a Hankel matrix of size the number $\tilde{s}$ of monomials of degree $\leqslant d$ for a bound $d$ on the degree of the recurrence relations. It deduces a monomial basis $\tmmathbf{b}$ stable by division and obtains the valid recurrence relations for the border monomials by solving a linear Hankel system of size $r$. Thus the complexity is in $\mathcal{O} (\delta r^{\omega} + \tilde{s}^{\omega})$ where $2.3 \leqslant \omega \leqslant 3$. It is also larger than the bound of Proposition \ref{prop:complexity}. This bound could be improved by exploiting the rank displacement of the structured matrices involved in this method \cite{bostan_solving_2008}, but the known bounds on the displacement rank of the matrices involved in the computation do not improve the bound of Proposition \ref{prop:complexity}.
\section{Examples \nopunct} \subsection{Multivariate Prony method} Given a function $h (u_1, \ldots, u_n) = \sum_{i = 1}^r \omega_i e^{\zeta_{i, 1} u_1 + \cdots + \zeta_{i, n} u_n}$, the problem is to compute its decomposition as a weighted sum of exponentials, from values of $h$. The method proposed by G. Riche de Prony for sums of univariate exponential functions consists in sampling the function at regularly spaced values {\cite{baron_de_prony_essai_1795}}. In the multivariate extension of this method, the function is sampled on a grid in $\mathbbm{R}^n$, for instance $\mathbbm{N}^n$. The decomposition is computed from a subset of the multi-index sequence of evaluations $\sigma_{\alpha} = h (\alpha_1, \ldots, \alpha_n)$ for $\alpha = (\alpha_1, \ldots, \alpha_n) \in \mathbbm{N}^n$. The ideal $I_{\sigma}$ associated to this sequence is the ideal defining the points $\xi_i = (e^{\zeta_{i, 1}}, \ldots, e^{\zeta_{i, n}})$. To compute this decomposition, we apply the border basis algorithm to the sequence $\sigma_{\alpha}$ for $| \alpha | \leqslant d$ with $d$ high enough, and obtain a border basis of the ideal $I_{\sigma}$ defining the points $\xi_1, \ldots, \xi_r \in \mathbbm{K}^n$, a basis of $\mathcal{A}_{\sigma}$ and the tables of multiplication in this basis. By applying the decomposition algorithm in {\cite{mourrain_polynomial-exponential_2016}}, we deduce the points $\xi_i = (e^{\zeta_{i, 1}}, \ldots, e^{\zeta_{i, n}})$. Taking the log of their coordinates $\log(\xi_{i,j})=\zeta_{i,j}$ yields the coordinates of the frequencies $\zeta_i$. \subsection{Fast decoding of algebraic-geometric codes} Let $\mathbbm{K}$ be a finite field. We consider an algebraic-geometric code $C$ obtained by evaluation of polynomials in $\mathbbm{K} [x_1, \ldots, x_n]$ of degree $\leqslant d$ at points $\xi_1, \ldots, \xi_l \in \mathbbm{K}^n$. It is a finite vector space in $\mathbbm{K}^l$.
We use the words of the orthogonal code $C^{\perp} = \{ (m_1, \ldots, m_l) \mid m \cdot c = m_1 c_1 + \cdots + m_l c_l = 0, \ \forall c \in C \}$ for the transmission of information. Suppose that an error $\omega = (\omega_1, \ldots, \omega_l)$ occurs in the transmission of a message $m = (m_1, \ldots, m_l)$ so that the message $m^{\ast} = m + \omega$ is received. Let $\omega_{i_1}, \ldots, \omega_{i_r}$ be the non-zero coefficients of the error vector $\omega$. To correct the received message $m^{\ast}$, we use the moments or syndromes $\sigma_{\alpha} = (\xi_1^{\alpha}, \ldots, \xi_l^{\alpha}) \cdot m^{\ast} = (\xi_1^{\alpha}, \ldots, \xi_l^{\alpha}) \cdot \omega = \sum_{j = 1}^r \omega_{i_j} \xi_{i_j}^{\alpha}$ for $\alpha = (\alpha_1, \ldots, \alpha_n) \in \mathbbm{N}^n$ with $| \alpha | \leqslant d$. We compute generators of the set of error-locator polynomials, that is, the polynomials vanishing at the points $\xi_{i_1}, \ldots, \xi_{i_r}$ and deduce the weights or errors $\omega_{i_j}$ by solving the Vandermonde system \[ [\xi_{i_j}^{\alpha}]_{| \alpha | \leqslant d, 1 \leqslant j \leqslant r} (\omega_{i_j}) = (\sigma_{\alpha})_{| \alpha | \leqslant d} . \] The points $\xi_{i_j}$ correspond to the position of the errors and $\omega_{i_j}$ to their amplitude. By applying the border basis algorithm, we obtain a border basis of the ideal of error-locator polynomials, from which we deduce the position and amplitude of the errors. \subsection{Sparse interpolation} Given a sparse polynomial $h (u_1, \ldots, u_n) = \sum_{i = 1}^r \omega_i u_1^{\gamma_{i, 1}} $ $\cdots$ $ u_n^{\gamma_{i, n}}$, which is a weighted sum of $r$ monomials with non-zero weights $\omega_i \in \mathbbm{K}$, the problem is to compute the exponents $(\gamma_{i, 1}, \ldots, \gamma_{i, n}) \in \mathbbm{N}^n$ of the monomials and the weights $\omega_i$, from evaluations of the blackbox function $h$.
\ The approach, proposed initially in {\cite{ben-or_deterministic_1988}}, {\cite{zippel_interpolating_1990}}, consists in evaluating the function at points of the form $(\zeta_1^k, \ldots, \zeta_n^k)$ for some values of $\zeta_1, \ldots \zeta_n \in \mathbbm{K}$ and to apply univariate Prony-type methods or Berlekamp-Massey algorithms to the sequence $\sigma_k = h (\zeta_1^k, \ldots, \zeta_n^k)$, for $k \in \mathbbm{N}$. The approach can be extended to multi-index sequences $(\sigma_{\alpha})_{\alpha \in \mathbbm{N}^n}$ by computing the terms \[ \sigma_{\alpha} = h (\zeta_1^{\alpha_1}, \ldots, \zeta_n^{\alpha_n}) = \sum_{i = 1}^r \omega_i (\zeta_1^{\gamma_{i, 1}})^{\alpha_1} \cdots (\zeta_n^{\gamma_{i, n}})^{\alpha_n} \] for $\alpha = (\alpha_1, \ldots, \alpha_n) \in \mathbbm{N}^n$. \ It can also be extended to sequences constructed from polylog functions {\cite{mourrain_polynomial-exponential_2016}}. By applying the border basis algorithm to the multi-index sequence $\sigma_{\alpha} = h (\zeta_1^{\alpha_1}, \ldots, \zeta_n^{\alpha_n})$ for $| \alpha | \leqslant d$ with $d \in \mathbbm{N}$ high enough, we obtain generators of the ideal $I_{\sigma}$ defining the points $\xi_i = (\zeta_1^{\gamma_{i, 1}}, \ldots, \zeta_n^{\gamma_{i, n}})$ and deduce the weights $\omega_i$, $i = 1, \ldots, r$. By computing the log of the coordinates of the points $\xi_i$, we deduce the exponent vectors $\gamma_i = (\gamma_{i, 1}, \ldots, \gamma_{i, n}) \in \mathbbm{N}^n$ for $i = 1, \ldots, r$. \subsection{Tensor decomposition} Given a homogeneous polynomial \[ t = \sum_{\alpha_0 + \alpha_1 + \cdots + \alpha_n = d} t_{\alpha} \binom{d}{\alpha} x_0^{\alpha_0} x_1^{\alpha_1} \ldots x_n^{\alpha_n} \] of degree $d \in \mathbbm{N}$ with $t_{\alpha} \in \mathbbm{K}$, $\binom{d}{\alpha} = \frac{d!}{\alpha_0 ! 
\cdots \alpha_n !}$, we want to compute a decomposition of $t$ as a sum of powers of linear forms: \begin{equation} t = \sum_{i = 1}^r \omega_i (\xi_{i, 0} x_0 + \xi_{i, 1} x_1 + \cdots + \xi_{i, n} x_n)^d \label{eq:tensor} \end{equation} with a minimal $r$, $\omega_i \neq 0$ and $(\xi_{i, 0}, \ldots, \xi_{i, n}) \neq 0$. By a change of variables, we can assume that $\xi_{i, 0} \neq 0$ in such a decomposition, and by dividing each linear form by $\xi_{i, 0}$ and multiplying $\omega_i$ by $\xi_{i, 0}^d$, we can even assume that $\xi_{i, 0} = 1$. Then by expansion of the powers of the linear forms and by identification of the coefficients, we obtain \[ \sigma_{\alpha} : = t_{(d - \alpha_1 - \cdots - \alpha_n, \alpha_1, \ldots, \alpha_n)} = \sum_{i = 1}^r \omega_i \xi_{i, 1}^{\alpha_1} \cdots \xi_{i, n}^{\alpha_n} = \sum_{i = 1}^r \omega_i \xi_i^{\alpha} \] for $\alpha = (\alpha_1, \ldots, \alpha_n) \in \mathbbm{N}^n$ with $| \alpha | \leqslant d$. We apply the border basis algorithm to this sequence, in order to obtain generators of the ideal $I_{\sigma}$ defining the points $\xi_1, \ldots, \xi_r \in \mathbbm{K}^n$ and providing the weights $\omega_i$. If the number of terms $r$ is small enough compared to the number of terms $\sigma_{\alpha}$, then the set of border relations is complete and it is possible to compute the decomposition (\ref{eq:tensor}). \subsection{Vanishing ideal of points} Given a set of points $\Xi = \{ \xi_1, \ldots, \xi_r \} \subset \mathbbm{K}^n$, we want to compute polynomials defining these points, that is, a set of generators of the ideal of polynomials vanishing on $\Xi$. For that purpose, we choose non-zero weights $\omega_i \in \mathbbm{K}$, a degree $d \in \mathbbm{N}$ and we compute the sequence of moments $\sigma_{\alpha} = \sum_{i = 1}^r \omega_i \xi_i^{\alpha}$ for $| \alpha | \leqslant d$.
The generating series $\sigma$ associated to these moments define an Artinian Gorenstein algebra $\mathcal{A}_{\sigma} =\mathbbm{K} [\tmmathbf{x}] / I_{\sigma}$, where $I_{\sigma}$ is the ideal of polynomials vanishing $\Xi$ {\cite{mourrain_polynomial-exponential_2016}}. This ideal $I_{\sigma}$ defines the points $\xi_i$ \ with multiplicity $1$. The idempotents $\{ \tmmathbf{u}_i \}_{i = 1 \ldots r}$ associated to the points $\Xi$ form a family of interpolation polynomials at these points: $\tmmathbf{u}_i (\xi_j) = 0$ if $i \neq j$ and $\tmmathbf{u}_i (\xi_i) = 1$. They are the common eigenvectors of the multiplication operators in $\mathcal{A}_{\sigma}$. By applying the border basis algorithm to the sequence $\sigma_{\alpha}$ for $| \alpha | \leqslant d$ with $d$ high enough, we obtain generators of the ideal $I_{\sigma}$ defining the points $\xi_1, \ldots, \xi_r \in \mathbbm{K}^n$, a basis of $\mathcal{A}_{\sigma}$ and the tables of multiplication in this basis. By computing the eigenvectors of a generic combination of the multiplication tables by a variable, we obtain a family of interpolation polynomials at the roots $\Xi$. \subsection{Benchmarks} We present some experimentations of an implementation of Algorithm \ref{algo:1}\footnote{available at {\url{https://gitlab.inria.fr/mourrain/PolyExp}}} in the programming language \textsc{Julia}\footnote{\url{https://julialang.org/}}. The arithmetic operations are done in the finite field $\mathbbm{Z}/ 32003\mathbbm{Z}$. We choose $r$ random points $\xi_i$ with $n$ coordinates in $\mathbbm{Z}/ 32003\mathbbm{Z}$, take the sequence of moments $\sigma_{\alpha} = \sum_{i = 1}^r \xi_i^{\alpha}$ up for $| \alpha | \leqslant d$ \ with weights equal to $1$. Figure 4.6 shows the timing (in sec.) to compute the border basis, checking the validity of the recurrence relations up to degree $d$. The computation is done on a MacOS El Capitan, 2.8 GHz Intel Core i7, 16 Go. 
\begin{center} {\includegraphics[height=4.5cm]{bbs_bench1.png} \label{fig:timing}}\\ Fig. 4.6: Vanishing ideal of random points. \end{center} The timing is approximately linear in the number $r$ of points, with a slope increasing quadratically in $n$. {\small } \appendix \section*{Examples \nopunct} \begin{example} \ \end{example} We consider the sequence $\sigma \in \mathbbm{K}^{\mathbbm{N}}$ such that $\sigma_{d_1} = 1$ and $\sigma_i = 0$ for $0 \leqslant i \neq d_1 \leqslant d$ and $d_1 < d$. \ In the first step of the algorithm, we take $p_0 = 1$ and compute the first $\gamma \in [0,\ldots,d]$ such that $\langle x^\gamma, p_0 \rangle_{\sigma}$ is not zero. This yields $m_0 = x^{d_1}$ and $\tmmathbf{b}= [0]$, $\tmmathbf{c}= [d_1]$. In a second step, we have $p_1 = x - \langle x, m_0 \rangle_{\sigma} p_0 = x$. The first $\gamma \in [0,\ldots,d]\setminus\{d_{1}\}$ such that $\langle x^\gamma, p_1 \rangle_{\sigma}$ is not zero yields $\tmmathbf{b}= [0, 1]$, $\tmmathbf{c}= [d_1, d_1 - 1]$, $m_1 = x^{d_1 - 1}$. We repeat this computation until $\tmmathbf{b}= [0, \ldots, d_1]$, $\tmmathbf{c}= [d_1, d_1 - 1, \ldots, 1]$ with $m_{i} = x^{d_1 - i}$, $p_i = x^i $ for $i = 0, \ldots, d_1$. In the following step, we have $p_{d_1 + 1} = \tmop{proj} (x^{d_1 + 1}, \tmmathbf{p}, \tmmathbf{m}) = x^{d_1 + 1} - \langle x^{d_1 + 1}, m_0 \rangle_{\sigma} p_0 - \cdots - \langle x^{d_1 + 1}, m_{d_1} \rangle_{\sigma} p_{d_1} = x^{d_1 + 1}$ such that $\langle x^{d_1 + 1}, x^j \rangle_{\sigma} = 0$ for $0 \leqslant j \leqslant d$. The algorithm stops and outputs $\tmmathbf{b}= [1, \ldots, x^{d_1}]$, $\tmmathbf{c}= [x^{d_1}, x^{d_1 - 1}, \ldots, 1]$, $\tmmathbf{k}= [x^{d_1 + 1}]$.\\ \begin{example} \end{example} We consider the function $h (u_1, u_2) = {\color{green} 2 + 3} \hspace{0.25em} \cdot {\color{blue} 2^{u_1} 2^{u_2}} \, {\color{green} -} \, {\color{blue} 3^{u_1}}$.
Its associated generating series is $\sigma = \sum_{\alpha \in \mathbbm{N}^2} h (\alpha) \tmmathbf{z}^{\alpha} = 4 \noplus + 5 z_1 + 7 z_2 + 5 z_1^2 + 11 z_1 z_2 + 13 z_2^2 + \cdots$. At the first step, we have $\mathbf{x}^{\tmmathbf{b}}= [1]$, $\tmmathbf{p}= [1]$, $\tmmathbf{q}= \left[ \frac{1}{4} \right]$. At the second step, we compute $\mathbf{x}^{\tmmathbf{b}}= [1, x_1, x_2]$, $\tmmathbf{p}= [1, x_1 - \frac{5}{4}, x_2 - \frac{9}{5} x_1 - 4] = [p_1, p_{x_1}, p_{x_2}]$ and $\tmmathbf{q}= \left[ \frac{1}{4} p_1, - \frac{4}{5} p_{x_1}, \frac{5}{24} p_{x_2} \right]$. At the next step, we obtain $\tmmathbf{k}= [], \tmmathbf{d}= [x_1^2, x_1 x_2, x_2^2]$. \begin{eqnarray*} x_1 p_1 & \equiv & \frac{5}{4} p_1 + p_{x_1}\\ x_1 \hspace{0.25em} p_{x_1} & \equiv & - \frac{5}{16} p_1 + \frac{91}{20} p_{x_1} - p_{x_2}\\ x_1 p_{x_2} & \equiv & \sum_{i = 1}^3 \langle x_1 p_{x_2}, \tmmathbf{q}_i \rangle_{\sigma} \tmmathbf{p}_i = \frac{96}{25} p_{x_1} + \frac{1}{5} p_{x_2} \end{eqnarray*} The matrix of multiplication by $x_1$ in the basis $\tmmathbf{p}$ is \[ M_1 = \left[ \begin{array}{ccc} \frac{5}{4} & - \frac{5}{16} & 0\\ 1 & \frac{91}{20} & \frac{96}{25}\\ 0 & - 1 & \frac{1}{5} \end{array} \right] . \] Its eigenvalues are ${\color{blue} [\nobracket 1, 2, 3]}$ and the corresponding matrix of eigenvectors is \[ U := \left[ \begin{array}{ccc} \frac{1}{2} & \frac{3}{4} & - \frac{1}{4}\\ \frac{2}{5} & - \frac{9}{5} & \frac{7}{5}\\ - \frac{1}{2} & 1 & - \frac{1}{2} \end{array} \right], \] that is, the polynomials $U (x) = [2 - \frac{1}{2} \hspace{0.25em} x_1 - \frac{1}{2} \hspace{0.25em} x_2, - 1 + x_2, \frac{1}{2} \hspace{0.25em} x_1 - \frac{1}{2} \hspace{0.25em} x_2]$. 
By computing the Hankel matrix \[ H_{\sigma}^{U, [1, x_1, x_2]} = \left[ \begin{array}{ccc} {\color{green} 2} & {\color{green} 3} & {\color{green} - 1}\\ {\color{green} 2 \times} {\color{blue} 1} & {\color{green} 3 \times} {\color{blue} 2} & {\color{green} - 1 \times} {\color{blue} 3}\\ {\color{green} 2 \times} {\color{blue} 1} & {\color{green} 3 \times} {\color{blue} 2} & {\color{green} - 1 \times} {\color{blue} 1} \end{array} \right] \] we deduce the weights ${\color{green} 2, 3, - 1}$ and the frequencies ${\color{blue} (1, 1),}$ ${\color{blue} (2, 2), (3, 1)}$, which corresponds to the decomposition $\sigma = 2\, e^{y_1 + y_2} + 3\, e^{2 y_1 + 2 y_2} - e^{3 y_1 + y_2} $ associated to $h (u_1, u_2) = 2 + 3 \cdot 2^{u_1 + u_2} - 3^{u_1}$. \begin{example} \ \end{example} We consider the following symmetric tensor or homogeneous polynomial: {\small \[ \begin{array}{rl} \tau = & - x_0^4 - 24 \hspace{0.17em} x_0^3 x_1 - 8 \hspace{0.17em} x_0^3 x_2 - 60 \hspace{0.17em} x_0^2 x_1^2 - 168 \hspace{0.17em} x_0^2 x_1 x_2 - 12 \hspace{0.17em} x_0^2 x_2^2\\ & - 96 \hspace{0.17em} x_0 x_1^3 - 240 \hspace{0.17em} x_0 x_1^2 x_2 - 384 \hspace{0.17em} x_0 x_1 x_2^2 + 16 \hspace{0.17em} x_0 x_2^3\\ & - 46 \hspace{0.17em} x_1^4 - 200 \hspace{0.17em} x_1^3 x_2 - 228 \hspace{0.17em} x_1^2 x_2^2 - 296 \hspace{0.17em} x_1 x_2^3 + 34 \hspace{0.17em} x_2^4 .
\end{array} \] } The associated series is \begin{eqnarray*} \sigma & = & - 1 - 6 \hspace{0.17em} z_1 - 2 \hspace{0.17em} z_2 - 10 \hspace{0.17em} z_1^2 - 14 \hspace{0.17em} z_2 z_1 - 2 \hspace{0.17em} z_2^2\\ & & - 24 \hspace{0.17em} z_1^3 - 20 \hspace{0.17em} z_2 z_1^2 - 32 \hspace{0.17em} z_2^2 z_1 + 4 \hspace{0.17em} z_2^3\\ & & - 46 \hspace{0.17em} z_1^4 - 50 \hspace{0.17em} z_2 z_1^3 - 38 \hspace{0.17em} z_2^2 z_1^2 - 74 \hspace{0.17em} z_2^3 z_1 + 34 \hspace{0.17em} z_2^4 \end{eqnarray*} To decompose it into a sum of powers of linear forms, we apply the border basis algorithm to the series $\sigma$. The algorithm projects successively the monomials $1, x_1, x_2, x_1^2, x_1 x_2, x_2^2, \ldots$ onto the family of polynomials $\tmmathbf{p}$, starting with $\tmmathbf{p}= [1]$. We obtain $\mathbf{x}^{\tmmathbf{b}}=\tmmathbf{c}= [1, x_1, x_2]$, $\tmmathbf{p}= [1, x_1 - 6, x_2 + \frac{1}{13} x_1 - \frac{32}{13}]$ and the border basis is \[ \tmmathbf{k}= [{\color{red} x_1^2} - \frac{3}{2} x_1 - \frac{3}{2} x_2 + 2, {\color{red} x_1 x_2} - \frac{5}{2} x_1 - \frac{1}{2} x_2 + 2, {\color{red} x_2^2} + \frac{1}{2} x_1 - \frac{7}{2} x_2 + 2], \] giving the projection of the border monomials $\tmmathbf{d}= [{\color{red} x_1^2, x_1 x_2, x_2^2}]$ on the basis $\mathbf{x}^{\tmmathbf{b}}$. The decomposition of $\tau$ is deduced from the eigenvectors of the operator of multiplication by $x_1$: {\scriptsize \[ M_1 = \left[ \begin{array}{ccc} 0 & - 2 & - 2\\ 0 & \frac{1}{2} & \frac{3}{2}\\ 1 & \frac{5}{2} & \frac{3}{2} \end{array} \right] . \]} Its eigenvalues are $[{\color{red} - 1, 1, 2]}$ and the eigenvectors correspond to the polynomials \[ \tmmathbf{u}= \left[ \begin{array}{ccc} \frac{1}{2} \hspace{0.17em} x_2 - \frac{1}{2} \hspace{0.17em} x_1 & - 2 + \frac{3}{4} \hspace{0.17em} x_2 + \frac{1}{4} \hspace{0.17em} x_1 & - 1 + \frac{1}{2} \hspace{0.17em} x_2 + \frac{1}{2} \hspace{0.17em} x_1 \end{array} \right] . 
\] Computing $\omega_i = \langle \sigma \mid \tmmathbf{u}_i \rangle$ and $\xi_{i, j} = \frac{\langle \sigma \mid x_j \tmmathbf{u}_i \rangle}{\langle \sigma \mid \tmmathbf{u}_i \rangle}$ (see {\cite{mourrain_polynomial-exponential_2016}}), we obtain the decomposition: \[ \tau = \left( x_0 - x_1 + 3 \hspace{0.17em} x_2 \right)^4 + (x_0 + x_1 + x_2)^4 - 3 \hspace{0.17em} \left( x_0 + 2 \hspace{0.17em} x_1 + 2 \hspace{0.17em} x_2 \right)^4 . \overset{}{} \] \begin{example} \ \end{example} We consider the algebraic code over $\ensuremath{\mathbbm{K}}=\ensuremath{\mathbbm{Z}}/32003\ensuremath{\mathbbm{Z}}$ defined by $$ C =\{ c\in \ensuremath{\mathbbm{K}}^{11} \mid \sum_{i=1}^{11} c_{i}\, \xi_{i}^{\alpha} =0,\ \forall \alpha\in \mathbbm{N}^{3}\ s.t.\ |\alpha|\leq 2\} $$ where $$ \Xi = \left[ \begin{array}{ccccccccccc} 1 & 1 & 1 &-1 &-1 &0 &0 & 2 &1 & 1 & 0\\ 0 & 1 &-1 & 1 &-1 & 1 &1 &-1 & 2 &-2 & 0\\ 0 & 0 & 0 & 0 & 0 &0 &1 & 1 & 1 & 1 & 1\\ \end{array} \right] $$ and $\xi_{i}$ is the $i^{\mathrm{th}}$ column of $\Xi$. Suppose that we receive the word $$ r = [0, 3, 3, 3, 0, 0, -6, -2, 0, -1, 0] $$ which is the sum $r= c+\omega$ of a code word $c\in C$ and an error vector $\omega\in \ensuremath{\mathbbm{K}}^{11}$. We want to correct it and find the corresponding word $c$ of the code $C$. Computing the syndromes $\sigma_{\alpha}= \sum_{i=1}^{11} r_{i} \xi_{i}^{\alpha}= \sum_{i=1}^{11} \omega_{i} \xi_{i}^{\alpha}$ for $|\alpha|\le 2$ and the corresponding (truncated) generating series, we get $$ \sigma = - 2\, z_1 + z_2 + 3\,z_1\, z_2 - 2\,z_1\,z_3 - 3\,z_2^2 + z_2\,z_3. $$ We apply the border basis algorithm to obtain error locator polynomials. The monomials are considered in the order $\mathbf{x}^{\mathbf{a}}=[1, x_{1},x_{2},x_{3},x_{1}^{2}, x_{1}\,x_{2},\ldots,x_{3}^{2}]$. Here are the different steps, where $\ensuremath{\mathfrak{n}}$ denotes the new monomial introduced at each loop of the algorithm. \noindent{}Step 1. 
$\ensuremath{\mathfrak{n}}=1$, $\mathbf{x}^{\tmmathbf{b}}=[1]$, $\mathbf{x}^{\tmmathbf{c}}= [ x_1]$, $\tmmathbf{k}= []$. \noindent{}Step 2. $\ensuremath{\mathfrak{n}}=x_{1}$, $\mathbf{x}^{\tmmathbf{b}}=[1,x_{1}]$, $\mathbf{x}^{\tmmathbf{c}}= [x_1,1]$, $\tmmathbf{k}= []$. \noindent{}Step 3. $\ensuremath{\mathfrak{n}}=x_{2}$, $\mathbf{x}^{\tmmathbf{b}}=[1,x_{1}]$, $\mathbf{x}^{\tmmathbf{c}}= [x_1,1]$, $\tmmathbf{k}= [x_{2}+\frac{1}{2} x_{1}+\frac{3}{2}]$. \noindent{}Step 4. $\ensuremath{\mathfrak{n}}=x_{3}$, $\mathbf{x}^{\tmmathbf{b}}=[1,x_{1}]$, $\mathbf{x}^{\tmmathbf{c}}= [x_1,1]$, $\tmmathbf{k}= [x_{2}+\frac{1}{2} x_{1}+\frac{3}{2}, x_{3}-1]$. The algorithm stops at this step, since the new monomial $\ensuremath{\mathfrak{n}}=x_{1}^{2}$ is of degree $2$ and $\ensuremath{\mathfrak{n}} \cdot \mathbf{x}^{\tmmathbf{c}} \not\subset \mathbf{x}^{\mathbf{a}}$. It outputs two error locator polynomials: $x_{2}+\frac{1}{2} x_{1}+\frac{3}{2}, x_{3}-1$. We check that only $\xi_{5}, \xi_{10}$ are roots of the error locator polynomials. We deduce the non-zero weights $\omega_{5}, \omega_{10}$ by solving the system $ \omega_{5} \xi_{5}^{\alpha}+ \omega_{10} \xi_{10}^{\alpha} = \sigma_{\alpha}$ for $\alpha\in \{(0,0,0), (1,0,0)\}$. This yields $\omega_{5}=1, \omega_{10}=-1$, so that the code word is $$ c = [ 0, 3, 3, 3, -1, 0, -6, -2, 0, 0, 0]. $$ \end{document}
\begin{document} \title{On stable Baire classes} \author{Olena Karlova}\email{[email protected]} \author{Volodymyr Mykhaylyuk}\email{[email protected]} \address{Yurii Fedkovych Chernivtsi National University, Ukraine} \subjclass{Primary 54C08, 54H05; Secondary 26A21} \keywords{stable convergence, stable Baire class, adhesive space} \date{} \begin{abstract} We introduce and study adhesive spaces. Using this concept we obtain a characterization of stable Baire maps $f:X\to Y$ of the class $\alpha$ for wide classes of topological spaces. In particular, we prove that for a topological space $X$ and a contractible space $Y$ a map $f:X\to Y$ belongs to the $n$'th stable Baire class if and only if there exist a sequence $(f_k)_{k=1}^\infty$ of continuous maps $f_k:X\to Y$ and a sequence $(F_k)_{k=1}^\infty$ of functionally ambiguous sets of the $n$'th class in $X$ such that $f|_{F_k}=f_k|_{F_k}$ for every $k$. Moreover, we show that every monotone function $f:\mathbb R\to \mathbb R$ is of the $\alpha$'th stable Baire class if and only if it belongs to the first stable Baire class. \end{abstract} \maketitle \section{Introduction, terminology and notations} We say that a sequence $(f_n)_{n=1}^\infty$ of maps $f_n:X\to Y$ between topological spaces $X$ and $Y$ is {\it stably convergent to a map $f:X\to Y$ on $X$}, if for every $x\in X$ there exists $k\in\mathbb N$ such that $f_n(x)=f(x)$ for all $n\ge k$. A map $f:X\to Y$ belongs to {\it the first stable Baire class}, if there exists a sequence of continuous maps between $X$ and $Y$ which is stably convergent to $f$ on $X$. Real-valued functions of the first stable Baire class on a topological space $X$ naturally appear both as an interesting subclass of all differences of semi-continuous functions on $X$ \cite{ChRos,HOR} and in problems on the Baire classification of integrals depending on a parameter \cite{BKMM} as well as in problems concerning a composition of Baire functions \cite{Karlova:Mykhaylyuk:Comp}. 
Real-valued functions of higher stable Baire classes were introduced and studied by \'{A}.~Cs\'{a}sz\'{a}r and M.~Laczkovich in \cite{CsLacz:1,CsLacz:2}. A characterization of maps of the first Baire class defined on a perfectly paracompact hereditarily Baire space with the Preiss-Simon property and with values in a path-connected space with special extension properties was established by T.~Banakh and B.~Bokalo in \cite{BaBo}. This paper is devoted to obtain a characterization of stable Baire maps for wide classes of topological spaces and any ordinal $\alpha\in[1,\omega_1)$. To do this we introduce a class of adhesive spaces and study their properties in Section~\ref{sec:AdhesiveSpaces}. In Section~\ref{sec:Stable} we give a characterization of stable Baire maps defined on a topological space and with values in adhesive spaces (see Theorem~\ref{th:char_B1st}). Finally, in Section~\ref{sec:monotone} we apply this characterization to classify monotone functions within stable Baire classes (see Theorem~\ref{th:mon_stable}). Let us give some notations and recall several definitions. For topological spaces $X$ and $Y$ by ${\rm C}(X,Y)$ we denote the collection of all continuous maps between $X$ and $Y$. If $A\subseteq Y^X$, then the symbol $\overline{A}^{\,\,{\rm st}}$ stands for the collection of all stable limits of sequences of maps from~$A$. We put $$ {\rm B}_0^{{\rm st}}(X,Y)={\rm C}(X,Y) $$ and for each ordinal $\alpha\in (0,\omega_1)$ let ${\rm B}_\alpha^{\rm st}(X,Y)$ be the family of all maps of {\it the $\alpha$'th stable Baire class} which is defined by the formula $$ {\rm B}_\alpha^{\rm st}(X,Y)=\overline{\bigcup\limits_{\beta<\alpha}{\rm B}_\beta^{\rm st}(X,Y)}^{\,\,{\rm st}}. $$ Recall that a set $A\subseteq X$ is {\it functionally closed}, if there exists a continuous function $f:X\to [0,1]$ with $A=f^{-1}(0)$. If the complement of $A$ is functionally closed, then $A$ is called {\it functionally open}. 
Let $\mathcal M_0(X)$ be the family of all functionally closed subsets of $X$ and let $\mathcal A_0(X)$ be the family of all functionally open subsets of $X$. For every $\alpha\in [1,\omega_1)$ we put \begin{gather*} \mathcal M_{\alpha}(X)=\Bigl\{\bigcap\limits_{n=1}^\infty A_n: A_n\in\bigcup\limits_{\beta<\alpha}\mathcal A_{\beta}(X),\,\, n=1,2,\dots\Bigr\}\,\,\,\mbox{and}\\ \mathcal A_{\alpha}(X)=\Bigl\{\bigcup\limits_{n=1}^\infty A_n: A_n\in\bigcup\limits_{\beta<\alpha}\mathcal M_{\beta}(X),\,\, n=1,2,\dots\Bigr\}. \end{gather*} Elements from the class $\mathcal M_\alpha(X)$ belong to {\it the $\alpha$'th functionally multiplicative class} and elements from $\mathcal A_\alpha(X)$ belong to {\it the $\alpha$'th functionally additive class} in $X$. We say that a set is {\it functionally ambiguous of the $\alpha$'th class}, if it belongs to $\mathcal M_\alpha(X)$ and $\mathcal A_\alpha(X)$ simultaneously. A topological space $X$ is called {\it contractible}, if there exist a point $x_0\in X$ and a continuous map $\gamma:X\times[0,1]\to X$ such that $\gamma(x,0)=x$ and $\gamma(x,1)=x_0$ for all $x\in X$. A space $Y$ is {\it an extensor for $X$}, if for any closed set $F\subseteq X$ each continuous map $f:F\to Y$ can be extended to a continuous map $g:X\to Y$. A map $f:X\to Y$ is called {\it piecewise continuous}, if there exists a cover $(F_n:n\in\mathbb N)$ of $X$ by closed sets such that each restriction $f|_{F_n}$ is continuous. We denote by symbols $C(f)$ and $D(f)$ the sets of all points of continuity and discontinuity of a map $f:X\to Y$, respectively. \section{Adhesive spaces}\label{sec:AdhesiveSpaces} \begin{defn}\label{Def:Adhesive} We say that a topological space $Y$ is {\it an adhesive for $X$} (we denote this fact by $Y\in {\rm Ad}(X)$), if for any two disjoint functionally closed sets $A$ and $B$ in $X$ and any continuous maps $f,g:X\to Y$ there exists a continuous map $h:X\to Y$ such that $h|_A=f|_A$ and $h|_B=g|_B$. 
A space $Y$ is said to be {\it an absolute adhesive} for a class $\mathcal C$ of topological spaces and is denoted by $Y\in {\rm AAd(\mathcal C)}$, if $Y\in{\rm Ad}(X)$ for any $X\in\mathcal C$. \end{defn} In the case when $Y$ is an adhesive for any space $X$, we will write $Y\in {\rm AAd}$. \begin{rem} \begin{enumerate} \item Every extensor is an adhesive. \item Let $\pi(X,Y)$ be the set of all homotopy classes of continuous maps between $X$ and $Y$. If $\pi(X,Y)={\rm C}(X,Y)$, then $Y$ is an adhesive for $X$. \item Example~\ref{exm:cantorAD}(a) shows that the class of adhesive spaces is strictly wider than the class of extensors. Example~\ref{exm:cantorAD}(b) contains an adhesive $Y$ for $X$ such that $\pi(X,Y)\ne{\rm C}(X,Y)$. \end{enumerate} \end{rem} \begin{defn}\label{Def:StarAdhesive} Let $E$ be a subspace of a topological space $Y$. The pair $(Y,E)$ is called {\it \mbox{a $*$-adhesive} for $X$} (and is denoted by $(Y,E)\in{\rm Ad}^*(X)$), if there exists a point $y^*\in E$ such that for any continuous map $f:X\to Y$ and any two disjoint functionally closed sets $A$ and $B$ in $X$ with $f(A)\subseteq E$ there exists a continuous map $h:X\to Y$ such that $h|_A=f|_A$ and $h|_B=y^*$. \end{defn} \begin{rem} A pair $(Y,E)$ is a $*$-adhesive for $X$, if \begin{enumerate} \item $E$ is a subspace of $Y\in {\rm Ad}(X)$; \item $(Y,E)\in{\rm AE}(X)$ (i.e., each continuous map $f:F\to E$ has a continuous extension \mbox{$g:X\to Y$}); \item $E\in {\rm Ad}(X)$ and $E$ is a retract of $Y$. \end{enumerate} \end{rem} \begin{defn} A topological space $Y$ is said to be {\it a $\sigma$-adhesive for $X$}, if there exists a cover $(Y_n:n\in\mathbb N)$ of $Y$ by functionally closed subspaces $Y_n$ such that $(Y,Y_n)\in {\rm Ad}^*(X)$ for every $n$. \end{defn} \begin{defn} A topological space $X$ is {\it low-dimensional}, if each point of $X$ has a base of open neighborhoods with discrete (possibly empty) boundaries.
\end{defn} It is clear that for a regular low-dimensional space we have ${\rm ind}X\le 1$. The following result gives examples of adhesive spaces. \begin{thm}\label{Thm:AbsAdh} Let $Y$ be a topological space. Then \begin{enumerate} \item\label{Thm:AbsAdh:it:1} $Y$ is an absolute adhesive for the class of all strongly zero-dimensional spaces; \item\label{Thm:AbsAdh:it:2} $Y$ is an absolute adhesive for the class of all low-dimensional compact Hausdorff spaces, if $Y$ is path-connected; \item\label{Thm:AbsAdh:it:3} $Y\in {\rm AAd}$ if and only if $Y$ is contractible. \end{enumerate} \end{thm} \begin{proof} Let $X$ be a topological space, $A,B$ be disjoint functionally closed subsets of $X$ and $f,g:X\to Y$ be continuous maps. {\bf (\ref{Thm:AbsAdh:it:1}).} Assume that $X$ is strongly zero-dimensional and choose a clopen set $U\subseteq X$ such that $A\subseteq U\subseteq X\setminus B$. Then the map $h:X\to Y$, $$ h(x)=\left\{\begin{array}{ll} f(x), & x\in U, \\ g(x), & x\in X\setminus U, \end{array} \right. $$ is continuous, $h|_A=f|_A$ and $h|_B=g|_B$. {\bf (\ref{Thm:AbsAdh:it:2}).} Assume that $X$ is a low-dimensional Hausdorff compact space and $Y$ is a path-connected space. We choose a continuous function $\varphi:X\to [0,1]$ such that $A=\varphi^{-1}(0)$ and $B=\varphi^{-1}(1)$. For each point $x\in A\cup B$ we take its open neighborhood $O_x$ with the discrete boundary $\partial O_x$ such that $O_x\subseteq \varphi^{-1}([0,1/3))$ for $x\in A$ and $O_x\subseteq \varphi^{-1}((2/3,1])$ for $x\in B$. Since $X$ is compact, every boundary $\partial O_x$ is finite. Moreover, there exist finite subcovers $\mathcal U$ and $\mathcal V$ of $(O_x:x\in A)$ and $(O_x:x\in B)$, respectively. Then the sets $U=\cup\mathcal U$ and $V=\cup\mathcal V$ are open neighborhoods of $A$ and $B$, respectively, $\partial U\cap\partial V=\emptyset$ and $|\partial U\cup\partial V|<\aleph_0$. 
In the case when one of the boundaries of the sets $U$ or $V$ is empty, we can construct the required map $h$ as in case (\ref{Thm:AbsAdh:it:1}).
There exists a connected set $Y\subseteq\mathbb R^2$ such that for any two distinct continuous functions $f,g:[0,1]\to Y$ and any two disjoint nonempty closed sets $A$ and $B$ in $[0,1]$ there is no continuous function $h:[0,1]\to Y$ with $h|_A=f|_A$ and $h|_B=g|_B$.
Then each continuous function between $\mathbb R$ and $Y$ is constant. The statement of the example follows immediately. \end{proof} \begin{exm}\label{exm:cantorAD}{\rm a) Let $X=[0,1]$, $C\subseteq [0,1]$ be the Cantor set and $Y=\Delta (C)\subseteq\mathbb R^2$ be the cone over $C$, i.e., the collection of all segments which connect the point $v=(\tfrac 12,1)$ with points $(x,0)$ for all $x\in C$. Then, being contractible, $Y\in {\rm AAd}$. We show that $Y$ is not an extensor for $X$. Indeed, denote by $((a_n,b_n))_{n=1}^\infty$ the sequence of contiguous intervals to the Cantor set and consider the identity embedding $f:C\to C\times\{0\}$. Assume that there exists a continuous extension $g:[0,1]\to Y$ of the function $f$. Then $g([a_n,b_n])\supseteq [a_n,v]\cup[b_n,v]$ for every $n\in\mathbb N$, which implies the equality $g([0,1])=Y$. Therefore, we obtain a contradiction, since $Y$ is not locally path-connected. b) Let $X=Y=S^1=\{(x,y)\in\mathbb R^2: x^2+y^2=1\}$. By Theorem~\ref{Thm:AbsAdh} the space $S^1$ is adhesive for itself. On the other hand, the continuous maps $f,g:S^1\to S^1$ defined by the equalities $f(x,y)=(x,y)$ and $g(x,y)=(1,0)$, are not homotopic.} \end{exm} \section{Stable Baire classes and their characterization}\label{sec:Stable} We omit the proof of the following fact, since it is completely similar to the proof of Theorem 2 from \cite[p.~357]{Kuratowski:Top:1}. \begin{lem}\label{amb} If $A$ is a functionally ambiguous set of the class $\alpha>1$ in a topological space $X$, then there exists a sequence $(A_n)_{n=1}^\infty$ of functionally ambiguous sets of classes $<\alpha$ such that \begin{gather}\label{gath:limitset} A=\mathop{\rm Lim}\limits_{n\to\infty} A_n=\bigcup\limits_{n=1}^\infty \bigcap\limits_{k=0}^\infty A_{n+k}=\bigcap\limits_{n=1}^\infty \bigcup\limits_{k=0}^\infty A_{n+k}. \end{gather} Moreover, if $\alpha=\beta+1$ for a limit ordinal $\beta$, then all the sets $A_n$ can be taken from classes $<\beta$. 
\end{lem} \begin{thm}\label{th:char_B1st} Let $X$ be a topological space, $Y$ be a topological space with the functionally closed diagonal $\Delta=\{(y,y):y\in Y\}$, $\alpha\in[1,\omega_1)$ and let $\beta=\alpha$, if $\alpha<\omega_0$, and $\beta=\alpha+1$, if $\alpha\ge\omega_0$. For a map $f:X\to Y$ we consider the following conditions: \begin{enumerate} \item\label{it:th:char_B1st:1} $f\in {\rm B}_\alpha^{\rm st}(X,Y)$; \item\label{it:th:char_B1st:2} there exist an increasing sequence $(X_n)_{n=1}^\infty$ of sets of functionally multiplicative classes $<\beta$ and a sequence $(f_n)_{n=1}^\infty$ of maps $f_n\in{\rm B}_{<\alpha}^{\rm st}(X,Y)$ such that $X=\bigcup\limits_{n=1}^\infty X_n$ і $f_n|_{X_n}=f|_{X_n}$ for all $n\in\mathbb N$; \item\label{it:th:char_B1st:3} there exist a partition $(X_n:n\in\mathbb N)$ of $X$ by functionally ambiguous sets of the class $\beta$ and a sequence of continuous maps $f_n:X\to Y$ such that $f_n|_{X_n}=f|_{X_n}$ for all $n\in\mathbb N$. \end{enumerate} Then $(\ref{it:th:char_B1st:1})\Leftrightarrow(\ref{it:th:char_B1st:2})\Rightarrow (\ref{it:th:char_B1st:3})$. If one of the following conditions hold \begin{enumerate} \item[(a)] $Y$ is adhesive for $X$, or \item[(b)] $Y$ is path-connected $\sigma$-adhesive for $X$, \end{enumerate} then $(\ref{it:th:char_B1st:3})\Rightarrow(\ref{it:th:char_B1st:2})$. \end{thm} \begin{proof} {\bf (\ref{it:th:char_B1st:1})$\Rightarrow$(\ref{it:th:char_B1st:2}).} Assume that the diagonal $\Delta$ is functionally closed in $Y^2$. Let $(f_n)_{n=1}^\infty$ be a sequence of maps $f_n\in{\rm B}_{<\alpha}^{\rm st}(X,Y)$ which is stably convergent to $f$ on $X$. For $k,n\in\mathbb N$ we put \begin{gather*} X_{k,n}=\{x\in X:f_k(x)=f_n(x)\}\quad\mbox{and}\quad X_n=\bigcap\limits_{k=n}^\infty X_{k,n}. \end{gather*} Clearly, $X_n\subseteq X_{n+1}$, $X=\bigcup\limits_{n=1}^\infty X_n$ and $f_n|_{X_n}=f|_{X_n}$ for every $n\in\mathbb N$. 
Since the sequence $(X_n)_{n=1}^\infty$ is increasing, $(f_n)_{n=1}^\infty$ converges stably to $f$ on $X$.
and $(g_n)_{n=1}^\infty$ be a sequence of maps $g_n\in{\rm B}_n^{\rm st}(X,Y)$ such that (\ref{gath:th:char_B1st:1}) holds.
By implication (\ref{it:th:char_B1st:1})$\Rightarrow$(\ref{it:th:char_B1st:2}) proved above for $g_n$ and by implication (\ref{it:th:char_B1st:2})$\Rightarrow$(\ref{it:th:char_B1st:3}) proved above for finite ordinals, we obtain that for every $n\in\mathbb N$ there exist a partition $(B_{nm})_{m=1}^\infty$ of the space $X$ by functionally ambiguous sets of the class $n$ and a sequence $(h_{nm})_{m=1}^\infty$ of continuous maps such that (\ref{gath:th:char_B1st:2}) is valid. It remains to apply (\ref{gath:th:char_B1st:3}). Further, the inductive step is proved similarly to the case of finite ordinals. Now we prove that {\bf (\ref{it:th:char_B1st:3})$\Rightarrow$(\ref{it:th:char_B1st:2})} in the case $\alpha=1$. Assume that condition (a) holds. For every $n\in\mathbb N$ we take an increasing sequence $(F_{nm})_{m=1}^\infty$ of functionally closed sets in $X$ such that $X_n=\bigcup\limits_{m=1}^\infty F_{nm}$ and set $\tilde X_n=\bigcup\limits_{m=1}^n F_{mn}$. Then $(\tilde X_n)_{n=1}^\infty$ is an increasing sequence of functionally closed sets which covers the space $X$. Since $Y$ is adhesive for $X$, for every $n\in\mathbb N$ there exists a continuous map $\tilde f_n:X\to Y$ such that $\tilde f_n|_{F_{mn}}=f_m|_{F_{mn}}$ for all $m\in\{1,\dots,n\}$. Clearly, $\tilde f_n|_{\tilde X_n}=f|_{\tilde X_n}$ for all $n\in\mathbb N$. Now suppose that condition {\bf (b)} holds. Let $Y$ be a path-connected $\sigma$-adhesive for $X$ and $(Y_n:n\in\mathbb N)$ be a cover of the space $Y$ by functionally closed subspaces $Y_n$ such that $(Y,Y_n)\in {\rm Ad}^*(X)$ for every $n$. We prove that the preimage of each functionally closed subset of $Y$ under the mapping $f$ is functionally ambiguous of the class $\beta$ in $X$. Indeed, take a functionally closed set $B\subseteq Y$. Then $f^{-1}(B)=\bigcup\limits_{n=1}^\infty (f_n^{-1}(B)\cap X_n)$. Since $f_n:X\to Y$ is continuous, $f_n^{-1}(B)$ is functionally closed in $X$. 
and it is evident that $(\tilde Y_n:n\in\mathbb N)$ has the same properties as $(Y_n:n\in\mathbb N)$.
It follows from the path-connectedness of $Y$ that there exists a continuous map $\gamma:[1,+\infty)\to Y$ such that $\gamma(m)=y_m^*$ for every $m\in\mathbb N$.
The following example shows that this property is essential for implication (\ref{it:th:char_B1st:1})$\Rightarrow$(\ref{it:th:char_B1st:2}).
$$ Note that $f\in {\rm B}^{\rm st}_1(X,Y)$, because $f$ is the stable limit of the sequence of continuous functions $f_n:X\to Y$, $$ f_n(x)=\left\{\begin{array}{ll} a, & x\in \{a\}\cup(\bigcup\limits_{k=n+1}^\infty D_k),\\ \varphi_k(x), & k\leq n\,\,\,{\rm and}\,\,\, x\in D_k. \end{array} \right. $$ Fix a functionally measurable subset $A\ni a$ of $X$. Since $|D\setminus B|\leq\aleph_0$ for every functionally open or functionally closed subset $B\ni a$ of $X$, we have $|D\setminus A|\leq\aleph_0$. Thus every set $B_n=D_n\setminus A$ is at most countable. Therefore the set $C=\bigcup\limits_{n=1}^\infty \varphi_n(B_n)$ is at most countable too. We choose $d\in D\setminus C$ and consider the neighborhood $V=Y\setminus \{d\}$ of $a$ in $Y$. Since $$ A\setminus\{x\in A:f(x)\in V\}=\{\varphi_n^{-1}(d):n\in\mathbb N\}, $$ the restriction $f|_A$ is discontinuous at $a$. \end{proof} We show in the following example that the properties (a) and (b) in Theorem~\ref{th:char_B1st} are essential. \begin{exm} {\rm a) Let $X=[0,1]^2$ and $Y\subseteq X$ be the Sierpi\'{n}ski carpet. Notice that $Y$ is a Peano continuum. Fix any $x^*\in Y$ and consider the map $f:X\to Y$ such that $f(x)=x$ for $x\in Y$ and $f(x)=x^*$ for $x\in X\setminus Y$. We put $f_1(x)=x$ and $f_2(x)=x^*$ for all $x\in X$. Notice that $X_1=Y$ and $X_2=X\setminus Y$ are ambiguous subsets of the first class in $X$, $X_1\cup X_2=X$ and $f|_{X_i}=f_i|_{X_i}$ for $i=1,2$. Therefore, condition~(\ref{it:th:char_B1st:3}) of Theorem~\ref{th:char_B1st} holds. Assume that $f\in {\rm B}_1^{\rm st}(X,Y)$. Take a sequence $(g_n)_{n=1}^\infty$ of continuous functions $g_n:X\to Y$ and a closed cover $(\tilde X_n:n\in\mathbb N)$ of $X$ such that $g_n|_{\tilde X_n}=f|_{\tilde X_n}$ and $\tilde X_n\subseteq \tilde X_{n+1}$ for every $n$. By the Baire category theorem, there exists $k\in\mathbb N$ such that the set $\tilde X_k\cap Y$ has the nonempty interior in $Y$. 
Since $f|_Y$ and $f|_{[0,1]^2\setminus Y}$ are continuous and $Y$ is closed in $[0,1]^2$, $f$ is piecewise continuous.
Assume that there exist a sequence $(f_n)_{n=1}^\infty$ of continuous functions $f_n:[0,1]^2\to Y$ and a closed cover $(X_n:n\in\mathbb N)$ of the square $[0,1]^2$ such that $f_n|_{X_n}=f|_{X_n}$ for every $n$. By the Baire category theorem, there exists $k\in\mathbb N$ such that the set $X_k\cap Y$ has the nonempty interior in $Y$. Let $F$ be a closed square in $[0,1]^2$ such that $F\cap Y\subseteq X_k\cap Y$ and the interior of $F\cap Y$ in $Y$ is nonempty. Consider the set $B=f_k(F)$. Since $B$ is a continuous image of $F$, it should be locally connected. On the other hand, $B$ is not a locally connected set, since $B$ is a closed subspace of $Y$ with nonempty interior. \end{proof} \section{Monotone functions and their stable Baire measurability}\label{sec:monotone} We will establish in this section that $$ \mathcal M\cap{\rm B}_1^{\rm st}(\mathbb R,\mathbb R)=\mathcal M\cap {\rm B}_2^{\rm st}(\mathbb R,\mathbb R)=\dots=\mathcal M\cap {\rm B}_\alpha^{\rm st}(\mathbb R,\mathbb R)=\dots, $$ where $\mathcal M$ is the class of all monotone functions. The following fact immediately follows from definitions and we omit its proof. \begin{lem}\label{lemma:monotone_dense} Let $X\subseteq\mathbb R$, $f:X\to\mathbb R$ be a monotone function and $g:X\to\mathbb R$ be a continuous function such that $f|_D=g|_D$ for some dense set $D\subseteq X$. Then $f=g$ on $X$. \end{lem} A map $f:X\to Y$ between topological spaces $X$ and $Y$ is said to be {\it weakly discontinuous}, if for any subset $A\subseteq X$ the discontinuity points set of the restriction $f|_A$ is nowhere dense in $A$. It is easy to see that a map $f$ is weakly discontinuous if and only if the discontinuity points set of the restriction $f|_F$ to any closed set $F\subseteq X$ is nowhere dense in $F$. 
\begin{thm}\label{th:mon_stable} For a monotone function $f:\mathbb R\to\mathbb R$ the following conditions are equivalent: \begin{enumerate} \item\label{it:th:mon_stable:1} $f$ is weakly discontinuous; \item\label{it:th:mon_stable:2} $f\in {\rm B}_1^{\rm st}(\mathbb R,\mathbb R)$; \item\label{it:th:mon_stable:3} $f\in \bigcup\limits_{\alpha<\omega_1}{\rm B}_\alpha^{\rm st}(\mathbb R,\mathbb R)$. \end{enumerate} \end{thm} \begin{proof} The equivalence of (\ref{it:th:mon_stable:1}) and (\ref{it:th:mon_stable:2}) was established in~\cite{BKMM} (see also~\cite{BaBo} for a more general case). The implication (\ref{it:th:mon_stable:2})$\Rightarrow$(\ref{it:th:mon_stable:3}) is evident. We prove that (\ref{it:th:mon_stable:3})$\Rightarrow$(\ref{it:th:mon_stable:1}). Let $f\in {\rm B}_\alpha^{\rm st}(\mathbb R,\mathbb R)$ for some $\alpha\in[0,\omega_1)$. By Theorem~\ref{th:char_B1st} there exist a sequence of continuous functions $f_n:\mathbb R\to\mathbb R$ and a partition $(X_n:n\in\mathbb N)$ of the real line such that $f_n|_{X_n}=f|_{X_n}$ for every $n\in\mathbb N$. Consider a nonempty closed set $F\subseteq \mathbb R$. For every $n$ we denote $G_n={\rm int}_F\overline{X_n\cap F}$. Since $F$ is a Baire space, the union $G=\bigcup\limits_{n=1}^\infty G_n$ is dense in $F$. The equality $f|_{X_n\cap F}=f_n|_{X_n\cap F}$ and Lemma~\ref{lemma:monotone_dense} imply that $f|_{G_n}=f_n|_{G_n}$ for every $n$. Since every function $f_n$ is continuous and the set $G_n$ is open in $F$, we have $G_n\subseteq C(f|_F)$. Hence, $G\subseteq C(f|_F)$, which implies that $f$ is weakly discontinuous. \end{proof} As a corollary of Theorem~\ref{th:mon_stable} we obtain the following result. \begin{prop} There exists a function $f\in {\rm B}_1(\mathbb R,\mathbb R)\setminus \bigcup\limits_{\alpha<\omega_1}{\rm B}_\alpha^{\rm st}(\mathbb R,\mathbb R)$. 
\end{prop} \begin{proof} We consider the increasing function $f:\mathbb R\to\mathbb R$, $$ f(x)=\sum\limits_{r_n\le x}\frac{1}{2^n}, $$ where $\mathbb Q=\{r_n:n\in\mathbb N\}$. Since $f$ is monotone, $f\in{\rm B}_1(\mathbb R,\mathbb R)$. But $D(f)=\mathbb Q$. Therefore, $f$ is not weakly discontinuous. Hence, $f\not\in \bigcup\limits_{\alpha<\omega_1}{\rm B}_\alpha^{\rm st}(\mathbb R,\mathbb R)$ by Theorem~\ref{th:mon_stable}. \end{proof} \end{document}
\date{} \author{ \begin{center} \textbf{S.~M. Sitnik}\\ Belgorod State University, Belgorod, Russia\\ [email protected]\\ \textbf{A.~S. Timashov}\\ Belgorod State University, Belgorod, Russia\\ [email protected]\\ \textbf{S.~N. Ushakov}\\ Voronezh State University, Voronezh, Russia\\ [email protected] \end{center} } \maketitle Keywords: integer shifts, Gaussian, linear systems, quadratic exponents, Vandermonde matrices. \begin{abstract} In this paper we consider an interpolation problem connected with series of integer shifts of Gaussians. Known approaches to this problem meet numerical difficulties. Because of this, another method is considered, based on finite--rank approximations by linear systems.
In \cite{S1} the solution is derived by using special functions, namely Jacobi theta--functions. But as it was demonstrated in \cite{S2}--\cite{S3}, in spite of its theoretical value, such an approach has no computational perspectives, as it involves division by very small denominators, which leads to large numerical errors. Another approach, based on the discrete Fourier transform (DFT), is effective only for a very small range of parameters \cite{S3}.
The solving of the formulated problem is reduced to the determination of a sequence of unknown coefficients $f_{k}$ from (\ref{eq1}). For this, following the standard way for interpolation problems, we need to find node functions for every integer interpolation node $x=m,~ m \in \mathbb{Z}$. For our case we have to find a single \textit{basic node function} for the node $x=0$, which we will seek in the form \begin{equation} \label{eq4} G\left( \sigma, x \right)=\sum\limits_{k=-\infty }^\infty g_{k}\ e(\sigma ,x,k). \end{equation} We shortly consider the steps to derive an infinite linear system for finding the coefficients of the basic node function \eqref{eq4}, cf. \cite{S2}--\cite{S3}. It follows from (\ref{eq2}) that this basic node function satisfies, for all $m \in \mathbb{Z}$: \begin{equation} \label{eq5} G \left( \sigma, m \right)=\sum\limits_{k=-\infty }^\infty g_{k}\ e(\sigma, m, k) =\sigma_{m0}, \end{equation} where $\sigma_{m0}$ is the Kronecker symbol
$$ Then one solution of the interpolation problem considered will be the function \begin{equation} \label{eq6} \tilde{f}\left( x \right)=\sum\limits_{l=-\infty }^\infty f\left( l \right) G_{l}\left( \sigma, x \right), \end{equation} because for $x=m$ in the sum (\ref{eq6}) only one term is non--zero: $$ f\left( m \right)G_{m}\left( \sigma, m \right) = f\left( m \right)\cdot 1=f\left( m \right). $$ To move from the solution representation (\ref{eq6}) to the one we seek in the form (\ref{eq1}) we perform the necessary substitution. Using (\ref{eq4}) we derive: $$ \tilde{f}\left( x \right)=\sum\limits_{l=-\infty }^\infty f\left( l \right) G_{l}\left( \sigma, x \right)= $$ $$ =\sum\limits_{l=-\infty }^\infty f \left( l \right) G \left( \sigma, x-l \right) =\sum\limits_{l=-\infty}^\infty f\left( l \right) \sum\limits_{k=-\infty }^\infty g_{k}\ e\left( \sigma, x-l, k \right) $$ Let us introduce a new summation index $j=l+k$, so that $l=j-k$, and formally change the order of summation. We derive \begin{equation} \label{eq7} \begin{array}{c} \tilde{f} (x) = \sum\limits_{j=-\infty }^\infty \sum\limits_{k=-\infty }^\infty f (j-k) g_{k}\ e (\sigma, x, j) = \\ =\sum\limits_{j=-\infty }^\infty \left\{ \sum\limits_{k=-\infty }^\infty f\left( j-k \right)g_{k} \right\}\ e\left( \sigma, x, j \right)= \sum\limits_{j=-\infty }^\infty f_{j}\ e\left( \sigma, x, j \right), \end{array} \end{equation} where the coefficients to be found are represented, in order to agree with (\ref{eq1}), in the form (after the index change $j \rightleftarrows k$) \begin{equation} \label{eq8} f_{k}=\sum\limits_{j=-\infty }^\infty f\left( k-j \right)g_{j}, \end{equation} and $f(m)$ are the values of the given function at integer points, and $g_{j}$ are the coefficients of the basic node function series (\ref{eq4}). Now we transform the system of equations: $$ \sum\limits_{k=-\infty }^\infty g_{k} e \left( \sigma, m, k \right)=\sigma_{m0},~m\in \mathbb{Z}. $$ For that let us introduce a new variable $q=e^{-\frac{1}{2\sigma^{2}}}$.
After that we derive \begin{equation} \label{eq9} \sum\limits_{k=-\infty }^\infty g_{k} q^{{(m-k)}^{2}}=\sigma_{m0},~ m \in \mathbb{Z}. \end{equation} The next important step is to consider and study finite--rank cuts of this infinite system of equations (\ref{eq9}). \section{Reducing to a finite--rank system of linear equations} As was discussed before, the key point in the interpolation approach is to define the node function. Now we pass to studying finite--rank approximations of the initial problem. These approximations are naturally finite--rank cuts of the infinite system of equations (\ref{eq9}). This direct approach was studied in \cite{S4}. Being in some way restrictive, this approach allows one to avoid the difficulties of the other approaches in \cite{S1}--\cite{S3}. Most importantly, our method allows one to construct effective numerical calculations based on it, cf. \cite{S4}. So let us seek approximations $H\left( \sigma, x \right)$ of the node function (\ref{eq4}) $G\left( \sigma, x \right)$ as finite sums \begin{equation} \label{eq10} H\left( n, x, \sigma \right)=\sum\limits_{k=-n}^n d_{k} \cdot q^{(x-k)^{2} }, \ q=\exp \left( -\frac{1}{2\sigma^{2}} \right), \ 0<q<1, \end{equation} after which the infinite system (\ref{eq9}) is reduced to a finite one, where the number of equations may be greater than the number of unknown quantities. \begin{equation} \label{eq11} H(n, m,j, \sigma )=\delta_{0j}, \ j=-m,\dots ,0,\dots, m, \ m\ge n. \end{equation} In the system of linear equations which follows from conditions (\ref{eq10})--(\ref{eq11}) there are $2m+1$ equations and $2n+1$ unknown numbers $d_{k}$, $-n\le k\le n$. Let us write the system following from (\ref{eq10})--(\ref{eq11}) in the matrix form \begin{equation} \label{eq12} A\cdot \overline d=\overline y, \end{equation} with matrix and vector elements $$ a_{ij}=q^{(i-j)^{2}},~ y_{j}=\delta_{0j}, ~ i=-n, \dots, 0, \dots, n, ~ j=-m, \dots, 0, \dots, m.
$$ The approximate node function for $m=n$ with coefficients $d_{k}$ we denote by $H(n, x, \sigma )$, and for $m>n$ by $H(n, m, x, \sigma )$. Let us introduce some notation. The Vandermonde determinant of size $n\times n$ we denote by $W \lr{x_1, \dots, x_n}$, and this determinant without line $l$ and column $k$ by $W_{l,\, k} \lr{x_1, \dots, x_n}$. To illustrate further considerations we use $n=5$ as an example of arbitrary dimensions, e.g. $$ W \lr{x_1, x_2, x_3, x_4, x_5} = \left| \begin{array}{ccccc} 1 & x_1 & x_1^2 & x_1^3 & x_1^4 \\ 1 & x_2 & x_2^2 & x_2^3 & x_2^4 \\ 1 & x_3 & x_3^2 & x_3^3 & x_3^4 \\ 1 & x_4 & x_4^2 & x_4^3 & x_4^4 \\ 1 & x_5 & x_5^2 & x_5^3 & x_5^4 \end{array} \right|, $$ $$ W_{3,\, 2} \lr{x_1, x_2, x_3, x_4, x_5} = \left| \begin{array}{cccc} 1 & x_1^2 & x_1^3 & x_1^4 \\ 1 & x_2^2 & x_2^3 & x_2^4 \\ 1 & x_4^2 & x_4^3 & x_4^4 \\ 1 & x_5^2 & x_5^3 & x_5^4 \end{array} \right|. $$ It is known that \begin{equation} \label{eq13} W_{l,\, k} \lr{x_1, \dots, x_n} = \sum x_{\alpha_1} x_{\alpha_2} \dots x_{\alpha_{n-k}} \cdot \prod\limits_{n \geq i > j \geq 1,~ i \neq l, j \neq l} \lr{x_i - x_j}, \end{equation} with summation over all sets of $n-k$ numbers $\alpha_1, \alpha_2, \dots, \alpha_{n-k}$ from $1, 2, \dots, n$. We also have to mention that the problems considered are deeply connected with the third Jacobi theta--function \cite{S5} $$ \vartheta_{3} \left( z,q \right)=1+2\sum\limits_{k=1}^\infty q^{k^{2}}\cos {\left( 2kz \right),} $$ and the Jacobi--Poisson transformation formula \begin{equation} \label{eq14} \sum\limits_{k=-\infty }^{+\infty } e^{-a\left( t+\pi k \right)^{2}} \, =\frac{1}{\sqrt {\pi a} }\, \, \sum\limits_{k=-\infty }^{+\infty } e^{-\frac{k^{2}}{a}} \, e^{i2kt}. \end{equation} For these connections see \cite{S1}--\cite{S3}.
\section{The main case when the number of equations equals the number of unknown coefficients} \subsection{Correct solvability and a formula for the determinant of the $n\times n$ system} Now the main object of our study is the finite system of linear equations (\ref{eq12}) with square matrices. We use the notation introduced in (\ref{eq10})--(\ref{eq12}). Let us prove our main result on the correctness of the system solution for a square matrix. From it it follows that the system is always uniquely solvable. \begin{theorem} \label{t1} The matrix $A$ for $m=n$ is non--degenerate and its determinant equals \begin{equation} \label{eq15} |A|= q^{\frac{2n (n+1)(2n+1)}{3}} \cdot W \lr{q^{-2 n}, \dots 1, \dots, q^{2 n}}. \end{equation} \end{theorem} Proof. $$ |A| = \left| \begin{array}{ccccc} 1 & q & q^4 & q^9 & q^{16} \\ q & 1 & q & q^4 & q^9 \\ q^4 & q & 1 & q & q^4 \\ q^9 & q^4 & q & 1 & q \\ q^{16} & q^9 & q^4 & q & 1 \end{array} \right| $$ The elements of this determinant can be factorized: $$ a_{i j} = q^{(i-j)^2} = q^{{i^2}} \cdot q^{-2ij} \cdot q^{j^2}. $$ Consequently it is possible to factor out from the $i$--th line the value $ q^{{i^2}}$ and from the $j$--th column the value $q^{j^2}$. Repeating this operation for all lines and columns we obtain $$ |A| = q^4 \cdot q \cdot 1 \cdot q \cdot q^4 \cdot \left| \begin{array}{ccccc} q^{-4} & q^{-3} & 1 & q^5 & q^{12} \\ 1 & q^{-1}& 1 & q^3 & q^8 \\ q^4 & q & 1 & q & q^4 \\ q^8 & q^3 & 1 & q^{-1} & 1 \\ q^{12} & q^5 & 1 & q^{-3} & q^{-4} \end{array} \right| = q^{20} \cdot \left| \begin{array}{ccccc} q^{-8} & q^{-4} & 1 & q^4 & q^8 \\ q^{-4} & q^{-2}& 1 & q^2 & q^4 \\ 1 & 1 & 1 & 1 & 1 \\ q^4 & q^2 & 1 & q^{-2} & q^{-4}\\ q^8 & q^4 & 1 & q^{-4} & q^{-8} \end{array} \right|.
$$ Now the elements of the intermediate determinant are $q^{-2ij}$; further we factor out from the $i$--th line the multiplier $q^{2 n i }$, $i = -n, \dots, n$, whose total product is equal to one: $$ q^{20} \cdot \left| \begin{array}{ccccc} q^{-8} & q^{-4} & 1 & q^4 & q^8 \\ q^{-4} & q^{-2}& 1 & q^2 & q^4 \\ 1 & 1 & 1 & 1 & 1 \\ q^4 & q^2 & 1 & q^{-2} & q^{-4}\\ q^8 & q^4 & 1 & q^{-4} & q^{-8} \end{array} \right| = q^{20} \cdot \left| \begin{array}{ccccc} 1 & q^{4} & q^8 & q^{12} & q^{16} \\ 1 & q^{2} & q^4 & q^{6} & q^{8} \\ 1 & 1 & 1 & 1 & 1 \\ 1 & q^{-2} & q^{-4} & q^{-6} & q^{-8}\\ 1 & q^{-4} & q^{-8} & q^{-12} & q^{-16} \end{array} \right|= $$ $$ = q^{20} \cdot \left| \begin{array}{ccccc} 1 & q^{4} & \lr{q^{4}}^2 & \lr{q^{4}}^3 & \lr{q^{4}}^4 \\ 1 & q^{2} & \lr{q^{2}}^2 & \lr{q^{2}}^3 & \lr{q^{2}}^4 \\ 1 & 1 & 1 & 1 & 1 \\ 1 & q^{-2} & \lr{q^{-2}}^2 & \lr{q^{-2}}^3 & \lr{q^{-2}}^4\\ 1 & q^{-4} & \lr{q^{-4}}^2 & \lr{q^{-4}}^3 & \lr{q^{-4}}^4 \end{array} \right|. $$ In the general case the result is: $$ \det A = \lr{\prod\limits_{i=-n}^{n} q^{{i^2}}} \cdot \lr{\prod\limits_{j=-n}^{n} q^{{j^2}}} \cdot \prod\limits_{i=-n}^{n}q^{2ni} \cdot W \lr{q^{-2n}, \dots 1, \dots, q^{2n}} = $$ $$ = q^{\frac{2n (n+1)(2n+1)}{3}} \cdot W \lr{q^{-2 n}, \dots 1, \dots, q^{2 n}} = q^{\frac{2n (n+1)(2n+1)}{3}} \cdot \prod\limits_{-n \le j < i \le n} \lr{q^{-2i}-q^{-2j}}. $$ Now it is obvious that the main system determinant is non--zero under the considered restriction $0<q<1$, because all the multipliers in the last formula for the determinant are non--zero. Theorem 1 is completely proved. \textbf{Remark.} The problem of proving the correctness of the finite linear system (\ref{eq12}), and thus of rigorously grounding the method of interpolation by shifted Gaussians, was first formulated by L.A. Minin and S.M. Sitnik. After some time, on the basis of numerical computations, the formula for the determinant (\ref{eq15}) was found by A.S. Timashov. The strict proof was found by S.N.
Ushakov. After that the correctness of the finite linear system (\ref{eq12}) was established, and it is a basis for numerical computer methods and algorithms. \subsection{Further properties in the case of the $n\times n$ system: palindromes, symmetry and dimension reduction} To formulate further results we define the notion of a vector palindrome; this notion is connected with the symmetry of the vector components with respect to its ``middle'' components. \begin{definition} The vector $\overline x$ in $n$ dimensions is called \textit{a palindrome} if the following relation for its components is fulfilled $$ \overline{x_i}=\overline{x_{n+1-i}}, \ i= 1,\dots,n. $$ \end{definition} \begin{theorem}\label{t2} Let us consider the following linear system of equations \begin{equation} \label{eq16} A \cdot \overline x = \overline b, \end{equation} where $A$ is a non--degenerate $n \times n$ matrix for whose elements the following relation is valid $$ a_{i, j}=a_{n+1-i, n+1-j}\ \forall i,j = 1,\dots,n, $$ and the vector $\overline b$ is a palindrome. Then the system solution $\overline x$ is also a palindrome. \end{theorem} In fact the condition of Theorem 2 means that every line and every column of the matrix $A$ is also a palindrome. Proof. As the matrix $A$ is non--degenerate, the unique solution $\overline x$ exists. Every $i$--th line of the system may be written as $$ \sum\limits_{j=1}^{n} a_{i, j} \cdot x_j = b_i. $$ Let us prove that the vector $\overline y=\lr{x_{n}, x_{n-1}, \dots, x_1 }$ is also a solution to (\ref{eq16}); from this, due to the uniqueness property, the statement of Theorem 2 follows. Indeed, for the $i$--th line $$ \sum\limits_{j=1}^{n} a_{i, j} \cdot y_j = \sum\limits_{j=1}^{n} a_{{n+1-i, n+1-j}} \cdot y_j= \sum\limits_{j=1}^{n} a_{{n+1-i, j}} \cdot x_j = b_{n+1-i}=b_i. $$ And from this Theorem 2 follows. Now let us demonstrate the importance of the palindrome property. Using it together with the symmetry of the system it is possible to decrease the number of calculations essentially, approximately by a factor of two.
It leads to more effective and robust numerical algorithms. \begin{corollary} For the system (\ref{eq12}) the solution is symmetric, namely $d_{k}=d_{-k}$. \end{corollary} Indeed, let us note that for the system (\ref{eq12}) the conditions of Theorem 2 are valid: $$ a_{ij}=q^{(i-j)^{2}}=q^{(n+1-i-(n+1-j))^{2}}=a_{n+1-i,n+1-j}. $$ It follows that $d_{k}=d_{-k}$. This corollary leads to a very important conclusion. Due to it we can reduce the number of equations to be solved numerically in fact by half, and it also reduces the gaps in the values of the coefficients. Because of that, the numerical difficulties of the problem and the computational time are reduced essentially. It permits solving systems twice as large in the same time as without using the palindrome symmetry. Another important fact for the system analysis is that we can find the coefficients $d_{k}$ of the approximations of the node function explicitly; they are defined by (\ref{eq10}) and are solutions to the system (\ref{eq11})--(\ref{eq12}). \begin{theorem}\label{t3} For the coefficients $d_k$ the following formula is valid: \begin{equation} \label{eq17} d_k = (-1)^{k} q^{-k^2} \frac{ W_{k, \, n+1} \lr{q^{-2 n}, \dots, q^0, \dots, q^{2 n} } }{W \lr{q^{-2 n}, \dots, q^0, \dots, q^{2 n} }}. \end{equation} \end{theorem} Proof. By Cramer's rule $$ d_k = \frac{\Delta_k} {|A|}. $$ Manipulating $\Delta_k$ in the same way as in the proof of Theorem 1 we derive $$ \Delta_k = (-1)^{n+1+k+1+n} q^{-k^2} q^{\frac{2n (n+1)(2n+1)}{3}} W_{k, \, n+1} \lr{q^{- 2 n}, \dots, q^0, \dots, q^{2 n} }, $$ so $$ d_k=(-1)^{k} q^{-k^2} \frac{ W_{k, \, n+1} \lr{q^{-2n}, \dots, q^0, \dots, q^{2n} } }{W \lr{q^{-2n}, \dots, q^0, \dots, q^{2n} }}. $$ Theorem 3 is proved. As an example let us illustrate the formula \eqref{eq17} by $5\times 5$ determinants, as we do throughout this paper for clarity and the reader's convenience.
$$ \Delta_1 = \left| \begin{array}{ccccc} 1 & q & q^4 & 0& q^{16} \\ q & 1 & q & 0 & q^9 \\ q^4 & q & 1 & 1 & q^4 \\ q^9 & q^4 & q & 0 & q \\ q^{16} & q^9 & q^4 & 0 & 1 \end{array} \right| = q^{20} \cdot \left| \begin{array}{ccccc} q^{-8} & q^{-4} & 1 & 0 & q^8 \\ q^{-4} & q^{-2}& 1 & 0 & q^4 \\ 1 & 1 & 1 & q^{-1} & 1 \\ q^4 & q^2 & 1 & 0 & q^{-4}\\ q^8 & q^4 & 1 & 0 & q^{-8} \end{array} \right|= $$ $$ =q^{20} \cdot q^{-1} \cdot \left| \begin{array}{ccccc} q^{-8} & q^{-4} & 1 & 0& q^8 \\ q^{-4} & q^{-2}& 1 & 0 & q^4 \\ 1 & 1 & 1 & 1 & 1 \\ q^4 & q^2 & 1 & 0 & q^{-4}\\ q^8 & q^4 & 1 & 0 & q^{-8} \end{array} \right| = $$ $$ = - q^{20} \cdot q^{-1} \cdot \left| \begin{array}{cccc} 1 & q^{4} & \lr{q^{4}}^2 & \lr{q^{4}}^4 \\ 1 & q^{2} & \lr{q^{2}}^2 & \lr{q^{2}}^4 \\ 1 & q^{-2} & \lr{q^{-2}}^2 & \lr{q^{-2}}^4\\ 1 & q^{-4} & \lr{q^{-4}}^2 & \lr{q^{-4}}^4 \end{array} \right|. $$ Finally, let us mention that the method of transmutations \cite{S6,S7} may be applied to the problems considered in this paper. The idea of applying transmutations is to represent the set of Gaussians as transmuted images of a set of more standard functions, e.g. linear exponentials. It means that for some transmutation operator $T$ it holds that \begin{equation} \exp(-(x-k)^2) = T (\exp(-(x-k))), \quad k\in \mathbb{Z}. \end{equation} So the series in shifted Gaussians may be transformed into series in shifted linear exponentials. Such transmutation operators $T$ may be found explicitly but are rather complicated. \section{Final conclusions and a short summary of the paper} In this paper we consider an interpolation problem connected with series in integer shifts of Gaussians. Known approaches to this problem meet numerical difficulties.
Because of this, a direct method is considered, based on finite--rank approximations by linear systems. The main result of the paper (Theorem 1) is a proof of the correctness of the finite--rank linear system approximation. For that, an explicit formula for the main determinant of the linear system is derived, demonstrating that it is non--zero. Moreover, we consider symmetry properties of the system based on the palindrome notion. This leads to an essential reduction of the considered system, almost by half in dimension (Theorem 2). Finally, we derive an explicit formula for the system solution as a ratio of two Vandermonde determinants (Theorem 3). \end{document}
\begin{document} \begin{abstract} In this paper we establish necessary and sufficient conditions for the limit set of a projective Anosov representation to be a $C^{\alpha}$-submanifold of projective space for some $\alpha\in(1,2)$. We also calculate the optimal value of $\alpha$ in terms of the eigenvalue data of the Anosov representation. \end{abstract} \title{Regularity of limit sets of Anosov representations} \tableofcontents \section{Introduction} Suppose that $\Hb_{\Rb}^d$ is real hyperbolic $d$-space. Let $\partial_\infty\Hb_{\Rb}^d$ denote the geodesic boundary of $\Hb_{\Rb}^d$ and let ${ \rm Isom}(\Hb_{\Rb}^d)$ denote the isometry group of $\Hb_{\Rb}^d$. Given a representation $\rho: \Gamma \rightarrow { \rm Isom}(\Hb_{\Rb}^d)$, the \emph{limit set of $\rho$} is defined to be \begin{align*} \Lc_\rho = \overline{\rho(\Gamma) \cdot x_0} \cap \partial_\infty \Hb_{\Rb}^d \end{align*} where $x_0 \in \Hb_{\Rb}^d$ is any point. If we further assume that $\Gamma$ is a hyperbolic group and $\rho$ is a convex co-compact representation, then there is a $\rho$-equivariant, continuous map from $\partial_\infty \Gamma$, the Gromov boundary of $\Gamma$, to the limit set $\Lc_\rho$. The limit set in this setting is generically very irregular, for instance when $\partial_\infty\Gamma$ is a topological manifold Yue~\cite{Y1996} proved: unless $\rho$ is a co-compact action on a totally geodesic subspace of $\Hb_{\Rb}^d$, its limit set is fractal like, and in particular, has Hausdorff dimension strictly greater than its topological dimension. The group ${ \rm Isom}(\Hb_{\Rb}^d)$ is a semisimple Lie group. For a general semisimple Lie group $G$, there is a rich class of representations from a hyperbolic group $\Gamma$ to $G$ called \emph{Anosov representations}, which generalize the convex co-compact representations from $\Gamma$ to ${ \rm Isom}(\Hb_{\Rb}^d)$. Anosov representations were introduced by Labourie~\cite{L2006} and extended by Guichard-Wienhard~\cite{GW2012}. 
Since then, they have been heavily studied, \cite{KLP2013, KLP2014,KLP2014b,GGKW2015,BPS2016}. One reason for their popularity is that they are rigid enough to retain many of the good geometric properties that convex co-compact representations have, while at the same time are flexible enough to admit many new and interesting examples. In this paper, we investigate the regularity of the limit sets of Anosov representations from $\Gamma$ into $\PGL_d(\Rb)$. We will give precise definitions in Section~\ref{sec:Anosov_repn} but informally: if $\Gamma$ is a word hyperbolic group with Gromov boundary $\partial_\infty \Gamma$, a representation $\rho:\Gamma \rightarrow \PGL_d(\Rb)$ is said to be $k$-Anosov if there exist continuous $\rho$-equivariant maps \begin{align*} \xi^{(k)} : \partial_\infty \Gamma \rightarrow \Gr_k(\Rb^d) \text{ and } \xi^{(d-k)} : \partial_\infty \Gamma \rightarrow \Gr_{d-k}(\Rb^d) \end{align*} which satisfy certain dynamical properties. For a $k$-Anosov representation, it is reasonable to call the image of $\xi^{(k)}$ in $\Gr_k(\Rb^d)$ the ``$k$-limit set of $\rho$ in $\Gr_k(\Rb^d)$.'' We will largely focus our attention on $1$-Anosov representations; by a result of Guichard-Wienhard \cite[Proposition 4.3]{GW2012}, for any Anosov representation $\rho : \Gamma \rightarrow G$ into a semisimple Lie group $G$, there exists $d > 0$ and an irreducible representation $\phi : G \rightarrow \PGL_d(\Rb)$ such that $\phi \circ \rho$ is $1$-Anosov. Thus, up to post composition with irreducible representations, the class of $1$-Anosov representations contains all other types of Anosov representations. Further, the flag maps induced by $\phi$ are smooth. Thus, another result of Guichard-Wienhard \cite[Proposition 4.4]{GW2012} implies that all regularity properties of the limit set can be investigated by reducing to the case of $1$-Anosov representations. 
Our first main result gives a sufficient condition for the $1$-limit set of a $1$-Anosov representation to be a $C^\alpha$-submanifold of $\Pb(\Rb^d)$ for some $\alpha>1$. \begin{theorem}\label{thm:main} (Theorem \ref{thm:main_body}) Suppose $\Gamma$ is a hyperbolic group, $\partial_\infty \Gamma$ is a topological $(m-1)$-manifold, and $\rho: \Gamma \rightarrow \PGL_{d}(\Rb)$ is a $1$-Anosov representation. If \begin{enumerate} \item[($\dagger$)] $\rho$ is $m$-Anosov, and $\xi^{(1)}(x) + \xi^{(1)}(z) + \xi^{(d-m)}(y)$ is a direct sum for all pairwise distinct $x,y,z \in \partial_\infty \Gamma$, \end{enumerate} then \begin{enumerate} \item[($\ddagger$)] $M:=\xi^{(1)}(\partial_\infty \Gamma)$ is a $C^{\alpha}$-submanifold of $\Pb(\Rb^d)$ for some $\alpha > 1$. \end{enumerate} Moreover, $T_{\xi^{(1)}(x)} M = \xi^{(m)}(x)$ for any $x \in \partial_\infty \Gamma$. \end{theorem} \begin{remark}\label{rmk:open} \ \begin{enumerate} \item A weaker version of this result, only deducing $C^1$ regularity, was independently proven by Pozzetti-Sambarino-Wienhard \cite{PSW18}. \item Property ($\dagger$) and $k$-Anosovness in Theorem~\ref{thm:main} are open conditions in $\Hom(\Gamma, \PGL_d(\Rb))$, see Section \ref{sec:stability}. \end{enumerate} \end{remark} Theorem \ref{thm:main} is a generalization of the following theorem due to Benoist in the setting of divisible, properly convex domains in $\Pb(\Rb^d)$. A group of projective transformations $\Gamma\subset\PGL_d(\Rb)$ \emph{divides} a properly convex domain $\Omega\subset\Pb(\Rb^d)$ if $\Gamma$ acts properly discontinuously and co-compactly on $\Omega$. \begin{theorem}[\cite{Ben04}]\label{thm:convex_divisible} Let $\Gamma\subset\PGL_d(\Rb)$ be a hyperbolic group that divides a properly convex domain $\Omega\subset\Pb(\Rb^d)$. Then $\id:\Gamma\to\PGL_d(\Rb)$ is a $1$-Anosov representation whose $1$-limit set is $\partial\Omega$. Furthermore, $\partial\Omega\subset\Pb(\Rb^d)$ is a $C^\alpha$-submanifold for some $\alpha>1$.
\end{theorem} Theorem \ref{thm:main} also generalizes a result due to Labourie in the setting of Hitchin representations. Let $S$ be a closed orientable hyperbolizable surface and fix a Fuchsian representation $\rho_0: \pi_1(S) \rightarrow \PGL_2(\Rb)$. Then let $\tau_d : \PGL_2(\Rb) \rightarrow \PGL_d(\Rb)$ be the standard irreducible representation (see Section \ref{sec:rhoirred}). A representation $\rho : \pi_1(S) \rightarrow \PGL_d(\Rb)$ is \emph{Hitchin} if it is conjugate to a representation in the connected component of $\Hom(\pi_1(S), \PGL_d(\Rb))$ that contains $\tau_d \circ \rho_0$. \begin{theorem}[\cite{L2006}] If $\rho:\pi_1(S)\to\PGL_d(\Rb)$ is a Hitchin representation, then $\rho$ is $k$-Anosov for every $k \in \{1,\dots, d-1\}$, and the $1$-limit set of $\rho$ is a $C^{\alpha}$-submanifold in $\Pb(\Rb^d)$ for some $\alpha>1$. \end{theorem} Using Theorem~\ref{thm:main}, we can find more examples of representations that preserve $C^\alpha$-submanifolds in $\Pb(\Rb^d)$. \begin{example}\label{cor:hyperbolic_lattices}(See Section~\ref{sec:real_hyp_lattices}) Suppose $\tau: \PO(m,1) \rightarrow \PGL_d(\Rb)$ is an irreducible representation, $\Gamma \leq \PO(m,1)$ is a co-compact lattice, and $\rho := \tau|_{\Gamma} : \Gamma \rightarrow \PGL_d(\Rb)$. If $\rho$ is $1$-Anosov, then there exists a neighborhood $\Oc$ of $\rho$ in $\Hom(\Gamma, \PGL_d(\Rb))$ such that every representation in $\Oc$ is a $1$-Anosov representation whose $1$-limit set is a $C^{\alpha}$-submanifold of $\Pb(\Rb^d)$ for some $\alpha > 1$.
\end{example} \begin{example}\label{cor:hitchin} (See Section \ref{sec:Hitchin}) If $\rho : \pi_1(S) \rightarrow \PGL_d(\Rb)$ is a Hitchin representation, then for all $k=1,\dots,d-1$, there is an open set $\Oc$ of $\bigwedge^k\rho$ in $\Hom\left(\Gamma,\PGL\left(\bigwedge^k\Rb^d\right)\right)$ so that every representation in $\Oc$ is a $1$-Anosov representation whose $1$-limit set is a $C^{\alpha}$-submanifold of $\Pb\left(\bigwedge^k\Rb^d\right)$ for some $\alpha>1$. See Section \ref{sec:wedge} for the definition of $\bigwedge^k\rho$. In particular, by applying \cite[Proposition 4.4]{GW2012}, the $k$-limit set of $\rho$ is a $C^{\alpha}$-submanifold of $\Gr_k(\Rb^d)$ for some $\alpha>1$. \end{example} \begin{remark} Example~\ref{cor:hitchin} was independently observed by Pozzetti-Sambarino-Wienhard \cite{PSW18}.\end{remark} In fact, Theorem \ref{thm:main} is a consequence of a more general theorem, see Theorem \ref{thm:main_body}, that is stated using \emph{$\rho$-controlled subsets} $M\subset\Pb(\Rb^d)$, of which the $1$-limit set of $\rho$ is an example, see Definition \ref{def:controlled}. In the main body of our paper, all our results will be stated for $\rho$-controlled subsets. These statements are stronger than the results we mention in this introduction, but are more technical to state. We also investigate the extent to which the converse of Theorem \ref{thm:main} holds. In general, there are $1$-Anosov representations $\rho$ whose $1$-limit set are $C^\infty$-submanifolds of $\Pb(\Rb^d)$, but for which ($\dagger$) in Theorem \ref{thm:main} does not hold, see Example~\ref{ex:surface_bad_example}. However, we prove that when $\Gamma$ is a surface group and $\rho$ is irreducible, the conditions in Theorem~\ref{thm:main} are both necessary and sufficient. 
\begin{theorem}\label{thm:nec_surface}(Theorem \ref{thm:nec_surface_body}) Suppose $\Gamma$ is a hyperbolic group, $\rho: \Gamma \rightarrow \PGL_{d}(\Rb)$ is an irreducible $1$-Anosov representation, and $\partial_\infty \Gamma$ is homeomorphic to a circle. Then the following are equivalent: \begin{enumerate} \item[($\dagger$)] $\rho$ is a $2$-Anosov representation and $\xi^{(1)}(x) + \xi^{(1)}(y) + \xi^{(d-2)}(z)$ is a direct sum for all $x,y,z \in \partial_\infty \Gamma$ distinct, \item[($\ddagger$)] $\xi^{(1)}(\partial_\infty \Gamma)$ is a $C^{\alpha}$-submanifold of $\Pb(\Rb^d)$ for some $\alpha > 1$. \end{enumerate} \end{theorem} From Theorem \ref{thm:nec_surface} and (2) of Remark~\ref{rmk:open}, we have the following corollary. \begin{corollary} Suppose $\Gamma$ is a hyperbolic group with $\partial_\infty \Gamma$ homeomorphic to a circle. Let $\Oc \subset \Hom(\Gamma, \PGL_d(\Rb))$ denote the set of representations that are irreducible, $1$-Anosov, and whose $1$-limit set is a $C^{\alpha}$-submanifold of $\Pb(\Rb^d)$ for some $\alpha> 1$ (which may depend on $\rho$). Then $\Oc$ is an open set in $\Hom(\Gamma, \PGL_d(\Rb))$. \end{corollary} For non-surface groups the situation is more complicated; there exist irreducible $1$-Anosov representations $\rho: \Gamma \rightarrow \PGL_d(\Rb)$ whose $1$-limit set is a $C^{\infty}$-submanifold of $\Pb(\Rb^d)$, but $\rho$ does not satisfy the condition ($\dagger$) in Theorem~\ref{thm:main}, see Example~\ref{ex:irred_bad_example}. However, if one assumes a stronger irreducibility condition on $\rho$, then the conditions in Theorem~\ref{thm:main} are both necessary and sufficient.
\begin{theorem}\label{thm:nec_general} (Theorem \ref{thm:nec_general_body}) Suppose $\Gamma$ is a hyperbolic group, $\partial_\infty \Gamma$ is an $(m-1)$-dimensional topological manifold, and $\rho: \Gamma \rightarrow \PGL_{d}(\Rb)$ is an irreducible $1$-Anosov representation such that $\bigwedge^m \rho: \Gamma \rightarrow \PGL(\bigwedge^m \Rb^d)$ is also irreducible. Then the following are equivalent: \begin{enumerate} \item[($\dagger$)] $\rho$ is an $m$-Anosov representation and $\xi^{(1)}(x) + \xi^{(1)}(y) + \xi^{(d-m)}(z)$ is a direct sum for all pairwise distinct $x,y,z \in \partial_\infty \Gamma$, \item[($\ddagger$)] $\xi^{(1)}(\partial_\infty \Gamma)$ is a $C^{\alpha}$-submanifold of $\Pb(\Rb^d)$ for some $\alpha > 1$. \end{enumerate} \end{theorem} Recall that if $\rho:\Gamma\to H$ is a Zariski-dense representation and $\tau: H\to \PGL_d(\Rb)$ is an irreducible representation, then $\tau\circ\rho:\Gamma\to\PGL_d(\Rb)$ is irreducible. Thus, (2) of Remark~\ref{rmk:open} and Theorem \ref{thm:nec_general} give the following corollary. \begin{corollary} Suppose $\Gamma$ is a hyperbolic group. Let $\Oc \subset \Hom(\Gamma, \PGL_d(\Rb))$ denote the set of representations $\rho: \Gamma \rightarrow \PGL_d(\Rb)$ where $\rho$ is $1$-Anosov, has Zariski dense image, and whose $1$-limit set is a $C^{\alpha}$-submanifold of $\Pb(\Rb^d)$ for some $\alpha> 1$ (which may depend on $\rho$). Then $\Oc$ is an open set in $\Hom(\Gamma, \PGL_d(\Rb))$. \end{corollary} Finally, for representations satisfying certain irreducibility conditions, we also determine the optimal regularity of the $1$-limit set in terms of the spectral data of $\rho(\Gamma)$. More precisely, given $g\in\PGL_d(\Rb)$, let $\overline{g}\in\GL_d(\Rb)$ be a lift of $g$, and let $\lambda_1(\overline{g}) \geq \lambda_2(\overline{g}) \geq \dots \geq \lambda_d(\overline{g})$ denote the absolute values of the eigenvalues of $\overline{g}$.
Note that for all $i,j$, the ratio \[\frac{\lambda_i}{\lambda_j}(g):=\frac{\lambda_i(\overline{g})}{\lambda_j(\overline{g})}\] does not depend on the choice of lift $\overline{g}$ of $g$. Then given a representation $\rho : \Gamma \rightarrow \PGL_d(\Rb)$ and $2 \leq m \leq d-1$ define \begin{align*} \alpha_m(\rho) = \inf_{\gamma\in\Gamma }\left\{\log\frac{\lambda_1}{\lambda_{m+1}}(\rho(\gamma))\Bigg/\log\frac{\lambda_1}{\lambda_{m}}(\rho(\gamma)) : \frac{\lambda_1}{\lambda_{m}}(\rho(\gamma)) \neq 1 \right\}. \end{align*} If $\rho$ is $(1,m)$-Anosov, it follows from definition that $\alpha_m(\rho) > 1$ (see Section~\ref{sec:Anosov_repn}). \begin{theorem}\label{thm:regularity} (Theorem \ref{thm:regularity_body}) Suppose $\Gamma$ is a hyperbolic group and $\rho: \Gamma \rightarrow \PGL_{d}(\Rb)$ is an irreducible $(1,m)$-Anosov representation so that $\xi^{(1)}(x) + \xi^{(1)}(y) + \xi^{(d-m)}(z)$ is a direct sum for all $x,y,z \in \partial_\infty \Gamma$ distinct. Then \begin{align*} \alpha_m(\rho) \leq \sup\left\{ \alpha \in (1,2) : \xi^{(1)}(\partial_\infty \Gamma) \text{ is a }C^{\alpha}\text{-submanifold} \right\} \end{align*} with equality if \begin{align*} \xi^{(1)}(\partial_\infty \Gamma) \cap \left(\xi^{(1)}(x) + \xi^{(1)}(y) + \xi^{(d-m)}(z)\right) \end{align*} spans $\xi^{(1)}(x) + \xi^{(1)}(y) + \xi^{(d-m)}(z)$ for all $x,y,z \in \partial_\infty \Gamma$ distinct. \end{theorem} \begin{remark} \label{rem:stablility} \ \begin{enumerate} \item In Theorem \ref{thm:regularity}, when $\xi^{(1)}(\partial_\infty \Gamma)$ has either dimension one or co-dimension one, the extra hypothesis for equality is automatically satisfied. If the dimension is one (i.e. $m=2$), then \begin{align*} \xi^{(1)}(x) + \xi^{(1)}(y) + \xi^{(d-m)}(z)= \xi^{(1)}(x) + \xi^{(1)}(y) + \xi^{(d-2)}(z) = \Rb^d. \end{align*} So the extra hypothesis follows from the irreducibility of $\rho$. If the co-dimension is one (i.e. 
$m=d-1$), then \begin{align*} \xi^{(1)}(x) + \xi^{(1)}(y) + \xi^{(d-m)}(z)= \xi^{(1)}(x) + \xi^{(1)}(y) + \xi^{(1)}(z) \end{align*} is spanned by $\xi^{(1)}(x), \xi^{(1)}(y), \xi^{(1)}(z)$. Hence the extra hypothesis always holds in this case. \item In general, the extra hypothesis for equality is an open condition, see Section \ref{sec:stability}. \item The irreducibility of $\rho$ is necessary in Theorem~\ref{thm:regularity}. For instance, if $\tau_d : \PGL_2(\Rb) \rightarrow \PGL_d(\Rb)$ is the standard irreducible representation, see Section~\ref{sec:rhoirred}, and $\Gamma \leq \PGL_2(\Rb)$ is a co-compact lattice, then $\rho = (\tau_5 \oplus \tau_2) |_{\Gamma}$ is $1$-Anosov and $\xi^{(1)}(\partial_\infty \Gamma)$ is a $1$-dimensional $C^\infty$-submanifold of $\Pb(\Rb^7)$. At the same time, for any infinite order $\gamma\in\Gamma$, \[\log\frac{\lambda_1}{\lambda_{3}}(\rho(\gamma))\Bigg/\log\frac{\lambda_1}{\lambda_{2}}(\rho(\gamma))= 3/2.\] \item Notice that the quantity $\alpha_m(\rho)$ is invariant under passing to finite index subgroups. In particular, if $\Gamma_0 \leq \Gamma$ is a finite index subgroup and $\gamma \in \Gamma$, then there exists some $k \in \Nb$ such that $\gamma^k \in \Gamma_0$. Further, \begin{align*} \log\frac{\lambda_1}{\lambda_{m+1}}(\rho(\gamma^k))\Bigg/\log\frac{\lambda_1}{\lambda_{m}}(\rho(\gamma^k)) = \log\frac{\lambda_1}{\lambda_{m+1}}(\rho(\gamma))\Bigg/\log\frac{\lambda_1}{\lambda_{m}}(\rho(\gamma)). \end{align*} Hence $\alpha_m(\rho|_{\Gamma_0}) = \alpha_m(\rho)$. \end{enumerate} \end{remark} In Section~\ref{sec:regularity}, we establish a generalization of Theorem~\ref{thm:regularity} which holds for $\rho$-controlled subsets. One example of such a subset is the boundary of a properly convex domain $\Omega\subset\Pb(\Rb^d)$ that admits a $\Gamma$-action induced by a $1$-Anosov representation $\rho$. In this case, Theorem~\ref{thm:regularity_body} implies the following. 
\begin{theorem}\label{thm:regularity2} Suppose $\Gamma$ is a hyperbolic group and $\rho: \Gamma \rightarrow \PGL_{d}(\Rb)$ is an irreducible $1$-Anosov representation. Also, suppose $\Omega\subset\Pb(\Rb^d)$ is a $\rho(\Gamma)$-invariant properly convex domain so that $\xi^{(d-1)}(x)\cap \partial\Omega=\xi^{(1)}(x)$ for all $x\in \partial \Gamma$. If \begin{enumerate} \item [($\star$)]$p_1+p_2+\xi^{(1)}(y)$ is a direct sum for all pairwise distinct $p_1,p_2,\xi^{(1)}(y)\in\partial\Omega$, \end{enumerate} then \begin{enumerate} \item [($\star\star$)] $\partial\Omega$ is $C^\alpha$ along $\xi^{(1)}(\partial_\infty\Gamma)$ for some $\alpha>1$. \end{enumerate} Moreover, $T_{\xi^{(1)}(x)} \partial\Omega = \xi^{(d-1)}(x)$ for any $x \in \partial_\infty \Gamma$, and \begin{align*} \alpha_{d-1}(\rho) = \sup\left\{ \alpha \in (1,2) : \partial\Omega \text{ is } C^{\alpha} \text{ along }\xi^{(1)}(\partial_\infty \Gamma) \right\}. \end{align*} \end{theorem} In the case when the $\Gamma$-action on $\Omega$ is co-compact, Theorem \ref{thm:regularity2} was previously proven by Guichard~\cite{G2005} using different techniques. Also, a weaker version of Theorem~\ref{thm:regularity2} (without the optimal bound for $\alpha$) was previously proven independently by Danciger-Gueritaud-Kassel \cite{DCG17} and the second author \cite{Zimmer17}. \subsection{Terminology}\label{sec:terminology} Throughout the paper we will use the following terminology: \begin{enumerate} \item $\norm{\cdot}_{2}$ will always denote the standard $\ell^2$-norm on $\Rb^d$, \item a $(m-1)$-dimensional topological manifold $M \subset \Pb(\Rb^d)$ is $C^{\alpha}$ for some $\alpha \in (1,2)$ if for every $p \in M$ there exist local coordinates around $p$ and a differentiable map $f:\Rb^{m-1}\to\Rb^{d-m}$ such that $M$ coincides with the graph of $f$ near $p$ and \[f(u+h)=f(u)+df_u(h)+{\rm O}(\norm{h}_{2}^\alpha)\] for all $u,h\in\Rb^{m-1}$. 
\item a $(m-1)$-dimensional topological manifold $M \subset \Pb(\Rb^d)$ is \emph{$C^{\alpha}$ along a subset $N\subset M$} for some $\alpha \in (1,2)$ if for every $p \in N$ there exist local coordinates around $p$ and a continuous map $f:\Rb^{m-1}\to\Rb^{d-m}$ such that $M$ coincides with the graph of $f$ near $p$ and if $(u,f(u)) \in N$, then $f$ is differentiable at $u$ and satisfies \[f(u+h)=f(u)+df_u(h)+{\rm O}(\norm{h}_{2}^\alpha)\] for all $h\in\Rb^{m-1}$. \end{enumerate} \section{Anosov representations}\label{sec:Anosov_repn} For the rest of this article, $\Gamma$ will denote a hyperbolic group, and $\partial_\infty\Gamma$ will be its Gromov boundary. In this section, we define Anosov representations from $\Gamma$ to $\PGL_d(\Rb)$, and mention some of their properties. \subsection{A definition of Anosov representations} Since they were introduced, several other characterizations of Anosov representations have been given by Kapovich et al.~\cite{KLP2013, KLP2014,KLP2014b}, Gu{\'e}ritaud et al.~\cite{GGKW2015}, and Bochi et al.~\cite{BPS2016}. The definition we give below comes from~\cite[Theorem 1.7]{GGKW2015}. First, let $S$ be a finite symmetric generating set of $\Gamma$, and $d_S$ the induced word metric on $\Gamma$. For $\gamma \in \Gamma$, let $\ell_S(\gamma)$ denote the minimal translation distance of $\gamma$ acting on $\Gamma$, that is \begin{align*} \ell_S(\gamma) := \inf_{x \in \Gamma} d_S(\gamma\cdot x, x). \end{align*} Also, recall that for any $g\in\PGL_d(\Rb)$ and any $i,j\in\{1,\dots,d\}$, we have defined \[\frac{\lambda_i}{\lambda_j}(g):=\frac{\lambda_i(\overline{g})}{\lambda_j(\overline{g})},\] where $\lambda_1(\overline{g})\geq\dots\geq\lambda_d(\overline{g})$ are the absolute values of the (generalized) eigenvalues of a representative $\overline{g}\in\GL_d(\Rb)$ of $g$. A third ingredient we need to define Anosov representations is an appropriate definition of ``well-behaved'' flag maps. More precisely, we have the following. 
\begin{definition} Let $\rho: \Gamma \rightarrow \PGL_{d}(\Rb)$ be a representation. If $1 \leq k \leq d-1$, then a pair of maps $\xi^{(k)}: \partial_\infty \Gamma \rightarrow \Gr_k(\Rb^d)$ and $\xi^{(d-k)}: \partial_\infty \Gamma \rightarrow \Gr_{d-k}(\Rb^d)$ are called: \begin{itemize} \item \emph{$\rho$-equivariant} if $\xi^{(k)}(\gamma\cdot x) = \rho(\gamma)\cdot\xi^{(k)}(x)$ and $\xi^{(d-k)}(\gamma\cdot x) = \rho(\gamma)\cdot\xi^{(d-k)}(x)$ for all $x \in \partial_\infty \Gamma$ and $\gamma \in \Gamma$, \item \emph{dynamics-preserving} if for every $\gamma \in \Gamma$ of infinite order with attracting fixed point $\gamma^+ \in \partial_\infty \Gamma$, the points $\xi^{(k)}(\gamma^+) \in \Gr_k(\Rb^d)$ and $\xi^{(d-k)}(\gamma^+) \in \Gr_{d-k}(\Rb^d)$ are attracting fixed points of the action of $\rho(\gamma)$ on $\Gr_k(\Rb^d)$ and $\Gr_{d-k}(\Rb^d)$, and \item \emph{transverse} if for every distinct pair $x, y \in \partial_\infty \Gamma$ we have \begin{align*} \xi^{(k)}(x) + \xi^{(d-k)}(y) = \Rb^{d}. \end{align*} \end{itemize} \end{definition} With these definitions, we can now define Anosov representations. \begin{definition} A representation $\rho: \Gamma \rightarrow \PGL_d(\Rb)$ is \emph{$k$-Anosov} if \begin{itemize} \item there exist continuous, $\rho$-equivariant, dynamics preserving, and transverse maps $\xi^{(k)}: \partial \Gamma \rightarrow \Gr_k(\Rb^d)$, $\xi^{(d-k)}: \partial \Gamma \rightarrow \Gr_{d-k}(\Rb^d)$, and \item for any sequence $\{\gamma_i\}_{i=1}^\infty\subset\Gamma$ so that $\lim_{i\to\infty}\ell_S(\gamma_i)=\infty$, we have \[\lim_{i\to\infty}\log\frac{\lambda_k}{\lambda_{k+1}}(\rho(\gamma_i))=\infty.\] \end{itemize} If $\rho$ is $k$-Anosov for all $k \in \{ k_1,\dots, k_j\}$ we say that $\rho$ is $(k_1,\dots, k_j)$-Anosov. \end{definition} If $S'$ is another finite symmetric generating set of $\Gamma$, then $\id:(\Gamma,d_S)\to(\Gamma,d_{S'})$ is a quasi-isometry. 
In particular, the notion of an Anosov representation does not depend on the choice of $S$. Also, it follows from the definition that a representation $\rho:\Gamma\to\PGL_d(\Rb)$ is $k$-Anosov if and only if it is $(d-k)$-Anosov. We refer to $\xi^{(k)}$ as the \emph{$k$-flag map} of $\rho$, and $\xi^{(k)}(\partial\Gamma)\subset\Gr_k(\Rb^d)$ as the \emph{$k$-limit set} of $\rho$. Given a subspace $V \subset \Rb^N$ define \begin{align}\label{eqn:projectivization} [V]=\{ [v] \in \Pb(\Rb^N) : v \in V\}. \end{align} Often, we will view $\xi^{(k)}(x)$ as the projective subspace $[\xi^{(k)}(x)]\subset\Pb(\Rb^d)$. However, to simplify notation, we will denote $[\xi^{(k)}(x)]$ simply by $\xi^{(k)}(x)$ in those settings. \begin{remark} In many other places in the literature, what we call a $k$-Anosov representation is usually known as a \emph{$P_k$-Anosov representation}, where $P_k$ is the stabilizer in $\PGL_d(\Rb)$ of a point in $\Gr_k(\Rb^d)$. This notation is an artifact of a more general definition of Anosov representations to an arbitrary non-compact semisimple Lie group. Since we do not use that generality here, we will use $k$ in place of $P_k$ to simplify the notation. \end{remark} \subsection{Singular values and Anosov representations} We will now briefly discuss singular values, which we use to give an alternate description of Anosov representations. This description was initially due to Kapovich et al.~\cite{KLP2014,KLP2014b}, but was also later proven by Bochi et al. \cite{BPS2016} using different techniques. \begin{definition} Let $|\cdot|$ and $\norm{\cdot}$ be norms on $\Rb^d$, and let $L:(\Rb^d,|\cdot|)\to(\Rb^d,\norm{\cdot})$ be a linear map. 
\begin{itemize} \item For any $X\in(\Rb^d,|\cdot|)$, the \emph{stretch factor} of $X$ under $L$ is the quantity \[\sigma_X(L):=\frac{\norm{L(X)}}{|X|}.\] \item For $i=1,\dots,d$, the $i$-th \emph{singular value} of $L$ is the quantity \begin{align*} \sigma_i(L) :=\max_{W\subset\Rb^d,\dim W=i} & \min_{X\in W}\sigma_X(L) = \min_{W \subset \Rb^d, \dim W=d-i+1} \max_{X \in W} \sigma_X(L). \end{align*} \end{itemize} \end{definition} Observe that for all $i=1,\dots,d-1$, $\sigma_i(L)\geq\sigma_{i+1}(L)$, and if $L$ is invertible, then $\sigma_i(L)=\frac{1}{\sigma_{d-i+1}(L^{-1})}$. When $L=\overline{g}\in\GL_d(\Rb)$ and $\norm{\cdot}=|\cdot|$ is the standard norm $\norm{\cdot}_{2}$ on $\Rb^d$, we denote $\sigma_i(L)$ by $\mu_i(\overline{g})$. In that case, if $A$ is a $d\times d$ real-valued matrix representing $\overline{g}$ in an orthonormal basis for the standard inner product on $\Rb^d$, then the singular values $\mu_1(\overline{g})\geq \dots\geq\mu_d(\overline{g})>0$ are the square roots of the eigenvalues of $A^TA$. Using this, we may define, for any $g\in\PGL_d(\Rb)$ and all $i,j\in\{1,\dots,d\}$, the quantity \[\frac{\mu_i}{\mu_j}(g):=\frac{\mu_i(\overline{g})}{\mu_j(\overline{g})},\] where $\overline{g}\in\GL_d(\Rb)$ is a lift of $g$. We can now state the following theorem due to Kapovich et al.~\cite{KLP2014,KLP2014b} (see Bochi et al. \cite[Proposition 4.9]{BPS2016}). \begin{theorem}\label{thm:SV_char_of_Anosov} Suppose $\Lambda$ is a finitely generated group and $S$ is a finite symmetric generating set. A representation $\rho:\Lambda\to\PGL_d(\Rb)$ is $k$-Anosov if and only if there are constants $C,c>0$ such that \begin{align*} \log \frac{\mu_k}{\mu_{k+1}}(\rho(\gamma)) \geq C d_S(\gamma,\id)-c \end{align*} for all $\gamma \in \Lambda$. \end{theorem} \begin{remark} In Theorem \ref{thm:SV_char_of_Anosov}, it is implied, not assumed, that $\Lambda$ is a hyperbolic group. 
\end{remark} \subsection{Properties of Anosov representations} \label{sec:properties} Next, we recall some important properties of Anosov representations. Define respectively the \emph{Cartan} and \emph{Jordan projections} $\mu,\lambda:\GL_d(\Rb)\to\Rb^d$ by \[\mu(\overline{g}):= \left ( \log \mu_1(\overline{g}), \dots, \log \mu_d(\overline{g}) \right)\,\,\,\text{ and }\,\,\,\lambda(\overline{g}) = \left ( \log \lambda_1(\overline{g}), \dots, \log \lambda_d(\overline{g}) \right).\] Observe that while the Jordan projection is invariant under conjugation in $\GL_d(\Rb)$, the Cartan projection is not. These two projections can be interpreted geometrically in the following way. Associated to the Lie group $\PGL_d(\Rb)$ is the Riemannian symmetric space $X$, on which $\PGL_d(\Rb)$ acts transitively and by isometries. As a $\PGL_d(\Rb)$-space, $X=\PGL_d(\Rb)/\PO(d)$. Furthermore, the distance $d_X$ on $X$ induced by its Riemannian metric can be computed from the Cartan projection by the formula \[d_X(g_1\cdot\PO(d),g_2\cdot\PO(d))=\norm{\mu\left(\overline{g_1^{-1}g_2}\right)}_2,\] where $\overline{g_1^{-1}g_2}\in\SL^\pm_d(\Rb):=\{g \in \GL_d(\Rb) : \det(g) = \pm 1 \}$ is a lift of $g_1^{-1}g_2$, and $\norm{\cdot}_2$ is the $l^2$-norm. On the other hand, if $g\in\PGL_d(\Rb)$ and $\overline{g}\in\SL^\pm_d(\Rb)$ is a representative of $g$, then \[\inf_{p\in X}d_X(p,g\cdot p)=\norm{\lambda(\overline{g})}_2.\] As such, if $g\in\PGL_d(\Rb)$ and $\overline{g}\in\SL^\pm_d(\Rb)$ is a lift of $g$, then $\mu(\overline{g})$ is a refinement of the distance by which $g$ translates the identity coset in $X$, and $\lambda(\overline{g})$ is a refinement of the minimal translation distance of $g$ in $X$. As an immediate consequence of Theorem~\ref{thm:SV_char_of_Anosov}, Anosov representations coarsely preserve the metric $d_S$ on $\Gamma$. \begin{corollary}\label{thm:QI_Anosov} Let $\rho:\Gamma\to\PGL_d(\Rb)$ be $k$-Anosov for any $k$. 
Then the map $\Gamma\to X$ defined by $\gamma\mapsto \rho(\gamma)\cdot\PSO(d)$ is a quasi-isometric embedding. In other words, there are constants $C\geq 1$ and $c\geq 0$ such that for all $\gamma_1,\gamma_2\in\Gamma$, \[\frac{1}{C}\norm{\mu\left(\overline{\rho(\gamma_1^{-1}\gamma_2)}\right)}_2-c\leq d_S(\gamma_1,\gamma_2)\leq C\norm{\mu\left(\overline{\rho(\gamma_1^{-1}\gamma_2)}\right)}_2+c,\] where $\overline{\rho(\gamma_1^{-1}\gamma_2)}\in\SL^\pm_d(\Rb)$ is a lift of $\rho(\gamma_1^{-1}\gamma_2)$. \end{corollary} We also have the following proposition due to Quint (see \cite[Lemma 2.19]{BCLS2015} for a proof), which restricts the possible Zariski closures of Anosov representations to $\PGL_d(\Rb)$. \begin{proposition}\label{prop:Zclosure} Let $\rho: \Gamma \rightarrow \PGL_{d}(\Rb)$ be a $1$-Anosov representation. If $\rho$ is irreducible, then the Zariski closure of $\rho(\Gamma)$ is a semisimple Lie group without compact factors. \end{proposition} We will also use the following observation of Guichard-Wienhard. \begin{proposition}\label{prop:strongly_irreducible}\cite[Lemma 5.12]{GW2012} Let $\rho: \Gamma \rightarrow \PGL_{d}(\Rb)$ be an irreducible $1$-Anosov representation. If $\Gamma_0 \leq \Gamma$ is a finite index subgroup, then $\rho|_{\Gamma_0}$ is also irreducible. \end{proposition} In many places in our argument, it will be more convenient to work with representations into $\SL_d(\Rb)$ instead of $\PGL_d(\Rb)$. The next observation allows us to make this reduction. Let $\pi:\GL_d(\Rb)\to\PGL_d(\Rb)$ denote the obvious projection. \begin{observation}\label{obs:lift} For any representation $\rho: \Gamma \rightarrow \PGL_{d}(\Rb)$, there exists a subgroup $\Lambda_\rho \leq \SL_d(\Rb)$ so that $\pi|_{\Lambda_\rho}:\Lambda_\rho\to\PGL_d(\Rb)$ is a representation whose kernel is a subgroup of $\Zb_2$, and whose image is a subgroup of $\rho(\Gamma)$ with index at most two. 
\end{observation} \begin{proof} Define $\Lambda_0 := \{ g \in \SL^{\pm}_d(\Rb) : [g] \in \rho(\Gamma) \}$, and let $\Lambda_\rho := \Lambda_0 \cap \SL_d(\Rb)$. Then $\pi(\Lambda_0)\subset\PGL_d(\Rb)$ coincides with $\rho(\Gamma)$, and $\Lambda_\rho$ has index at most two in $\Lambda_0$. \end{proof} In particular, if $\Gamma$ is a hyperbolic group, then so is $\Lambda_\rho$, and there are canonical identifications $\partial_\infty\Gamma=\partial_\infty\rho(\Gamma)=\partial_\infty\pi(\Lambda_\rho)=\partial_\infty\Lambda_\rho$. Furthermore, the following proposition is an immediate consequence of \cite[Corollary 1.3]{GW2012} . \begin{proposition} Let $\rho:\Gamma\to\PGL_d(\Rb)$ be a representation. The representation \[\rho':=\pi|_{\Lambda_\rho}:\Lambda_\rho\to\PGL_d(\Rb)\] is $k$-Anosov if and only if $\rho$ is $k$-Anosov. If so, the $k$-flag maps of $\rho$ and $\rho'$ agree. \end{proposition} \begin{remark}\label{rem:lift} To prove any properties about the $k$-limit sets of $\rho$, it is now sufficient to show those properties hold for the $k$-limit sets of $\rho'$. The advantage of working with $\rho'$ in place of $\rho$ is that $\rho':\Lambda_\rho\to\PGL(d,\Rb)$ admits a lift to a representation from $\Lambda_\rho$ to $\SL_d(\Rb)$. {\bf With this, we can henceforth assume that $\rho:\Gamma\to\PGL(d,\Rb)$ admits a lift to a representation $\overline{\rho}:\Gamma\to\SL(d,\Rb)$.} \end{remark} \subsection{Gromov geodesic flow space}\label{sec:flowspace} In their proof of Theorem \ref{thm:SV_char_of_Anosov}, Bochi et al. \cite{BPS2016} gave a description of Anosov representations using dominated splittings. Our next goal is to give this description. To do so, we recall the definition of the flow space of a hyperbolic group, and state some of their well-known properties. For more details, see for instance~\cite{Gromov1987},~\cite{C1994}, or~\cite{M1991}. 
As a topological space, the \emph{flow space} for $\Gamma$, denoted $\widetilde{U\Gamma}$, is homeomorphic to $\partial_\infty\Gamma^{(2)}\times\Rb$, where $\partial_\infty\Gamma^{(2)}:=\{(x,y)\in\partial_\infty\Gamma^{2}:x\neq y\}$. This flow space admits a natural $\Rb$-action by translation in the $\Rb$-factor called the \emph{geodesic flow on $\widetilde{U\Gamma}$}. We will use the notation $v=(v^+,v^-,v_0)\in \widetilde{U\Gamma}$, and denote the geodesic flow on $\widetilde{U\Gamma}$ by $\phi_t$, i.e. \[\phi_t(v)=(v^+,v^-,v_0+t)=(\phi_t(v)^+,\phi_t(v)^-,\phi_t(v)_0). \] There is a proper, co-compact $\Gamma$-action on $\wt{U\Gamma}$ that commutes with $\phi_t$, and satisfies $\gamma\cdot(v^+,v^-,\Rb)=(\gamma\cdot v^+,\gamma\cdot v^-,\Rb)$. There is also a natural $\mathbb{Z}/2\mathbb{Z}$ action on $\wt{U\Gamma}$ which satisfies \begin{align*} (1+2\Zb) \cdot (x,y,\Rb) = (y,x,\Rb). \end{align*} This action commutes with the $\Gamma$ action, but not the $\phi_t$ action. Instead: \begin{align*} \alpha \phi_t \alpha = \phi_{-t} \end{align*} where $\alpha = (1+2\Zb)$. So the actions of $\Gamma$, $\phi_t$, and $\mathbb{Z}/2\mathbb{Z}$ combine to yield an action of $\Gamma \times (\Rb \rtimes_{\psi}\mathbb{Z}/2\mathbb{Z})$ on $\wt{U\Gamma}$ where $\psi : \mathbb{Z}/2\mathbb{Z} \rightarrow \Aut(\Rb)$ is given by $\psi(\alpha)(t) = -t$. Since the $\Gamma$ action commutes with $\phi_t$, the geodesic flow on $\widetilde{U\Gamma}$ descends to a flow on the compact space $U\Gamma:=\widetilde{U\Gamma}/\Gamma$, which we refer to as the \emph{geodesic flow on $U\Gamma$}, and denote by $\widehat{\phi}_t$. This also implies that if $v^+=\gamma^+$ and $v^-=\gamma^-$ are the attracting and repelling fixed points of some infinite order $\gamma\in\Gamma$, then the orbit $(\gamma^+,\gamma^-,\Rb)\subset\widetilde{U\Gamma}$ of $\phi_t$ descends to a closed orbit of $\widehat{\phi}_t$ in $U\Gamma$. 
We will denote the period of this closed orbit by $T_\gamma\in\Rb$, and refer to $T_\gamma$ as the \emph{period of $\gamma$}. In other words, for all $v_0\in\Rb$, $\gamma\cdot (\gamma^+,\gamma^-,v_0)=(\gamma^+,\gamma^-,v_0+T_\gamma)$. Furthermore, $\widetilde{U\Gamma}$ admits a $\Gamma \times \mathbb{Z}/2\mathbb{Z}$-invariant metric so that every orbit $(v^+,v^-,\Rb)$ of $\phi_t$ is a continuous quasi-geodesic. Since the $\Gamma$-action on $\widetilde{U\Gamma}$ is also co-compact, any $\Gamma$-orbit is a quasi-isometry. As a consequence, there is a canonical $\Gamma$-invariant homeomorphism $\partial_\infty\widetilde{U\Gamma}\simeq\partial_\infty\Gamma$ between the Gromov boundaries of $\wt{U\Gamma}$ and $\Gamma$, and $v^+$ and $v^-$ in $\partial_\infty\wt{U\Gamma}$ are the forward and backward endpoints of $(v^+,v^-,\Rb)\subset\wt{U\Gamma}$ respectively. \begin{remark} In the case when $\Gamma$ is the fundamental group of a compact Riemannian manifold $X$ with negative sectional curvature, this geodesic flow space is what one would expect. In particular, let $T^1 X$ denote the unit tangent bundle of $X$, let $\wt{X}$ denote the universal cover of $X$, and let $T^1\wt{X}$ denote the unit tangent bundle of $\wt{X}$. Then we may take $\widetilde{U\Gamma}$ to be $T^1\widetilde X$ and $U\Gamma$ to be $T^1X$. The geodesic flow on both $T^1X$ and $T^1\widetilde{X}$ is the usual geodesic flow associated to the Riemannian metrics on $\widetilde{X}$ and $X$, and the $\Gamma$-invariant metric $d_{\widetilde{U\Gamma}}$ is the lift of the Riemannian metric on $T^1X$ that is locally given by the product of the Riemannian metric on $X$ and the spherical metric on the fibers. Further, the $\mathbb{Z}/2\mathbb{Z}$ action is given by $v \rightarrow -v$. \end{remark} Gromov proved that the geodesic flow space $\wt{U\Gamma}$ is unique up to homeomorphism. 
\begin{theorem}\cite[Theorem 8.3.C]{Gromov1987}\label{thm:Gromov_uniqueness} Suppose that $\Gc$ is a proper Gromov hyperbolic metric space such that \begin{enumerate} \item $\Gamma \times (\Rb \rtimes_{\psi} \mathbb{Z}/2\mathbb{Z})$ acts on $\Gc$, \item the actions of $\Gamma$ and $\mathbb{Z}/2\mathbb{Z}$ are isometric, \item for every $v \in \Gc$, the map $\gamma \in \Gamma \rightarrow \gamma \cdot v \in \Gc$ is a quasi-isometry. \item the $\Rb$ action is free and every $\Rb$-orbit is a quasi-geodesic in $\Gc$. Further, the induced map $\Gc / \Rb \rightarrow \partial_\infty \Gc^{(2)}$ is a homeomorphism. \end{enumerate} Then there exists a $\Gamma \times \mathbb{Z}/2\mathbb{Z}$-equivariant homeomorphism $T : \Gc \rightarrow \wt{U\Gamma}$ that maps $\Rb$-orbits to $\Rb$-orbits. \end{theorem} \subsection{Dominated Splittings} Next, we describe an alternate characterization of Anosov representations in $\GL_d(\Rb)$ using dominated splittings due to Bochi et al.~\cite{BPS2016}. Let $\rho:\Gamma\to\GL_d(\Rb)$ be a representation. Let $E:=\widetilde{U\Gamma}\times\Rb^d$ be the trivial bundle over $\widetilde{U\Gamma}$, and define the vector bundle $E_{\rho}:=E/\Gamma$ over $U\Gamma$, where the $\Gamma$ action on $E$ is given by $\gamma\cdot(v,X)=(\gamma\cdot v,\rho(\gamma)\cdot X)$. Since $E_{\rho}$ is naturally a flat vector bundle over $U\Gamma$, it admits a continuous norm, and the compactness of $U\Gamma$ ensures that any two such norms are bi-Lipschitz. For any continuous norm on $E_{\rho}$, choose a lift of this norm to a $\Gamma$-invariant, continuous norm $\norm{\cdot}$ on $E$. With this, we can state the following theorem due to Bochi et al (see Theorem 2.2, Proposition 4.5 and Proposition 4.9 in~\cite{BPS2016}). 
\begin{theorem}\label{prop:dom_split} A representation $\rho:\Gamma\to\GL_d(\Rb)$ is $k$-Anosov if and only if there exist \begin{itemize} \item continuous, $\phi_t$-invariant, $\rho$-equivariant maps \[F_1:\wt{U\Gamma}\to \Gr_k(\Rb^d)\,\,\,\text{ and }\,\,\,F_2:\wt{U\Gamma}\to\Gr_{d-k}(\Rb^d)\] so that $F_1(v)+F_2(v)=\Rb^d$ for all $v\in\wt{U\Gamma}$, and \item constants $C > 0$, $\beta > 0$ such that \begin{align*} \frac{\norm{X_1}_{\phi_tv}}{\norm{X_2}_{\phi_tv}} \leq C e^{-\beta t} \frac{\norm{X_1}_{v}}{\norm{X_2}_{v}} \end{align*} for all $v \in \widetilde{U\Gamma}$, $X_i \in F_i(v)$ non-zero, and $t \geq 0$. \end{itemize} \end{theorem} Here, we may think of $F_1$ and $F_2$ as $\Gamma$-invariant sub-bundles of $E$. The maps $F_1$ and $F_2$ are related to the flag maps $\xi^{(k)}$ and $\xi^{(d-k)}$ by \[F_1(v) = \xi^{(k)}(v^+)\,\,\,\text{ and }\,\,\,F_2(v) = \xi^{(d-k)}(v^-)\] for all $v = (v^+,v^-,v_0)\in\widetilde{U\Gamma}$. \section{$\rho$-controlled sets}\label{sec:rho_controlled_sets} In this section we introduce $\rho$-controlled sets and construct a useful family of projections. \begin{definition}\label{def:controlled}Suppose that $\rho : \Gamma \rightarrow \PGL_d(\Rb)$ is a $1$-Anosov representation. A closed $\rho(\Gamma)$-invariant subset $M\subset\Pb(\Rb^d)$ is \emph{$\rho$-controlled} if \begin{enumerate} \item[(i)] $\xi^{(1)}(\partial_\infty\Gamma)\subset M$ and \item[(ii)] $M\cap\xi^{(d-1)}(x)=\xi^{(1)}(x)$ for every $x\in\partial_\infty\Gamma$. \end{enumerate} If $\rho$ also happens to be $m$-Anosov for some $m=2,\dots,d-1$, then a $\rho$-controlled subset $M\subset\Pb(\Rb^d)$ is \emph{$m$-hyperconvex} if \begin{align*} p_1+p_2+\xi^{(d-m)}(y) \end{align*} is a direct sum for all $p_1,p_2 \in M$ and $y \in \partial_\infty \Gamma$ with $p_1, p_2, \xi^{(1)}(y)$ pairwise distinct. \end{definition} \begin{remark} We will typically consider the case when $M$ is a topological $(m-1)$-dimensional manifold and then require that $M$ is $m$-hyperconvex. 
\end{remark} The three main examples of $\rho$-controlled subsets $M\subset\Pb(\Rb^d)$ that we will be concerned with are the following. \begin{example}\label{eg:limitset} When $\rho : \Gamma \rightarrow \PGL_d(\Rb)$ is a $1$-Anosov representation, then the $1$-limit set $\xi^{(1)}(\partial_\infty\Gamma)$ for $\rho$ is obviously $\rho$-controlled. Furthermore, if $\rho$ is $m$-Anosov for some $m=1,\dots,d-1$, then $\xi^{(1)}(\partial_\infty\Gamma)$ is $m$-hyperconvex if and only if \begin{align*} \xi^{(1)}(x)+\xi^{(1)}(z)+\xi^{(d-m)}(y) \end{align*} is a direct sum for all pairwise distinct $x,y,z\in\partial_\infty\Gamma$. \end{example} \begin{example}\label{eg:convex} Suppose that $\rho:\Gamma\to\PGL_d(\Rb)$ is a 1-Anosov representation and $\rho(\Gamma)$ preserves a properly convex domain $\Omega\subset\Pb(\Rb^d)$ so that $\xi^{(d-1)}(x)\cap\partial\Omega=\xi^{(1)}(x)$ for all $x\in\partial_\infty\Gamma$, see Section \ref{sec:properly_convex}. Then $M:=\partial\Omega$ is obviously $\rho$-controlled. Notice that in this case, the requirement that $M$ is $(d-1)$-hyperconvex is simply that \begin{align*} p_1+p_2+\xi^{(1)}(y) \end{align*} is a direct sum for all pairwise distinct $p_1,p_2, \xi^{(1)}(y)\in\partial\Omega$. This is satisfied if and only if $\xi^{(1)}(\partial_\infty \Gamma)$ does not intersect any proper line segments in $\partial \Omega$. \end{example} \begin{example}\label{eg:limitset_subgroup} Suppose $\rho : \Gamma \rightarrow \PGL_d(\Rb)$ is a $1$-Anosov representation and $\Gamma_1 \leq \Gamma$ is a quasi-convex subgroup. Then $\rho_1:=\rho|_{\Gamma_1}: \Gamma_1 \rightarrow \PGL_d(\Rb)$ is also $1$-Anosov, and its $1$-limit set $\xi^{(1)}(\partial_\infty\Gamma)$ for $\rho$ is obviously $\rho_1$-controlled. 
Furthermore, if $\rho_1$ is $m$-Anosov for some $m=1,\dots,d-1$, then $\xi^{(1)}(\partial_\infty\Gamma)$ is $m$-hyperconvex if and only if \begin{align*} \xi^{(1)}(x)+\xi^{(1)}(z)+\xi^{(d-m)}(y) \end{align*} is a direct sum for all pairwise distinct $x,y,z \in\partial_\infty\Gamma$ with $y \in \partial_\infty \Gamma_1$. \end{example} Recall that $\partial_\infty \Gamma^{(2)}$ is the set of all pairs $(x,y) \in \partial_\infty \Gamma^2$ with $x \neq y$. Then for any $(x,y)\in\partial_\infty\Gamma^{(2)}$, let $L_{x,y}$ denote the orbit $(x,y,\Rb)\subset\widetilde{U\Gamma}$ of $\phi_t$. The following proposition is one of the key tools we use to investigate regularity properties of $\rho$-controlled subsets. \begin{proposition}\label{prop:rho_controlled_sets_projections} Suppose that $\rho : \Gamma \rightarrow \PGL_d(\Rb)$ is a $1$-Anosov representation and $M \subset \Pb(\Rb^d)$ is $\rho$-controlled. Then there exists a continuous family of continuous maps \begin{align*} \pi_{x,y} : M \setminus \left\{ \xi^{(1)}(x), \xi^{(1)}(y) \right\} \rightarrow L_{x,y} \end{align*} indexed by $(x,y) \in \partial_\infty \Gamma^{(2)}$ such that \begin{align*} \pi_{x,y} & =\rho(\gamma)^{-1} \circ \pi_{\gamma\cdot x,\gamma\cdot y}\circ \rho(\gamma), \\ x & = \lim_{p\to \xi^{(1)}(x)} \pi_{x,y}(p), \text{ and} \\ y & = \lim_{p\to \xi^{(1)}(y)} \pi_{x,y}(p) \end{align*} for all $(x,y) \in \partial_\infty \Gamma^{(2)}$ and $\gamma\in\Gamma$. \end{proposition} Delaying the proof of Proposition~\ref{prop:rho_controlled_sets_projections} until Section \ref{sec:controlled_proof}, we describe the main application. Suppose that $\rho : \Gamma \rightarrow \PGL_d(\Rb)$ is a $1$-Anosov representation and $M \subset \Pb(\Rb^d)$ is $\rho$-controlled. Let $\{ \pi_{x,y} : (x,y)\in \partial_\infty \Gamma^{(2)}\}$ be a family of maps satisfying Proposition~\ref{prop:rho_controlled_sets_projections}. 
Then define the following space \begin{align}\label{eqn:P(M)} P(M) : = \left\{ (v,p) \in \wt{U \Gamma} \times M : p \neq \xi^{(1)}(v^\pm) \text{ and } v = \pi_{v^+,v^-}(p) \right\}. \end{align} Notice that there is a natural $\Gamma$ action on $P(M)$ given by \begin{align*} \gamma \cdot (v,p) = (\gamma \cdot v, \rho(\gamma)p). \end{align*} This space has the following properties. \begin{observation}\label{obs:compact} With the notation above, \begin{enumerate} \item $\Gamma$ acts co-compactly on $P(M)$, \item for any $v \in \wt{U\Gamma}$ and $z \in M \setminus \{ \xi^{(1)}(v^+), \xi^{(1)}(v^-)\}$, there exists $t \in \Rb$ such that $(\phi_t(v), z) \in P(M)$, \item for any compact set $K \subset \wt{U\Gamma}$ there exists $\delta > 0$ such that: If $v \in K$ and $p \in M\setminus \{ \xi^{(1)}(v^+)\}$ satisfies $d_{\Pb}\left(\xi^{(1)}(v^+), p\right) \leq \delta$, then $(\phi_t (v), p) \in P(M)$ for some $t > 0$. \end{enumerate} \end{observation} \begin{proof} (1): Since the $\Gamma$-action on $\wt{U\Gamma}$ is co-compact, there exists a compact set $K \subset \wt{U\Gamma}$ such that $\Gamma \cdot K = \wt{U\Gamma}$. Since $\{ \pi_{x,y} : (x,y)\in \partial_\infty \Gamma^{(2)}\}$ is a family of maps satisfying Proposition~\ref{prop:rho_controlled_sets_projections}, the set \begin{align*} \wh{K} : = \left\{(v,z) \in K \times M:z \neq \xi^{(1)}(v^\pm) \text{ and } v=\pi_{v^+,v^-}(z) \right\} \end{align*} is compact. Further, by definition, $\Gamma\cdot \wh{K} = P(M)$. (2): Follows directly from the definition. (3): Fix a compact set $K \subset \wt{U\Gamma}$. If such a $\delta > 0$ does not exist, then there exists $v_n \in K$, $p_n \in M\setminus \{ \xi^{(1)}(v_n^+)\}$, and $t_n \leq 0$ such that \begin{align*} d_{\Pb}\left(\xi^{(1)}(v_n^+), p_n\right) \leq 1/n \end{align*} and $\phi_{t_n}(v_n)=\pi_{v_n^-,v_n^+}(p_n) $. By passing to a subsequence we can suppose that $v_n \rightarrow v \in K$. 
But then $p_n \rightarrow \xi^{(1)}(v^+)$ as $n\to\infty$, so \begin{align*} v^+ = \lim_{n \rightarrow \infty} \pi_{v_n^-,v_n^+}(p_n) = \lim_{n \rightarrow \infty} \phi_{t_n}(v_n) \in L_{v^+, v^-} \cup \{ v^-\} \end{align*} which is a contradiction. \end{proof} \begin{remark} The set $P(M)$ is designed to be a generalization of the following construction: Suppose $\Gamma$ is the fundamental group of $X$ a compact negatively curved Riemannian manifold, $\wt{X}$ is the universal cover of $X$, $T^1 \wt{X}$ is the unit tangent bundle of $\wt{X}$, and $\phi_t$ is the geodesic flow on $T^1 \wt{X}$. Then we define \begin{align*} {\rm Perp} \subset T^1 \wt{X} \times \partial_\infty \Gamma \end{align*} to be the set of pairs $(v,z)$ such that there exists $w \in T^1_{\pi(v)} \wt{X}$ with $w \bot v$ and $\lim_{t \rightarrow \infty} \pi(\phi_tw) = z$. \end{remark} \subsection{Properly convex domains}\label{sec:properly_convex} We now describe properly convex domains and some of their relevant properties. These will be used to prove Proposition~\ref{prop:rho_controlled_sets_projections}. \begin{definition} \ \begin{enumerate} \item An open set $\Omega\subset\Pb(\Rb^d)$ is a \emph{properly convex domain} if its closure lies in an affine chart in $\Pb(\Rb^d)$, and it is convex, i.e. for every pair of distinct points $x,y\in\Omega$, there is a projective line segment in $\Omega$ whose endpoints are $x$ and $y$. \item Given a subset $X \subset \Pb(\Rb^d)$ the \emph{projective automorphism group of $X$} is defined to be \begin{align*} \Aut(X) = \{ g \in \PGL_d(\Rb) : g X = X\}. \end{align*} \end{enumerate} \end{definition} Given a properly convex domain $\Omega\subset\Pb(\Rb^d)$, there is a canonical distance on $\Omega$ which is defined as follows. For any pair of points $x,y\in\Omega$, let $l$ be a projective line through $x$ and $y$, and let $a$ and $b$ be the two points of intersection of $l$ with $\partial\Omega$, ordered so that $a<x\leq y<b$ lie along $l$. 
Then define \[H_\Omega(x,y):=\log C(a,x,y,b).\] Here, $C$ is the cross ratio along the projective line $l$, i.e. \[C(a,x,y,b):=\frac{\norm{a-y}_2\norm{b-x}_2}{\norm{a-x}_2\norm{b-y}_2},\] where $\norm{\cdot}_2$ is the standard norm on some (equivalently, any) affine chart of $\Pb(\Rb^d)$ containing the closure of $\Omega$. One can verify from properties of the cross ratio that the map $H_\Omega:\Omega\times\Omega\to\Rb^+\cup\{0\}$ is a well-defined, continuous, distance function. This is commonly known as the \emph{Hilbert metric} on $\Omega$. Let $T\Omega$ denote the tangent bundle of $\Omega$ and $\pi : T\Omega \rightarrow \Omega$ the natural projection. Also, for any $v\in T\Omega$, let $l_v$ denote the oriented projective line segment in $\Omega$ through $\pi(v)$ in the direction given by $v$, and with endpoints in $\partial\Omega$. Then let $v^+$ and $v^-$ be the forward and backward endpoints of $l_v$ respectively. The Hilbert metric $H_\Omega$ is infinitesimally given by the norm \begin{eqnarray*} h_\Omega:&T\Omega&\to\Rb^+\cup\{0\},\\ &v&\mapsto \norm{v}_2\left(\frac{1}{\norm{\pi(v)-v^+}_2}+\frac{1}{\norm{\pi(v)-v^-}_2}\right), \end{eqnarray*} where $\norm{\cdot}_2$ is the standard norm on any affine chart of $\Pb(\Rb^d)$ containing the closure of $\Omega$. With this, define the \emph{unit tangent bundle} of $\Omega$ to be \begin{align*} T^1 \Omega = \{ v \in T\Omega : h_\Omega(v) = 1\}. \end{align*} We recall the definition of a convex co-compact action on $\Omega$. \begin{definition}\label{defn:cc} A discrete subgroup $\Lambda \leq \PGL_d(\Rb)$ \emph{acts convex co-compactly} on a properly convex domain $\Omega$ if $\Lambda \leq \Aut(\Omega)$ and there exists a closed non-empty convex subset $\Cc \subset \Omega$ such that $\Lambda \leq \Aut(\Cc)$ and the quotient $\Lambda \backslash \Cc$ is compact. 
\end{definition} \begin{remark} This is not the definition of convex co-compactness used in~\cite{DCG17}, instead they say groups satisfying Definition~\ref{defn:cc} act \emph{naive convex co-compactly}. \end{remark} We now use work of Danciger-Gueritaud-Kassel~\cite{DCG17} and the second author~\cite{Zimmer17} to construct a convex co-compact action. \begin{theorem}\label{thm:cc_action}\cite[Theorem 1.4]{DCG17}, \cite[Theorem 1.27]{Zimmer17} Suppose that $\rho : \Gamma \rightarrow \PGL_d(\Rb)$ is a $1$-Anosov representation and there exists a properly convex domain $\Omega_0 \subset \Pb(\Rb^d)$ with $\rho(\Gamma) \leq \Aut(\Omega_0)$. Then $\rho(\Gamma)$ acts convex co-compactly on a properly convex domain $\Omega \subset \Pb(\Rb^d)$. Moreover, \begin{enumerate} \item $\xi^{(1)}(\partial_\infty \Gamma) \subset \partial \Omega$, \item for every $x,y \in \partial_\infty \Gamma$ distinct, $\Omega_0$ and $\Omega$ are contained in the same connected component of \begin{align*} \Pb(\Rb^d) \setminus \left( \xi^{(d-1)}(x) \cup \xi^{(d-1)}(y) \right), \end{align*} \item we can assume \begin{align*} \Cc = \Omega \cap {\rm ConvHull} \left\{ \xi^{(1)}(\partial_\infty \Gamma) \right\} \end{align*} and \begin{align*} \overline{\Cc} \cap \partial \Omega = \xi^{(1)}(\partial_\infty \Gamma). \end{align*} \end{enumerate} \end{theorem} \begin{remark} In~\cite[Theorem 1.27]{Zimmer17} it is assumed that $\rho$ is irreducible. \end{remark} \subsection{The proof of Proposition~\ref{prop:rho_controlled_sets_projections}}\label{sec:controlled_proof} We will prove Proposition~\ref{prop:rho_controlled_sets_projections} by constructing a projective model of the geodesic flow space $\wt{U \Gamma}$. This construction has several steps: first we post compose to obtain a new 1-Anosov representation that preserves a properly convex domain. Theorem \ref{thm:cc_action} then gives us a convex co-compact action, which we then use to construct a projective model of the geodesic flow space. 
Finally we use this projective model to construct the maps $\pi_{x,y}$. \subsubsection{Constructing an invariant properly convex domain}\label{subsec:constructing_properly_convex_domain} In general, a 1-Anosov representation will not preserve a properly convex domain. \begin{example}\cite[Proposition 1.7]{DCG17} If $d$ is even and $\rho : \pi_1(S) \rightarrow \PGL_d(\Rb)$ is Hitchin (see Definition~\ref{defn:hitchin_reps}), then $\rho(\pi_1(S))$ does not preserve a properly convex domain. \end{example} However, we will show that after post composing with another representation we can always find an invariant properly convex domain. In Section~\ref{sec:suff_cond_diff} we will study the regularity of these sets. Denote the vector space of symmetric 2-tensors by $\Sym_2(\Rb^d)$ and let $D :=\dim \,\Sym_2(\Rb^d)$. Then let $S : \GL_d(\Rb) \rightarrow \GL(\Sym_2(\Rb^d))$ be the representation \begin{align*} S(g)(v \otimes v) = gv \otimes gv. \end{align*} Given a representation $\rho : \Gamma \rightarrow \PGL_d(\Rb)$, let $S(\rho) : \Gamma \rightarrow \PGL_d(\Rb)$ be the representation $S(\rho) = S \circ \rho$. Associated to $S$ are smooth embeddings $\Phi : \Pb(\Rb^d) \rightarrow \Pb(\Sym_2(\Rb^d))$ and $\Phi^* : \Gr_{d-1}(\Rb^d) \rightarrow \Gr_{D-1}(\Sym_2(\Rb^d))$ defined by \begin{align*} \Phi(v) = [v \otimes v] \end{align*} and \begin{align*} \Phi^*(W) = \Span\left\{ v \otimes w + w \otimes v : w \in W, v \in \Rb^d \right\}. \end{align*} Notice that $\Phi$ and $\Phi^*$ are both $S$-equivariant. \begin{proposition}\label{prop:S_composition} If $\rho : \Gamma \rightarrow \PGL_d(\Rb)$ is $1$-Anosov with boundary maps $\xi^{(1)}$ and $\xi^{(d-1)}$, then $S(\rho)$ is 1-Anosov with boundary maps $\Phi \circ \xi^{(1)}$ and $\Phi^* \circ \xi^{(d-1)}$. \end{proposition} \begin{proof} The maps $\Phi \circ \xi^{(1)}$ and $\Phi^* \circ \xi^{(d-1)}$ are clearly $S(\rho)$-equivariant, dynamics-preserving, and transverse. 
Suppose that $\gamma \in \Gamma$ and $\overline{g}$ is a lift of $\rho(\gamma)$. If $\lambda_1(\overline{g}) \geq \dots \geq \lambda_d(\overline{g})$ are the absolute values of the eigenvalues of $\overline{g}$ and $\lambda_1(S(\overline{g})) \geq \lambda_2(S(\overline{g})) \geq \dots$ are the absolute values of the eigenvalues of $S(\overline{g})$, then \begin{align*} \lambda_1(S(\overline{g})) = \lambda_1(\overline{g})^2 \end{align*} and \begin{align*} \lambda_2(S(\overline{g})) = \lambda_1(\overline{g}) \lambda_2(\overline{g}). \end{align*} So \begin{align*} \frac{\lambda_1}{\lambda_2}(S(\rho)(\gamma)) =\frac{\lambda_1(\overline{g})}{\lambda_2(\overline{g})}= \frac{\lambda_1}{\lambda_2}(\rho(\gamma)). \end{align*} Then since $\rho$ is $1$-Anosov, we see that $S(\rho)$ is also $1$-Anosov. \end{proof} Now we construct a properly convex domain in $\Pb(\Sym_2(\Rb^d))$ which is invariant under the action of $S(\PGL_d(\Rb))$. Given $X \in \Sym_2(\Rb^d)$ we say that $X$ is \emph{positive definite}, and write $X > 0$, if $(f\otimes f)(X) > 0$ for every $f \in \Rb^{d*}\setminus\{0\}$. Also, we say that $X$ is \emph{positive semidefinite}, and write $X \geq 0$, if $(f\otimes f)(X) \geq 0$ for every $f \in \Rb^{d*}$. Then define \begin{align*} \Pc^+ := \left\{ [X] : X \in \Sym_2(\Rb^d), X > 0\right\}. \end{align*} \begin{observation}\label{obs:PD_matrices} \ \begin{enumerate} \item $\Pc^+$ is a properly convex domain in $\Pb(\Sym_2(\Rb^d))$, \item $S(\PGL_d(\Rb)) \leq \Aut(\Pc^+)$, \item $\Phi(\Pb(\Rb^d)) \subset \overline{\Pc^+}$. \end{enumerate} \end{observation} \begin{proof} (1): Clearly $C := \{ X : X\in \Sym_2(\Rb^d), X > 0\}$ is a convex open cone in $\Sym_2(\Rb^d)$. Since \begin{align*} (f\otimes f)(X+tY) = (f\otimes f)(X)+t(f\otimes f)(Y) \end{align*} it is clear that $C$ does not contain any real affine lines. Thus $C$ is properly convex. Since $C$ projects to $\Pc^+$ we see that $\Pc^+$ is a properly convex domain. 
(2): Notice that \begin{align*} (f\otimes f)(S(g)X) = \Big((f \circ g) \otimes (f \circ g) \Big)(X) \end{align*} when $f \in \Rb^{d*}$, $g \in \GL_d(\Rb)$, and $X \in\Sym_2(\Rb^d)$. So $S(\PGL_d(\Rb)) \leq \Aut(\Pc^+)$. (3): Suppose that $[v] \in \Pb(\Rb^d)$. Then $\Phi([v]) = [v \otimes v]$ and \begin{align*} (f\otimes f)(v \otimes v) = f(v)f(v) \geq 0 \end{align*} when $f \in \Rb^{d*}$. Thus $\Phi([v]) \subset \overline{\Pc^+}$. \end{proof} \subsubsection{Constructing a projective geodesic flow}\label{sec:proj_geod_flow} In this step we construct a ``projective'' geodesic flow for any $1$-Anosov representation that acts convex co-compactly on a properly convex domain. For the rest of this subsection, suppose that $\rho : \Gamma \rightarrow \PGL_d(\Rb)$ is a $1$-Anosov representation and $\rho(\Gamma)$ acts convex co-compactly on a properly convex domain $\Omega$. We can further assume that \begin{align*} \Cc = \Omega \cap {\rm ConvHull} \left\{ \xi^{(1)}(\partial_\infty \Gamma) \right\} \end{align*} and \begin{align*} \overline{\Cc} \cap \partial \Omega = \xi^{(1)}(\partial_\infty \Gamma). \end{align*} Every projective line segment in $\Omega$ can be parametrized to be a geodesic in $H_\Omega$ and thus $T^1 \Omega$ has a natural geodesic flow, which we denote by $\psi_t$, obtained by flowing along the projective line segments. Using this flow we can construct a model of the flow space $\wt{U\Gamma}$. For $x,y \in \partial_\infty \Gamma$ distinct, let $\ell_{x,y} \subset T^1 \Omega$ be the unit tangent vectors whose based points are contained in the line segment joining $\xi^{(1)}(x)$ to $\xi^{(1)}(y)$ and who point in the direction of $\xi^{(1)}(y)$. Then the set \begin{align*} \Gc : = \bigcup_{(x,y) \in \partial_\infty \Gamma^{(2)}}\ell_{x,y} \end{align*} is invariant under the action of $\rho(\Gamma)$, the flow $\psi_t$, and the natural $\mathbb{Z}/2\mathbb{Z}$ action on $T^1 \Omega$ given by $v \rightarrow -v$. 
Using Theorem~\ref{thm:Gromov_uniqueness} we will construct a homeomorphism $\Gc \rightarrow \wt{U\Gamma}$. \begin{corollary}\label{cor:Gromov_model} With the notation above, there exists a homeomorphism $T : \Gc \rightarrow \wt{U\Gamma}$ with the following properties: \begin{enumerate} \item $T$ is equivariant relative to the $\Gamma$ and $\mathbb{Z}/2\mathbb{Z}$ actions \item for every $(x,y) \in \partial_\infty \Gamma^{(2)}$, $T$ maps the flow line $\ell_{x,y}$ to the flow line $L_{x,y}$. \end{enumerate} \end{corollary} \begin{proof} By construction $\Gamma \times (\Rb \rtimes_{\psi} \mathbb{Z}/2\mathbb{Z})$ acts on $\Gc$ and the $\Rb$ action is free. Further, $\Gc$ is homeomorphic to $\partial_\infty \Gamma^{(2)} \times \Rb$ and so we have a homeomorphism \begin{align*} \Gc / \Rb \rightarrow \partial_\infty \Gamma^{(2)}. \end{align*} Hence to apply Gromov's theorem we just have to verify that $\Gc$ has a complete metric $d$ with the following properties: \begin{enumerate} \item the actions of $\Gamma$ and $\mathbb{Z}/2\mathbb{Z}$ are isometric, \item for every $v \in \Gc$, the map $\gamma \in \Gamma \rightarrow \gamma \cdot v \in \Gc$ is a quasi-isometry. \item every $\Rb$-orbit is a quasi-geodesic in $\Gc$. \end{enumerate} To do this, define $d:\mathcal{G}\times\mathcal{G}\to\Rb$ by \begin{align*} d(v,w) =\frac{1}{\sqrt{\pi}} \int_{\Rb} H_\Omega(\gamma_v(t), \gamma_w(t)) e^{-t^2} dt \end{align*} where $\gamma_v$ and $\gamma_w$ are the unit speed geodesics with $\gamma_v^\prime(0)=v$ and $\gamma_w^\prime(0)=w$. Then conditions (1) and (3) are easy to check. To verify condition (2), notice that \begin{align*} \abs{H_\Omega(\gamma_v(t), \gamma_w(t))-H_\Omega(\pi(v), \pi(w))} \leq 2\abs{t} \end{align*} and \begin{align*} \frac{1}{\sqrt{\pi}} \int_{\Rb}2 \abs{t}e^{-t^2} dt = \frac{2}{\sqrt{\pi}}. \end{align*} Hence \begin{align*} H_\Omega(\pi(v), \pi(w)) -\frac{2}{\sqrt{\pi}} \leq d(v,w) \leq H_\Omega(\pi(v), \pi(w)) +\frac{2}{\sqrt{\pi}}. 
\end{align*} And so $\pi : (\Gc, d) \rightarrow (\Cc, H_\Omega)$ is a quasi-isometry. Since $(\Cc, H_\Omega)$ is a geodesic metric space and $\Gamma$ acts co-compactly on $\Cc$, the fundamental lemma of geometric group theory states that for every $c \in \Cc$, the map $\gamma \in \Gamma \rightarrow \gamma \cdot c \in \Cc$ is a quasi-isometry. So the $\Gamma$ orbits in $\Gc$ are also quasi-isometries. \end{proof} \subsubsection{Finishing the proof of Proposition~\ref{prop:rho_controlled_sets_projections}} Suppose that $\rho : \Gamma \rightarrow \PGL_d(\Rb)$ is a $1$-Anosov representation and $M \subset \Pb(\Rb^d)$ is $\rho$-controlled. By Proposition~\ref{prop:S_composition}, the representation $S(\rho) : \Gamma \rightarrow \PGL(\Sym_2(\Rb^d))$ is 1-Anosov with boundary maps $\xi^{(1)}_S : = \Phi \circ \xi^{(1)}$ and $\xi^{(d-1)}_S : = \Phi^* \circ \xi^{(d-1)}$. By Observation~\ref{obs:PD_matrices}, \begin{align*} S(\rho)(\Gamma) \leq \Aut(\Pc^+). \end{align*} Then by Theorem~\ref{thm:cc_action} there exists a properly convex domain $\Omega \subset \Pb(\Sym_2(\Rb^d))$ where $S(\rho)(\Gamma)$ acts convex co-compactly on $\Omega$. We can assume that \begin{align*} \Cc = \Omega \cap {\rm ConvHull} \left\{ \xi^{(1)}(\partial_\infty \Gamma) \right\} \end{align*} and \begin{align*} \overline{\Cc} \cap \partial \Omega = \xi^{(1)}(\partial_\infty \Gamma). \end{align*} Now let $\Gc \subset T^1 \Omega$ be the projective model of the geodesic flow constructed in Section~\ref{sec:proj_geod_flow} and let $T : \Gc \rightarrow \wt{U\Gamma}$ denote the homeomorphism in Corollary~\ref{cor:Gromov_model}. 
Next, for $x,y \in \partial_\infty \Gamma$ distinct we define a projection \begin{align*} p_{x,y} : \Pb\left(\Sym_2(\Rb^d)\right) \setminus \left( \xi_S^{(d-1)}(x) \cap \xi_S^{(d-1)}(y) \right) \rightarrow \xi_S^{(1)}(x) + \xi^{(1)}_S(y) \end{align*} by \begin{align*} \{ p_{x,y}(v) \} = \Big( \xi_S^{(1)}(x) + \xi^{(1)}_S(y)\Big) \cap \Big( v + \xi_S^{(d-1)}(x) \cap \xi_S^{(d-1)}(y) \Big). \end{align*} \begin{observation} If $m \in M \setminus \{\xi^{(1)}(x),\xi^{(1)}(y) \}$, then $p_{x,y}(\Phi(m))$ is contained in the line segment joining $\xi^{(1)}_S(x)$ to $\xi^{(1)}_S(y)$ in $\Omega$. \end{observation} \begin{proof} Since $M$ is $\rho$-controlled, \begin{align*} m \notin \xi^{(d-1)}(x) \cup \xi^{(d-1)}(y). \end{align*} Hence \begin{align*} \Phi(m) \notin \xi^{(d-1)}_S(x) \cup \xi^{(d-1)}_S(y). \end{align*} Observation~\ref{obs:PD_matrices} implies that $\Phi(m) \in \overline{\Pc^+}$ and (2) of Theorem~\ref{thm:cc_action} says that $\Pc^+$ and $\Omega$ are in the same connected component of \begin{align*} \Pb\left(\Sym_2(\Rb^d)\right) \setminus \left( \xi_S^{(d-1)}(x) \cup \xi_S^{(d-1)}(y) \right). \end{align*} Hence $p_{x,y}(\Phi(m))$ is contained in the line segment joining $\xi^{(1)}_S(x)$ to $\xi^{(1)}_S(y)$ in $\Omega$. \end{proof} Next, for $x,y \in \partial_\infty \Gamma$ distinct we define a map \begin{align*} \wh{p}_{x,y} : M \setminus \{\xi^{(1)}(x),\xi^{(1)}(y) \} \rightarrow \Gc \end{align*} by letting $\wh{p}_{x,y}(m)$ be the unit vector above $\pi(p_{x,y}(\Phi(m)))$ pointing towards $y$. Finally, we define \begin{align*} \pi_{x,y} : M \setminus \left\{ \xi^{(1)}(x), \xi^{(1)}(y) \right\} \rightarrow L_{x,y} \end{align*} by $\pi_{x,y} = T \circ \wh{p}_{x,y}$. Recall that $T$ is defined in Corollary \ref{cor:Gromov_model}. By construction we have \begin{align*} \pi_{x,y}=\rho(\gamma)^{-1} \circ \pi_{\gamma\cdot x,\gamma\cdot y}\circ \rho(\gamma) \end{align*} for all $(x,y) \in \partial_\infty \Gamma^{(2)}$ and $\gamma\in\Gamma$. 
Further, by (2) of Corollary~\ref{cor:Gromov_model} \begin{align*} \lim_{p\to \xi^{(1)}(x)}\pi_{x,y}(p)=x \text{ and } \lim_{p\to \xi^{(1)}(y)}\pi_{x,y}(p)=y \end{align*} for all $(x,y) \in \partial_\infty \Gamma^{(2)}$. \subsubsection{The construction for non-surface groups:} It is worth noting that for many word hyperbolic groups, post composing with the representation $S : \GL_d(\Rb) \rightarrow \GL(\Sym_2(\Rb^d))$ is not necessary to construct a convex co-compact action. \begin{theorem}\cite[Theorem 1.25]{Zimmer17} Suppose $\Gamma$ is a non-elementary word hyperbolic group which is not commensurable to a non-trivial free product or the fundamental group of a closed hyperbolic surface. Then any irreducible $1$-Anosov representation $\rho: \Gamma \rightarrow \PGL_d(\Rb)$ acts convex co-compactly on a properly convex domain $\Omega \subset \Pb(\Rb^d)$. \end{theorem} \section{Sufficient conditions for differentiability of $\rho$-controlled subsets}\label{sec:suff_cond_diff} The goal of this section is to prove Theorem \ref{thm:main_body}, which is a generalization of Theorem~\ref{thm:main} in terms of $\rho$-controlled subsets of $\Pb(\Rb^d)$ instead of the $1$-limit set. \subsection{The quantity $\alpha^m(\rho)$}\label{sec:optimal1} Suppose that $\rho$ is $(1,m)$-Anosov for some $m=2,\dots,d-1$. To state Theorem \ref{thm:main_body}, we first define a quantity $\alpha^m(\rho)$ as follows. Recall that $E:=\widetilde{U\Gamma}\times\Rb^d$. Let $\overline{\rho}:\Gamma\to\SL_d(\Rb)$ be a lift of $\rho$ (see Remark \ref{rem:lift}), and let $\norm{\cdot}$ be a $\Gamma$-invariant norm on $E$, i.e. $v\mapsto \norm{\cdot}_v$ is a continuous family of norms on $\Rb^d$ parameterized by $\widetilde{U\Gamma}$, so that $\norm{\overline{\rho}(\gamma)\cdot X}_{\gamma\cdot v}=\norm{X}_v$ for all $\gamma\in\Gamma$, $v\in\widetilde{U\Gamma}$ and $X\in \Rb^d$. 
For any $v=(v^+,v^-,v_0) \in \widetilde{U\Gamma}$, let \begin{align} E_1(v) & = \xi^{(1)}(v^+),\nonumber \\ E_2(v) & = \xi^{(d-1)}(v^-) \cap \xi^{(m)}(v^+),\label{eqn:E}\\ E_3(v) & = \xi^{(d-m)}(v^-),\nonumber \end{align} and define $f:\widetilde{U\Gamma}\times\Rb\to\Rb$ by \begin{align}\label{eqn:f} f(v,t):=\inf_{X_i\in S_i(v)}\left\{\log\frac{\norm{X_3}_{\phi_t(v)}}{\norm{X_1}_{\phi_t(v)}}\Bigg/\log\frac{\norm{X_2}_{\phi_t(v)}}{\norm{X_1}_{\phi_t(v)}}\right\}, \end{align} where $S_i(v):=\{X\in E_i(v):||X||_v=1\}$ for $i=1,2,3$. Then define \begin{equation}\label{eqn:alpham1} \alpha^m(\rho):=\liminf_{t\to\infty}\inf_{v\in \widetilde{U\Gamma}}f(v,t). \end{equation} To see that $\alpha^m(\rho)$ is well-defined and strictly larger than $1$, we need the following observation. \begin{observation} \label{lem:weak flow} There exists $C_1 \geq1$ and $\beta_1 \geq0$ such that \begin{align*} \frac{1}{C_1} e^{-\beta_1 t} \norm{X}_{v} \leq \norm{X}_{\phi_t(v)} \leq C_1 e^{\beta_1 t} \norm{X}_v \end{align*} for all $v \in \wt{U\Gamma}$, $t > 0$, and $X\in \Rb^d$. \end{observation} \begin{proof} Since $\Gamma$ acts co-compactly on $\wt{U\Gamma}$ there exists $\beta_1 \geq 0$ such that \begin{align*} e^{-\beta_1} \norm{X}_{v} \leq \norm{X}_{\phi_t(v)} \leq e^{\beta_1} \norm{X}_v \end{align*} for all $v \in \wt{U\Gamma}$, $t \in [0,1]$, and $X\in \Rb^d$. Then for any $t>0$, let $k\in\Zb^+$ so that $t\in[k-1,k)$, and note that \begin{align*} e^{-k\beta_1} \norm{X}_{v} \leq \norm{X}_{\phi_t(v)} \leq e^{k\beta_1} \norm{X}_v. \end{align*} Thus, if we let $C_1:=e^{\beta_1}$, then \begin{align*} \frac{1}{C_1}e^{-t\beta_1} \norm{X}_{v}\leq \frac{1}{C_1}e^{-(k-1)\beta_1} \norm{X}_{v} \leq \norm{X}_{\phi_t(v)} \leq C_1e^{(k-1)\beta_1} \norm{X}_v\leq C_1e^{t\beta_1} \norm{X}_v. 
\end{align*} \end{proof} By Theorem \ref{prop:dom_split}, the assumption that $\rho$ is $(1,m)$-Anosov ensures that there are constants $C_2,C_3\geq 1$ and $\beta_2,\beta_3\geq0$ so that for all $v \in \widetilde{U\Gamma}$, $X_i \in F_i(v)$ non-zero, and $t \geq 0$, we have \begin{align*} \frac{\norm{X_2}_{\phi_t(v)}}{\norm{X_1}_{\phi_t(v)}} \geq \frac{1}{C_2} e^{\beta_2 t} \frac{\norm{X_2}_{v}}{\norm{X_1}_{v}}\,\,\,\text{ and }\,\,\,\frac{\norm{X_3}_{\phi_t(v)}}{\norm{X_2}_{\phi_t(v)}} \geq \frac{1}{C_3} e^{\beta_3 t} \frac{\norm{X_3}_{v}}{\norm{X_2}_{v}}. \end{align*} This, together with Observation \ref{lem:weak flow}, then implies that \begin{align*} \log\frac{\norm{X_3}_{\phi_t(v)}}{\norm{X_1}_{\phi_t(v)}}\Bigg/\log\frac{\norm{X_2}_{\phi_t(v)}}{\norm{X_1}_{\phi_t(v)}}&=1+\log\frac{\norm{X_3}_{\phi_t(v)}}{\norm{X_2}_{\phi_t(v)}}\Bigg/\log\frac{\norm{X_2}_{\phi_t(v)}}{\norm{X_1}_{\phi_t(v)}}\\ &\geq1+\frac{\beta_3 t-\log C_3+\log\frac{\norm{X_3}_{v}}{\norm{X_2}_{v}}}{2\beta_1 t+2\log C_1+\log \frac{\norm{X_2}_{v}}{\norm{X_1}_{v}}} \end{align*} and \begin{align*} \log\frac{\norm{X_3}_{\phi_t(v)}}{\norm{X_1}_{\phi_t(v)}}\Bigg/\log\frac{\norm{X_2}_{\phi_t(v)}}{\norm{X_1}_{\phi_t(v)}}&\leq1+\frac{2\beta_1 t+2\log C_1+\log\frac{\norm{X_3}_{v}}{\norm{X_2}_{v}}}{\beta_2 t-\log C_2+\log \frac{\norm{X_2}_{v}}{\norm{X_1}_{v}}} \end{align*} In particular, $\alpha^m(\rho)$ is a well-defined real number that is strictly larger than $1$. Also, observe that $\alpha^m(\rho)$ does not depend on the choice of $\norm{\cdot}$, nor on the choice of lift $\overline{\rho}$ of $\rho$. With this, we can state the main theorem of this section. \begin{theorem}\label{thm:main_body} Let $\rho:\Gamma\to\PGL_d(\Rb)$ be $(1,m)$-Anosov for some $m=2,\dots, d-1$. Suppose that $M\subset\Pb(\Rb^d)$ is a $\rho$-controlled subset that is also a topological $(m-1)$-dimensional manifold. 
If \begin{enumerate} \item[($\dagger$)] $\rho$ is $m$-Anosov and $M$ is $m$-hyperconvex, \end{enumerate} then \begin{enumerate} \item [($\ddagger$)] $M$ is $C^\alpha$ along $\xi^{(1)}(\partial_\infty \Gamma)$ for all $\alpha$ so that $1<\alpha<\alpha^m(\rho)$. \end{enumerate} Moreover, for all $x\in\partial_\infty\Gamma$, the tangent space to $M$ at $\xi^{(1)}(x)$ is $\xi^{(m)}(x)$. \end{theorem} \begin{remark} \ \begin{enumerate} \item See Section~\ref{sec:terminology}, for the definition of ``$C^\alpha$ along.'' \item As mentioned in the introduction, in the special case when $M = \xi^{(1)}(\partial_\infty \Gamma)$ this theorem was independently proven by Pozzetti-Sambarino-Wienhard \cite{PSW18} without the estimate on $\alpha$. \end{enumerate} \end{remark} It is clear from Example \ref{eg:limitset} and \ref{eg:convex} that Theorem~\ref{thm:main} and the first part of Theorem~\ref{thm:regularity2} follow immediately from Theorem \ref{thm:main_body}. \subsection{The key inequality }\label{sec:optimal2} Suppose that $\rho$ is $(1,m)$-Anosov for some $m=2,\dots,d-1$. Fix a distance $d_{\Pb}$ on $\Pb(\Rb^d)$ induced by a Riemannian metric. The following lemma is the key inequality needed to prove Theorem \ref{thm:main_body}. \begin{lemma}\label{thm:optimal_contraction} Suppose that $M\subset\Pb(\Rb^d)$ be $\rho$-controlled and $m$-hyperconvex. Then for all $\alpha$ satisfying $0<\alpha<\alpha^m(\rho)$, there exists $D \geq 1$ with the following property: for every $x \in \partial_\infty \Gamma$ and $p\in M$, we have \begin{align} \label{eq:inequality_main} d_{\Pb}\left(p, \xi^{(m)}(x) \right) \leq D d_{\Pb}\left(p, \xi^{(1)}(x) \right)^{\alpha}. \end{align} \end{lemma} We prove Lemma \ref{thm:optimal_contraction} via a series of small observations. First, from the definition of $\alpha^m(\rho)$, one observes the following. 
\begin{observation}\label{obs:B0} If $0<\alpha<\alpha^m(\rho)$, then there is a constant $B\geq 1$ so that \begin{equation}\label{eqn:C1} \frac{\norm{X_1}_{\phi_t(v)}}{\norm{X_3}_{\phi_t(v)}} \leq B \left( \frac{\norm{X_1}_{\phi_t(v)}}{\norm{X_2}_{\phi_t(v)}} \right)^{\alpha} \end{equation} for all $v\in\wt{U\Gamma}$, $t\geq 0$, and $X_i\in S_i(v)$ \end{observation} \begin{proof} Since $0<\alpha <\alpha^m(\rho)$, there exists $T > 0$ such that $\alpha < f(v,t)$ for all $t \geq T$ and $v \in \widetilde{U\Gamma}$, so \begin{align}\label{eqn:noconstant} \frac{\norm{X_1}_{\phi_t(v)}}{\norm{X_3}_{\phi_t(v)}} < \left( \frac{\norm{X_1}_{\phi_t(v)}}{\norm{X_2}_{\phi_t(v)}} \right)^{\alpha} \end{align} for all $t \geq T$, $v \in \widetilde{U\Gamma}$, and $X_i \in S_i(v)$. On the other hand, if $0\leq t\leq T$, then the $\Gamma$-invariance of $\norm{\cdot}$ implies that both sides of the inequality \eqref{eqn:noconstant} are continuous positive functions on $[0,T]\times S_{\overline{\rho}}$, where $S_{\overline{\rho}}\subset E_{\overline{\rho}}$ is a compact fiber bundle over $U\Gamma$ whose fiber over $[v]\in U\Gamma$ is $S_1(v)\times S_2(v)\times S_3(v)$. Thus, there exists some $B \geq 1$ so that \eqref{eqn:C1} holds. \end{proof} Next, for $i=1,2,3$ and $v \in \wt{U\Gamma}$ define $P_{i,v} : \Rb^d \rightarrow E_i(v)$ to be the projection with kernel $E_{i-1}(v)+E_{i+1}(v)$, where the arithmetic in the subscripts are done modulo $3$. The following observation is an immediate consequence of the fact that $M$ is $\rho$-controlled and $m$-hyperconvex. \begin{observation} \label{obs:nonzero} If $v \in \wt{U\Gamma}$ and $p\in M \setminus \{\xi^{(1)}(v^+), \xi^{(1)}(v^-)\}$ then $P_{i,v}(X) \neq 0$ for all non-zero $X\in p$ and $i=1,2,3$. \end{observation} Choose a compact set $K\subset\wt{U\Gamma}$ so that $\Gamma\cdot K=\wt{U\Gamma}$. By enlarging $K$ if necessary, we can ensure that $\{v^+:v\in K\}=\partial_\infty\Gamma$. 
Next let \begin{align*} \pi_{x,y} : M \setminus \left\{ \xi^{(1)}(x), \xi^{(1)}(y) \right\} \rightarrow L_{x,y} \end{align*} be a family of maps which satisfy Proposition~\ref{prop:rho_controlled_sets_projections}. Then, as in Section~\ref{sec:rho_controlled_sets}, define \begin{align*} P(M):=\left\{ (v,z) \in \wt{U \Gamma} \times M : p \neq \xi^{(1)}(v^\pm) \text{ and } v = \pi_{v^+,v^-}(p) \right\}. \end{align*} Using the fact that $\Gamma \backslash P(M)$ is compact (see (1) of Observation~\ref{obs:compact}) and Observation~\ref{obs:nonzero}, we deduce the next three observations, which we use to prove Theorem \ref{thm:optimal_contraction}. \begin{observation}\label{obs:C} There is a constant $C\geq 1$ so that \begin{equation}\label{eqn:C2} \frac{1}{C} \leq \frac{ \norm{ P_{i,v}\left( X \right) }_v}{\norm{P_{j,v}\left( X \right)}_v} \leq C \end{equation} for all $(v,p) \in P(M)$, all non-zero $X \in p$, and all $i,j \in \{1,2,3\}$, and \begin{equation}\label{eqn:C3} \frac{1}{C} \leq \frac{\norm{X}_v }{\norm{X}_{2}}\leq C \end{equation} for all $v \in K$ and non-zero $X\in\Rb^d$. \end{observation} \begin{proof} Since $\norm{\cdot}$ is $\Gamma$-invariant, Observation \ref{obs:nonzero} implies that the map $P(M)/\Gamma\to\Rb$ defined by \begin{align*} [v,p]&\mapsto\frac{ \norm{ P_{i,v}\left( X \right) }_v}{\norm{P_{j,v}\left( X \right)}_v} \end{align*} where $X\in p$ is a non-zero vector, is a well-defined, continuous, positive function on $P(M)/\Gamma$. Hence, (1) of Observation \ref{obs:compact} implies that there exists $C\geq 1$, so that \eqref{eqn:C2} holds. Also, since the function $K\times\Pb(\Rb^d)\to\Rb$ defined by \[(v,[X])\mapsto \frac{\norm{X}_v}{\norm{X}_{2}}\] is also well-defined, continuous, and positive, by further enlarging $C$ if necessary, we may assume that \eqref{eqn:C3} holds. \end{proof} Using the fact that $d_{\Pb}$ is induced by a Riemannian metric we have the following estimates. 
\begin{observation} \label{obs:delta1} For any sufficiently small $\delta>0$, there exists $A \geq 1$ such that: for all $v \in K$, $p \in M$ so that $d_{\Pb}\left(\xi^{(1)}(v^+), p\right) \leq \delta$, and $X \in p$ non-zero, we have \begin{equation}\label{eqn:obs1} \frac{1}{A} \frac{\norm{P_{3,v}(X)}_{2}}{\norm{P_{1,v}(X)}_{2}}\leq d_{\Pb}\left( p, \xi^{(m)}(v^+) \right) \leq A \frac{\norm{P_{3,v}(X)}_{2}}{\norm{P_{1,v}(X)}_{2}} \end{equation} and \begin{equation}\label{eqn:obs2} \frac{1}{A} \frac{\norm{P_{2,v}(X)}_{2}}{\norm{P_{1,v}(X)}_{2}}\leq d_{\Pb}\left(p, \xi^{(1)}(v^+)\right) \leq A \frac{\norm{P_{2,v}(X)}_{2}+\norm{P_{3,v}(X)}_{2}}{\norm{P_{1,v}(X)}_{2}}. \end{equation} \end{observation} Using Observations \ref{obs:C} and \ref{obs:delta1}, we will now prove Lemma \ref{thm:optimal_contraction}. \begin{proof}[Proof of Lemma \ref{thm:optimal_contraction} ] Let $\delta>0$ be sufficiently small so that Observation \ref{obs:delta1} holds. Using (3) of Observation~\ref{obs:compact} and possibly decreasing $\delta > 0$ we may also assume that for all $v \in K$ and $p \in M \setminus \{\xi^{(1)}(x^+)\}$ satisfying $d_{\Pb}\left(\xi^{(1)}(v^+), p\right) \leq \delta$, there is some $t>0$ so that $(\phi_t (v), p) \in P(M)$. Elementary considerations imply that it is sufficient to prove Lemma \ref{thm:optimal_contraction} for all $x \in \partial_\infty \Gamma$ and $p\in M \setminus \{\xi^{(1)}(x)\}$ so that $d_{\Pb}\left(\xi^{(1)}(x), p\right) \leq \delta$. By the assumptions on $K$, there exist some $v \in K$ such that $v^+ = x$. Further, by our choice of $\delta$, there exists $t > 0$ such that $(\phi_t(v),p) \in P(M)$. For any non-zero $X \in p$ and for $i=1,2,3$, let \begin{align*} X_i := \frac{P_{i,v}(X)}{\norm{P_{i,v}(X)}_v}\in S_i(v). 
\end{align*} By \eqref{eqn:C2}, \eqref{eqn:C3}, and \eqref{eqn:obs1}, \begin{align} d_{\Pb}\left( p, \xi^{(m)}(x) \right) &\leq A \frac{\norm{P_{3,v}(X)}_{2}}{\norm{P_{1,v}(X)}_{2}} \nonumber \\ &\leq AC^3 \frac{\norm{P_{3,v}(X)}_{v}}{\norm{P_{1,v}(X)}_{v}} \frac{\norm{P_{1,v}(X)}_{\phi_t(v)}}{\norm{P_{3,v}(X)}_{\phi_t(v)}} \label{eqn:1AC3}\\ &= AC^3 \frac{\norm{X_1}_{\phi_t(v)}}{\norm{X_3}_{\phi_t(v)}}\nonumber \end{align} Repeating a similar argument, but with \eqref{eqn:obs2} in place of \eqref{eqn:obs1}, proves \begin{align} d_{\Pb}\left( p, \xi^{(1)}(x)\right) \geq \frac{1}{AC^3} \frac{\norm{X_1}_{\phi_t(v)}}{\norm{X_2}_{\phi_t(v)}}.\label{eqn:1AC3-} \end{align} Finally, since $0<\alpha<\alpha^m(\rho)$, Observation \ref{obs:B0} and \eqref{eqn:1AC3} gives \begin{align*} d_{\Pb}\left( p, \xi^{(m)}(x) \right) \leq A BC^3 \left( \frac{\norm{X_1}_{\phi_t(v)}}{\norm{X_2}_{\phi_t(v)}}\right)^{\alpha}. \end{align*} Combining this with \eqref{eqn:1AC3-} yields \begin{align*} d_{\Pb}\left( p, \xi^{(m)}(x) \right) \leq D d_{\Pb}\left( p, \xi^{(1)}(x)\right)^{\alpha} \end{align*} where $D := A^{1+\alpha}B C^{3+3\alpha}$. \end{proof} \subsection{Proof of Theorem \ref{thm:main_body}}\label{sec:proof_of_main_thm} We now use Lemma \ref{thm:optimal_contraction} to prove Theorem \ref{thm:main_body}. Again, suppose that $\rho$ is $(1,m)$-Anosov for some $m=2,\dots,d-1$. We begin by making the following simple observation. Fix a hyperplane $\Hc\subset\Rb^d$ and a $(d-m)$-dimensional subspace $\Vc\subset \Hc$. Then consider the affine chart $\Ab_{\Hc}:=\Pb(\Rb^d)\setminus[\Hc]$ of $\Pb(\Rb^d)$. Recall that $[\Hc]$ denotes the projectivization of $\Hc$, see \eqref{eqn:projectivization}. For any $m$-dimensional subspace $\Uc\subset\Rb^d$ that is transverse to $\Vc$, let \[\Pi_{\Uc,\Vc}:\Ab_{\Hc}\to [\Uc]\cap\Ab_{\Hc}\] be the projection given by $[X]\mapsto [U_X]$, where $X=U_X+V_X$ with $U_X\setminus\{0\}\in \Uc$ and $V_X\in \Vc$. 
Observe that the fibers of $\Pi_{\Uc,\Vc}$ are of the form $[\Rb\cdot X+\Vc]\cap\Ab_{\Hc}$ for some $X\in\Rb\setminus \Hc$. In particular, the fibers of $\Pi_{\Uc,\Vc}$ do not depend on $\Uc$, i.e. if $\Uc'\subset\Rb^d$ is another $m$-dimensional subspace of $\Rb^d$ that is transverse to $\Vc$, then the fibers of $\Pi_{\Uc,\Vc}$ and the fibers of $\Pi_{\Uc',\Vc}$ agree. Now, fix $y\in\partial_\infty\Gamma$. We will specialize the observation in the previous paragraph to the case where $\Hc=\xi^{(d-1)}(y)$ and $\Vc=\xi^{(d-m)}(y)$. This yields the following statement, which we record as an observation. \begin{observation}\label{obs:parallel} Let $\Ab_y :=\Ab_{\xi^{(d-1)}(y)}$. If $x\in\partial_\infty\Gamma\setminus\{y\}$, then \begin{align*} \Pi_{x,y}:=\Pi_{\xi^{(m)}(x),\xi^{(d-m)}(y)} : \Ab_y\rightarrow \xi^{(m)}(z)\cap\Ab_y \end{align*} is a projection whose fibers do not depend on $x$. \end{observation} Since $M$ is $\rho$-controlled, $M\setminus\{\xi^{(1)}(y)\}\subset\Ab_y$, so we may define \[F_{x,y}:=\Pi_{x,y}|_{M\setminus\{\xi^{(1)}(y)\}}.\] \begin{lemma}\label{lem:homeo} If $x\in\partial_\infty\Gamma\setminus\{y\}$, then the map \begin{align*} F_{x,y}:M\setminus\{\xi^{(1)}(y)\} \rightarrow \xi^{(m)}(x) \cap \Ab_y. \end{align*} is a homeomorphism. \end{lemma} \begin{remark} Lemma \ref{lem:homeo} implies that $M\setminus\{\xi^{(1)}(y)\}$ can be viewed as the graph of a map from \[\xi^{(m)}(x) \cap \Ab_y\,\,\,\text{ to }\,\,\,\Pi_{x,y}^{-1}(\xi^{(1)}(x))=\left(\xi^{(1)}(x)+\xi^{(d-m)}(y)\right) \cap \Ab_y.\] In particular, $M\setminus\{\xi^{(1)}(y)\}$ is diffeomorphic to $\Rb^{m-1}$. \end{remark} The proof of Lemma \ref{lem:homeo} requires a basic result from topology. \begin{theorem}[The Invariance of Domain Theorem] If $U \subset \Rb^d$ is open and $f: U \rightarrow \Rb^d$ is continuous injective map, then $f(U)$ is open and $f$ induces a homeomorphism $U \rightarrow f(U)$. 
\end{theorem} \begin{proof}[Proof of Lemma~\ref{lem:homeo}] We first observe that the map $F_{x,y}$ is injective. If $p_1, p_2 \in M \setminus \{\xi^{(1)}(y)\}$ and $F_{x,y}(p_1) = F_{x,y}(p_2)$, then \begin{align*} p_1 + \xi^{(d-m)}(y) = p_2 + \xi^{(d-m)}(y), \end{align*} so $p_1 + p_2 + \xi^{(d-m)}(y)$ is not direct. The assumption that $M$ is $m$-hypercovex implies that $p_1=p_2$. Since $F_{x,y}$ is continuous and injective, we can now apply the invariance of domain theorem to deduce that $F_{x,y}$ is a homeomorphism onto an open set in $I(x,y)$ in $\xi^{(m)}(x)\cap \Ab_y$. To finish the proof, we now need to show that \begin{align}\label{eqn:Izy} I(x,y) = \xi^{(m)}(x) \cap \Ab_y. \end{align} Suppose $\gamma \in \Gamma$ has infinite order, and denote its attracting and repelling fixed points in $\partial_\infty \Gamma$ by $\gamma^+$ and $\gamma^-$ respectively. Note that $\rho(\gamma)\cdot I(\gamma^+, \gamma^-) = I(\gamma^+,\gamma^-)$ and $\xi^{(1)}(\gamma^+)\in I(\gamma^+,\gamma^-)$. Since $\rho$ is $1$-Anosov, \begin{align*} \xi^{(m)}(\gamma^+)\cap \Ab_{\gamma^-} = \bigcup_{n \in \Nb}\rho(\gamma)^{-n}\cdot \Oc \end{align*} for any open set $\Oc \subset\xi^{(m)}(\gamma^+) \cap \Ab_{\gamma^-}$ containing $\xi^{(1)}(\gamma^+)$. Hence \begin{align*} I(\gamma^+, \gamma^-) = \bigcup_{n \in \Nb} \rho(\gamma)^{-n}\cdot I(\gamma^+, \gamma^-) = \xi^{(m)}(\gamma^+) \cap \Ab_{\gamma^-}. \end{align*} The density of $\{ (\gamma^+, \gamma^-) : \gamma \in \Gamma \text{ has infinite order} \}$ in $\partial_\infty \Gamma \times \partial_\infty \Gamma$ proves \eqref{eqn:Izy}. \end{proof} With Lemma \ref{lem:homeo}, we can now proceed to the proof of Theorem \ref{thm:main_body}. 
\begin{proof}[Proof of Theorem \ref{thm:main_body}] Fix $y\in\partial_\infty\Gamma$, and as before, consider the affine chart \[\Ab_y:=\Pb(\Rb^d) \setminus \xi^{(d-1)}(y).\] By working in some particular affine coordinates in the affine chart $\Ab_y$, we will show that Theorem \ref{thm:main_body} holds for all $x\in \partial_\infty\Gamma\setminus\{y\}$. Since $y$ was chosen arbitrarily, this suffices to prove the theorem. Let $x \in \partial_\infty \Gamma\setminus\{y\}$ and choose affine coordinates $\Ab_y\simeq\Rb^{d-1}$ so that in these coordinates, \begin{itemize} \item $\xi^{(1)}(x) = 0$, \item $\xi^{(m)}(x)\cap\Ab_y = \Rb^{m-1} \times \{0\}$, \item $\left(\xi^{(1)}(x)+ \xi^{(d-m)}(y) \right)\cap\Ab_y = \{0\} \times \Rb^{d-m}$. \end{itemize} For any $z\in\partial_\infty\Gamma$ sufficiently close to $x$, there exists a unique affine map \begin{align*} A_z : \Rb^{m-1} \times \{0\} \rightarrow \{0\} \times \Rb^{d-m} \end{align*} whose graph is $H_z:=\xi^{(m)}(z)\cap\Ab_y $, i.e. \begin{align*} H_z = \left\{ u+A_z(u) : u \in \Rb^{m-1} \times \{0\} \right\}, \end{align*} see Figure \ref{fig:graph}. Let $L_z: \Rb^{m-1}\times\{0\} \rightarrow \{0\}\times\Rb^{d-m}$ denote the linear part of $A_z$ (in our choice of affine coordinates). Note that the maps $z \mapsto A_z$ and $z \mapsto L_z$ are continuous. \begin{figure} \caption{$M$ in the affine chart $\Ab_y$.} \label{fig:graph} \end{figure} For any $z\in\partial_\infty\Gamma\setminus\{y\}$, Observation \ref{obs:parallel} implies that $\Pi_{z,y}^{-1}\left(\xi^{(1)}(z)\right)$ is parallel to $\Pi_{x,y}^{-1}\left(\xi^{(1)}(x)\right)=\{0\}\times\Rb^{d-m}$ in $\Ab_y$. Thus, as a consequence of Lemma~\ref{lem:homeo}, there exists a map \begin{align*} f_z : H_z \rightarrow \{0\} \times \Rb^{d-m} \end{align*} whose graph is $\xi^{(1)}(\partial_\infty \Gamma \setminus \{y\} )$, i.e. \begin{align*} \xi^{(1)}(\partial_\infty \Gamma \setminus \{y\} )= \{ u + f_z(u) : u \in H_z\}. 
\end{align*} Further, Theorem~\ref{thm:optimal_contraction} implies that for all $\alpha$ satisfying $1\leq\alpha <\alpha^m(\rho)$ and all $\xi^{(1)}(z)+ h \in H_z$, we have \begin{align*} f_z\left(\xi^{(1)}(z)+h\right) = \mathrm{o}(\norm{h}^\alpha). \end{align*} Now, for any $u \in \Rb^{m-1} \times \{0\}$ and $z\in\partial_\infty\Gamma\setminus\{y\}$, \begin{align*} u + f_{x}(u) = \Big( u+ A_z(u) \Big) + f_z\Big( u+ A_z(u) \Big). \end{align*} Also, if $u_z:= \Pi_{x, y}(\xi^{(1)}(z))$ then $u_z+ A_z(u_z) = \xi^{(1)}(z)$, which means that $f_{x}(u_z) = A_z(u_z)$. Thus, for all $h\in\Rb^{m-1}\times\{0\}$, \begin{align*} f_{x}(u_z+h) &= A_z(u_z+h) + f_z\Big( u_z+h+ A_z(u_z+h) \Big) \\ & = A_z(u_z)+L_z(h) + f_z\Big( u_z+h+ A_z(u_z)+L_z(h) \Big) \\ &= f_{x}(u_z) + L_z(h) + f_z\Big(\xi^{(1)}(z)+h+L_z(h) \Big) \\ & = f_{x}(u_z) +L_z(h)+ \mathrm{o}(\norm{h+L_z(h)}^{\alpha}) \\ & = f_{x}(u_z) +L_z(h)+ \mathrm{o}(\norm{h}^{\alpha}). \end{align*} This proves the theorem. \end{proof} \section{Eigenvalue description of $\alpha^m(\rho)$}\label{sec:optimal} For the rest of this section, let $\rho:\Gamma\to\PGL_d(\Rb)$ be a $(1,m)$-Anosov representation. Recall that in the introduction, we defined \begin{align*} \alpha_m(\rho) := \inf_{\gamma\in\Gamma}\left\{\log\frac{\lambda_1}{\lambda_{m+1}}(\rho(\gamma))\Bigg/\log\frac{\lambda_1}{\lambda_{m}}(\rho(\gamma)): \frac{\lambda_1}{\lambda_{m}}(\rho(\gamma)) \neq 1 \right\}. \end{align*} The main result of this section is the following theorem. \begin{theorem}\label{thm:alphas} If $\rho$ is irreducible, then \begin{align*} \alpha_m(\rho) = \alpha^m(\rho), \end{align*} where $\alpha^m(\rho)$ is the quantity defined by \eqref{eqn:alpham1}. \end{theorem} The proof of Theorem \ref{thm:alphas} will be given in the following two subsections. 
In the first, we will use general properties of singular values to relate the quantity $f(v,t)$ (the function $f$ was defined by \eqref{eqn:f}) to the ratios of eigenvalues of $\rho(\gamma)$ when $v^\pm=\gamma^\pm$. In the second, we will use a deep result due to Benoist to finish the proof. Before starting the proof, we make several reductions. First, $\alpha_m(\rho)$ and $\alpha^m(\rho)$ are invariant under passing to a finite index subgroup (see Remark~\ref{rem:stablility}). So by Remark \ref{rem:lift}, we can assume that $\rho$ admits a lift $\overline{\rho}:\Gamma\to\SL_d(\Rb)$. Then, by passing to another finite index subgroup, we may also assume that the Zariski closure of $\rho(\Gamma)$ is connected. By Proposition~\ref{prop:strongly_irreducible} this representation is still irreducible. \subsection{Singular values along closed orbits} Let $E:=\wt{U\Gamma}\times\Rb^d$, and for $i=1,2,3$, let $E_i$ be the $\Gamma$-invariant sub-bundle of $E$ defined by \eqref{eqn:E}. (Recall that the $\Gamma$-action on $E$ is given by $\gamma\cdot (v,X)=(\gamma\cdot v,\overline{\rho}(\gamma)\cdot X)$.) Also, choose a $\Gamma$-invariant inner product $\langle\cdot,\cdot\rangle$ on $E$ so that $E=E_1\oplus E_2\oplus E_3$ is an orthogonal splitting. We may assume that the norm $\norm{\cdot}$ used in the definition of $\alpha^m(\rho)$ is given by $\norm{\cdot}_v=\sqrt{\langle\cdot,\cdot\rangle_v}$ for all $v\in \widetilde{U\Gamma}$. For any $v,w\in \widetilde{U\Gamma}$, let $\sigma_i(v,w)$ denote the $i$-th singular value of \[\id=\id_{v,w}:(\Rb^d,\norm{\cdot}_v)\to(\Rb^d,\norm{\cdot}_w),\] and for $(v,t)\in \widetilde{U\Gamma}\times\Rb$, denote $\sigma_i(v,t):=\sigma_i(v,\phi_t(v))$. Using this, define the function $h:\widetilde{U\Gamma}\times\Rb\to\Rb$ by \[h(v,t):=\log\frac{\sigma_{d-m}(v,t)}{\sigma_d(v,t)}\Bigg/\log\frac{\sigma_{d-m+1}(v,t)}{\sigma_{d}(v,t)}.\] The functions $h$ and $f$ (recall that $f$ is defined by \eqref{eqn:f}) are related by the following lemma. 
\begin{lemma} \label{lem:f and g} For all $v\in \widetilde{U\Gamma}$ and for sufficiently large $t$, we have \[f(v,t)=h(v,t).\] In particular, $\displaystyle\alpha^m(\rho)=\liminf_{t\to\infty}\inf_{v\in \widetilde{U\Gamma}}h(v,t)$. \end{lemma} \begin{proof} Since $E=E_1\oplus E_2\oplus E_3$ is an orthogonal splitting, Theorem \ref{prop:dom_split} implies that for all $v\in \widetilde{U\Gamma}$ and for sufficiently large $t$, \begin{itemize} \item $\sigma_d(v,t)=\norm{X}_{\phi_t(v)}$ for all $X\in S_1(v)$, \item $\sigma_{d-m+1}(v,t)=\sup_{X\in S_2(v)} \norm{X}_{\phi_t(v)}$, \item $\sigma_{d-m}(v,t)=\inf_{X\in S_3(v)} \norm{X}_{\phi_t(v)}$. \end{itemize} Thus, \begin{eqnarray*} h(v,t)&=&\log\frac{\sigma_{d-m}(v,t)}{\sigma_d(v,t)}\Bigg/\log\frac{\sigma_{d-m+1}(v,t)}{\sigma_{d}(v,t)}\\ &=&\log\frac{\inf_{X\in S_3(v)} \norm{X}_{\phi_t(v)}}{\sup_{X\in S_1(v)}\norm{X}_{\phi_t(v)}}\Bigg/\log\frac{\sup_{X\in S_2(v)} \norm{X}_{\phi_t(v)}}{\inf_{X\in S_1(v)}\norm{X}_{\phi_t(v)}}\\ &=&\inf_{X_i\in S_i(v)}\left\{\log\frac{\norm{X_3}_{\phi_t(v)}}{\norm{X_1}_{\phi_t(v)}}\Bigg/\log\frac{\norm{X_2}_{\phi_t(v)}}{\norm{X_1}_{\phi_t(v)}}\right\}\\ &=&f(v,t) \end{eqnarray*} Notice that in the third equality we used the fact that $\dim E_1(v) = 1$. \end{proof} The following observation gives a simple but important bound for ratios of singular values. The proof is a straightforward calculation which we omit. \begin{observation}\label{prop:easy comp} Suppose that for $i=1,\dots,4$, $\norm{\cdot}_{(i)}$ are norms on $\Rb^d$ so that for all $X\in\Rb^d$, $\frac{1}{A}\leq\frac{\norm{X}_{(1)}}{\norm{X}_{(2)}}\leq A$ and $\frac{1}{A'}\leq\frac{\norm{X}_{(3)}}{\norm{X}_{(4)}}\leq A'$ for some $A,A'>1$. Let $L:\left(\Rb^d,\norm{\cdot}_{(1)}\right)\to\left(\Rb^d,\norm{\cdot}_{(3)}\right)$ and $L':\left(\Rb^d,\norm{\cdot}_{(2)}\right)\to\left(\Rb^d,\norm{\cdot}_{(4)}\right)$ denote the identity maps. 
Then \[\frac{1}{AA'}\leq\frac{\sigma_i(L)}{\sigma_i(L')}\leq AA'.\] \end{observation} The next lemma relates the function $h$ to the eigenvalues of $\rho(\gamma)$. \begin{lemma}\label{lem:g and eig} Let $\gamma\in\Gamma\setminus\{\id\}$ be an infinite order element, and let $v=(v^+,v^-,v_0)\in \widetilde{U\Gamma}$ so that $v^\pm=\gamma^\pm$. Then \[\lim_{t\to\infty}h(v,t)=\log\frac{\lambda_1}{\lambda_{m+1}}(\rho(\gamma))\Bigg/\log\frac{\lambda_1}{\lambda_{m}}(\rho(\gamma)).\] \end{lemma} \begin{proof} Let $T$ denote the period of $\gamma$ (see Section \ref{sec:flowspace}). For all $k \in \Zb^+$ and $X \in \Rb^d$ \[\norm{X}_{\phi_{kT}(v)}=\norm{X}_{\gamma^k\cdot v}=\norm{\overline{\rho}(\gamma^{-k})\cdot X}_v.\] Hence, the singular values of the two linear maps \[\id:(\Rb^d,\norm{\cdot}_v)\to(\Rb^d,\norm{\cdot}_{\phi_{kT}(v)})\,\,\,\text{ and } \overline{\rho}(\gamma^{-k}):(\Rb^d,\norm{\cdot}_v)\to(\Rb^d,\norm{\cdot}_v)\,\,\,\] agree. It is a straightforward calculation to show that for any inner product $\langle\cdot,\cdot\rangle$ on $\Rb^d$ and any invertible linear map $\overline{g}:(\Rb^d,\langle\cdot,\cdot\rangle)\to(\Rb^d,\langle\cdot,\cdot\rangle)$, \begin{align}\label{eqn:Ben} \lim_{k\to\infty}\frac{1}{k}\log\sigma_i(\overline{g}^k)=\log\lambda_i(\overline{g}). 
\end{align} Thus, we can deduce that \begin{equation}\label{eqn:Benoist} \lim_{k\to\infty}\sigma_i(v,kT)^{\frac{1}{k}}=\lim_{k\to\infty}\sigma_i(\overline{\rho}(\gamma^{-k}))^{\frac{1}{k}}=|\lambda_i(\overline{\rho}(\gamma^{-1}))|=\frac{1}{|\lambda_{d+1-i}(\overline{\rho}(\gamma))|},\end{equation} which implies that \begin{eqnarray}\label{eqn:period contraction} \lim_{k\to\infty}h(v,kT)&=&\lim_{k\to\infty}\left(\log\frac{\sigma_{d-m}(v,kT)}{\sigma_d(v,kT)}\Bigg/\log\frac{\sigma_{d-m+1}(v,kT)}{\sigma_{d}(v,kT)}\right)\\ &=&\log\frac{\lambda_1}{\lambda_{m+1}}(\rho(\gamma))\Bigg/\log\frac{\lambda_1}{\lambda_{m}}(\rho(\gamma)).\nonumber \end{eqnarray} For any $t>0$, let $k\in\Zb^+$ so that $t\in[kT,(k+1)T)$. Then Lemma \ref{lem:weak flow} implies that there are constants $C\geq 1$ and $\beta\geq 0$ so that \[\frac{1}{C}e^{-\beta T}\leq\frac{\norm{X}_{\phi_tv}}{\norm{X}_{\phi_{kT}v}}\leq Ce^{\beta T}\] for all $t \in \Rb$ and $X\in\Rb^d$. This, together with Observation \ref{prop:easy comp}, implies that for all $i=1,\dots,d$, \[\frac{1}{C}e^{-\beta T}\leq\frac{\sigma_i(v,kT)}{\sigma_i(v,t)}\leq Ce^{\beta T}.\] Also, since $\rho$ is $(1,m)$-Anosov, we know that \[\lim_{k\to\infty}\log\frac{\sigma_{d-m}(v,kT)}{\sigma_d(v,kT)}=\infty=\lim_{k\to\infty}\log\frac{\sigma_{d-m+1}(v,kT)}{\sigma_{d}(v,kT)}.\] Hence, \begin{eqnarray*} \limsup_{t\to\infty}h(v,t)&=&\limsup_{t\to\infty}\log\frac{\sigma_{d-m}(v,t)}{\sigma_d(v,t)}\Bigg/\log\frac{\sigma_{d-m+1}(v,t)}{\sigma_{d}(v,t)}\\ &\leq&\limsup_{k\to\infty}\frac{\displaystyle2\log C+2\beta T+\log\frac{\sigma_{d-m}(v,kT)}{\sigma_d(v,kT)}}{\displaystyle-2\log C-2\beta T+\log\frac{\sigma_{d-m+1}(v,kT)}{\sigma_{d}(v,kT)}}\\ &=&\lim_{k\to\infty}h(v,kT). \end{eqnarray*} By a similar argument, $\displaystyle\liminf_{t\to\infty}h(v,t)\geq\lim_{k\to\infty}h(v,kT)$, so $\displaystyle\lim_{t\to\infty}h(v,t)=\lim_{k\to\infty}h(v,kT)$. This, together with \eqref{eqn:period contraction} implies the lemma. 
\end{proof} \subsection{Asymptotic cones and eigenvalues}\label{sec:cones} Recall that $\lambda,\mu:\GL_d(\Rb)\to\Rb^d$ respectively denote the Jordan and Cartan projections defined in Section \ref{sec:properties}. For any subgroup $G \leq \SL_d(\Rb)$, let $\Cc_\lambda(G) \subset \Rb^d$ denote the smallest closed cone containing $\lambda(G)$, that is \begin{align*} \Cc_\lambda(G) := \overline{\bigcup_{\overline{g} \in G} \Rb_{>0} \cdot \lambda(\overline{g})}. \end{align*} Also, let $\Cc_\mu(G)$ denote the \emph{asymptotic cone of $\mu(G)$}, that is \begin{align*} \Cc_\mu(G) := \{ x \in \Rb^d : \exists \overline{g_n} \in G, \exists t_n \searrow 0, \text{ with } \lim_{n \rightarrow \infty} t_n \mu(\overline{g_n}) =x\}. \end{align*} A deep result of Benoist~\cite{B1997} implies the following. \begin{theorem}\label{thm:cones} If $G \leq \SL_d(\Rb)$ is a connected semisimple real algebraic subgroup which acts irreducibly on $\Rb^d$ and $\Lambda \leq G$ is a Zariski dense subgroup, then \begin{align*} \Cc_\mu(\Lambda) = \Cc_\lambda(\Lambda). \end{align*} \end{theorem} \begin{remark} Notice that for any subgroup $\Lambda\subset \SL_d(\Rb)$, the fact that $\Cc_\lambda(\Lambda) \subset \Cc_\mu(\Lambda)$ is a consequence of \eqref{eqn:Ben}. \end{remark} A proof of Theorem \ref{thm:cones} is given in the appendix. Theorem \ref{thm:cones} can be used to prove the following lemma. \begin{lemma} \label{lem:preliminary-inequality} For any $\epsilon > 0$ there exists $R > 0$ such that \begin{align*} \alpha_m(\rho)-\epsilon < \log\frac{\mu_1}{\mu_{m+1}}(\rho(\gamma))\Bigg/\log\frac{\mu_1}{\mu_{m}}(\rho(\gamma)) \end{align*} for all $\gamma \in \Gamma$ with $\norm{\mu(\overline{\rho}(\gamma))}_2\geq R$. \end{lemma} \begin{proof} By Proposition~\ref{prop:Zclosure} and Theorem~\ref{thm:cones}, $\Cc_\mu(\overline{\rho}(\Gamma)) = \Cc_\lambda(\overline{\rho}(\Gamma))$. 
Fix $\epsilon > 0$ and suppose for contradiction that there exists a sequence $\{\gamma_n\}_{n=1}^\infty \subset \Gamma$ such that for all $n$, $\norm{\mu(\overline{\rho}(\gamma_n))}_2\geq n$ and \begin{align*} \alpha_m(\rho)-\epsilon \geq \log\frac{\mu_1}{\mu_{m+1}}(\rho(\gamma_n))\Bigg/\log\frac{\mu_1}{\mu_{m}}(\rho(\gamma_n)). \end{align*} By passing to a subsequence we can suppose that \begin{align*} \frac{1}{\norm{\mu(\overline{\rho}(\gamma_n))}_2} \mu(\overline{\rho}(\gamma_n)) \rightarrow x =(x_1,\dots,x_d)\in \Cc_\mu(\overline{\rho}(\Gamma)) = \Cc_\lambda(\overline{\rho}(\Gamma)). \end{align*} It follows that $\alpha_m(\rho)-\epsilon \geq\frac{x_1-x_{m+1}}{x_1-x_m}$. On the other hand, the definitions of $\alpha_m(\rho)$ and $\Cc_\lambda(\overline{\rho}(\Gamma))$ imply that \[\alpha_m(\rho) \leq \frac{x_1-x_{m+1}}{x_1-x_m},\] which is a contradiction. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:alphas}] It is clear from Lemma \ref{lem:f and g} and Lemma \ref{lem:g and eig} that $\alpha^m(\rho)\leq\alpha_m(\rho)$. We will now prove $\alpha^m(\rho)\geq\alpha_m(\rho)$. Let $K\subset \widetilde{U\Gamma}$ be a compact fundamental domain for the $\Gamma$-action on $\widetilde{U\Gamma}$. Since $h(v,t)=h(\gamma\cdot v,t)$ for all $\gamma\in\Gamma$ and all $v\in\widetilde{U\Gamma}$, by Lemma \ref{lem:f and g}, it is enough to show that \begin{align*} \alpha_m(\rho) \leq \liminf_{t\to\infty}\inf_{v\in K}h(v,t). \end{align*} Fix $C > 1$ such that $\frac{1}{C} \norm{X}_{2} \leq \norm{X}_{v} \leq C \norm{X}_{2}$ for all $v\in K$ and $X \in \Rb^d$. By Lemma \ref{lem:preliminary-inequality}, there exists, for every $\epsilon>0$, a positive number $R' > 0$ such that \begin{align*} \alpha_m(\rho)-\epsilon < \log\frac{\mu_1}{\mu_{m+1}}(\rho(\gamma))\Bigg/\log\frac{\mu_1}{\mu_{m}}(\rho(\gamma)) \end{align*} for all $\gamma \in \Gamma$ with $\norm{\mu(\overline{\rho}(\gamma))}_2\geq R'$. 
Since $\rho$ is $1$-Anosov and \begin{align*} \log\frac{\mu_1}{\mu_{2}}(\rho(\gamma)) \leq \log\frac{\mu_1}{\mu_{k}}(\rho(\gamma)) \end{align*} for $k > 1$, Theorem~\ref{thm:SV_char_of_Anosov} and Corollary~\ref{thm:QI_Anosov} together imply that \[\log\frac{\mu_1}{\mu_{m+1}}(\rho(\gamma))\,,\,\log\frac{\mu_1}{\mu_{m}}(\rho(\gamma))\geq \frac{1}{A''}\norm{\mu(\overline{\rho}(\gamma))}_2-B''\] for some $A''\geq 1$ and $B''\geq 0$. Hence, there exists $R \geq R'$ such that \begin{align*} \alpha_m(\rho)-2\epsilon < \left(\log\frac{\mu_1}{\mu_{m+1}}(\rho(\gamma))-4\log C\right)\bigg/\left(\log\frac{\mu_1}{\mu_{m}}(\rho(\gamma))+4\log C\right) \end{align*} for all $\gamma \in \Gamma$ with $\norm{\mu(\overline{\rho}(\gamma))}_2\geq R$. Let $d=d_{\widetilde{U\Gamma}}$ denote the $\Gamma$-invariant metric on $\widetilde{U\Gamma}$ specified in Section \ref{sec:flowspace}, and let $D$ be the diameter of $K$. By Corollary~\ref{thm:QI_Anosov} and the fact that any $\Gamma$-orbit map into $\widetilde{U\Gamma}$ is a quasi-isometry, there exist $A \geq 1$ and $B \geq 0$ such that \begin{align*} \frac{1}{A} \norm{\mu(\overline{\rho}(\gamma))}_2 - B \leq d(v, \gamma \cdot v) \leq A \norm{\mu(\overline{\rho}(\gamma))}_2 + B \end{align*} for all $v \in \wt{U\Gamma}$. Also, since every $\phi_t$-orbit in $\widetilde{U\Gamma}$ is a quasi-isometric embedding, there exist $A'\geq1$ and $B'\geq0$ so that \begin{align*} \frac{1}{A'} |t|- B' \leq d(v,\phi_t(v))\leq A' |t| + B' \end{align*} for all $t\in\Rb$ and $v \in \wt{U\Gamma}$. Fix $t > A'(B'+D + AR + B)$ and $v \in K$. Let $\gamma \in \Gamma$ be such that $\gamma^{-1}\cdot\phi_t (v) \in K$. 
By the definition of $C$, we see that for any $X\in\Rb^d$, \[\frac{1}{C}\leq\frac{\norm{X}_v}{\norm{X}_{2}}, \frac{\norm{\overline{\rho}(\gamma)^{-1}\cdot X}_{\gamma^{-1}\cdot \phi_t(v)}}{\norm{\overline{\rho}(\gamma)^{-1}\cdot X}_{2}} \leq C.\] Since $ \norm{\overline{\rho}(\gamma)^{-1}\cdot X}_{\gamma^{-1}\cdot \phi_t (v)}=\norm{X}_{\phi_t (v)} $ and $X\mapsto\norm{\overline{\rho}(\gamma)^{-1}\cdot X}_{2}$ are both norms on $\Rb^d$, it follows from Observation \ref{prop:easy comp} that \[\frac{1}{C^2}\frac{1}{\mu_{d+1-i}(\overline{\rho}(\gamma))}=\frac{1}{C^2}\mu_{i}(\overline{\rho}(\gamma)^{-1})\leq\sigma_i(v,t)\leq C^2\mu_{i}(\overline{\rho}(\gamma)^{-1})= C^2\frac{1}{\mu_{d+1-i}(\overline{\rho}(\gamma))}.\] Also, $d(\gamma\cdot v,v)\geq d(v,\phi_t(v))-d( \phi_t (v), \gamma\cdot v) \geq \frac{1}{A'}t-B'-D$, which means \begin{align*} \norm{\mu(\overline{\rho}(\gamma))}_2 \geq \frac{1}{A} \left(d(\gamma\cdot v, v) - B\right) \geq \frac{\frac{1}{A'}t-B'-D-B}{A} \geq R. \end{align*} Hence, \begin{align*} h(v,t) &= \log\frac{\sigma_{d-m}(v,t)}{\sigma_d(v,t)}\Bigg/\log\frac{\sigma_{d-m+1}(v,t)}{\sigma_{d}(v,t)} \\ & \geq \left(\log\frac{\mu_1}{\mu_{m+1}}(\rho(\gamma))-4\log C\right)\bigg/\left(\log\frac{\mu_1}{\mu_{m}}(\rho(\gamma))+4\log C\right)\\ & > \alpha_m(\rho)-2\epsilon. \end{align*} Since $v \in K$ and $t > A'(B'+D + AR+ B)$ was arbitrary, \begin{align*} \alpha_m(\rho)-2\epsilon \leq \liminf_{t\to\infty}\inf_{v\in K}h(v,t). \end{align*} Then since $\epsilon > 0$ was also arbitrary we see that \begin{equation*} \alpha_m(\rho) \leq \liminf_{t\to\infty}\inf_{v\in K}h(v,t). \qedhere \end{equation*} \end{proof} \section{Optimal regularity}\label{sec:regularity} In this section we prove Theorem~\ref{thm:regularity} and the second part of Theorem~\ref{thm:regularity2}. By Examples \ref{eg:limitset} and \ref{eg:convex}, it is sufficient to prove the following theorem. 
\begin{theorem} \label{thm:regularity_body}Suppose that $\rho: \Gamma \rightarrow \PGL_{d}(\Rb)$ is an irreducible, $(1,m)$-Anosov representation for some $m=2,\dots,d-1$, and suppose that $M\subset\Pb(\Rb^d)$ is a $\rho$-controlled, $m$-hyperconvex, topological $(m-1)$-dimensional submanifold. Then \begin{align*} \alpha_m(\rho)\leq \sup\left\{ \alpha \in (1,2) : M \text{ is } C^{\alpha} \text{ along }\xi^{(1)}(\partial_\infty \Gamma) \right\} \end{align*} with equality if \begin{itemize} \item[($\ast$)] $M \cap \left(p_1 + p_2 + \xi^{(d-m)}(y)\right)$ spans $p_1 + p_2 + \xi^{(d-m)}(y)$ for all pairwise distinct $p_1,p_2,\xi^{(1)}(y)\in M$. \end{itemize} \end{theorem} As mentioned in the introduction (see (2) of Remark \ref{rem:stablility}), the condition ($\ast$) is trivial when $m=2$ and $m=d-1$. In Section \ref{sec:stability}, we show that when $M=\xi^{(1)}(\partial_\infty\Gamma)$, ($\ast$) is an open condition in $\Hom(\Gamma,\PSL_d(\Rb))$. Then, in Section \ref{sec:proof_regularity_body}, we prove Theorem \ref{thm:regularity_body}. \subsection{Stability of hypotheses}\label{sec:stability} To show that ($\ast$) is an open condition when $M=\xi^{(1)}(\partial_\infty\Gamma)$, we use the following two statements. The first is a standard fact about hyperbolic groups. \begin{proposition}\label{prop:3_point_action} The $\Gamma$-action on $\partial_\infty \Gamma^{(3)}: = \{ (x,y,z) \in \partial_\infty \Gamma^3 : x,y,z \text{ distinct} \}$ is co-compact. \end{proposition} The second is a well-known result about Anosov representations due to Guichard-Wienhard. In the case when $\Gamma$ is the fundamental group of a negatively curved Riemannian manifold, this result was established by Labourie~\cite[Proposition 2.1]{L2006}. Before stating the result we need some notation: If $\rho: \Gamma \rightarrow \PGL_{d}(\Rb)$ is a $k$-Anosov representation, let $\xi_\rho^{(k)}:\partial_\infty\Gamma\to\Gr_k(\Rb^d)$ denote the $k$-flag map of $\rho$. 
\begin{theorem}\label{thm:gwstable} \cite[Theorem 5.13]{GW2012}\label{thm:continuous_limit_curve} Let \begin{align*} \Oc_k : = \{ \rho \in \Hom(\Gamma, \PGL_d(\Rb)) : \rho \text{ is $k$-Anosov} \}. \end{align*} Then $\Oc_k$ is open, and the map \begin{align*} \rho \in \Oc_k \rightarrow \xi^{(k)}_{\rho} \in C\left( \partial_\infty \Gamma, \Gr_k(\Rb^d)\right) \end{align*} is continuous. \end{theorem} \begin{corollary} Suppose $\partial_\infty \Gamma$ is a topological $(m-1)$-manifold, and $\rho_0: \Gamma \rightarrow \PGL_{d}(\Rb)$ is a $(1,m)$-Anosov representation. If $\xi^{(1)}_{\rho_0}(x) + \xi^{(1)}_{\rho_0}(z) + \xi^{(d-m)}_{\rho_0}(y)$ is a direct sum and \begin{align*} \xi_{\rho_0}^{(1)}(\partial_\infty \Gamma) \cap \left(\xi_{\rho_0}^{(1)}(x) + \xi_{\rho_0}^{(1)}(z) + \xi_{\rho_0}^{(d-m)}(y)\right) \end{align*} spans $\xi_{\rho_0}^{(1)}(x) + \xi_{\rho_0}^{(1)}(z) + \xi_{\rho_0}^{(d-m)}(y)$ for all $x,y,z \in \partial_\infty \Gamma$ distinct, then any sufficiently small deformation of $\rho_0$ also has these properties. \end{corollary} \begin{proof} It follows easily from Theorem \ref{thm:gwstable} and Proposition~\ref{prop:3_point_action} that there exists a neighborhood $\Oc\subset\Hom(\Gamma, \PGL_d(\Rb))$ of $\rho_0$ with the following property: if $\rho \in \Oc$, then $\rho$ is a $(1,m)$-Anosov representation and $\xi^{(1)}_{\rho}(x) + \xi^{(1)}_{\rho}(z) + \xi^{(d-m)}_{\rho}(y)$ is a direct sum for all $x,y,z \in \partial_\infty \Gamma$ distinct. 
By Proposition~\ref{prop:3_point_action}, it is enough to fix $(x_0,y_0,z_0) \in \partial_\infty \Gamma^{(3)}$ and prove that there exists a neighborhood $U$ of $(x_0,y_0,z_0)$ in $\partial_\infty \Gamma^{(3)}$ such that \begin{align*} \xi_{\rho}^{(1)}(\partial_\infty \Gamma) \cap \left(\xi_{\rho}^{(1)}(x) + \xi_{\rho}^{(1)}(z) + \xi_{\rho}^{(d-m)}(y)\right) \end{align*} spans $\xi_{\rho}^{(1)}(x) + \xi_{\rho}^{(1)}(z) + \xi_{\rho}^{(d-m)}(y)$ for all $(x,y,z) \in U$ and any $\rho$ that is a sufficiently small deformation of $\rho_0$. Let $e_1,\dots, e_d$ be the standard basis of $\Rb^d$. By changing coordinates we can assume that \begin{align*} \xi^{(1)}_{\rho_0}(x_0) & = \Rb \cdot e_1, \\ \xi^{(m)}_{\rho_0}(x_0) & = \Span\{e_1,\dots,e_m\},\\ \xi^{(d-m)}_{\rho_0}(y_0) &= \Span\{ e_{m+1},\dots, e_d\},\\ \xi^{(d-1)}_{\rho_0}(y_0) &= \Span\{e_2,\dots, e_d\}, \text{ and} \\ \xi^{(1)}_{\rho_0}(z_0) &= \Rb\cdot(e_1+e_2+e_d). \end{align*} Using Theorem~\ref{thm:continuous_limit_curve} and possibly shrinking $\Oc$, we can find a neighborhood $U_0$ of $(x_0,y_0,z_0)$ such that there exists a continuous map \begin{align*} (\rho, (x,y,z)) \in \Oc \times U_0 \rightarrow g_{\rho,(x,y,z)} \in \PGL_d(\Rb) \end{align*} such that $g_{\rho_0,(x_0,y_0,z_0)} =\id$, \begin{align*} g_{\rho,(x,y,z)}\cdot\xi^{(1)}_{\rho}(x) & = \Rb \cdot e_1, \\ g_{\rho,(x,y,z)}\cdot\xi^{(m)}_{\rho}(x) & = \Span\{e_1,\dots,e_m\},\\ g_{\rho,(x,y,z)}\cdot \xi^{(d-m)}_{\rho}(y) &= \Span\{ e_{m+1},\dots, e_d\},\\ g_{\rho,(x,y,z)}\cdot \xi^{(d-1)}_{\rho}(y) &= \Span\{e_2,\dots, e_d\}, \text{ and} \\ g_{\rho,(x,y,z)}\cdot\xi^{(1)}_{\rho}(z) &= \Rb\cdot(e_1+e_2+e_d). \end{align*} By Theorem~\ref{thm:main}, for each $ (\rho, (x,y,z)) \in \Oc \times U_0$, there exists a unique $C^1$ function $f_{\rho, (x,y,z)} : \Rb^{m-1} \rightarrow \Rb^{d-m}$ such that \begin{align*} g_{\rho,(x,y,z)}\cdot\xi^{(1)}_{\rho}( \partial_\infty \Gamma \setminus \{y\}) = \left\{ [1:v:f_{\rho, (x,y,z)}(v)] : v \in \Rb^{m-1} \right\}. 
\end{align*} Then by Theorem~\ref{thm:continuous_limit_curve}, the map $\Oc \times U_0\to C\left(\Rb^{m-1}, \Rb^{d-m}\right)$ given by \begin{align*} (\rho, (x,y,z)) \mapsto f_{\rho,(x,y,z)} \end{align*} is continuous. Notice that \begin{align*} \xi^{(1)}_{\rho}( \partial_\infty & \Gamma \setminus \{y\}) \cap \left(\xi_{\rho}^{(1)}(x) + \xi_{\rho}^{(1)}(z) + \xi_{\rho}^{(d-m)}(y)\right) \\ & = g_{\rho,(x,y,z)}^{-1}\cdot\left\{ [1:te_2:f_{\rho, (x,y,z)}(te_2)] : t \in \Rb \right\}. \end{align*} So if \begin{align*} [1:t_1e_2:f_{\rho_0, (x_0,y_0,z_0)}(t_1e_2)], \dots, [1:t_{d-m+2}e_2:f_{\rho_0, (x_0,y_0,z_0)}(t_{d-m+2}e_2)] \end{align*} span $\xi_{\rho_0}^{(1)}(x_0) + \xi_{\rho_0}^{(1)}(z_0) + \xi_{\rho_0}^{(d-m)}(y_0)$, then \begin{align*} g_{\rho,(x,y,z)}^{-1}[1:t_1e_2:f_{\rho, (x,y,z)}(t_1e_2)], \dots, g_{\rho,(x,y,z)}^{-1}[1:t_{d-m+2}e_2:f_{\rho, (x,y,z)}(t_{d-m+2}e_2)] \end{align*} span $\xi_{\rho}^{(1)}(x) + \xi_{\rho}^{(1)}(z) + \xi_{\rho}^{(d-m)}(y)$ when $(\rho,(x,y,z))$ is sufficiently close to $(\rho_0, (x_0,y_0,z_0))$. \end{proof} \subsection{Proof of Theorem \ref{thm:regularity_body}}\label{sec:proof_regularity_body} We begin with the following observation. Let $e_1,\dots, e_d$ denote the standard basis of $\Rb^d$, and let $\overline{g} \in \GL_{d}(\Rb)$ be a proximal element so that \begin{itemize} \item $e_1$ spans the eigenspace corresponding to $\lambda_1(\overline{g})$, \item $e_m$ lies in the generalized eigenspace corresponding to $\lambda_m(\overline{g})$, and \item $e_{m+1}$ lies in the generalized eigenspace corresponding to $\lambda_{m+1}(\overline{g})$. 
\end{itemize} Then observe that \begin{align}\label{eqn:P2stretch} \log\lambda_1(\overline{g})&=\lim_{n \rightarrow \infty} \frac{1}{n} \log \norm{\overline{g}^n\cdot e_1},\nonumber\\ \log\lambda_m (\overline{g})&= \lim_{n \rightarrow \infty} \frac{1}{n} \log \norm{\overline{g}^n\cdot e_m},\text{ and }\\ \log\lambda_{m+1}(\overline{g}) &= \lim_{n \rightarrow \infty} \frac{1}{n} \log \norm{\overline{g}^n\cdot \sum_{j=m+1}^d v_j e_j}\text{ when }v_{m+1} \neq 0.\nonumber \end{align} \begin{proof}[Proof of Theorem \ref{thm:regularity_body}] From Theorem~\ref{thm:main_body} and Theorem \ref{thm:alphas}, we see that \begin{align*} \alpha_m(\rho) \leq \sup\left\{ \alpha \in (1,2): M \text{ is } C^{\alpha} \text{ along }\xi^{(1)}(\partial_\infty \Gamma) \right\}. \end{align*} To prove the equality case, fix some $\gamma \in \Gamma$ with infinite order and let $\gamma^{\pm} \in \partial_\infty \Gamma$ denote the attracting and repelling fixed points of $\gamma$. We can make a change of basis and assume that $\xi^{(1)}(\gamma^+) = \Rb\cdot e_1$, $\xi^{(m)}(\gamma^+) = \Span\{ e_1,\dots, e_m\}$, $\xi^{(d-m)}(\gamma^-) = \Span\{ e_{m+1}, \dots, e_d\}$, and $\xi^{(d-1)}(\gamma^-) = \Span\{e_2,\dots, e_d\}$. Now fix a lift $\overline{g}\in\GL_d(\Rb)$ of $\rho(\gamma)\in\PGL_d(\Rb)$. Then \begin{align*} \overline{g} = \begin{pmatrix} \lambda & & \\ & U & \\ & & V \end{pmatrix} \end{align*} where $\lambda \in \Rb$, $U \in \GL_{m-1}(\Rb)$, and $V \in \GL_{d-m}(\Rb)$. By a further change of basis, we can assume that $e_m$ lies in the generalized eigenspace corresponding to $\lambda_m(\overline{g})$, and $e_{m+1}$ lies in the generalized eigenspace corresponding to $\lambda_{m+1}(\overline{g})$. By Theorem \ref{thm:main_body}, $M$ is $C^1$ along $\xi^{(1)}(\partial_\infty \Gamma)$, and the tangent space to $M$ at $\xi^{(1)}(\gamma^+)$ is $\xi^{(m)}(\gamma^+)$. 
Thus, for any $\epsilon > 0$ sufficiently small there exists some $p \in M$ such that \begin{align*} p = \left[e_1 + \epsilon e_m + \sum_{j=m+1}^d y_j e_j \right]. \end{align*} Then \begin{align*} \xi^{(1)}(\gamma^+) + p + \xi^{(d-m)}(\gamma^-) = \Span\{e_1,e_m, e_{m+1}, \dots, e_d\} \end{align*} and by hypothesis there exists some $q \in M$ such that \begin{align*} q= \left[z_1e_1 + z_m e_m + \sum_{j=m+1}^d z_j e_j \right] \end{align*} and $z_{m+1} \neq 0$. The sums $q+\xi^{(d-1)}(\gamma^-)$ and $\xi^{(1)}(\gamma^+) + q + \xi^{(d-m)}(\gamma^-)$ are both direct, so $z_1 \neq 0\neq z_m$. Next fix a distance $d_{\Pb}$ on $\Pb(\Rb^d)$ induced by a Riemannian metric. Since \[\lim_{n\to\infty}\rho(\gamma^n)\cdot q= \xi^{(1)}(\gamma^+),\] Observation~\ref{obs:delta1} implies that if \[X_n:=\overline{\rho}(\gamma^n)\cdot \left(z_1e_1 + z_m e_m + \sum_{j=m+1}^d z_j e_j\right),\] then there is some $A\geq 1$ so that for sufficiently large $n$, \[\frac{1}{A} \frac{\norm{P_{3,v}(X_n)}_{2}}{\norm{P_{1,v}(X_n)}_{2}}\leq d_{\Pb}\left( \rho(\gamma^n)\cdot q, \xi^{(m)}(v^+) \right) \leq A \frac{\norm{P_{3,v}(X_n)}_{2}}{\norm{P_{1,v}(X_n)}_{2}}\] and \[\frac{1}{A} \frac{\norm{P_{2,v}(X_n)}_{2}}{\norm{P_{1,v}(X_n)}_{2}}\leq d_{\Pb}\left( \rho(\gamma^n)\cdot q, \xi^{(1)}(v^+) \right) \leq A \frac{\norm{P_{2,v}(X_n)}_{2}+\norm{P_{3,v}(X_n)}_{2}}{\norm{P_{1,v}(X_n)}_{2}}.\] It then follows from \eqref{eqn:P2stretch} that \begin{align*} \lim_{n \rightarrow \infty} \frac{1}{n} \log d_{\Pb}\left(\rho(\gamma^n)\cdot q, \xi^{(m)}(\gamma^+) \right)=\log \frac{\lambda_{m+1}}{\lambda_1}. 
\end{align*} and \begin{align*} \lim_{n \rightarrow \infty} \frac{1}{n} \log d_{\Pb}\left(\rho(\gamma^n)\cdot q, \xi^{(1)}(\gamma^+) \right)= \log \frac{\lambda_{m}}{\lambda_1}. \end{align*} Finally, if $M$ is $C^\alpha$ along $\xi^{(1)}(\partial_\infty \Gamma)$, then there exists $C > 0$ such that \begin{align*} d_{\Pb}\left( \xi^{(m)}(\gamma^+), \rho(\gamma^n)\cdot q \right) \leq C d_{\Pb}\left( \xi^{(1)}(\gamma^+), \rho(\gamma^n)\cdot q \right)^{\alpha} \end{align*} for all sufficiently large $n$. By taking the logarithm of both sides, dividing by $n$, and then taking the limit, we see that \begin{align*} \alpha \leq \frac{\log \frac{\lambda_{1}}{\lambda_{m+1}}}{\log \frac{\lambda_{1}}{\lambda_m}}. \end{align*} Since $\gamma \in \Gamma$ was arbitrary, we see that $\alpha \leq \alpha_m(\rho)$. \end{proof} \section{Necessary conditions for differentiability of $\rho$-controlled subsets} In this section, we establish Theorem~\ref{thm:nec_general}. By Example \ref{eg:limitset}, it is sufficient to prove the following theorem. \begin{theorem} \label{thm:nec_general_body}Suppose $\Gamma$ is a hyperbolic group and $\rho: \Gamma \rightarrow \PGL_{d}(\Rb)$ is an irreducible $1$-Anosov representation such that $\bigwedge^m \rho: \Gamma \rightarrow \PGL(\bigwedge^m \Rb^d)$ is also irreducible. Also, suppose that $M$ is a $\rho$-controlled, $(m-1)$-dimensional topological manifold. If \begin{enumerate} \item[($\ddagger$)] $M$ is $C^\alpha$ along $\xi^{(1)}(\partial_\infty\Gamma)$ for some $\alpha>1$, \end{enumerate} then \begin{enumerate} \item[($\dagger$')] $\rho$ is $m$-Anosov and $\xi^{(1)}(x) + p + \xi^{(d-m)}(y)$ is a direct sum for all pairwise distinct $\xi^{(1)}(x),p,\xi^{(1)}(y) \in M$. \end{enumerate} \end{theorem} \begin{remark} Note that ($\dagger'$) in Theorem \ref{thm:nec_general_body} is a weaker condition than ($\dagger$) in Theorem \ref{thm:main_body}. However, when $M=\xi^{(1)}(\partial_\infty\Gamma)$, then the two conditions are identical. 
\end{remark} First, in Section \ref{sec:wedge}, we define, for any $1\leq m\leq d$ and any representation $\rho:\Gamma\to\PGL_d(\Rb)$, the representation \[\bigwedge^m\rho:\Gamma\to\PGL\left(\bigwedge^m\Rb^d\right),\] whose irreducibility appears as a hypothesis in the statements of Theorem~\ref{thm:nec_general_body}. Then, in Section \ref{sec:irredeg}, we give an example to demonstrate the necessity of the irreducibility of $\bigwedge^m\rho$ as a hypothesis of Theorem \ref{thm:nec_general_body} (and also in Theorem~\ref{thm:nec_general}). Next, we prove Theorem \ref{thm:nec_general_body}, whose proof can be broken down into two main steps. In Section \ref {sec:egap}, we use the fact that $M$ is an $(m-1)$-dimensional topological manifold that is $C^\alpha$ along the $1$-limit set of $\rho$ for some $\alpha>1$, to deduce that $\log\frac{\lambda_m}{\lambda_{m+1}}(\rho(\gamma))$ grows linearly with respect to the word length of $\gamma$. Then, in Section \ref{sec:sgap}, we use this to deduce that $\rho$ is $m$-Anosov, and obtain the required transversality condition. \subsection{The wedge representation}\label{sec:wedge} Observe that for any $m\leq d-1$, there is a natural linear $\GL_d(\Rb)$-action on $\bigwedge^m\Rb^d$ given by \[g\cdot(u_1\wedge\dots\wedge u_m):=(g\cdot u_1)\wedge\dots\wedge (g\cdot u_m),\] where $u_i\in\Rb^d$ for all $i$. This defines a representation \[\iota_{d,m}:\GL_d(\Rb)\to\GL\left(\bigwedge^m\Rb^d\right),\] which in turn defines a representation \[\widehat{\iota_{d,m}}:\PGL_d(\Rb)\to\PGL\left(\bigwedge^m\Rb^d\right).\] Using this, we may define the \emph{$m$-wedge representation} of $\overline{\rho}:\Gamma\to\GL_d(\Rb)$ (resp. $\rho:\Gamma\to\PGL_d(\Rb)$) to be \[\bigwedge^m\overline{\rho}:=\iota_{d,m}\circ\overline{\rho}:\Gamma\to\GL\left(\bigwedge^m\Rb^d\right)\,\,\,\left(\text{resp. 
}\bigwedge^m\rho:=\widehat{\iota_{d,m}}\circ\rho:\Gamma\to\PGL\left(\bigwedge^m\Rb^d\right)\right).\] Also, if $\langle\cdot,\cdot\rangle_{\Rb^d}$ denotes the standard inner product on $\Rb^d$ with orthonormal basis $e_1,\dots,e_d$, then we may define a bilinear pairing $\langle\cdot,\cdot\rangle_{\bigwedge^m\Rb^d}$ on $\bigwedge^m\Rb^d$ by first defining \[\langle u_{i_1}\wedge\dots\wedge u_{i_m},v_{j_1}\wedge\dots\wedge v_{j_m}\rangle_{\bigwedge^m\Rb^d}:=\prod_{\sigma\in S_m}\prod_{k=1}^m\sgn(\sigma)\langle u_{i_k},v_{j_{\sigma(k)}}\rangle\] for all $u_{i_k},v_{j_k}\in\Rb^d$, and then extending it linearly to all of $\bigwedge^m\Rb^d$. Observe that \[\{e_{i_1}\wedge\dots\wedge e_{i_m}:1\leq i_1<\dots<i_k\leq d\}\] is an orthonormal basis of $\bigwedge\Rb^d$, so $\langle\cdot,\cdot\rangle_{\bigwedge^m\Rb^d}$ is an inner product. Using this, we may define the norm $\norm{\cdot}_{\bigwedge^m\Rb^d}$ on $\bigwedge^m\Rb^d$ associated to $\langle\cdot,\cdot\rangle_{\bigwedge^m\Rb^d}$. Next, let $\overline{g}\in\GL_d(\Rb)$. For all $i$, let $\mu_i(\iota_{d,m}(\overline{g}))$ denote the $i$-th singular value of $\iota_{d,m}(\overline{g})$ with respect to the norm $\norm{\cdot}_{\bigwedge^m\Rb^d}$ on $\bigwedge^m\Rb^d$. 
One can verify from the definition of the $\GL_d(\Rb)$-action on $\bigwedge^m\Rb^d$ that for all $i$, there exists $i_1 < i_2 < \dots < i_m$ such that \[\lambda_i(\iota_{d,m}(\overline{g}))=\lambda_{i_1}(\overline{g})\dots\lambda_{i_m}(\overline{g})\,\,\,\text{ and }\,\,\,\mu_i(\iota_{d,m}(\overline{g}))=\mu_{i_1}(\overline{g})\dots\mu_{i_m}(\overline{g}).\] This implies that \begin{align}\label{eqn:evalue1} \lambda_1(\iota_{d,m}(\overline{g}))=\prod_{i=1}^m\lambda_{i}(\overline{g})\,\,\,\text{ and }\,\,\,\lambda_2(\iota_{d,m}(\overline{g}))=\lambda_{m+1}(\overline{g})\prod_{i=1}^{m-1}\lambda_{i}(\overline{g}) \end{align} and \begin{align}\label{eqn:0evalue1} \mu_1(\iota_{d,m}(\overline{g}))=\prod_{i=1}^m\mu_{i}(\overline{g})\,\,\,\text{ and }\,\,\,\mu_2(\iota_{d,m}(\overline{g}))=\mu_{m+1}(\overline{g})\prod_{i=1}^{m-1}\mu_{i}(\overline{g}). \end{align} Hence, for any $\gamma\in\Gamma$, \begin{align}\label{eqn:evalue2} \frac{\lambda_1}{\lambda_2}\left(\bigwedge^m\rho(\gamma)\right)=\frac{\lambda_m}{\lambda_{m+1}}(\rho(\gamma)) \end{align} and \begin{align}\label{eqn:0evalue2} \frac{\mu_1}{\mu_2}\left(\bigwedge^m\rho(\gamma)\right)=\frac{\mu_m}{\mu_{m+1}}(\rho(\gamma)). \end{align} \subsection{Irreducibility of $\bigwedge^m\rho$}\label{sec:irredeg} Now, we will discuss an example to demonstrate that the irreducibility of $\bigwedge^m\rho$ is a necessary hypothesis for Theorem~\ref{thm:nec_general_body} to hold. The identification of $\Cb^3$ with $\Rb^6$ given by \[(z_1,z_2,z_3)\mapsto(\Re(z_1),\Im(z_1),\Re(z_2),\Im(z_2),\Re(z_3),\Im(z_3))\] defines an inclusion $j:\SL_3(\Cb)\to\SL_6(\Rb)$. The image of $j$ can be characterized as the subgroup of $\SL_6(\Rb)$ that commutes with the linear endomorphism $J$ on $\Rb^6$ defined by $J(x_1,y_1,x_2,y_2,x_3,y_3):=(-y_1,x_1,-y_2,x_2,-y_3,x_3)$. 
Let $\SU(2,1)\subset\SL_3(\Cb)$ be the subgroup that leaves invariant the bilinear pairing that is represented in the standard basis of $\Cb^3$ by the matrix \[\left(\begin{array}{ccc} 0&0&1\\ 0&1&0\\ 1&0&0 \end{array}\right),\] and define \[\tau_0:=(\iota_{6,2}\circ j)|_{\SU(2,1)} : \SU(2,1) \rightarrow \SL\left(\bigwedge^2 \Rb^6\right).\] Recall that $\iota_{d,m}$ was defined in Section \ref{sec:wedge}. Let $\bigwedge^2J$ be the linear endomorphism on $\bigwedge^2\Rb^6$ given by \[\left(\bigwedge^2J\right)(u_1\wedge u_2)=J(u_1)\wedge J(u_2).\] Consider the $\tau_0$-invariant subspace \begin{align*} E = \left\{ v \in \bigwedge^2 \Rb^6: \left(\bigwedge^2J\right)(v) = v\right\}, \end{align*} and let $\tau : \SU(2,1) \rightarrow \GL(E)$ be the representation defined by the $\tau_0$ action on $E$. Observe that if $e_1,\dots,e_6$ is the standard basis for $\Rb^6$, then \begin{align*} &f_1:=e_1\wedge e_2,&&f_4:=e_3\wedge e_4,&&f_7:=e_3\wedge e_5+e_4\wedge e_6,\\ &f_2:=e_2\wedge e_3-e_1\wedge e_4,&&f_5:=e_2\wedge e_5-e_1\wedge e_6,&&f_8:=e_4\wedge e_5-e_3\wedge e_6,\\ &f_3:=e_1\wedge e_3+e_2\wedge e_4,&&f_6:=e_1\wedge e_5+e_2\wedge e_6,&&f_9:=e_5\wedge e_6 \end{align*} is a basis of $E$. One can then explicitly verify that $\tau$ is irreducible. If $g \in \SU(2,1)$, then there exists some $\lambda \geq 1$ such that the (complex) eigenvalues of $g \in \SU(2,1)$ have absolute values $\lambda, 1, \lambda^{-1}$. By conjugating $g$ by an appropriate element in $h \in \SU(2,1)$, we may also assume that the generalized eigenvectors of $hgh^{-1}$ corresponding to $\lambda,1,\lambda^{-1}$ are $(1,0,0)^T$, $(0,1,0)^T$ and $(0,0,1)^T$ respectively. This implies that the eigenvalues of $j(hgh^{-1})$ have absolute values $\lambda,1,\lambda^{-1}$ (each with multiplicity $2$), and the corresponding invariant subspaces are $\Span_{\Rb}\{e_1,e_2\}$, $\Span_{\Rb}\{e_3,e_4\}$ and $\Span_{\Rb}\{e_5,e_6\}$ respectively. 
Using the basis of $E$ described above, one can then compute that the eigenvalues of $\tau(hgh^{-1})$, and hence $\tau(g)$, have absolute values \begin{align*} \lambda^2, \lambda, \lambda, 1,1,1, \lambda^{-1}, \lambda^{-1}, \lambda^{-2}. \end{align*} In particular, the image of $\tau$ lies in $\SL(E)$. With this set up, we can now give our example. It describes a $1$-Anosov, irreducible representation of a co-compact lattice $\Gamma\subset\SU(2,1)$ to $\SL(E)$, where $E$ is a $9$-dimensional vector space. We show that the $1$-limit set of this representation is a $3$-dimensional, $C^\infty$-submanifold of $\Pb(E)$, but $\rho$ is not $4$-Anosov. \begin{example}\label{ex:irred_bad_example} Fix a co-compact lattice $\Gamma \leq \SU(2,1)$. Since $\SU(2,1)$ is a rank-one Lie group, it acts transitively and by isometries on a negatively curved Riemannian symmetric space $\Hb_{\Cb}^2$ (the $2$-dimensional complex hyperbolic space), whose visual boundary $\partial_\infty\Hb_{\Cb}^2$ has the structure of a $3$-dimensional smooth sphere. Thus, the inclusion of $\Gamma$ into $\SU(2,1)$ specifies an identification of $\partial_\infty\Gamma=\partial_\infty\Hb_{\Cb}^2$. As $\SU(2,1)$-spaces, $\Hb_{\Cb}^2\simeq\SU(2,1)/B$, where $B\subset\SU(2,1)$ is the subgroup of upper triangular matrices. It is straightforward to check that $\tau(B)\subset\SL(E)$ lies in $P\cap Q$, where $P\subset\SL(E)$ is the subgroup that preserves the line spanned by $f_1$, and $Q\subset\SL(E)$ is the subgroup that preserves $\Span_{\Rb}(f_1,\dots,f_8)$. In particular, there are smooth, $\tau$-equivariant maps \[\xi^{(1)}:\partial_\infty\Gamma=\SU(2,1)/B\to\SL(E)/P=\Pb(E)\] and \[\xi^{(8)}:\partial_\infty\Gamma=\SU(2,1)/B\to\SL(E)/Q=\Pb^*(E)=\Gr_8(E).\] Furthermore, a result of Guichard-Wienhard \cite[Proposition 4.4]{GW2012} imply that $\tau_\Gamma:\Gamma\to\SL(E)$ is a $1$-Anosov representation whose $1$-flag map and $8$-flag map are $\xi^{(1)}$ and $\xi^{(8)}$ respectively. 
The eigenvalue calculation above implies that $\rho$ is not $4$-Anosov. However, the $1$-limit set $\xi^{(1)}(\partial_\infty \Gamma)$ is a $3$-dimensional $C^\infty$-submanifold of $\Pb(E)$. \end{example} \subsection{Eigenvalue gaps from the $C^\alpha$ property along the $1$-limit set}\label{sec:egap} Our goal now will be to prove the following proposition. \begin{proposition}\label{prop:eigenvalue_est} Suppose that $\rho:\Gamma\to\PGL_d(\Rb)$ is a $1$-Anosov representation. Also, suppose that $\bigwedge^m \rho: \Gamma \rightarrow \PGL\left(\bigwedge^m \Rb^d\right)$ is irreducible and $M$ is $\rho$-controlled, $(m-1)$-dimensional topological manifold that is $C^{\alpha}$ along the $1$-limit set of $\rho$ for some $\alpha>1$. If $\gamma \in \Gamma$, then \begin{align*} \frac{ \lambda_{m+1}}{\lambda_m}(\rho(\gamma)) \leq \left(\frac{ \lambda_{2}}{\lambda_1}(\rho(\gamma)) \right)^{\alpha-1}. \end{align*} In particular, $\log\frac{ \lambda_m}{\lambda_{m+1}}(\rho(\gamma))$ grows linearly with the word-length of $\gamma$. \end{proposition} The proof of Proposition \ref{prop:eigenvalue_est} requires two observations and a lemma. \begin{observation}\label{obs:dynamics} Let $g \in \PGL_{d}(\Rb)$ be proximal, let $\overline{g}\in\GL_d(\Rb)$ be a lift of $g$, and let $g^+ \in \Pb(\Rb^d)$ and $g^-\in\Gr_{d-1}(\Rb^d)$ be the attracting fixed point and repelling fixed hyperplane of $g$ respectively. Also, let $d_{\Pb}$ be a distance on $\Pb(\Rb^d)$ induced by a Riemannian metric. If $p\in\Pb(\Rb^d)$ satisfies $p\neq g^+$ and $p\notin g^-$, then \begin{align*} \log\frac{\lambda_2}{\lambda_1}(g) \geq \limsup_{n \rightarrow \infty} \frac{1}{n} \log d_{\Pb}\Big(g^n\cdot p, g^+ \Big). \end{align*} Moreover, there is a proper subspace $V\subset\Rb^d$ so that if $p\notin [V]$, then the above inequality holds as equality. \end{observation} \begin{remark} In the above observation, we identify $g^-\in\Gr_{d-1}(\Rb^d)$ with a hyperplane of $\Pb(\Rb^d)$, which we also denote by $g^-$. 
\end{remark} \begin{proof} Note that the affine chart $\Ab_{g^-}$ contains both $p$ and $g^+$. Equip $\Ab_{g^-}$ with an Euclidean metric $d_{\Ab}$, and let $\Bb$ be the unit ball in $\Ab_{g^-}$ centered at $g^+$. Since $p\notin g^-$, $g^n\cdot p\in\Bb$ for sufficiently large $n$. On $\Bb$, $d_{\Pb}$ and $d_{\Ab}$ are bi-Lipschitz, so there is a constant $A$ so that for sufficiently large $n$, \begin{align}\label{eqn:approx} \frac{1}{A}\frac{\norm{P_2(X)}_{2}}{\norm{P_1(X)}_{2}}\leq d_{\Pb}(g^n\cdot p,g^+)\leq A\frac{\norm{P_2(X)}_{2}}{\norm{P_1(X)}_{2}}, \end{align} where $X\in\Rb^d$ is a non-zero vector in $g^n\cdot p$, $P_1:\Rb^d\to g^+$ is the projection with kernel $g^-$, and $P_2:\Rb^d\to g^-$ is the projection with kernel $g^+$. On the other hand, it is straightforward that \begin{align}\label{eqn:proj} \log\frac{\lambda_2}{\lambda_1}(g) \geq \limsup_{n \rightarrow \infty} \frac{1}{n} \log \frac{\norm{P_2(\overline{g}^n\cdot X)}_{2}}{\norm{P_1(\overline{g}^n\cdot X)}_{2}}, \end{align} thus giving the desired inequality. To determine $V$, choose a basis $\{e_1,\dots,e_d\}$ for $\Rb^d$ so that $g$ is in real Jordan normal form in this basis. We may assume that $e_1$ is an eigenvector of $g$ corresponding to $\lambda_1$, and there is some $l$ so that $e_2,\dots,e_l$ spans the invariant corresponding to $\lambda_2$. Let $V$ be the span of $e_1,e_{l+1},\dots,e_d$, and it is easy to see that the inequality \eqref{eqn:proj} holds with equality when $v\notin [V]$. This proves the observation. \end{proof} \begin{observation}\label{obs:slow} Let $g\in\GL_d(\Rb)$ be so that $\frac{\lambda_1}{\lambda_d}(g)>1$. Then let $\Rb^d=V_1+V_2$ be the $g$-invariant decomposition so that every eigenvalue of $g|_{V_1}$ has absolute value $\lambda_1$ and every eignevalue of $g|_{V_2}$ has absolute value strictly less than $\lambda_1$. Suppose that $\dim(V_1)>1$ and $g$ has an invariant line $l\in[V_1]$. 
Then for all $p\in \Pb(\Rb^d)\setminus[l+V_2]$, \begin{align} \label{eq:W_subspace_zero_limit} 0 = \lim_{n \rightarrow \infty} \frac{1}{n} \log d_{\Pb}\Big(g^{n}\cdot p, l\Big), \end{align} where $d_{\Pb}$ is a distance on $\Pb(\Rb^d)$ induced by a Riemannian metric. \end{observation} \begin{proof} First, note that since $d_{\Pb}$ has bounded diameter, \begin{align*} \limsup_{n \rightarrow \infty} \frac{1}{n} \log d_{\Pb}\Big(g^{n}\cdot p, l\Big) \leq 0. \end{align*} Now assume for a contradiction that Equation~\eqref{eq:W_subspace_zero_limit} does not hold for some $p\in \Pb(\Rb^d)\setminus[l+V_2]$. Then by taking a subsequence, we may assume that \begin{align} \label{eq:bad_assumption} \lim_{k \rightarrow \infty} \frac{1}{n_k} \log d_{\Pb}\Big(g^{n_k}\cdot p, l\Big) < 0. \end{align} Notice that this implies that $g^{n_k}\cdot p \rightarrow l$ as $k\to\infty$. Using the real Jordan normal form of $g$, we can decompose $V_1 = \bigoplus_{j=1}^r V_{1,j}$ where \begin{enumerate} \item $V_{1,1} = l$, \item for $2 \leq j \leq r$ \begin{enumerate} \item $V_{1,j}$ is either one or two dimensional, \item there exists a linear transformation $L_j : V_{1,j} \rightarrow V_{1,j}$ such that \begin{align*} g \cdot Y \in L_j\cdot Y + V_{1,j-1} \end{align*} for all $Y\in V_{1,j}$, \item $\norm{L_j \cdot Y}_{2} = \lambda_1(g)\norm{Y}_{2}$ for all $Y \in V_{1,j}$. \end{enumerate} \end{enumerate} Also, let $P_{1,j} : \Rb^d \rightarrow V_{1,j}$ and $P_2 : \Rb^d \rightarrow V_{2}$ be the projections relative to the decomposition $\Rb^d = V_{1,1} \oplus \dots \oplus V_{1,r}\oplus V_2$. 
Since $g^{n_k}\cdot p$ converges to $l$, \eqref{eqn:approx} in the first part of the proof of Observation~\ref{obs:dynamics} implies that there exists $A \geq 1$ such that \begin{align} \label{eq:affine_chart_estimate_non_proximal} \frac{1}{A} \left( \frac{\sum_{j=1}^\ell \norm{P_{1,j}(g^{n_k}\cdot X)}_{2} + \norm{P_2(g^{n_k}\cdot X)}_{2}}{\norm{P_{1,1}(g^{n_k}\cdot X)}_{2}} \right) \leq d_{\Pb}\Big(g^{n_k}\cdot p, \Phi(\xi^{(1)}(\gamma^+))\Big) \end{align} for all non-zero $X\in p$ and all sufficiently large $k$. Since $X \notin l+V_2$, there exists $2 \leq j_0 \leq r$ such that $P_{1,j_0}(X) \neq 0$. By increasing $j_0$ if necessary, we can also assume that $P_{1,j}(X) = 0$ for $j_0 < j \leq r$. This implies that \begin{align} \label{eq:P1j0_estimate} \norm{P_{1,j_0}(g^{n}\cdot X)}_{2} = \lambda_1(g)^n \norm{P_{1,j_0}(X)}_{2}. \end{align} Further, by increasing $A \geq 1$ if necessary, we can assume that \begin{align} \label{eq:P11_estimate} \norm{P_{1,1}(g^{n}\cdot X)}_{2} \leq A\norm{g^{n}\cdot X}_{2} \leq A\norm{g^{n}}_{\mathrm{op}}\norm{X}_{2} \end{align} for all $n \geq 0$. Then by Equations~\eqref{eq:affine_chart_estimate_non_proximal},~\eqref{eq:P1j0_estimate}, and~\eqref{eq:P11_estimate}, \begin{align*} \lim_{k \rightarrow \infty} \frac{1}{n_k} \log d_{\Pb}\Big(g^{n_k}\cdot p, l\Big) &\geq \limsup_{k \rightarrow \infty} \frac{1}{n_k} \log \frac{\norm{P_{1,j_0}(g^{n_k}\cdot X)}_{2}}{A\norm{P_{1,1}(g^{n_k}\cdot X)}_{2}} \\ & \geq \log(\lambda_1(g))+\limsup_{k \rightarrow \infty} \frac{1}{n_k} \log \frac{\norm{P_{1,j_0}(X)}_{2}}{A^2\norm{g^{n_k}}_{\mathrm{op}}\norm{X}_{2}} \\ & \geq \log(\lambda_1(g))-\liminf_{k \rightarrow \infty} \frac{1}{n_k} \log\norm{g^{n_k}}_{\mathrm{op}}. 
\end{align*} But Gelfand's formula states that \begin{align*} \lim_{n \rightarrow \infty} \frac{1}{n} \log\norm{g^{n}}_{\mathrm{op}} = \log(\lambda_1(g)), \end{align*} so by \eqref{eq:bad_assumption}, \begin{align*} 0 & > \lim_{k \rightarrow \infty} \frac{1}{n_k} \log d_{\Pb}\Big(g^{n_k}\cdot p, \Phi(\xi^{(1)}(\gamma^+))\Big) \geq 0. \end{align*} and we have a contradiction. \end{proof} Next, suppose $\rho:\Gamma\to\PGL_d(\Rb)$ and $M\subset\Pb(\Rb^d)$ satisfy the hypothesis of Proposition \ref{prop:eigenvalue_est}. Define the map \begin{align}\label{eqn:Fdm} F_{d,m}:\Gr_m(\Rb^d)\to\Pb\left(\bigwedge^m\Rb^d\right)\,\,\,\text{ by }\,\,\,F_{d,m}:V\mapsto\left[\bigwedge_{i=1}^mv_i\right], \end{align} where $v_1,\dots,v_m$ is a basis of $V$. Note that $F_{d,m}$ is well defined, smooth, and $\widehat{\iota_{d,m}}$-equivariant. Since $M$ is differentiable along the $1$-limit set $\xi^{(1)}(\partial_\infty\Gamma)$ of $\rho$, we can define $\overline{\Phi}:\xi^{(1)}(\partial_\infty\Gamma)\to\Gr_m(\Rb^d)$ to be the map that associates to every point in $\xi^{(1)}(\partial_\infty\Gamma)$ its tangent space. Then define \begin{align}\label{eqn:Phi} \Phi:= F_{d,m}\circ\overline{\Phi}: \xi^{(1)}(\partial_\infty\Gamma) \rightarrow \Pb\left(\bigwedge^{m} \Rb^{d}\right). \end{align} \begin{remark}\label{rem:d1d2} Fix distances $d_1$ on $\Pb(\Rb^{d})$ and $d_2$ on $\Pb\left(\bigwedge^{m} \Rb^{d}\right)$ which are induced by Riemannian metrics. Since $M$ is $C^\alpha$ along $\xi^{(1)}(\partial_\infty\Gamma)$ and $F_{d,m}$ is smooth, a calculation shows that there is some $C\geq 1$ so that \begin{align*} d_2(\Phi(q_1), \Phi(q_2)) \leq C d_1(q_1, q_2)^{\alpha-1} \end{align*} for all $q_1, q_2 \in \xi^{(1)}(\partial_\infty\Gamma)$. \end{remark} \begin{lemma}\label{lem:proximal} Suppose that $\rho:\Gamma\to\PGL_d(\Rb)$ is a $1$-Anosov representation. 
Also, suppose that $\bigwedge^m \rho: \Gamma \rightarrow \PGL\left(\bigwedge^m \Rb^d\right)$ is irreducible and $M$ is $\rho$-controlled, $(m-1)$-dimensional topological manifold that is $C^{\alpha}$ along the $1$-limit set of $\rho$ for some $\alpha>1$. If $\gamma \in \Gamma$ has infinite order, then $g:=\left(\bigwedge^{m} \rho\right)(\gamma)$ is proximal and $\Phi(\xi^{(1)}(\gamma^+))\in\Pb\left(\bigwedge^m\Rb^d\right)$ is the attracting fixed point of $g$. \end{lemma} \begin{proof} Let $\overline{h} \in \GL_d(\Rb)$ be a lift of $\rho(\gamma)$, $\overline{g}:=\bigwedge^{m} \overline{h}$, and $\lambda_i = \lambda_i(\overline{h})$ for $i=1,\dots,d$. Then by \eqref{eqn:evalue1}, $\lambda_1(\overline{g})=\lambda_1\cdots\lambda_m$. Thus it is equivalent to prove that $\overline{g}$ is proximal and $\Phi(\xi^{(1)}(\gamma^+))$ is the eigenline of $\overline{g}$ whose eigenvalue has absolute value $\lambda_1 \cdots \lambda_{m}$. We first show that $\Phi(\xi^{(1)}(\gamma^+))$ is an eigenline of $\overline{g}$ whose eigenvalue has absolute value $\lambda_1 \cdots \lambda_{m}$. Let $\{n_k\}_{k=1}^\infty$ be an increasing sequence of integers such that \begin{align*} \frac{1}{\norm{\overline{g}^{n_k}}}\overline{g}^{n_k} \end{align*} converges to some $T \in \End\left(\bigwedge^{m} \Rb^{d}\right)$. Also, let $\bigwedge^m \Rb^d = V_1 \oplus V_2$ be a $\overline{g}$-invariant decomposition of $\bigwedge^m \Rb^d$, where every eigenvalue of $\overline{g}|_{V_1}$ has absolute value $\lambda_1 \cdots \lambda_m$ and every eignevalue of $\overline{g}|_{V_2}$ has absolute value strictly less than $\lambda_1 \cdots \lambda_m$. Observe that the image of $T$ is contained in $V_1$. Since \[\overline{g}\cdot\Phi(\xi^{(1)}(\gamma^+))=\Phi(\xi^{(1)}(\gamma\cdot \gamma^+))=\Phi(\xi^{(1)}(\gamma^+)),\] $\Phi(\xi^{(1)}(\gamma^+))$ is an eigenline of $\overline{g}$. Thus, we only need to show that $\Phi(\xi^{(1)}(\gamma^+))$ is contained in the image of $T$. 
We claim that the image of $T$ is exactly $\Phi(\xi^{(1)}(\gamma^+))$. Notice that if $p=[v] \in \Pb(\bigwedge^{m} \Rb^{d})$ and $v \notin \ker T$ then \begin{align*} [T(v)] = \lim_{k \rightarrow \infty} g^{n_k}\cdot p \end{align*} (recall that $[v]$ denotes the projective line containing $v$). Further, since $\bigwedge^{m} \rho : \Gamma \rightarrow \PGL\left(\bigwedge^{m} \Rb^d\right)$ is irreducible, the set $\{ \Phi(x) : x \in \xi^{(1)}(\partial_\infty\Gamma)\}$ spans $\bigwedge^{m} \Rb^d$. Thus there exists $x_1, \dots, x_N \in \partial_\infty \Gamma$ such that \begin{align*} \Phi(\xi^{(1)}(x_1)), \dots, \Phi(\xi^{(1)}(x_N)) \end{align*} span $\bigwedge^{m} \Rb^d$. By perturbing and relabelling the $x_i$ (if necessary) we can also assume that $\gamma^- \notin \{x_1, \dots, x_N\}$, and that there exists $1 \leq \ell \leq N$ such that \begin{align*} \Phi(\xi^{(1)}(x_1)) + \dots + \Phi(\xi^{(1)}(x_\ell)) + \ker T = \bigwedge^{m} \Rb^d \end{align*} is a direct sum. For $1 \leq i \leq \ell$, \begin{align*} T ( \Phi(\xi^{(1)}(x_i)) ) = \lim_{k \rightarrow \infty} g^{n_k} \Phi(\xi^{(1)}(x_i)) = \lim_{k \rightarrow \infty} \Phi( \xi( \gamma^{n_k}\cdot x_i)) = \Phi(\xi^{(1)}(\gamma^+)), \end{align*} so the image of $T$ is $\Phi(\xi^{(1)}(\gamma^+))$. Thus, $\Phi(\xi^{(1)}(\gamma^+))$ is an eigenline of $\overline{g}$ whose eigenvalue has absolute value $\lambda_1 \cdots \lambda_{m}$. We next argue that $\overline{g}$ is proximal, or equivalently that $\dim V_1 = 1$. Let $d_1$ and $d_2$ be as defined in Remark \ref{rem:d1d2}. Let \begin{align*} W := V_2 + \Phi(\xi^{(1)}(\gamma^+)), \end{align*} and suppose for contradiction that $\dim V_1 > 1$. This implies that $W\subset\Rb^d$ is a proper subspace. By Observation \ref{obs:slow}, \begin{align*} 0 = \lim_{n \rightarrow \infty} \frac{1}{n} \log d_2\Big(g^{n}\cdot p, \Phi(\xi^{(1)}(\gamma^+))\Big) \end{align*} when $p \in \Pb\left(\bigwedge^{m} \Rb^d\right) \setminus [W]$. 
Since $\left\{ \Phi(x) : x \in \xi^{(1)}(\partial_\infty\Gamma)\right\}$ spans $\bigwedge^{m} \Rb^d$, there exists $x \in \partial_\infty \Gamma$ such that $\Phi(\xi^{(1)}(x)) \notin [W]$. By perturbing $x$ (if necessary) we can assume that $x \neq \gamma^-$. Then \begin{align*} \lim_{n\to\infty}\rho(\gamma)^n\cdot \xi^{(1)}(x)=\xi^{(1)}(\gamma^+) \text{ and } \lim_{n\to\infty}g^{n} \cdot\Phi(\xi^{(1)}(x))=\Phi(\xi^{(1)}(\gamma^+)). \end{align*} So, by Observation~\ref{obs:dynamics}, \begin{align*} 0 > \log \frac{\lambda_2}{\lambda_1} & \geq \limsup_{n \rightarrow \infty} \frac{1}{n} \log d_1\Big(\rho(\gamma)^n\cdot \xi^{(1)}(x), \xi^{(1)}(\gamma^+)\Big) \\ &\geq \limsup_{n \rightarrow \infty} \frac{1}{(\alpha-1) n} \log d_2\Big( \Phi(\xi^{(1)}(\gamma^n\cdot x)),\Phi( \xi^{(1)}(\gamma^+))\Big) \\ & = \limsup_{n \rightarrow \infty} \frac{1}{(\alpha-1) n} \log d_2\Big( g^n\cdot\Phi(\xi^{(1)}(x)),\Phi( \xi^{(1)}(\gamma^+))\Big) =0, \end{align*} where the last inequality is Remark \ref{rem:d1d2}. This is a contradiction, so $\overline{g}$ is proximal. \end{proof} \begin{proof}[Proof of Proposition~\ref{prop:eigenvalue_est}] Fix some $\gamma \in \Gamma$. If $\gamma$ has finite order, then \begin{align*} \frac{\lambda_i}{\lambda_j}(\rho(\gamma)) = 1 \end{align*} for all $1 \leq i,j \leq d$ and there is nothing to prove. So suppose that $\gamma$ has infinite order and let $\gamma^+ \in \partial_\infty \Gamma$ be the attracting fixed point of $\gamma$. By Lemma \ref{lem:proximal}, $g:=\bigwedge^m \rho(\gamma)$ is proximal and $\Phi(\xi^{(1)}(\gamma^+)) = g^+$. By \eqref{eqn:evalue2} and Observation~\ref{obs:dynamics}, there exists a proper subspace $V \subset \bigwedge^m \Rb^d$ such that: if $p\in \Pb(\bigwedge^m \Rb^{d}) \setminus [V]$ and $p$ is not in the repelling hyperplane of $g$, then \begin{align*} \log \frac{ \lambda_{m+1}}{\lambda_m}(\rho(\gamma)) = \lim_{n \rightarrow \infty} \frac{1}{n} \log d_2\Big( g^n\cdot p, \Phi(\xi^{(1)}(\gamma^+)) \Big). 
\end{align*} Since $\{ \Phi(q) : q \in \xi^{(1)}(\partial_\infty\Gamma)\}$ spans $\bigwedge^m \Rb^d$ we can find $x \in \partial_\infty \Gamma$ such that $\Phi(\xi^{(1)}(x)) \notin [V]$. By perturbing $x$ if necessary, we can also assume that $x \neq \gamma^-$. Then \begin{align*} \lim_{n \rightarrow \infty} g^n\cdot \Phi(\xi^{(1)}(x)) = \Phi(\xi^{(1)}(\gamma^+)), \end{align*} so $\Phi(\xi^{(1)}(x))$ does not lie in the repelling hyperplane of $g$. Thus, by Observation~\ref{obs:dynamics} and Remark \ref{rem:d1d2} \begin{align*} \log \frac{ \lambda_{m+1}}{\lambda_m}(\rho(\gamma)) &\leq (\alpha-1) \lim_{n \rightarrow \infty} \frac{1}{n} \log d_1\Big( \rho(\gamma)^n\cdot \xi^{(1)}(x), \xi^{(1)}(\gamma^+) \Big)\\ & \leq (\alpha-1) \log \frac{ \lambda_2}{\lambda_1}(\rho(\gamma)). \end{align*} \end{proof} \subsection{Anosovness from eigenvalue gaps}\label{sec:sgap} To prove Theorem~\ref{thm:nec_general_body}, we will use the following proposition. \begin{proposition}\label{prop:nec_general} Suppose that $\rho:\Gamma\to\PGL_d(\Rb)$ is an irreducible $1$-Anosov representation and $M$ is a $\rho$-controlled subset which is $C^1$ along $\xi^{(1)}(\partial_\infty\Gamma)$. Suppose also that for all $\gamma\in\Gamma$ with infinite order, \begin{align*} \frac{ \lambda_{m+1}}{\lambda_m}(\rho(\gamma)) \leq \left(\frac{ \lambda_{2}}{\lambda_1}(\rho(\gamma)) \right)^{\alpha-1}, \end{align*} $g:=\left(\bigwedge^{m} \rho\right)(\gamma)$ is proximal, and $\Phi(\xi^{(1)}(\gamma^+))\in\Pb\left(\bigwedge^m\Rb^d\right)$ is the attracting fixed point of $g$. Then $\rho$ is $m$-Anosov, and $\xi^{(1)}(x) + p + \xi^{(d-m)}(y)$ is a direct sum for all pairwise distinct $\xi^{(1)}(x), p, \xi^{(1)}(y) \in M$. \end{proposition} Assuming Proposition \ref{prop:nec_general}, we can prove Theorem \ref{thm:nec_general_body}. 
\begin{proof}[Proof of Theorem~\ref{thm:nec_general_body}] If Condition ($\ddagger$) holds, then Proposition \ref{prop:eigenvalue_est}, Lemma \ref{lem:proximal} and Proposition \ref{prop:nec_general} imply Condition ($\dagger'$). \end{proof} We start the proof of Proposition~\ref{prop:nec_general} by making some initial reductions. First, notice that the reduction made in Remark \ref{rem:lift} does not impact the hypothesis or conclusion of the Proposition. So we may assume that there exists a lift $\overline{\rho}:\Gamma\to\SL_d(\Rb)$ of $\rho$. Second, notice that passing to a finite index subgroup also does not impact the hypotheses or conclusion of the Proposition (see Proposition~\ref{prop:strongly_irreducible}). Hence we may also assume that the Zariski closure of $\overline{\rho}(\Gamma)$ is connected. The proof of Proposition \ref{prop:nec_general} requires the following lemma. \begin{lemma}\label{lem:est_on_sing_values} If $1<\beta < \alpha$, then there exists $C > 0$ such that \begin{align*} \log \frac{ \mu_{m+1}}{\mu_m}(\rho(\gamma)) \leq (\beta-1) \log \frac{ \mu_2}{\mu_1}(\rho(\gamma)) +C \end{align*} for all $\gamma \in \Gamma$. \end{lemma} \begin{proof} Let $\Cc_\mu=\Cc_\mu(\overline{\rho}(\Gamma))$ and $\Cc_\lambda=\Cc_\lambda(\overline{\rho}(\Gamma))$ be the cones defined in Section~\ref{sec:cones}. Then $\Cc_\mu = \Cc_\lambda$ by Proposition~\ref{prop:Zclosure} and Theorem~\ref{thm:cones}. By hypothesis, if $x=(x_1, \dots, x_d) \in \Cc_\lambda$, then \begin{align*} x_{m+1}-x_m \leq (\alpha-1) (x_2 - x_1). \end{align*} Further, since $\rho$ is $1$-Anosov, $x_2 - x_1 <0$ for all $x=(x_1, \dots, x_d) \in \Cc_\lambda$. Next, we will prove that there exists $R > 0$ with the following property: if $\norm{\mu(\overline{\rho}(\gamma))}_2 \geq R$, then \begin{align*} \log \frac{ \mu_{m+1}}{\mu_m}(\rho(\gamma)) \leq (\beta-1) \log \frac{ \mu_2}{\mu_1}(\rho(\gamma)). 
\end{align*} Suppose for contradiction that there exists $\{\gamma_n\}_{n=1}^\infty \subset \Gamma$ with $\norm{\mu(\overline{\rho}(\gamma_n))}_2 \rightarrow \infty$ and \begin{align*} \log \frac{ \mu_{m+1}}{\mu_m}(\rho(\gamma_n)) > (\beta-1) \log \frac{ \mu_2}{\mu_1}(\rho(\gamma_n)). \end{align*} By passing to a subsequence, we can assume that \begin{align*} \frac{1}{\norm{\mu(\overline{\rho}(\gamma_n))}_2} \mu(\overline{\rho}(\gamma_n)) \rightarrow x=(x_1, \dots, x_d). \end{align*} Then $x \in \Cc_\mu = \Cc_\lambda$ and \begin{align*} x_{m+1}-x_m \geq (\beta-1)( x_2 -x_1) > (\alpha-1) (x_2 - x_1) \end{align*} so we have a contradiction. The lemma then follows from the observation that since $\rho$ is $1$-Anosov, the set $\{ \gamma \in \Gamma : \norm{\mu(\overline{\rho}(\gamma))}_2 < R\}$ is finite. \end{proof} \begin{proof}[Proof of Proposition~\ref{prop:nec_general}] Since $\rho$ is $1$-Anosov, Theorem~\ref{thm:SV_char_of_Anosov} implies that there exists $C_0, c_0 > 0$ such that \begin{align*} \frac{ \mu_2}{\mu_1}(\rho(\gamma)) \leq C_0 e^{-c_0 d_S(\gamma, \id)} \end{align*} for all $\gamma \in \Gamma$. Then by Lemma \ref{lem:est_on_sing_values}, there exists $C,c > 0$ such that \begin{align*} \frac{ \mu_{m+1}}{\mu_m}(\rho(\gamma)) \leq C e^{-c d_S(\gamma, \id)} \end{align*} for all $\gamma \in \Gamma$. Thus, Theorem~\ref{thm:SV_char_of_Anosov} implies that $\rho$ is $m$-Anosov. To finish the proof, we will now show that $\xi^{(1)}(x) + p + \xi^{(d-m)}(y)$ is a direct sum for all pairwise distinct $\xi^{(1)}(x),p,\xi^{(1)}(y) \in M$. Let \[\wh{M}: = \{ (\xi^{(1)}(x),p,\xi^{(1)}(y)) \in M^3 : \xi^{(1)}(x),p,\xi^{(1)}(y) \text{ are pairwise distinct} \}\] and let \[\Oc :=\left\{(\xi^{(1)}(x),p,\xi^{(1)}(y))\in \wh{M}:\xi^{(1)}(x) + p+ \xi^{(d-m)}(y)\text{ is a direct sum}\right\}.\] Notice that $\Oc$ is open and $\Gamma$-invariant. 
Also, recall that $\Gamma$ acts co-compactly on $\widetilde{U\Gamma}$, the flow space associated to $\Gamma$ described in Section~\ref{sec:flowspace}. Hence, there exists a compact set $K \subset \widetilde{U\Gamma}$ such that $\Gamma \cdot K = \widetilde{U\Gamma}$. Then define \begin{align*} 0 < \epsilon := \min\{ d_{\Pb}(\xi^{(1)}(v^+), \xi^{(1)}(v^-)) : v \in K\}, \end{align*} where $d_{\Pb}$ is a distance on $\Pb(\Rb^d)$ induced by a Riemannian metric. Given two proper subspaces $V, W \subset \Rb^d$ define \begin{align*} d(V,W):= \min\{ \norm{v-w}_2 : v \in V, w \in W, \norm{v}_2=\norm{w}_2=1\}. \end{align*} Note that $\{(x,y)\in\partial_\infty\Gamma^2:d_{\Pb}(\xi^{(1)}(x), \xi^{(1)}(y)) \geq \epsilon\}$ is compact. Since $\xi^{(m)}(x) + \xi^{(d-m)}(y)=\Rb^d$ when $x \neq y$, this implies that there exists $\theta_0 > 0$ with the following property: if $x,y \in \partial_\infty \Gamma$ and $d_{\Pb}(\xi^{(1)}(x), \xi^{(1)}(y)) \geq \epsilon$, then \[d(\xi^{(m)}(x),\xi^{(d-m)}(y))\geq\theta_0.\] Also, by hypothesis, if $\gamma \in \Gamma$ has infinite order and $\gamma^+\in \partial_\infty \Gamma$ is the attracting fixed point of $\gamma$, then \begin{align*} \xi^{(m)}(\gamma^+) = T_{\xi^{(1)}(x)} M. \end{align*} So by the continuity of $\xi^{(m)}$ and the density of $\{ \gamma^+: \gamma \in \Gamma \text{ has infinite order}\}$ in $\partial_\infty \Gamma$ we see that $\xi^{(m)}(x) = T_{\xi^{(1)}(x)} M$ for all $x \in \partial_\infty \Gamma$. Thus, the compactness of $M$ implies that there exists $\delta > 0$ with the following property: if $\xi^{(1)}(x),p \in M$ and $d_{\Pb}(\xi^{(1)}(x), p) \leq \delta$, then \begin{align*} d\left( \xi^{(1)}(x)+p,\xi^{(m)}(x) \right) < \theta_0/2. \end{align*} Using this, define \[\Uc :=\left\{(\xi^{(1)}(x),p,\xi^{(1)}(y))\in \wh{M}:d_{\Pb}(\xi^{(1)}(x),\xi^{(1)}(y)) \geq \epsilon\text{ and } d_{\Pb}(\xi^{(1)}(x), p) \leq \delta\right\}.\] We claim that $\Uc\subset \Oc$. 
Indeed, by the definition of $\theta_0$ and $\delta$, if $(\xi^{(1)}(x),p,\xi^{(1)}(y)) \in \Uc$ then \begin{align*} d\left( \xi^{(1)}(x)+p,\xi^{(d-m)}(x) \right) > \theta_0/2. \end{align*} This implies that $\xi^{(1)}(x) +p+ \xi^{(d-m)}(y)$ is direct, so $(\xi^{(1)}(x),p,\xi^{(1)}(y)) \in \Oc$. Next, let $P(M) \subset \widetilde{U\Gamma} \times M$ be the set defined by \eqref{eqn:P(M)}, and recall that $\phi_t$ denotes the geodesic flow on $\widetilde{U\Gamma}$. Note that there exists $T \geq 0$ such that if $v \in K$, $t \geq T$, and $(\phi_t(v),p) \in P(M)$, then $(\xi^{(1)}(v^+),p,\xi^{(1)}(v^-)) \in \Uc\subset \Oc$. Now, choose any $(\xi^{(1)}(x),p,\xi^{(1)}(y)) \in \wh{M}$. From the definition of $P(M)$, there exists $v \in \widetilde{U\Gamma}$ such that $v^+ = x$, $v^- = y$, and $(v,p) \in P(M)$. Further, there exists $\gamma \in \Gamma$ such that $w := \gamma \cdot\phi_{-T}(v) \in K$. Since the $\Gamma$-action on $\wt{U\Gamma}$ commutes with the geodesic flow, \begin{align*} (\phi_T(w),\rho(\gamma)\cdot p) = \gamma\cdot ( v, p) \in P(M) \end{align*} and so $\gamma\cdot (\xi^{(1)}(x),p,\xi^{(1)}(y))=(\xi^{(1)}(w^+),\rho(\gamma)\cdot p,\xi^{(1)}(w^-))\in \Oc$, which means $(\xi^{(1)}(x),p,\xi^{(1)}(y)) \in \Oc$. Thus, $\Oc =\wh{M}$. \end{proof} \section{Necessary conditions for differentiability of $1$-dimensional $\rho$-controlled subsets}\label{sec:nec_surface} In this section we prove Theorem~\ref{thm:nec_surface}. Again by Example \ref{eg:limitset}, it is sufficient to prove the following theorem. \begin{theorem} \label{thm:nec_surface_body} Suppose $\Gamma$ is a hyperbolic group and $\rho: \Gamma \rightarrow \PGL_{d}(\Rb)$ is an irreducible $1$-Anosov representation. Also, suppose that $M$ is a $\rho$-controlled, topological circle. 
If \begin{enumerate} \item[($\ddagger$)] $M$ is $C^\alpha$ along $\xi^{(1)}(\partial_\infty\Gamma)$ for some $\alpha>1$, \end{enumerate} then \begin{enumerate} \item[($\dagger$')] $\rho$ is $m$-Anosov and $\xi^{(1)}(x) + p + \xi^{(d-m)}(y)$ is a direct sum for all pairwise distinct $\xi^{(1)}(x),p,\xi^{(1)}(y) \in M$. \end{enumerate} \end{theorem} Before proving Theorem \ref{thm:nec_surface_body}, we give an example to demonstrate that the irreducibility of $\rho$ is a necessary hypothesis in Theorem \ref{thm:nec_surface_body} (and also in Theorem~\ref{thm:nec_surface}) to hold. \subsection{Irreducibility of $\rho$}\label{sec:rhoirred} For $d \in \Nb$, let $\overline{\tau}_d : \GL_2(\Rb) \rightarrow \GL_d(\Rb)$ be the standard irreducible representation, which is constructed as follows. First, identify $\Rb^d$ with the space of homogeneous degree $d-1$ polynomials in two variables with real coefficients by \[(a_1,\dots,a_d)\mapsto \sum_{i=1}^{d}a_iX^{d-i}Y^{i-1}.\] Using this, we may define an $\GL_2(\Rb)$-action on $\Rb^d$ by \begin{align*} \left(\begin{array}{cc} a&b\\ c&d \end{array}\right) \cdot P(X,Y) = P\left( \left(\begin{array}{cc} a&b\\ c&d \end{array}\right)^{-1}\cdot(X,Y)\right). \end{align*} It is easy to check that this $\GL_2(\Rb)$-action is linear. Thus, it has an associated linear representation $\overline{\tau}_d : \GL_2(\Rb) \rightarrow \GL_d(\Rb)$, which descends to a representation $\tau_d:\PGL_2(\Rb)\to\PGL_d(\Rb)$. One can verify that if $\lambda, \lambda^{-1}$ are the absolute value of the eigenvalues of $\overline{g} \in \SL_2^\pm(\Rb)$, then \begin{align} \label{eq:eigenvalues_std_repn} \lambda^{d-1}, \lambda^{d-3}, \dots, \lambda^{-(d-1)} \end{align} are the absolute values of the eigenvalues of $\overline{\tau}_d(\overline{g})$. Further, if $B_k\subset\GL_k(\Rb)$ denotes the subgroup of upper triangular matrices, then $\overline{\tau}_d(B_2)\subset B_d$. 
In particular, $\overline{\tau}_d$ induces a smooth map \[\Psi_d:\Pb(\Rb^2)\simeq\PGL_2(\Rb)/B_2\to\PGL_d(\Rb)/B_d.\] Since $\GL_d(\Rb)/B_d$ is the space of complete flags in $\Rb^d$, there is an obvious smooth projection $p_m:\GL_d(\Rb)/B_d\to\Gr_m(\Rb^d)$ for each $m=1,\dots,d-1$. Using this, define $\Psi_{d,m}:=p_m\circ\Psi_d:\Pb(\Rb^2)\to\Gr_m(\Rb^d)$. It is clear that $\Psi_{d,m}$ is $\overline{\tau}_d$-equivariant. Next, observe that the subgroup $\Pb(\GL_d(\Rb)\times\GL_{d+2}(\Rb))\subset\PGL_{2d+2}(\Rb)$ preserves both the subspaces $\Pb(\Rb^d)$ and $\Pb(\Rb^{d+2})$ of $\Pb(\Rb^{2d+2})$ induced respectively by the obvious inclusions of $\Rb^d\simeq\Rb^d\oplus\{0\}$ and $\Rb^{d+2}\simeq\{0\}\oplus\Rb^{d+2}$ into $\Rb^d\oplus\Rb^{d+2}\simeq\Rb^{2d+2}$. Similarly, the subspaces $\Gr_{d-1}(\Rb^d)$ and $\Gr_{d+1}(\Rb^{d+2})$ of $\Gr_{2d+1}(\Rb^{2d+2})$ that are respectively defined by the inclusions $V\mapsto V\oplus\Rb^{d+2}$ and $U\mapsto \Rb^d \oplus U$ are $\Pb(\GL_d(\Rb)\times\GL_{d+2}(\Rb))$-invariant. In particular, the representation \[\overline{\tau}_d\oplus\overline{\tau}_{d+2}:\GL_2(\Rb)\to\GL_d(\Rb)\times\GL_{d+2}(\Rb)\subset\GL_{2d+2}(\Rb)\] defines the representation $\tau_d\oplus\tau_{d+2}:\PGL_2(\Rb)\to\PGL_{2d+2}(\Rb)$ by projectivizing, and the maps \begin{align*} &\Psi_{d,1}:\Pb(\Rb^2)\to\Pb(\Rb^d)\subset\Pb(\Rb^{2d+2}),\\ &\Psi_{d+2,1}:\Pb(\Rb^2)\to\Pb(\Rb^{d+2})\subset\Pb(\Rb^{2d+2}),\\ &\Psi_{d,d-1}:\Pb(\Rb^2)\to\Gr_{d-1}(\Rb^d)\subset\Gr_{2d+1}(\Rb^{2d+2}),\\ &\Psi_{d+2,d+1}:\Pb(\Rb^2)\to\Gr_{d+1}(\Rb^{d+2})\subset\Gr_{2d+1}(\Rb^{2d+2}). \end{align*} are smooth and $\tau_d\oplus\tau_{d+2}$-equivariant. One can check that $(\Psi_{d,1},\Psi_{d,d-1})$ and $(\Psi_{d+2,1},\Psi_{d+2,d+1})$ are transverse pairs of maps. It also follows from \eqref{eq:eigenvalues_std_repn} that for any $g\in\PGL_2(\Rb)$, $\tau_d\oplus\tau_{d+2}(g)$ is proximal. 
However, the attracting eigenline and repelling hyperplane of $\tau_d\oplus\tau_{d+2}(g)$ lie in the images of $\Psi_{d+2,1}$ and $\Psi_{d+2,d+1}$ respectively, so only the pair of maps $(\Psi_{d+2,1},\Psi_{d+2,d+1})$ is dynamics preserving. \begin{example}\label{ex:surface_bad_example} Fix a co-compact lattice $\Gamma \leq \PGL_2(\Rb)$. The inclusion of $\Gamma$ into $\PGL_2(\Rb)$ induces an identification $\partial_\infty\Gamma\simeq\Pb(\Rb^2)$, and thus equips $\partial_\infty\Gamma$ with the structure of a smooth manifold. Consider the representation \[\rho :=\tau_d \oplus \tau_{d+2}|_{\Gamma}:\Gamma\to\PGL(\Rb^d\oplus\Rb^{d+2}).\] By the discussion above, \[\Psi_{d+2,1}:\partial_\infty\Gamma\to\Pb(\Rb^{2d+2})\,\,\,\text{ and }\,\,\,\Psi_{d+2,d+1}:\partial_\infty\Gamma\to\Gr_{2d+1}(\Rb^{2d+2})\] is a pair of smooth, dynamics preserving, $\rho$-equivariant, transverse maps. Thus, one deduces from \eqref{eq:eigenvalues_std_repn} that $\rho$ is $1$-Anosov, but it is not $2$-Anosov because \begin{align*} \frac{\lambda_2}{\lambda_3}(\rho(\gamma)) =1 \end{align*} for any $\gamma \in \Gamma$. However, since $\Psi_{d+2,1}$ is a smooth map, the $1$-limit set of $\rho$ is a $1$-dimensional, $C^{\infty}$-submanifold of $\Pb(\Rb^{2d+2})$. This shows that the conclusion of Theorem \ref{thm:nec_surface_body} does not hold if we do not assume the irreducibility hypothesis of Theorem \ref{thm:nec_surface_body}. \end{example} \subsection{Proof of Theorem \ref{thm:nec_surface_body}} Lemma \ref{lem:surface1} and Lemma \ref{lem:surface2} stated below are respectively the analogs of Lemma \ref{lem:proximal} and Proposition \ref{prop:eigenvalue_est} in the case when $M$ is a $1$-dimensional topological manifold. 
With these two lemmas, we can replicate the proof of Theorem \ref{thm:nec_general_body} to prove Theorem~\ref{thm:nec_surface_body}. \begin{remark} In Lemma \ref{lem:proximal} and Proposition \ref{prop:eigenvalue_est}, we assumed that $\bigwedge^2\rho$ is irreducible, but in Lemma \ref{lem:surface1} and Lemma \ref{lem:surface2} we assume that $\rho$ is irreducible. \end{remark} \begin{lemma} \label{lem:surface1} Suppose that $\rho:\Gamma\to\PGL_d(\Rb)$ is an irreducible $1$-Anosov representation. Also, suppose that $M$ is a $\rho$-controlled topological circle that is $C^{\alpha}$ along the $1$-limit set of $\rho$ for some $\alpha>1$, and let $\Phi:M\to\Pb\left(\bigwedge^2\Rb^d\right)$ be as defined in \eqref{eqn:Phi}. If $\gamma \in \Gamma$ has infinite order, then $\bigwedge^{2} \rho(\gamma)$ is proximal and $\Phi\left(\xi^{(1)}(\gamma^+)\right)\in\Pb\left(\bigwedge^2\Rb^d\right)$ is the attracting fixed point of $\bigwedge^{2} \rho(\gamma)$. \end{lemma} \begin{proof} Define $\overline{\Psi}: \xi^{(1)}(\partial_\infty\Gamma)\times \xi^{(1)}(\partial_\infty\Gamma)\rightarrow \Gr_2(\Rb^d)$ by letting $\overline{\Psi}(p,q)$ be the projective line containing $p,q$ when $p \neq q$ and letting $\overline{\Psi}(p,p)$ be the projective line tangent to $M$ at $p$. Then define \[\Psi:=F_{d,2}\circ\overline{\Psi}: \xi^{(1)}(\partial_\infty\Gamma)\times \xi^{(1)}(\partial_\infty\Gamma)\to\Pb\left(\bigwedge^2\Rb^d\right),\] where $F_{d,2}$ is defined by \eqref{eqn:Fdm}. Observe that $\Psi$ is continuous and $\Phi(p)=\Psi(p,p)$ for all $p\in \xi^{(1)}(\partial_\infty\Gamma)$. Fix distances $d_1$ on $\Pb(\Rb^d)$ and $d_2$ on $\Pb\left(\bigwedge^2\Rb^d\right)$ that are induced by Riemannian metrics. Since $M$ is $C^{\alpha}$ along the $1$-limit set of $\rho$ for some $\alpha>1$, there exists $C > 0$ such that \begin{align}\label{eqn:m=2} d_2\Big( \Psi(p,p), \Psi(p, q) \Big) \leq Cd_1(p, q)^{\alpha-1} \end{align} for all $p,q \in \xi^{(1)}(\partial_\infty\Gamma)$. 
Also, since $\rho$ is irreducible, the elements of $ \xi^{(1)}(\partial_\infty\Gamma)$ span $\Rb^d$, so \begin{align*} \Psi\left( \xi^{(1)}(\partial_\infty\Gamma) \times \xi^{(1)}(\partial_\infty\Gamma)\right) \end{align*} spans $\bigwedge^2 \Rb^d$. Now the rest of the proof closely follows the proof of Lemma \ref{lem:proximal}, but we use $\Psi(\xi^{(1)}(\gamma^+),\xi^{(1)}(\gamma^+))$ in place of $\Phi(\xi^{(1)}(\gamma^+))$ and $\Psi(\xi^{(1)}(x),\xi^{(1)}(\gamma^+))$ in place of $\Phi(\xi^{(1)}(x))$. \end{proof} \begin{remark} In the case when $M$ is a topological $(m-1)$-dimensional manifold with $m>2$, it is not true that $\xi^{(1)}(x_1)+\dots+\xi^{(1)}(x_m)$ converges to $\xi^{(m)}(x)$ as $x_i\to x$, so the direct analog of \eqref{eqn:m=2} cannot hold. As such, we need the additional assumption that $\bigwedge^m\rho$ is irreducible in Theorem \ref{thm:nec_general_body}. \end{remark} \begin{lemma}\label{lem:surface2} Suppose that $\rho:\Gamma\to\PGL_d(\Rb)$ is an irreducible $1$-Anosov representation. Also, suppose that $M$ is a $\rho$-controlled topological circle that is $C^{\alpha}$ along the $1$-limit set of $\rho$ for some $\alpha>1$. If $\gamma \in \Gamma$ and $1 \leq m \leq d-1$, then \begin{align*} \frac{ \lambda_{m+1}}{\lambda_m}(\rho(\gamma)) \leq \left(\frac{ \lambda_{2}}{\lambda_1}(\rho(\gamma)) \right)^{\alpha-1}. \end{align*} \end{lemma} \begin{proof} Use the same argument as we did in the proof of Proposition~\ref{prop:eigenvalue_est}, but with $\Psi(\xi^{(1)}(\gamma^+),\xi^{(1)}(\gamma^+))$ and $\Psi(\xi^{(1)}(x),\xi^{(1)}(\gamma^+))$ in place of $\Phi(\xi^{(1)}(\gamma^+))$ and $\Phi(\xi^{(1)}(x))$ respectively, and Lemma \ref{lem:surface1} in place of Lemma \ref{lem:proximal}. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:nec_surface}] Use the same proof as Theorem~\ref{thm:nec_general}, but replace Lemma \ref{lem:proximal} and Proposition \ref{prop:eigenvalue_est} by Lemma \ref{lem:surface1} and Lemma \ref{lem:surface2} respectively. 
\end{proof} \section{$\PGL_d(\Rb)$-Hitchin representations}\label{sec:Hitchin} In this section, let $\Gamma:=\pi_1(\Sigma)$, where $\Sigma$ is a closed, orientable, connected hyperbolic surface of genus at least $2$. \begin{definition}\label{defn:hitchin_reps} A \emph{$\PGL_d(\Rb)$-Hitchin representation} is a continuous deformation (in $\Hom(\Gamma,\PGL_d(\Rb))$) of $\tau_d\circ j$, where $j:\Gamma\to\PGL_2(\Rb)$ is a Fuchsian representation, and $\tau_d:\PGL_2(\Rb)\to\PGL_d(\Rb)$ is the representation defined in Section \ref{sec:rhoirred}. \end{definition} The goal of this section is to show that if $\rho$ is a $\PGL_d(\Rb)$-Hitchin representation, then for all $k=1,\dots,d-1$, $\bigwedge^k\rho:\Gamma\to\PGL(\bigwedge^k\Rb^d)$ satisfies the hypothesis of Theorem \ref{thm:main} (see Example \ref{cor:hitchin}). The following proposition is a straightforward consequence of Labourie's deep work on the Hitchin component~\cite{L2006} and has also been observed by Pozzetti-Sambarino-Wienhard \cite{PSW18}. \begin{proposition}\label{prop:3_hyperconvex_exterior_prod} Let $\rho$ be a $\PGL_d(\Rb)$-Hitchin representation and $D := \dim \left(\bigwedge^k \Rb^d\right)$. If $k \in \{1,\dots, d-1\}$, then $\bigwedge^k \rho : \Gamma \rightarrow \PGL\left(\bigwedge^k \Rb^d\right)$ is $(1,2)$-Anosov, and its $1$-flag map $\zeta^{(1)}$ and $(D-2)$-flag map $\zeta^{(D-2)}$ satisfy the property that \begin{align*} \zeta^{(1)}(x)+\zeta^{(1)}(y) + \zeta^{(D-2)}(z), \end{align*} is a direct sum for all $x,y,z \in \partial_\infty \Gamma$ distinct. \end{proposition} For the rest of the section fix some $\PGL_d(\Rb)$-Hitchin representation $\rho$ and some finite generating set $S$ of $\Gamma$. \subsection{Preliminaries}\label{sec:prelim_Hitchin} Before proving the proposition, we recall some results of Labourie. By Theorem 4.1 and Proposition 3.2 in~\cite{L2006}, \begin{enumerate} \item\label{item:hitchin1} $\rho$ is $k$-Anosov for every $1 \leq k \leq d$. 
Denote the $k$-flag map of $\rho$ by $\xi^{(k)}$. \item\label{item:hitchin2} If $x,y,z \in \partial_\infty \Gamma$ are distinct, $k_1,k_2,k_3 \geq 0$, and $k_1+k_2+k_3 =d$, then \begin{align*} \xi^{(k_1)}(x) + \xi^{(k_2)}(y) + \xi^{(k_3)}(z) = \Rb^d \end{align*} is a direct sum. \item\label{item:hitchin3} If $x,y,z \in \partial_\infty \Gamma$ are distinct and $0\leq k < d-2$, then \begin{align*} \xi^{(k+1)}(y) + \xi^{(d-k-2)}(x) + \Big(\xi^{(k+1)}(z) \cap \xi^{(d-k)}(x) \Big)= \Rb^d \end{align*} is a direct sum. \item $\rho$ admits a lift $\overline{\rho}:\Gamma\to\GL_d(\Rb)$ whose image lies in $\SL_d(\Rb)$. \item\label{item:hitchin4} If $\gamma \in \Gamma \setminus \{1\}$, then the absolute values of the eigenvalues of $\overline{\rho}(\gamma)$ satisfy \begin{align*} \lambda_1(\overline{\rho}(\gamma)) > \dots > \lambda_d(\overline{\rho}(\gamma)). \end{align*} \item\label{item:hitchin5} If $\gamma \in \Gamma \setminus \{1\}$, then $ \xi^{(k)}(\gamma^+)$ is the span of the eigenspaces of $\rho(\gamma)$ corresponding to the eigenvalues \begin{align*} \lambda_1(\overline{\rho}(\gamma)), \dots, \lambda_k(\overline{\rho}(\gamma)). \end{align*} \end{enumerate} \subsection{Proof of Proposition~\ref{prop:3_hyperconvex_exterior_prod}} Since $\rho$ is $k$-Anosov, Theorem~\ref{thm:SV_char_of_Anosov} implies that there exist $C,c>0$ such that \begin{align*} \log \frac{\mu_k}{\mu_{k+1}}(\rho(\gamma)) \geq C d_S(1,\gamma) -c \end{align*} for all $\gamma \in \Gamma$ and $1 \leq k \leq d-1$. \begin{lemma} $\bigwedge^k \rho$ is $(1,2)$-Anosov. \end{lemma} \begin{proof} By Theorem~\ref{thm:SV_char_of_Anosov} it is enough to prove that there exist $A,a>0$ such that \begin{align*} \log \frac{\mu_1}{\mu_{2}}\left(\bigwedge^k\rho(\gamma)\right) \geq A d_S(1,\gamma) -a \end{align*} and \begin{align*} \log \frac{\mu_2}{\mu_{3}}\left(\bigwedge^k \rho(\gamma)\right) \geq A d_S(1,\gamma) -a \end{align*} for all $\gamma \in \Gamma$. 
Fix $\gamma \in \Gamma$ and let $\overline{g} \in \SL_d(\Rb)$ be a lift of $\rho(\gamma)$. Then let \begin{align*} \sigma_1 \geq \dots \geq \sigma_d \end{align*} denote the singular values of $\overline{g}$ (in the Euclidean norm on $\Rb^d$), and let \begin{align*} \chi_1 \geq \dots \geq \chi_D \end{align*} denote the singular values of $\bigwedge^k \overline{g}$ (in the induced norm on $\bigwedge^k\Rb^d$). Recall that Equation~\eqref{eqn:0evalue1} says that \begin{align*} \chi_1 = \sigma_1 \cdots \sigma_k \text{ and } \chi_2 = \sigma_1 \cdots \sigma_{k-1} \sigma_{k+1}. \end{align*} Hence \begin{align*} \log \frac{\mu_1}{\mu_{2}}\left(\bigwedge^k\rho(\gamma)\right) = \log \frac{\chi_1}{\chi_2} = \log \frac{\sigma_k}{\sigma_{k+1}} \geq C d_S(1,\gamma) -c. \end{align*} To verify the other inequality, pick $1 \leq i_1 < \dots <i_k \leq d$ such that \begin{align*} \chi_3 = \sigma_{i_1} \cdots \sigma_{i_k}. \end{align*} We consider two cases based on the value of $i_{k-1}$. \noindent \textbf{Case 1:} Suppose $i_{k-1} = k-1$. Then $i_j = j$ for $j \leq k-1$ and $i_k \geq k$. Since \begin{align*} (i_1,\dots, i_k) \notin \{ (1,\dots, k), (1,\dots,k-1,k+1)\} \end{align*} we must have $i_k \geq k+2$. So \begin{align*} \log \frac{\chi_2}{\chi_3} = \log \left(\frac{\sigma_1} {\sigma_{i_1}}\cdots \frac{\sigma_{k-1}}{\sigma_{i_{k-1}}} \frac{\sigma_{k+1}}{\sigma_{i_{k}}}\right) = \log \frac{\sigma_{k+1}}{\sigma_{i_k}} \geq \log \frac{\sigma_{k+1}}{\sigma_{k+2}} \geq C d_S(1,\gamma)-c. \end{align*} \noindent \textbf{Case 2:} Suppose $i_{k-1} \geq k$. Then $i_k \geq k+1$ and $i_j \geq j$ for all $j$ so \begin{align*} \log \frac{\chi_2}{\chi_3} = \log \left(\frac{\sigma_1}{\sigma_{i_1}} \cdots \frac{\sigma_{k-2}}{\sigma_{i_{k-2}}} \frac{\sigma_{k-1}}{\sigma_{i_{k-1}}} \frac{\sigma_{k+1}}{\sigma_{i_{k}}}\right) \geq \log \frac{\sigma_{k-1}}{\sigma_{i_{k-1}}} \geq \log \frac{\sigma_{k-1}}{\sigma_{k}} \geq C d_S(1,\gamma)-c. 
\end{align*} In either case \begin{align*} \log \frac{\mu_2}{\mu_{3}}\left(\bigwedge^k \rho(\gamma)\right)=\log \frac{\chi_2}{\chi_3} \geq C d_S(1,\gamma) -c. \end{align*} Then since $\gamma \in \Gamma$ was arbitrary, we see that $\bigwedge^k \rho$ is $(1,2)$-Anosov. \end{proof} Given subspaces $V_1,\dots,V_k\subset\Rb^d$, we will let $V_1\wedge\dots\wedge V_k$ denote the subspace of $\bigwedge^k\Rb^d$ that is spanned by $\{X_1\wedge\dots\wedge X_k:X_i\in V_i\}$. For $\ell\in\{1,2,D-2, D-1\}$ define maps \begin{align*} \zeta^{(\ell)} : \partial_\infty \Gamma \rightarrow \Gr_\ell\left(\bigwedge^k \Rb^d\right) \end{align*} by \begin{align*} \zeta^{(1)}(x) = \bigwedge^k \xi^{(k)}(x), \end{align*} \begin{align*} \zeta^{(2)}(x) =\left( \bigwedge^{k-1} \xi^{(k-1)}(x) \right) \wedge \xi^{(k+1)}(x), \end{align*} \begin{align*} \zeta^{(D-2)}(x) =\xi^{(d-k-1)}(x) \wedge \left( \bigwedge^{k-1} \Rb^d \right) + \xi^{(d-k)}(x) \wedge \xi^{(d-k+1)}(x) \wedge \left( \bigwedge^{k-2} \Rb^d \right), \end{align*} \begin{align*} \zeta^{(D-1)}(x) = \xi^{(d-k)}(x) \wedge \left(\bigwedge^{k-1} \Rb^d \right). \end{align*} These maps are clearly continuous and $\bigwedge^k \rho$-equivariant. \begin{lemma} $\zeta^{(1)}, \zeta^{(2)}, \zeta^{(D-2)}, \zeta^{(D-1)}$ are the flag maps of $\bigwedge^k \rho$. \end{lemma} \begin{proof} By the density of attracting fixed points in $\partial_\infty \Gamma$ and the continuity of the maps, it is enough to verify that $\zeta^{(j)}(\gamma^+)$ is the attracting fixed point of $\bigwedge^k \rho(\gamma)$ in $\Gr_j(\bigwedge^k \Rb^d)$ when $\gamma^+ \in \partial_\infty \Gamma$ is the attracting fixed point of $\gamma \in \Gamma$. By Property~\eqref{item:hitchin5} in Section~\ref{sec:prelim_Hitchin}, there exists a basis $v_1, \dots, v_d$ of $\Rb^d$ of eigenvectors of $\rho(\gamma)$ such that \begin{align*} \xi^{(j)}(\gamma^+) = \Span\{ v_1,\dots, v_j\} \text{ for } j=1,\dots, d. 
\end{align*} Let $I_1 = \{ d-k+1, d-k+2, \dots, d\}$ and $I_2 = \{ d-k, d-k+2, d-k+3,\dots, d\}$. Then a calculation shows that \begin{align*} \zeta^{(1)}(\gamma^+) = \left[ v_1 \wedge \dots \wedge v_k\right], \end{align*} \begin{align*} \zeta^{(2)}(\gamma^+) = \left\{ v_1 \wedge \dots \wedge v_{k-1} \wedge (av_k+bv_{k+1}) : a,b \in \Rb\right\}, \end{align*} \begin{align*} \zeta^{(D-2)}(\gamma^+) = \Span\left\{ v_{i_1} \wedge \dots \wedge v_{i_k} : \{ i_1, \dots, i_k\} \notin \{ I_1, I_2\} \right\}, \end{align*} \begin{align*} \zeta^{(D-1)}(\gamma^+) = \Span\left\{ v_{i_1} \wedge \dots \wedge v_{i_k} : \{ i_1, \dots, i_k\} \neq I_1 \right\}. \end{align*} So $\zeta^{(j)}(\gamma^+)$ is the attracting fixed point of $\bigwedge^k \rho(\gamma)$ in $\Gr_j(\bigwedge^k \Rb^d)$. \end{proof} \begin{lemma} $\zeta^{(1)}(x) + \zeta^{(1)}(y) + \zeta^{(D-2)}(z)$ is a direct sum for all $x,y,z \in \partial_\infty \Gamma$ distinct. \end{lemma} \begin{proof} Fix $x,y,z \in \partial_\infty \Gamma$ distinct, and choose a basis $v_1,\dots, v_d \in \Rb^d$ such that \begin{align*} [v_\ell] = \xi^{(\ell)}(x) \cap \xi^{(d-\ell+1)}(y) \end{align*} for $1 \leq \ell \leq d$. Next pick $u_1, \dots, u_k \in \Rb^d$ such that \begin{align*} \xi^{(k)}(z) = \Span\{ u_1,\dots, u_k\}. \end{align*} Then $\zeta^{(1)}(z) = [ u_1 \wedge \dots \wedge u_k]$. If $I = \{ 1,\dots, k-1, k+1\}$, then a computation shows that \begin{align*} \zeta^{(1)}(x) + \zeta^{(D-2)}(y) = \Span\left\{ v_{i_1} \wedge \dots \wedge v_{i_k} : \{ i_1, \dots, i_k\} \neq I \right\}. \end{align*} Since \begin{align*} \xi^{(k)}(z) + \left( \xi^{(k)}(x) \cap \xi^{(d-k+1)}(y) \right) + \xi^{(d-k-1)}(y) = \Rb^d \end{align*} is a direct sum and \begin{align*} \left( \xi^{(k)}(x) \cap \xi^{(d-k+1)}(y) \right) + \xi^{(d-k-1)}(y) = \Span \{ v_k, v_{k+2}, \dots, v_d\} \end{align*} we see that \begin{align*} (u_1 \wedge \dots \wedge u_k) \wedge (v_k \wedge v_{k+2} \wedge \dots \wedge v_d) \neq 0. 
\end{align*} This implies that \begin{equation*} \zeta^{(1)}(x) +\zeta^{(1)}(z) + \zeta^{(D-2)}(y) = \bigwedge^k \Rb^d.\qedhere \end{equation*} \end{proof} \section{Real hyperbolic lattices}\label{sec:real_hyp_lattices} The goal of this section is to justify Example \ref{cor:hyperbolic_lattices}. More precisely, we need to prove the following proposition. \begin{proposition}\label{thm:hyperbolic} Suppose $\tau: \PO(m,1) \rightarrow \PGL_d(\Rb)$ is a representation, $\Gamma \leq \PO(m,1)$ is a co-compact lattice, and $\rho = \tau|_{\Gamma} : \Gamma \rightarrow \PGL_d(\Rb)$ is the representation obtained by restricting $\tau$ to $\Gamma$. If $\rho$ is irreducible and $1$-Anosov, then $\rho$ is also $m$-Anosov and \begin{align*} \xi^{(1)}(x) + \xi^{(1)}(y) + \xi^{(d-m)}(z) \end{align*} is a direct sum for all $x,y,z \in \partial_\infty \Gamma$ distinct. Thus, the same is true for any small deformation of $\rho$. \end{proposition} Let $\PO(m,1)\subset\PGL_{m+1}(\Rb)$ be the subgroup that leaves invariant the bilinear pairing that is represented in the standard basis of $\Rb^{m+1}$ by the matrix \[\left( \begin{array}{ccccc} 1&0&\dots&0&0\\ 0&1&\dots&0&0\\ \vdots&\vdots&\ddots&\vdots&\vdots\\ 0&0&\dots&1&0\\ 0&0&\dots&0&-1 \end{array}\right).\] \subsection{Preliminaries} Consider the unit ball $\Bb_m \subset \Rb^m$ endowed with the metric \begin{align*} d(x,y) = \frac{1}{2}\log \frac{\norm{y-a}_2\norm{x-b}_2}{\norm{x-a}_2\norm{y-b}_2} \end{align*} where $a,b \in \partial \Bb_m \cap ( x+\Rb(y-x) )$ are ordered as $a,x,y,b$, and $\norm{\cdot}_2$ is the standard Euclidean norm on $\Rb^m$. The metric space $(\Bb_m, d)$ is usually known as the \emph{Klein-Beltrami model} of real hyperbolic $m$-space. Further, $\PO(m,1)$ acts transitively and by isometries on $(\Bb_m, d)$ via fractional linear transformations, that is \begin{align*} \begin{bmatrix} A & u \\ {^tv} & a \end{bmatrix} \cdot x = \frac{ Ax + u}{{^tv}x + a}. 
\end{align*} Using the formula for the distance, one can compute that $d(e^{sH} \cdot 0, 0) = s$, where \[H:= \begin{bmatrix} 0 & e_1 \\ {^t e_1} & 0 \end{bmatrix}.\] In fact, one can verify that the map $\gamma_0:\Rb\to\Bb_m$ given by \[\gamma_0:s\mapsto\tanh(s)e_1=e^{sH}\cdot 0\] is a unit-speed geodesic in $\Bb_m$ with $-e_1$ and $e_1$ as its backward and forward endpoints respectively. A computation also verifies that $K:=\{g\in\PO(m,1):g\cdot 0=0\}$ is given by \begin{align}\label{eqn:K} K = \left\{ \begin{bmatrix} A & 0 \\ 0 & \sigma \end{bmatrix}: \sigma \in \{-1,1\}, \ A \in {\rm O}(m)\right\}. \end{align} In particular, $K$ acts transitively on the set of unit vectors in $T_0\Bb_m$. Since $\PO(m,1)$ acts transitively on $\Bb_m$, this implies that $\PO(m,1)$ acts transitively on the unit tangent bundle of $\Bb_m$. Also, if $p\in\Bb_m$, then $d(0,p)=d(0,k\cdot p)$ for all $k\in K$. This, together with the $KAK$-decomposition theorem \cite[Theorem 7.39]{knapp}, implies the following observation. \begin{observation}\label{obs:KAK} If $g \in \PO(m,1)$, then there exists $k_1, k_2 \in K$ such that \begin{align*} g = k_1 e^{d(g \cdot 0, 0)H} k_2. \end{align*} \end{observation} Recall that an element $g \in \PO(m,1)$ is called \emph{hyperbolic} if there exists some geodesic $\gamma: \Rb \rightarrow \Bb_m$ and some $\ell(g) > 0$ such that \begin{align*} g(\gamma(t)) = \gamma(t+\ell(g)) \end{align*} for all $t \in \Rb$. The number $\ell(g)$ is called the \emph{translation length of $g$}. For co-compact lattices in $\PO(m,1)$, we have the following proposition. \begin{proposition} If $\Gamma \leq \PO(m,1)$ is a co-compact lattice and $\gamma \in \Gamma$ has infinite order, then $\gamma$ is a hyperbolic element. \end{proposition} \begin{proof} See for instance~\cite[Chapter 12, Proposition 2.6]{dC1992}. \end{proof} Let $\gamma_0$ be the geodesic as defined above, and let $M$ be the subgroup of $\PO(m,1)$ that fixes the image of the geodesic $\gamma_0$ pointwise, i.e. 
\[M := \left\{ k \in K: k \cdot e_1 = e_1\right\}.\] \begin{proposition}\label{prop:conj} If $h \in \PO(m,1)$ is hyperbolic, then $h=ge^{\ell(h)H}k g^{-1}$ for some $k \in M$ that commutes with $e^{\ell(h)H}$. \end{proposition} \begin{proof} Since $h$ is hyperbolic, there exists some geodesic $\gamma: \Rb \rightarrow \Bb_m$ such that $h \gamma(t) = \gamma(t+\ell(h))$ for all $t \in \Rb$. Also, since $\PO(m,1)$ acts transitively on the unit tangent bundle of $\Bb_m$, there exists $g \in \PO(m,1)$ so that $g\circ\gamma=\gamma_0$. Since $h$ translates along $\gamma$ by $\ell(h)$ and $e^{-\ell(h)H}$ translates along $\gamma_0$ by $-\ell(h)$, we see that $e^{-\ell(h)H}ghg^{-1}=ghg^{-1}e^{-\ell(h)H}$ fixes the image of $\gamma_0$ pointwise, and therefore lies in $M$. Hence, there is some $k\in M$ so that \begin{align*} ghg^{-1}=e^{\ell(h)H}k=ke^{\ell(h)H}. \end{align*} \end{proof} \subsection{Proof of Proposition~\ref{thm:hyperbolic}} Let $\tau$, $\rho$, and $\Gamma$ satisfy the hypothesis of Proposition~\ref{thm:hyperbolic}. To prove Proposition \ref{thm:hyperbolic}, we use the following two lemmas. Let $\tau_0 = \tau|_{e^{\Rb \cdot H}}$ and let $\overline{\tau}_0 : e^{\Rb \cdot H} \rightarrow \SL_d(\Rb)$ be the lift of $\tau_0$ (since $\Rb$ is simply connected, such a lift exists). \begin{lemma} \label{lem:prox} $\overline{\tau}_0\left(e^H\right)$ is proximal and the eigenvalue with maximal modulus is a positive real number. \end{lemma} \begin{proof}The group $\tau(M)$ is a compact subgroup of $\PGL_d(\Rb)$, so every element in $\tau(M)$ is elliptic. Now suppose that $\gamma \in \Gamma$ has infinite order. Since $\rho$ is $1$-Anosov, $\tau(\gamma)$ has a representative in $\SL_d^\pm(\Rb)$ whose eigenvalue of maximal absolute value has multiplicity $1$. On the other hand, by Proposition~\ref{prop:conj}, $\gamma$ is conjugate to $k e^{s H}$ for some $s > 0$ and $k \in M$. 
Then since $\tau(k)$ is elliptic and commutes with $\tau(e^{sH})$, the eigenvalues of $\tau(e^{sH})$ and $\tau(k)\tau(e^{sH}) = \tau(k e^{sH})$ have the same absolute values. So $\tau(e^{sH})$ also has a unique eigenvalue with maximal absolute value. This implies that $\tau(e^{tH})$ is proximal for every $t > 0$. Since $\overline{\tau}_0(\id) = \id$ has all positive eigenvalues, we see that the eigenvalue with maximal modulus of $\overline{\tau}_0(e^{tH})$ is positive for all $t \geq 0$. \end{proof} \begin{lemma}\label{lem:2espace} Let $e^\lambda$ denote the eigenvalue of $\overline{\tau}_0\left(e^H\right)$ with maximal modulus. There is some integer $k$ so that the set of eigenvalues of $\overline{\tau}_0\left(e^H\right)$ is \[\{e^{\lambda-n}:0\leq n\leq k\}.\] Furthermore, the eigenspace corresponding to $e^{\lambda-1}$ has dimension $m-1$. \end{lemma} The proof of Lemma \ref{lem:2espace} is a standard argument from the theory of weight spaces. We give this argument in Appendix \ref{app:lem}. With Lemmas \ref{lem:prox} and \ref{lem:2espace}, we can prove Proposition \ref{thm:hyperbolic}. \begin{proof}[Proof of Proposition \ref{thm:hyperbolic} ] By Lemmas \ref{lem:prox} and \ref{lem:2espace}, the eigenvalues of $\overline{\tau}_0(e^{sH})$ are \begin{align*} e^{\lambda s}, e^{(\lambda-1)s}, \dots, e^{(\lambda-1)s}, e^{(\lambda-2)s}, \dots, \end{align*} and the multiplicity of $e^{(\lambda-1)s}$ is $m-1$. In particular, \begin{align*} \frac{ \mu_{m}}{ \mu_{m+1}}(\tau(e^{sH})) = e^{s}. \end{align*} Also, the group $\tau(K)\subset\PGL_d(\Rb)$ is compact, so it lifts to a compact subgroup $\wh{K} \subset\SL_d^{\pm}(\Rb)$. Hence, there exists some $C > 1$ such that \begin{align*} \frac{1}{C} \mu_i(T) \leq \mu_i\Big(k_1 T k_2\Big) \leq C \mu_i(T) \end{align*} for all $1 \leq i \leq d$, all $k_1, k_2 \in \wh{K}$, and all $T \in \End(\Rb^d)$. 
By Observation~\ref{obs:KAK}, \begin{align*} \log \frac{ \mu_{m}}{ \mu_{m+1}}(\rho(\gamma)) \geq \log \frac{ \mu_{m}}{ \mu_{m+1}}\left(\tau\left(e^{d(\gamma\cdot 0,0)H}\right)\right)-\log (C^2) = d(\gamma \cdot 0, 0)-\log (C^2), \end{align*} which implies that $\rho$ is $m$-Anosov. Since $\rho$ is the restriction of $\tau$ to $\Gamma$, the $\rho$-equivariant flag maps \[\xi^{(i)} : \partial_\infty\Gamma\simeq\partial \Bb_m \rightarrow \Gr_i(\Rb^d)\] are $\tau$-equivariant for $i=1,d-1,m,d-m$. Further, by the description of $K$ given by \eqref{eqn:K}, we see that $\PO(m,1)$ acts transitively on triples of distinct points $x,y,z \in \partial \Bb_m$. Thus it is enough to show that \begin{align*} \xi^{(1)}(x) + \xi^{(1)}(y) + \xi^{(d-m)}(z) \end{align*} is direct for some $x,y,z \in \partial \Bb_m$ distinct. Fix $y,z \in \partial \Bb_m$ distinct. Then since $\tau$ is irreducible we must have \begin{align*} \Rb^d = \Span \{ \xi^{(1)}(x) : x \in \partial \Bb_m\} \end{align*} and so there exists some $x \in \partial \Bb_m$ such that \begin{align*} \xi^{(1)}(x) + \xi^{(1)}(y) + \xi^{(d-m)}(z) \end{align*} is direct. \end{proof} \appendix \section{Proof of Theorem \ref{thm:cones}} \begin{proof} First notice that $\Cc_\lambda(\Lambda)$ is invariant under conjugation in $\SL_d(\Rb)$, i.e. $\Cc_\lambda(\Lambda)=\Cc_\lambda(g\Lambda g^{-1})$ for all $g\in \SL_d(\Rb)$. Further, if $h \in \SL_d(\Rb)$, then from the geometric description of the Cartan projection given in Section~\ref{sec:properties}, there exists some $C > 0$ such that \begin{align*} \norm{\mu(g) - \mu(hgh^{-1}) }_2 \leq C \end{align*} for all $g \in \SL_d(\Rb)$. Hence $\Cc_\mu(\Lambda)$ is also invariant under conjugation in $\SL_d(\Rb)$. Let $\sL_d(\Rb) = \kL + \pL$ denote the standard Cartan decomposition of $\sL_d(\Rb)$, that is \begin{align*} \kL = \{ X \in \sL_d(\Rb) : {^tX} = -X\}\,\,\,\text{ and }\,\,\,\pL = \{ X \in \sL_d(\Rb) : {^tX} = X\}. \end{align*} Let $\gL$ denote the Lie algebra of $G$. 
Using Theorem 7 in~\cite{M1955} and conjugating $G$ we may assume that \begin{align*} \gL = \kL \cap \gL + \pL \cap \gL \end{align*} is a Cartan decomposition of $\gL$. Fix a maximal abelian subspace $\aL \subset \pL \cap \gL$. By~\cite[Chapter V, Lemma 6.3]{H2001}, there exists some $k \in \SO(d)$ such that $\Ad(k) \aL$ is a subspace of the diagonal matrices in $\sL_d(\Rb)$. Since $\Ad(k) \pL = \pL$, by replacing $G$ with $kGk^{-1}$ we can assume that $\aL$ is itself a subspace of the diagonal matrices. Finally fix a Weyl chamber $\aL^+$ of $\aL$. Next let $K \subset G$ denote the subgroup corresponding to $\kL \cap \gL$, let $A = \exp(\aL)$, and let $A^+ = \exp(\aL^+)$. By~\cite[Chapter IX, Theorem 1.1]{H2001}, each $g \in G$ can be written as \begin{align*} g = k_1 \exp( \mu_G(g) ) k_2 \end{align*} where $k_1, k_2 \in K$ and $\mu_G(g) \in \overline{\aL^+}$ is unique. The map $\mu_G : G \rightarrow \overline{\aL^+}$ is called the \emph{Cartan projection of $G$ relative to the decomposition $G = K \overline{A}^+ K$.} Since $K \subset \SO(d)$ and $\aL$ is a subspace of the diagonal matrices, the diagonal entries of $\mu_G(g)$ coincide with the entries of $\mu(g)$ up to permuting indices. Every $g \in G$ can be written as a product $g=g_e g_h g_u$ of commuting elements, where $g_e$ is elliptic, $g_h$ is hyperbolic, and $g_u$ is unipotent. This is called the \emph{Jordan decomposition of $g$ in $G$}. The element $g_h$ is conjugate to a unique element $\exp(\lambda_G(g)) \in \overline{A^+}$ and the map $\lambda_G : G \rightarrow \overline{\aL^+}$ is called the \emph{Jordan projection}. Since $G$ is an irreducible real algebraic subgroup of $\SL_d(\Rb)$, the Jordan decomposition in $G$ coincides with the standard Jordan decomposition in $\SL_d(\Rb)$. Then, since $\aL$ is a subspace of the diagonal matrices, the diagonal entries of $\lambda_G(g)$ coincide with the entries of $\lambda(g)$ up to permuting indices. 
Next define cones $\Cc_1, \Cc_2 \subset \aL^+$ as follows: \begin{align*} \Cc_1 := \overline{\bigcup_{\gamma \in \Gamma} \Rb_{>0} \cdot \lambda_G(\gamma)} \end{align*} and \begin{align*} \Cc_2 := \{ x \in \overline{\aL^+} : \exists \gamma_n \in \Gamma, \exists t_n \searrow 0, \text{ with } \lim_{n \rightarrow \infty} t_n \mu_G(\gamma_n) =x\}. \end{align*} Then the main result in~\cite{B1997} says that $\Cc_1 = \Cc_2$. Since $\mu_G(g)$ and $\mu(g)$ (respectively $\lambda_G(g)$ and $\lambda(g)$) coincide up to permuting indices, this implies that $\Cc_\mu(\Lambda) = \Cc_\lambda(\Lambda)$. \end{proof} \section{Proof of Lemma \ref{lem:2espace}\label{app:lem} } Let $\mathfrak{so}(m,1)$ denote the Lie algebra of $\PO(m,1)$, and let $e_1,\dots, e_{m+1}$ be the standard basis of $\Rb^{m+1}$. By fixing the signature $(m,1)$-form on $\Rb^{m+1}$ that is represented in this basis by the matrix \begin{align*} \begin{bmatrix} 1& 0 & \cdots & 0 & 0 \\ 0 & 1 & & & 0\\ \vdots & & \ddots & & \vdots\\ 0 & & &1 & 0\\ 0 & 0 & \cdots & 0 & -1 \end{bmatrix}, \end{align*} one can compute that \begin{align*} \mathfrak{so}(m,1) = \left\{ \begin{bmatrix} A & u \\ {^tu} & 0 \end{bmatrix} : {^tA}=-A \right\}. \end{align*} Define the following subspaces of $\mathfrak{so}(m,1)$: \begin{align*} \aL & = \left\{ \begin{bmatrix} 0 & \lambda e_1 \\ \lambda {^te_1} & 0 \end{bmatrix} : \lambda \in \Rb \right\}, \\ \gL_0 & = \left\{ \begin{bmatrix} A & \lambda e_1 \\ \lambda {^te_1} & 0 \end{bmatrix} : {^tA}=-A, \quad Ae_1 =0, \text{ and } \lambda \in \Rb \right\}, \\ \gL_{-1} & = \left\{ \begin{bmatrix} -u{^te_1} +e_1 {^tu} & u \\ {^tu} & 0 \end{bmatrix} : \ip{u,e_1} =0\right\}, \text{ and}\\ \gL_{1} & = \left\{ \begin{bmatrix} u{^te_1} - e_1 {^tu} & u \\ {^tu} & 0 \end{bmatrix} : \ip{u,e_1} =0\right\}. 
\end{align*} Then $\mathfrak a\subset\mathfrak g_0$ is a maximal abelian subalgebra, and the decomposition \[\mathfrak{so}(m,1) = \gL_0 + \gL_{-1} + \gL_{1}\] is the associated (restricted) \emph{root space decomposition} of $\mathfrak{so}(m,1)$. Recall that \[H:= \begin{bmatrix} 0 & e_1 \\ {^t e_1} & 0 \end{bmatrix}\in\mathfrak{so}(m,1).\] The following lemma states some basic properties of the root space decomposition \cite[Chapter II.1]{knapp}, and can be verified explicitly in this special case. \begin{lemma}\label{obs:rootspace}\ \begin{enumerate} \item Let $\sigma\in\{0,1,-1\}$, and $Y \in \gL_\sigma$. Then \[[H,Y]=\sigma Y\,\,\,\text{ and }\,\,\, \Ad \left( e^{s H} \right) Y= e^{\sigma s} Y.\] \item Let $\alpha, \beta \in \{0,-1,1\}$. Then $[\gL_\alpha, \gL_\beta] \subset \gL_{\alpha+\beta}$, where $\gL_{-2}:=\{0\}=:\gL_2$. \end{enumerate} \end{lemma} Next, suppose that $\tau:\PO(m,1)\to\PGL_d(\Rb)$ is an irreducible representation so that $\tau(e^{H})$ is proximal and $\overline{\tau}_0 : e^{\Rb \cdot H} \rightarrow \SL_d(\Rb)$ is the lift of $\tau_0:=\tau|_{e^{\Rb \cdot H}}$. Let $\mathfrak{sl}_d(\Rb)$ denote the Lie algebra of $\PGL_d(\Rb)$ and let $d\tau:\mathfrak{so}(m,1)\to\mathfrak{sl}_d(\Rb)$ be the derivative at the identity of the homomorphism $\tau:\PO(m,1)\to\PGL_d(\Rb)$. The next lemma gives a description of the eigenvalues and eigenspaces of $\overline{\tau}_0(e^{H})$. \begin{lemma}\label{lem:weights} Let $e^\lambda$ denote the largest eigenvalue of $\overline{\tau}_0(e^{H})$ and let $V_0 \subset \Rb^d$ denote the eigenspace of $\overline{\tau}_0(e^{H})$ corresponding to $e^\lambda$. For $n \in \Nb$, define \begin{align*} V_{n+1} := d\tau(\gL_{-1}) V_{n}. \end{align*} \begin{enumerate} \item If $v \in V_n$, then $\overline{\tau}_0\left(e^{H}\right)v = e^{\lambda -n}v$. \item If $Z \in \gL_0$, then $d\tau(Z)V_n \subset V_n$. \item If $Z \in \gL_{1}$, then $d\tau(Z)V_0 = \{0\}$ and $d\tau(Z) V_n \subset V_{n-1}$ when $n>0$. 
\item $\sum_{n \geq 0} V_n = \Rb^d$. \end{enumerate} \end{lemma} \begin{proof} (1): By definition $v = d\tau(Y)w$ for some $Y \in \gL_{-1}$ and $w \in V_{n-1}$. Then by induction \begin{align*} \overline{\tau}_0\left(e^{H}\right)d\tau(Y)w &= d\tau( \Ad(e^{H})Y )\overline{\tau}_0\left(e^{H}\right)w \\ &= d\tau( e^{-1}Y )\left(e^{\lambda-(n-1)}w\right) \\ & = e^{\lambda-n} d\tau( Y )w, \end{align*} where the second equality is a consequence of (1) of Lemma \ref{obs:rootspace}. (2): Fix some $v \in V_n$. Then by definition $v = d\tau(Y)w$ for some $Y \in \gL_{-1}$ and $w \in V_{n-1}$. Then $[Z,Y] \in \gL_{-1}$ by (2) of Lemma \ref{obs:rootspace}, so \begin{align*} d\tau(Z)d\tau(Y)w = d\tau([Z,Y])w - d\tau(Y)d\tau(Z)w \in V_n \end{align*} by induction. (3): If $v_0 \in V_0$, then \begin{align*} \overline{\tau}_0\left(e^H\right)d\tau(Z)v_0 &= d\tau( \Ad(e^H)Z )\overline{\tau}_0\left(e^H\right)v_0 \\ & = e^{\lambda+1} d\tau( Z )v_0. \end{align*} Since $e^\lambda$ is the largest eigenvalue of $\overline{\tau}_0(e^H)$ we must have $d\tau(Z)v_0=0$. Since $v_0 \in V_0$ was arbitrary, we then have $d\tau(Z)V_0 = \{0\}$. Next fix some $v \in V_n$. Then by definition $v = d\tau(Y)w$ for some $Y \in \gL_{-1}$ and $w \in V_{n-1}$. Then $[Z,Y] \in \gL_{0}$ by (2) of Lemma \ref{obs:rootspace}, so \begin{align*} d\tau(Z)d\tau(Y)w = d\tau([Z,Y])w - d\tau(Y)d\tau(Z)w \in V_{n-1} \end{align*} by (2) and induction. (4): The previous parts show that $\sum_{n \geq 0} V_n$ is an $d\tau$ and hence $\tau$ invariant subspace. Since $\tau$ is irreducible, we then have $\sum_{n \geq 0} V_n=\Rb^d$. \end{proof} \begin{proof}[Proof of Lemma \ref{lem:2espace}] The first statement of the lemma is an immediate consequence of Lemma \ref{lem:prox} and \ref{lem:weights}. To prove the second statement, fix some non-zero $v_0 \in V_0$, and consider the linear map $T: \gL_{-1} \rightarrow V_1$ given by \begin{align*} T(Y) = d\tau(Y)v_0. 
\end{align*} Since $T$ is onto and $\dim_{\Rb} \gL_{-1} = m-1$, we see that $\dim_{\Rb} V_1 \leq m-1$. It is now sufficient to prove that $\ker T = \{0\}$. To see this, again let \begin{align*} M := \left\{ k \in K: k \cdot e_1 = e_1\right\}. \end{align*} Then a calculation shows that $\Ad(M)$ preserves and acts irreducibly on $\gL_{-1}$. Notice that $\tau(M)v_0 \subset V_0$ is a compact connected set and so $\tau(M)v_0 = v_0$. Further, if $Y \in \gL_{-1}$ and $k \in M$, then \begin{align*} T(\Ad(k)Y) = d\tau\left(\Ad(k)Y\right)v_0 = \tau(k)d\tau(Y) \tau(k^{-1}) v_0 = \tau(k)T(Y). \end{align*} So $\ker T$ is an $\Ad(M)$-invariant subspace. So either $\ker T = \{0\}$ or $\ker T = \gL_{-1}$. If $\ker T = \gL_{-1}$, then $V_0= \Rb^d$ and since $d > 1$ this is impossible. So $\ker T =\{0\}$ and hence $\dim V_1 \geq m-1$. \end{proof} \end{document}
\begin{document} \title[The Weak Order] {Inner tableau translation property of the weak order and related results} \author{ M\"{U}GE TA\c{S}K{I}N } \address{Bogazici Universitesi, Istanbul, Turkey} \thanks{This research forms a part of the author's doctoral thesis at the Univ. of Minnesota, under the supervision of Victor Reiner, and partially supported by NSF grant DMS-9877047.} \begin{abstract} Let $SYT_{n}$ be the set of all standard Young tableaux with $n$ cells and $\leq_{weak}$ be Melnikov's weak order on $SYT_n$. The aim of this paper is to introduce a conjecture on the weak order, named the {\it property of inner tableau translation}, and discuss its significance. We will also prove the conjecture for some special cases.\end{abstract} \maketitle \section{Introduction} The weak order was first introduced by Melnikov and is well studied in \cite{Melnikov1, Melnikov2,Melnikov3} due to its strong connections to the Kazhdan-Lusztig and geometric orders on standard Young tableaux, where the latter two are induced from the representation theory of the special linear Lie algebra and the symmetric group. We have the following inclusions among these three orders: $$ \text{ weak order } \subsetneq \text{ Kazhdan-Lusztig (KL) order } \subseteq \text{ geometric order } . $$ The fact that its definition uses just the combinatorics of tableaux, such as Knuth relations and the weak order on the symmetric group, gives the weak order an important place among these orders. On the other hand the only justification for its well-definedness is induced from the above inclusion; in other words there is no self-contained proof of this basic fact. Our aim here is to bring attention to the following conjecture on the weak order (which was first asked in \cite{Taskin}), called the property of {\it inner tableau translation}.
This property is known to be satisfied by the Kazhdan-Lusztig and geometric orders and its importance for the weak order relies on the fact that it provides a self-contained proof for the well-definedness of this order. \begin{conjecture} Given two tableaux $S<_{weak}T$ having the same inner tableau $R$, replacing $R$ with another same shape tableau $R'$ in $S$ and $T$ still preserves the weak order. \end{conjecture} In the following we first provide the definitions and related background for the weak order. In the third section, by assuming the conjecture we will provide a self-contained proof for the well-definedness of the weak order and we close this section with a discussion on how this conjecture plays a specific role in the study of the Poirier-Reutenauer Hopf algebra on standard Young tableaux. In the last section, we prove the conjecture for the case when the inner tableau $R$ has hook shape or a shape which consists of two rows or two columns. \section{Related background} \subsection{Definition of the weak order} The definition of the weak order uses the well-known Robinson-Schensted $(RSK)$ correspondence which bijectively assigns to every permutation $w\in S_n$ a pair of same shape tableaux $(I(w), R(w)) \in SYT_{n}\times SYT_{n}$, where $I(w)$ and $R(w)$ are called the {\it insertion} and {\it recording tableau} of $w$ respectively. On the other hand an equivalence relation $\Knuth$ on $S_n$ due to Knuth \cite{Knuth1} plays a crucial role in this correspondence. Namely: $$ u~\Knuth~ w \iff I(u)=I(w).$$ We will denote the corresponding equivalence classes in $S_n$ by $\{\mathcal{Y}_T\}_{T\in SYT_n}$. Let us explain these algorithms briefly. Denote by $(I_{i-1},R_{i-1})$ the same shape tableaux obtained by insertion and recording algorithms on the first $i-1$ indices of $w=w_1\ldots w_n$.
In order to get $I_i$, if $w_{i}$ is greater than the last number on the first row of $I_{i-1}$, it is concatenated to the right side of the first row of $I_{i-1}$; otherwise, $w_i$ replaces the smallest number, say $a$, among all numbers in the first row greater than $w_i$ and the insertion algorithm is then applied to $a$ on the next row. Observe that after finitely many steps the insertion algorithm terminates with a new added cell. The resulting tableau is then $I_i$ and the recording tableau $R_i$ is found by filling this new cell in $R_{i-1}$ with the number $i$. We illustrate these algorithms with the following example. \begin{example} Let $w=52413$. Then, $$\begin{aligned} &I_1=5 &\Rightarrow I_2=\begin{array}{c}2\\5\end{array}&\Rightarrow I_3=\begin{array}{cc}2&4\\5\end{array}&\Rightarrow I_4=\begin{array}{cc}1&4\\2\\5\end{array}&\Rightarrow I_5=\begin{array}{cc}1&3\\2&4\\5\end{array}=I(w)\\ &R_1=1&\Rightarrow R_2=\begin{array}{c}1\\2\end{array}&\Rightarrow R_3=\begin{array}{cc}1&3\\2\end{array}&\Rightarrow R_4=\begin{array}{cc}1&3\\2\\4\end{array}&\Rightarrow R_5=\begin{array}{cc}1&3\\2&5\\4\end{array}=R(w) \end{aligned} $$ \end{example} \begin{definition} We say $u, w \in \sym_{n}$ differ by one {\it Knuth relation}, written $u \stackrel{K}{\cong} w$, if $$ \begin{array}{cl} \mbox{either } & w=x_{1} \ldots yxz \ldots x_{n} \mbox{ and } u=x_{1} \ldots yzx \ldots x_{n} \\ \mbox{or } & w=x_{1} \ldots xzy \ldots x_{n} \mbox{ and } u=x_{1} \ldots zxy \ldots x_{n} \end{array} $$ for some $ x<y<z$. Two permutations are called {\it Knuth equivalent}, written $u \stackrel{K}{\cong}w$, if there is a sequence of permutations such that \[ u=u_{1} \stackrel{ K}{\cong} u_{2} \ldots \stackrel{K}{\cong} u_{k}= w.\] \end{definition} Sch\"utzenberger's {\it jeu de taquin} slides \cite{Schutzenberger2} are one of the combinatorial operations on tableaux that we apply often in the following sections.
\begin{definition} Let $\lambda=(\lambda_{1},\lambda_{2}, \ldots,\lambda_{k}) $ and $\mu=(\mu_{1},\mu_{2}, \ldots,\mu_{l}) $ be two Ferrers diagrams such that $\mu \subset \lambda$. Then the corresponding {\it skew diagram} is defined to be the set of cells $$ \lambda/\mu=\{ c : c \in \lambda, c \notin \mu\}. $$ A skew diagram is called {\it normal} if $\mu=\varnothing$. A {\it partial skew tableau} of shape $\lambda/\mu$ is an array of distinct integers whose rows and columns increase. A {\it standard skew tableau} of shape $\lambda/\mu$ is a partial skew tableau whose elements are $\{1,2,\ldots,n\}$. \end{definition} We next illustrate the forward and backward slides of Sch\"utzenberger's {\it jeu de taquin} \cite{Schutzenberger2} without the definition. \begin{example} Let $P=\begin{array}{ccc}&&4 \\&2&5 \\1& 3 & \\\end{array}$ and $ Q=\begin{array}{ccc} & 2 & 4 \\& 3& 5 \\1&&\\ \end{array}$. Below we illustrate a forward and backward slide on $P$ and $Q$ through the cells indicated by dots. $$ \begin{array}{ccc}& \bullet & 4 \\& 2 & 5 \\1& 3 & \\\end{array}\rightarrow \begin{array}{ccc}& 2 & 4 \\& \bullet & 5 \\1& 3 & \\\end{array}\rightarrow \begin{array}{ccc}& 2 & 4 \\& 3 & 5\\1& \bullet & \\\end{array}= \begin{array}{ccc}& 2 & 4 \\& 3& 5 \\1& & \\\end{array}$$ $$ \begin{array}{ccc} & 2 & 4 \\& 3 & 5\\1& \bullet & \\\end{array} \rightarrow \begin{array}{ccc} & 2 & 4 \\& \bullet & 5 \\1& 3 & \end{array} \rightarrow \begin{array}{ccc} & \bullet & 4 \\& 2 & 5 \\1& 3 & \end{array} = \begin{array}{ccc} & & 4 \\ & 2 & 5 \\ 1& 3 & \\ \\ \end{array}$$ \end{example} The other main ingredient of the weak order is the ({\it right}) {\it weak} {\it Bruhat order}, $\leq_{weak}$, on $S_n$ which is obtained by taking the transitive closure of the following relation: $$u \leq_{weak} w ~\text{if}~ w=u \cdot s_i \text{ and } \len(w)=\len(u)+1$$ where $s_i$ denotes the adjacent transposition $(i, i+1)$ and $\len(w)$ measures the size of a reduced word of $w$.
The weak order has an alternative characterization \cite[Prop. 3.1]{Bjorner2} in terms of {\it (left) inversion sets} namely $$u \leq_{weak} w ~\text{if and only if} ~~ \Inv_L(u)\subset \Inv_L(w)$$ where $\Inv_L(u):=\{(i,j): 1 \leq i < j \leq n ~\text{ and }~ u^{-1}(i) > u^{-1}(j)\}$. \begin{definition} \label{weak-order-def} The {\it weak order} $(SYT_n,\leq_{weak})$, first introduced by Melnikov \cite{Melnikov1} under the name {\it induced Duflo order}, is the partial order induced by taking transitive closure of the following relation: $$ \begin{aligned} S\leq_{weak} T & \mbox{ if there exist } ~\sigma\in \kc_S, ~\tau\in \kc_T \mbox{ such that } ~\sigma\leq_{weak} \tau. \end{aligned} $$ \end{definition} The necessity of taking the transitive closure in the definition of the weak order is illustrated by the following example (cf. Melnikov \cite[Example 4.3.1]{Melnikov1}). \begin{example} Let $R=\scriptstyle{\begin{array}{ccc} 1& 2 & 5 \\ 3& 4 & \end{array}}$, \hskip .05in $S=\scriptstyle{\begin{array}{ccc} 1& 4 & 5 \\ 2& & \\ 3 & & \end{array}}$, \hskip .05in $T=\scriptstyle{ \begin{array}{ccc} 1 & 4 & \\ 2 & 5 & \\ 3 & &\end{array}}$ with $$ \begin{array}{ll} & \mathcal{Y}_{R}=\{ 31425, 34125, 31452, 34152, 34512 \}, \\ & \mathcal{Y}_{S}=\{ 32145, 32415, 32451, 34215, 34251 ,34521 \}, \\ & \mathcal{Y}_{T}= \{ 32154, 32514, 35214, 32541, 35241\}. \\ \end{array} $$ Here $R <_{weak} S $ since $34125 <_{weak} 34215$, and $S <_{weak} T $ since $32145 <_{weak} 32154$. Therefore $ R <_{weak} T $. On the other hand, for every $ \rho \in \mathcal{Y}_{R}$ and for every $\tau \in \mathcal{Y}_{T}$ we have $(2,4) \in \Inv_L(\rho)$ but $(2,4) \notin \Inv_{L}(\tau)$. 
\end{example} \begin{figure} \caption{\label{figure1}} \end{figure} \subsection{Some basic properties of the weak order} For $u\in S_n$ and $1\leq i< j \leq n$, let $u_{[i,j]}$ be the word obtained by restricting $u$ to the segment $[i,j]$ and $\st(u_{[i,j]}) \in S_{j-i+1}$ be the permutation obtained from $u_{[i,j]}$ by subtracting $i-1$ from each letter. Similarly for $S\in SYT_n$ and $1\leq i< j \leq n$, let $S_{[i,j]}$ be the normal shape tableau obtained by restricting $S$ to the segment $[i,j]$ and by applying Sch\"utzenberger's backward jeu-de-taquin slides. Then $\st(S_{[i,j]}) \in SYT_{j-i+1}$ is the tableau obtained from $S_{[i,j]}$ by subtracting $i-1$ from each letter. In fact $\Inv_L(u)\subset \Inv_L(w)$ gives $\Inv_L(u_{[i,j]})\subset \Inv_L(w_{[i,j]})$ for all $1\leq i < j \leq n$ and hence \begin{equation}\label{welldefined-weak-order1} u \leq_{weak} w ~\mbox{ implies } ~\st(u_{[i,j]}) \leq_{weak} \st(w_{[i,j]}) ~\mbox{ for all } ~1\leq i<j\leq n. \end{equation} The following basic facts about $RSK$, Knuth equivalence, and jeu-de-taquin are essentially due to Knuth and Sch\"utzenberger; see Knuth \cite[Section 5.1.4]{Knuth} for detailed explanations. \begin{lemma} \label{j-d-t-initial-final} Given $u \in S_n$, let $I(u)$ be the insertion tableau of $u$. Then for $1\leq i<j \leq n$, $$ \st(I(u)_{[i,j]}) = I(\st(u_{[i,j]})).$$ \end{lemma} Therefore we have the following: \begin{lemma} \label{inner.restriction.lemma}The weak order {\it restricts to segments}, i.e., $$S \leq_{weak} T~~\text{ implies }~~\st(S_{[i,j]}) \leq_{weak} \st(T_{[i,j]})~~ \text{ for all } ~~1\leq i<j \leq n. $$ \end{lemma} \begin{remark} Melnikov shows in \cite[Page 45]{Melnikov1} that the geometric order also restricts to segments.
On the other hand the same fact about Kazhdan-Lusztig order was first shown by Barbash and Vogan \cite{Barbash-Vogan} for arbitrary finite Weyl groups (see also work by Lusztig \cite{Lusztig2}) whereas the generalization to Coxeter groups is due to Geck \cite[Corollary 3.4]{Geck}. \end{remark} Now recall that {\it (left) descent set} of a permutation $\tau$ is defined by $$ \Des_{L}(\tau) :=\{i: 1 \leq i \leq n-1 ~\text{ and }~ \tau^{-1}(i) > \tau^{-1}(i+1) \}. \\ $$ On the other hand the {\it descent set} of the standard Young tableau $T$ is described intrinsically by $$ \begin{aligned} \Des(T) &:= \{(i,i+1): 1 \leq i \leq n-1 \text{ and } \\ & \qquad \qquad i+1 \text{ appears in a row below } i \text{ in }T\}. \end{aligned} $$ As a consequence of a well-known properties of $RSK$ we have the following basic fact: \begin{lemma} \label{descent.lemma0} For any $\tau \in \mathcal{Y}_{T}$ we have $$ \Des_{L}(\tau)=\Des(T) $$ i.e., the left descent set is constant on Knuth classes. \end{lemma} We let $(2^{[n-1]}, \subseteq)$ be the Boolean algebra of all subsets of $[n-1]$ ordered by inclusion. \vskip .2in \begin{lemma} \label{descent.lemma} Let $\leq$ be any order on $SYT_n$ which is stronger than the weak order and restricts to segments. Then the map $$(SYT_n, \leq) \mapsto (2^{[n-1]}, \subseteq)$$ sending any tableau $T$ to its descent set $\Des(T)$ is order preserving. \end{lemma} We denote by $(\Par_n, \leq^{op}_{dom})$ the set of all partitions of the number $n$ ordered by the {\it opposite (or dual) dominance order}, that is, $\lambda \leq^{op}_{dom} \mu$ if $$ \lambda_1 + \cdots + \lambda_k \geq \mu_1 + \cdots + \mu_k \text{ for all } k. $$ The following can be easily deduced from Greene's theorem \cite{Greene}. 
\begin{lemma} \label{shape.lemma} $S\leq_{weak} T$ implies $\sh(T) \leq_{dom} \sh(S) $ \end{lemma} Recall that for a standard young tableau $T$, $T^t$ denotes the transpose of $T$ whereas $T^{evac}$ denotes the tableau found by applying the Sch\"utzenberger's \cite{Schutzenberger1} evacuation map on $T$. For any $\tau=\tau_1\tau_2\ldots\tau_n \in \mathcal{Y}_{T}$ we have $$\begin{aligned} &\tau^t=\tau_{n}\tau_{n-1}\ldots\tau_1 \in \mathcal{Y}_{T^t}\\ &\tau^{evac}=(n+1-\tau_{n})(n+1-\tau_{n-1})\ldots(n+1-\tau_1) \in \mathcal{Y}_{T^{evac}} \end{aligned}$$ \begin{proposition} \label{order.preserving.maps} Suppose $S\leq_{weak}T$ in $SYT_n$. Then \begin{enumerate} \item $S^{evac} \leq_{weak} T^{evac}$. \item $T^t \leq_{weak}S^t$. \end{enumerate} \end{proposition} \begin{proof} Let $w_0$ be the longest element in $S_n$. Then the maps $$ w \mapsto w_0w ~\text{ and }~w \mapsto ww_0 $$ are clearly anti-automorphisms and hence $w \mapsto w_0ww_0$ is a automorphism of $(S_n,\leq_{weak})$. On the other hand $I(ww_0)$ is just the transpose tableau of $I(w)$ \cite{Schensted} whereas $I(w_0ww_0)$ is nothing but the evacuation of $I(w)$ \cite{Schutzenberger1}. \end{proof} \subsection{Inner tableau translation property} The {\it dual Knuth} relations $\nuth$ on $S_n$ plays the main role in the definition of inner tableau translation property. In its most basic form this relation is defined through the Knuth relations applied on the inverse of permutations. 
Namely, $$\sigma ~\nuth~\tau ~ \text{in}~ S_n ~ \text{if and only if}~ \sigma^{-1} ~\Knuth~ \tau^{-1}.$$ An equivalent definition can be given by taking the transitive closure of the following: We say $ \sigma$ and $\tau$ differ by a single dual Knuth relation determined by the triple $\{i, i+1, i+2\}$ if $$\begin{aligned} \mbox{ either } & \sigma=\ldots i+1 \ldots i \dots i+2 \ldots \mbox{ and } \tau=\ldots i+2 \ldots i \dots i+1 \ldots \\ \mbox{ or } & \sigma=\ldots i+1 \ldots i+2 \dots i \ldots \mbox{ and } \tau=\ldots i \ldots i+2 \dots i+1 \ldots \end{aligned}$$ Since left descent sets are all equal for the permutations lying in the same Knuth class, the dual Knuth relation defines an action on the standard Young tableaux. In order to present this action let us give the following definition. \begin{definition} For $S\in SYT_n$ let $A=\{(i,j)\} $ be a cell lying in $\sh(S)$, where $i$ denotes the row number counted from the top and $j$ denotes the column number counted from the left. Then $$\begin{aligned}(S,A,\mathrm{ne}):=&\{ (k,l) ~\mid~ k<i ~\text{and}~ l\geq j \}\\ (S,A,\mathrm{sw}):=&\{ (k,l) ~\mid~ k\geq i ~\text{and}~ l<j \} \end{aligned} $$ \end{definition} Suppose that $\sigma \in \kc_S$ has $i\in\Des(\sigma)$ but $i+1\not\in\Des(\sigma)$. Therefore $\sigma$ has one of the following forms $$\sigma=\ldots i+1 \ldots i \dots i+2 \ldots ~~\text{or}~~\sigma=\ldots i+1 \ldots i+2\dots i\ldots.$$ Now denote by $C_i, C_{i+1}$ and $C_{i+2}$ the cells labeled by $i,i+1$ and $i+2$ in $S$, respectively. Then $$ \text{ either }~ C_i \in (S,C_{i+1},\mathrm{ne})\cap (S,C_{i+2},\mathrm{sw}) ~\text{ or }~C_{i+2} \in (S,C_{i+1},\mathrm{ne})\cap (S,C_{i},\mathrm{sw})$$ and the action of a single dual Knuth relation determined by the triple $\{i, i+1, i+2\}$ on $S$ interchanges the places of $i+1$ and $i+2$ in the first case and it interchanges the places of $i$ and $i+1$ in the second case.
The following theorem (see \cite[Proposition 3.8.1]{Sagan}) provides an important characterization of the dual Knuth relation. \begin{proposition} \label{dual.Knuth} Let $S, T \in SYT_n$. Then $ S \nuth T ~~\text{ if and only if }~~ \sh(S)=\sh(T).$ \end{proposition} \begin{definition} Let $\{\alpha,\beta\}=\{i,i+1\}$ and $SYT_n^{[\alpha,\beta]}:=\{ T \in SYT_n \mid \alpha \in \Des(T), \beta \not \in \Des(T)\}.$ Then we have {\it inner translation map} $$ \mathcal{V}_{[\alpha,\beta]}: SYT_n^{[\alpha,\beta]}\mapsto SYT_n^{[\beta,\alpha]}$$ which send every tableau $T\in SYT_n^{[\alpha,\beta]}$ to a tableau obtained as a result of the action of the single dual Knuth relation determined by the triple $\{i, i+1, i+2\}$. \end{definition} The inner translation map is first introduced by Vogan in ~\cite{Vogan} where he also shows that Kazhdan-Lusztig order is preserved under this map. For geometric order this result is due to Melnikov ~\cite[Proposition 6.6]{Melnikov4}. On the other hand the example given below shows that the weak order does not satisfy this property. \begin{example} $$\begin{array}{ccc} 1&2&\mathbf{4}\\\mathbf{3}&\mathbf{5}&6 \end{array} \leq_{weak} \begin{array}{ccc} 1&2&\mathbf{4}\\\mathbf{3}&6\\\mathbf{5} \end{array} \hskip .1in \text{ but } \hskip .1in \begin{array}{ccc} 1&2&\mathbf{3}\\\mathbf{4}&\mathbf{5}&6 \end{array} \not\leq_{weak} \begin{array}{ccc} 1&2&\mathbf{5}\\\mathbf{3}&6\\\mathbf{4} \end{array}$$ where the latter pair is obtained from the former by applying a single dual Knuth relation on the triple $\{3,4,5\}$. \end{example} A weaker version of the inner translation property can be defined in the following manner: \begin{definition} For $1 \leq k<n$ and $R\in SYT_k$ let $SYT_n^{R}:=\{ T \in SYT_n \mid T_{[1,k]}=R\}$. 
Then for $R,R'\in SYT_k$, having the same shape, we have {\it inner tableau translation map} $$ \mathcal{V}_{[R,R']}: SYT_n^{R}\mapsto SYT_n^{R'}$$ which send every $T\in SYT_n^R$ to the tableau $T'$ obtained by replacing $R$ with $R'$. \end{definition} As a consequence of Proposition~\ref{dual.Knuth}, one can generate $T'$ by a sequence of dual Knuth relations applied on the subtableau $R$ of $T$. Therefore if a partial order is preserved under inner translation map then it is also preserved under inner tableau translation map. Hence Kazhdan-Lusztig and geometric orders have this property. On the other hand it is still reasonable to ask whether the weak order is preserved under the inner tableau translation property. \begin{conjecture} \label{main.conjecture} Let $S\lessdot_{weak}T$ be a covering relation in $ SYT_n^{R}$ and $R'$ be a tableau obtained by applying to $R$ a single dual Knuth relation. Then $$\mathcal{V}_{[R,R']}(S)\lessdot_{weak}\mathcal{V}_{[R,R']}(T) ~~\text{in}~~ SYT_n^{R'}.$$ In other words the weak order on standard Young tableau is preserved under the inner tableau translation map. \end{conjecture} \begin{remark} Recall that any tableau $R' \in SYT_r$ with the same shape as $R$, can be obtained by applying to $R$ a sequence of dual Knuth relation by Proposition\ref{dual.Knuth}. Therefore one can generalize the conjecture for any tableau $R$ and $R'$ having the same shape. \end{remark} As it is stated earlier this conjecture is checked by computer programing up to $n=9$. In the last section we also show that for a specific case the conjecture is true. \section{Applications of the conjecture} \subsection{Well-definedness of the weak order} By assuming Conjecture~\ref{main.conjecture}, we first prove the following result. \begin{theorem} The weak order on $SYT_n$ is well defined. \end{theorem} \begin{proof} It is enough to show that if $S\leq_{weak} T$ and $S\not = T$ then $T\not\leq_{weak}S$. 
By Lemma~\ref{descent.lemma} we know that $S\leq_{weak} T$ implies $\Des(S)\subset\Des(T)$ and if $\Des(T)-\Des(S)\not = \emptyset$ then clearly $T\not\leq_{weak}S$. Now we suppose that $\Des(T)=\Des(S)$. Let $k$ be the smallest integer satisfying $$S_{[1,k]}=T_{[1,k]}.$$ So $k<n$ and $S_{[1,k+1]}$ and $T_{[1,k+1]}$ differ only by the position of the corner cells labeled by $k+1$. On the other hand by Lemma~\ref{inner.restriction.lemma} have $$S_{[1,k+1]}\lneq_{weak}T_{[1,k+1]}$$ and Lemma~\ref{shape.lemma} together with the fact that $\sh(T_{[1,k]}) = \sh(S_{[1,k]})$ gives \begin{equation}\label{welldefined.eq} \sh(T_{[1,k+1]}) \lneq_{dom} \sh(S_{[1,k+1]}). \end{equation} Let $A=\{(i,j)\}$ and $B=\{(i',j')\}$ denote the cells labeled by $k+1$ in $S_{[1,k+1]}$ and $T_{[1,k+1]}$ respectively. Then \eqref{welldefined.eq} implies that $$i<i' ~\text{ and } ~j>j'$$ i.e., the corner cell $B$ lies below the corner cell $A$ and therefore there exists a corner cell $C=(i'',j'')$ of $S_{[1,k]}=T_{[1,k]}$ which satisfies $$ i \leq i''< i' ~\text{ and }~ j >j'\geq j''. $$ Let $R$ denote $S_{[1,k]}=T_{[1,k]}$ and let $R'$ be another tableau in $ SYT_k$ such that $\sh(R)=\sh(R')$ and the corner cell $C$ of $R'$ is labeled by $k$. Denote also by $S'$ and $T'$ the tableaux obtained by replacing $R$ with $R'$ in $S$ and $T$ respectively. Now by Conjecture~\ref{main.conjecture} we have $S'\lessdot_{weak}T'$ and moreover $$k \in \Des(T')-\Des(S'). $$ The last argument shows that $T'\not\leq_{weak}S'$ and therefore by Conjecture~\ref{main.conjecture} $T\not\leq_{weak}S$. \end{proof} \subsection{Poirier-Reutenauer Hopf algebra on $\mathbb{Z}SYT= \oplus_{n \geq 0}\mathbb{Z}SYT_{n}$} Following the work of Malvenuto and Reutenauer on permutations \cite{Malvenuto-Reutenauer}, Poirier-Reutenauer construct two graded Hopf algebra structures on $\mathbb{Z}$ module of all plactic classes $\{PC_{T}\}_{T \in SYT}$, where $PC_{T}=\sum_{\footnotesize P(u)=T}u$. 
The product structure of the one that concerns us here is given by \begin{equation} \label{P-R-multiplication} PC_{T} \ast PC_{T'} = \sum_{\substack{P(u)=T \\ P(w)=T'}} \shf(u, \overline{w }) \end{equation} where $\overline{w}$ is obtained by increasing the indices of $w$ by the length of $u$ and $\shf$ denotes the shuffle product. Then the bijection sending each plactic class to its defining tableau gives us a Hopf algebra structure on the $\mathbb{Z}$ module of all standard Young tableaux, $\mathbb{Z}SYT = \oplus_{n \geq 0} \mathbb{Z}SYT_{n}$. In \cite{Poirier-Reutenauer} Poirier and Reutenauer explain this product using jeu de taquin slides. Following an analogous result of Loday and Ronco \cite[Thm. 4.1]{Loday-Ronco} on permutations, the author shows the following result in \cite{Taskin}: For $S\in SYT_k$, $T\in SYT_l$ where $k+l=n$, let $\overline{T}$ the tableau which is obtained by increasing the indices of $T$ by $k$. Denote by $S/T$ the tableau whose columns are obtained by concatenating the columns of $\overline{T}$ over $S$ below and by $S\backslash T$ the tableau whose rows are obtained by concatenating the rows of $\overline{T}$ over $S$ from the right. Then by \cite[Thm. 4.2]{Taskin} $$ S \ast T = \sum_{\substack{R \in SYT_n:\\ S\backslash T \leq_{weak} R \leq_{weak} S/T}} R$$ Namely the product structure can be read on the weak order poset of standard Young tableaux. \begin{example} Let $ S= \scriptstyle{\begin{array}{cc} 1&2 \\ 3 & \end{array}}$ and $T =\scriptstyle{\begin{array}{c} 1 \\ 2 \end{array}}$. Then $S\backslash T=\scriptstyle{\begin{array}{ccc} 1&2&4 \\ 3 & 5 \end{array}}$, ~ $S/ T=\scriptstyle{\begin{array}{cc} 1&2 \\ 3& \\ 4& \\ 5 \end{array}}$. 
Then $$\begin{aligned} PC_{\footnotesize{\begin{array}{ll} 1&2 \\ 3& \end{array}}}~\ast~ PC_{\footnotesize{\begin{array}{l} 1\\ 2 \end{array}}} &=\shf(312,54) + \shf(132,54)\\ &= PC_{\footnotesize{\begin{array}{lll} 1&2&4\\3&5 \end{array}}}+ PC_{\footnotesize{\begin{array}{lll} 1&2&4\\3\\ 5 \end{array}}}+ PC_{\footnotesize{\begin{array}{ll} 1&2\\3&4\\ 5 \end{array}}}+ PC_{\footnotesize{\begin{array}{ll} 1&2\\3\\4\\5 \end{array}}}. \end{aligned}$$ On the other hand one can check from Figure~\ref{figure1} that the product $S \ast T$ is equal to the sum of all tableaux in the interval $[S\backslash T, S/T]$. \end{example} By using the facts that $(S\backslash T)^{evac}=T^{evac}\backslash S^{evac}$ and $(S/T)^{evac}=T^{evac}/S^{evac}$ and Proposition~\ref{order.preserving.maps}, one can easily deduce the following corollary to Conjecture~\ref{main.conjecture}. \begin{corollary} Let $S,S',T,T'$ be standard Young tableaux satisfying $$\sh(S)=\sh(S') ~\text{ and }~ \sh(T)=\sh(T').$$ Then the intervals of the weak order $[S\backslash T, S/T]$ and $[S'\backslash T', S'/T']$ are isomorphic. Equivalently, the shuffle product $S \ast T$ is determined by the shapes of the tableaux rather than the tableaux themselves. \end{corollary} \section{The cases where the conjecture holds} \begin{lemma} \label{conjecture.lemma1} Suppose that $S\leq_{weak}T$ is a covering relation in $SYT_n^{R}$ and $R,R'\in SYT_k$ have the same shape. If $\st(S_{[k+1,n]})\lneq_{weak} \st(T_{[k+1,n]})$ then $\mathcal{V}_{[R,R']}(S)\lessdot_{weak}\mathcal{V}_{[R,R']}(T)$. \end{lemma} \begin{proof}It is enough to consider the case when $R$ and $R'$ differ by only one dual Knuth relation determined by the triple $\{i,i+1,i+2\}$ for some $i\leq k-2$.
Since $ S<_{weak}T$ is a covering relation, there exist $\sigma \in \kc_S$ and $\tau \in \kc_T$ such that for some $j<n$, $$ \begin{aligned} \sigma=a_1\ldots a_j a_{j+1}\ldots a_n &\leq a_1\ldots a_{j+1} a_{j}\ldots a_n=\tau, \text{ where } a_j< a_{j+1} \end{aligned} $$ i.e., $\sigma<\tau$ is also a covering relation in the right weak order on $S_n$. On the other hand by Lemma~\ref{j-d-t-initial-final} we have $$ I(\sigma_{[k+1,n]}) =S_{[k+1,n]} \text{ and } T_{[k+1,n]}=I(\tau_{[k+1,n]}) $$ Furthermore the assumption $S_{[k+1,n]} \lneq_{weak} T_{[k+1,n]}$ yields that $\sigma_{[k+1,n]}\lneq\tau_{[k+1,n]}$ and $\sigma_{[1,k]}=\tau_{[1,k]}$. Now applying the dual Knuth relation determined by the triple $\{i,i+1,i+2\}$ on $\sigma$ and $\tau$ gives two new permutations, say $\sigma'$ and $\tau'$, such that $\sigma'\lessdot \tau'$ in the right weak Bruhat order and therefore $$\mathcal{V}_{[R,R']}(S)= I(\sigma') ~\leq_{weak}~I(\tau') =\mathcal{V}_{[R,R']}(T).$$ Now if there exists a tableau $Q \in SYT_n^{R'}$ satisfying $\mathcal{V}_{[R,R']}(S) \lneq_{weak} Q \lneq_{weak} \mathcal{V}_{[R,R']}(T)$ then we have $$S \lneq_{weak} \mathcal{V}_{[R',R]}(Q) \lneq_{weak} T$$ which is clearly a contradiction. Hence $\mathcal{V}_{[R,R']}(S) \lessdot_{weak} \mathcal{V}_{[R,R']}(T)$. \end{proof} \begin{lemma} \label{conjecture.lemma2} Suppose $$R=\tableau{{1}&{3}\\{2}} ~~\text{and}~~R'=\tableau{{1}&{2}\\{3}}.$$ Then $S\lessdot_{weak}T$ in $SYT_n^{R}$ if and only if $\mathcal{V}_{[R,R']}(S)\lessdot_{weak}\mathcal{V}_{[R,R']}(T)$ in $SYT_n^{R'}$. \end{lemma} \begin{proof} Since $ S<_{weak}T$ there exist $\sigma \in \kc_S$ and $\tau \in \kc_T$ such that for some $1\leq j<n$ we have $$ \sigma=a_1\ldots a_j a_{j+1}\ldots a_n ~\text{ and }~\tau =a_1\ldots a_{j+1} a_{j}\ldots a_n, \text{ where } a_j< a_{j+1}. $$ Observe that since $S$ and $T$ have the same inner tableau $R$, we have $\{\sigma_{[1,3]},\tau_{[1,3]}\}\subset \kc_R=\{213,231\}$.
If $\{a_j, a_{j+1}\}\not=\{1,3\}$ then applying the dual Knuth relation determined by $\{1,2,3\}$ on $\sigma$ and $\tau$ yields two permutations $\sigma' \in \kc_{\mathcal{V}_{[R,R']}(S)}$ and $\tau'\in \kc_{\mathcal{V}_{[R,R']}(T)}$ which still satisfy $\sigma'\lessdot \tau'$ in the right weak order. Therefore $\mathcal{V}_{[R,R']}(S)\leq_{weak}\mathcal{V}_{[R,R']}(T)$ and it must be a covering relation. Suppose $\{a_j, a_{j+1}\}=\{1,3\}$. Since $\kc_R=\{213,231\}$, the insertion tableau $I(a_1\ldots a_{j-1})$ must have the number $2$ in the leftmost position of its first row. Let for some $i\leq j-1$ $$b_1\ldots b_{i-1}~2~b_{i+1} \ldots b_{j-1}$$ be the row word of $I(a_1\ldots a_{j-1})$ obtained by reading numbers in each row of $I(a_1\ldots a_{j-1})$ from left to right, starting from the last row. Therefore the sequence $2~b_{i+1} \ldots b_{j-1}$ labels the first row and moreover $$\begin{aligned} &b_1\ldots b_{i-1}~2~b_{i+1}b_{i+2} \ldots b_{j-1}~1~3~a_{j+2}\ldots a_n \in \kc_S \\ &b_1\ldots b_{i-1}~2~b_{i+1}b_{i+2} \ldots b_{j-1}~3~1~a_{j+2}\ldots a_n\in \kc_T. \end{aligned}$$ On the other hand since $2<b_{i+1}< \ldots <b_{j-1}$ we have $$\begin{aligned} &2~b_{i+1}b_{i+2} \ldots b_{j-1}1~3~\Knuth ~2~b_{i+1}1~3~b_{i+2} \ldots b_{j-1}\\ &2~b_{i+1} b_{i+2}\ldots b_{j-1}~3~1~\Knuth ~b_{i+1}~2~1~3~b_{i+2} \ldots b_{j-1} \end{aligned}$$ and moreover $$\begin{aligned} &b_1\ldots b_{i-1}2~b_{i+1}1~3~b_{i+2} \ldots b_{j-1}a_{j+2}\ldots a_n \in \kc_S \\ &b_1\ldots b_{i-1}b_{i+1}2~1~3~b_{i+2} \ldots b_{j-1}a_{j+2}\ldots a_n \in \kc_T.\end{aligned} $$ On the other hand applying the dual Knuth relation determined by $\{1,2,3\}$ we get $$b_1\ldots b_{i-1}~3~b_{i+1}1~2~b_{i+2} \ldots b_{j-1}a_{j+2}\ldots a_n \in \kc_{\mathcal{V}_{[R,R']}(S)} \text{ and } b_1\ldots b_{i-1}b_{i+1}~3~1~2~b_{i+2} \ldots b_{j-1}a_{j+2}\ldots a_n \in \kc_{\mathcal{V}_{[R,R']}(T)} $$ which are clearly the generators of $\mathcal{V}_{[R,R']}(S)\lessdot_{weak} \mathcal{V}_{[R,R']}(T)$.
Suppose now $ S<_{weak}T$ is a covering relation in $SYT_n^{R'}$. Since $R^t=R'$, by Proposition \ref{order.preserving.maps} we have $$ T^t\lessdot_{weak}S^t \text{ in } SYT_n^{R}.$$ Now by the previous result we have $\mathcal{V}_{[R,R']}(T^t)\lessdot_{weak} \mathcal{V}_{[R,R']}(S^t)$ and therefore $$\mathcal{V}_{[R',R]}(S)=(\mathcal{V}_{[R,R']}(S^t))^t\lessdot_{weak} (\mathcal{V}_{[R,R']}(T^t))^t=\mathcal{V}_{[R',R]}(T).$$ \end{proof} \begin{proposition}\label{two.rows.proposition} Suppose that $S\lessdot_{weak}T$ in $SYT_n^{R}$ where $R \in SYT_k$ has exactly two rows. If $R'$ is another tableau in $SYT_k$ having the same shape as $R$ then $\mathcal{V}_{[R,R']}(S)\lessdot_{weak}\mathcal{V}_{[R,R']}(T)$ in $SYT_n^{R'}$. \end{proposition} \begin{proof} Suppose that $S\leq_{weak}T$ is a covering relation in $SYT_n^{R}$ and $R\in SYT_k$ has two rows. When $k<3$ there is nothing to prove. For $k=3$ the only case that needs to be explored is when $R$ has neither vertical nor horizontal shape, hence Lemma \ref{conjecture.lemma2} gives the desired result. So we suppose the statement is true for $k-1$ and let $R\in SYT_k$. It is enough to consider the case when $R$ and $R'$ differ by only one dual Knuth relation determined by the triple $\{i,i+1,i+2\}$ where $i+2= k$. If $i+2<k$ then the inner tableau $S_{[1,i+2]}=T_{[1,i+2]}$ still has at most two rows and induction gives the desired result.
If $i+2=k$ then we have the following classes of possibilities for the tableau $R$: \begin{equation}\label{row.lemma} \begin{aligned} &\tableau{{*}&{*}&_{k-2}& _k \\{*}&{*}& _{k-1}}~&~ &\tableau{{*}&{*}&_{k-2}& _{k-1} \\{*}&{*}& _{k}}~&~ &\tableau{{*}&{*}&{*}& _{k-2 }\\{*}&_{k-1}& _{k}}~&~ &\tableau{{*}&{*}&{*}& _{k-1 }\\{*}&_{k-2}& _{k}}&\\ &~~~~(a)&&~~~~(b)&&~~~~(c)&&~~~~(d)& \end{aligned} \end{equation} Observe that in the last two classes the dual Knuth relation determined by $\{k-2,k-1,k\}$ interchanges the places of $k-1$ and $k-2$ and so they refer to the cases with smaller inner tableau $S_{[1,k-1]}=T_{[1,k-1]}$ and the induction argument gives the required result. For the first two classes we have the following analysis: Since $ S\lessdot_{weak}T$ there exist $\sigma \in \kc_S$ and $\tau \in \kc_T$ such that $\sigma<\tau$ is also a covering relation the right weak order on $S_n$ i.e., for some $1\leq j<n$ we have $$ \sigma=a_1\ldots a_j a_{j+1}\ldots a_n ~\text{ and }~\tau =a_1\ldots a_{j+1} a_{j}\ldots a_n, \text{ where } a_j< a_{j+1}. $$ If $\{a_j, a_{j+1}\}\not=\{k,k-2\}$ then applying the dual Knuth relation determined by $\{k,k-1,k-2\}$ on $\sigma$ and $\tau$ yields two permutations $\sigma' \in \mathcal{V}_{[R,R']}(S)$ and $\tau'\in \mathcal{V}_{[R,R']}(T)$ which still have $\sigma'\lessdot \tau'$ in the right weak order. Therefore $\mathcal{V}_{[R,R']}(S)\leq_{weak}\mathcal{V}_{[R,R']}(T)$ and it must be a covering relation. Now let $\{a_j, a_{j+1}\}=\{k,k-2\}$, i.e., \begin{equation} \label{row.lemma2} \sigma=a_1\ldots~ a_{j-1} (k-2)~k ~a_{j+2}\ldots a_n ~\text{ and }~\tau =a_1\ldots a_{j-1} ~k~(k-2)~a_{j+2}\ldots a_n \end{equation} \noindent {\it Case 1.} We first consider the case illustrated in \eqref{row.lemma}-$(a)$, where $k-1$ comes before $k$ in every permutations in the Knuth classes of $S$ and $T$. 
Therefore the tableau $I(a_1\ldots a_{j-1})$ must have the number $k-1$ and it must be located in the first row, since otherwise the number $k$ drops to the second row of $T$ at the end of the insertion of $\tau$ and that is clearly a contradiction. Let for some $r\leq j-1$ $$b_1\ldots b_{r-1}~(k-1)~b_{r+1}b_{r+2} \ldots b_{j-1}$$ be the row word of $I(a_1\ldots a_{j-1})$ obtained by reading numbers in each row of $I(a_1\ldots a_{j-1})$ from left to right, starting from last row. Therefore $(k-1)~b_{r+1}b_{r+2} \ldots b_{j-1}$ lies on the first row and so $k-1<b_{r+1}<b_{r+2}< \ldots <b_{j-1}$. Now it is easy to see that $$ b_1\dots b_{r-1}(k-1)b_{r+1}b_{r+2} \ldots b_{j-1}(k-2)k~a_{j+2}\ldots a_n ~\Knuth~ b_1\ldots b_{r-1}(k-1)b_{r+1}(k-2)k~ \ldots b_{j-1}a_{j+2}\ldots a_n $$ lies in the Knuth class $S$ where as $$b_1\ldots b_{r-1}(k-1)b_{r+1}b_{r+2} \ldots b_{j-1}~k(k-2)a_{j+2}\ldots a_n ~\Knuth~ b_1\ldots b_{r-1}b_{r+1}(k-1)(k-2)k \ldots b_{j-1}a_{j+2}\ldots a_n $$ lies in the Knuth class of $T$. Moreover applying dual Knuth relation determined by $\{k,k-1,k-2\}$ on the latter permutations we get $$\begin{aligned} b_1\ldots b_{r-1}~k~b_{r+1}(k-2)~(k-1)~ \ldots b_{j-1}a_{j+2}\ldots a_n \in \kc_{\mathcal{V}_{[R,R']}(S)} \\ b_1\ldots b_{r-1}~b_{r+1}~k~(k-2)~(k-1) \ldots b_{j-1}a_{j+2}\ldots a_n \in \kc_{\mathcal{V}_{[R,R']}(T)} \end{aligned} $$ and therefore $\mathcal{V}_{[R,R']}(S)\leq_{weak} \mathcal{V}_{[R,R']}(T)$. \noindent {\it Case 2.} For the case illustrated in \eqref{row.lemma}-(b), We have $$ T^t\leq_{weak} S^t \in SYT_N^{R^t} $$ where $R^t$ is a tableau of at most two columns having $k-1$ in its first column and $k$ in its second column. 
Therefore we obtain $\sigma^t \in \kc_{S^t}$ and $\tau^t \in \kc_{T^t}$ by reversing $\sigma$ and $\tau$ of \eqref{row.lemma2}, i.e., $$ \sigma^t=a_n\ldots (k-1)\ldots a_{j+2}~k~(k-2)~a_{j-1}\ldots a_1 ~\text{ and }~\tau^t =a_n\ldots (k-1)\ldots a_{j+1} ~(k-2)~k~a_{j-1}\ldots a_1 $$ Now consider the tableau $I(a_n\ldots (k-1)\ldots a_{j+2})$. Suppose first that the leftmost cell in its first row is labeled by a number, say $x$, which is smaller than $k-1$. This implies that insertion of the sequence $(k-2)k$ into $I(a_n\ldots (k-1)\ldots a_{j+2})$ places the sequence $(k-2)k$ to the right of $x$, but this contradicts the fact that the inner tableau $R^t$ has at most two columns. Therefore we have $x=k-1$. Now, for some $r\leq n- j-1$, let $$b_1\ldots b_{r-1}~(k-1)~b_{r+1} \ldots b_{n-j-1}$$ be the row word of $I(a_n\ldots (k-1)\ldots a_{j+2})$. Therefore $(k-1)~b_{r+1}b_{r+2} \ldots b_{n-j-1}$ lies on the first row and so $k-1<b_{r+1}<b_{r+2}< \ldots <b_{n-j-1}$, which yields that $$ b_1\ldots b_{r-1}~(k-1)~b_{r+1} \ldots b_{n-j-1}~k~(k-2)~a_{j-1}\ldots a_1~\Knuth~ b_1\ldots b_{r-1}~b_{r+1}~(k-1)~(k-2)~k~ \ldots b_{n-j-1}a_{j-1}\ldots a_1 $$ lies in $\kc_{S^t}$ whereas $$ b_1\ldots b_{r-1}~(k-1)~b_{r+1} \ldots b_{n-j-1}~(k-2)~k~a_{j-1}\ldots a_1~\Knuth~ b_1\ldots b_{r-1}~(k-1)~b_{r+1}~(k-2)~k \ldots b_{n-j-1}a_{j-1}\ldots a_1 $$ lies in $\kc_{T^t}$. Now reversing and then applying the dual Knuth relation determined by $\{k,k-1,k-2\}$ on the latter permutations we get $$\begin{aligned} &a_1\ldots a_{j-1}b_{n-j-1}\ldots (k-1)(k-2)k~b_{r+1}b_{r-1}\ldots b_1 \in \kc_{\mathcal{V}_{[R,R']}(S)} \\ &a_1\ldots a_{j-1}b_{n-j-1}\ldots (k-1)(k-2) b_{r+1} ~k~b_{r-1}\ldots b_1 \in \kc_{\mathcal{V}_{[R,R']}(T)} \end{aligned} $$ and therefore $\mathcal{V}_{[R,R']}(S)\leq_{weak} \mathcal{V}_{[R,R']}(T)$. Lastly, the fact that the resulting relations are indeed covering relations follows directly. 
\end{proof} \begin{corollary} Suppose that $S\leq_{weak}T$ is a covering relation in $SYT_n^{R}$ where $R \in SYT_k$ has exactly two columns. If $R'$ is another tableau in $SYT_k$ having the same shape as $R$ then $\mathcal{V}_{[R,R']}(S)\leq_{weak}\mathcal{V}_{[R,R']}(T)$ is also a covering relation in $SYT_n^{R'}$. \end{corollary} \begin{proof} By Proposition \ref{order.preserving.maps} we have $T^t\leq_{weak}S^t$ in $SYT_n^{R^t}$ where $R^t$ has exactly two rows. Now $(R')^t$ has the same shape as $R^t$ and by the previous proposition $\mathcal{V}_{[R^t,(R')^t]}(T^t)\leq_{weak}\mathcal{V}_{[R^t,(R')^t]}(S^t)$. Therefore $$\mathcal{V}_{[R,R']}(S)=(\mathcal{V}_{[R^t,R'^t]}(S^t))^t\leq_{weak}(\mathcal{V}_{[R^t,R'^t]}(T^t))^t=\mathcal{V}_{[R,R']}(T).$$ \end{proof} \begin{definition} For $T\in SYT_n$ and a corner cell $A$ of $T$, denote by $$T^{\uparrow A}~~ \text{ and }~~ \eta(T^{\uparrow A}) $$ the tableau obtained by applying the reverse insertion algorithm to $T$ through the corner cell $A$ and, respectively, the number which leaves the tableau at the end. \end{definition} The following result on hook shape tableaux is easy to deduce by using the reverse RSK algorithm. \begin{lemma}\label{hook.lemma2} Let $R \in SYT_k$ be a tableau of hook shape with more than two rows and two columns and suppose that the only two corner cells of $R$, say $A$ and $B$, are labeled by $k$ or $k-1$. Then $$\eta(R^{\uparrow A})\not = \eta(R^{\uparrow B})$$ and if $a_1\ldots a_k$ and $b_1\ldots b_k$ are two permutations in the Knuth class of $R$ with $a_k=b_k$ then $$I(a_1\ldots a_{k-1})=I(b_1\ldots b_{k-1}).$$ \end{lemma} \begin{proof} Since the tableau is required to have more than two rows and two columns, it is enough to consider the following tableaux together with their transposes, where $k$ labels the cell $A$ and $k-1$ labels the cell $B$. 
\begin{equation}\label{hook.lemma1} \tableau{{*}&_{k-2}& _k \\{*}\\ _{k-1}}~ \mbox{ }~~ \tableau{{*}& {*}& _k \\_{k-2}\\ _{k-1}} \end{equation} Clearly $\eta(R^{\uparrow A})\not = \eta(R^{\uparrow B})$ and this shows that if $a_k=b_k$ then they must leave the tableau at the end of a reverse insertion applied on the same corner cell, say $A$. Therefore $ I(a_1\ldots a_{k-1})=R^{\uparrow A}=I(b_1\ldots b_{k-1})$. \end{proof} \begin{proposition} Suppose that $S\leq_{weak}T$ is a covering relation in $SYT_n^{R}$ where $R \in SYT_k$ has hook shape. If $Q$ is another tableau in $SYT_k$ having the same shape as $R$ then $\mathcal{V}_{[R,Q]}(S)\leq_{weak}\mathcal{V}_{[R,Q]}(T)$ is also a covering relation in $SYT_n^{Q}$. \end{proposition} \begin{proof} Here we just need to deal with the case when $R$ is not a horizontal or a vertical tableau. On the other hand, for $k\leq 4$ the only non horizontal or vertical hook shape tableaux have either two rows or two columns and Proposition \ref{two.rows.proposition} gives the required result. Therefore in the rest we assume that $n>k>4$ and $R$ has more than two rows and two columns. We may assume that $R$ and $Q$ differ by only one dual Knuth relation determined by the triple $\{i,i+1,i+2\}$. If $i+2<k$ then the subtableau $S_{[1,i+2]}=T_{[1,i+2]}$ of $R$ still has hook shape and induction gives the desired result. So let $R$ and $Q$ differ by a single dual Knuth relation determined by $\{k-2,k-1,k\}$. Since $R$ has a hook shape this implies the dual Knuth relation interchanges the places of $k$ and $k-1$, i.e., the only two corner cells of $R$ are occupied by $k$ and $k-1$. Now since $ S<_{weak}T$ is a covering relation, there exist $\sigma \in \kc_S$ and $\tau \in \kc_T$ such that $\sigma<\tau$ is also a covering relation in the right weak order on $S_n$, i.e., for some $1\leq j<n$ we have $$ \sigma=a_1\ldots a_j a_{j+1}\ldots a_n ~\text{ and }~\tau =a_1\ldots a_{j+1} a_{j}\ldots a_n, \text{ where } a_j< a_{j+1}. 
$$ If $\{a_j, a_{j+1}\}\not=\{k,k-2\}$ the result follows as discussed in the proof of Proposition~\ref{two.rows.proposition}. So in the rest we assume that $\{a_j, a_{j+1}\}=\{k,k-2\}$. Observe that we have either $\sigma_1=a_1=\tau_1$ or $\sigma_n=a_n=\tau_n$. W.l.o.g.\ assume $\sigma_n=a_n=\tau_n$ (the first case can be dealt with by the same method on the transposes of the tableaux). Therefore there exist corner cells, say $A_S$ and $A_T$, of $S$ and $T$ respectively such that $$\begin{aligned}&\eta(S^{\uparrow A_S}) =a_n= \eta(T^{\uparrow A_T})\\ &S'=S^{\uparrow A_S}=I(a_1\ldots a_j a_{j+1}\ldots a_{n-1}) \\ &T'=T^{\uparrow A_T}=I(a_1\ldots a_{j+1} a_{j}\ldots a_{n-1}) \end{aligned}$$ \noindent {\it Case 1.} If $a_n>k$ then $S'$ and $T'$ still have the same inner tableau $R$. Moreover since $S'\leq_{weak}T'$, we have by induction $$\mathcal{V}_{[R,Q]}(S')\leq_{weak}\mathcal{V}_{[R,Q]}(T') $$ and therefore $$\mathcal{V}_{[R,Q]}(S)=\mathcal{V}_{[R,Q]}(S')^{\downarrow a_n} \leq_{weak}\mathcal{V}_{[R,Q]}(T')^{\downarrow a_n}=\mathcal{V}_{[R,Q]}(T).$$ \noindent {\it Case 2.} If $a_n\leq k$ then the number $a_n$ leaves the tableaux through the sub-tableau $R$ in both reverse insertions $S'=S^{\uparrow A_S}$ and $T'=T^{\uparrow A_T}$. Recall that the only two corners of $R$ are labeled by $k$ and $k-1$. 
This implies by Lemma \ref{hook.lemma2} that $a_n$ leaves $R$ as a result of the reverse insertion algorithm applied on the same corner cell, say $C$, i.e., $$a_n= \eta(R^{\uparrow C}).$$ Recall that $R$ has more than two rows and two columns, which leaves us with the following possibilities: \begin{equation}\label{hook.lemma} \tableau{{*}&_{k-2}& _k \\{*}\\ _{k-1}}~ \mbox{ }~~ \tableau{{*}& {*}& _k \\_{k-2} \\_{k-1} }~ \mbox{ }~~ \tableau{{*}&_{k-2}& _{k-1} \\{*}\\ _{k}}~ \mbox{ }~~ \tableau{{*}& {*}& _{k-1} \\_{k-2} \\_{k} } \end{equation} In the first two cases of \eqref{hook.lemma}, one can observe easily that either $a_n=k$ or $a_n<k-2$, but $a_n=k$ contradicts the assumption that $\{a_j, a_{j+1}\}=\{k,k-2\}$. Therefore $a_n<k-2$ and the application of the dual Knuth relation determined by $\{k-2,k-1,k\}$ to the inner tableau $R'=R^{\uparrow C}$ gives $Q'=Q^{\uparrow C}$. Now by induction we have $\mathcal{V}_{[R',Q']}(S') \leq_{weak}\mathcal{V}_{[R',Q']}(T')$ and therefore $$\mathcal{V}_{[R,Q]}(S)=\mathcal{V}_{[R',Q']}(S')^{\downarrow a_n} \leq_{weak}\mathcal{V}_{[R',Q']}(T')^{\downarrow a_n}=\mathcal{V}_{[R,Q]}(T).$$ In the last two cases of \eqref{hook.lemma}, we have either $a_n=k-1$ or $a_n<k-2$. If $a_n<k-2$ then the required result follows as above. On the other hand, if $a_n=k-1$ then for the tableau $R'=R^{\uparrow C}$ we have the following possibilities $$ \tableau{{*}&_{k-2} \\{*}\\ _{k}}~ \mbox{ }~~ \tableau{{*}& {*} \\_{k-2} \\_{k} } $$ where in both cases every permutation in the Knuth class has the subsequence $k(k-2)$. This shows that any two permutations in the Knuth class of $R$ that end with $k-1$ must have the subsequence $k(k-2)$ and this again contradicts the fact that $\{a_j, a_{j+1}\}=\{k,k-2\}$. \end{proof} \end{document}
\begin{document} \title[Some weighted fourth-order Hardy-H\'{e}non equations] {Some weighted fourth-order Hardy-H\'{e}non equations} \author[S. Deng]{Shengbing Deng$^{\ast}$} \address{\noindent Shengbing Deng (Corresponding author) \newline School of Mathematics and Statistics, Southwest University, Chongqing 400715, People's Republic of China}\email{[email protected]} \author[X. Tian]{Xingliang Tian} \address{\noindent Xingliang Tian \newline School of Mathematics and Statistics, Southwest University, Chongqing 400715, People's Republic of China.}\email{[email protected]} \thanks{$^{\ast}$ Corresponding author} \thanks{2020 {\em{Mathematics Subject Classification.}} Primary 35P30, 35B40; Secondary 35J30.} \thanks{{\em{Key words and phrases.}} Caffarelli-Kohn-Nirenberg inequalities; Hardy-H\'{e}non equation; Non-degeneracy; Remainder terms; Prescribed perturbation} \allowdisplaybreaks \begin{abstract} {\tiny By using a suitable transform related to the Sobolev inequality, we investigate the sharp constants and optimizers in the radial space for the following weighted Caffarelli-Kohn-Nirenberg-type inequalities: \begin{equation*} \int_{\mathbb{R}^N}|x|^{\alpha}|\Delta u|^2 dx \geq S^{rad}(N,\alpha)\left(\int_{\mathbb{R}^N}|x|^{-\alpha}|u|^{p^*_{\alpha}} dx\right)^{\frac{2}{p^*_{\alpha}}}, \quad u\in C^\infty_c(\mathbb{R}^N), \end{equation*} where $N\geq 3$, $4-N<\alpha<2$, $p^*_{\alpha}=\frac{2(N-\alpha)}{N-4+\alpha}$. Then we obtain the explicit form of the unique (up to scaling) radial positive solution $U_{\lambda,\alpha}$ to the weighted fourth-order Hardy (for $\alpha>0$) or H\'{e}non (for $\alpha<0$) equation: \begin{equation*} \Delta(|x|^{\alpha}\Delta u)=|x|^{-\alpha} u^{p^*_{\alpha}-1},\quad u>0 \quad \mbox{in}\quad \mathbb{R}^N. \end{equation*} For $\alpha\neq 0$, it is known that the solutions of the above equation are invariant under the dilations $\lambda^{\frac{N-4+\alpha}{2}}u(\lambda x)$ but not under translations. 
However we show that if $\alpha$ is a negative even integer, there exist new solutions to the linearized problem, which related to above equation at $U_{1,\alpha}$, that ``replace'' the ones due to the translations invariance. This interesting phenomenon was first shown by Gladiali, Grossi and Neves [Adv. Math. 249, 2013, 1-36] for the second-order H\'{e}non problem. Finally, as applications, we investigate the remainder term of above inequality and also the existence of solutions to some related perturbed equations. } \end{abstract} \maketitle \section{{\bfseries Introduction}}\label{sectir} \subsection{Motivation} Recall the classical Sobolev inequality: for $N\geq 3$ there exists $S=S(N)>0$ such that \begin{equation}\label{bsic} \|\nabla u\|_{L^2(\mathbb{R}^N)}\geq S\|u\|_{L^{2^*}(\mathbb{R}^N)},\quad \forall u\in D^{1,2}(\mathbb{R}^N), \end{equation} where $2^*=2N/(N-2)$ and $D^{1,2}(\mathbb{R}^N)$ denotes the closure of $C^\infty_c(\mathbb{R}^N)$ with respect to the norm $\|u\|_{D^{1,2}(\mathbb{R}^N)}=\|\nabla u\|_{L^2(\mathbb{R}^N)}$. It is well known that the Euler-Lagrange equation associated to (\ref{bsic}) is \begin{equation}\label{bec} -\Delta u=|u|^{2^*-2}u\quad \mbox{in}\quad \mathbb{R}^N. \end{equation} By Caffarelli et al. \cite{CGS89} and Gidas et al. \cite{GNN79}, it is known that all positive solutions are Talenti bubble \cite{Ta76} \[V_{z,\lambda}(x)=[N(N-2)]^{\frac{N-2}{4}}\left(\frac{\lambda}{1+\lambda^2|x-z|^2}\right)^{\frac{N-2}{2}},\] with $z\in\mathbb{R}^N$ and $\lambda>0$. The non-degeneracy of $V_{z,\lambda}$ was given by Bianchi and Egnell \cite{BE91} (see also \cite[Lemma 3.1]{AGP99}), that is, the solutions of the following linearized equation \begin{equation} -\Delta v=(2^*-1)V_{z,\lambda}^{2^*-2}v\quad \mbox{in}\quad \mathbb{R}^N,\quad v\in D^{1,2}(\mathbb{R}^N), \end{equation} are linear combinations of functions $\frac{\partial V_{z,\lambda}}{\partial \lambda}$ and $\frac{\partial V_{z,\lambda}}{\partial z_i}$, $i=1,\ldots,N$. 
In \cite{GGN13}, Gladiali, Grossi and Neves considered the second-order H\'{e}non equation \begin{equation}\label{Phs} -\Delta u=(N+l)(N-2)|x|^l u^{\frac{N+2+2l}{N-2}},\quad u>0 \quad \mbox{in}\quad \mathbb{R}^N, \end{equation} where $N\geq 3$ and $l>0$. This problem generalizes the well-known equation (\ref{bec}). Firstly, they gave the classification of radial solutions $V^\lambda_l$ in $D^{1,2}(\mathbb{R}^N)$ for problem (\ref{Phs}), where $V^\lambda_l(x)=\lambda^\frac{N-2}{2}V_l(\lambda x)$ and \[V_l(x)=(1+|x|^{2+l})^{-\frac{N-2}{2+l}}.\] Furthermore, they characterized all the solutions to the linearized problem related to (\ref{Phs}) at function $V_l$, that is \begin{equation}\label{Pwhls} -\Delta v=(N+l)(N+2+2l)|x|^l V_l^{\frac{N+2+2l}{N-2}-1}v \quad \mbox{in}\quad \mathbb{R}^N, \quad v\in D^{1,2}(\mathbb{R}^N). \end{equation} \vskip0.25cm \noindent{\bf Theorem~A.} \cite[Theorem 1.3]{GGN13} {\it Let $l\geq 0$. If $l>0$ is not an even integer, then the space of solutions of (\ref{Pwhls}) has dimension $1$ and is spanned by \begin{equation*} X_0(x)=\frac{1-|x|^{2+l}}{(1+|x|^{2+l})^\frac{N+l}{2+l}}, \end{equation*} where $X_0\sim\frac{\partial V^\lambda_l}{\partial \lambda}|_{\lambda=1}$. If $l=2(k-1)$ for some $k\in\mathbb{N}^+$, then the space of solutions of (\ref{Pwhls}) has dimension $1+\frac{(N+2k-2)(N+k-3)!}{(N-2)!k!}$ and is spanned by \begin{equation*} X_0(x)=\frac{1-|x|^{2+l}}{(1+|x|^{2+l})^\frac{N+l}{2+l}},\quad X_{k,i}(x)=\frac{|x|^k\Psi_{k,i}(x)}{(1+|x|^{2+l})^\frac{N+l}{2+l}}, \end{equation*} where $\{\Psi_{k,i}\}$, $i=1,\ldots,\frac{(N+2k-2)(N+k-3)!}{(N-2)!k!}$, form a basis of $\mathbb{Y}_k(\mathbb{R}^N)$, the space of all homogeneous harmonic polynomials of degree $k$ in $\mathbb{R}^N$. } \vskip0.25cm Theorem A highlights the new phenomenon that if $l$ is an even integer then there exist new solutions to (\ref{Pwhls}) that ``replace'' the ones due to the translations invariance. 
It would be very interesting to understand if these new solutions are given by some geometrical invariants of the problem or not. It is obvious that for all $l>0$, $\frac{N+2+2l}{N-2}>2^*-1$ and the solution of equation (\ref{Phs}) is invariant for dilations but not for translations, since the presence of the term $|x|^l$ prevents the application of the moving plane method to obtain the radial symmetry of the solutions around some point in $\mathbb{R}^N$. Indeed, nonradial solutions appear. They constructed the nonradial solutions to equation (\ref{Phs}) when $l=2$ and $N\geq 4$ is even, that is, for any $a\in\mathbb{R}$, the functions \begin{equation*} u(x)=u(|x'|,|x''|)=(1+|x|^4-2a(|x'|^2-|x''|^2)+a^2)^{-\frac{N-2}{4}}, \end{equation*} form a branch of solutions to (\ref{Phs}) bifurcating from $V_2$, where $(x',x'')\in \mathbb{R}^N=\mathbb{R}^{\frac{N}{2}}\times\mathbb{R}^{\frac{N}{2}}$. It is worth to mention that the authors of \cite{GGN13} came up with an interesting conjecture that the nonradial solutions exist only when $l$ is a positive even integer. Although they had no proof of this, the classification result for a Liouville-type equation with singular data \cite{PT01} supports this conjecture. See \cite{BCG21,DGG17} for the Hardy-Sobolev equation of the similar bifurcation phenomenon as in \cite{GGN13}. As applications of Theorem A, there are many results for the following second-order asymptotically critical H\'{e}non equation \begin{equation}\label{Pwhplh} -\Delta u=|x|^{l} |u|^{\frac{2(N+l)}{N-2}-2-\varepsilon}u \quad \mbox{in}\quad \Omega, \quad u=0\quad \mbox{on}\quad \partial\Omega, \end{equation} where $\Omega\subseteq \mathbb{R}^N$ is a smooth bounded domain containing the origin with $N\geq 3$, and $l>0$. 
Gladiali and Grossi in \cite{GG12} constructed a positive solution when $\varepsilon>0$ is small enough and $0<l\leq 1$; this last bound on the exponent $l$ was removed in \cite{GGN13}, getting the existence of positive solutions to equation (\ref{Pwhplh}) when $\varepsilon>0$ is small enough and $l>0$ is not an even integer. Chen and Deng in \cite{CD17} constructed a sign-changing solution to (\ref{Pwhplh}) with the shape of a tower of bubbles with alternating signs, centered at the origin when $\varepsilon\to 0^+$ and $l>0$ is not an even integer; see also \cite{CLP18} for the same result. If $l>0$ is an even integer, Alarc\'{o}n in \cite{Al18} made the further assumption on the domain that $\Omega$ is symmetric with respect to $x_1, x_2,\ldots, x_N$ and invariant for some suitable group, and then constructed the same type of sign-changing solutions to (\ref{Pwhplh}) as in \cite{CD17,CLP18}. For more results about the second-order H\'{e}non problem related to (\ref{Pwhplh}), readers can refer to \cite{EGPV21,FGP15,GG15,Liu20,Liu21}. Therefore, it is natural to consider the fourth-order Hardy or H\'{e}non problem, and we hope to establish an analogous conclusion to \cite[Theorem 1.3]{GGN13}. \subsection{Problem setup and main results} Recently, Guo et al. \cite{GWW20} studied the weighted fourth-order elliptic equation: \begin{equation}\label{Pl} \Delta(|x|^{\alpha}\Delta u)=|x|^l u^p,\quad u\geq 0 \quad \mbox{in}\quad \mathbb{R}^N, \end{equation} where $N\geq 5$, $p>1$ and $4-N<\alpha<\min\{N,l+4\}$. Define \begin{equation}\label{defps} p_s:=\frac{N+4+2l-\alpha}{N-4+\alpha}. \end{equation} They obtained a Liouville type result, that is, if $u\in C^4(\mathbb{R}^N\backslash\{0\}) \cap C^0(\mathbb{R}^N)$ with $|x|^{\alpha}\Delta u\in C^0(\mathbb{R}^N)$ is a nonnegative radial solution to (\ref{Pl}), then $u\equiv 0$ in $\mathbb{R}^N$ provided $1<p<p_s$. 
Subsequently, Huang and Wang in \cite{HW20} gave partial classifications of positive radial solutions for problem (\ref{Pl}) with $p=p_s$ and $l=-\alpha$; see also \cite{Ya21} for a more general case. The method of \cite{HW20} is to make use of the transformation $v(t)=|x|^{\frac{N-4+\alpha}{2}}u(|x|)$, $t=-\ln |x|$, which changes problem (\ref{Pl}) into the following fourth-order ODE \begin{equation*} v^{(4)}-\frac{(N-2)^2+(2-\alpha)^2}{2}v''+\frac{(N-4+\alpha)^2(N-\alpha)^2}{16}v=v^{p}, \quad \mbox{in} \quad \mathbb{R}. \end{equation*} Equation (\ref{Pl}) is related to the H\'{e}non-Lane-Emden system \begin{eqnarray*} \left\{ \arraycolsep=1.5pt \begin{array}{ll} -\Delta u=|x|^{-\alpha}v^q, \quad &\mbox{in}\quad \mathbb{R}^N,\\[2mm] -\Delta v=|x|^{l}u^p,\quad &\mbox{in}\quad \mathbb{R}^N, \end{array} \right. \end{eqnarray*} with $q=1$. It is well-known that the following critical hyperbola plays an important role in existence results \begin{equation*} \frac{N-\alpha}{q+1}+\frac{N+l}{p+1}=N-2. \end{equation*} More precisely, Bidaut-Veron and Giacomini in \cite{BG10} have shown that if $N\geq 3$ and $\alpha, -l<2$, the above system admits a positive classical radial solution $(u,v)$ continuous at the origin if and only if $(p,q)$ is above or on the critical hyperbola. For more existence and non-existence results, refer to \cite{CH19,CM17,FG14,FKP21,Li21,Li98,Ph15} and the references therein. On the other hand, equation (\ref{Pl}) is closely related to the Caffarelli-Kohn-Nirenberg-type (see \cite{CKN84}; we write (CKN) for short) inequalities \begin{equation*} \int_{\mathbb{R}^N}|x|^{\alpha}|\Delta u|^2 dx \geq C\left(\int_{\mathbb{R}^N}|x|^{l}|u|^{p} dx\right)^{\frac{2}{p}}, \quad \mbox{for any}\quad u\in C^\infty_c(\mathbb{R}^N). 
\end{equation*} Inspired by \cite{GWW20}, and by using the (CKN) inequalities, we give a brief proof of the classification of positive radial solutions for problem (\ref{Pl}) with $l=-\alpha$ and $p=p_s$ which is different from \cite{HW20}. Firstly, we are mainly interested in a class of weighted higher-order (CKN) inequalities of the form \begin{equation}\label{Pi} \int_{\mathbb{R}^N}|x|^{\alpha}|\Delta u|^2 dx \geq S(N,\alpha)\left(\int_{\mathbb{R}^N}|x|^{-\alpha}|u|^{p^*_{\alpha}} dx\right)^{\frac{2}{p^*_{\alpha}}}, \quad \mbox{for any}\quad u\in C^\infty_c(\mathbb{R}^N), \end{equation} for some positive constant $S(N,\alpha)$, where $N\geq 3$ and \begin{equation}\label{capc} 4-N<\alpha<2,\quad p^*_{\alpha}:=\frac{2(N-\alpha)}{N-4+\alpha}. \end{equation} This problem generalizes the well-known high order Sobolev inequality \begin{equation}\label{bcesi} \int_{\mathbb{R}^N}|\Delta u|^2\geq S_2\left(\int_{\mathbb{R}^N}|u|^{\frac{2N}{N-4}}\right)^\frac{N-4}{N} \end{equation} for all $u\in D_0^{2,2}(\mathbb{R}^N)$ where $D_0^{2,2}(\mathbb{R}^N)=\{u\in L^{\frac{2N}{N-4}}(\mathbb{R}^N): \Delta u\in L^2(\mathbb{R}^N)\}$. The Euler-Lagrange equation associated to (\ref{bcesi}) is \begin{equation}\label{becbbb} \Delta^2 u=|u|^{\frac{8}{N-4}}u\quad \mbox{in}\quad \mathbb{R}^N. \end{equation} Smooth positive solutions to (\ref{becbbb}) have been completely classified in \cite{EFJ90}, where the authors proved that these solutions are given by \begin{equation*} W_{z,\lambda}(x)=[(N-4)(N-2)N(N+2)]^{\frac{N-4}{8}}\left(\frac{\lambda}{1+\lambda^2|x-z|^2}\right)^{\frac{N-4}{2}}, \end{equation*} with $\lambda>0$ and $z\in\mathbb{R}^N$ and they are extremal functions for (\ref{bcesi}). 
Coming back to (\ref{Pi}), we define $D^{2,2}_\alpha(\mathbb{R}^N)$ as the completion of $C^\infty_c(\mathbb{R}^N)$ with the inner product \begin{equation}\label{defd22i} \langle\phi,\varphi\rangle_\alpha=\int_{\mathbb{R}^N}|x|^{\alpha}\Delta \phi\Delta \varphi dx, \end{equation} and the norm $\|\phi\|_{D^{2,2}_\alpha(\mathbb{R}^N)}=\langle\phi,\phi\rangle^{1/2}_\alpha$. Define also $L^{p^*_{\alpha}}_{\alpha}(\mathbb{R}^N)$ the space of functions $\phi$ such that $\int_{\mathbb{R}^N}|x|^{-\alpha}|\phi|^{p^*_{\alpha}} dx<\infty$ with the norm $\|\phi\|_{L^{p^*_{\alpha}}_{\alpha}(\mathbb{R}^N)}=(\int_{\mathbb{R}^N}|x|^{-\alpha}|\phi|^{p^*_{\alpha}} dx)^{1/p^*_{\alpha}}$. Therefore, (\ref{Pi}) can be stated as that the embedding $D^{2,2}_\alpha(\mathbb{R}^N)\hookrightarrow L^{p^*_{\alpha}}_{\alpha}(\mathbb{R}^N)$ is continuous. The best constant in (\ref{Pi}) is given by \begin{equation}\label{defbcsg} S(N,\alpha)=\inf_{u\in D^{2,2}_\alpha(\mathbb{R}^N)\backslash\{0\}}\frac{\int_{\mathbb{R}^N}|x|^{\alpha}|\Delta u|^2 dx}{\left(\int_{\mathbb{R}^N}|x|^{-\alpha}|u|^{p^*_{\alpha}} dx\right)^{\frac{2}{p^*_{\alpha}}}}. \end{equation} In this paper, we just consider the radial extremal functions to (CKN) inequality, so we define \[ D^{2,2}_{\alpha,rad}(\mathbb{R}^N):=\{u\ :\ u(x)=u(|x|), u\in D^{2,2}_\alpha(\mathbb{R}^N)\} \] and \begin{equation}\label{defbcs} S^{rad}(N,\alpha):=\inf_{u\in D^{2,2}_{\alpha,rad}(\mathbb{R}^N)\backslash\{0\}}\frac{\int_{\mathbb{R}^N}|x|^{\alpha}|\Delta u|^2 dx}{\left(\int_{\mathbb{R}^N}|x|^{-\alpha}|u|^{p^*_{\alpha}} dx\right)^{\frac{2}{p^*_{\alpha}}}}. \end{equation} We will give the explicit forms for all maximizers and the exact best constant for $S^{rad}(N,\alpha)$ as the following: \begin{theorem}\label{thmbcm} Assume that $N\geq 3$, $4-N<\alpha<2$. 
We have \begin{equation*} S^{rad}(N,\alpha)=\left(\frac{2-\alpha}{2}\right)^{\frac{4N-4-2\alpha}{N-4}} \left(\frac{2\pi^{\frac{N}{2}}}{\Gamma(\frac{N}{2})}\right)^{\frac{4-2\alpha}{N-\alpha}}C\left(\frac{2N-2\alpha}{2-\alpha}\right), \end{equation*} where $C(M)=(M-4)(M-2)M(M+2)\left[\Gamma^2(\frac{M}{2})/(2\Gamma(M))\right]^{\frac{4}{M}}$. Moreover the extremal functions which achieve $S^{rad}(N,\alpha)$ in (\ref{defbcs}) are unique (up to scaling) and given by \begin{equation}\label{bcm} V_{\lambda,\alpha}(x)=\frac{A\lambda^{\frac{N-4+\alpha}{2}}}{(1+\lambda^{2-\alpha}|x|^{2-\alpha})^{\frac{N-4+\alpha}{2-\alpha}}}, \end{equation} for all $A\in\mathbb{R}\backslash\{0\}$ and $\lambda>0$. \end{theorem} It is well-known that Euler-Lagrange equation of (\ref{Pi}), up to scaling, is given by \begin{equation}\label{Pwh} \Delta(|x|^{\alpha}\Delta u)=|x|^{-\alpha} |u|^{p^*_{\alpha}-2}u,\quad \mbox{in}\quad \mathbb{R}^N. \end{equation} Therefore, as the direct consequence of Theorem \ref{thmbcm}, we obtain \begin{corollary}\label{thmpwh} Assume that $N\geq 3$, $4-N<\alpha<2$. Then equation (\ref{Pwh}) has a unique (up to scaling) positive radial solution of the form \begin{equation*} U_{\lambda,\alpha}(x)=\frac{C_{N,\alpha}\lambda^{\frac{N-4+\alpha}{2}}}{(1+\lambda^{2-\alpha}|x|^{2-\alpha})^\frac{N-4+\alpha}{2-\alpha}}, \end{equation*} with $\lambda>0$, where $C_{N,\alpha}=\left[(N-4+\alpha)(N-2)(N-\alpha)(N+2-2\alpha)\right]^{\frac{N-4+\alpha}{8-4\alpha}}$. \end{corollary} Inspired by \cite{GGN13}, then we concern the linearized problem related to (\ref{Pwh}) at the function $U_{1,\alpha}$. This leads to study the problem \begin{equation}\label{Pwhl} \Delta(|x|^{\alpha}\Delta v)=(p^*_{\alpha}-1)|x|^{-\alpha} U_{1,\alpha}^{p^*_{\alpha}-2}v \quad \mbox{in}\quad \mathbb{R}^N, \quad v\in D^{2,2}_\alpha(\mathbb{R}^N). \end{equation} Next theorem characterizes all the solutions to (\ref{Pwhl}). \begin{theorem}\label{thmpwhl} Assume that $N\geq 3$, $4-N<\alpha<2$. 
If $\alpha$ is not a negative even integer, then the space of solutions of (\ref{Pwhl}) has dimension $1$ and is spanned by \begin{equation*} Z_0(x)=\frac{1-|x|^{2-\alpha}}{(1+|x|^{2-\alpha})^\frac{N-2}{2-\alpha}}, \end{equation*} where $Z_0\sim\frac{\partial U_{\lambda,\alpha}}{\partial \lambda}|_{\lambda=1}$, and in this case we say $U_{1,\alpha}$ is non-degenerate. Otherwise, if $\alpha=-2(k-1)$ for some $k\in\mathbb{N}^+$, then the space of solutions of (\ref{Pwhl}) has dimension $1+\frac{(N+2k-2)(N+k-3)!}{(N-2)!k!}$ and is spanned by \begin{equation*} Z_0(x)=\frac{1-|x|^{2-\alpha}}{(1+|x|^{2-\alpha})^\frac{N-2}{2-\alpha}},\quad Z_{k,i}(x)=\frac{|x|^k\Psi_{k,i}(x)}{(1+|x|^{2-\alpha})^\frac{N-2}{2-\alpha}}, \end{equation*} where $\{\Psi_{k,i}\}$, $i=1,\ldots,\frac{(N+2k-2)(N+k-3)!}{(N-2)!k!}$, form a basis of $\mathbb{Y}_k(\mathbb{R}^N)$, the space of all homogeneous harmonic polynomials of degree $k$ in $\mathbb{R}^N$. \end{theorem} \begin{remark}\label{rem:exf}\rm The key step of the proofs for Theorems \ref{thmbcm} and \ref{thmpwhl} is the change of variable $r\mapsto r^{\frac{2}{2-\alpha}}$, i.e. we set $v(s)=u(r)$ and $r=s^{\frac{2}{2-\alpha}}$, which was used in \cite{CG10} in a different context, see also \cite[Theorem A.1]{GGN13}. It is a surprising thing that we only need to suppose $N\geq 3$ when dealing with the weighted fourth-order Hardy-H\'{e}non equation. Indeed, when we deal with the minimizers for $S^{rad}(N,\alpha)$ in (\ref{defbcs}), the only fact we have used is that \begin{equation*} \int^\infty_0\left[v''(s)+\frac{M-1}{s}v'(s)\right]^2 s^{M-1}ds \geq C(M)\left(\int^\infty_0|v(s)|^{\frac{2M}{M-4}}s^{M-1}ds\right)^{\frac{M-4}{M}}, \end{equation*} for all $v\in C^2_c(\mathbb{R})\backslash\{0\}$ satisfying $\int^\infty_0\left[v''(s)+\frac{M-1}{s}v'(s)\right]^2 s^{M-1}ds<\infty$, where $M=\frac{2N-2\alpha}{2-\alpha}>4$, i.e. $4-N<\alpha<2$ which requires $N>2$, or $2<\alpha<4-N$ which shows $N=1$ and $\alpha\in(2,3)$. 
If $N=1$, then $\alpha\in(2,3)$ indicates that $\alpha$ can not be a negative even integer, thus we only deal with the case $N\geq 3$ and $4-N<\alpha<2$. It is worth to mention that $S(N,\alpha)\leq S^{rad}(N,\alpha)$, and $S(N,\alpha)$ in (\ref{defbcsg}) might be zero for some special $\alpha$ (see \cite{CaM11}). Furthermore, when $S(N,\alpha)>0$ it may also be achieved by non-radial functions and thus (\ref{Pwh}) might exist non-radial positive solutions. In fact, let $\alpha=-2$, $N\geq 8$ be even and $\mathbb{R}^{N}=\mathbb{R}^{\frac{N}{2}}\times \mathbb{R}^{\frac{N}{2}}$, $x=(x',x'')$ with $x'\in\mathbb{R}^{\frac{N}{2}}$ and $x''\in\mathbb{R}^{\frac{N}{2}}$, then for any $a\in\mathbb{R}$ the functions \begin{equation}\label{defbrs} v(x)=v(|x'|,|x''|)=C_{N,-2}(1+|x|^4-2a(|x'|^2-|x''|^2)+a^2)^{-\frac{N-6}{4}}, \end{equation} form a branch of solutions to (\ref{Pwh}) bifurcating from $U_{1,-2}$. \end{remark} From Theorem \ref{thmpwhl}, we know that $U_{1,\alpha}$ is non-degenerate when $\alpha$ is not a negative even integer. By this result, we can consider several simple applications of Theorem \ref{thmpwhl}. Enlightened by Brezis and Lieb \cite{BrE85}, the first thing we care about is the remainder term of (CKN) inequality (\ref{Pi}) in radial space $D^{2,2}_{\alpha,rad}(\mathbb{R}^N)$. The Sobolev inequality states that there exists constant $\mathcal{S}$ depending only on $N$ and $s$ such that \begin{equation}\label{bsics} \|u\|^2_{D^{s,2}_0(\mathbb{R}^N)}\geq \mathcal{S}\|u\|^2_{L^{\frac{2N}{N-2s}}(\mathbb{R}^N)},\quad \mbox{for all}\quad u\in D_0^{s,2}(\mathbb{R}^N), \end{equation} where $0<s<N/2$ and $D_0^{s,2}(\mathbb{R}^N)$ is the space of all tempered distributions $u$ such that \[\widehat{u}\in L^1_{loc}(\mathbb{R}^N)\quad \mbox{and}\quad \|u\|^2_{D^{s,2}_0(\mathbb{R}^N)}:=\int_{\mathbb{R}^N}|\xi|^s|\widehat{u}|^2<\infty.\] Here, as usual, $\widehat{u}$ denotes the (distributional) Fourier transform of $u$. 
It is well known that the extremal functions of best constant $\mathcal{S}$ are given as the set of functions which, up to translation, dilation and multiplication by a nonzero constant, coincide with $W(x)=(1+|x|^2)^{-(N-2s)/2}$. For $s=1$, Brezis and Lieb \cite{BrE85} asked the question whether a remainder term---proportional to the quadratic distance of the function $u$ to the manifold $\mathcal{M}:=\{c\lambda^{(N-2s)/2}W(\lambda(x-z)): z\in\mathbb{R}^N, c\in\mathbb{R}, \lambda>0\}$---can be added to the right hand side of (\ref{bsics}). This question was answered affirmatively in the case $s=1$ by Bianchi and Egnell \cite{BE91}, and their result was extended later to the case $s=2$ by Lu and Wei \cite{LW00} and the case of an arbitrary even positive integer $N>2s$ in \cite{BWW03}, and the whole interval case $s\in (0,N/2)$ was proved in \cite{CFW13}. Furthermore, R\u{a}dulescu et al. \cite{RSW02} gave the remainder term of Hardy-Sobolev inequality for exponent two. Wang and Willem \cite{wangwil} studied Caffarelli-Kohn-Nirenberg inequalities with remainder terms. Recently, Wei and Wu \cite{WW22} established the stability of the profile decompositions to a special case of the (CKN) inequality and also gave the remainder term. As mentioned above, it is natural to establish (CKN) inequality (\ref{Pi}) with remainder terms in the radial space $D^{2,2}_{\alpha,rad}(\mathbb{R}^N)$ with the help of Theorem \ref{thmpwhl} when $\alpha$ is not a negative even integer, as an analogous result to \cite{LW00}. \begin{theorem}\label{thmprt} Assume $N\geq 3$, and let $4-N<\alpha<2$ be not a negative even integer. 
Then there exists a constant $B=B(N,\alpha)>0$ such that for every $u\in D^{2,2}_{\alpha,rad}(\mathbb{R}^N)$, it holds that \[ \int_{\mathbb{R}^N}|x|^{\alpha}|\Delta u|^2 dx-S^{rad}(N,\alpha)\left(\int_{\mathbb{R}^N}|x|^{-\alpha}|u|^{p^*_{\alpha}} dx\right)^{\frac{2}{p^*_{\alpha}}} \geq B {\rm dist}(u,\mathcal{M}_2)^2, \] where $\mathcal{M}_2=\{cU_{\lambda,\alpha}: c\in\mathbb{R}, \lambda>0\}$ is a two-dimensional manifold, and ${\rm dist}(u,\mathcal{M}_2):=\inf_{\phi\in \mathcal{M}_2}\|\phi-u\|_{D^{2,2}_\alpha(\mathbb{R}^N)}=\inf_{c\in\mathbb{R}, \lambda>0}\|u-cU_{\lambda,\alpha}\|_{D^{2,2}_\alpha(\mathbb{R}^N)}$. \end{theorem} The second thing we want to study is to construct solutions by using the Lyapunov--Schmidt argument, inspired by \cite{AGP99} (and also \cite[Sections 3 and 4]{FS03}). Now, we will establish sufficient conditions on a prescribed weight $h(x)$ on $\mathbb{R}^N$ which guarantee the existence of solutions to the perturbative model problem \begin{equation}\label{Pwhp} \Delta(|x|^{\alpha}\Delta u)=(1+\varepsilon h(x))|x|^{-\alpha} u^{p^*_{\alpha}-1},\quad u>0 \quad \mbox{in}\quad \mathbb{R}^N, \quad u\in D^{2,2}_{\alpha,rad}(\mathbb{R}^N). \end{equation} \begin{theorem}\label{thmpwhp} Assume $N\geq 3$, and let $4-N<\alpha<2$ be not a negative even integer, $h\in L^\infty(\mathbb{R}^N)\cap C(\mathbb{R}^N)$. If $\lim_{|x|\to 0}h(x)=\lim_{|x|\to \infty}h(x)=0$, then equation (\ref{Pwhp}) has at least one solution for any $\varepsilon$ close to zero. \end{theorem} The paper is organized as follows: In Section \ref{sectpmr} we deduce the optimizers of (CKN) inequality and characterize all solutions to the linearized Hardy-H\'{e}non equation (\ref{Pwhl}). In Section \ref{sect:rt}, we study the remainder term of (CKN) inequality (\ref{Pi}) and prove Theorem \ref{thmprt}. 
In Section \ref{sectprp} we investigate the existence of solutions to the related perturbed equation (\ref{Pwhp}) by using the finite-dimensional Lyapunov--Schmidt reduction method and prove Theorem \ref{thmpwhp}. \section{{\bfseries Optimizers of (CKN) inequality and linearized problem}}\label{sectpmr} In this section, we first use a suitable transformation, namely the change of variable $r\mapsto r^{\frac{2}{2-\alpha}}$, related to the Sobolev inequality, to investigate the sharp constants and optimizers of (CKN) inequality (\ref{Pi}) in radial space $D^{2,2}_{\alpha,rad}(\mathbb{R}^N)$. \subsection{Proof of Theorem \ref{thmbcm}.} We follow the arguments in the proof of \cite[Theorem A.1]{GGN13}. Let $u\in D^{2,2}_{\alpha,rad}(\mathbb{R}^N)$. Making the change of variables $v(s)=u(r)$ and $r=s^q$ where $q>0$ will be given later, then we have \begin{equation*} \begin{split} \int_{\mathbb{R}^N}|x|^{\alpha}|\Delta u|^2 dx = & \omega_{N-1}\int^\infty_0 r^\alpha\left[u''(r)+\frac{N-1}{r}u'(r)\right]^2 r^{N-1}dr \\ = & \omega_{N-1} q^{-3}\int^\infty_0\left[v''(s)+\frac{(N-1)q-(q-1)}{s}v'(s)\right]^2 s^{(N-1)q-3(q-1)+q \alpha}ds, \end{split} \end{equation*} where $\omega_{N-1}$ is the surface area of the unit ball of $\mathbb{R}^N$. In order to make use of the Sobolev inequality, we need $(N-1)q-(q-1)=(N-1)q-3(q-1)+q \alpha$ which requires \[q=\frac{2}{2-\alpha}.\] Now, we set \begin{equation}\label{defm} M:=(N-1)q-(q-1)+1=\frac{2(N-\alpha)}{2-\alpha}>4, \end{equation} which implies \begin{equation*} \begin{split} \int^\infty_0 r^\alpha\left[u''(r)+\frac{N-1}{r}u'(r)\right]^2 r^{N-1}dr = & q^{-3}\int^\infty_0\left[v''(s)+\frac{M-1}{s}v'(s)\right]^2 s^{M-1}ds. 
\end{split} \end{equation*} When $M$ is an integer we can use the classical Sobolev inequality (see \cite{Li85-1,Li85-2}) and we get \begin{equation*} \begin{split} \int^\infty_0\left[v''(s)+\frac{M-1}{s}v'(s)\right]^2 s^{M-1}ds \geq & C(M)\left(\int^\infty_0|v(s)|^{\frac{2M}{M-4}}s^{M-1}ds\right)^{\frac{M-4}{M}} \\ = & q^{-\frac{M-4}{M}}C(M)\left(\int^\infty_0|u(r)|^{\frac{2M}{M-4}}r^{\frac{M}{q}-1}dr\right)^{\frac{M-4}{M}}, \end{split} \end{equation*} where $C(M)=\pi^2(M+2)M(M-2)(M-4)\left(\Gamma(M/2)/\Gamma(M)\right)^{\frac{4}{M}}\left(2\pi^{M/2}/\Gamma(M/2)\right)^{-\frac{4}{M}}$ (see \cite[(1.4)]{Va93}). Moreover, even when $M$ is not an integer we readily see that the above inequality remains true. From (\ref{defm}), we deduce that \[\frac{2M}{M-4}=\frac{2(N-\alpha)}{N-4+\alpha}=p^*_{\alpha},\quad \frac{M}{q}-1=N-1-\alpha.\] So we get \begin{equation*} \begin{split} & \int^\infty_0 r^\alpha\left[u''(r)+\frac{N-1}{r}u'(r)\right]^2 r^{N-1}dr\geq q^{-3-\frac{M-4}{M}}C(M)\left(\int^\infty_0r^{-\alpha}|u(r)|^{p^*_{\alpha}}r^{N-1}dr\right)^{\frac{2}{p^*_{\alpha}}}, \end{split} \end{equation*} which proves (\ref{defbcs}) with \begin{equation*} \begin{split} S^{rad}(N,\alpha) = & q^{-3-\frac{M-4}{M}}\omega^{1-\frac{2}{p^*_{\alpha}}}_{N-1}C(M) =\left(\frac{2-\alpha}{2}\right)^{\frac{4N-4-2\alpha}{N-4}} \left(\frac{2\pi^{\frac{N}{2}}}{\Gamma(\frac{N}{2})}\right)^{\frac{4-2\alpha}{N-\alpha}}C\left(\frac{2N-2\alpha}{2-\alpha}\right). \end{split} \end{equation*} Moreover, from the previous inequalities, we also get that the extremal functions are obtained as \begin{equation*} \begin{split} \int^\infty_0\left[v_\nu''(s)+\frac{M-1}{s}v_\nu'(s)\right]^2 s^{M-1}ds=C(M)\left(\int^\infty_0|v_\nu(s)|^{\frac{2M}{M-4}}s^{M-1}ds\right)^{\frac{M-4}{M}}. \end{split} \end{equation*} It is well known that \[v_\nu(s)=A\nu^{\frac{M-4}{2}}(1+\nu^2s^2)^{-\frac{M-4}{2}}\] for all $A\in\mathbb{R}$ and $\nu\in\mathbb{R}^+$, see \cite[Theorem 2.1]{EFJ90}. 
Setting $\nu=\lambda^{1/q}$ and $s=|x|^{1/q}$, then we obtain that all the radial extremal functions of $S^{rad}(N,\alpha)$ have the form \begin{equation}\label{defula} V_{\lambda,\alpha}(x)=\frac{A\lambda^{\frac{N-4+\alpha}{2}}}{(1+\lambda^{2-\alpha}|x|^{2-\alpha})^{\frac{N-4+\alpha}{2-\alpha}}}, \end{equation} for all $A\in\mathbb{R}$ and $\lambda>0$. The proof of Theorem \ref{thmbcm} is now complete. \qed Now, we are going to show the uniqueness of positive radial solutions of equation (\ref{Pwh}). Let $u(x)\in \mathcal{\mathcal{D}}^{2,2}_{\alpha,rad}(\mathbb{R}^N)$ be a positive radial solution of equation (\ref{Pwh}) and $X(s)=u(r)$ where $|x|=r=s^{q}$ and $q=2/(2-\alpha)$, then by simple calculation, (\ref{Pwh}) is equivalent to \begin{equation}\label{PpwhlWe} \begin{split} & X^{(4)}(s)+\frac{2(M-1)}{s}X'''(s)+\frac{(M-1)(M-3)}{s^2}X''(s)-\frac{(M-1)(M-3)}{s^3}X'(s) \\ = & q^{4}|X|^{\frac{8}{M-4}}X,\quad \mbox{in}\quad s\in(0,\infty) \end{split} \end{equation} where $M=\frac{2(N-\alpha)}{2-\alpha}>4$, since $p^*_{\alpha}=\frac{2M}{M-4}$. Then from \cite[Theorem 1.3]{Li98}, we know that equation (\ref{PpwhlWe}) has a unique (up to scalings) positive solution of the form \begin{equation}\label{eqsfe} X(s)=\frac{C_{M,q}\nu^{\frac{M-4}{2}}}{(1+\nu^{2}s^{2})^{\frac{M-4}{2}}}, \end{equation} for some constant $\nu>0$, where $C_{M,q}=\left[q^{-4}(M-4)(M-2)M(M+2)\right]^{\frac{M-4}{8}}$. That is, equation (\ref{Pwh}) has a unique (up to scalings) radial solution of the form \begin{equation}\label{defulae} u(x)=\frac{C_{N,\alpha}\lambda^{\frac{N-4+\alpha}{2}}}{(1+\lambda^{2-\alpha}|x|^{2-\alpha})^\frac{N-4+\alpha}{2-\alpha}}, \end{equation} for some $\lambda>0$, where $C_{N,\alpha}=\left[(N-4+\alpha)(N-2)(N-\alpha)(N+2-2\alpha)\right]^{\frac{N-4+\alpha}{8-4\alpha}}$. Therefore, Corollary \ref{thmpwh} holds. 
Then by using the standard spherical decomposition and taking the changes of variable $r\mapsto r^{\frac{2}{2-\alpha}}$, we can characterize all solutions to the linearized problem (\ref{Pwhl}). \subsection{Proof of Theorem \ref{thmpwhl}.} We follow the arguments in the proof of \cite[Theorem 1.3]{GGN13}, and also \cite[Theorem 2.2]{LW00}. Equation (\ref{Pwhl}) is equivalent to \begin{equation}\label{Pwhlp} \Delta(|x|^{\alpha}\Delta v)=\frac{(p^*_{\alpha}-1)C_{N,\alpha}^{p^*_{\alpha}-2}}{|x|^{\alpha} (1+|x|^{2-\alpha})^4}v \quad \mbox{in}\quad \mathbb{R}^N, \quad v\in D^{2,2}_\alpha(\mathbb{R}^N). \end{equation} We will decompose the fourth-order equation (\ref{Pwhlp}) into a system of two second-order equations. Firstly, we decompose $v$ as follows: \begin{equation}\label{defvd} v(r,\theta)=\sum^{\infty}_{k=0}\phi_k(r)\Psi_k(\theta),\quad \mbox{where}\quad r=|x|,\quad \theta=\frac{x}{|x|}\in \mathbb{S}^{N-1}, \end{equation} and \begin{equation*} \phi_k(r)=\int_{\mathbb{S}^{N-1}}v(r,\theta)\Psi_k(\theta)d\theta. \end{equation*} Here $\Psi_k(\theta)$ denotes the $k$-th spherical harmonic, i.e., it satisfies \begin{equation}\label{deflk} -\Delta_{\mathbb{S}^{N-1}}\Psi_k=\lambda_k \Psi_k, \end{equation} where $\Delta_{\mathbb{S}^{N-1}}$ is the Laplace-Beltrami operator on $\mathbb{S}^{N-1}$ with the standard metric and $\lambda_k$ is the $k$-th eigenvalue of $-\Delta_{\mathbb{S}^{N-1}}$. It is well known that $\lambda_k=k(N-2+k)$, $k=0,1,2,\ldots$ whose multiplicity is \[\frac{(N+2k-2)(N+k-3)!}{(N-2)!k!}\] and that \[{\rm Ker}(\Delta_{\mathbb{S}^{N-1}}+\lambda_k)=\mathbb{Y}_k(\mathbb{R}^N)|_{\mathbb{S}^{N-1}},\] where $\mathbb{Y}_k(\mathbb{R}^N)$ is the space of all homogeneous harmonic polynomials of degree $k$ in $\mathbb{R}^N$. It is standard that $\lambda_0=0$ and the corresponding eigenfunction of (\ref{deflk}) is the constant function. 
The second eigenvalue $\lambda_1=N-1$ and the corresponding eigenfunctions of (\ref{deflk}) are $\frac{x_i}{|x|}$, $i=1,\ldots,N$. Moreover, let \begin{equation*} \psi_k(r)=-\int_{\mathbb{S}^{N-1}}|x|^\alpha\Delta v(r,\theta)\Psi_k(\theta) d\theta, \quad \mbox{i.e.,}\quad r^{-\alpha}\psi_k(r)=-\int_{\mathbb{S}^{N-1}}\Delta v(r,\theta)\Psi_k(\theta)d\theta. \end{equation*} It is known that \begin{equation}\label{Ppwhl2deflklw} \begin{split} \Delta (\varphi_k(r)\Psi_k(\theta)) = & \Psi_k\left(\varphi''_k+\frac{N-1}{r}\varphi'_k\right)+\frac{\varphi_k}{r^2}\Delta_{\mathbb{S}^{N-1}}\Psi_k \\ = & \Psi_k\left(\varphi''_k+\frac{N-1}{r}\varphi'_k-\frac{\lambda_k}{r^2}\varphi_k\right). \end{split} \end{equation} Therefore, by standard regularity theory, the function $v$ is a solution of (\ref{Pwhlp}) if and only if $(\phi_k,\psi_k)\in \mathcal{C}\times \mathcal{C}$ is a classical solution of the system \begin{eqnarray}\label{p2c} \left\{ \arraycolsep=1.5pt \begin{array}{ll} \phi''_k+\frac{N-1}{r}\phi'_k-\frac{\lambda_k}{r^2}\phi_k+\frac{\psi_k}{r^{\alpha}}=0 \quad \mbox{in}\quad r\in(0,\infty),\\[3mm] \psi''_k+\frac{N-1}{r}\psi'_k-\frac{\lambda_k}{r^2}\psi_k+\frac{(p^*_{\alpha}-1)C_{N,\alpha}^{p^*_{\alpha}-2}}{r^{\alpha} (1+r^{2-\alpha})^4}\phi_k=0 \quad \mbox{in}\quad r\in(0,\infty),\\[3mm] \phi'_k(0)=\psi'_k(0)=0 \quad\mbox{if}\quad k=0,\quad \mbox{and}\quad \phi_k(0)=\psi_k(0)=0 \quad\mbox{if}\quad k\geq 1, \end{array} \right. \end{eqnarray} where $\mathcal{C}:=\{\omega\in C^2([0,\infty))| \int^\infty_0 r^\alpha |\omega''(r)+\frac{N-1}{r}\omega'(r)|^2 r^{N-1} dr<\infty\}$. 
Take the same variation as in the proof of Theorem \ref{thmbcm}, $|x|=r=s^q$ where $q=2/(2-\alpha)$ and let \begin{equation}\label{p2txy} X_k(s)=\phi_k(r),\quad Y_k(s)=q^2\psi_k(r), \end{equation} that transforms (\ref{p2c}) into the system \begin{eqnarray}\label{p2t} \left\{ \arraycolsep=1.5pt \begin{array}{ll} X''_k+\frac{M-1}{s}X'_k-\frac{\lambda_kq^2}{s^2}X_k+Y_k=0 \quad \mbox{in}\quad s\in(0,\infty),\\[3mm] Y''_k+\frac{M-1}{s}Y'_k-\frac{\lambda_kq^2}{s^2}Y_k+\frac{(M+4)(M-2)M(M+2)}{(1+s^2)^4}X_k=0 \quad \mbox{in}\quad s\in(0,\infty),\\[3mm] X'_k(0)=Y'_k(0)=0 \quad\mbox{if}\quad k=0,\quad \mbox{and}\quad X_k(0)=Y_k(0)=0 \quad\mbox{if}\quad k\geq 1, \end{array} \right. \end{eqnarray} in $(X_k,Y_k)\in \widetilde{\mathcal{C}}\times \widetilde{\mathcal{C}}$, where $\widetilde{\mathcal{C}}:=\{\omega\in C^2([0,\infty))| \int^\infty_0 |\omega''(s)+\frac{M-1}{s}\omega'(s)|^2 s^{M-1} ds<\infty\}$ and \begin{equation} M=\frac{2(N-\alpha)}{2-\alpha}>4. \end{equation} Here we have used the fact \begin{equation*} q^4(p^*_{\alpha}-1)C_{N,\alpha}^{p^*_{\alpha}-2}=\left[(M-4)(M-2)M(M+2)\right]\left(\frac{2M}{M-4}-1\right)=(M+4)(M-2)M(M+2). \end{equation*} Fix $M$, then let us now consider the following eigenvalue problem \begin{eqnarray}\label{p2te} \left\{ \arraycolsep=1.5pt \begin{array}{ll} X''+\frac{M-1}{s}X'-\frac{\mu}{s^2}X+Y=0 \quad \mbox{in}\quad s\in(0,\infty),\\[3mm] Y''+\frac{M-1}{s}Y'-\frac{\mu}{s^2}Y+\frac{(M+4)(M-2)M(M+2)}{(1+s^2)^4}X=0 \quad \mbox{in}\quad s\in(0,\infty). \end{array} \right. \end{eqnarray} When $M$ is an integer we can study (\ref{p2te}) as the linearized problem of the equation \begin{equation*} \Delta^2 U=(M-4)(M-2)M(M+2) U^{\frac{M+4}{M-4}},\quad U>0 \quad\mbox{in}\quad \mathbb{R}^M, \end{equation*} around the standard solution $U(x)=(1+|x|^2)^{-\frac{M-4}{2}}$ (note that we always have $M>4$). 
In this case, as in \cite[Theorem 2.2]{LW00}, we have that \begin{equation}\label{ptev} \mu_0=0; \quad \mu_1=M-1\quad \mbox{and}\quad X_0(s)=\frac{1-s^2}{(1+s^2)^{\frac{M-2}{2}}}; \quad X_1(s)=\frac{s}{(1+s^2)^{\frac{M-2}{2}}}. \end{equation} Moreover, even $M$ is not an integer we readily see that (\ref{ptev}) remains true. Therefore, we can conclude that (\ref{p2t}) has nontrivial solutions if and only if \begin{equation*} q^2\lambda_k\in \{0,M-1\},\quad \mbox{i.e.,}\quad \frac{4\lambda_k}{(2-\alpha)^2}\in \left\{0,\frac{2N-2-\alpha}{2-\alpha}\right\}, \end{equation*} where $\lambda_k=k(N-2+k)$, $k\in\mathbb{N}$. If $4\lambda_k/(2-\alpha)^2=0$ then $k=0$. Moreover, if \[\frac{4\lambda_k}{(2-\alpha)^2}=\frac{2N-2-\alpha}{2-\alpha},\] then \begin{equation*} \left[\alpha+2(k-1)\right]\left[\alpha-2(N+k-1)\right]=0, \end{equation*} we obtain $\alpha=-2(k-1)$ since $4-N<\alpha<2$. Turning back to (\ref{p2c}) we obtain the solutions \begin{equation}\label{pyf} \phi_0(r)=\frac{1-r^{2-\alpha}}{(1+r^{2-\alpha})^{\frac{N-2}{2-\alpha}}} \quad\mbox{if}\quad \alpha\neq-2(k-1),\quad \forall k\in\mathbb{N}^+, \end{equation} and \begin{equation}\label{pye} \phi_0(r)=\frac{1-r^{2-\alpha}}{(1+r^{2-\alpha})^{\frac{N-2}{2-\alpha}}},\quad \phi_k(r)=\frac{r^k}{(1+r^{2-\alpha})^{\frac{N-2}{2-\alpha}}} \quad\mbox{if}\quad \alpha=-2(k-1), \end{equation} for some $k\in\mathbb{N}^+$. That is, if $\alpha$ is not a negative even integer, then the space of solutions of (\ref{Pwhlp}) has dimension $1$ and is spanned by \begin{equation*} Z_0(x)=\frac{1-|x|^{2-\alpha}}{(1+|x|^{2-\alpha})^\frac{N-2}{2-\alpha}}. 
\end{equation*} If $\alpha=-2(k-1)$ for some $k\in\mathbb{N}^+$, then the space of solutions of (\ref{Pwhlp}) has dimension $1+\frac{(N+2k-2)(N+k-3)!}{(N-2)!k!}$ and is spanned by \begin{equation*} Z_0(x)=\frac{1-|x|^{2-\alpha}}{(1+|x|^{2-\alpha})^\frac{N-2}{2-\alpha}},\quad Z_{k,i}(x)=\frac{|x|^k\Psi_{k,i}(x)}{(1+|x|^{2-\alpha})^\frac{N-2}{2-\alpha}}, \end{equation*} where $\{\Psi_{k,i}\}$, $i=1,\ldots,\frac{(N+2k-2)(N+k-3)!}{(N-2)!k!}$, form a basis of $\mathbb{Y}_k(\mathbb{R}^N)$, the space of all homogeneous harmonic polynomials of degree $k$ in $\mathbb{R}^N$. The proof of Theorem \ref{thmpwhl} is now complete. \qed \section{{\bfseries Remainder terms of (CKN) inequality}}\label{sect:rt} In this section, we consider the remainder terms of (CKN) inequality (\ref{Pi}) in radial space $D^{2,2}_{\alpha,rad}(\mathbb{R}^N)$ and give the proof of Theorem \ref{thmprt}. We follow the arguments as those in \cite{BE91}, and also \cite{LW00}. We define $u_\lambda(x):=\lambda^{\frac{N-4+\alpha}{2}}u(\lambda x)$ for all $\lambda>0$. Thus for simplicity of notations, we write $U_\lambda$ instead of $U_{\lambda,\alpha}$ and $S_\alpha$ instead of $S^{rad}(N,\alpha)$ if there is no possibility of confusion. Moreover, in order to shorten formulas we denote \begin{equation}\label{def:norm} \begin{split} \|\varphi\|: & =\|\varphi\|_{D^{2,2}_\alpha(\mathbb{R}^N)}=\left(\int_{\mathbb{R}^N}|x|^{\alpha}|\Delta \varphi|^2 dx\right)^{1/2}, \quad \mbox{for}\quad \varphi\in D^{2,2}_\alpha(\mathbb{R}^N), \\ \|\varphi\|_*: & =\|\varphi\|_{L^{p^*_\alpha}_\alpha(\mathbb{R}^N)}= \left(\int_{\mathbb{R}^N}|x|^{-\alpha}|\varphi|^{p^*_{\alpha}} dx\right)^{1/p^*_{\alpha}},\quad \mbox{for}\quad \varphi\in L^{p^*_\alpha}_\alpha(\mathbb{R}^N). \end{split} \end{equation} Consider the eigenvalue problem \begin{equation}\label{Pwhlep} \Delta(|x|^{\alpha}\Delta v)=\mu|x|^{-\alpha} U_{\lambda}^{p^*_{\alpha}-2}v \quad \mbox{in}\quad \mathbb{R}^N, \quad v\in D^{2,2}_\alpha(\mathbb{R}^N). 
\end{equation} By a simple scaling argument, we have that $\mu$ does not depend on $\lambda$. Moreover, from Theorem \ref{thmpwhl} we have: \begin{proposition}\label{propep} Assume $N\geq 3$, and let $4-N<\alpha<2$ be not a negative even integer. Let $\mu_i$, $i=1,2,\ldots,$ denote the eigenvalues of (\ref{Pwhlep}) in increasing order. Then $\mu_1=1$ is simple with eigenfunction $U_\lambda$ and $\mu_2=p^*_{\alpha}-1$ with the corresponding one-dimensional eigenfunction space spanned by $\{\frac{\partial U_\lambda}{\partial \lambda}\}$. Furthermore, the eigenvalues do not depend on $\lambda$, and $\mu_3>\mu_2$. \end{proposition} The main ingredient in the proof of Theorem \ref{thmprt} is contained in the lemma below, where the behavior near $\mathcal{M}_2=\{cU_{\lambda,\alpha}: c\in\mathbb{R}, \lambda>0\}$ is studied. \begin{lemma}\label{lemma:rtnm2b} Assume $N\geq 3$, and let $4-N<\alpha<2$ be not a negative even integer. Then for any sequence $\{u_n\}\subset D^{2,2}_{\alpha,rad}(\mathbb{R}^N)\backslash \mathcal{M}_2$ such that $\inf_n\|u_n\|>0$ and ${\rm dist}(u_n,\mathcal{M}_2)\to 0$, we have \begin{equation}\label{rtnmb} \liminf_{n\to\infty}\frac{\|u_n\|^2-S_\alpha\|u_n\|^2_*}{{\rm dist}(u_n,\mathcal{M}_2)^2}\geq 1-\frac{\mu_2}{\mu_3}, \end{equation} where $\mu_2=p^*_\alpha-1<\mu_3$ are given as in Proposition \ref{propep}. \end{lemma} \begin{proof} Let $d_n:={\rm dist}(u_n,\mathcal{M}_2)=\inf_{c\in\mathbb{R}, \lambda>0}\|u_n-cU_\lambda\|\to 0$. We know that for each $u_n\in D^{2,2}_{\alpha,rad}(\mathbb{R}^N)$, there exist $c_n\in\mathbb{R}$ and $\lambda_n>0$ such that $d_n=\|u_n-c_nU_{\lambda_n}\|$. In fact, \begin{equation}\label{ikeda} \begin{split} \|u_n-cU_\lambda\|^2 = & \|u_n\|^2+c^2\|U_\lambda\|^2-2c\langle u_n,U_\lambda\rangle_\alpha \\ \geq & \|u_n\|^2+c^2\|U_1\|^2-2|c|\|u_n\| \|U_1\|. 
\end{split} \end{equation} Thus the minimizing sequence of $d_n^2$, say $\{c_{n,m},\lambda_{n,m}\}$, must satisfy $|c_{n,m}|\leq C$ which means $\{c_{n,m}\}$ is bounded. On the other hand, \begin{equation*} \begin{split} \left|\int_{|\lambda x|\leq \rho}|x|^{\alpha}\Delta u_n\Delta U_\lambda dx\right| \leq & \int_{|y|\leq \rho}|y|^{\alpha}|\Delta (u_n)_{\frac{1}{\lambda}}(y)||\Delta U_1(y)| dy \\ \leq & \|u_n\|\left(\int_{|y|\leq \rho}|y|^{\alpha}|\Delta U_1|^2 dy\right)^{1/2} \\ = & o_\rho(0) \end{split} \end{equation*} as $\rho\to 0$ which is uniform for $\lambda>0$, where $(u_n)_{\frac{1}{\lambda}}(y)=\lambda^{-\frac{N-4+\alpha}{2}}u_n(\lambda^{-1}y)$, and \begin{equation*} \begin{split} \left|\int_{|\lambda x|\geq \rho}|x|^{\alpha}\Delta u_n\Delta U_\lambda dx \right| \leq & \|U_1\|\left(\int_{|x|\geq \frac{\rho}{\lambda}}|x|^{\alpha}|\Delta u_n|^2 dx\right)^{1/2} = o_\lambda(0) \end{split} \end{equation*} as $\lambda\to 0$ for any fixed $\rho>0$. By taking $\lambda\to 0$ and then $\rho\to 0$, we obtain \[\left|\int_{\mathbb{R}^N}|x|^{\alpha}\Delta u_n\Delta U_\lambda dx\right| \to 0\quad \mbox{as}\quad \lambda\to 0.\] Moreover, by the explicit form of $U_\lambda$ we have \begin{equation*} \begin{split} \left|\int_{|\lambda x|\leq R}|x|^{\alpha}\Delta u_n\Delta U_\lambda dx \right| \leq & \|U_1\|\left(\int_{| x|\leq \frac{R}{\lambda}}|x|^{\alpha}|\Delta u_n|^2 dx\right)^{1/2} = o_\lambda(0) \end{split} \end{equation*} as $\lambda\to +\infty$ for any fixed $R>0$ and \begin{equation*} \begin{split} \left|\int_{|\lambda x|\geq R}|x|^{\alpha}\Delta u_n\Delta U_\lambda dx\right| \leq & \int_{|y|\geq R}|y|^{\alpha}|\Delta (u_n)_{\frac{1}{\lambda}}(y)||\Delta U_1(y)| dy \\ \leq & \|u_n\|\left(\int_{|y|\geq R}|y|^{\alpha}|\Delta U_1|^2 dy\right)^{1/2} = o_R(0) \end{split} \end{equation*} as $R\to +\infty$ which is uniform for $\lambda>0$. 
Thus, by taking first $\lambda\to +\infty$ and then $R\to +\infty$, we also obtain \[\left|\int_{\mathbb{R}^N}|x|^{\alpha}\Delta u_n\Delta U_\lambda dx\right| \to 0\quad \mbox{as}\quad \lambda\to +\infty.\] It follows from (\ref{ikeda}) and $d_n\to 0$, $\inf_n\|u_n\|>0$ that the minimizing sequence $\{c_{n,m},\lambda_{n,m}\}$ must satisfy $|\lambda_{n,m}|\leq C$ which means $\{\lambda_{n,m}\}$ is bounded. Thus for each $u_n\in D^{2,2}_{\alpha,rad}(\mathbb{R}^N)$, $d_n^2$ can be attained by some $c_n\in\mathbb{R}$ and $\lambda_n>0$. Since $\mathcal{M}_2$ is a two-dimensional manifold embedded in $D^{2,2}_{\alpha,rad}(\mathbb{R}^N)$, that is \[ (c,\lambda)\in\mathbb{R}\times\mathbb{R}_+\to cU_\lambda\in D^{2,2}_{\alpha,rad}(\mathbb{R}^N), \] then the tangent space at $(c_n,\lambda_n)$ is given by \[ T_{c_n U_{\lambda_n}}\mathcal{M}_2={\rm Span}\left\{U_{\lambda_n}, \frac{\partial U_\lambda}{\partial \lambda}\Big|_{\lambda=\lambda_n}\right\}, \] and we must have that $(u_n-c_n U_{\lambda_n})$ is perpendicular to $T_{c_n U_{\lambda_n}}\mathcal{M}_2$. Proposition \ref{propep} implies that \begin{equation}\label{epkeyibbg} \mu_3\int_{\mathbb{R}^N}|x|^{-\alpha}U_{\lambda_n}^{p^*_{\alpha}-2}(u_n-c_n U_{\lambda_n})^2 \leq \int_{\mathbb{R}^N}|x|^\alpha |\Delta (u_n-c_n U_{\lambda_n})|^2. \end{equation} Let $u_n=c_n U_{\lambda_n}+d_n w_n$, then $w_n$ is perpendicular to $T_{c_n U_{\lambda_n}}\mathcal{M}_2$, $\|w_n\|=1$ and we can rewrite (\ref{epkeyibbg}) as follows: \begin{equation}\label{epkeyibbb} \int_{\mathbb{R}^N}|x|^{-\alpha}U_{\lambda_n}^{p^*_{\alpha}-2}w_n^2\leq \frac{1}{\mu_3}. 
\end{equation} Furthermore, \begin{equation*} \|u_n\|^2=d_n^2+c_n^2\|U_{\lambda_n}\|^2, \end{equation*} and by using Taylor's expansion, it holds that \begin{equation}\label{epkeyiybb} \begin{split} \int_{\mathbb{R}^N}|x|^{-\alpha}|u_n|^{p^*_{\alpha}} = & \int_{\mathbb{R}^N}|x|^{-\alpha}|c_n U_{\lambda_n}+d_nw_n|^{p^*_{\alpha}} \\ = & |c_n|^{p^*_{\alpha}}\int_{\mathbb{R}^N}|x|^{-\alpha}U_{\lambda_n}^{p^*_{\alpha}} +d_n p^*_{\alpha}|c_n|^{p^*_{\alpha}-1}\int_{\mathbb{R}^N}|x|^{-\alpha}U_{\lambda_n}^{p^*_{\alpha}-1}w_n \\ & +\frac{p^*_{\alpha}(p^*_{\alpha}-1)d_n^2 |c_n|^{p^*_{\alpha}-2} }{2}\int_{\mathbb{R}^N}|x|^{-\alpha}U_{\lambda_n}^{p^*_{\alpha}-2}w_n^2 +o(d_n^2) \\ = & |c_n|^{p^*_{\alpha}}\int_{\mathbb{R}^N}|x|^{-\alpha}U_{\lambda_n}^{p^*_{\alpha}} \\ & + \frac{p^*_{\alpha}(p^*_{\alpha}-1)d_n^2 |c_n|^{p^*_{\alpha}-2} }{2} \int_{\mathbb{R}^N}|x|^{-\alpha}U_{\lambda_n}^{p^*_{\alpha}-2}w_n^2 +o(d_n^2), \end{split} \end{equation} since \begin{equation*} \int_{\mathbb{R}^N}|x|^{-\alpha}U_{\lambda_n}^{p^*_{\alpha}-1}w_n=\int_{\mathbb{R}^N}|x|^\alpha \Delta U_{\lambda_n} \Delta w_n=0. \end{equation*} Then combining with (\ref{epkeyibbb}) and (\ref{epkeyiybb}), we obtain \begin{equation}\label{epkeyiyxbb} \begin{split} \|u_n\|^{2}_* \leq & \left(|c_n|^{p^*_{\alpha}}\|U_{\lambda_n}\|^{p^*_{\alpha}}_*+\frac{p^*_{\alpha}(p^*_{\alpha}-1)d_n^2 |c_n|^{p^*_{\alpha}-2} }{2\mu_3} +o(d_n^2)\right)^{\frac{2}{p^*_{\alpha}}} \\ = & c_n^2\left(\|U_{\lambda_n}\|^{p^*_{\alpha}}_*+\frac{p^*_{\alpha}(p^*_{\alpha}-1)d_n^2 c_n^{-2}}{2\mu_3} +o(d_n^2)\right)^{\frac{2}{p^*_{\alpha}}} \\ = & c_n^2\left(\|U_{\lambda_n}\|^2_*+\frac{2}{p^*_{\alpha}}\frac{p^*_{\alpha}(p^*_{\alpha}-1)d_n^2 c_n^{-2}}{2\mu_3} \|U_{\lambda_n}\|^{2-p_\alpha^*}_* +o(d^2)\right) \\ = & c_n^2\|U_{\lambda_n}\|^2_*+ \frac{d_n^2 (p^*_{\alpha}-1)}{\mu_3}\|U_{\lambda_n}\|^{2-p_\alpha^*}_*+o(d_n^2). 
\end{split} \end{equation} Therefore, \begin{equation}\label{epkeyiydzbb} \begin{split} \|u_n\|^2-S_\alpha\|u_n\|^{2}_* \geq & d_n^2+c_n^2\|U_{\lambda_n}\|^2- S_\alpha\left(c_n^2\|U_{\lambda_n}\|^2_*+ \frac{d_n^2 (p^*_{\alpha}-1)}{\mu_3}\|U_{\lambda_n}\|^{2-p_\alpha^*}_*+o(d_n^2)\right) \\ = & d_n^2 \left(1-\frac{p^*_\alpha-1}{\mu_3} S_\alpha \|U_{\lambda_n}\|_*^{2-p^*_\alpha}\right) +c_n^2(\|U_{\lambda_n}\|^2- S_\alpha\|U_{\lambda_n}\|^2_*)+o(d_n^2) \\ = & d_n^2\left(1-\frac{p^*_\alpha-1}{\mu_3}\right)+o(d_n^2), \end{split} \end{equation} which holds for all $n\in\mathbb{N}$ since $\|U\|^2=\|U\|_*^{p^*_\alpha}=S_\alpha^{p^*_\alpha/(p^*_\alpha-2)}$ and $\|U\|^2=S_\alpha\|U\|_*^2$ for all $U\in\mathcal{M}_2$, then (\ref{rtnmb}) follows immediately. \end{proof} \noindent{\bf Proof of Theorem \ref{thmprt}.} We argue by contradiction. In fact, if the theorem is false then there exists a sequence $\{u_n\}\subset D^{2,2}_{\alpha,rad}(\mathbb{R}^N)\backslash \mathcal{M}_2$ such that \begin{equation*} \frac{\|u_n\|^2-S_\alpha\|u_n\|^2_*}{{\rm dist}(u_n,\mathcal{M}_2)^2}\to 0,\quad \mbox{as}\quad n\to \infty. \end{equation*} By homogeneity, we can assume that $\|u_n\|=1$, and after selecting a subsequence we can assume that ${\rm dist}(u_n,\mathcal{M}_2)\to \xi\in[0,1]$ since ${\rm dist}(u_n,\mathcal{M}_2)=\inf_{c\in\mathbb{R}, \lambda>0}\|u_n-cU_{\lambda}\|\leq \|u_n\|$. If $\xi=0$, then we have a contradiction by Lemma \ref{lemma:rtnm2b}. The only other possibility is that $\xi>0$, that is \[{\rm dist}(u_n,\mathcal{M}_2)\to \xi>0\quad \mbox{as}\quad n\to \infty,\] then we must have \begin{equation}\label{wbsi} \|u_n\|^2-S_\alpha\|u_n\|^2_*\to 0,\quad \|u_n\|=1. 
\end{equation} Since $\{u_n\}\subset D^{2,2}_{\alpha,rad}(\mathbb{R}^N)\backslash \mathcal{M}_2$ are radial, making the changes that $v_n(s)=u_n(r)$ and $r=s^{2/(2-\alpha)}$, then (\ref{wbsi}) is equivalent to \begin{equation}\label{bsiy} \begin{split} \int^\infty_0\left[v_n''(s)+\frac{M-1}{s}v_n'(s)\right]^2 s^{M-1}ds -C(M)\left(\int^\infty_0|v_n(s)|^{\frac{2M}{M-4}}s^{M-1}ds\right)^{\frac{M-4}{M}}\to 0 \end{split} \end{equation} where $M=\frac{2(N-\alpha)}{2-\alpha}>4$ and $C(M)=(M-4)(M-2)M(M+2)\left[\Gamma^2(\frac{M}{2})/(2\Gamma(M))\right]^{\frac{4}{M}}$, see the proof of Theorem \ref{thmbcm}. When $M$ is an integer (\ref{bsiy}) is equivalent to \begin{equation}\label{bsib} \begin{split} \int_{\mathbb{R}^M}|\Delta v_n|^2 dx -S(M)\left(\int_{\mathbb{R}^M}|v_n|^{\frac{2M}{M-4}}dx\right)^{\frac{M-4}{M}}\to 0,\quad \|v_n\|_{D^{2,2}_{0}(\mathbb{R}^M)}=\left(\frac{2}{2-\alpha}\right)^{3/2}, \end{split} \end{equation} where $S(M)=\pi^2(M-4)(M-2)M(M+2)\left[\Gamma(\frac{M}{2})/\Gamma(M)\right]^{\frac{4}{M}}$ is the best constant for the embedding of the space $D^{2,2}_0(\mathbb{R}^M)$ into $L^{2M/(M-4)}(\mathbb{R}^M)$, see \cite{Va93}. 
In this case, by Lions' concentration and compactness principle (see \cite[Theorem \uppercase\expandafter{\romannumeral 2}.4]{Li85-1}) as in \cite{LW00}, we have that there exists a sequence of positive numbers $\lambda_n$ such that \begin{equation*} \lambda_n^{\frac{M-4}{2}}v_n(\lambda_n x)\to V\quad \mbox{in}\quad D^{2,2}_0(\mathbb{R}^M)\quad \mbox{as}\quad n\to \infty, \end{equation*} where $V(x)=c(a+|x|^2)^{-(M-4)/2}$ for some $c\neq 0$ and $a>0$, that is \begin{equation*} \tau_n^{\frac{N-4+\alpha}{2}}u_n(\tau_n x)\to U\quad \mbox{in}\quad D^{2,2}_\alpha(\mathbb{R}^N)\quad \mbox{as}\quad n\to \infty, \end{equation*} for some sequence $\{\tau_n\}$ and $U\in\mathcal{M}_2$, which implies \begin{equation*} {\rm dist}(u_n,\mathcal{M}_2)={\rm dist}\left(\tau_n^{\frac{N-4+\alpha}{2}}u_n(\tau_n x),\mathcal{M}_2\right)\to 0 \quad \mbox{as}\quad n\to \infty, \end{equation*} which is a contradiction. Moreover, even when $M$ is not an integer we can also get an analogous contradiction. \qed \section{{\bfseries Finite-dimensional reduction}}\label{sectprp} In this section, we consider the perturbation problem (\ref{Pwhp}) and give the proof of Theorem \ref{thmpwhp} by using the finite-dimensional reduction method. We always suppose that $4-N<\alpha<2$ and $\alpha$ is not a negative even integer. Given $h\in L^\infty(\mathbb{R}^N)\cap C(\mathbb{R}^N)$, we put \begin{equation}\label{defH} H[u]=\frac{1}{p^*_{\alpha}}\int_{\mathbb{R}^N}h(x)|x|^{-\alpha} u^{p^*_{\alpha}}_+dx. \end{equation} For $\varepsilon\in\mathbb{R}$ we introduce the perturbed energy functional $\mathcal{J}_\varepsilon$ and also the unperturbed energy functional $\mathcal{J}_0$ on $D^{2,2}_{\alpha,rad}(\mathbb{R}^N)$ given by \begin{equation*} \begin{split} \mathcal{J}_\varepsilon[u]=\mathcal{J}_0[u]-\varepsilon H[u] =\frac{1}{2}\int_{\mathbb{R}^N}|x|^{\alpha}|\Delta u|^2dx-\frac{1}{p^*_{\alpha}}\int_{\mathbb{R}^N}(1+\varepsilon h(x))|x|^{-\alpha} u^{p^*_{\alpha}}_+dx. 
\end{split} \end{equation*} Evidently, $\mathcal{J}_\varepsilon\in \mathcal{C}^2$ and any critical point $u$ of $\mathcal{J}_\varepsilon$ is a weak solution to \begin{equation*} \Delta(|x|^{\alpha}\Delta u)=(1+\varepsilon h(x))|x|^{-\alpha} u^{p^*_{\alpha}-1}_+. \end{equation*} If $u\neq 0$ and $|\varepsilon|\|h\|_\infty\leq 1$, then $u$ is positive by the strong maximum principle. Hence, $u$ solves (\ref{Pwhp}). Define now \begin{equation}\label{deful} \mathcal{U}:=\left\{U_{\lambda}(x)=\lambda^{\frac{N-4+\alpha}{2}}U_{1}(\lambda x)\big| \lambda>0\right\}, \end{equation} and \begin{equation}\label{defri0} \mathcal{E}:=\left\{\omega\in D^{2,2}_{\alpha,rad}(\mathbb{R}^N): \langle\omega,\frac{\partial U_{\lambda}}{\partial \lambda}\rangle_\alpha=\int_{\mathbb{R}^N}|x|^{\alpha}\Delta \omega\Delta \frac{\partial U_{\lambda}}{\partial \lambda} dx=0\quad \mbox{for all}\quad \lambda>0 \right\}. \end{equation} For $\lambda>0$, we define the map $P_\lambda: D^{2,2}_\alpha(\mathbb{R}^N) \to D^{2,2}_\alpha(\mathbb{R}^N)$ by \begin{equation*} P_\lambda(u):=\lambda^{\frac{N-4+\alpha}{2}}u(\lambda x). \end{equation*} We can check that $P_\lambda$ preserves the norms $\|\cdot\|$ and $\|\cdot\|_*$ (see the definitions as in (\ref{def:norm})), thus for every $\lambda>0$ \begin{equation}\label{defplp} (P_\lambda)^{-1}=(P_\lambda)^t=P_{\lambda^{-1}}\quad \mbox{and}\quad \mathcal{J}_0=\mathcal{J}_0\circ P_\lambda, \end{equation} where $(P_\lambda)^t$ denotes the adjoint of $P_\lambda$. Twice differentiating the identity $\mathcal{J}_0=\mathcal{J}_0\circ P_\lambda$ yields for all $u, \phi, \psi\in D^{2,2}_\alpha(\mathbb{R}^N)$ \begin{equation*} (\mathcal{J}''_0[u]\phi,\psi)=(\mathcal{J}''_0[P_\lambda(u)]P_\lambda(\phi),P_\lambda(\psi)), \end{equation*} that is \begin{equation}\label{defpft} \mathcal{J}''_0[u]=(P_\lambda)^{-1}\circ\mathcal{J}''_0[P_\lambda(u)]\circ P_\lambda,\quad \forall u\in D^{2,2}_\alpha(\mathbb{R}^N). 
\end{equation} Differentiating (\ref{defplp}) we see that $P(\lambda,u):=P_\lambda(u)$ maps $(0,\infty)\times \mathcal{U}$ into $\mathcal{U}$, hence \begin{equation}\label{defpw} \frac{\partial P}{\partial u}(\lambda,u): T_u \mathcal{U}\to T_{P_\lambda(u)}\mathcal{U}\quad \mbox{and}\quad P_\lambda: (T_u \mathcal{U})^\perp\to (T_{P_\lambda(u)}\mathcal{U})^\perp. \end{equation} From Theorem \ref{thmpwhl}, we know that the manifold $\mathcal{U}$ is non-degenerate, then, by the same argument as in \cite[Corollary 3.2]{FS03}, we know that $\mathcal{J}''_0[U_1]$ is a self-adjoint Fredholm operator of index zero which maps the space $D^{2,2}_\alpha(\mathbb{R}^N)$ into $T_{U_1}\mathcal{U}^\perp$, and $\mathcal{J}''_0[U_1]\in \mathfrak{L}(T_{U_1}\mathcal{U}^\perp)$ is invertible. Consequently, using (\ref{defpft}) and (\ref{defpw}), we obtain in this case \begin{equation}\label{defpr} \|\mathcal{J}''_0[U_1]\|_{\mathfrak{L}(T_{U_1}\mathcal{U}^\perp)}=\|\mathcal{J}''_0[U]\|_{\mathfrak{L}(T_{U}\mathcal{U}^\perp)},\quad \forall U\in \mathcal{U}. \end{equation} \begin{lemma}\label{lemhw} Suppose that $h\in L^\infty(\mathbb{R}^N)\cap C(\mathbb{R}^N)$, then there exists a constant $C_1=C_1(\|h\|_\infty,\alpha,\lambda)>0$ such that for any $\lambda>0$ and for any $\omega\in D^{2,2}_{\alpha,rad}(\mathbb{R}^N)$, \begin{equation}\label{gh0} |H[U_{\lambda}+\omega]|\leq C_1(\||h|^{\frac{1}{p^*_\alpha}}U_{\lambda}\|^{p^*_\alpha}_*+\|\omega\|^{p^*_\alpha}), \end{equation} \begin{equation}\label{gh1} \|H'[U_{\lambda}+\omega]\|\leq C_1(\||h|^{\frac{1}{p^*_\alpha}}U_{\lambda}\|^{p^*_\alpha-1}_*+\|\omega\|^{p^*_\alpha-1}), \end{equation} \begin{equation}\label{gh2} \|H''[U_{\lambda}+\omega]\|\leq C_1(\||h|^{\frac{1}{p^*_\alpha}}U_{\lambda}\|^{p^*_\alpha-2}_*+\|\omega\|^{p^*_\alpha-2}). 
\end{equation} Moreover, if $\lim_{|x|\to 0}h(x)=\lim_{|x|\to \infty}h(x)=0$ then \begin{equation}\label{ghu} \||h|^{\frac{1}{p^*_\alpha}}U_{\lambda}\|_*\to 0 \quad\mbox{as}\quad \lambda\to 0\quad\mbox{or}\quad \lambda\to \infty. \end{equation} \end{lemma} \begin{proof} We will only show (\ref{gh2}) as (\ref{gh0})-(\ref{gh1}) follow analogously. By H\"{o}lder's inequality and since the embedding $D^{2,2}_{\alpha,rad}(\mathbb{R}^N)\hookrightarrow L^{p^*_{\alpha}}_{\alpha}(\mathbb{R}^N)$ is continuous, we have \begin{equation*} \begin{split} \|H''[U_{\lambda}+\omega]\| \leq & (p^*_\alpha-1)\sup_{\|g_1\|,\|g_2\|\leq 1} \int_{\mathbb{R}^N}\frac{|h(x)||U_{\lambda}+\omega|^{p^*_{\alpha}-2}|g_1||g_2|}{|x|^{\alpha}}dx\\ \leq & (p^*_\alpha-1)\|h\|^{\frac{2}{p^*_\alpha}}_\infty \sup_{\|g_1\|,\|g_2\|\leq 1} \||h|^{\frac{1}{p^*_\alpha}}(U_{\lambda}+\omega)\|^{p^*_\alpha-2}_*\|g_1\|_*\|g_2\|_* \\ \leq & c(\|h\|_\infty,\alpha,\lambda)\||h|^{\frac{1}{p^*_\alpha}}(U_{\lambda}+\omega)\|^{p^*_\alpha-2}_*. \end{split} \end{equation*} Then by using the triangle inequality and again $D^{2,2}_{\alpha,rad}(\mathbb{R}^N)\hookrightarrow L^{p^*_{\alpha}}_{\alpha}(\mathbb{R}^N)$, we can directly obtain (\ref{gh2}). Under the additional assumption $\lim_{|x|\to 0}h(x)=\lim_{|x|\to \infty}h(x)=0$, (\ref{ghu}) follows by the dominated convergence theorem and \begin{equation*} \int_{\mathbb{R}^N}\frac{|h(x)|U_{\lambda}^{p^*_{\alpha}}}{|x|^\alpha}dx=\int_{\mathbb{R}^N}\frac{|h(\lambda^{-1} x)|U_1^{p^*_{\alpha}}}{|x|^\alpha}dx. \end{equation*} \end{proof} In order to deal with the problem $\mathcal{J}'_\varepsilon[u]=0$ for $\varepsilon$ close to zero, we combine variational methods with the Lyapunov--Schmidt reduction method, in the spirit of \cite{AGP99}, and also \cite{FS03,MN21}. The next lemma is the crucial step.
\begin{lemma}\label{lemreg} Suppose that $h\in L^\infty(\mathbb{R}^N)\cap C(\mathbb{R}^N)$, then there exist constants $\varepsilon_0$, $C_2>0$ and a smooth function \begin{equation*} \omega=\omega(\lambda,\varepsilon):(0,\infty)\times(-\varepsilon_0,\varepsilon_0)\to D^{2,2}_{\alpha,rad}(\mathbb{R}^N) \end{equation*} such that for any $\lambda>0$ and $\varepsilon\in (-\varepsilon_0,\varepsilon_0)$, \begin{equation}\label{we} \omega(\lambda,\varepsilon)\in \mathcal{E}, \end{equation} \begin{equation}\label{jes} \mathcal{J}'_\varepsilon[U_\lambda+\omega(\lambda,\varepsilon)]\eta=0,\quad \forall \eta\in \mathcal{E}, \end{equation} \begin{equation}\label{wgx} \|\omega(\lambda,\varepsilon)\|\leq C_2|\varepsilon|. \end{equation} Moreover, if $\lim_{|x|\to 0}h(x)=\lim_{|x|\to \infty}h(x)=0$ then \begin{equation}\label{wgt0} \|\omega(\lambda,\varepsilon)\|\to 0 \quad\mbox{as}\quad \lambda\to 0\quad\mbox{or}\quad \lambda\to \infty, \end{equation} uniformly with respect to $\varepsilon$. \end{lemma} \begin{proof} Define $G: (0,\infty)\times D^{2,2}_{\alpha,rad}(\mathbb{R}^N)\times \mathbb{R}\times \mathbb{R}\to D^{2,2}_{\alpha,rad}(\mathbb{R}^N)\times \mathbb{R}$ \begin{equation*} G(\lambda,\omega,l,\varepsilon):=(\mathcal{J}'_\varepsilon[U_\lambda+\omega]-l \dot{\xi}_\lambda,(\omega,\dot{\xi}_\lambda)), \end{equation*} where $\dot{\xi}_\lambda$ denotes the normalized tangent vector $\frac{d}{d\lambda}U_\lambda$. We observe \begin{equation}\label{fmt} \begin{split} \left(\left(\frac{\partial G}{\partial (\omega,l)}(\lambda,0,0,0)\right)(\omega,l), (\mathcal{J}''_0[U_\lambda]\omega-l \dot{\xi}_\lambda,(\omega,\dot{\xi}_\lambda))\right)=\|\mathcal{J}''_0[U_\lambda]\omega\|^2+l^2+|(\omega,\dot{\xi}_\lambda)|^2, \end{split} \end{equation} where \begin{equation*} \begin{split} \left(\frac{\partial G}{\partial (\omega,l)}(\lambda,0,0,0)\right)(\omega,l)= (\mathcal{J}''_0[U_\lambda]\omega-l \dot{\xi}_\lambda,(\omega,\dot{\xi}_\lambda)). 
\end{split} \end{equation*} From the invertibility of $\mathcal{J}''_0[U_1]$ we infer that $\frac{\partial G}{\partial (\omega,l)}(\lambda,0,0,0)$ is an injective Fredholm operator of index zero, hence invertible and by (\ref{defpr}) and (\ref{fmt}) we obtain \begin{equation}\label{gpn} \begin{split} \left\|\left(\frac{\partial G}{\partial (\omega,l)}(\lambda,0,0,0)\right)^{-1}\right\| \leq & \max\{1, \|(\mathcal{J}''_\varepsilon[U_\lambda])^{-1}\|\} \\ = & \max\{1, \|(\mathcal{J}''_\varepsilon[U_1])^{-1}\|\}=:C_*. \end{split} \end{equation} If $G(\lambda,\omega,l,\varepsilon)=(0,0)$ for some $l\in\mathbb{R}$ then $\omega$ satisfies (\ref{we})-(\ref{jes}), and $G(\lambda,\omega,l,\varepsilon)=(0,0)$ if and only if $(\omega,l)=F_{\lambda,\varepsilon}(\omega,l)$, where \begin{equation*} F_{\lambda,\varepsilon}(\omega,l):=-\left(\frac{\partial G}{\partial (\omega,l)}(\lambda,0,0,0)\right)^{-1}G(\lambda,\omega,l,\varepsilon)+(\omega,l). \end{equation*} We will prove that $F_{\lambda,\varepsilon}$ is a contraction map in some ball $B_\rho$, where we may choose the radius $\rho=\rho(\varepsilon)>0$ independent of $U\in\mathcal{U}$. Here for $(\omega,l)\in B_\rho$, it means $\omega\in D^{2,2}_{\alpha,rad}(\mathbb{R}^N)$ and $l\in\mathbb{R}$ satisfying $\|(\omega,l)\|:=(\|\omega\|^2+l^2)^{1/2}\leq \rho$. Suppose $(\omega,l)\in B_\rho$. 
From (\ref{defpft}) and (\ref{gpn}), we can obtain that \begin{equation}\label{fnl} \begin{split} \left\|F_{\lambda,\varepsilon}(\omega,l)\right\| \leq & C_* \left\|G(\lambda,\omega,l,\varepsilon)-\left(\frac{\partial G}{\partial (\omega,l)}(\lambda,0,0,0)\right)(\omega,l)\right\| \\ \leq & C_* \|\mathcal{J}'_\varepsilon[U_\lambda+\omega]-\mathcal{J}''_0[U_\lambda]\omega\| \\ \leq & C_* \int^1_0\|\mathcal{J}''_0[U_\lambda+t\omega]-\mathcal{J}''_0[U_\lambda]\|dt \|\omega\| + C_*|\varepsilon| \|H'[U_{\lambda}+\omega]\| \\ \leq & C_* \int^1_0\|\mathcal{J}''_0[U_1+tP_{\lambda^{-1}}(\omega)]-\mathcal{J}''_0[U_1]\|dt \|\omega\| + C_*|\varepsilon| \|H'[U_{\lambda}+\omega]\| \\ \leq & C_* \rho \sup_{\|\omega\|\leq\rho} \|\mathcal{J}''_0[U_1+\omega]-\mathcal{J}''_0[U_1]\| + C_*|\varepsilon| \sup_{\|\omega\|\leq\rho}\|H'[U_{\lambda}+\omega]\|. \end{split} \end{equation} Analogously, for $(\omega_1,l_1), (\omega_2,l_2)\in B_\rho$ we get \begin{equation}\label{fnpl} \begin{split} \frac{\left\|F_{\lambda,\varepsilon}(\omega_1,l_1)-F_{\lambda,\varepsilon}(\omega_2,l_2)\right\|}{C_*\|\omega_1-\omega_2\|} \leq & \frac{\|\mathcal{J}'_\varepsilon[U_\lambda+\omega_1]-\mathcal{J}'_\varepsilon[U_\lambda+\omega_2]-\mathcal{J}''_0[U_\lambda](\omega_1-\omega_2)\|}{\|\omega_1-\omega_2\|} \\ \leq & \int^1_0\|\mathcal{J}''_\varepsilon[U_\lambda+\omega_2+t(\omega_1-\omega_2)]-\mathcal{J}''_0[U_\lambda]\|dt \\ \leq & \int^1_0\|\mathcal{J}''_0[U_\lambda+\omega_2+t(\omega_1-\omega_2)]-\mathcal{J}''_0[U_\lambda]\|dt \\ & + |\varepsilon| \int^1_0\|H''[U_{\lambda}+\omega_2+t(\omega_1-\omega_2)]\|dt \\ \leq & \sup_{\|\omega\|\leq 3\rho} \|\mathcal{J}''_0[U_1+\omega]-\mathcal{J}''_0[U_1]\| \\ &+ |\varepsilon| \sup_{\|\omega\|\leq 3\rho}\|H''[U_{\lambda}+\omega]\|. 
\end{split} \end{equation} We may choose $\rho_0>0$ such that \begin{equation*} C_*\sup_{\|\omega\|\leq 3\rho_0} \|\mathcal{J}''_0[U_1+\omega]-\mathcal{J}''_0[U_1]\|\leq\frac{1}{2}, \end{equation*} and $\varepsilon_0>0$ such that \begin{equation*} \varepsilon_0 C_*\sup_{U\in \mathcal{U}, \|\omega\|\leq 3\rho_0}\|H''[U+\omega]\| <\frac{1}{3} \quad \mbox{and}\quad \varepsilon_0 C_* \sup_{U\in \mathcal{U}, \|\omega\|\leq \rho_0}\|H'[U+\omega]\| \leq \frac{\rho_0}{2}. \end{equation*} With these choices and the above estimates, it is easy to see that for every $U_\lambda\in \mathcal{U}$ and $|\varepsilon|\leq \varepsilon_0$, $F_{\lambda,\varepsilon}$ maps $B_{\rho_0}$ into itself and is a contraction map. Therefore, $F_{\lambda,\varepsilon}$ has a unique fixed point $(\omega(\lambda,\varepsilon),l(\lambda,\varepsilon))$ in $B_{\rho_0}$ and it is a consequence of the implicit function theorem that $\omega$ and $l$ are continuously differentiable. From (\ref{fnl}) we also infer that $F_{\lambda,\varepsilon}$ maps $B_{\rho}$ into $B_{\rho}$, whenever \begin{equation*} 2 C_*|\varepsilon| \sup_{U\in \mathcal{U}, \|\omega\|\leq\rho_0}\|H'[U+\omega]\|\leq \rho\leq\rho_0. \end{equation*} In order to get (\ref{wgx}), here we take \begin{equation*} \rho=\rho(\varepsilon):=2 C_*|\varepsilon| \sup_{U\in \mathcal{U}, \|\omega\|\leq\rho_0}\|H'[U+\omega]\|, \end{equation*} consequently, due to the uniqueness of the fixed point we have \begin{equation*} \|\omega(\lambda,\varepsilon),l(\lambda,\varepsilon)\|\leq 2 C_*|\varepsilon| \sup_{U\in \mathcal{U}, \|\omega\|\leq\rho_0}\|H'[U+\omega]\|, \end{equation*} which gives (\ref{wgx}). Let us now prove (\ref{wgt0}). Set \begin{equation}\label{defrl} \rho_\lambda=\min\left\{4 \varepsilon_0 C_* C_1 \||h|^{\frac{1}{p^*_\alpha}}U_{\lambda}\|^{p^*_\alpha-1}_*, \rho_0, (8 \varepsilon_0 C_* C_1)^{2-p^*_\alpha}\right\} \end{equation} where $C_1=C_1(\|h\|_\infty,\alpha,\lambda)>0$ is given as in Lemma \ref{lemhw}. 
In view of (\ref{gh1}), for any $|\varepsilon|<\varepsilon_0$ and $\lambda>0$ we have that \begin{equation*} C_*|\varepsilon| \sup_{\|\omega\|\leq\rho_\lambda}\|H'[U_\lambda+\omega]\| \leq |\varepsilon| C_* C_1 \||h|^{\frac{1}{p^*_\alpha}}U_{\lambda}\|^{p^*_\alpha-1}_*+ |\varepsilon| C_* C_1 \rho^{p^*_\alpha-2}_\lambda \rho_\lambda. \end{equation*} Since $\rho^{p^*_\alpha-2}_\lambda\leq (8 \varepsilon_0 C_* C_1)^{-1}$, we have \begin{equation*} C_*|\varepsilon| \sup_{\|\omega\|\leq\rho_\lambda}\|H'[U_\lambda+\omega]\| < |\varepsilon| C_* C_1 \||h|^{\frac{1}{p^*_\alpha}}U_{\lambda}\|^{p^*_\alpha-1}_*+ \frac{1}{4}\rho_\lambda \leq \frac{1}{2}\rho_\lambda, \end{equation*} then by the above argument, we can conclude that $F_{\lambda,\varepsilon}$ maps $B_{\rho_\lambda}$ into $B_{\rho_\lambda}$. Consequently, due to the uniqueness of the fixed-point we have \begin{equation*} \|\omega(\lambda,\varepsilon)\|\leq \rho_\lambda. \end{equation*} From (\ref{ghu}) and (\ref{defrl}), we have that $\rho_\lambda \to 0$ as $\lambda\to 0$ or $\lambda\to \infty$, then we get (\ref{wgt0}). \end{proof} Under the assumptions of Lemma \ref{lemreg}, for $|\varepsilon|<\varepsilon_0$ we may define \begin{equation}\label{defule} \mathcal{U^\varepsilon}:=\left\{u\in D^{2,2}_{\alpha,rad}(\mathbb{R}^N)|u=U_{\lambda}+\omega(\lambda,\varepsilon),\quad \lambda\in(0,\infty)\right\}, \end{equation} where $\omega(\lambda,\varepsilon)\in \mathcal{E}$ is given as in Lemma \ref{lemreg}. Note that $\mathcal{U^\varepsilon}$ is a one-dimensional manifold. The next lemma will show that finding critical points for functional can be reduced to a finite dimensional problem. 
\begin{lemma}\label{lemcuve} Under the assumptions of Lemma \ref{lemreg}, we may choose $\varepsilon_0>0$ such that for every $|\varepsilon|<\varepsilon_0$ the manifold $\mathcal{U^\varepsilon}$ is a natural constraint for $\mathcal{J}_\varepsilon$, i.e., every critical point of $\mathcal{J}_\varepsilon|_{\mathcal{U^\varepsilon}}$ is a critical point of $\mathcal{J}_\varepsilon$. \end{lemma} \begin{proof} Fix $u\in \mathcal{U^\varepsilon}$ such that $\mathcal{J}'_\varepsilon|_{\mathcal{U^\varepsilon}}[u]=0$. From the definition of $\mathcal{U^\varepsilon}$ and by Lemma \ref{lemreg}, we know that $u$ is of the form $u=U_{\lambda}+\omega(\lambda,\varepsilon)$ for some $\lambda>0$. In the following, we use a dot for differentiation with respect to $\lambda$. From the definition of $\mathcal{E}$, it holds that $\langle \dot{U}_\lambda,\omega(\lambda,\varepsilon) \rangle_\alpha=0$ for all $\lambda>0$, then we obtain \begin{equation}\label{cuvef} \langle \ddot{U}_\lambda,\omega(\lambda,\varepsilon) \rangle_\alpha+\langle \dot{U}_\lambda,\dot{\omega}(\lambda,\varepsilon) \rangle_\alpha=0. \end{equation} Moreover differentiating the identity $U_\lambda=P_{\sigma}U_{\lambda/\sigma}$ with respect to $\lambda$ we obtain \begin{equation}\label{cuvefs} \dot{U}_\sigma:=\frac{\partial U_\lambda}{\partial \lambda}\Big|_{\lambda=\sigma}=\frac{1}{\sigma}P_{\sigma}\dot{U}_{1}\quad \mbox{and}\quad \ddot{U}_\sigma:=\frac{\partial^2 U_\lambda}{\partial \lambda^2}\Big|_{\lambda=\sigma}=\frac{1}{\sigma^2}P_{\sigma}\ddot{U}_{1}. \end{equation} From (\ref{jes}) we get $\mathcal{J}'_\varepsilon[u]=c_1 \dot{U}_\lambda$ for some $\lambda>0$ and $c_1\in\mathbb{R}$.
Then from (\ref{cuvef})-(\ref{cuvefs}) and (\ref{wgx}), we obtain \begin{equation*} \begin{split} 0 = & \mathcal{J}'_\varepsilon[u](\dot{U}_\lambda+\dot{\omega}(\lambda,\varepsilon))= c_1\langle\dot{U}_\lambda,\dot{U}_\lambda+\dot{\omega}(\lambda,\varepsilon)\rangle_\alpha =c_1 \lambda^{-2}(\|\dot{U}_1\|^2-\langle P_{\lambda}\ddot{U}_1, \omega(\lambda,\varepsilon)\rangle_\alpha) \\ = & c_1 \lambda^{-2}(\|\dot{U}_1\|^2-\langle\ddot{U}_1, P_{\lambda^{-1}}\omega(\lambda,\varepsilon)\rangle_\alpha) =c_1\lambda^{-2}(\|\dot{U}_1\|^2-\|\ddot{U}_1\|O(1)\varepsilon). \end{split} \end{equation*} Finally, we see that for small $\varepsilon>0$ the number $c_1$ must be zero, therefore the conclusion follows. \end{proof} Now, we are in position to prove the main result. \noindent{\bf Proof of Theorem \ref{thmpwhp}.} Choose $\varepsilon>0$ small, then let $u^\varepsilon_\lambda=U_\lambda+\omega(\lambda,\varepsilon)$, where $\omega(\lambda,\varepsilon)$ is given in Lemma \ref{lemreg} and write \begin{equation*} \mathcal{J}_\varepsilon[u^\varepsilon_\lambda]=\mathcal{J}_0[U_{\lambda}]+\frac{1}{2}(\|u^\varepsilon_\lambda\|^2-\|U_{\lambda}\|^2) -\frac{1}{p^*_{\alpha}}\int_{\mathbb{R}^N}\frac{(1+\varepsilon h(x))((u^\varepsilon_\lambda)^{p^*_{\alpha}}_+ -U_{\lambda}^{p^*_{\alpha}})}{|x|^{\alpha} }dx -\varepsilon H[U_{\lambda}]. \end{equation*} Recall that $\|U_{\lambda}\|=\|U_{1}\|$ does not depend on $\lambda$, then from (\ref{wgt0}) we infer that $u^\varepsilon_\lambda$ is uniformly bounded in $D^{2,2}_\alpha(\mathbb{R}^N)$, and $\|u^\varepsilon_\lambda-U_{\lambda}\|=\|\omega(\lambda,\varepsilon)\|=o(1)$ as $\lambda\to 0$ or $\lambda\to \infty$, therefore \begin{equation*} |\|u^\varepsilon_\lambda\|^2-\|U_{\lambda}\|^2|\leq (\|u^\varepsilon_\lambda\|+\|U_{\lambda}\|)\|u^\varepsilon_\lambda-U_{\lambda}\|=o(1). 
\end{equation*} Moreover, by H\"{o}lder's inequality and (CKN) inequality (\ref{Pi}) we obtain \begin{equation*} \begin{split} \left|\int_{\mathbb{R}^N}\frac{(1+\varepsilon h(x))((u^\varepsilon_\lambda)^{p^*_{\alpha}}_+ -U_{\lambda}^{p^*_{\alpha}})}{|x|^{\alpha} }dx\right| \leq & C\int_{\mathbb{R}^N}\frac{((u^\varepsilon_\lambda)^{p^*_{\alpha}-1}_+ +U_{\lambda}^{p^*_{\alpha}-1})|(u^\varepsilon_\lambda)_+ -U_{\lambda}|}{|x|^{\alpha}}dx \\ \leq & C\|u^\varepsilon_\lambda-U_{\lambda}\|=o(1). \end{split} \end{equation*} Finally, from (\ref{gh0}), (\ref{ghu}) and (\ref{wgt0}), we already noticed that $|H[U_{\lambda}]|=o(1)$ as $\lambda\to 0$ or $\lambda\to \infty$, then we can conclude that \begin{equation*} \Gamma_{\varepsilon}(\lambda):=\mathcal{J}_\varepsilon[u^\varepsilon_\lambda]=\mathcal{J}_0[U_{\lambda}]+o(1)=\mathcal{J}_0[U_{1}]+o(1),\quad \mbox{as}\quad \lambda\to 0\quad \mbox{or} \quad \lambda\to\infty, \end{equation*} that is, \begin{equation*} \lim_{\lambda\to 0}\Gamma_{\varepsilon}(\lambda)=\lim_{\lambda\to \infty}\Gamma_{\varepsilon}(\lambda)=\mathcal{J}_0[U_1], \end{equation*} uniformly with respect to $\varepsilon$. Thus, $\Gamma_{\varepsilon}$ has at least one critical point $\lambda_{\varepsilon}$ (in fact, $\Gamma_{\varepsilon}$ might be constant and in this case we obtain infinitely many critical points). Hence $u^\varepsilon_{\lambda_\varepsilon}$ is a critical point for $\mathcal{J}_\varepsilon$ by Lemma \ref{lemcuve}, and the proof of Theorem \ref{thmpwhp} is now complete. \qed \noindent{\bfseries Acknowledgements} The research has been supported by National Natural Science Foundation of China 11971392, Natural Science Foundation of Chongqing, China cstc2019jcyjjqX0022 and Fundamental Research Funds for the Central Universities XDJK2019TY001. We are very grateful to the referee for insightful suggestions, which has led to an important improvement of the paper. \end{document}
\begin{document} \begin{abstract} We view closed orientable 3-manifolds as covers of $S^{3}$ branched over hyperbolic links. For a cover $M \stackrel{p}{\to} S^{3}$, of degree $p$ and branched over a hyperbolic link $L \subset S^{3}$, we assign the complexity $p \vol[S^{3} \setminus L]$. We define an invariant of 3-manifolds, called the {\it link volume} and denoted $\lv[M]$, that assigns to a 3-manifold $M$ the infimum of the complexities of all possible covers $M \to S^{3}$, where the only constraint is that the branch set is a hyperbolic link. Thus the link volume measures how efficiently $M$ can be represented as a cover of $S^3$. We study the basic properties of the link volume and related invariants, in particular observing that for any hyperbolic manifold $M$, $\vol[M] < \lv[M]$. We prove a structure theorem (Theorem~\ref{thm:jt}) that is similar to (and uses) the celebrated theorem of J\o rgensen and Thurston. This leads us to conjecture that, generically, the link volume of a hyperbolic 3-manifold is much bigger than its volume (for precise statements see Conjectures \ref{conj:LV>>V1} and \ref{conj:LV>>V2}). Finally we prove that the link volumes of the manifolds obtained by Dehn filling a manifold with boundary tori are linearly bounded above in terms of the length of the continued fraction expansion of the filling curves (for a precise statement, see Theorem \ref{thm:dehn}). \end{abstract} \nocite{*} \title{The Link Volume of 3-Manifolds} \section{Introduction} \label{sec:intro} The study of 3-manifolds as branched covers of $S^{3}$ has a long history. In 1920 Alexander~\cite{alex} gave a very simple argument showing that every closed orientable triangulated 3-manifold is a cover of $S^{3}$ branched along the 1-skeleton of a tetrahedron embedded in $S^{3}$. We explain his construction and give basic definitions in Section~\ref{sec:background}. Clearly, if a 3-manifold $M$ is a finite sheeted branched cover of $S^{3}$, then $M$ is closed and orientable. 
Moise~\cite{moise} showed that every closed 3-manifold admits a triangulation; thus we see: a 3-manifold $M$ is closed and orientable if and only if $M$ is a finite sheeted branched cover of $S^{3}$. From this point on, by {\it manifold} we mean connected closed orientable 3-manifold. Alexander himself noticed one weakness of his theorem: the branch set is not a submanifold. He claimed that this can be easily resolved, but gave no indication of the proof. In 1986 Feighn~\cite{feighn} substantiated Alexander's claim, modifying the branch set to be a link. Thurston showed the existence of a {\it universal link}, that is, a link $L \subset S^{3}$ so that every 3-manifold is a cover of $S^{3}$ branched along $L$. Hilden, Lozano and Montesinos~\cite{hlm1}~\cite{hlm2} drastically simplified Thurston's example showing, in particular, that the figure eight knot is universal. Cao and Meyerhoff~\cite{CaoMeyehoff} showed that the figure eight knot is the hyperbolic link of smallest volume. In this paper, we consider hyperbolic links and consider their volume as a measure of complexity, hence we see that every 3-manifold is a cover of $S^{3}$, branched along the simplest possible link. Our goal is to define and study an invariant that asks: how efficient is the presentation of a 3-manifold as a branched cover of $S^{3}$? We do this as follows: let $M$ be a $p$-fold cover of $S^{3}$, branched along the hyperbolic link $L$. We denote this as $M \stackrel{p}{\to} (S^3,L)$ (read: $M$ is a $p$-fold cover of $S^{3}$ branched along $L$).
The complexity of $M \stackrel{p}{\to} (S^3,L)$ is defined to be the degree of the cover times the volume of $L$, that is: $$p \vol[S^3 \setminus L].$$ The {\it link volume} of $M$, denoted $\lv[M]$, is the infimum of the complexities of all covers $M \stackrel{p}{\to} (S^3,L)$, subject to the constraint that $L$ is a hyperbolic link; that is: $$\lv[M] = \inf \{ p \vol[S^3 \setminus L] | M \stackrel{p}{\to} (S^3,L)\, ; \, L \mbox{ hyperbolic}\}.$$ Given a hyperbolic manifold $M$ we consider its volume, $\vol[M]$, as its complexity. This is consistent with our attitude towards hyperbolic links, and is considered very natural by many 3-manifold topologists. Why is that? What is it that the volume actually measures? Combining results of Gromov, J\o rgensen, and Thurston (for a detailed exposition see~\cite{KR}) we learn the following. Let $t_C(M)$ denote the minimal number of tetrahedra required to triangulate a link exterior in $M$, that is, the least number of tetrahedra required to triangulate $M \setminus N(L)$, where the minimum is taken over all possible links $L \subset M$ (possibly, $L = \emptyset$) and all possible triangulations of $M \setminus N(L)$. Then there exist constants $a, b>0$ so that \begin{equation} a \vol[M] \leq t_C(M) \leq b\vol[M]. \end{equation} We consider invariants up to linear equivalence, and so we see that $\mbox{Vol}$ and $t_C$ are equivalent. This gives a natural, topological interpretation of the volume. In this paper we begin the study of the link volume, with the ultimate goal of obtaining a topological understanding of it. The basic facts about the link volume are presented in Section~\ref{sec:basic}. The most important are the following easy observations: \begin{enumerate} \item The link volume is obtained, that is, for any manifold $M$ there is a cover $M \stackrel{p}{\to} (S^3,L)$ so that $\lv[M] = p \vol[S^{3} \setminus L]$.
\item For every hyperbolic 3-manifold $M$ we have: $$\vol[M] < \lv[M].$$ \end{enumerate} The second point begs the question: is the link volume of hyperbolic manifolds equivalent to the hyperbolic volume? As we shall see below, the results of this paper lead us to believe that this is not the case (Conjectures~\ref{conj:LV>>V1} and~\ref{conj:LV>>V2}). The right hand side of the Inequality~(1) implies that, for fixed $V$, any hyperbolic manifold of volume less than $V$ can be obtained from a manifold $X$ by Dehn filling, where $X$ is constructed using at most $bV$ tetrahedra. Since there are only finitely many such $X$'s, this implies the celebrated result of J\o rgensen--Thurston: for any $V>0$, there exists a finite collection of compact ``parent manifolds'' $\{X_{1},\dots,X_{n}\}$, so that $\partial X_{i}$ consists of tori, and any hyperbolic manifold of volume at most $V$ is obtained by Dehn filling $X_{i}$, for some $i$. Our first result is: \begin{thm} \label{thm:jt} There exists a universal constant $\Lambda > 0$ so that for every $V>0$, there is a finite collection $\{\phi_{i}:X_{i} \to E_{i}\}_{i=1}^{n_{V}}$, where $X_{i}$ and $E_{i}$ are complete finite volume hyperbolic manifolds and $\phi_{i}$ is an unbranched cover, and for any cover $M \stackrel{p}{\to} (S^3,L)$ with $p \vol[S^{3} \setminus L] < V$ the following hold: \begin{enumerate} \item For some $i$, $M$ is obtained from $X_{i}$ by Dehn filling, $S^{3}$ is obtained from $E_{i}$ by Dehn filling, and the following diagram commutes (where the vertical arrows represent the covering projections and the horizontal arrows represent Dehn fillings): \begin{center} \begin{picture}(200,60)(0,0) \put( 0, 0){\makebox(0,0){$E_{i}$}} \put( 0,50){\makebox(0,0){$X_{i}$}} \put( 0, 40){\vector(0,-1){30}} \put( 10, 0){\vector(1,0){75}} \put(100, 0){\makebox(0,0){$S^3,L$}} \put(100, 40){\vector(0,-1){30}} \put(10,28){\makebox(0,0){$/\phi_{i}$}} \put(108,28){\makebox(0,0){$/\phi$}}
\put(100,50){\makebox(0,0){$M$}} \put( 10,50){\vector(1,0){80}} \end{picture} \end{center} \item $E_{i}$ can be triangulated using at most $\Lambda V/p$ tetrahedra (hence $X_{i}$ can be triangulated using at most $\Lambda V$ tetrahedra and $\phi_{i}$ is simplicial). \end{enumerate} \end{thm} For $V>0$, let $\mathcal{M}_{V}$ denote the set of manifolds of link volume less than $V$. Since the link volume is always obtained, applying Theorem~\ref{thm:jt} to covers realizing the link volumes of manifolds in $\mathcal{M}_{V}$, we obtain a finite family of ``parent manifolds'' $X_{1},\dots,X_{n}$ that give rise to every manifold in $\mathcal{M}_{V}$ via Dehn filling, much like J\o rgensen--Thurston. The extra structure given by the projection $\phi_{i}:X_{i} \to E_{i}$ implies that the fillings that give rise to manifolds of low link volume are very special: Fix $V$, and let $X_i$ be as in the statement of Theorem~\ref{thm:jt}. Then for any hyperbolic manifold $M$ that is obtained by filling $X_i$ we have $\vol[M] < \vol[X_i]$. On the other hand, it is by no means clear that $\lv[M] < V$, for it is not easy to complete the diagram in Theorem~\ref{thm:jt}: \begin{enumerate} \item $X_i$ must cover a manifold $E_i$. \item The covering projection and the filled slopes must be compatible (see Subsection~\ref{subsec:slopes} for definition). \item The slopes filled on $E_i$ must give $S^3$, a very unusual situation since $E_i$ is hyperbolic. \end{enumerate} These lead us to believe that the link volume, as a function, is much bigger than the volume. Specifically we conjecture: \begin{conj} \label{conj:LV>>V1} Let $X$ be a complete finite volume hyperbolic manifold with one cusp. For a slope $\alpha$ on $\partial X$, let $X(\alpha)$ denote the closed manifold obtained by filling $X$ along $\alpha$.
Then for any $V>0$, there exists a finite set of slopes $\mathcal{F}$ on $\partial X$, so that if $\lv[X(\alpha)] < V$, then $\alpha$ intersects some slope in $\mathcal{F}$ at most $V/2$ times. \end{conj} As is well known, the volume of the figure eight knot complement is about $2.029\dots$, twice $v_{3}$, the volume of a regular ideal tetrahedron. By considering manifolds that are obtained by Dehn filling the figure eight knot exterior we see that Conjecture~\ref{conj:LV>>V1} implies: \begin{conj} \label{conj:LV>>V2} For every $V>0$ there exists a manifold $M$ so that $\vol[M] < 2v_{3} = 2.029\dots$ and $\lv[M] > V$. \end{conj} To describe our second result, we first define the knot volume and a few other variations of the link volume; for the definition of a simple cover see Subsection~\ref{subsec:monte}. \begin{dfns} \label{dfns:KV} \begin{enumerate} \item The {\it knot volume} of a 3-manifold $M$ is obtained by considering only hyperbolic knots in the definition of the link volume, that is, $$\kv[M] = \inf \{ p \vol[S^3 \setminus K] | M \stackrel{p}{\to} (S^3,K); K \mbox{ is a hyperbolic knot}\}.$$ \item The {\it simple knot volume} of a 3-manifold $M$ is obtained by considering only simple covers in the definition of the knot volume, that is, $$\kvs[M] = \inf \left\{ p \vol[S^3 \setminus K] \Bigg| M \stackrel{p}{\to} (S^3,K); \begin{array}{l} K \mbox{ a hyperbolic knot}, \\ \mbox{and the cover is simple} \end{array} \right\}.$$ \item For an integer $d \geq 3$, the {\it simple $d$-knot volume} is obtained by restricting to $p$-fold covers for $p \leq d$ in the definition of the simple knot volume, that is, $$\kvsd[d M] = \inf \left\{ p \vol[S^3 \setminus K] \Bigg| M \stackrel{p}{\to} (S^3,K); \begin{array}{l} K \mbox{ a hyperbolic knot}, \\ \mbox{the cover is simple}, \\ \mbox{and } p \leq d \end{array} \right\}.$$ \end{enumerate} \end{dfns} Similarly, one can play with various restrictions on the covers considered.
However, one must ensure that the definition makes sense. For example, the {\it regular} link volume can be defined using only regular covers. This makes no sense, as not every manifold is the regular cover of $S^{3}$. It follows from Hilden~\cite{hilden} and Montesinos~\cite{montesinos} that every 3-manifold is a simple 3-fold cover of $S^{3}$ branched over a hyperbolic knot; hence the definitions above make sense. Our next result is an upper bound, and holds for any of the variations listed in Definitions~\ref{dfns:KV}. Since these definitions are obtained by adding restrictions to the covers considered, it is clear that $\kvsd[3 M]$ is greater than or equal to any of the others, including the link volume. We therefore phrase Theorem~\ref{thm:dehn} below for that invariant. But first we need: \begin{dfn} \label{dfn:depth} Let $T$ be a torus, and $\mu$, $\lambda$ generators for $H_{1}(T)$. By identifying $\mu$ with $1/0$ and $\lambda$ with $0/1$, we get an identification of the {\it slopes} of $H_1(T)$ with $\mathbb{Q} \cup \{1/0\}$, where an element of $H_{1}(T)$ is called a {\it slope} if it can be represented by a connected simple closed curve on $T$. Then the {\it depth} of a slope $\alpha$, denoted $\depth[\alpha]$, is the length of the shortest continued fraction expansion representing $p/q$. For a collection of tori $T_{1},\dots,T_{n}$ with bases chosen for $H_{1}(T_{i})$ for each $i$, we define $$\depth[p_{1}/q_{1},\dots,p_{n}/q_{n}] = \sum_{i=1}^{n} \depth[p_{i}/q_{i}].$$ \end{dfn} We are now ready to state: \begin{thm} \label{thm:dehn} Let $X$ be a connected, compact orientable $3$-manifold, $\partial X$ consisting of $n$ tori $T_{1},\dots,T_{n}$, and fix $\mu_{i}$, $\lambda_{i}$, generators for $H_{1}(T_{i})$ for each $i$.
Then there exist a universal constant $B$ and a constant $A$ that depends on $X$ and the choice of bases for $H_{1}(T_{i})$, so that for any $p_{i}/q_{i}$ ($i=1,\dots,n$), $$\kvsd[3 X(p_{1}/q_{1},\dots,p_{n}/q_{n})] < A + B \depth[p_{1}/q_{1},\dots,p_{n}/q_{n}],$$ where $X(p_{1}/q_{1},\dots,p_{n}/q_{n})$ denotes the manifold obtained by filling $X$ along the slopes $p_{i}/q_{i}$. \end{thm} As noted above, $\kvsd[3 M]$ is greater than or equal to all the invariants defined in Definitions~\ref{dfns:KV} and the link volume. Hence Theorem~\ref{thm:dehn}, which gives an upper bound, holds for all these invariants, and in particular: \begin{cor} \label{cor:dehn} With the hypotheses of Theorem~\ref{thm:dehn}, there exist a universal constant $B$ and a constant $A$ that depends on $X$ and the choice of bases for $H_{1}(T_{i})$, so that for any slopes $p_{i}/q_{i}$ ($i=1,\dots,n$), $$\lv[X(p_{1}/q_{1},\dots,p_{n}/q_{n})] \leq A + B \depth[p_{1}/q_{1},\dots,p_{n}/q_{n}].$$ \end{cor} \noindent{\bf Organization.} This paper is organized as follows. In Section~\ref{sec:background} we go over necessary background material. In Section~\ref{sec:variations} we explain some possible variations on the link volume. Notably, we define the {\it surgery volume} (definition due to Kimihiko Motegi) and an invariant denoted $pB(M)$ (definition due to Ryan Blair). We show that, {\it in contrast to the link volume}, the surgery volume of hyperbolic manifolds is bounded in terms of their volume. We also show that $pB(M)$ is linearly equivalent to $g(M)$, the Heegaard genus of $M$. In Section~\ref{sec:basic} we explain basic facts about the link volume and list some open questions. In Section~\ref{sec:jt} we prove Theorem~\ref{thm:jt}. In Section~\ref{sec:dehn} we prove Theorem~\ref{thm:dehn}. \noindent{\bf Acknowledgement.} We thank Ryan Blair, Tsuyoshi Kobayashi, Kimihiko Motegi, Hitoshi Murakami, and Jair Remigio--Ju\'arez for helpful conversations.
\section{Background} \label{sec:background} By {\it manifold} we mean connected, closed, orientable 3-manifold. In some cases, we consider connected, compact, orientable 3-manifolds; then we explicitly say {\it compact manifold}. By {\it hyperbolic manifold} $X$ we mean a complete, finite volume Riemannian 3-manifold locally isometric to $\mathbb{H}^{3}$. It is well known that any hyperbolic manifold $X$ is the interior of a compact manifold $\bar{X}$ and $\bar{X} \setminus X = \partial \bar{X}$ consists of tori. To simplify notation, we do not refer to $\bar{X}$ explicitly and call $\partial \bar X$ the {\it boundary of} $X$. We assume familiarity with the basic concepts of 3-manifold theory and hyperbolic manifolds, and in particular the Margulis constant. By {\it volume} we mean the hyperbolic volume. The volume of a hyperbolic manifold $M$ is denoted $\vol[M]$. We follow standard notation. In particular, by {\it Dehn filling} (or simply {\it filling}) we mean attaching a solid torus to a torus boundary component. \subsection{Branched covering} \label{subsec:covers} We begin by recalling Alexander's Theorem~\cite{alex}; because this theorem is very short and elegant, we include a sketch of its proof here. \begin{thm}[Alexander] Let $\mathcal{T}$ be a triangulation of $S^{n}$ obtained by doubling an $n$-simplex. Let $M$ be a closed orientable triangulated $n$-manifold. Then $M$ is a cover of $S^{n}$ branched along $\mathcal{T}^{(n-2)}$, the $(n-2)$-skeleton of $\mathcal{T}$. \end{thm} \begin{proof}[Sketch of Proof] Let $M$ be as above. Given $\mathcal{T}_M$, a triangulation of $M$, let $\mathcal{T}_M'$ denote its barycentric subdivision. Each vertex $v$ of $\mathcal{T}_M'$ is the center of a $k$-face of $\mathcal{T}_M$, for some $k$. Label $v$ with the label $k$. By construction, there are exactly $n+1$ labels, $0,\dots,n$, and no two adjacent vertices have the same label. Note that the 1-skeleton of $\mathcal{T}$ is $K_{n+1}$, the complete graph on $n+1$ vertices.
Label these vertices with the labels $0,\dots,n$ so that every label appears exactly once. We define a function from $\mathcal{T}_M'^{(n-1)}$ (the $(n-1)$-skeleton of $\mathcal{T}_M'$) to $S^{n}$ by sending each $k$-face simplicially to the unique $k$-face of $S^{n}$ with the same labeling (for $k < n$); it is easy to see that this function is well defined. However, the $n$-cells of $M$ can be sent to either of the two $n$-simplices of $\mathcal{T}$. We pick the simplex so that the map is orientation preserving. It is left to the reader to verify that this is indeed a cover, branched over the $(n-2)$-skeleton of the triangulation of $S^{n}$. \end{proof} \begin{lem} \label{lem:finiteness} For any compact triangulated $n$-manifold $M$, $B \subset M^{(n-2)}$ a subcomplex, and $d>0$, there are only finitely many $d$-fold covers of $M$ branched along $B$. \end{lem} \begin{proof} It is well known that a $d$-fold cover of $M$ branched along $B$ is determined by a representation of $\pi_{1}(M \setminus B)$ into $S_{d}$, the symmetric group on $d$ elements (see, for example, \cite{rolfsen}). The lemma follows from the fact that $\pi_{1}(M\setminus B)$ is finitely generated and $S_{d}$ is finite. \end{proof} \subsection{Simple covers and the Montesinos Move} \label{subsec:monte} \begin{dfn} Let $f:M \to N$ be a cover of finite degree $p$ branched along $B \subset N$. Note that every point of $N \setminus B$ has exactly $p$ preimages, and every point of $B$ has at most $p$ preimages. $f:M \to N$ is called {\it simple} if every point of $B$ has exactly $p-1$ preimages. \end{dfn} Let $M \to (S^{3},L)$ be a 3-fold simple cover branched along the link $L$. We view $L$ diagrammatically, as projected into $S^{2} \subset S^{3}$ in the usual way. Since the cover is simple, each generator in the Wirtinger presentation of $S^3 \setminus L$ corresponds to a transposition in the symmetric group on 3 elements (that is, $(1 \ 2)(3)$ or $(1 \ 3)(2)$ or $(2 \ 3)(1)$).
We consider these as three colors, and color each strand of $L$ accordingly. By assumption, $M$ is connected; hence not all generators correspond to the same permutation. Finally, the relators of the Wirtinger presentation guarantee that at each crossing either all three colors appear, or only one color does. Thus we obtain a 3-coloring of the strands of $L$. Montesinos proved that if we replace a positive crossing where {\it all three colors appear} by 2 negative crossings the cover is not changed. This is called the {\it Montesinos move}. The reason is simple: the neighborhood of a 3-colored crossing is a ball, and its cover is a ball as well. (This is false if only one color appears at the crossing!) More generally, when all three colors appear we can replace $n$ half twists with $n + 3k$ half twists ($n,k \in \mathbb Z$). The case $n=0$ is allowed, but then we must require that the two strands in question have distinct colors. We call such a move an $n \mapsto n+3k$ Montesinos move. In Figure~\ref{fig:montesinos} we show a few views of the Montesinos Move. \begin{figure} \caption{Montesinos move} \label{fig:montesinos} \end{figure} Finally, we record the following fact for future reference. It is easy to see that the $p$-fold cover $f:M \to S^{3}$ branched along $B \subset S^{3}$ is connected if and only if the image of $\pi_{1}(S^{3} \setminus B)$ in $S_{p}$ acts transitively on the set of $p$ letters. For simple 3-fold covers this means: \begin{lem} \label{lem:connected3fold} Let $M$ be a 3-manifold and $f:M \to S^{3}$ a simple 3-fold cover branched along the link $L \subset S^{3}$. Then $M$ is connected if and only if at least two colors appear in the 3-coloring of $L$. \end{lem} \subsection{Slopes on tori and coverings} \label{subsec:slopes} Recall that a {\it slope} on a torus is the free homotopy class of a connected simple closed curve, up to reversing the orientation of the curve.
For this subsection we fix the following: let $X$ and $E$ be complete hyperbolic manifolds of finite volume, and $\phi:X \to E$ an unbranched cover. Let $T$ be a boundary component of $X$; note that $\phi$ induces an unbranched cover $T \to \phi(T)$. Let $\alpha$ be a slope on $T$ realized by a connected simple closed curve $\gamma \subset T$. Then $\phi(\gamma)$ is a (not necessarily simple) connected essential curve on $\phi(T)$. Since $\phi(T)$ is a torus, there is a curve $\bar\beta$ on $\phi(T)$ so that $\phi(\gamma)$ is homotopic to $\bar\beta^{m}$, for some $m \neq 0$. Let $\beta$ be the slope defined by $\bar\beta$. Define the function $\phi_{\downarrow}$ from the slopes on $T$ to the slopes on $\phi(T)$ by setting $\phi_{\downarrow}(\alpha) = \beta$. Conversely, let $\alpha$ be a slope on $\phi(T)$ realized by a connected simple closed curve $\gamma \subset \phi(T)$. Then $\phi^{-1}(\gamma)$ is a (not necessarily connected) disjoint union of essential simple closed curves. Each component of $\phi^{-1}(\gamma)$ defines a slope on $T$, and since these curves are disjoint, they all define the same slope, say $\beta$. Define the function $\phi_{\uparrow}$ from the slopes on $\phi(T)$ to the slopes on $T$ by setting $\phi_{\uparrow}(\alpha) = \beta$. It is easy to see that $\phi_{\downarrow}$ is the inverse of $\phi_{\uparrow}$. We say that $\alpha$ and $\phi_{\downarrow}(\alpha)$ are {\it corresponding} slopes. Suppose that we Dehn fill $T$ and $\phi(T)$. If the slopes filled are not corresponding, then the curve filled on $T$ maps to a curve on $\phi(T)$ that is not null homotopic in the attached solid torus. Thus the map $\phi$ cannot be extended into that solid torus. Conversely, suppose that corresponding slopes are filled. We parametrize the attached solid tori as $S^{1} \times D^{2}$, and extend $\phi$ into the solid tori by coning along each disk $\{p\} \times D^{2}$ ($p \in S^{1}$).
It is easy to see that the extended map is a cover, branched (if at all) along the core of the attached solid torus. (The local degree at the core of the solid torus is the number denoted by $m$ in the construction of $\phi_{\downarrow}$ above.) In conclusion, $\phi$ induces a correspondence between slopes on $T$ and slopes on $\phi(T)$, and $\phi$ can be extended to the attached solid tori to give a branched cover after Dehn filling if and only if corresponding slopes are filled. Next, let $T_{1}$, $T_{2} \subset \partial X$ be tori that project to the same component of $\partial E$. Then the two bijections $\phi_{\downarrow}$ from the slopes of $T_{1}$ and $T_{2}$ to the slopes of $\phi(T_{1}) = \phi(T_{2})$ induce a bijection between the slopes of $T_{1}$ and the slopes of $T_{2}$; again we call slopes that are interchanged by this bijection {\it corresponding}. Filling $T_{1}$ and $T_{2}$ along corresponding slopes is called {\it consistent}, {\it inconsistent} otherwise. Note that after filling $X$ there is a filling of $E$ so that the cover $X \to E$ extends to a branched cover if and only if the filling of $X$ is consistent on every pair of components of $\partial X$. \subsection{Hyperbolic alternating links} \label{sebsection:alternating} In this subsection we follow Chapter~4 of Lickorish \cite{lickorish}. We begin with the following standard definitions: \begin{dfns} \label{dfn:alternating} Let $L$ be a link and $D$ a diagram for $L$. The projection sphere is denoted $S^{2}$. Then $D$ is called {\it alternating} if, for each component $K$ of $L$, when traversing the projection of $K$ the crossings occur as \dots over, under, over, under,\dots. $L$ is called an {\it alternating link} if it admits an alternating diagram. A link diagram $D$ is called {\it strongly prime} if any simple closed curve that intersects it transversely in two simple points (that is, two points that are not crossings) bounds a disk that $D$ intersects in a single arc with no crossings.
A link $L$ is called {\it split} if its exterior admits an essential sphere, that is, if there is an embedded sphere $S \subset S^3 \setminus L$ so that each of the balls obtained by cutting $S^3$ open along $S$ contains at least one component of $L$. A link diagram $D \subset S^2$ is called a {\it split diagram} if there is a circle $\gamma$ embedded in $S^2$, so that each disk obtained by cutting $S^2$ open along $\gamma$ contains at least one component of $D$. Note that a split diagram is necessarily a diagram for a split link, but the converse does not hold. A link is called {\it simple} if its exterior does not admit an essential surface of non-negative Euler characteristic. A link $L$ is called {\it hyperbolic} if $S^3 \setminus L$ admits a complete, finite volume, hyperbolic metric. \end{dfns} Menasco (\cite{menasco}, see also~\cite{lickorish}) proved: \begin{thm} \label{thm:menasco} Let $D$ be an alternating link diagram for a link $L$. If $D$ is strongly prime and is not split, then $L$ is simple. \end{thm} Thurston proved: \begin{thm} \label{thm:thurston} Any simple link is hyperbolic. \end{thm} Combining these results, we obtain: \begin{cor} \label{cor:hyperbolic} If a link $L$ has a non-split, strongly prime, alternating diagram, then $L$ is hyperbolic. \end{cor} \subsection{Twist number and hyperbolic volume} \label{subsec:twist} For the definition of twist number see, for example,~\cite{lackenby}. We briefly recall it here. Let $D$ be a link diagram. Let $\sim$ be the equivalence relation on the crossings of $D$ generated by $c \sim c'$ if $c$ and $c'$ lie on the boundary of a bigon of $D$. This equivalence relation can be visualized as follows: if $c_{1},\dots,c_{n}$ form an equivalence class of crossings, then after reordering them if necessary, there is a chain of $n-1$ bigons in $D$ with $c_{i-1}$ and $c_{i}$ on the boundary of the $i$th bigon.
The {\it twist number} of a link $L$, denoted $t(L)$, is the smallest number of equivalence classes in any diagram for $L$. Thus, for example, the obvious diagram of twist knots show they have twist number at most 2. Lackenby~\cite{lackenby} gave upper and lower bounds on the hyperbolic volume of link exteriors in terms of their twist number. We emphasize that the lower bound holds for alternating links (or, more precisely, for alternating diagrams), while the upper bound holds {\it for all links}. It is the upper bound that we will need in this work, hence we need not assume the diagram alternates. We will need: \begin{thm}[Lackenby~\cite{lackenby}] \label{lackenby} There exists a constant $c$ so that for any hyperbolic link $L$, $$\vol[S^{3} \setminus L] \leq c t(L).$$ \end{thm} \section{Variations} \label{sec:variations} In this section we discuss two variations of the link volume. The first variation is obtained by replacing the volume by another knot invariant (note that one can use any invariant with values in $\mathbb R_{\geq 0}$). This variation was suggested by Ryan Blair. Let $L \subset S^{3}$ be a link and let $b(L)$ denote its bridge index. We consider the complexity of $M \stackrel{p}{\to} (S^3,L)$ to be $p b(L)$. Define $\mbox{pB}(M)$ to be the infimum of $pb(L)$, taken over all possible covers. It is easy to see that the preimage of a bridge surface $S$ for $L$ is a Heegaard surface for $M$, say $\Sigma$. Since $S$ is a $2b$ punctured sphere, $\chi(S \setminus L) = 2-2b$. Its preimage has Euler characteristic $p(2-2b)$. We obtain $\Sigma$ by adding some number of points, say $n \geq 0$. Then $\chi(\Sigma) = p (2-2b) + n$. Thus we get: \begin{eqnarray*} 2g(\Sigma) - 2 & = & -\chi(\Sigma) \\ & = & p(2b-2) - n \\ & = & 2pb -(2p + n) \\ & \leq & 2pb - 2. \end{eqnarray*} Since $\mbox{pB}(M)$ is positive integer valued, the infimum is obtained. 
By considering a cover that realizes $\mbox{pB}(M)$, we obtain a surface $\Sigma$ so that $g(\Sigma) \leq \mbox{pB}(M)$. Thus $g(M) \leq g(\Sigma) \leq \mbox{pB}(M)$. The converse is highly non-trivial. Given an arbitrary manifold $M$, Hilden~\cite{hilden} constructed a $3$-fold cover $M \stackrel{3}{\to} (S^3,L)$. The construction uses an arbitrary Heegaard surface $\Sigma \subset M$. One feature of Hilden's construction is that $b(L) \leq 2g(\Sigma) + 2$. Since $\Sigma$ was an arbitrary Heegaard surface, we may assume that $g(\Sigma) = g(M)$. Thus we see that $\mbox{pB}(M) \leq 6g(M) + 6$. Combining these inequalities, we obtain: $$g(M) \leq \mbox{pB}(M) \leq 6g(M) + 6.$$ Thus we see that the Heegaard genus and $\mbox{pB}$ are equivalent. \noindent Another variation, suggested by Kimihiko Motegi, is the {\it surgery volume}. Given a manifold $M$, it is well known that $M$ is obtained by Dehn surgery on a link in $S^{3}$, say $L$. By Myers~\cite{myers}, every compact 3-manifold admits a simple knot. Applying this to $S^3 \setminus N(L)$ we obtain a knot $K$ so that $L' = L \cup K$ is a hyperbolic link. Since $M$ is obtained from $S^{3}$ via surgery along $L'$ (with the original surgery coefficients on $L$ and the trivial slope on $K$), we conclude that $M$ is obtained from $S^{3}$ via surgery along a hyperbolic link. The surgery volume of $M$ is then $$\mbox{SurgVol}(M) = \inf \{ \vol[S^3 \setminus L] | M \mbox{ is obtained by surgery on }L, L\mbox{ is hyperbolic}\}.$$ Neumann and Zagier~\cite{NeumannZagier} showed that if a hyperbolic manifold $N_1$ is obtained by filling a hyperbolic manifold $N_2$, then $\vol[N_1] < \vol[N_2]$.
Applying this in our setting (with $M$ as $N_1$ and $S^3 \setminus L'$ as $N_2$) we see that for any hyperbolic manifold $M$, $\vol[M] \leq \mbox{SurgVol}(M).$ We note that there exists a function $f:(0,\infty) \to (0,\infty)$ so that any hyperbolic manifold $M$ is obtained by surgery on a hyperbolic link $L \subset S^{3}$ with $\vol[S^{3} \setminus L] \leq f(\vol[M])$. To see this, fix $V$ and let $X_{1},\dots,X_{n}$ be the set of parent manifolds of all hyperbolic manifolds of volume at most $V$. For each $X_{i}$ there is a link $L_{i}$ in $S^{3}$, so that $X_{i}$ is obtained by surgery on some of the components of $L_{i}$ and drilling the rest. Therefore, any hyperbolic manifold $M$ with volume at most $V$ is obtained by surgery on some $L_{i}$ ($i=1,\dots,n$). Set $$f(V) = \max_{i=1}^{n}\{\vol[S^{3} \setminus L_{i}]\}.$$ We get: $$\vol[M] \leq \mbox{SurgVol}(M) \leq f(\vol[M]).$$ The surgery volume and the hyperbolic volume are equivalent if there is a {\it linear} function $f$ as above; we do not know if this is the case. \section{Basic facts and open questions} \label{sec:basic} Basic facts about the Link Volume: \begin{description} \item[The link volume is obtained] that is, for every $M$ there exists a cover $M \stackrel{p}{\to} (S^3,L)$ so that $\lv[M] = p \vol[S^{3} \setminus L]$. Recall that the link volume was defined as an infimum. To see that there is a cover realizing it, we need to show that the infimum is obtained. Fix a manifold $M$, and let $M \stackrel{p_{n}}{\to} (S^3,L_{n})$ be a sequence of covers that approximates $\lv[M]$. By Cao--Meyerhoff~\cite{CaoMeyehoff}, for every $n$, $\vol[S^{3}\setminus L_{n}] > 2$. Hence for large enough $n$, $p_{n} \leq \lv[M] / 2$; we see that there are only finitely many values for $p_{n}$. For any collection of covers $M \stackrel{d}{\to} (S^3,L_{i}')$ of fixed degree $d$, the infimum of $\{d \vol[S^{3} \setminus L_{i}']\}$ is obtained, since the set of hyperbolic volumes is well-ordered.
It follows that the link volume is realized by some cover in $\{M \stackrel{p_{n}}{\to} (S^3,L_{n})\}$. \item[The link volume is the volume of a link exterior] that is, for any $M$, there exists $\widetilde{L} \subset M$ so that $\lv[M] = \mbox{Vol}(M \setminus \widetilde{L})$. This follows easily from the previous point. Let $M \stackrel{p}{\to} (S^3,L)$ be a cover realizing the link volume. Let $\widetilde L$ be the preimage of $L$. Then the cover $M \to S^{3}$ induces a cover $M \setminus \widetilde L \to S^{3} \setminus L$. Since the cover $M\setminus \widetilde L \to S^{3} \setminus L$ is not branched, we can lift the hyperbolic structure on $S^{3} \setminus L$ to $M \setminus \widetilde{L}$. We obtain a complete finite volume hyperbolic structure on $M \setminus \widetilde L$ of volume $p \vol[S^{3} \setminus L] = \lv[M]$. \item[The link volume is bigger than the volume] If $M$ is hyperbolic then $\vol[M] < \lv[M]$: this follows immediately from the previous point and the fact that the volume always goes down under Dehn filling~\cite{NeumannZagier}. \item[The spectrum of link volumes is well ordered] it follows from the second point above that the spectrum of link volumes is a subset of the spectrum of hyperbolic volumes. Since the spectrum of hyperbolic volumes is well ordered, so are all of its subsets. \item[The spectrum of link volumes is ``small''] the reader can easily make sense of the claim that the spectrum of link volumes is a very small subset of the spectrum of hyperbolic volumes. In fact, the spectrum of link volumes is a subset of the set of integral products of volumes of hyperbolic links in $S^{3}$. However, it is not too small: there are infinitely many manifolds $M$ with $\lv[M] < 7.22\dots$. Moreover, in \cite{JairYoav} Jair Remigio-Juarez and the first named author showed that there are infinitely many manifolds of {\it the same} link volume, just under $7.22\dots$.
This is in sharp contrast to the hyperbolic volume function which is finite-to-one. \end{description} For the remainder of this paper we will often use these facts without reference. Basic questions about the Link Volume include: \begin{enumerate} \item Calculate $\lv[M]$. It is not clear whether or not there exists an algorithm to calculate the link volume of a given manifold $M$. This would involve some questions about the set of links in $S^{3}$ that give rise to $M$ and appears to be quite hard. \item The following question was proposed by Hitoshi Murakami: if $N \stackrel{q}{\to} M$ is an unbranched cover then $\lv[N] \leq q \lv[M]$. How good is this bound? Even for $q=2$, the answer is not clear. \item Since the link volume is obtained, for every manifold $M$ there is a positive integer $d$ which is the smallest integer so that there exists a cover $M \stackrel{d}{\to} (S^3,L)$ realizing $\lv[M]$. What is $d$ and how does it reflect the topology of $M$? Can $d$ be arbitrarily large? Is every positive integer realized as $d$ for some $M$? \item Characterize the set $\{ \widetilde{L} \subset M| \exists M \to S^3$, branched over $L$, and $\widetilde{L}$ is the preimage of $L\}$. The link volume is, of course, the minimal volume of the manifolds in this set, and in this paper we concentrate on it. It is easy to see that there is no upper bound to the volumes of manifolds in this set. It may be interesting to try and characterize the elements of this set. \item Do there exist hyperbolic manifolds $M_1$, $M_2$ with $\vol[M_1] = \vol[M_2]$ and $\lv[M_1] \neq \lv[M_2]$? \item Similarly, do there exist hyperbolic manifolds $M_1$, $M_2$ with $\lv[M_1] = \lv[M_2]$ and $\vol[M_1] \neq \vol[M_2]$? We note that the examples of manifolds with the same link volume mentioned above are all Seifert fibered spaces. \end{enumerate} \section{Proof of Theorem~\ref{thm:jt}} \label{sec:jt} Fix $V>0$. Fix $\mu>0$ a Margulis constant for $\mathbb{H}^{3}$ and $d>0$.
(We remark that the constant $\Lambda$ that we obtain in this proof depends on these choices.) Let $M$ be a manifold of $\lv[M] < V$. Let $M \stackrel{p} \to (S^3,L)$ be a cover realizing $\lv[M]$. Denote the $d$ neighborhood of the $\mu$-thick part of $S^{3} \setminus L$ by $E_{L}$. By construction, $E_{L}$ is obtained from $S^{3} \setminus L$ by drilling out certain geodesics; by Kojima~\cite[Proposition~4]{kojima}, $E_{L}$ is hyperbolic. Let $X_{\phi}$ denote the preimage of $E_{L}$ in $M$. Then the cover $\phi:M \to S^{3}$ induces an unbranched cover $\phi:X_{\phi} \to E_{L}$. By lifting the hyperbolic structure from $E_{L}$ to $X_{\phi}$, we see that $X_{\phi}$ is a finite volume hyperbolic manifold. By construction, the following diagram commutes (where vertical arrows represent the covering projections and horizontal arrows represent Dehn fillings): \begin{center} \begin{picture}(200,60)(0,0) \put( 0, 0){\makebox(0,0){$E_{L}$}} \put( 0,50){\makebox(0,0){$X_{\phi}$}} \put( 0, 40){\vector(0,-1){30}} \put( 10, 0){\vector(1,0){75.5}} \put(100, 0){\makebox(0,0){$S^3,L$}} \put(100, 40){\vector(0,-1){30}} \put(8,28){\makebox(0,0){$/\phi$}} \put(108,28){\makebox(0,0){$/\phi$}} \put(100,50){\makebox(0,0){$M$}} \put( 10,50){\vector(1,0){80}} \end{picture} \end{center} By J\o rgensen and Thurston (see, for example, \cite{KR}), there exists a constant $\Lambda$ (depending on $\mu$ and $d$), so that for any complete, finite volume hyperbolic manifold $N$, the $d$-neighborhood of the $\mu$-thick part of $N$ can be triangulated using no more than $\Lambda \vol[N]$ tetrahedra. Applying this to $N = S^{3} \setminus L$, since the $d$-neighborhood of the $\mu$-thick part of $N$ is $E_{L}$, we see that $E_{L}$ can be triangulated using at most $\Lambda \vol[S^{3} \setminus L] = \Lambda \lv[M] / p < \Lambda V/p$ tetrahedra. 
Since there are only finitely many manifolds that can be triangulated using at most $\Lambda V/p$ tetrahedra, there are only finitely many possibilities for $E_{L}$. Lifting the triangulation from $E_{L}$ to $X_{\phi}$, we see that $X_{\phi}$ can be triangulated with at most $\Lambda \lv[M] < \Lambda V$ tetrahedra, and that $\phi:X_{\phi} \to E_{L}$ is simplicial. This shows that there are only finitely many possibilities for $X_{\phi}$ and $\phi$. We denote them $\{\phi_{i}:X_{i} \to E_{i}\}_{i=1}^{n_{V}}$. \section{The Link Volume and Dehn Filling} \label{sec:dehn} In this section we prove Theorem~\ref{thm:dehn}. The proof is constructive and requires two elements: the first is Hilden's construction of simple 3-fold covers of $S^{3}$, and the second is the results of Thurston and Menasco that show that an alternating link that ``looks like'' a hyperbolic link is in fact hyperbolic. For the latter, see Subsection~\ref{sebsection:alternating}. We now explain the former. In~\cite{hilden}, Hilden showed that any 3-manifold is a simple 3-fold cover of $S^{3}$. The crux of his proof is the construction, for any $g$, of a 3-fold branched cover $p:V_{g} \to B$, where $V_{g}$ is the genus $g$ handlebody and $B$ is the 3-ball. He then proves that any map $f:\partial V_{g} \to \partial V_{g}$ can be isotoped so as to commute with $p$.
Thus $f$ induces a map $\bar f:\partial B \to \partial B$ so that the following diagram commutes (here the vertical arrows denote Hilden's covering projection): \begin{center} \begin{picture}(200,60)(0,0) \put( 0, 0){\makebox(0,0){$B$}} \put( 0,50){\makebox(0,0){$V_{g}$}} \put( 0, 40){\vector(0,-1){30}} \put( 10, 0){\vector(1,0){75.5}} \put(100, 0){\makebox(0,0){$B$}} \put(100, 40){\vector(0,-1){30}} \put(50,10){\makebox(0,0){$\bar f$}} \put(50,60){\makebox(0,0){$f$}} \put(100,50){\makebox(0,0){$V_g$}} \put( 10,50){\vector(1,0){80}} \end{picture} \end{center} Starting with a closed, orientable, connected 3-manifold $M$, Hilden uses a Heegaard splitting of $M = V_g \cup_f V_g$; the construction above gives a map to $B \cup_{\bar f} B \cong S^3$. This is, in a nutshell, Hilden's construction of $M$ as a cover of $S^3$. Our goal is to use a similar construction to get a map from $X$. Since $X$ has boundary it cannot branch cover $S^{3}$, and we must modify Hilden's construction. To that end, we first describe the cover $p:V_{g} \to B$ in detail. Let $S_{3g+2}$ be the $(3g+2)$-times punctured $S^2$, viewed as a $3g$-times punctured annulus. Then $S_{3g+2} \times [-1,1]$ admits a symmetry of order two (rotation by $\pi$ about the $y$-axis) given by $(x,y,t) \mapsto (-x,y,-t)$, where $S_{3g+2}$ is embedded symmetrically in the $xy$-plane as shown in Figure~\ref{fig:annulus}. \begin{figure} \caption{$S_{3g+2}$} \label{fig:annulus} \end{figure} $S_{3g+2} \times [-1,1]$ also admits a symmetry of order 3 by rotating $S_{3g+2}$ about the origin of the $xy$-plane and fixing the $[-1,1]$ factor. These two symmetries generate an action of the dihedral group of order 6 on $S_{3g+2} \times [-1,1]$. It is easy to see that the quotient is a ball. On the other hand, the quotient of $S_{3g+2} \times [-1,1]$ by the order two symmetry is $V_{g}$.
This induces the map $p:V_{g} \to B$; note that this is a cover, branched along a trivial tangle with $g+2$ arcs (thus the branch set of the map $M \to S^3$ described above is a $(g+2)$-bridge link, and the braiding is determined by $\bar f$). This is Hilden's construction, see Figure~\ref{fig:annulus1}, where the branch set of $V_{g} \to B$ is indicated by dashed lines (in $B$). \begin{figure} \caption{Hilden's covers} \label{fig:annulus1} \end{figure} A Heegaard splitting for the manifold with boundary $X$ is a decomposition of $X$ into two compression bodies; we assume the reader is familiar with the basic definitions (see, for example, \cite{CassonGordon}). We use the notation $V_{g,n}$ for a compression body with $\partial_{+} V_{g,n}$ a genus $g$ surface and $\partial_{-} V_{g,n}$ a collection of $n$ tori (so $0 \leq n \leq g$). Since $\partial X$ consists of $n$ tori, any Heegaard splitting of $X$ consists of two compression bodies of the form $V_{g,n_{1}}$ and $V_{g,n_{2}}$, for some $g,n_1,n_2$ with $n_{1} + n_{2} = n$. We use the notation $V_{g,n_{i}}^{*}$ for the manifold obtained by removing $n_{i}$ disjoint open balls from the interior of $V_{g,n_{i}}$. We use the notation $X^{*}$ for the manifold obtained by removing $n$ disjoint open balls from the interior of $X$. Finally, we use the notation $B^{*}_{n_{i}}$ for the manifold obtained by removing $n_{i}$ disjoint open balls from the interior of $B$. Since compression bodies do not admit simple 3-fold branched covers of the type we need, we work with $V_{g,n_{i}}^{*}$, see Figure~\ref{fig:annulus2}. \begin{figure} \caption{Hilden's covers modified} \label{fig:annulus2} \end{figure} Figure~\ref{fig:annulus2} is very similar to Figure~\ref{fig:annulus1}, but has a few ``decorations'' added in blue. The circles added to $S_{3g+2} \times [-1,1]$ are embedded in $S_{3g+2} \times \{0\}$. There are exactly $3n_{i}$ such circles.
Clearly, they are invariant under the dihedral group action, and their images in $V_{g}$ and $B$ are shown. By removing an appropriate neighborhood of these circles and their images, we get a simple 3-fold cover from $V_{g,n_{i}}^{*}$ to $B_{n_{i}}^{*}$. Applying Hilden's theorem to the gluing map $f:\partial_{+} V_{g,n_{1}} \to \partial_{+} V_{g,n_{2}}$, we obtain a map $\bar f:\partial B_{n_{1}} \to \partial B_{n_{2}}$. Clearly, downstairs we see the manifold obtained by removing $n_{1} + n_{2} = n$ open balls from $S^{3}$; we denote it by $S^{3,*}$. Note that the branch set is a tangle (that is, a 1-manifold properly embedded in $S^{3,*}$) that intersects every sphere boundary component in exactly 4 points; we denote this branch set by $T$. Moreover, the preimage of each component of $\partial S^{3,*}$ consists of exactly two components: a torus that double covers it, and a sphere that projects to it homeomorphically. The map from the torus in $\partial X^{*}$ to the sphere in $S^{3,*}$ is the quotient under the well-known hyperelliptic involution. \noindent Hilden's construction, as adapted to our scenario, is the key to everything we do below. We sum up its main properties here: \begin{prop} \label{prop:hilden} Let $X$ be a compact, orientable manifold with $\partial X$ consisting of $n$ tori. Let $X^{*}$ be the manifold obtained by removing $n$ open balls from the interior of $X$. Let $S^{3,*}$ be the manifold obtained by removing $n$ open balls from $S^{3}$. Then there exists a simple 3-fold cover $p:X^{*} \to S^{3,*}$. The branch set is a compact 1-manifold, denoted $T$, that intersects every boundary component of $S^{3,*}$ in exactly four points. The preimage of each component $S$ of $\partial S^{3,*}$ consists of one torus component of $\partial X$ that double covers $S$ via a hyperelliptic involution, and one sphere component of $\partial X^{*} \setminus \partial X$ that maps to $S$ homeomorphically.
\end{prop} Recall that in Theorem~\ref{thm:dehn}, $X$ came equipped with a choice of meridian and longitude on each boundary component. $S^{3,*}$ is naturally a subset of $S^{3}$. We isotope $S^{3,*}$ in $S^{3}$ so that, after projecting it into the plane, the following conditions hold: \begin{enumerate} \item The balls removed from $S^{3}$ are denoted $\bar B_{i}$ ($i=1,\dots,n$). The projection of each $\bar B_{i}$ is a round disk; these disks are denoted $B_i$, see Figure~\ref{fig:Bi}. \begin{figure} \caption{$T$ in a neighborhood of $B_{i}$} \label{fig:Bi} \end{figure} \item $T$ intersects each $B_i$ in exactly four points. Each of these points is an endpoint of a strand of $T$. The four points are the intersection of the lines of slopes $\pm 1$ through the center of the disk with its boundary, and are labeled (in cyclic order) NE, SE, SW, and NW. \item We twist the boundary components of $S^{3,*}$ so that, in addition, the meridian and longitude of the corresponding boundary component of $\partial X$ map to horizontal and vertical circles, respectively; these curves (slightly rounded) are labeled $\mu$ and $\lambda$ in Figure~\ref{fig:Bi}. \end{enumerate} Let $T_i \subset \partial X$ be the torus that projects to $\partial \bar B_i$. Recall that by {\it Dehn filling} $T_i$ we mean attaching a solid torus $V$ to $T_i$. $V$ is foliated by concentric tori, with one singular leaf (the core circle). To understand how the hyperelliptic involution extends from $\partial V = T_{i}$ into $V$ we construct the following explicit model of the hyperelliptic involution: let $T_{i}$ be the image of $\mathbb R^{2}$ under the action of $\mathbb Z^{2}$ given by $(x,y) \mapsto (x+n,y+m)$. Then the hyperelliptic involution is given by rotation by $\pi$ about $(0,0)$. The four fixed points on $T_{i}$ are the images of $(0,0)$, $(1/2,0)$ (rotate and translate by $(x+1,y)$), $(0,1/2)$ (rotate and translate by $(x,y+1)$), and $(1/2,1/2)$ (rotate and translate by $(x+1,y+1)$).
Given any slope $p/q$ (with $p$ and $q$ relatively prime), it is clear that the foliation of $\mathbb R^{2}$ by straight lines of slope $p/q$ is invariant under the rotation by $\pi$ about $(0,0)$. The line through $(0,0)$ goes through $(p/2,q/2)$, which is the image of one of the other three fixed points, as not both $p$ and $q$ are even. Similarly for the lines through $(1/2,0)$, $(0,1/2)$, $(1/2,1/2)$; these lines project to two circles on the torus, with exactly two fixed points on each circle. By considering the images of the foliation of $\mathbb R^{2}$ by lines of slope $p/q$, we obtain a foliation of $\partial V$ by circles (each representing the slope $p/q$). In the foliation of $V$ by concentric tori, each torus admits such a foliation, and the length of the leaves limit on $0$ as we approach the singular leaf. At the limit, we see that the involution on the non-singular leaves induces an involution of the singular leaf whose image is an arc. Thus the hyperelliptic involution of $T_{i}$ extends to an involution on $V$, whose image is foliated by spheres, with one singular leaf that is an arc. The image of $V$ is a ball, and the branch set is a rational tangle of slope $p/q$; for more about rational tangles and their double covers see, for example,~\cite{rolfsen}. We denote this rational tangle by $R_{i}$. \begin{notation} \label{notation:EquivalenceClasses} We assume the rational tangles we study have been isotoped to be alternating (it is well known that this can be achieved). Two rational tangles are considered {\it equivalent} if the following two conditions hold: \begin{enumerate} \item The over/under information of the strands of the rational tangles coming in from the NE are the same. Since the rational tangle is alternating, this implies that the over/under information from the other corners is the same as well. \item The strands of the rational tangles that start at NE end at the same point (SE, SW, or NW). 
\end{enumerate} Note that the crossing information is ill-defined for the two tangles $1/0$ and $0/1$, as they have no crossings. We arbitrarily choose an equivalence class for each of these tangles, so that the second condition is fulfilled. We obtain $6^n$ possible equivalence classes (recall that $n = {|\partial X|}$). \end{notation} \noindent Given slopes on $T_{1},\dots,T_{n}$, we get rational tangles $R_{1},\dots,R_{n}$, as described above. In each $\bar B_{i}$ we place a rational tangle, denoted $\widehat R_{i}$, so that $\widehat R_{i} \in \{\pm 1, \pm 2, \pm 1/2 \}$, representing the same equivalence class as $R_{i}$. We assume that their projections into $B_{i}$ are as in Figure~\ref{fig:onetwohalf}. We thus obtain a link, denoted $\widehat T$, and a diagram for $\widehat T$, denoted $\widehat D$. Since $\widehat T$ and $\widehat D$ only depend on the equivalence classes of the slopes, when considering all possible slopes, we obtain finitely many links and diagrams (specifically, $6^{n}$). In order to obtain a hyperbolic branch set, we will, eventually, apply Menasco~\cite{menasco} as explained in Subsection~\ref{sebsection:alternating}. To that end we will need to make the branch set alternating. As we shall see below, we do this using $1 \to -2$ and $-2 \to 1$ Montesinos moves; these moves can be used to make the link alternating in a way that is very similar to crossing changes. Below we will show that we can apply Montesinos moves to $T$; however, we may not apply these moves to the rational tangles inside $B_{i}$. This causes the following trouble: let $\alpha \subset T$ be an interval connecting two punctures, say $\partial \bar B_{i}$ and $\partial \bar B_{i'}$ (possibly, $i=i'$). Assume that the last crossing of $R_{i}$ before $\alpha$ is an overcrossing, and that the number of crossings along $\alpha$ is even. Then if we make $T$ alternate, the last crossing along $\alpha$ will be an overcrossing.
This means that the first crossing of $R_{i'}$ after $\alpha$ must be an undercrossing. This may or may not be the case, and we have no control over it. In order to encode this, we consider the following graph $\Gamma$: $\Gamma$ has $n$ vertices, and they correspond to $B_{1},\dots,B_{n}$. The edges of $\Gamma$ correspond to intervals of $T$ that connect $B_{i}$ to $B_{i'}$ (again, $i$ and $i'$ need not be distinct). Inspired by the discussion above, we assign signs to the edges of $\Gamma$ as follows (in essence, good edges get a $+$ and bad edges get a $-$): \begin{enumerate} \label{SignsOfEdges} \item Let $I \subset T$ be an interval connecting $B_{i}$ to $B_{i'}$ (possibly $i=i'$) so that the last crossing before $I$ and the first crossing after $I$ are the same (that is, both are overcrossings or both are undercrossings), and the number of crossings along $I$ is odd. Then the corresponding edge gets the sign $+$. \item Let $I \subset T$ be an interval connecting $B_{i}$ to $B_{i'}$ (possibly $i=i'$) so that the last crossing before $I$ and the first crossing after $I$ are the opposite (that is, one is an overcrossing and one an undercrossing), and the number of crossings along $I$ is even. Then the corresponding edge gets the sign $+$. \item All other edges get the sign $-$. \end{enumerate} If $\Gamma$ is connected, we pick a spanning tree $\widehat \Gamma$ for $\Gamma$. That is, $\widehat \Gamma$ is a tree obtained from $\Gamma$ by removing edges, so that every vertex of $\Gamma$ is adjacent to some edge of $\widehat \Gamma$. In general, we take $\widehat \Gamma$ to be a {\it maximal forest} in $\Gamma$. A forest is a collection of trees, or a graph without cycles. A {\it maximal forest} in $\Gamma$ is a graph obtained from $\Gamma$ by removing a minimal (with respect to inclusion) set of edges so that a forest is obtained; equivalently, it is the union of maximal trees for the connected components of $\Gamma$.
Clearly a maximal forest $\widehat\Gamma$ has the following two properties: first, $\widehat\Gamma$ contains no cycles. Second, any edge from $\Gamma$ that we add to $\widehat \Gamma$ closes a cycle. \begin{lem} \label{lemma:VertexSigns} There is a sign assignment to the vertices of $\widehat \Gamma$, so that an edge of $\widehat \Gamma$ has a plus sign if and only if the vertices it connects have the same sign. \end{lem} \begin{proof}[Proof of Lemma~\ref{lemma:VertexSigns}] We induct on the number of edges in $\widehat \Gamma$. If there are no edges there is nothing to prove. Assume there are edges. In that case at least one component of $\widehat \Gamma$ is a tree with more than one vertex. Such a tree must have a leaf, say $v$. Remove $v$ and $e$, the unique edge of $\widehat \Gamma$ connected to $v$. By induction, there is a sign assignment for the remaining vertices fulfilling the conditions of the lemma. We now add $v$ and $e$. Clearly, we can give $v$ a sign so that the condition of the lemma holds for $e$. The lemma follows. \end{proof} We now isotope $\widehat T$ and accordingly modify $\widehat D$ as shown in Figure~\ref{fig:YamashitaMove} \begin{figure} \caption{Modifying $\widehat D$ near $B_{i}$} \label{fig:YamashitaMove} \end{figure} at each puncture that corresponds to a vertex with a minus sign. Since this changes the number of crossings on some of the strands of $\Gamma$, we recalculate the signs on the corresponding edges. Note that the isotopy above adds one or three crossings to every strand of $T$ that corresponds to an edge of $\widehat \Gamma$ with sign $-$, and zero, two, four, or six crossings to every strand of $T$ that corresponds to an edge with sign $+$. We easily conclude that every edge of $\widehat \Gamma$ has sign $+$. Moreover: \begin{lem} \label{lemma:SingsOfEdges} Every edge of $\Gamma$ has sign $+$.
\end{lem} \begin{proof} The proof is very similar to the proof that every link projection can be made into an alternating link projection via crossing change and is left for the reader, with the following hint: suppose there exists an edge, say $e$, whose sign is $-$. Since we used a maximal forest, there is a cycle in $\Gamma$ (say $e_{1},\dots,e_{k}$) so that $e_{1} = e$ and $e_{i}$ belongs to the maximal forest for $i>1$; in particular, exactly one edge of the cycle has sign $-$. Use this cycle to produce a closed curve (not necessarily simple) in $S^{2}$ that intersects the link $\widehat T$ transversely an odd number of times. This is absurd, in light of the Jordan Curve Theorem. \end{proof} Next we prove that $X^{*}$ can be obtained as a 3-fold cover of $S^{3,*}$ with a particularly nice branch set. We begin with $T$, $\widehat T$, and $\widehat D$ described above; their properties are summed up in Condition~(1) of Lemma~\ref{lem:hyperbolicity} below. For parts of this argument {\it cf.} Blair~\cite{blair}. Recall Definitions~\ref{dfn:alternating} for standard terms in knot theory. \begin{lem} \label{lem:hyperbolicity} There exists a link $\widehat T$ in $S^{3}$, with projection into $S^{2}$ denoted $\widehat D$, so that $X^{*}$ is a simple, $3$-fold cover of $S^{3} \setminus \cup_{i=1}^{n} \mbox{\rm int}(\bar B_{i})$ branched along the tangle $T = \widehat T \cap (S^{3} \setminus \cup_{i} \mbox{\rm int}(B_{i}))$ and the following conditions hold: \begin{enumerate} \item $\widehat T \cap \bar B_{i} = \widehat R_{i}$ (recall that $\widehat R_{i}$ projects into $B_{i}$ as shown in Figure~\ref{fig:onetwohalf}), \begin{figure} \caption{$\hat R_{i}$} \label{fig:onetwohalf} \end{figure} the projection of $\mbox{int}(T \setminus \widehat T)$ is disjoint from $\cup_{i=1}^{n}B_{i}$, and the meridian and longitude of $T_{i} \subset \partial X$ project to horizontal and vertical circles about $B_{i}$ (respectively, recall Figure~\ref{fig:Bi}).
\item $\widehat D$ is not a split diagram. \item Every simple closed curve in $S^{2} \setminus \cup B_{i}$ that intersects $\widehat D$ transversely in two simple points bounds a disk that intersects $\widehat D$ in a single arc with no crossing. \item Let $\alpha \subset S^{2}$ be an arc with one endpoint on $B_{i'}$ and the other on $B_{i''}$ (for $i', \ i'' =1,\dots,n$, possibly $i'=i''$), and $\mbox{\rm int}(\alpha) \cap (\cup_{i} B_{i}) = \emptyset$. Then one of the following conditions holds: \begin{enumerate} \item $i' = i''$, and $\alpha$ cobounds a disk with $\partial B_{i}$ with no crossings. \item $|\mbox{int}(\alpha) \cap \widehat D| > 2$. \end{enumerate} \item In the three coloring of $\widehat D \cap (S^{2} \setminus \cup_{i=1}^{n} B_{i})$ induced by the cover $X^{*} \to S^{3,*}$, every crossing is three colored. \item $\widehat D$ is alternating. \item $\widehat T$ is a knot. \end{enumerate} \end{lem} \begin{rmk} \label{rmk:signs} To obtain conditions~(1)--(5) we modify $\widehat T$ via isotopy; except for the move shown in Figure~\ref{fig:localmove2}, the projection of the support of this isotopy is disjoint from $\cup_{i=1}^{n}B_{i}$. Note that in the move shown in Figure~\ref{fig:localmove2} each edge gets an even number of crossings added. Hence the signs of the edges of $\Gamma$ do not change, and Lemma~\ref{lemma:SingsOfEdges} still holds after we obtain Conditions~(1)--(5). (We use this lemma to obtain Condition~(6), and never need it again after that.) \end{rmk} \begin{proof} {\bf Condition~(1).} Condition~(1) already holds. We note that none of the moves applied in the proof of this lemma changes this. We will not refer to Condition~(1) explicitly. \noindent {\bf Condition~(2).} $\widehat D$ is diagrammatically split if and only if it is disconnected. Suppose $\widehat D$ is disconnected, and let $K_j$ and $K_{j'}$ be components of $\widehat T$ that project to distinct components of $\widehat D$.
Let $\alpha \subset S^{2} \setminus \cup_{i=1}^{n} B_{i}$ be an embedded arc with one endpoint on $K_{j}$ and the other on $K_{j'}$ (note that $K_{j}, \ K_{j'} \not\subset \cup_{i=1}^{n} B_{i}$, hence $\alpha$ exists; $\alpha$ may intersect $\widehat D$ in its interior). We perform an {\it isotopy along $\alpha$}, as shown in Figure~\ref{fig:alphamove}. \begin{figure} \caption{Isotopy along $\alpha$} \label{fig:alphamove} \end{figure} After that $K_j$ crosses $K_{j'}$ outside $\cup_{i=1}^n B_i$; clearly, this reduces the number of components of $\widehat D$. Repeating this process if necessary, Condition~(2) is obtained. \noindent {\bf Condition~(3).} For each $B_i$, let $N(B_i)$ be a normal neighborhood of $B_i$ so that $\widehat D \cap N(B_i)$ consists of the tangle in $B_i$ and four short segments as in the left hand side of Figure~\ref{fig:localmove2}. We assume further that for $i \neq j$, $N(B_i) \cap N(B_j) = \emptyset$. \begin{figure} \caption{Isolating $B_i$} \label{fig:localmove2} \end{figure} Inside each $N(B_i)$ perform the isotopy shown in Figure~\ref{fig:localmove2}. Next we count the number of simple closed curves in $S^{2} \setminus \cup_{i} B_{i}$ that intersect $\widehat D$ in two points and do not bound a disk $\Delta$ such that $\widehat D \cap \Delta$ is a single arc with no crossings. These curves are counted up to ``diagrammatic isotopy'', that is, an isotopy via curves that are transverse to $\widehat D$ at all times and in particular are disjoint from the crossings. Let $C_1,\dots,C_k$ be the closures of the components of $S^2 \setminus (\widehat D \cup (\cup_i B_i))$. Let $\gamma, \gamma' \subset S^2 \setminus \cup_i B_i$ be two simple closed curves that intersect $\widehat D$ transversely in two simple points. Then $\widehat D$ cuts $\gamma$ into 2 arcs, say one in the region $C_j$ and one in $C_{j'}$.
Note that if $j=j'$, then $C_j$ is adjacent to itself, and in particular there is a simple closed curve in $S^2$ that intersects $\widehat D$ transversely in one point, which is absurd. Condition~(2) (connectivity of $\widehat D$) is equivalent to all regions being disks, and hence implies that $\gamma$ and $\gamma'$ are diagrammatically isotopic if and only if both curves traverse the same regions $C_j$ and $C_{j'}$, and $\gamma \cap \partial C_j$ is contained in the same segments of $C_j \cap C_{j'}$ as $\gamma' \cap \partial C_{j'}$. (See Figure~\ref{fig:regions}; \begin{figure} \caption{Segments} \label{fig:regions} \end{figure} here a {\it segment} means an interval $I \subset S^{2} \setminus \cup_{i} \mbox{int}(B_{i})$, so that $I \subset C_j \cap C_{j'}$, $\partial I$ are crossings or lie on $\partial B_i$ for some $i$, and $I$ contains no crossings in its interior.) For any pair of regions $C_j$ and $C_{j'}$, let $n_{j,j'}$ be the number of segments in $C_j \cap C_{j'}$ (for example, in Figure~\ref{fig:regions}, $n_{j,j'} = 4$). Then we see that the number of simple closed curves that intersect $\widehat D$ in two simple points, traverse $C_j$ and $C_{j'}$, and do not bound a disk containing a single arc of $\widehat D$ (counted up to diagrammatic isotopy) is ${n_{j,j'}}\choose{2}$, where ${0}\choose{2}$ and ${1}\choose{2}$ are naturally understood to be 0. Hence the total number of such curves (counted up to diagrammatic isotopy) is \begin{equation} \label{equation:NumberOfCircles} \sum_{1 \leq j < j' \leq k}{n_{j,j'}\choose2}. \end{equation} Now assume that Condition~(3) does not hold; then there exist regions $C_j$ and $C_{j'}$ with $n_{j,j'} \geq 2$. Let $I$ be an interval of $C_j \cap C_{j'}$. Since we isolated $B_i$ (for all $i$) as shown in Figure~\ref{fig:localmove2}, the endpoints of $I$ cannot lie on $\partial B_i$ and must therefore both be crossings.
The move shown in Figure~\ref{fig:localmove3} \begin{figure} \caption{Isotopy to reduce $C_{i,i'}$} \label{fig:localmove3} \end{figure} reduces $n_{j,j'}$ by one. This move introduces several new regions, and those are shaded in Figure~\ref{fig:localmove3}. Inspecting Figure~\ref{fig:localmove3}, we see that for any pair of regions $C_{j}$, $C_{j'}$ that existed prior to the move, $n_{j,j'}$ does not increase, and for any pair of regions $C_{j}$, $C_{j'}$ with at least one new region, $n_{j,j'}$ is 1 or 0. Hence the sum in Equation~(\ref{equation:NumberOfCircles}) is reduced, and repeated application of this move yields a diagram $\widehat D$ for a link $\widehat T$ for which Condition~(3) holds; by construction, Condition~(2) still holds. \noindent {\bf Condition~(4).} Condition~(4) holds thanks to the isotopy performed in the previous step and shown in Figure~\ref{fig:localmove2}. \noindent {\bf Condition~(5).} Since $\widehat D$ is the branch set of the simple 3-fold cover $X^{*} \to S^{3,*}$ it inherits a 3-coloring as explained in Subsection~\ref{subsec:covers}, where the colors are transpositions in $S_{3}$. Since $X^{*}$ is connected, at least two colors appear in the coloring of $T$ (recall Lemma~\ref{lem:connected3fold}; that lemma was stated for covers of $S^{3}$ but it is easy to see that it holds for covers of $S^{3,*}$ as well). Assume there exists a one colored crossing of $\widehat D$ outside $\cup_{i=1}^n B_i$, say $c$, and let $p$ be a point on a strand of $\widehat D$ that is of a different color than $c$, and so that $p \not\in \cup_{i=1}^n B_i$. Let $\alpha$ be an arc connecting $p$ and $c$ so that $\alpha \cap (\cup_{i=1}^n B_i) = \emptyset$. If $\mbox{int}(\alpha)$ intersects a strand of $\widehat D$ whose color is different than the color of $c$, we cut $\alpha$ short at that intersection. Thus we may assume that any point of $\mbox{int}(\alpha) \cap \widehat D$ has the same color as $c$.
We apply the following move (often used by Hilden, Montesinos and others), see Figure~\ref{fig:threecolors}. \begin{figure} \caption{Making the crossings 3-colored} \label{fig:threecolors} \end{figure} This move reduces the number of one colored crossings outside $\cup_i B_i$, and hence repeating this move gives Condition~(5). We now verify that Conditions~(2)--(4) still hold. Inspecting Figure~\ref{fig:threecolors}, we see that Condition~(2), which is equivalent to connectivity of $\widehat D$, clearly holds. A simple closed curve that intersects $\widehat D$ twice after this move intersects it at most twice before the move. By considering these curves and Figure~\ref{fig:threecolors} we conclude that Condition~(3) holds as well (in checking this, note that $\mbox{int}\alpha \cap \widehat D$ may be empty; to rule out one case, you need to use the coloring: a red arc cannot be connected to a blue arc without a crossing). For each $i$, the preimage of $\partial \bar B_{i}$ is disconnected; hence the four segments of $\widehat D$ on the left side of Figure~\ref{fig:localmove2} are all the same color. Since $\widehat D$ is connected and has more than one color, it must have a three colored crossing, which cannot be contained in $N(B_{i})$ for any $i$. We can take the point $p$ in the construction above to be a point near that three colored crossing, and in particular, we may assume that $p \not\in N(B_{i})$ for any $i$. Therefore this move affects $\widehat D \cap N(B_{i})$ by {\it adding} arcs that traverse $N(B_{i})$ without intersecting $B_{i}$ itself, but not changing any of the existing diagram in the right hand side of Figure~\ref{fig:localmove2}. Therefore Condition~(4) holds. \noindent {\bf Condition~(6).} Note that the tangles $\widehat R_i$ are alternating ($i=1,\dots,n$). It is well known that any link projection can be made into an alternating projection by reversing some of its crossings.
We mark the crossings of $\widehat D$ by $\pm$, marking a crossing $+$ if we do not need to reverse it and $-$ otherwise. By reversing all the signs if necessary, we may assume that the signs in $B_{1}$ are $+$. Since the signs of all the edges of $\Gamma$ are $+$ (Lemma~\ref{lemma:SingsOfEdges} and Remark~\ref{rmk:signs}), the signs in every $B_{i}$ are all $+$. Thus all the crossings that are marked $-$ are outside $\cup_{i=1}^n B_{i}$, and hence three colored. We change each of these crossings using the Montesinos move $+1 \mapsto -2$ or $-1 \mapsto +2$, as in the top row of Figure~\ref{fig:montesinos}, noting that this does not change the double cover. It is clear that now $\widehat D$ is an alternating diagram fulfilling Conditions~(1)--(6). \noindent {\bf Condition~(7).} Assume $T$ is a link. If there is a crossing outside $\cup_{i} B_{i}$ that corresponds to two distinct components of $T$, we perform a $+1 \mapsto +4$ or $-1 \mapsto -4$ Montesinos move; this reduces the number of components of $T$. Assume there is no such crossing, and let $\alpha$ be an arc connecting strands (say $s_1$ and $s_{2}$) that correspond to two distinct components of $T$. Since no $B_{i}$ contains a closed component, we may assume $\alpha \cap (\cup_{i} B_{i}) = \emptyset$; furthermore, by truncating $\alpha$ if necessary, we may assume that $\mbox{int}\alpha \cap \widehat D = \emptyset$. By Condition~(4) at least one endpoint of $s_{2}$ is a crossing outside $\cup_{i} B_{i}$, say $c$. If $s_{1}$ and $s_{2}$ have the same color, we replace $\alpha$ with an arc that connects $s_{1}$ with a strand adjacent to $s_{2}$ at $c$. By Condition~(5) $c$ is three colored, and by assumption, both its strands correspond to the same component of $T$. Thus we obtain an arc that connects distinct components and has endpoints of different colors. Finally, we assume without loss of generality that the crossing information at $s_{1}$ is as shown in Figure~\ref{fig:makingknot}.
Since $\widehat D$ is connected and alternating, considering the face containing $\alpha$, we conclude that the crossing information on $s_{2}$ is as shown in that figure. We change $\widehat D$ using a $0 \mapsto \pm3$ Montesinos move (as shown in the bottom of Figure~\ref{fig:montesinos}), obtaining a diagram fulfilling Conditions~(1)--(6) that corresponds to a link with fewer components, see Figure~\ref{fig:makingknot}. \begin{figure} \caption{Making the branch set into a knot} \label{fig:makingknot} \end{figure} Iterating this process, we obtain a knot, completing the proof of Lemma~\ref{lem:hyperbolicity}. \end{proof} We are now ready to complete the proof of Theorem~\ref{thm:dehn}. Fix $X$ as in the statement of the theorem and pick a slope on each component of $\partial X$, say $p_{i}/q_{i}$ on the torus $T_{i} \subset \partial X$; note that we are using the meridian-longitudes to express the slopes as rational numbers (possibly, $1/0$). Construct a 3-fold, simple cover $X^{*} \to S^{3,*}$ as in Lemma~\ref{lem:hyperbolicity} that corresponds to the appropriate equivalence classes of the slopes (recall Notation~\ref{notation:EquivalenceClasses}). For convenience we work with $\widehat D$, the diagram of $\widehat T$. We now change the diagram $\widehat D$ by replacing the rational tangle $\widehat R_{i}$ in $B_{i}$ (that represents the equivalence class of $p_{i}/q_{i}$) with the rational tangle $R_{i}$ (that realizes the slope $p_{i}/q_{i}$), $i=1,\dots,n$. By construction the four strands of $\widehat D$ that connect to $B_{i}$ are single colored, and we color the $R_{i}$ by the same color. Thus we obtain a diagram of a three colored link denoted $K$. We claim that $K$ has the following properties: \begin{enumerate} \item $K$ is a knot. \item $K$ admits an alternating projection. \item This projection is non-split. \item This projection is strongly prime.
\end{enumerate} We prove each claim in order: \begin{enumerate} \item Since the tangles $\widehat R_{i}$ and $R_{i}$ are equivalent they connect the same points on $\partial B_{i}$ (Notation~\ref{notation:EquivalenceClasses}). By Lemma~\ref{lem:hyperbolicity}~(7), $\widehat T$ is a knot. Hence $K$, which is obtained from $\widehat T$ by replacing $\widehat R_{i}$ by $R_{i}$, is a knot as well. \item By Lemma~\ref{lem:hyperbolicity}~(6), $\widehat D$ is alternating. By the definition of the equivalence classes of rational tangles, $K$ (which is obtained by replacing $\widehat R_{i}$ by $R_{i}$) admits an alternating projection. \item Let $\gamma \subset S^{2}$ be a simple closed curve disjoint from the diagram for $K$. If $\gamma$ is diagrammatically isotopic (that is, an isotopy through curves that are transverse to the diagram at all times) to a curve that is disjoint from $\cup_{i} B_{i}$ then by Lemma~\ref{lem:hyperbolicity}~(2) $\gamma$ bounds a disk disjoint from $\widehat D$; this disk is also disjoint from the diagram of $K$. If $\gamma$ is diagrammatically isotopic into $B_{i}$, then $\gamma$ bounds a disk disjoint from the diagram for $K$ since rational tangles are prime. Finally, if $\gamma$ is not isotopic into or out of $\cup_{i} B_{i}$, we violate Condition~(4b) of Lemma~\ref{lem:hyperbolicity}. Hence the diagram for $K$ is non-split. \item This is very similar to~(3) and is left to the reader. \end{enumerate} By Menasco and Thurston (see Corollary~\ref{cor:hyperbolic}), $K$ is hyperbolic. Next we note that the 3-coloring of $K$ defines a $3$-fold cover of $S^{3}$; by construction, the cover of $S^{3,*}$ is $X^{*}$. The cover of each rational tangle is disconnected and consists of a solid torus attached to $T_{i} \subset \partial X$ with slope $p_{i}/q_{i}$, and a ball attached to a component of $\partial X^{*} \setminus \partial X$. Thus we obtain $X(p_{1}/q_{1},\dots,p_{n}/q_{n})$ as a simple $3$-fold cover of $S^{3}$ branched over $K$. 
We now isotope each rational tangle $R_{i}$ to realize its depth, that is, realizing the twist number of each rational tangle (recall Subsection~\ref{subsec:twist}). The twist number of $R_{i}$ is exactly $\depth[p_{i}/q_{i}]$. The tangle $T$ (which is the projection of $K$ outside $\cup_{i} B_{i}$) has a fixed number of twist regions, say $t$. Hence the total number of twist regions is $t + \sum_{i=1}^{n} \depth[p_{i}/q_{i}] = t + \depth[\alpha]$ (where $\alpha=(\alpha_{1},\dots,\alpha_{n})$ denotes the multislope on $\partial X$, as in Section~\ref{sec:intro}). This gives an upper bound for the twist number for $K$: $$t(K)\leq t + \depth[\alpha].$$ Lackenby~\cite{lackenby} (recall Subsection~\ref{subsec:twist}) showed that there exists a constant $c$ so that: $$\vol[S^{3} \setminus K] \leq c t(K).$$ Hence we get: \begin{eqnarray*} \vol[X(\alpha_{1},\dots,\alpha_{n})] &\leq & 3 \vol[S^{3} \setminus K] \\ &\leq& 3c t(K) \\ &\leq& 3c t + 3c(\depth[\alpha]). \end{eqnarray*} By setting $A = 3c$ and $B = 3ct$, we obtain constants fulfilling the requirements of Theorem~\ref{thm:dehn} that are valid for any multislope $\alpha' = (\alpha_{1}',\dots,\alpha_{n}')$, with $\alpha_{i}'$ in the same equivalence class as $\alpha_{i}$. As there are only finitely many (specifically, $6^{n}$) equivalence classes, taking the maximal constants $A$ and $B$ for these classes completes the proof of the theorem. \end{document}
\begin{document} \setcounter{page}{1} \title[Spaceability of the set of bounded linear non--absolutely summing operators]{Spaceability of the set of bounded linear non-absolutely summing operators in Quasi-Banach sequence spaces} \author[Daniel Tomaz]{Daniel Tomaz} \address{Department of Mathematics, Federal University of Para\'{i}ba, 58.051-900 - Jo\~{a}o Pessoa, Brazil} \email{\textcolor[rgb]{0.00,0.00,0.84}{[email protected]}} \keywords{Lineability, spaceability, absolutely summing operators, quasi-Banach spaces} \thanks{Daniel Tomaz is supported by Capes} \subjclass[2010]{Primary 46A16; Secondary 46A45.} \begin{abstract} In this short note we prove that for every $0<p<1$, there exists an infinite dimensional closed linear subspace of $\mathcal{L}\left( \ell_{p};\ell_{p}\right) $ every nonzero element of which is a non $(r,s)$-absolutely summing operator for the real numbers $r,s$ with $1\leq s\leq r<\infty$. This improves a result obtained in \cite{DanielT}. \\ \end{abstract} \maketitle \section{Introduction} In the last decade many authors have been searching for large linear structures of mathematical objects enjoying certain special properties. These notions of lineability/spaceability have been investigated in several contexts, for instance, Functional Analysis, Measure Theory, Probability Theory, Set Theory, etc. \\If $E$ is a vector space, a subset $A$ of $E$ is said to be \emph{lineable} if $A\cup\left\{ 0\right\} $ contains an infinite dimensional linear subspace of $E$. Moreover, if $E$ is a topological vector space, a subset $A$ is said to be \emph{spaceable} if $A\cup\left\{ 0\right\} $ contains a closed infinite dimensional linear subspace of $E$. If $\alpha$ is a cardinal number, a subset $A$ of $E$ is called $\alpha$-\emph{lineable} (\emph{spaceable}) if $A\cup\left\{ 0\right\} $ contains a (closed) $\alpha$-dimensional linear subspace of $E$.
\\These definitions were introduced by Aron, Gurariy and Seoane-Sep\'{u}lveda in the classical references \cite{Aron} and \cite{Quarta}, considered as the founding pillars of the theory of lineability. See also, for instance, the recent papers \cite{Bernal,Pellegrino1,Pellegrino2,BCFP_LAA,cariellojfa,VMS}. We also refer to the recent monograph \cite{book}, where many examples can be found and techniques are developed in several different frameworks. \subsection{Notation} Let us now fix some notation. Let $E,F$ be Banach or quasi-Banach spaces over the scalar field $\mathbb{K}$, which can be either $\mathbb{R}$ or $\mathbb{C}$. The space of absolutely $(r,s)$-summing linear operators from $E$ to $F$ will be represented by $\prod\nolimits_{(r,s)}\left( E;F\right) $ and the space of bounded linear operators from $E$ to $F$ will be denoted by $\mathcal{L}\left(E;F\right) $. \\Recall that a linear operator $T:E\rightarrow F$ is absolutely $\left( r,s\right) $-summing if $\sum_{k}\left\Vert T\left( x_{k}\right) \right\Vert ^{r}<\infty$ whenever $\left( x_{k}\right) _{k=1}^{\infty}$ is a sequence in $E$ such that $\sum_{k}\left\vert f\left( x_{k}\right) \right\vert ^{s}<\infty$ for each $f\in E^{\prime}$, where $E^{\prime}$ denotes the topological dual of $E$. \\The basics of the linear theory of absolutely summing operators can be found in the classical book \cite{Diestel}. If $E$ is a Banach or quasi-Banach space, we denote by \\ $ \ell_{p}^{w}\left( E\right) =\left\{ \left( x_{j}\right) _{j=1}^{\infty }\in E^{\mathbb{N}};\text{ } {\textstyle\sum\limits_{j=1}^{\infty}} \left\vert \phi\left( x_{j}\right) \right\vert ^{p}<\infty,\text{ } \forall\phi\in E^{\prime}\right\}$ the space of weakly $p$-summable $E$-valued sequences and by $ \ell_{p}\left( E\right) =\left\{ \left( x_{j}\right) _{j=1}^{\infty}\in E^{\mathbb{N}};\text{ } {\textstyle\sum\limits_{j=1}^{\infty}} \left\Vert x_{j}\right\Vert ^{p}<\infty\right\}$ the space of absolutely $p$-summable $E$-valued sequences.
We will denote by $\mathfrak{c}$ the cardinality of the continuum. If $0<p<1$, the sequence spaces $\ell_{p}$ are quasi-Banach spaces ($p$-Banach space) with quasi-norms given by \[ \left\Vert x\right\Vert_{\ell_{p}}=\left( \sum\limits_{k=1}^{\infty}\left\vert x_{k}\right\vert ^{p}\right) ^{\frac{1}{p}}. \] \\The behavior of quasi-Banach spaces or, more generally, metrizable complete topological vector spaces, called $F$-spaces is sometimes quite different from the behavior of Banach spaces. Besides, the search for closed infinite dimensional subspaces of quasi-Banach spaces is a quite delicate issue. Thus, it seems interesting to look for lineability and spaceability techniques that also cover the case of quasi-Banach spaces. For more details on quasi-Banach spaces we refer to \cite{book3}. The aim of this paper is to prove the spaceability of the set of bounded linear non-absolutely summing operators in quasi-Banach sequence spaces. To be more precise, let us prove that $\mathcal{L}\left( \ell_{p};\ell_{p}\right) \diagdown\bigcup\limits_{1\leq s\leq r<\infty} {\textstyle\prod\nolimits_{\left( r,s\right) }} \left( \ell_{p};\ell_{p}\right) $ is $\mathfrak{c}$-spaceable for every $0<p<1$, improving a result that was proved in \cite{DanielT}. \section{Preliminaries} In this section, we will consider some common tools used in results related to lineability/spaceability. Let us split $\mathbb{N}$ into countably many infinite pairwise disjoint subsets $\left( \mathbb{N}_{k}\right) _{k=1}^{\infty}$. For each integer $k\in\mathbb{N},$ write \[ \mathbb{N}_{k}=\left\{ n_{1}^{\left( k\right) }<n_{2}^{\left( k\right) }<\cdots\right\} . \] Define \[ \ell_{p}^{\left( k\right) }:=\left\{ x\in\ell_{p}:\text{ }x_{j}=0\text{ if }j\notin\mathbb{N}_{k}\right\} .
\] On the other hand, since $\mathbb{N}=\left\{ n_{m}^{\left( j\right) }:j,m\in\mathbb{N}\right\} ,$ consider the sequence of linear operators \[ i^{\left( k\right) }:\ell_{p}\longrightarrow\ell_{p}^{\left( k\right) } \] given by \[ \left( i^{\left( k\right) }\left( x\right) \right) _{n_{m}^{\left( j\right) }}=\left\{ \begin{array} [c]{c} x_{m},\text{ if }j=k,\\ 0,\text{ if }j\neq k \end{array} \right. \] for all $x=\left( x_{m}\right) _{m=1}^{\infty}\in\ell_{p}$. Note that \[ \left\Vert i^{\left( k\right) }\left( x\right) \right\Vert _{\ell _{p}^{\left( k\right) }}=\left\Vert i\left( x\right) \right\Vert _{\ell_{p}} \] where $i:\ell_{p}\longrightarrow\ell_{p}$ is the identity map. Now, for each $k\in\mathbb{N}$, consider the sequence $\left( u_{k}\right) _{k=1}^{\infty }$ in $\mathcal{L}\left( \ell_{p};\ell_{p}\right) $ defined by \[ u_{k}:\ell_{p}\overset{i^{\left( k\right) }}{\longrightarrow}\ell _{p}^{\left( k\right) }\overset{j_{k}}{\longrightarrow}\ell_{p}, \] where $j_{k}:\ell_{p}^{\left( k\right) }\longrightarrow\ell_{p}$ is the inclusion operator. Moreover, notice that \begin{equation} \label{rr} \left\Vert i^{\left( k\right) }\left( x\right) \right\Vert _{\ell _{p}^{\left( k\right) }}=\left\Vert i\left( x\right) \right\Vert _{\ell_{p}}=\left\Vert u_{k}\left( x\right) \right\Vert _{\ell_{p}} \end{equation} for all $k.$ \begin{theorem} \label{t1} (\cite[Theorem 4]{Maddox}) Let $0<p<1$ and $1\leq s\leq r<\infty$. Then the identity map $i:\ell_{p}\longrightarrow\ell_{p}$ is non $\left( r,s\right) $-absolutely summing. \end{theorem} \begin{remark} It is a straightforward consequence of \eqref{rr} and the previous theorem that for each $k\in\mathbb{N}$, the operator $u_{k}:\ell_{p}\longrightarrow\ell_{p}$ is non $\left( r,s\right) $-absolutely summing regardless of the real numbers $r,s$, with $1\leq s\leq r<\infty$.
\end{remark} \section{The main result} \begin{theorem} $\mathcal{L}\left( \ell_{p};\ell_{p}\right) \diagdown\bigcup\limits_{1\leq s\leq r<\infty} {\textstyle\prod\nolimits_{\left( r,s\right) }} \left( \ell_{p};\ell_{p}\right) $ is $\mathfrak{c}$-spaceable for every $0<p<1$. \end{theorem} \begin{proof} In fact, by Theorem~\ref{t1} it follows that $\mathcal{L}\left( \ell_{p};\ell_{p}\right) \diagdown\bigcup\limits_{1\leq s\leq r<\infty} {\textstyle\prod\nolimits_{\left( r,s\right) }} \left( \ell_{p};\ell_{p}\right) $ is non-empty. So, consider the operator $T:\ell_{p}\longrightarrow\mathcal{L}\left( \ell_{p};\ell_{p}\right) $ defined by \[ T\left( \left( a_{i}\right) _{i=1}^{\infty}\right) =\sum\limits_{i=1} ^{\infty}a_{i}u_{i}, \] with $u_{i}$ defined in the preliminaries. It follows from \cite[Lemma 2.1]{DanielT} that $T$ is well-defined, linear and injective. Moreover, using \cite[Theorem 3.1]{DanielT} we know that \[ T\left( \ell_{p}\diagdown\left\{ 0\right\} \right) \subset\mathcal{L} \left( \ell_{p};\ell_{p}\right) \diagdown\bigcup\limits_{1\leq s\leq r<\infty} {\textstyle\prod\nolimits_{\left( r,s\right) }} \left( \ell_{p};\ell_{p}\right) . \] Therefore, $\overline{T\left( \ell_{p}\right) }$ is a closed infinite-dimensional subspace of $\mathcal{L}\left( \ell_{p};\ell_{p}\right) $. We just have to show that \[ \overline{T\left( \ell_{p}\right) }\diagdown\left\{ 0\right\} \subset\mathcal{L}\left( \ell_{p};\ell_{p}\right) \diagdown\bigcup \limits_{1\leq s\leq r<\infty} {\textstyle\prod\nolimits_{\left( r,s\right) }} \left( \ell_{p};\ell_{p}\right) . \] Indeed, let $\Psi\in\overline{T\left( \ell_{p}\right) }\diagdown\left\{ 0\right\} $.
Then, there are sequences $\left( a_{i}^{\left( k\right) }\right) _{i=1}^{\infty}\in\ell_{p}\diagdown\left\{ 0\right\} $ $\left( k\in\mathbb{N}\right) $ such that \begin{equation} \label{ee} \Psi=\lim_{k\rightarrow\infty}T\left( \left( a_{i}^{\left( k\right) }\right) _{i=1}^{\infty}\right) \text{ in } \mathcal{L}\left( \ell_{p};\ell_{p}\right). \end{equation} Note that, for each $k\in\mathbb{N}$, \[ T\left( \left( a_{i}^{\left( k\right) }\right) _{i=1}^{\infty}\right) =\sum\limits_{i=1}^{\infty}a_{i}^{\left( k\right) }u_{i}. \] Then, from \eqref{ee} we have \[ \Psi=\lim_{k\rightarrow\infty}\sum\limits_{i=1}^{\infty}a_{i}^{\left( k\right) }u_{i}=\sum\limits_{i=1}^{\infty}\lim_{k\rightarrow\infty} a_{i}^{\left( k\right) }u_{i}. \] In particular, for $x\in\ell_{p}$ arbitrary we get \[ \Psi(x)=\lim_{k\rightarrow\infty}\sum\limits_{i=1}^{\infty}a_{i}^{\left( k\right) }u_{i}(x)=\sum\limits_{i=1}^{\infty}\lim_{k\rightarrow\infty} a_{i}^{\left( k\right) }u_{i}(x). \] Since convergence in $\ell_{p}$ implies coordinatewise convergence, it follows that \begin{equation} \lim_{k\rightarrow\infty}a_{i}^{\left( k\right) }=\alpha_{i}\ \text{for all}\ i.
\label{tt} \end{equation} On the other hand, since each operator $u_{i}\in\mathcal{L}\left( \ell _{p};\ell_{p}\right) \diagdown\bigcup\limits_{1\leq s\leq r<\infty} {\textstyle\prod\nolimits_{\left( r,s\right) }} \left( \ell_{p};\ell_{p}\right) $ (it suffices to use \eqref{rr}), for each $i\in\mathbb{N}$, there exists a sequence $\left( x^{\left( j\right) }\right) _{j=1}^{\infty}\in\ell _{s}^{w}\left( \ell_{p}\right) $ such that $\left( u_{i}(x^{\left( j\right) })\right) _{j=1}^{\infty}\notin\ell_{r}\left( \ell_{p}\right) $, that is, \begin{equation} \label{e1} \sum\limits_{j=1}^{\infty}\left\vert \varphi\left( x^{\left( j\right) }\right) \right\vert ^{s}<\infty\text{ and }\sum\limits_{j=1}^{\infty}\left\Vert u_{i}\left( x^{\left( j\right) }\right) \right\Vert _{\ell _{p}}^{r}=\infty, \end{equation} for each $\varphi\in\left( \ell_{p}\right) ^{\prime}=\ell_{\infty}$ because $0<p<1$ (see \cite[Theorem 2.3]{book1}). So, using \eqref{tt} we get \begin{align*} \sum\limits_{j=1}^{\infty}\left\Vert \Psi\left( x^{\left( j\right) }\right) \right\Vert _{\ell_{p}}^{r} & =\sum\limits_{j=1}^{\infty}\left\Vert \lim_{k\rightarrow\infty}\sum\limits_{i=1}^{\infty}a_{i}^{\left( k\right) }u_{i}\left( x^{\left( j\right) }\right) \right\Vert _{\ell_{p}}^{r}\\ & =\sum\limits_{j=1}^{\infty}\left\Vert \sum\limits_{i=1}^{\infty} \lim_{k\rightarrow\infty}a_{i}^{\left( k\right) }u_{i}\left( x^{\left( j\right) }\right) \right\Vert _{\ell_{p}}^{r}\\ & =\sum\limits_{j=1}^{\infty}\left\Vert \sum\limits_{i=1}^{\infty}\alpha _{i}\cdot u_{i}\left( x^{\left( j\right) }\right) \right\Vert _{\ell_{p}}^{r}. \end{align*} Since $\left( \alpha_{i}\right) _{i}\neq0$ (it follows from the use of the $p$-norm in \eqref{tt}), let $i_{0}$ be such that $\alpha_{i_{0}}\neq0$.
Since the supports of the operators $u_{i}$ are pairwise disjoint for all $i$, from \eqref{e1} we have \begin{align*} \sum\limits_{j=1}^{\infty}\left\Vert \Psi\left( x^{\left( j\right) }\right) \right\Vert _{\ell_{p}}^{r} & =\sum\limits_{j=1}^{\infty}\left\Vert \sum\limits_{i=1}^{\infty}\alpha_{i}\cdot u_{i}\left( x^{\left( j\right) }\right) \right\Vert _{\ell_{p}}^{r}\\ & \geq\sum\limits_{j=1}^{\infty}\left\Vert \alpha_{i_{0}}\cdot u_{i_{0}}\left( x^{\left( j\right) }\right) \right\Vert _{\ell_{p}}^{r}\\ & =\left\vert \alpha_{i_{0}}\right\vert ^{r}\cdot\sum\limits_{j=1}^{\infty}\left\Vert u_{i_{0}}\left( x^{\left( j\right) }\right) \right\Vert _{\ell_{p}}^{r}=\infty \end{align*} and thus \[ \sum\limits_{j=1}^{\infty}\left\Vert \Psi\left( x^{\left( j\right) }\right) \right\Vert _{\ell_{p}}^{r}=\infty. \] We conclude that $\Psi\in\mathcal{L}\left( \ell_{p};\ell_{p}\right) \diagdown\bigcup\limits_{1\leq s\leq r<\infty} {\textstyle\prod\nolimits_{\left( r,s\right) }} \left( \ell_{p};\ell_{p}\right) $. Hence, \[ \overline{T\left( \ell_{p}\right) }\diagdown\left\{ 0\right\} \subset\mathcal{L}\left( \ell_{p};\ell_{p}\right) \diagdown\bigcup\limits_{1\leq s\leq r<\infty} {\textstyle\prod\nolimits_{\left( r,s\right) }} \left( \ell_{p};\ell_{p}\right) \] finishing the proof. \end{proof} \bibliographystyle{amsplain} \end{document}
\begin{document} \title{{Brownian half-plane excursion and critical Liouville quantum gravity}} \date{} \author{ \begin{tabular}{c}Juhan Aru\\[-5pt]\small EPFL \end{tabular}\; \begin{tabular}{c}Nina Holden\\[-5pt]\small ETH Z\"urich\end{tabular}\; \begin{tabular}{c}Ellen Powell\\[-5pt]\small Durham University \end{tabular} \begin{tabular}{c}Xin Sun\\[-5pt]\small University of Pennsylvania \end{tabular} } \setcounter{tocdepth}{2} \maketitle \begin{abstract} In a groundbreaking work, Duplantier, Miller and Sheffield showed that subcritical Liouville quantum gravity (LQG) coupled with Schramm-Loewner evolutions (SLE) can be obtained by gluing together a pair of Brownian motions. In this paper, we study the counterpart of their result in the critical case via a limiting argument. In particular, we prove that as one sends $\kappa' \downarrow 4$ in the subcritical setting, the space-filling SLE$_{\kappa'}$ in a disk degenerates to the CLE$_4$ exploration introduced by Werner and Wu, along with a collection of i.i.d.\ coin tosses indexed by the branch points of the exploration. Furthermore, in the same limit, we observe that although the pair of initial Brownian motions collapses to a single one, one can still extract two different independent Brownian motions $(A,B)$ from this pair, such that the Brownian motion $A$ encodes the LQG distance from the CLE loops to the boundary of the disk and the Brownian motion $B$ encodes the boundary lengths of the CLE$_4$ loops. In contrast to the subcritical setting, the pair $(A,B)$ does not determine the CLE-decorated LQG surface. Our paper also contains a discussion of relationships to random planar maps, the conformally invariant CLE$_4$ metric, and growth fragmentations.
\operatornameeratorname{e}nd{abstract} \tableofcontents \section{Introduction} The most classical object of random planar geometry is probably the two-dimensional Brownian motion together with its variants. Over the past 20 years, a plenitude of other interesting random geometric objects have been discovered and studied. Among those we find Liouville quantum gravity (LQG) surfaces \cite{DS11} and conformal loop ensembles (CLE) \cite{SW12,Sh09}. LQG surfaces aim to describe the fields appearing in the study of 2D Liouville quantum gravity and can be viewed as canonical models for random surfaces. They can be mathematically defined in terms of volume forms \cite{DS11,RV14,Kah85} (used in this paper), but recently also in terms of random metrics \cite{GM19,DDDF20}. CLE is a random collection of loops that correspond conjecturally to interfaces of the $q$-state Potts model and the FK random cluster model in the continuum limit (see e.g.\ \cite{CLEPERC}). In this paper we study a coupling of LQG measures, CLE and Brownian motions, taking a form of the kind first discovered in \cite{DMS14}. On the one hand we consider a ``uniform'' exploration of a conformal loop ensemble, $\mathbb{C}LE_4$, drawn on top of an independent LQG surface known as the critical LQG disk. On the other hand, we take a seemingly simpler object: the Brownian half plane excursion. In this coupling one component of the Brownian excursion encodes the branching structure of the CLE$_4$ exploration, together with a certain (LQG surface dependent) distance of CLE$_4$ loops from the boundary. The other component of the Brownian excursion encodes the LQG boundary lengths of the discovered CLE$_4$ loops. {Our result can be viewed as the critical (${\kappa'}=4$) analog of Duplantier-Miller-Sheffield’s mating of trees theorem for ${\kappa'} > 4$, \cite{DMS14}. 
The original mating of trees theorem first observes that the quantum boundary length process defined by a space-filling SLE$_{\kappa'}$ curve drawn on a subcritical LQG surface is given by a certain correlated planar Brownian motion. Moreover, it says that one can take the two components of this planar Brownian motion, glue each one to itself (under its graph) to obtain two continuum random trees, and then mate these trees along their branches to obtain both the LQG surface and the space-filling SLE curve wiggling between the trees in a measurable way. This theorem has had far-reaching consequences and applications, for example to the study of random planar maps and their limits \cite{HS19,GM-RW,GM-conv}, SLE and CLE \cite{GHM-KPZ, MSWfrag,ahs-int,AS-CLE}, and LQG itself \cite{MSTBMI,ARS}. See the survey \cite{GHS19} for further applications. Obtaining a critical analog of the mating of trees theorem was one of the main aims of this paper. The problem one faces is that the above-described picture degenerates in many ways as ${\kappa'}\downarrow 4$ (e.g.\ the correlation of the Brownian motions tends to one and the Liouville quantum gravity measure converges to the zero measure). However, it is known that the LQG measure can be renormalized in a way that gives meaningful limits \cite{APS18two}, and the starting point of the current project was the observation that the pair of Brownian motions can be renormalized via an affine transformation to give something meaningful as well.} Still, not all the information passes nicely to the limit, and in particular extra randomness appears. Therefore, our limiting coupling is somewhat different in nature to that of \cite{DMS14} (or \cite{AG19} for the finite volume case of quantum disks). Most notably, one of the key results of \cite{DMS14,AG19} is that the CLE decorated LQG determines the Brownian motions, and vice versa. 
In our case neither statement holds in the same way; see Section \operatorname{Re}f{sec:meas} for more details. For example, to define the Brownian excursion from the branching CLE$_4$ exploration, one needs a binary variable at every branching event to decide on an ordering of the branches. {We believe that in addition to completing the critical version of Duplantier-Miller-Sheffield's mating of trees theorem, the results of this paper are intriguing in their own right. Moreover, as explained below, this article opens the road for several interesting questions in the realm of SLE theory, about LQG-related random metrics, in the setting of random planar maps decorated with statistical physics models, and about links to growth-fragmentation processes.} \subsection{Contributions} Since quite some set up is required to describe our results for $\kappappa=4$ precisely, we postpone the detailed statement to Theorem \operatorname{Re}f{thm_main}. Let us state here a caricature version of the final statement. Some of the objects appearing in the statement will also be precisely defined only later, yet should be relatively clear from their names. \begin{theorem} Let: \begin{itemize} \item $\mathfrak{lqg}$ be the field of a critical quantum disk together with associated critical LQG measures \corr{(see Section \operatorname{Re}f{sec:LQG})}; \item $\mathfrak{cle}$ denote the uniform space-filling $\SLE_4$ in the unit disk \corr{parametrized by critical LQG mass}, which is defined in terms of a uniform $\mathbb{C}LE_4$ exploration plus a collection of independent coin tosses \corr{(see Section \operatorname{Re}f{sec:ucle4})}; \item and $\mathfrak{be}$ describe a Brownian (right) half plane excursion $(A, B)$ \corr{(see Section \operatorname{Re}f{sec:Bfs})}. 
\operatornameeratorname{e}nd{itemize} Then one can couple $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be})$ such that $\mathfrak{cle}$ and $\mathfrak{lqg}$ are independent, $A$ encodes a certain quantum distance for $\mathbb{C}LE_4$ loops from the boundary, and $B$ encodes the quantum boundary lengths of the $\mathbb{C}LE$ loops. Moreover $(\mathfrak{cle}, \mathfrak{lqg})$ determines $\mathfrak{be}$, but the opposite does not hold. \label{thm:caricature} \operatornameeratorname{e}nd{theorem} \begin{figure}[h] \centering \includegraphics[width=\textwidth]{exp_1} \caption{A simplistic sketch of the correspondence in Theorem \operatorname{Re}f{thm:caricature}. \textbf{On the left:} all the outermost $\mathbb{C}LE_4$ loops discovered by the space filling SLE$_4$ before the dashed loop surrounding $z$ is discovered, together with all of the second-level nested CLE$_4$ loops discovered before the dotted loop surrounding $z$ is discovered. \textbf{On the right:} the corresponding half-planar Brownian excursion, with the coordinate axes switched for ease of viewing. The sub-excursion marked by the dashed (resp. dotted) line - i.e., the portion of Brownian path starting and ending at the endpoints of this line - corresponds to the exploration within the dashed (resp. dotted) loop. The lengths of these lines are the Liouville quantum gravity lengths of the corresponding loops, and the duration of the sub-excursions are their Liouville quantum gravity areas. The time that $z$ is visited is marked by a dot, and the time that the dotted loop is discovered is marked by a cross. When the dotted loop is discovered, a coin is tossed to determine which of the two disconnected yet-to-be explored domains is visited first by the space filling SLE$_4$; in this example, the component containing $z$ is visited second. 
See also Figure \operatorname{Re}f{fig:correspondence_2} below.} \label{fig:correspondence} \operatornameeratorname{e}nd{figure} \begin{figure}[h] \centering \includegraphics[width=\textwidth]{explored} \caption{An illustration of the subset of the unit disk, shaded gray, that has been explored by the space-filling SLE$_4$ at two different times. \textbf{On the left:} at the time that the second level CLE$_4$ loop surrounding $z$ is discovered (marked by a cross on the right of Figure \operatorname{Re}f{fig:correspondence}). \textbf{On the right:} at the time that $z$ is reached (marked by a dot on the right of Figure \operatorname{Re}f{fig:correspondence}). Note that, although this is not apparent from the sketch, the explored subset of the unit disk at any given time is actually a connected set.} \label{fig:correspondence_2} \operatornameeratorname{e}nd{figure} In terms of limit results, we for example prove the following: \begin{itemize} \item We show that a $\SLE_{\kappa'}({\kappa'}-6)$ in the disk converges to the uniform CLE$_4$ exploration introduced by Werner and Wu, \cite{WW13}, as ${\kappa'}\downarrow 4$ (Proposition \operatorname{Re}f{prop:convfullbranch}). Here an extra level of randomness appears in the limit, in the sense that new CLE$_4$ loops in the exploration are always added at a uniformly chosen point on the boundary, in contrast to the ${\kappa'}>4$ case where the loops are traced by a continuous curve. \item Using a limiting argument, we also show in Section \operatorname{Re}f{sec:conv_order} how to make sense of a ``uniform'' space-filling SLE$_4$ exploration, albeit no longer defined by a continuous curve. Again extra randomness appears in the limit: contrary to the ${\kappa'} > 4$ case, the nested uniform CLE$_4$ exploration does not uniquely determine this space-filling SLE$_4$. 
\item Perhaps less surprisingly but nonetheless not without obstacles, we show that the nested CLE$_{\kappa'}$ in the unit disk converges to the nested CLE$_4$ with respect to Hausdorff distance (Proposition \operatorname{Re}f{prop:cleloopconv}). We also show that after dividing the associated quantum gravity measures by $(4-2\gamma)$, a $\gamma$-Liouville quantum gravity disk converges to a critical Liouville quantum gravity disk. \operatornameeratorname{e}nd{itemize} In terms of connections and open directions, let us very briefly mention a few examples and refer to Section \operatorname{Re}f{secGF} for more detail. \begin{itemize} \item First, as stated above in Theorem \operatorname{Re}f{thm:caricature}, $(\mathfrak{cle}, \mathfrak{lqg})$ determines $\mathfrak{be}$, but the opposite does not hold. A natural question is whether there is another natural mating-of-trees type theorem for $\kappappa=4$ where one has measurability \corr{in} both directions. \item Second, our coupling sheds light on recent work of A\"id\'ekon and Da Silva \cite{ADS}, who identify a (signed) growth fragmentation embedded naturally in the Brownian half plane excursion. The cells in this growth fragmentation correspond to very natural observables in our exploration. \item Third, as we have already mentioned, one of the coordinates in our Brownian excursion encodes a certain LQG distance of CLE$_4$ loops from the boundary. It is reasonable to conjecture that this distance should be related to the CLE$_4$ distance defined in \cite{WW13} via a Lamperti transform.\footnote{We thank N.\ Curien for explaining this relation to us.} \item Fourth, several interesting questions can be asked in terms of convergence of discrete models. Critical FK-decorated planar maps and stable maps are two immediate candidates. \operatornameeratorname{e}nd{itemize} \subsection{Outline} The rest of the article is structured as follows. 
In Section 2, after reviewing background material on branching SLE and CLE, we will prove the convergence of the $\SLE_{\kappa'}({\kappa'}-6)$ exploration in the disk to the uniform CLE$_4$ exploration, and also show the convergence of the nested CLE with respect to Hausdorff distance. In Section 3, we use the limiting procedure to make sense of a notion of space-filling SLE$_4$. In Section 4, we review the basics of Liouville quantum gravity surfaces and of the mating of trees story, and prove convergence of the Brownian motion functionals appearing in \cite{DMS14,AG19} after appropriate normalization. We also finalize a certain proof of Section 3, which is, interestingly, much easier to carry out in the mating of trees context. Finally, in Section 5 we conclude the proof of joint convergence of Brownian motions, space-filling SLE and LQG. This allows us to state and conclude the proof of our main theorem. We finish the paper with a small discussion on connections, and an outlook on several interesting open questions. Throughout, $\gamma\in(\sqrt{2},2]$ is related to the parameters $\kappa,\kappa',\epsilon$ by \begin{equation} \label{eq:parameters} \kappa=\gamma^2,\quad \kappa'=16/\kappa,\quad\epsilon=2-\gamma.
\operatornameeratorname{e}nd{equation} \section{Convergence of branching SLE$_{\kappa'}$ and CLE$_{\kappa'}$ as ${\kappa'}\downarrow 4$} \label{sec:conv_clesle} \subsection{Background on branching SLE and conformal loop ensembles} \label{sec:bg} \subsubsection{Spaces of domains} Let $\mathcal{D}$ be the space of $\mathrm{D}=\{\mathrm{D}_t\, ; \, t\ge 0\}$ such that: \begin{itemize} \item for every $t\ge 0$, $0\in {\mathrm{D}}_t\subset \mathcal{D}$ and ${\mathrm{D}}_t$ is simply connected planar domain; \item $\mathrm{D}_t\subset \mathrm{D}_s$ for all $0\le s < t<\infty;$ \item for every $t\ge 0$, if $f_t=f_t[{\mathrm{D}}]$ is the unique conformal map from $\mathbb{D}$ to ${\mathrm{D}}_t$ that sends $0$ to $0$ and has $f_t'(0)>0$, then $f_t'(0)=\mathbb{C}R(0;\mathrm{D}_t)=e^{-t}$. \operatornameeratorname{e}nd{itemize} We also write $g_t=g_t[{\mathrm{D}}]$ for the inverse of $f_t$. Recall that a sequence of simply connected domains $(U^n)_{n\ge 0}$ containing $0$ are said to converge to a simply connected domain $U$ in the Carath\'{e}odory topology (viewed from $0$) if we have $f_{U^n}\to f_U$ uniformly in $r\mathbb{D}$ for any $r<1$, where $f_{U^{n}}$ (respectively $f_U$) are the unique conformal maps from $\mathbb{D}$ to $U^{n}$ (respectively $U$) sending $0$ to $0$ and with positive real derivative at $0$. Carath\'{e}odory convergence viewed from $z\ne 0$ is defined in the analogous way. We equip $\mathcal{D}$ with the natural extension of this topology: that is, we say that a sequence $({\mathrm{D}}^{n})_{n\ge 0}$ in $\mathcal{D}$ converges to ${\mathrm{D}}$ in $\mathcal{D}$ if for any $r<1$ and $T\in [0,\infty)$ \begin{equation}\label{eq:cartconvdef} \sup_{t\in [0,T]}\sup_{z\in r\mathbb{D}}|f^{n}_t(z)-f_t(z)|\to 0\operatornameeratorname{e}nd{equation} as $n\to \infty$, where $f_t^{n}=f_t[\mathrm{D}^{n}]$ and $f_t=f_t[\mathrm{D}]$. With this topology, $\mathcal{D}$ is a metrizable and separable space: see for example \cite[Section 6.1]{QLE}. 
\subsubsection{Radial Loewner chains} In order to introduce radial SLE, we first need to recall the definition of a (measure-driven) radial Loewner chain. Such chains are closely related to the space $\mathcal{D}$, as we will soon see. If $\lambda$ is a measure on $[0,\infty) \times \partial \mathbb{D}$ whose marginal on $[0,\infty)$ is Lebesgue measure, we define the radial Loewner equation driven by $\lambda$ via \begin{equation}\label{eq:loew_int} g_t(z)=\int_{[0,t]\times \partial \mathbb{D}} g_s(z)\frac{u+g_s(z)}{u-g_s(z)} \, d\lambda(s,u) ; \quad \quad g_0(z)=z \operatornameeratorname{e}nd{equation} for $z\in \mathbb{D}$ and $t\ge 0$. It is known (see for example \cite[Proposition 6.1]{QLE}) that for any such $\lambda$, \operatornameeratorname{e}qref{eq:loew_int} has a unique solution $g_t(z)$ for each $z\in \mathbb{D}$, defined until time $t_z:=\sup\{t\ge 0: g_t(z)\in \mathbb{D}\}$. Moreover, if one defines $\mathrm{D}_t:=\{z\in \mathbb{D}: t_z<t\}$, then $\mathrm{D}=\{\mathrm{D}_t\, , \, t\ge 0\}$ is an element of $\mathcal{D}$, and $g_t$ from \operatornameeratorname{e}qref{eq:loew_int} is equal to $g_t[\mathrm{D}]=(f_t[\mathrm{D}])^{-1}$ for each $t$. We call $\mathrm{D}$ the radial Loewner chain driven by $\lambda$. Note that if one restricts to measure of the form $\lambda(A,dt)=\delta_{W(t)}(A) \, dt$ with $W:[0,\infty)\to \partial \mathbb{D}$ piecewise continuous, this defines the more classical notion of a radial Loewner chain. In this case we can rewrite the radial Loewner equation as \begin{equation} \label{eqn:rad_loewner} \partial_t g_t(z)= g_t(z) \corr{\frac{W_t+g_t(z)}{W_t-g_t(z)}}; \;\; z\in \mathbb{D},\, t\le t_z:=\inf\{s: g_s(z)=W_s\} \operatornameeratorname{e}nd{equation} and we refer to the corresponding Loewner chain as the radial Loewner evolution with driving function $W$. In fact, this is the case that we will be interested in when defining radial $\SLE_{\kappa'}({\kappa'}-6)$ for ${\kappa'}>4$. 
\begin{remark}\label{rmk:dconv_cconv}Let us further remark that if $(\lambda^n)$ are a sequence of driving measures as above, such that $\lambda^n$ converges weakly \corr{(i.e.\, with respect to the weak topology on measures)} to some $\lambda$ on $[0,T]\times \partial \mathbb{D}$ for every $T$, then the corresponding Loewner chains $(\mathrm{D}^n),\mathrm{D}$ are such that $\mathrm{D}^n\to \mathrm{D}$ in $\mathcal{D}$ (\cite[Proposition 6.1]{QLE}). \corr{In particular, one can check that if $\lambda^n(A,dt)=\delta_{W^n(t)}(A) \, dt$ and $\lambda(A,dt)=\delta_{W(t)}(A)\, dt$ for some piecewise continuous functions $W^n:[0,\infty)\to \partial \mathbb{D}$, and $W:[0,\infty)\to \partial \mathbb{D}$ then the corresponding Loewner chains converge in $\mathcal{D}$ if for any $T>0$ fixed and $F:[0,T]\times \partial \mathbb{D}\to \mathbb{R}$ bounded and continuous, we have \begin{equation}\label{eq:lambdanlambda}\lambda^n(F)=\int_0^T\int_{\partial \mathbb{D}} F(u,t) \delta_{W^n(t)}(u)dt = \int_0^T F(W^n(t),t) \, dt \to \lambda(F)=\int_0^T F(W(t),t) \, dt\operatornameeratorname{e}nd{equation} as $n\to \infty$.} \operatornameeratorname{e}nd{remark} \begin{remark}\label{rmk:stopped_loewner} In what follows we will sometimes need to consider evolving domains $\{\mathrm{D}_t\, ; \, t\in [0,S]\}$ that satisfy the conditions to be an element of $\mathcal{D}$ up to some finite time $S$. In this case we may extend the definition of $\mathrm{D}_t$ for $t\ge S$ by setting $\mathrm{D}_t=f_S(e^{-(t-S)}\mathbb{D})$, where $f_S:\mathbb{D}\to \mathrm{D}_S$ is the unique conformal map sending $0\to 0$ and with $f_S'(0)=e^{-S}$.With this extension, $\mathrm{D}=\{\mathrm{D}_t \, ; \, t\ge 0\}$ defines an element of $\mathcal{D}$. If we have a sequence of such objects, then we say that they converge to a limiting object in $\mathcal{D}$ if and only if these extensions converge. We will use this terminology without further comment in the rest of the article. 
\operatornameeratorname{e}nd{remark} \subsubsection{Radial $\SLE_{\kappa'}({\kappa'}-6)$} \label{sec:slek} Let ${\kappa'}\in (4,8)$, and recall the relationship \operatornameeratorname{e}qref{eq:parameters} between ${\kappa'}\in (4,8)$ and $\operatornameeratorname{e}ps\in (0,2-\sqrt{2})$. Although the use of $\operatornameeratorname{e}ps$ is somewhat redundant at this point, we do so to avoid redefining certain notations later on. Let $B$ be a standard Brownian motion, and let $\theta^\operatornameeratorname{e}ps_0=\{(\theta_0^\operatornameeratorname{e}ps)_t\, ; \, t>0\}$ be the unique $B$-measurable process taking values in $[0,2\pi]$, with $(\theta^\operatornameeratorname{e}ps_0)_0=x\in [0,2\pi]$, that is instantaneously reflecting at $\{0,2\pi\}$, and that solves the SDE \begin{equation}\label{eq:sde_theta} d(\theta^\operatornameeratorname{e}ps_0)_t = \sqrt{{\kappa'}}dB_t+ \frac{{\kappa'}-4}{2}\cot\left(\frac{(\theta^\operatornameeratorname{e}ps_0)_t}{2}\right) \, dt\operatornameeratorname{e}nd{equation} on time intervals for which $(\theta^\operatornameeratorname{e}ps_0)_t\ne \{0,2\pi\}$. The existence and pathwise uniqueness of this process is shown in \cite[Proposition 3.15 \& Proposition 4.2]{Sh09}. It follows from the strong Markov property of Brownian motion that $\theta^\operatornameeratorname{e}ps_0$ has the strong Markov property. We let $\tau^\operatornameeratorname{e}ps_0$ be the first hitting time of $2\pi$ by $\theta_0^\operatornameeratorname{e}ps$. 
Associated to $\theta^\operatornameeratorname{e}ps_0$, we can define a process $W^\operatornameeratorname{e}ps_0$, taking values on $\partial \mathbb{D}$, by setting \begin{equation}\label{def:Wfromtheta} (W_0^\operatornameeratorname{e}ps)_t = \operatornameeratorname{e}xp\left(\operatornameeratorname{i}\, ((\theta_0^\operatornameeratorname{e}ps)_t- \int_0^t \cot\left((\theta_0^\operatornameeratorname{e}ps)_s/2\right) \, ds)\right) \quad t\ge 0.\operatornameeratorname{e}nd{equation} This indeed gives rise to a continuous function $W_0^\operatornameeratorname{e}ps$ in time (see e.g.\ \cite{Sh09,MSW14}) and using this as the driving function in the radial Loewner equation \operatornameeratorname{e}qref{eqn:rad_loewner} defines a radial $\SLE_{\kappa'}({\kappa'}-6)$ in $\mathbb{D}$ from $1$ to $0$, with a force point at $e^{-ix}$ (recall that $(\theta_0^\operatornameeratorname{e}ps)_0=x$). We denote this by $({\mathbf{D}}^\operatornameeratorname{e}ps_0)=\{({\mathbf{D}}^\operatornameeratorname{e}ps_0)_t\, ; \, t\ge 0\}$ which is an element of $\mathcal{D}$. In fact, there almost surely exists a continuous non self-intersecting curve $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_0:[0,\infty)\to \mathbb{D}$ such that $({\mathbf{D}}^\operatornameeratorname{e}ps_0)_t$ is the connected component of $\mathbb{D}\setminus \operatornameeratorname{e}ta^\operatornameeratorname{e}ps_0[0,t]$ containing $0$ for all $t$ \cite{RS05,MSIG1}. Usually we will start with $x=0$, and then we say that the force point is at $1^-$: everything in the above discussion remains true in this case, see \cite{Sh09}. In this setting we refer to ${\mathbf{D}}^\operatornameeratorname{e}ps_0$ and/or $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_0$ (interchangeably) as simply a radial $\SLE_{{\kappa'}}({\kappa'}-6)$ targeted at $0$. 
The time $\tau^\operatornameeratorname{e}ps_0$ corresponds to the first time that $0$ is surrounded by a counterclockwise loop: see Figure \operatorname{Re}f{fig:tau}. To begin, we will just consider the SLE stopped at this time. We write $${\mathrm{D}}^\operatornameeratorname{e}ps_0=\{({\mathrm{D}}^\operatornameeratorname{e}ps_0)_t\, ; \, t\ge 0\}:=\{({\mathbf{D}}^\operatornameeratorname{e}ps_0)_{\tau^\operatornameeratorname{e}ps \wedge t}\, ; \, {t\ge 0}\}$$ for the corresponding element of $\mathcal{D}$ (see Remark \operatorname{Re}f{rmk:stopped_loewner}). \begin{figure}[h] \centering \includegraphics[width=\textwidth]{excursions} \caption{From left to right, the process $\theta^\operatornameeratorname{e}ps_0$ does the following at the illustrated time: hits 0, hits 0, hits neither 0 nor $2\pi$, hits $2\pi$. The rightmost image is, therefore, an illustration of the time $\tau_0^\operatornameeratorname{e}ps$.}\label{fig:tau} \operatornameeratorname{e}nd{figure} \subsubsection{An approximation to radial SLE$_{{\kappa'}}({\kappa'}-6)$} We will make use of the following approximations $({\mathrm{D}}^{\operatornameeratorname{e}ps,n}_0)_{n\in \mathbb{N}}$ to ${\mathrm{D}}^\operatornameeratorname{e}ps_0$ in $\mathcal{D}$ (in order to show convergence to {the} CLE$_4$ {exploration}). Fixing $\operatornameeratorname{e}ps$, and taking the processes $\theta_0^\operatornameeratorname{e}ps$ and $W_0^\operatornameeratorname{e}ps$ as above, the idea is to remove intervals of time where $\theta_0^\operatornameeratorname{e}ps$ is making tiny excursions away from $0$, and then define $\mathrm{D}^{\operatornameeratorname{e}ps,n}_0$ to be the radial Loewner chain whose driving function is equal to $W_0^\operatornameeratorname{e}ps$, but with these times cut out. 
More precisely, we set $T_0^{\operatornameeratorname{e}ps,n}:=0;$ and \corr{inductively define} \begin{align*} {R}_1^{\operatornameeratorname{e}ps,n} & =\inf \{t\ge T_0^{\operatornameeratorname{e}ps,n}\hspace{-0.1cm}:\hspace{-0.1cm} (\theta^\operatornameeratorname{e}ps_0)_t\ge 2^{-n}\}; \\ S_1^{\operatornameeratorname{e}ps,n} & =\sup\{t\le R_1^{\operatornameeratorname{e}ps,n}\hspace{-0.1cm}:\hspace{-0.1cm} (\theta^\operatornameeratorname{e}ps_0)_t=0\}; \\ T_1^{\operatornameeratorname{e}ps,n} & =\inf\{t\ge R_1^{\operatornameeratorname{e}ps,n}\hspace{-0.1cm}:\hspace{-0.1cm} (\theta^\operatornameeratorname{e}ps_0)_t=0\}; \\ R_2^{\operatornameeratorname{e}ps,n} & =\inf \{t\ge T_1^{\operatornameeratorname{e}ps,n}\hspace{-0.1cm}:\hspace{-0.1cm}(\theta^\operatornameeratorname{e}ps_0)_t\ge 2^{-n}\} ; \\ S_2^{\operatornameeratorname{e}ps,n} & =\sup\{t\le R_2^{\operatornameeratorname{e}ps,n}\hspace{-0.1cm}:\hspace{-0.1cm} (\theta^\operatornameeratorname{e}ps_0)_t=0\} ; \\ T_2^{\operatornameeratorname{e}ps,n} & =\inf\{t\ge R_2^{\operatornameeratorname{e}ps,n}\hspace{-0.1cm}:\hspace{-0.1cm} (\theta^\operatornameeratorname{e}ps_0)_t=0\}; \operatornameeratorname{e}nd{align*} etc.\ so the intervals $[S_i^{\operatornameeratorname{e}ps,n},T_i^{\operatornameeratorname{e}ps,n}]$ for $i\ge 1$ are precisely the intervals on which $\theta^\operatornameeratorname{e}ps_0$ is making an excursion away from $0$ whose maximum height exceeds $2^{-n}$. Call the $i$th one of these excursions $e_i^{\operatornameeratorname{e}ps,n}$. 
Also set $ \Lambda^{\operatornameeratorname{e}ps,n}:= \sup\{j: S_j^{\operatornameeratorname{e}ps,n}\le \tau^\operatornameeratorname{e}ps_0\}$ and \begin{equation*}l_i^{\operatornameeratorname{e}ps,n}:= T_i^{\operatornameeratorname{e}ps,n}-S_i^{\operatornameeratorname{e}ps,n} \text{ for } i<\Lambda^{\operatornameeratorname{e}ps,n} \text{ ; } l_{\Lambda^{\operatornameeratorname{e}ps,n}}^{\operatornameeratorname{e}ps,n}=\tau^\operatornameeratorname{e}ps_0-\corr{S}_{\Lambda^{\operatornameeratorname{e}ps,n}}^{\operatornameeratorname{e}ps,n} \text{ ; } L_i^{\operatornameeratorname{e}ps,n}=\sum\nolimits_{1\le j \le i} l_j^{\operatornameeratorname{e}ps,n} \text{ for } 1\le i \le \Lambda^{\operatornameeratorname{e}ps,n}.\operatornameeratorname{e}nd{equation*} Now we define $$(W_0^{\operatornameeratorname{e}ps,n})_t=(W_0^\operatornameeratorname{e}ps)_{S_i^{\operatornameeratorname{e}ps,n}+(t-L_{i-1}^{\operatornameeratorname{e}ps,n})}, \text{ for } t\in [L_{i-1}^{\operatornameeratorname{e}ps,n},L_i^{\operatornameeratorname{e}ps,n})\text{ and } 1\le i \le \Lambda^{\operatornameeratorname{e}ps,n},$$ and set $\mathrm{D}_0^{\operatornameeratorname{e}ps,n}$ to be the radial Loewner chain with driving function $W_0^{\operatornameeratorname{e}ps,n}$. This is defined up to time $\tau_0^{\operatornameeratorname{e}ps,n}:=L_{\Lambda^{\operatornameeratorname{e}ps,n}}^{\operatornameeratorname{e}ps,n}$. We will show in Section \operatorname{Re}f{sec:conv_bt} that ${\mathrm{D}}^{\operatornameeratorname{e}ps,n}_0 \to {\mathrm{D}}^\operatornameeratorname{e}ps_0$ in $\mathcal{D}$ as $n\to \infty$ (see Lemma \operatorname{Re}f{lem:ngoodapprox}). \subsubsection{Uniform $\mathbb{C}LE_4$ exploration {targeted at the origin}}\label{sec:ucle4} Now suppose that we replace ${\kappa'}$ with $4$, so that the solution $\theta_0$ of \operatornameeratorname{e}qref{eq:sde_theta} is simply a (speed $4$) Brownian motion reflected at $\{0,2\pi\}$. 
Then the integral in \operatornameeratorname{e}qref{def:Wfromtheta} does not converge, but it is finite for any single excursion of $\theta_0$.\footnote{That is, if $\lambda$ is the Brownian excursion measure then the integral is finite for $\lambda$-almost all excursions, see \cite[Section 2]{WW13}).} For any $n\in \mathbb{N}$ if we define $\tau^n_0$, $\Lambda^{n}$ and $(S_i^{n},T_i^{n},l_i^{n},L_i^{n})_{i\ge 1}$ as in the sections above, we can therefore define a process ${\mathrm{D}}^{n}_0$ in $\mathcal{D}$ via the following procedure: \begin{itemize} \item sample random variables $(X_i^{n})_{i\ge 1}$ uniformly and independently on $\partial \mathbb{D}$; \item define $(W_0^n)_t$ for $t\in [0,\tau_0^n)$ by setting \begin{equation}\label{def:excu4} (W_0^n)_t= X_i^{n}\operatornameeratorname{e}xp\left(\operatornameeratorname{i} ((\theta_0)_{t+S_i^{n}} - \int_{S_i^{n}}^{t+S_i^{n}} \cot((\theta_0)_s/2) \,ds)\right) \operatornameeratorname{e}nd{equation} for $t\in [L_{i-1}^n,L_i)$ and $ 1\le i\le \Lambda^n$; \item let $\mathrm{D}^n$ be the radial Loewner chain with driving function $W_0^n$. \operatornameeratorname{e}nd{itemize} With these definitions we have that ${\mathrm{D}}^{n}_0\mathbb{R}ightarrow {\mathrm{D}}_0$ in $\mathcal{D}$ as $n\to \infty$, where the limit process is the \operatornameeratorname{e}mph{uniform CLE$_4$ exploration} introduced in \cite{WW13}, and run until the outermost CLE$_4$ loop surrounding $0$ is discovered. More precisely, the uniform CLE$_4$ exploration towards $0$ in $\mathbb{D}$ can be defined as follows. One starts with a Poisson point process $\{(\gamma_j, t_j)\, ; \, j\in J\}$ with intensity given by $M$ times Lebesgue measure, where $M$ is the SLE$_4$ bubble measure rooted uniformly over the unit circle: see \cite[Section 2.3.2]{SWWDCD}. In particular, for each $j$, $\gamma_j$ is a simple continuous loop rooted at some point in $\partial \mathbb{D}$. 
We define $\mathrm{int}(\gamma_j)$ to be the connected component of $\mathbb{D}\setminus \gamma_j$ that intersects $\partial \mathbb{D}$ only at the root, and set $\tau=\inf \{t: t=t_j \text{ with } 0 \in \mathrm{int}(\gamma_j)\}$ so that for all $t_j<\tau$, $\mathrm{int}(\gamma_j)$ does not contain the origin. Therefore, to each such $j$ we can associate a unique conformal map $f_j$ from the connected component of $\mathbb{D}\setminus \gamma_j$ containing $0$ to $\mathbb{D}$, such that $f_j(0)=0$ and $f_j'(0)>0$. For any $t\le \tau$ it is then possible to define (for example by considering only loops with some minimum size and then letting this size tend to $0$, see again \cite{WW13,SWWDCD}) $f_t$ to be the composition $\circ_{t_j< t} f_{t_j}$, where the composition is done in reverse chronological order of the $t_j$s. The process
\begin{equation}\label{pppcle}
\{\mathrm{D}'_t \, ; \, t\le \tau\}:=\{f_t(\mathbb{D}) \, ; \, t\le \tau\}
\end{equation}
is then a process of simply connected subdomains of $\mathbb{D}$ containing $0$, which is decreasing in the sense that $\mathrm{D}'_t\subseteq \mathrm{D}'_s$ for all $0\le s\le t \le \tau$. This is the description of the uniform $\mathrm{CLE}_4$ exploration towards $0$ most commonly found in the literature. Note that with this definition, time is parameterized according to the underlying Poisson point process, and entire loops are ``discovered instantaneously''. Since we are considering processes in $\mathcal{D}$, we need to reparameterize $\mathrm{D}'$ by $-\log \mathrm{CR}$ seen from the origin. By definition, for each $j\in J$, $\gamma_j$ is a simple loop rooted at a point in $\partial \mathbb{D}$ that does not surround $0$.
If we declare the loop to be traversed counterclockwise, we can view it as a curve $c_j:[0,f_j'(0)]\to \mathbb{D}$ parameterized so that $\mathrm{CR}(0;\mathbb{D}\setminus c_j)=e^{-t}$ for all $t$ (the choice of direction means that $\mathrm{int}(\gamma_j)$ is surrounded by the left-hand side of $c_j$). We then define $\mathrm{D}$ to be the unique process in $\mathcal{D}$ such that for each $j\in J$ with $t_j\le \tau$, and all $t\in [-\log f_{t_j}'(0),- \log f_{t_j}'(0)-\log f_j'(0)]$, $\mathrm{D}_t$ is the connected component of $f_{t_j}(\mathbb{D}\setminus c_j[0,t-\log f_{t_j}'(0)])$ containing $0$. In other words, $\mathrm{D}$ is a reparameterization of $\mathrm{D}'$ by $-\log \mathrm{CR}$ seen from $0$, where instead of loops being discovered instantaneously, they are traced continuously in a counterclockwise direction. The process is defined until time $\tau_0:=-\log \mathrm{CR}(0;f_{\tau}(\mathbb{D}\setminus \gamma_{\tau} ))$, at which point the origin is surrounded by a loop (the law of this loop is that of the outermost loop surrounding the origin in a nested CLE$_4$ in $\mathbb{D}$). With this definition, the same argument as in \cite[Section 4]{WW13} shows that ${\mathrm{D}}^{n}_0\Rightarrow {\mathrm{D}}_0$ in $\mathcal{D}$ as $n\to \infty$. Moreover, this convergence in law holds jointly with the convergence $\tau_0^n\Rightarrow \tau_0$ (in particular, $\tau_0$ has the law of the first time that a reflected Brownian motion started from $0$ hits $\pi$, as was already observed in \cite{SSW09}). The $\mathrm{CLE}_4$ exploration can be continued after this first loop exploration time $\tau_0$ by iteration. More precisely, given the process up to time $\tau_0$, one next samples an independent $\mathrm{CLE}_4$ exploration in the interior of the discovered loop containing $0$, but now with loops traced clockwise instead of counterclockwise.
When the next level loop containing $0$ is discovered, the procedure is repeated, but going back to counterclockwise tracing. Continuing in this way, we define the whole uniform CLE$_4$ exploration targeted at $0$: ${\mathbf{D}}_0=\{({\mathbf{D}}_0)_t \, ; \, t\ge 0\}$. Note that by definition $\mathrm{D}_0$ is then just the process $\mathbf{D}_0$, stopped at time $\tau_0$.
\begin{remark}
\label{rmk:slek_markov}
The ``clockwise/counterclockwise'' switching defined above is consistent with what happens in the $\SLE_{\kappa'}({\kappa'}-6)$ picture when ${\kappa'}>4$. Indeed, it follows from the Markov property of $\theta_0^\epsilon$ (in the ${\kappa'}>4$ case) that after time $\tau_0^\epsilon$, the evolution of $\theta$ until it next hits $0$ is independent of the past and equal in law to $(2\pi-\theta_0^\epsilon(t))_{t\in [0,\tau_0^\epsilon]}$. This implies that the future of the curve after time $\tau_0^\epsilon$ has the law of an $\SLE_{\kappa'}({\kappa'}-6)$ in the connected component of the remaining domain containing $0$, but now with force point starting infinitesimally counterclockwise from the tip, until $0$ is surrounded by a clockwise loop. This procedure alternates, just as in the ${\kappa'}=4$ case.
\end{remark}
\subsubsection{{Exploration of the (nested) CLE}}\label{sec:sletocle}
In the previous subsections, we have seen how to construct $\SLE_{\kappa'}({\kappa'}-6)$ processes, denoted by ${\mathbf{D}}^\epsilon_0$ ($\epsilon=\epsilon(\kappa')$) from $1$ to $0$ in $\mathbb{D}$, and that these are generated by curves $\eta^\epsilon$. We have also seen how to construct a uniform $\mathrm{CLE}_4$ exploration, ${\mathbf{D}}_0$, targeted at $0$ in $\mathbb{D}$.
The $0$ in the subscripts here is to indicate that $0$ is a special \emph{target point}. But we can also define the law of an $\SLE_{\kappa'}({\kappa'}-6)$, or a $\mathrm{CLE}_4$ exploration process, targeted at any point $z$ in the unit disk. To do this we simply take the law of $\phi(\mathbf{D}^\epsilon_0)$ or $\phi(\mathbf{D}_0)$, where $\phi:\mathbb{D}\to \mathbb{D}$ is the unique conformal map sending $0$ to $z$ and $1$ to $1$. We will denote these processes by $({\mathbf{D}}^\epsilon_z),{\mathbf{D}}_z$, where the $({\mathbf{D}}^\epsilon_z)$ are also clearly generated by curves $\eta^\epsilon_z$ for $\epsilon>0$. By definition, the time parameterization for $\mathbf{D}_z^\epsilon$ is such that $-\log \mathrm{CR}(z; (\mathbf{D}_z^\epsilon)_t)=t$ for all $t, z, \epsilon$ (similarly for $\mathbf{D}_z$). In fact, both $\SLE_{\kappa'}({\kappa'}-6)$ and the uniform $\mathrm{CLE}_4$ exploration satisfy a special \emph{target invariance} property: see for example \cite{SW05} {for $\SLE_{\kappa'}({\kappa'}-6)$} and \cite[Lemma 8]{WW13} for CLE$_4$. This means that they can be targeted at a countable dense set of points in $\mathbb{D}$ simultaneously, in such a way that for any distinct $z,w\in \mathbb{D}$, the processes targeted at $z$ and $w$ agree (modulo time reparameterization) until the first time that $z$ and $w$ lie in different connected components of the yet-to-be-explored domain.
We will choose our dense set of points to be $\mathcal{Q}:=\mathbb{Q}^2\cap \mathbb{D}$, and for $\operatornameeratorname{e}ps>0$ refer to the coupled process $({\mathbf{D}}^\operatornameeratorname{e}ps_z)_{z\in \mathcal{Q}}$ (or $(\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_z)_{z\in \mathcal{Q}}$) as the \operatornameeratorname{e}mph{branching $\SLE_{\kappa'}$} in $\mathbb{D}$. Similarly we refer to the coupled process $({\mathbf{D}}_z)_{z\in \mathcal{Q}}$ as the \operatornameeratorname{e}mph{branching $\mathbb{C}LE_4$ exploration} in $\mathbb{D}$. Note that in this setting we can associate a process $\theta_z^\operatornameeratorname{e}ps$ to each $z \in \mathcal{Q}$: we consider the image of $\mathbf{D}_z^\operatornameeratorname{e}ps$ under the unique conformal map from $\mathbb{D}\to\mathbb{D}$ sending $z\mapsto 0$ and $1\mapsto1$, and define $\theta_z^\operatornameeratorname{e}ps$ to be the unique process such that this new radial Loewner chain is related to $\theta_z^\operatornameeratorname{e}ps$ via equations \operatornameeratorname{e}qref{def:Wfromtheta} and \operatornameeratorname{e}qref{eqn:rad_loewner}. Note that $\theta_z^\operatornameeratorname{e}ps$ has the same law as $\theta_0^\operatornameeratorname{e}ps$ for each fixed $z$ (by definition), but the above procedure produces a coupling of $\{\theta_z^\operatornameeratorname{e}ps\, ; \, z\in \mathcal{Q}\}$. {We will make use of the following property connecting chordal and radial SLE (that is closely related to target invariance).} \begin{lemma}[Theorem 3, \cite{SW05}] \label{lem:radial_chordal} Consider the radial $\SLE_{\kappappa'}(\kappappa'-6)$ with force point at $e^{-\operatornameeratorname{i} x}$ for $x\in (0,2\pi)$, stopped at the first time that $\operatornameeratorname{e}^{-\operatornameeratorname{i} x}$ and $0$ are separated. 
Then its law coincides (up to a time change) with that of a chordal SLE$_{\kappa'}$ from $1$ to $\operatornameeratorname{e}^{\operatornameeratorname{i} x}$ in $\mathbb{D}$, stopped at the equivalent time. \operatornameeratorname{e}nd{lemma} We remark that from $(\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_z)_{z\in \mathcal{Q}}$, we can a.s.\ define a curve $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_a$ for any fixed $a\in \overline{\mathbb{D}}$, by taking the a.s.\ limit (with respect to the supremum norm on compacts of time) of the curves $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_{a_k}$, where $a_k\in \mathcal{Q}$ is a sequence tending to $a$ as $k\to \infty$. This curve has the law of an $\SLE_{\kappappa'}({\kappa'}-6)$ from $1$ to $a$ in $\mathbb{D}$ \cite[Section 2.1]{MSW14}. Let us caution at this point that such a limiting construction does not work simultaneously for all $a$. Indeed, there are a.s.\ certain exceptional points $a$, the set of which a.s.\ has Lebesgue measure zero, for which the limit of $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_{a_k}$ does not exist for some sequence $a_k\rightarrow a$. See Figure \operatorname{Re}f{fig:loopdef}. Let us now explain how, for each ${\kappa'}\in(4,8)$, we can use the branching $\SLE_{\kappa'}$ to define a (nested) $\mathbb{C}LE_{\kappa'}$. The \operatornameeratorname{e}mph{conformal loop ensemble} $\mathbb{C}LE_{\kappappa'}$ in $\mathbb{D}$ is a collection of non-crossing (nested) loops in the disk, \cite{SW12}, whose law is invariant under M\"{o}bius transforms $\mathbb{D}\to \mathbb{D}$. The ensemble can therefore be defined in any simply connected domain by conformal invariance, and the resulting family of laws is conjectured (in some special cases proved, e.g.\ \cite{CN08,Smi10,BH19,GMS19,KS19}) to be a universal scaling limit for collections of interfaces in critical statistical physics models. 
\begin{figure} \centering \includegraphics[width=.9\textwidth]{loopdef} \caption{\textbf{On the left:} the curve $\operatornameeratorname{e}ta_0^\operatornameeratorname{e}ps$ (in blue) is run up to time $\tau_{0,0}^\operatornameeratorname{e}ps$ (the last time that $\theta_0^\operatornameeratorname{e}ps$ hits $0$ before hitting $2\pi$). The point $\operatornameeratorname{e}ta_0^\operatornameeratorname{e}ps(\tau_{0,0}^{\operatornameeratorname{e}ps})$ is defined to be $o_0^\operatornameeratorname{e}ps$ and we have that $\operatornameeratorname{e}ta_0^\operatornameeratorname{e}ps([0,\tau_{0,0}^{\operatornameeratorname{e}ps}])=\operatornameeratorname{e}ta_{o_0^\operatornameeratorname{e}ps}^\operatornameeratorname{e}ps([0,\widetilde{\tau}_0^\operatornameeratorname{e}ps])$ for some time $\widetilde{\tau}_0^\operatornameeratorname{e}ps$. \textbf{On the right:} the outermost CLE$_{\kappa'}$ loop $\mathcal{L}_0^\operatornameeratorname{e}ps$ containing $0$ (marked in red) is defined to be $\operatornameeratorname{e}ta_{o_0^\operatornameeratorname{e}ps}^\operatornameeratorname{e}ps([\widetilde{\tau}_0^\operatornameeratorname{e}ps,\infty])$. Note that we have a choice about how to define $\operatornameeratorname{e}ta^{\operatornameeratorname{e}ps}_{o^\operatornameeratorname{e}ps_0}$: if we take it to be a limit of $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_{a_k}$ where $a_k\to o_0^\operatornameeratorname{e}ps$ along the dotted line, this will be different to if $a_k\to o_0^\operatornameeratorname{e}ps$ along the dashed line. 
We choose the definition that makes ${o^\operatornameeratorname{e}ps_0}$ into a double point for $\operatornameeratorname{e}ta^{\operatornameeratorname{e}ps}_{o^\operatornameeratorname{e}ps_0}$.} \label{fig:loopdef} \operatornameeratorname{e}nd{figure} For $z\in \mathcal{Q}$, the procedure to define $\mathcal{L}^\operatornameeratorname{e}ps_{z}$, the outermost $\mathbb{C}LE_{\kappa'}$ loop containing $z$, goes as follows: \begin{itemize} \item Let $\tau^\operatornameeratorname{e}ps_{z}$ be the first time that $\theta^\operatornameeratorname{e}ps_z$ hits $2\pi$, and let $\tau^\operatornameeratorname{e}ps_{0,z}$ be the last time before this that $\theta^\operatornameeratorname{e}ps_z$ is equal to $0$.\label{def:tauz} \item Let $o^\operatornameeratorname{e}ps_z=\operatornameeratorname{e}ta_z^\operatornameeratorname{e}ps(\tau^\operatornameeratorname{e}ps_{0,z})$. In fact, the point $o^\operatornameeratorname{e}ps_z$ is one of the exceptional points for which the limit of $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_{a_k}$ does not exist for all sequences $a_k\rightarrow o^\operatornameeratorname{e}ps_z$, so it is not immediately clear how to define $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_{o_z^\operatornameeratorname{e}ps}$, see Figure \operatorname{Re}f{fig:loopdef}. However, the limit \operatornameeratorname{e}mph{is} well defined if we insist that the sequence $a_k\to o_z^\operatornameeratorname{e}ps$ is such that $0$ and $a_k$ are separated by $\operatornameeratorname{e}ta_z^\operatornameeratorname{e}ps$ at time $\tau_z^\operatornameeratorname{e}ps$ for each $k$. \item Define $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_{o_z^\operatornameeratorname{e}ps}$ to be the limit of the curves $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_{a_k}$ as $k\to \infty$. 
In particular the condition on the sequence $a_k$ means that $o_z^\operatornameeratorname{e}ps$ is a.s.\ a double point of $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_{o_z^\operatornameeratorname{e}ps}$. With this definition of $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_{o_z^\operatornameeratorname{e}ps}$, it follows that \[\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_z([0,\tau^\operatornameeratorname{e}ps_{0,z}])=\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_{o^\operatornameeratorname{e}ps_z}([0,\widetilde{\tau}^{\operatornameeratorname{e}ps}_z]) \text{ a.s.\ for some } \widetilde{\tau}^{\operatornameeratorname{e}ps}_z\ge 0.\] \item Set $\mathcal{L}^\operatornameeratorname{e}ps_z:=\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_{o^\operatornameeratorname{e}ps_z}([\widetilde{\tau}^{\operatornameeratorname{e}ps}_z,\infty))$. \operatornameeratorname{e}nd{itemize} We write $\mathcal{B}^\operatornameeratorname{e}ps_z$ for the connected component of $\mathbb{D}\setminus \mathcal{L}^\operatornameeratorname{e}ps_z$ containing $z$: note that this is equal to $({\mathbf{D}}^\operatornameeratorname{e}ps_z)_{\tau^\operatornameeratorname{e}ps_z}$. We will call this the (outermost) $\mathbb{C}LE_{{\kappa'}}$ \operatornameeratorname{e}mph{interior bubble} containing $z$. We define the sequence of nested $\mathbb{C}LE_{\kappa'}$ loops $(\mathcal{L}^\operatornameeratorname{e}ps_{z,i})$ for $i\ge 1$ by iteration (so $\mathcal{L}^\operatornameeratorname{e}ps_z=:\mathcal{L}^\operatornameeratorname{e}ps_{z,1}$), and denote the corresponding sequence of nested domains (interior bubbles) containing $z$ by $(\mathcal{B}^\operatornameeratorname{e}ps_{z,i})_{i\ge 1}$. 
More precisely, the $i$th loop is defined inside $\mathcal{B}^\operatornameeratorname{e}ps_{z,i-1}$ in the same way that the first loop is defined inside $\mathbb{D}$, after mapping $\mathcal{B}^\operatornameeratorname{e}ps_{z,i-1}$ conformally to $\mathbb{D}$ and considering the curve $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_z([\tau_z^\operatornameeratorname{e}ps,\infty))$ rather than $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_z$. The uniform $\mathbb{C}LE_4$ exploration defines a nested $\mathbb{C}LE_4$ in a similar but less complicated manner: see \cite{WW13}. For any $z\in \mathcal{Q}$, to define $\mathcal{L}_z$ (the outermost $\mathbb{C}LE_4$ loop containing $z$) we consider the Loewner chain ${\mathrm{D}}_z$ and define the times $\tau_z$ and $\tau_{0,z}$ (according to $\theta_z$) as in the ${\kappa'}>4$ case. Then between times $\tau_{0,z}$ and $\tau_z$ the Loewner chain ${\mathrm{D}}_z$ is tracing a simple loop - starting and ending at a point $o_z$. This loop is what we define to be $\mathcal{L}_z$. We define $\mathcal{B}_z$ to be the interior of $\mathcal{L}_z$: note that this is also equal to $({\mathbf{D}}_z)_{\tau_z}.$ Finally, we define the nested collection of $\mathbb{C}LE_4$ loops containing $z$ and their interiors by iteration, denoting these by $(\mathcal{B}_{z,i},\mathcal{L}_{z,i})_{i\ge 1}$ (so $\mathcal{B}_{z,1}:=\mathcal{B}_z$ and $\mathcal{L}_{z,1}:=\mathcal{L}_z$). \subsubsection{Space-filling SLE} \label{sec:sf_sle} Now, for $\kappappa'\in (4,8)$ we can also use the branching SLE$_{\kappa'}$, $(\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_z)_{z\in \mathcal{Q}}$, to define a space-filling curve $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps$ known as space-filling SLE$_{\kappa'}$. 
This was first introduced in \cite{MSIG4,DMS14}\corr{; see also \cite[Appendix A.3]{BG20} for the precise definition of the space-filling \operatornameeratorname{e}mph{loop} that we will use.} {The presentation here closely follows \cite{GHS19}.} In our definition, the branches of $(\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_z)_{z\in \mathcal{Q}}$ are all $\SLE_{\kappa'}({\kappa'}-6)$ processes started from the point $1$, and with force points initially located infinitesimally clockwise from $1$. This means that the associated space-filling SLE$_{\kappa'}$ will be a so-called \operatornameeratorname{e}mph{counterclockwise space-filling $\SLE_{\kappa'}$ loop} from $1$ to $1$ in $\mathbb{D}$.\footnote{Variants of this process, e.g.\ chordal/whole-plane versions, a clockwise version, and version with another starting point, can be defined by modifying the definition of the branching SLE, see e.g.\ \cite{GHS19,AG19}.} Given an instance $(\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_z)_{z\in \mathcal{Q}}$ of a branching SLE$_{\kappa'}$, to define the associated space-filling SLE$_{\kappa'}$, we start by defining an ordering on the points of $\mathcal{Q}$. For this we use a coloring procedure. First, we color the boundary of $\mathbb{D}$ blue. Then, for each $z\in \mathcal{Q}$, we can consider the branch $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_z$ of the branching SLE$_{\kappa'}$ targeted towards $z$. We color the left hand side of $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_z$ red, and the right hand side of $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_z$ blue. 
Whenever $\eta^\epsilon_z$ disconnects one region of $\mathbb{D}$ from another, we can then label the resulting connected components as \emph{monocolored} or \emph{bicolored}, depending on whether the boundaries of these components are made up of one or two colors, respectively.
\begin{figure}
\centering
\includegraphics[width=.5\textwidth]{spacefilling.pdf}
\caption{Constructing the ordering from the space-filling $\SLE_{\kappa'}$. When $z$ and $w_1$ are separated, the connected component containing $z$ has entirely blue boundary, while the connected component containing $w_1$ has red and blue on its boundary $\Rightarrow$ $z$ comes before $w_1$ in the ordering. By contrast, when $z$ and $w_2$ are separated, $w_2$ is in a monocolored component and $z$ is not{, which implies that} $z$ comes after $w_2$ in the ordering. So $w_1\prec z \prec w_2$ in this example.}
\label{fig:spacefilling}
\end{figure}
For $z$ and $w$ distinct elements of $\mathcal{Q}$, we know (by definition of the branching SLE) that $\eta^\epsilon_z$ and $\eta^\epsilon_w$ will agree until the first time that $z$ and $w$ are separated. When this occurs, it is not hard to see that precisely one of $z$ or $w$ will be in a newly created monocolored component. If this is $z$ we declare that $z\prec w$, and otherwise that $w\prec z$. In this way, we define a consistent ordering $\prec$ on $\mathcal{Q}$. See Figure~\ref{fig:spacefilling}. It was shown in \cite{MSIG4} that there is a unique continuous space-filling curve $\eta^\epsilon$, parametrized by Lebesgue area, that visits the points of $\mathcal{Q}$ in this order.
This is the counterclockwise space-filling SLE$_{\kappa'}$ loop (we will tend to parametrize it differently in what follows, but will discuss this later). We make the following remarks. \begin{itemize}\setlength\itemsep{0em} \item We can think of $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps$ as a version of ordinary $\SLE_{\kappappa'}$ that iteratively fills in bubbles, or disconnected components, as it creates them. The ordering means that it will fill in monocolored components first, and come back to bicolored components only later. \item The word counterclockwise in the definition refers to the fact that the boundary of $\partial \mathbb{D}$ is covered up by $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps$ in a counterclockwise order. \operatornameeratorname{e}nd{itemize} \subsection{Convergence of the SLE$_{\kappa'}({\kappa'}-6)$ branches} \label{sec:conv_bt} In this subsection and the next, we will show that for any $z\in \mathcal{Q}$, we have the joint convergence, in law as ${\kappa'}\downarrow 4$ of: \begin{itemize} \item the $\SLE_{\kappappa'}({\kappa'}-6)$ branch towards $z$ to the $\mathbb{C}LE_4$ exploration branch towards $z$; and \item the nested $\mathbb{C}LE_{\kappappa'}$ loops surrounding $z$ to the nested $\mathbb{C}LE_4$ loops surrounding $z$. \operatornameeratorname{e}nd{itemize} The present subsection is devoted to proving the first statement. Let us assume without loss of generality that our target point $z$ is the origin. We first consider the radial $\SLE_{\kappa'}({\kappa'}-6)$ branch targeting $0$, $\mathrm{D}_0^\operatornameeratorname{e}ps$, up until the first time $\tau_0^\operatornameeratorname{e}ps$ that $0$ is surrounded by a counterclockwise loop. The basic result is as follows. 
\begin{proposition}\label{prop:sletocleconv} $({\mathrm{D}}_0^\operatornameeratorname{e}ps,\tau_0^\operatornameeratorname{e}ps)\mathbb{R}ightarrow ({\mathrm{D}}_0, \tau_0)$ in $\mathcal{D}\times \mathbb{R}$ as $\operatornameeratorname{e}ps\downarrow 0$. \operatornameeratorname{e}nd{proposition} By Remark \operatorname{Re}f{rmk:slek_markov} and the iterative definition of the $\mathbb{C}LE_4$ exploration towards $0$, the convergence for all time follows immediately from the above: \begin{proposition} \label{prop:convfullbranch} ${\mathbf{D}}^\operatornameeratorname{e}ps_0\mathbb{R}ightarrow {\mathbf{D}}_0$ in $\mathcal{D}$ as $\operatornameeratorname{e}ps\downarrow 0$. \operatornameeratorname{e}nd{proposition} Our proof of Proposition \operatorname{Re}f{prop:sletocleconv} will go through the approximations $\mathrm{D}_0^{\operatornameeratorname{e}ps,n}$ and $\mathrm{D}_0^n$. Namely, we will show that for any \operatornameeratorname{e}mph{fixed} level $n$ of approximation, $\mathrm{D}_0^{\operatornameeratorname{e}ps,n}\to \mathrm{D}_0^n$ as $\operatornameeratorname{e}ps\downarrow 0$, equivalently ${\kappa'}\downarrow 4$. Broadly speaking this holds since the macroscopic excursions of the underlying processes $\theta_0^\operatornameeratorname{e}ps$ converge, and in between these macroscopic excursions we can show that the location of the tip of the curve distributes itself uniformly on the boundary of the unexplored domain. We combine this with the fact that the approximations $\mathrm{D}_0^{\operatornameeratorname{e}ps,n}$ converge to $\mathrm{D}^\operatornameeratorname{e}ps_0$ as $n\to \infty$, \operatornameeratorname{e}mph{uniformly} in $\operatornameeratorname{e}ps$, to obtain the result. The heuristic explanation for the mixing of the curve tip on the boundary is that the force point in the definition of an $\SLE_{\kappa'}({\kappa'}-6)$ causes the curve to ``whizz'' around the boundary more and more quickly as ${\kappa'}\downarrow 4$. 
This means that in any fixed amount of time (e.g., between macroscopic excursions), it will forget its initial position and become uniformly distributed in the limit. {Making this heuristic rigorous is the main technical step of this subsection, and is achieved in Subsection \operatorname{Re}f{subsec:whiz}.} \subsubsection{Excursion measures converge as ${\kappa'}\downarrow 4$} The first step towards proving Proposition \operatorname{Re}f{prop:sletocleconv} is to describe the sense in which the underlying process $\theta^\operatornameeratorname{e}ps_0$ for the $\SLE_{\kappappa'}(\kappappa'-6)$ branch converges to the process $\theta_0$ for the CLE$_4$ exploration. It is convenient to formulate this in the language of excursion theory; see Lemma \operatorname{Re}f{lem:mn_conv} below. To begin we observe, and record in the following remark, that when $\theta^\operatornameeratorname{e}ps_0$ is very small, it behaves much like a Bessel process of a certain dimension. \begin{remark} \label{rmk:theta_bessel_compare} Suppose that $(\theta_0^\operatornameeratorname{e}ps)_0=0$. By Girsanov's theorem, if the law of $\{(\theta^\operatornameeratorname{e}ps_0)_t \, ; \, t\ge 0\} $ is weighted by the martingale $$\operatornameeratorname{e}xp(Z_t^\operatornameeratorname{e}ps-\frac{\langle Z^\operatornameeratorname{e}ps \rangle_t}{2})\; ; \; Z^\operatornameeratorname{e}ps_t:=\frac{\kappappa'-4}{\sqrt{\kappappa'}} \int_0^t (\frac{1}{(\theta^\operatornameeratorname{e}ps_0)_s}-\frac{1}{2}\cot(\frac{(\theta^\operatornameeratorname{e}ps_0)_s}{2})) \, dB_s ,$$ the resulting law of $\{(\theta^\operatornameeratorname{e}ps_0)_t\, ; \, {t\le \tau^\operatornameeratorname{e}ps_0} \}$ is that of $\sqrt{{\kappa'}}$ times a Bessel process of dimension $\delta({\kappa'})=3-8/{\kappa'}$. 
\corr{ Note that for $y\in [0,2\pi)$, $(1/y- (1/2)\cot(y/2))$ is positive and increasing, and that for $y\in [0,\pi]$, $y/12\le (1/y-(1/2)\cot(y/2)) \le y/6$, so in particular the integral in the definition of $Z_t^\epsilon$ is well-defined.}
\end{remark}
Now, observe that by the Markov property of $\theta^\epsilon_0$, we can define its associated (infinite) excursion measure on excursions from $0$. {We define $m^\epsilon$ to be the image of this measure under the operation of stopping excursions if and when they reach height $2\pi$.} For $n\ge 0$, we write $m^\epsilon_n$ for $m^\epsilon$ restricted to excursions with maximum height exceeding $2^{-n}$, and normalized to be a probability measure. It then follows from the strong Markov property that the excursions of $\theta_0^\epsilon$ during the intervals $[S_i^{\epsilon,n},T_i^{\epsilon,n}]$ are independent samples from $m_n^\epsilon$, and $\Lambda^{\epsilon,n}$ is the index of the first of these samples that actually reaches height $2\pi$. We also write $m^\epsilon_\star$ for the measure $m^\epsilon$ restricted to excursions that reach $2\pi$, again normalized to be a probability measure. Finally, we consider the excursion measure on excursions from $0$ for Brownian motion. We denote the image of this measure, after stopping excursions when they hit $2\pi$, by $m$. Analogously to above, we write $m_n$ for $m$ conditioned on the excursion exceeding height $2^{-n}$. We write $m_\star$ for $m$ conditioned on the excursion reaching height $2\pi$.
The measures $m,(m^\epsilon)_\epsilon$ are supported on the excursion space
\[ E = \{ e\in C(\mathbb{R}_+,[0,2\pi])\, ; \, e(0)=0, \zeta(e):=\sup\{s>0: e(s)\in (0,2\pi)\}\in (0,\infty)\}\]
on which we define the distance
\[d_E(e,e')=\sup_{t\ge 0} |e(t)-e'(t)| + |\zeta(e)-\zeta(e')|.\]
\begin{lemma}\label{lem:mn_conv}
For any $n\ge 0$, $m_n^\epsilon \to m_n$ in law as $\epsilon\to 0$, with respect to $d_E$. The same holds with $(m_\star^\epsilon,m_\star)$ in place of $(m_n^\epsilon, m_n)$.
\end{lemma}
\begin{proof}
For $a>0$, set $E^a = \{ e\in C(\mathbb{R}_+,[0,2\pi-a])\, ; \, e(0)=0, \zeta^a(e):=\sup\{s>0: e(s)\in (0,2\pi-a)\}\in (0,\infty)\}$, and equip it with the metric $d_{E^a}(e,e')=\sup_{t\ge 0} |e(t)-e'(t)| + |\zeta^a(e)-\zeta^a(e')|$. Set $\delta=\delta({\kappa'}(\epsilon))$, recalling the definition $\delta({\kappa'})=3-8/{\kappa'}$.
{We first state and prove the analogous result for Bessel processes.}
\begin{lemma}\label{bconv}
Let $b^\epsilon$ be a sample from the Bessel-$\delta$ excursion measure away from $0$, conditioned on exceeding height $2^{-n}$, and stopped on the subsequent first hitting of $0$ or $2\pi-a$. Let $b$ be a sample from the Brownian excursion measure with the same conditioning and stopping.\footnote{Of course this depends on $a$, but we drop this from the notation for simplicity.} Then for any $a>0$, $b^\epsilon\Rightarrow b$ as $\epsilon\downarrow 0$, in the space $(E^a,d_{E^a})$.
\operatornameeratorname{e}nd{lemma} \begin{proofof}{Lemma \operatorname{Re}f{bconv}} \corr{For} any $\operatornameeratorname{e}ps\in (0,2-\sqrt{2})$, $b^\operatornameeratorname{e}ps$ can be sampled (see \cite[Section 3]{DMS14}) by: \begin{itemize} \item first sampling $X^\operatornameeratorname{e}ps$ from the probability measure on $[2^{-n},\infty)$ with density proportional to $x^{\delta-3} dx$; \item then running a Bessel-$(4-\delta)$ process from $0$ to $X^\operatornameeratorname{e}ps$; \item stopping this process at $2\pi-a$ if $X^\operatornameeratorname{e}ps\ge 2\pi-a$; or \item placing it back to back with the time reversal of an independent Bessel-$(4-\delta)$ from $0$ to $X^\operatornameeratorname{e}ps$ if $X^\operatornameeratorname{e}ps<2\pi-a$. \operatornameeratorname{e}nd{itemize} Since the time for a Bessel-$(4-\delta)$ to leave $[0,a']$ converges to $0$ as $a'\to 0$ uniformly in $\delta<3/2$, and for any $a'<2^{-n}$, a Bessel-$(4-\delta)$ from $a'$ to $y$ converges in law to a Bessel$-3$ from $a'$ to $y$ as ${\kappa'}\downarrow 4$, uniformly in $y\in [2^{-n},2\pi]$, this shows that $b^\operatornameeratorname{e}ps\mathbb{R}ightarrow b$ in $(E^a,d_{E^a})$.\operatornameeratorname{e}nd{proofof} {Now we continue the proof of Lemma \operatorname{Re}f{lem:mn_conv}.} Recalling the Radon--Nikodym derivative of Remark \operatorname{Re}f{rmk:theta_bessel_compare} (note that ${\kappa'}-4\to 0$ as $\operatornameeratorname{e}ps\downarrow 0$), we conclude that if $e^\operatornameeratorname{e}ps$ and $e$ are sampled from $m_n^\operatornameeratorname{e}ps$ and $m_n$ respectively, and stopped upon hitting $\{0,2\pi-a\}$ for the first time after hitting $2^{-n}$, then $e^\operatornameeratorname{e}ps\to e$ in law as $\operatornameeratorname{e}ps\downarrow 0$, in the space $(E^a,d_{E^a})$. 
To complete the proof, it therefore suffices to show (now without stopping $e^\operatornameeratorname{e}ps$ or $e$) that \[\zeta(e^\operatornameeratorname{e}ps)-\zeta^a(e^\operatornameeratorname{e}ps)\to 0 \;\;\; \text{ and } \;\;\; \sup_{t\in (\zeta^a(e^\operatornameeratorname{e}ps),\zeta(e^\operatornameeratorname{e}ps))} |e^\operatornameeratorname{e}ps(t)-2\pi|\to 0\] as $a\to 0$, uniformly in $\operatornameeratorname{e}ps$ (small enough). But by symmetry, if $\zeta^a(e^\operatornameeratorname{e}ps)<\zeta(e^\operatornameeratorname{e}ps)$ then $2\pi-e^\operatornameeratorname{e}ps$ from time $\zeta^a(e^\operatornameeratorname{e}ps)$ onwards has the law of $\theta^\operatornameeratorname{e}ps$ started from $a$ and stopped upon hitting $0$ or $2\pi$. As $a\to 0$ the probability that this process remains in $[0,\pi]$ tends to $1$ uniformly in $\operatornameeratorname{e}ps$, and then we can use the same Radon--Nikodym considerations to deduce the result. The final statement of Lemma \operatorname{Re}f{lem:mn_conv} can be justified in exactly the same manner. \operatornameeratorname{e}nd{proof} \subsubsection{Strategy for the proof of Proposition \operatorname{Re}f{prop:sletocleconv}} With Lemma \operatorname{Re}f{lem:mn_conv} in hand the strategy to prove Proposition \operatorname{Re}f{prop:sletocleconv} is to establish the following two lemmas: \begin{lemma}\label{lem:ngoodapprox} Let $F$ be a continuous bounded function on $\mathcal{D}\times [0,\infty)$. Then $\mathbb{E}[F({\mathrm{D}}^{\operatornameeratorname{e}ps,n}_0,\tau^{\operatornameeratorname{e}ps,n}_0)]\to \mathbb{E}[F({\mathrm{D}}^\operatornameeratorname{e}ps_0,\tau^\operatornameeratorname{e}ps_0)]$ as $n\to \infty$, uniformly in ${\kappa'}\in (4,8)$, equivalently $\operatornameeratorname{e}ps\in (0,2-\sqrt{2})$. 
\operatornameeratorname{e}nd{lemma} \begin{proof} Fix $\operatornameeratorname{e}ps$ as above, and let us assume that the processes ${\mathrm{D}}^{\operatornameeratorname{e}ps,n}_0$ as $n$ varies and ${\mathrm{D}}^\operatornameeratorname{e}ps_0$ are coupled together in the natural way: using the same underlying $\theta^\operatornameeratorname{e}ps_0$ and $W^\operatornameeratorname{e}ps_0$. \corr{By Remark \operatorname{Re}f{rmk:dconv_cconv}, in particular \operatornameeratorname{e}qref{eq:lambdanlambda}, it suffices to prove that \begin{equation}\label{eq:tente} \tau^{\operatornameeratorname{e}ps,n}_0\to \tau^{\operatornameeratorname{e}ps}_0 \operatornameeratorname{e}nd{equation} in probability as $n\to \infty$, uniformly in $\operatornameeratorname{e}ps$. In other words, to show that the time spent by $\theta_0^\operatornameeratorname{e}ps$ in excursions of maximum height less than $2^{-n}$ (before first hitting $2\pi$) goes to $0$ uniformly in $\operatornameeratorname{e}ps$ as $n\to \infty$.} \corr{To do this, let us consider the total (i.e., cumulative) duration $C^{\operatornameeratorname{e}ps,n}$ of such excursions of $\theta_0^\operatornameeratorname{e}ps$, before the first time $\sigma^{\operatornameeratorname{e}ps}$ that $\theta_0^\operatornameeratorname{e}ps$ reaches $\pi$. The reason for restricting to this time interval is to make use of the final observation in Remark \operatorname{Re}f{rmk:theta_bessel_compare}: that the integrand in the definition of $Z^\operatornameeratorname{e}ps$ is deterministically bounded up to time $\sigma^\operatornameeratorname{e}ps$. This will allow us to transfer the question to one about Bessel processes.
And, indeed, since the number of times that $\theta_0^\operatornameeratorname{e}ps$ will reach $\pi$ before time $\tau_0^\operatornameeratorname{e}ps$ is a geometric random variable with success probability uniformly bounded away from $0$ (due to Lemma \operatorname{Re}f{lem:mn_conv}), it is enough to show that $C^{\operatornameeratorname{e}ps,n}$ tends to $0$ in probability as $n\to \infty$, uniformly in $\operatornameeratorname{e}ps$.} \corr{For this, we first notice that by Remark \operatorname{Re}f{rmk:theta_bessel_compare}, for any $a,S>0$ we can write \begin{equation*} \mathbb{P}(C^{\operatornameeratorname{e}ps,n}>a)\le \mathbb{P}(\sigma^\operatornameeratorname{e}ps>S)+\mathbb{Q}^\operatornameeratorname{e}ps(\operatornameeratorname{e}xp(-Z_{\sigma^\operatornameeratorname{e}ps}^\operatornameeratorname{e}ps+\tfrac{1}{2}\langle Z^\operatornameeratorname{e}ps \rangle_{\sigma^\operatornameeratorname{e}ps}) \mathbbm{1}_{\{C^{\operatornameeratorname{e}ps,n}>a\}}\mathbbm{1}_{\{\sigma^\operatornameeratorname{e}ps\le S\}}) \operatornameeratorname{e}nd{equation*} where $Z^\operatornameeratorname{e}ps$ is as defined in Remark \operatorname{Re}f{rmk:theta_bessel_compare} and under $\mathbb{Q}^\operatornameeratorname{e}ps$, $\theta_0^\operatornameeratorname{e}ps$ has the law of $\sqrt{{\kappa'}}$ times a Bessel process of dimension $\delta({\kappa'})=3-8/{\kappa'}$. Since $\mathbb{P}(\sigma^\operatornameeratorname{e}ps>S)\to 0$ as $S\to \infty$, uniformly in $\operatornameeratorname{e}ps$ (this is proved for example in \cite{SSW09}), it suffices to show that for any fixed $S$, the second term in the above equation tends to $0$ uniformly in $\operatornameeratorname{e}ps$ as $n\to \infty$. 
To this end, we begin by using Cauchy--Schwarz to obtain the upper bound \begin{equation*} \big(\mathbb{Q}^\operatornameeratorname{e}ps(\operatornameeratorname{e}xp(-Z_{\sigma^\operatornameeratorname{e}ps}^\operatornameeratorname{e}ps+\tfrac{1}{2}\langle Z^\operatornameeratorname{e}ps \rangle_{\sigma^\operatornameeratorname{e}ps}) \mathbbm{1}_{\{C^{\operatornameeratorname{e}ps,n}>a\}}\mathbbm{1}_{\{\sigma^\operatornameeratorname{e}ps\le S\}})\big)^2\le \mathbb{Q}^\operatornameeratorname{e}ps(\operatornameeratorname{e}xp(-2Z_{\sigma^\operatornameeratorname{e}ps}^\operatornameeratorname{e}ps+\langle Z^\operatornameeratorname{e}ps \rangle_{\sigma^\operatornameeratorname{e}ps}) \mathbbm{1}_{\{\sigma^\operatornameeratorname{e}ps\le S\}}) \mathbb{Q}^\operatornameeratorname{e}ps( \mathbbm{1}_{\{C^{\operatornameeratorname{e}ps,n}>a\}}). \operatornameeratorname{e}nd{equation*} Then, because we are on the event that $\sigma^\operatornameeratorname{e}ps\le S$, and the integrand in the definition of $Z^\operatornameeratorname{e}ps$ is deterministically bounded up to time $\sigma^\operatornameeratorname{e}ps$, we have that $\mathbb{Q}^\operatornameeratorname{e}ps(\operatornameeratorname{e}xp(-2Z_{\sigma^\operatornameeratorname{e}ps}^\operatornameeratorname{e}ps+\langle Z^\operatornameeratorname{e}ps \rangle_{\sigma^\operatornameeratorname{e}ps}) \mathbbm{1}_{\{\sigma^\operatornameeratorname{e}ps\le S\}}) \le c$ for some constant $c=c(S)$ not depending on $\operatornameeratorname{e}ps$. So it remains to show that the $\mathbb{Q}^\operatornameeratorname{e}ps$ expectation of $C^{\operatornameeratorname{e}ps,n}$ goes to $0$ uniformly in $\operatornameeratorname{e}ps$ as $n\to \infty$.} \corr{Recall that under $\mathbb{Q}^\operatornameeratorname{e}ps$, $\theta_0^\operatornameeratorname{e}ps$ has the law of $\sqrt{{\kappa'}}$ times a Bessel process of dimension $\delta({\kappa'})=3-8/{\kappa'}$.
Now, by \cite[Theorem 1]{PitmanYor} we can construct a dimension $\delta({\kappa'})$ Bessel process by concatenating excursions from a Poisson point process $\Lambda$ with intensity $\int_0^{\infty} x^{\delta-3} \nu_\delta^x \, dx$ times Lebesgue measure on $E\times \mathbb{R}$, where $\nu_\delta^x$ is a probability measure on Bessel excursions with maximum height $x$ for each $x>0$. Moreover, by Brownian scaling, $\nu_\delta^x(e)=\nu_\delta^1(e_x)$, $e_x(s)=x^{-1}e(x^{2}s)$ for $0\le s \le \zeta(e_x)=x^{-2}\zeta(e)$. (For proofs of these results, see for example \cite{PitmanYor}). Now, if we let $T=\inf\{t:(e,t)\in \Lambda \text{ and } \sup e(s) \ge \pi\}$, then conditionally on $T$, we can write $C^{{\kappa'}, n}$ as the sum of the excursion lifetimes $\zeta(e)$ over points $(e,t)$ in a (conditionally independent) Poisson point process with intensity $$\int_0^{2^{-n}} x^{\delta-3} \nu_\delta^x \, dx \times \mathrm{Leb}([0,T]).$$ Note that by definition of the Poisson point process, $T$ is an exponential random variable with associated parameter $\int_\pi^\infty x^{\delta-3} \, dx$, and so has uniformly bounded expectation in ${\kappa'}$. Since Brownian scaling also implies that $\nu_\delta^x(\zeta(e)) =x^2\nu_\delta^{1}(\zeta(e_x))$ for excursions $e$, Campbell's formula yields that the expectation of $C^{{\kappa'},n}$ is of order $2^{-n\delta}$. This indeed converges uniformly to $0$ in $\delta\ge 1$ (equivalently ${\kappa'},\operatornameeratorname{e}ps$), which completes the proof.} \operatornameeratorname{e}nd{proof} \begin{lemma}\label{lem:levelnconv} For any fixed $n\in \mathbb{N}$, $({\mathrm{D}}^{\operatornameeratorname{e}ps,n}_0,\tau^{\operatornameeratorname{e}ps,n}_0)$ converges to $({\mathrm{D}}^n_0,\tau^n_0)$ in law as $\operatornameeratorname{e}ps \downarrow 0$, with respect to the Carath\'{e}odory $\times$ Euclidean topology. 
\operatornameeratorname{e}nd{lemma} \begin{proofof}{Proposition \operatorname{Re}f{prop:sletocleconv}} This follows by combining Lemma \operatorname{Re}f{lem:ngoodapprox} and Lemma \operatorname{Re}f{lem:levelnconv}, plus the fact that $(\mathrm{D}_0^n,\tau_0^n)\mathbb{R}ightarrow (\mathrm{D}_0,\tau_0)$ as $n\to \infty$. \operatornameeratorname{e}nd{proofof} \subsubsection{Convergence at a fixed level of approximation as ${\kappa'}\downarrow 4$}\label{subsec:whiz} The remainder of this section will now be devoted to proving Lemma \operatorname{Re}f{lem:levelnconv}. This is slightly trickier, and so we will break down its proof further into Lemmas \operatorname{Re}f{lem:bigexsame} and \operatorname{Re}f{lem:uniform} below. Let us first set-up for the statements of these lemmas. For ${\kappa'}\in (4,8)$ (equiv. $\operatornameeratorname{e}ps\in (0,2-\sqrt{2})$) we set $X_i^{\operatornameeratorname{e}ps,n}=(W_0^\operatornameeratorname{e}ps)_{S_i^{\operatornameeratorname{e}ps,n}}$ for $1\le i\le \Lambda^{\operatornameeratorname{e}ps,n}$ and then write $$\mathbf{X}^{\operatornameeratorname{e}ps,n}=(X_1^{\operatornameeratorname{e}ps,n},X_2^{\operatornameeratorname{e}ps,n},\cdots, X_{\Lambda^{\operatornameeratorname{e}ps,n}}^{\operatornameeratorname{e}ps,n}).$$ For the $\mathbb{C}LE_4$ case, we write $$\mathbf{X}^{n}=(X_1^{n},X_2^{n},\cdots, X_{\Lambda^{n}}^{n})$$ where the $X^n$ are as defined in Section \operatorname{Re}f{sec:ucle4}. Also recall the definition of the excursions $(e_i^{\operatornameeratorname{e}ps,n})_{1\le i \le \Lambda^{\operatornameeratorname{e}ps,n}}$ of $\theta^\operatornameeratorname{e}ps$ above height $2^{-n}$. 
Define the corresponding excursions $(e_i^n)_{i\le \Lambda^n}$ for the uniform $\mathbb{C}LE_4$ exploration, and denote $$\mathbf{e}^{\operatornameeratorname{e}ps,n}=(e_1^{\operatornameeratorname{e}ps,n},e_2^{\operatornameeratorname{e}ps,n},\cdots, e_{\Lambda^{\operatornameeratorname{e}ps,n}}^{\operatornameeratorname{e}ps,n}), \quad \mathbf{e}^{n}=(e_1^{n},e_2^{n},\cdots, e_{\Lambda^{n}}^{n}).$$ Thus, $\mathbf{X}^{\operatornameeratorname{e}ps,n}, \mathbf{X}^n$ live in the space of sequences of finite length, taking values in $\partial \mathbb{D}$. We equip this space with topology such that $\mathbf{X}^{(n)}\to \mathbf{X}$ as $n\to \infty$ iff the vector length of $\mathbf{X}^{(n)}$ is equal to the length of $\mathbf{X}$ for all $n\ge N_0$ large enough, and such that every component of $\mathbf{X}^{(n)}$ \corr{(for $n\ge N_0$)} converges to the corresponding component of $\mathbf{X}$ with respect to the Euclidean distance. Similarly, $\mathbf{e}^{\operatornameeratorname{e}ps,n}, \mathbf{e}^n$ live in the space of sequences of finite length, taking values in the space $E$ of excursions away from $\{0,2\pi\}$. We equip this sequence space with topology such that $\mathbf{e}^{(k)}\to \mathbf{e}$ as $k\to \infty$ iff the vector length of $\mathbf{e}^{(k)}$ is equal to the vector length of $\mathbf{e}$ for all $k$ large enough, together with component-wise convergence with respect to $d_E$. \begin{lemma} \label{lem:bigexsame} For any $n\in \mathbb{N}$, $(\mathbf{e}^{\operatornameeratorname{e}ps,n},\tau^{\operatornameeratorname{e}ps,n})\mathbb{R}ightarrow (\mathbf{e}^n,\tau^n)$ as $\operatornameeratorname{e}ps\to 0$. \operatornameeratorname{e}nd{lemma} \begin{proof} This is a direct consequence of Lemma \operatorname{Re}f{lem:mn_conv} and the definition of $\tau^{\operatornameeratorname{e}ps,n},\tau^n$. 
\operatornameeratorname{e}nd{proof} \begin{lemma}\label{lem:uniform} For any $n\in \mathbb{N}$, $\mathbf{X}^{\operatornameeratorname{e}ps,n}\to \mathbf{X}^n$ in law as $\operatornameeratorname{e}ps\to 0$. \operatornameeratorname{e}nd{lemma} This second lemma will take a bit more work to prove. However, we can immediately see how the two together imply Lemma \operatorname{Re}f{lem:levelnconv}:\\ \begin{proofof}{Lemma \operatorname{Re}f{lem:levelnconv}} Lemmas \operatorname{Re}f{lem:bigexsame} and \operatorname{Re}f{lem:uniform} imply that the driving functions of $\mathrm{D}^{\operatornameeratorname{e}ps,n}_0$ converge in law to the driving function of $\mathrm{D}^n_0$ with respect to the Skorokhod topology. This implies the result by Remark \operatorname{Re}f{rmk:dconv_cconv}. \operatornameeratorname{e}nd{proofof}\\ Our new goal is therefore to prove Lemma \operatorname{Re}f{lem:uniform}. The main ingredient is the following (recall that $S_1^{\operatornameeratorname{e}ps,n}$ is the start time of the first excursion of $\theta_0^\operatornameeratorname{e}ps$ away from $0$ that reaches height $2^{-n}$). \begin{lemma}\label{lem:uniform_equation} For any $u\ne 0$ and $n\in \mathbb{N}$ fixed, \begin{equation}\label{eqn:ftuniform} \corr{\mathbb{E}[\, (X_1^{\operatornameeratorname{e}ps,n})^u\, ]} = \mathbb{E}[\,\operatornameeratorname{e}xp(\operatornameeratorname{i} u \int_0^{S_1^{\operatornameeratorname{e}ps,n}}\cot((\theta^\operatornameeratorname{e}ps_0)_s/2) \, ds)\,]\to 0 \text{ as } \operatornameeratorname{e}ps \downarrow 0. \operatornameeratorname{e}nd{equation} \operatornameeratorname{e}nd{lemma} For the proof of Lemma \operatorname{Re}f{lem:uniform_equation}, we are going to make use of Remark \operatorname{Re}f{rmk:theta_bessel_compare}. That is, the fact that $\theta^\operatornameeratorname{e}ps_0$ behaves very much like $\sqrt{{\kappa'}}$ times a Bessel process of dimension $\delta=3-8/{\kappa'}\in (1,2)$.
The Bessel process is much more convenient to work with (in terms of exact calculations), because of its scaling properties. Indeed, for Bessel processes we have the following lemma: {\begin{lemma}\label{lem:bes_uniform} Let $\widetilde \theta^\operatornameeratorname{e}ps$ be $\sqrt{{\kappa'}}=\sqrt{{\kappa'}(\operatornameeratorname{e}ps)}$ times a Bessel process of dimension $3-8/{\kappa'}$ (started from $0$) and $\widetilde S^{\operatornameeratorname{e}ps,m}$ be the start time of the first excursion in which it exceeds $2^{-m}$. Then for $u\ne 0$, $$| \mathbb{E}[\operatornameeratorname{e}xp\big(2\operatornameeratorname{i} u \int_0^{\widetilde{S}^{\operatornameeratorname{e}ps,m}} (\widetilde \theta^\operatornameeratorname{e}ps_s)^{-1} \, ds\big) ]|\to 0$$ as $\operatornameeratorname{e}ps\downarrow 0$ for any $m$ large enough. \operatornameeratorname{e}nd{lemma} } \noindent (The assumption that $m$ is sufficiently large here is made simply for convenience of proof.)\\ \begin{proof} By changing the value of $u$ appropriately, we can instead take $\widetilde \theta^{\operatornameeratorname{e}ps}$ to be a Bessel process of dimension $\delta({\kappa'})=3-8/{\kappa'}$ (i.e., we forget about the multiplicative factor of $\sqrt{{\kappa'}}$). Note that $\delta({\kappa'})\in (1,2)$ for ${\kappa'}<8$ and $\delta({\kappa'}) \downarrow 1$ as ${\kappa'}\downarrow 4$. By standard It\^{o} excursion theory, $\widetilde \theta^\operatornameeratorname{e}ps$ can be formed by gluing together the excursions of a Poisson point process $\Lambda$ with intensity $\nu_{\delta({\kappa'})}\times \text{Leb}_{[0,\infty)}$, where $\nu_\delta$ is the Bessel-$\delta$ excursion measure.
\corr{As mentioned previously, it is a classical result that we can decompose $\nu_\delta(\cdot)=\int_0^\infty x^{\delta-3}\nu_\delta^x(\cdot) \, dx$ (there is a multiplicative constant that we can set to one without loss of generality) where $\nu_\delta^x$ is a probability measure on excursions with maximum height exactly $x$ for each $x>0$ and that moreover by Brownian scaling, $\nu_\delta^x(e)=\nu_\delta^1(e_x)$, $e_x(s)=x^{-1}e(x^{2}s)$ for $0\le s \le \zeta(e_x)=x^{-2}\zeta(e)$.} Let \begin{equation}\label{eq:Tkm} T^{\kappa'}_m \, \corr{\overset{(d)}{=}} \, \text{Exp}\left(\frac{(2^{-m})^{\delta-2}}{2-\delta} \right)\operatornameeratorname{e}nd{equation} be the smallest $t$ such that $(e,t)$ is in the Poisson process for some $e$ with $\sup(e)> 2^{-m}$. \corr{Then conditionally on $T_m^{{\kappa'}}$, the collection of points $(e,t)$ in the Poisson process with $t\le T_m^{{\kappa'}}$ is simply a Poisson process $\Lambda{(T_m^{\kappa'})}$ with intensity $\int_0^{2^{-m}} x^{\delta-3}\nu_\delta^x \times \mathrm{Leb}([0,T_m^{{\kappa'}}])$. 
So, if for any given excursion $e\in E$, we define $$f(e)=\int_0^{\zeta(e)}\frac{1}{e(s)} \, ds$$ (setting $f(e)=\infty$ if the interval diverges), we have \begin{equation}\label{eq:campbell} \mathbb{E}(\operatornameeratorname{e}^{2 \operatornameeratorname{i} u \int_0^{\widetilde{S}^{\operatornameeratorname{e}ps,m}} (\widetilde \theta_s^\operatornameeratorname{e}ps)^{-1}\, ds} \, | \, T_m^{\kappa'} ) = \mathbb{E}(\operatornameeratorname{e}^{2\operatornameeratorname{i} u \sum_{(e,t)\in \Lambda{(T_m^{\kappa'})}} f(e)} \, | \, T_m^{\kappa'})=\operatornameeratorname{e}xp\big(T_m^{\kappa'} \int_0^{2^{-m}} x^{\delta-3}\nu_\delta^x(1-\operatornameeratorname{e}^{2\operatornameeratorname{i} u f(e)}) \big) \operatornameeratorname{e}nd{equation} where in the final equality we have applied Campbell's formula for the Poisson point process $\Lambda{(T_m^{\kappa'})}$.} \corr{The real part of $1-\operatornameeratorname{e}^{2 \operatornameeratorname{i} u f(e)}$ is bounded above by $2 u^2 f(e)^2$. Then using the Brownian scaling property of $\nu_\delta^x$ explained before, we can bound $\nu_\delta^x(\mathbb{R}e (1-\operatornameeratorname{e}^{2 \operatornameeratorname{i} u f(e)}))$ by $u^2 x^2\nu_\delta^1(f^2)$. Using the fact that $\nu_\delta^1(f^2) < \infty$, which can be obtained from a direct calculation, it follows that $\int_0^{2^{-m}} x^{\delta-3}\nu_\delta^x(\mathbb{R}e (1-\operatornameeratorname{e}^{2 \operatornameeratorname{i} u f(e)})) \, dx< (2-\delta)^{-1} 2^{-m(\delta-2)}$ for all $m\ge M_0 = M_0(u)$, where $M_0<\infty$ does not depend on $\delta<3/2$ (say). 
This allows us to take expectations over $T_m^{{\kappa'}}$ in \operatornameeratorname{e}qref{eq:campbell} (recall the distribution of $T_m^{\kappa'}$ from \operatornameeratorname{e}qref{eq:Tkm}) to obtain that \begin{align}\label{eq:boundcampbell} \left|\mathbb{E}(\operatornameeratorname{e}^{2 \operatornameeratorname{i} u \int_0^{\widetilde{S}^{\operatornameeratorname{e}ps,m}} (\widetilde \theta_s^\operatornameeratorname{e}ps)^{-1}\, ds})\right| & = \left|1-2^{m(\delta-2)}(2-\delta) \int_0^{2^{-m}} x^{\delta-3} \nu_\delta^x((1-\cos(2uf(e))+\operatornameeratorname{i} \sin(2uf(e)))) \, dx \right|^{-1} \nonumber \\ & \le \left|2^{m(\delta-2)}(2-\delta) \int_0^{2^{-m}} x^{\delta-3} \nu_\delta^x(\sin(2u f(e))) \, dx \right|^{-1} \nonumber \\ & \le \left|(2-\delta) \int_0^{1} y^{\delta-3} \nu_\delta^{2^{-m}y}(\sin(2u f(e))) \, dy \right|^{-1} \operatornameeratorname{e}nd{align} for all $m\ge M_0$ and $\delta\in (1,3/2)$.} \corr{We now fix $u\ne 0$ and $m\ge M_0$ for the rest of the proof. Our aim is to show that the final expression in \operatornameeratorname{e}qref{eq:boundcampbell} above converges to $0$ as $\delta\downarrow 1$ (equivalently $\operatornameeratorname{e}ps\downarrow 0$). To do this, we use the Brownian scaling property of $\nu_\delta^x$ again to write $\nu_{\delta}^{2^{-m}y}(\sin(2uf(e)))=\nu_\delta^1 (\sin(2^{-m+1}uyf(e)))$ for each $y$. We also observe that $$y^{-1}\nu_\delta^1(\sin(2^{-m+1}uyf(e)))\to \nu_\delta^1(2^{-m+1}uf(e))$$ as $y\downarrow 0$, which follows by dominated convergence since $\sin(z)/z\to 1$ as $z\downarrow 0$. Moreover (by Lemma \operatorname{Re}f{lem:mn_conv}, say) the convergence is uniform in $\delta$. 
This means that for some $Y_{u,m}\in(0,1)$ and $ k_{u,m}>0$ depending only on $u$ and $m$, we have that $$|\nu_\delta^1(\sin(2^{-m+1}uyf(e)))|\ge k_{u,m} y \; \text{ for all } y\le Y_{u,m}.$$ It follows that \begin{align*}\left|(2-\delta) \int_0^{1} y^{\delta-3} \nu_\delta^{2^{-m}y}(\sin(2u f(e))) \, dy \right| & \ge (2-\delta)k_{u,m}\int_0^{Y_{u,m}} y^{\delta-2} \, dy -(2-\delta)\int_{Y_{u,m}}^1 y^{\delta-3} \, dy \\ & \ge \frac{k_{u,m}Y_{u,m}^{\delta-1}}{\delta-1}-(1-Y_{u,m}^{\delta-2}). \operatornameeratorname{e}nd{align*} for all $\delta\in (1,3/2)$. Since this expression converges to $\infty$ as $\delta\downarrow 1$, and the final term in \operatornameeratorname{e}qref{eq:boundcampbell} is its reciprocal, the proof is complete.} \operatornameeratorname{e}nd{proof}\\ With this in hand, the proof of Lemma \operatorname{Re}f{lem:uniform_equation} follows in a straightforward manner.\\ \begin{proofof}{Lemma \operatorname{Re}f{lem:uniform_equation}} In order to do a Bessel process comparison and make use of Lemma \operatorname{Re}f{lem:bes_uniform}, we need to replace the fixed $n$ in \operatornameeratorname{e}qref{eqn:ftuniform} by some $m$ which is very large (so we are only dealing with time intervals where $\theta^\operatornameeratorname{e}ps_0$ is tiny). However, this is not a problem, since for $m\ge n$ we can write \[ \int_0^{S_1^{\operatornameeratorname{e}ps,n}} \cot((\theta^\operatornameeratorname{e}ps_0)_s/2) \, ds = \int_0^{S_1^{\operatornameeratorname{e}ps,m}} \cot((\theta^\operatornameeratorname{e}ps_0)_s/2) \, ds + \int_{S_1^{\operatornameeratorname{e}ps,m}}^{S_1^{\operatornameeratorname{e}ps,n}} \cot((\theta^\operatornameeratorname{e}ps_0)_s/2) \, ds \] where the two integrals are independent.
This means that $|\mathbb{E}[\,\operatornameeratorname{e}xp(\operatornameeratorname{i} u \int_0^{S_1^{\operatornameeratorname{e}ps,n}}\cot((\theta^\operatornameeratorname{e}ps_0)_s/2) \, ds)\,]|$ is actually increasing in $n$ for any fixed $\operatornameeratorname{e}ps$, so proving \operatornameeratorname{e}qref{eqn:ftuniform} for $m>n$ also proves it for $n$. So we can write, for any $m\ge n$ \[ |\mathbb{E}[\,\operatornameeratorname{e}xp(\operatornameeratorname{i} u \int_0^{S_1^{\operatornameeratorname{e}ps,n}}\cot((\theta^\operatornameeratorname{e}ps_0)_s/2) \, ds)\,]| \le |\mathbb{E}[\,\operatornameeratorname{e}xp(\operatornameeratorname{i} u \int_0^{S_1^{\operatornameeratorname{e}ps,m}}\cot((\theta^\operatornameeratorname{e}ps_0)_s/2) \, ds)\,]| \] which is, by the triangle inequality, less than \begin{equation*}\label{eqn:boundcomparebessel} \corr{ \left| \mathbb{E}[\operatornameeratorname{e}xp\big(2\operatornameeratorname{i} u \int_0^{\widetilde{S}^{\operatornameeratorname{e}ps,m}} (\widetilde \theta^\operatornameeratorname{e}ps_s)^{-1} \, ds\big) ]\right| + \left|\mathbb{E}[\,\operatornameeratorname{e}xp\big(\operatornameeratorname{i} u \int_0^{S_1^{\operatornameeratorname{e}ps,m}}\cot((\theta^\operatornameeratorname{e}ps_0)_s/2) \, ds\big)\,]-\mathbb{E}[\operatornameeratorname{e}xp\big(2\operatornameeratorname{i} u \int_0^{\widetilde S^{\operatornameeratorname{e}ps,m}} (\widetilde \theta^{\operatornameeratorname{e}ps} _s)^{-1} \, ds\big) ]\right|. } \operatornameeratorname{e}nd{equation*} Now, \corr{using that $(1/y- (1/2)\cot(y/2))\downarrow 0$ as $y\downarrow 0$, and an argument almost identical to the first half of the proof of Lemma \operatorname{Re}f{lem:ngoodapprox}}, the second term above converges to $0$ as $m\to \infty$, uniformly in $\operatornameeratorname{e}ps$. Since Lemma \operatorname{Re}f{lem:bes_uniform} says that the first term converges to 0 as $\operatornameeratorname{e}ps\to 0$ for any $m$ large enough, this completes the proof.
\operatornameeratorname{e}nd{proofof}\\ \begin{proofof}{Lemma \operatorname{Re}f{lem:uniform}} Equation \operatornameeratorname{e}qref{eqn:ftuniform} implies that the law of $X_1^{\operatornameeratorname{e}ps,n}$ converges to the uniform distribution on the unit circle as ${\kappa'}\downarrow 4$. The full result then follows by the Markov property of $\theta^\operatornameeratorname{e}ps_0$. \operatornameeratorname{e}nd{proofof} \subsubsection{Summary} So, we have now tied up all the loose ends from the proof of Proposition \operatorname{Re}f{prop:sletocleconv}. Recall that this proposition asserted the convergence in law of a single $\SLE_{\kappappa'}({\kappa'}-6)$ branch in $\mathbb{D}$, targeted at $0$, to the corresponding uniform CLE$_4$ exploration branch. Let us conclude this subsection by noting that the same result holds when we change the target point. For $z\in \mathbb{D}$ not necessarily equal to $0$, we define $\mathcal{D}_z$ to be the space of evolving domains whose image after applying the conformal map $f(w)=(w-z)/(1-\bar{z}w)$ from $\mathbb{D}\to \mathbb{D}$, $z\mapsto 0$, lies in $\mathcal{D}$. From the convergence in Proposition \operatorname{Re}f{prop:convfullbranch}, plus the target invariance of radial $\SLE_{\kappa'}({\kappa'}-6)$ and the uniform CLE$_4$ exploration, it is immediate that: \begin{corollary}\label{cor:convfullbranch} For any $z\in \mathcal{Q}$, $({\mathbf{D}}_z^\operatornameeratorname{e}ps,\tau_z^\operatornameeratorname{e}ps)\mathbb{R}ightarrow ({\mathbf{D}}_z,\tau_z)$ in $\mathcal{D}_z\times \mathbb{R}$ as $\operatornameeratorname{e}ps\to 0$. \operatornameeratorname{e}nd{corollary} \corr{Recall that $\tau_{0,z}^\operatornameeratorname{e}ps$ is the last time that $\theta_z^\operatornameeratorname{e}ps$ hits $0$ before first hitting $2\pi$ and $[\tau_{0,z},\tau_z]$ is the time interval during which $\mathbf{D}_z$ traces the outermost CLE$_4$ loop surrounding $z$. 
Notice that $\tau_z^\operatornameeratorname{e}ps-\tau_{0,z}^\operatornameeratorname{e}ps$ is equal to the length of the excursion $ \mathrm{e}_{\Lambda^{\operatornameeratorname{e}ps,n}}^{\operatornameeratorname{e}ps,n}$ and similarly $\tau_z-\tau_{0,z}$ is the length of the excursion $\mathrm{e}_{\Lambda^n}$ (for every $n$), so that by Lemma \operatorname{Re}f{lem:bigexsame} the following extension holds.} \begin{corollary} \label{rmk:convfullbranch} For any fixed $z\in \mathcal{Q}$ $$({\mathbf{D}}_z^\operatornameeratorname{e}ps, \tau_z^\operatornameeratorname{e}ps,\tau_{0,z}^\operatornameeratorname{e}ps )\mathbb{R}ightarrow ({\mathbf{D}}_z,\tau_z, \tau_{0,z} )$$ as $\operatornameeratorname{e}ps\to 0$. \operatornameeratorname{e}nd{corollary} \subsection{Convergence of the CLE$_{\kappa'}$ loops} \label{sec:conv_loops} Recall that for $z\in \mathcal{Q}$, $\mathcal{L}_z^\operatornameeratorname{e}ps$ (resp. $\mathcal{L}_z$) denotes the outermost $\mathbb{C}LE_{\kappappa'}$ loop (resp. CLE$_4$ loop) containing $z$ and $\mathcal{B}_z^\operatornameeratorname{e}ps$ (resp. $\mathcal{B}_z$) denotes the connected component of the complement of $\mathcal{L}_z^\operatornameeratorname{e}ps$ (resp. $\mathcal{L}_z$) containing $z$. By definition we have \begin{equation}\label{eq:bub_loewner} \mathcal{B}_z^\operatornameeratorname{e}ps = ({\mathbf{D}}^\operatornameeratorname{e}ps_z)_{\tau^\operatornameeratorname{e}ps_z} \text{ and } \mathcal{B}_z=({\mathbf{D}}_z)_{\tau_z}, \operatornameeratorname{e}nd{equation} where $\{(\mathbf{D}_z^\operatornameeratorname{e}ps)_t\, ; \, t\ge 0\}$ and $\{(\mathbf{D}_z)_t\, ; \, t\ge0\}$ {are processes in $\mathcal{D}_z$} describing radial $\SLE_{\kappa'}({\kappa'}-6)$ processes and a uniform $\mathbb{C}LE_4$ exploration, respectively, towards $z$. See Section \operatorname{Re}f{sec:sletocle} for more details.
{In this subsection we will prove the convergence of $\mathcal{L}_z^\operatornameeratorname{e}ps\mathbb{R}ightarrow \mathcal{L}_z$ with respect to the Hausdorff distance. That this might be non-obvious is illustrated by the following difference: in the limit $\partial \mathbf{D}_z = \mathcal{L}_z$, whereas this is not at all the case for $\operatornameeratorname{e}ps > 0$. Nevertheless, we have:} \begin{proposition}\label{prop:singleloopconv}For any $z\in \mathcal{Q}$ \label{prop:cleloopconv} $$({\mathbf{D}}^\operatornameeratorname{e}ps_z, \mathcal{L}^\operatornameeratorname{e}ps_z, \mathcal{B}^\operatornameeratorname{e}ps_z) \mathbb{R}ightarrow ({\mathbf{D}}_z, \mathcal{L}_z, \mathcal{B}_z) $$ as $\operatornameeratorname{e}ps\downarrow 0$, with respect to the product topology generated by ($\mathcal{D}_z$ $\times$ Hausdorff $\times$ Carath\'{e}odory viewed from $z$) convergence. \operatornameeratorname{e}nd{proposition} Given \operatornameeratorname{e}qref{eq:bub_loewner}, and that we already know the convergence of $\mathbf{D}_z^\operatornameeratorname{e}ps$ as $\operatornameeratorname{e}ps\downarrow 0$, the proof of Proposition \operatorname{Re}f{prop:cleloopconv} boils down to the following lemma. \begin{lemma} \label{lem:keyforcleloopconv} Suppose that $({\mathbf{D}}_0, \mathcal{L}, \mathcal{B}_0)$ is a subsequential limit in law of $({\mathbf{D}}_0^\operatornameeratorname{e}ps, \mathcal{L}_0^\operatornameeratorname{e}ps, \mathcal{B}_0^\operatornameeratorname{e}ps)$ as $\operatornameeratorname{e}ps\downarrow 0$ (with the topology of Proposition \operatorname{Re}f{prop:cleloopconv}). Then we have $\mathcal{L}=\mathcal{L}_0$ a.s. \operatornameeratorname{e}nd{lemma} \begin{proofof}{Proposition \operatorname{Re}f{prop:cleloopconv} given Lemma \operatorname{Re}f{lem:keyforcleloopconv}} By conformal invariance we may assume that $z=0$. 
Observe that by Corollary \operatorname{Re}f{cor:convfullbranch}, we already know that $({\mathbf{D}}_0^\operatornameeratorname{e}ps, \mathcal{B}_0^\operatornameeratorname{e}ps)\mathbb{R}ightarrow ({\mathbf{D}}_0, \mathcal{B}_0)$ as $\operatornameeratorname{e}ps\to 0$, with respect to the product ($\mathcal{D}$ $\times$ Carath\'{e}odory ) topology. Indeed, if one takes a sequence $\operatornameeratorname{e}ps_n$ converging to $0$, and a coupling of $({\mathbf{D}}_0^{\operatornameeratorname{e}ps_n},\tau_0^{\operatornameeratorname{e}ps_n})_{n\in \mathbb{N}}$ and $({\mathbf{D}}_0,\tau_0)$ so that $({\mathbf{D}}_0^{\operatornameeratorname{e}ps_n},\tau_0^{\operatornameeratorname{e}ps_n})\to ({\mathbf{D}}_0,\tau_0)$ a.s.\ as $n\to \infty$, it is clear due to \operatornameeratorname{e}qref{eq:bub_loewner} that each $\mathcal{B}_0^{\operatornameeratorname{e}ps_n}$ also converges to $\mathcal{B}_0$ a.s. Also note that $(\mathcal{L}_0^\operatornameeratorname{e}ps)$ is tight in $\operatornameeratorname{e}ps$ with respect to the Hausdorff topology, since all the sets in question are almost surely contained in $\overline{\mathbb{D}}$. Thus $({\mathrm{D}}_0^\operatornameeratorname{e}ps, \mathcal{B}_0^\operatornameeratorname{e}ps, \mathcal{L}_0^\operatornameeratorname{e}ps)$ is tight in the desired topology, and the limit is uniquely characterized by the above observation and Lemma \operatorname{Re}f{lem:keyforcleloopconv}. This yields the proposition. 
\operatornameeratorname{e}nd{proofof} \subsubsection{Strategy for the proof of Lemma \operatorname{Re}f{lem:keyforcleloopconv}} At this point, we know the convergence in law of $({\mathbf{D}}_0^\operatornameeratorname{e}ps, \mathcal{B}_0^\operatornameeratorname{e}ps)\to ({\mathbf{D}}_0, \mathcal{B}_0)$ as $\operatornameeratorname{e}ps\downarrow 0$, and we know that $\mathcal{B}_0^\operatornameeratorname{e}ps$ is the connected component of $\mathbb{D}\setminus \mathcal{L}_0^\operatornameeratorname{e}ps$ containing $0$ for every $\operatornameeratorname{e}ps$. Given a subsequential limit $({\mathbf{D}}_0, \mathcal{B}_0, \mathcal{L})$ in law of $({\mathbf{D}}_0^\operatornameeratorname{e}ps,\mathcal{B}_0^\operatornameeratorname{e}ps, \mathcal{L}_0^\operatornameeratorname{e}ps)$, the difficulty in concluding that $\mathcal{L}=\mathcal{L}_0$ lies in the fact that Carath\'{e}odory convergence (which is what we have for $\mathcal{B}_0^\operatornameeratorname{e}ps$) does not ``see'' bottlenecks: see Figure \operatorname{Re}f{fig:cart_prob}. To proceed with the proof, we first show that any part of the supposed limit $\mathcal{L}$ that does not coincide with $\mathcal{L}_0$ must lie \operatornameeratorname{e}mph{outside} of $\mathcal{B}_0$. \begin{lemma}\label{cor:loopconvinclusion} With the set up of Lemma \operatorname{Re}f{lem:keyforcleloopconv}, we have $\mathcal{L}\subseteq \mathbb{C}\setminus \mathcal{B}_0$ almost surely. \operatornameeratorname{e}nd{lemma} Once we have this ``one-sided" result, it suffices to prove that the laws of $\mathcal{L}$ and $\mathcal{L}_0$ coincide: \begin{lemma}\label{lem:convlooplaw} Suppose that $\mathcal{L}$ is as in Lemma \operatorname{Re}f{lem:keyforcleloopconv}. Then the law of $\mathcal{L}$ is equal to the law of $\mathcal{L}_0$. \operatornameeratorname{e}nd{lemma} The first lemma follows almost immediately from the Carath\'{e}odory convergence of $\mathcal{B}_0^\operatornameeratorname{e}ps\to \mathcal{B}_0$ (see the next subsection). 
To prove the second lemma, we use the fact that $\mathbb{C}LE_\kappappa$ for $\kappappa\in (0,8)$ is \operatornameeratorname{e}mph{inversion invariant}: more correctly, a \operatornameeratorname{e}mph{whole plane} version of $\mathbb{C}LE_\kappappa$ is invariant under the mapping $z\mapsto 1/z$. Roughly speaking, this means that for whole plane CLE, we can use inversion invariance to obtain the complementary result to Lemma \operatorname{Re}f{cor:loopconvinclusion}, and deduce Hausdorff convergence in law of the analogous loops. We then have to do a little work, using the relation between whole plane CLE and CLE in the disk (a Markov property), to translate this back to the disk setting and obtain Lemma \operatorname{Re}f{lem:convlooplaw}. \begin{figure} \centering \includegraphics[width=0.5\textwidth]{cart_prob.pdf} \caption{The sequence of domains enclosed by the thick black curves will converge in the Carath\'{e}odory sense (viewed from $0$), but \operatornameeratorname{e}mph{not} in the Hausdorff sense, to the dotted domain. This is the type of behavior that must be ruled out to deduce convergence of CLE loops (in the Hausdorff sense) from convergence of the radial SLE (in the Carath\'{e}odory sense).}\label{fig:cart_prob} \operatornameeratorname{e}nd{figure} \subsubsection{Preliminaries on Carath\'{e}odory convergence} We first record the following standard lemma concerning Carath\'{e}odory convergence, that will be useful in what follows. \begin{lemma}[Carath\'{e}odory kernel theorem] Suppose that $(U_n)_{n\ge 1}$ is a sequence of simply connected domains containing $0$, and for each $n$, write $V_n$ for the connected component of the interior of $\cap_{k\ge n} U_k$ containing $0$. Define the \operatornameeratorname{e}mph{kernel} of $(U_n)_{n\ge 1}$ to be $\cup_n V_n$ if this is non-empty, otherwise declare it to be $\{0\}$. Suppose that $(U_n)_{n\ge 1}$ and $U$ are simply connected domains containing $0$. 
Then $U_n\to U$ with respect to the Carath\'{e}odory topology (viewed from 0) if and only if every subsequence of the $U_n$ has kernel $U$.\label{lem:kernel} \operatornameeratorname{e}nd{lemma} One immediate consequence of this is the following: \begin{corollary}\label{cor:cart_inclusion} Suppose that $(K_n,D_n)\mathbb{R}ightarrow (K,D)$ as $n\to \infty$ for the product (Hausdorff $\times$ Carath\'{e}odory topology), where for each fixed $n$, the coupling of $K_n$ and $D_n$ is such that $D_n$ is a simply connected domain with $0\in D_n$, and $K_n$ is a compact subset of $\mathbb{C}$ with $K_n\subseteq \mathbb{C}\setminus D_n$ almost surely. Then $K\subseteq \mathbb{C}\setminus D$ almost surely. \operatornameeratorname{e}nd{corollary} \begin{proof} By Skorokhod embedding, \corr{we may assume without loss of generality that $(K_{n},D_{n})\to (K,D)$} almost surely as $n\to \infty$. For $j\in \mathbb{N}$ write $V_{j}$ for the connected component of $\mathrm{int}(\cap_{k\ge j}D_{k})$ containing $0$. By assumption, $K_{n}\subseteq \mathbb{C}\setminus D_{n}$ for every $n$ almost surely, which means that $K_{n}\subseteq \mathbb{C}\setminus V_j$ for all $n\ge j$ almost surely. Since $K_{n}$ converges to $K$ in the Hausdorff topology, we have $K\subseteq \mathbb{C}\setminus V_j$ for each $j$, {which implies that} $K\subseteq \mathbb{C}\setminus \cup_j V_j$ almost surely. Finally, because $D_{n}\to D$ in the Carath\'{e}odory topology, the Carath\'{e}odory kernel theorem gives that $\cup_j V_j=D$ almost surely. Hence $K\subseteq \mathbb{C} \setminus D$ almost surely, as desired. \operatornameeratorname{e}nd{proof} \noindent In particular: \begin{proofof}{Lemma \operatorname{Re}f{cor:loopconvinclusion}} This is a direct consequence of Corollary \operatorname{Re}f{cor:cart_inclusion}. 
\end{proofof}\\ Now, if $U_n\subseteq \mathbb{C}$ are such that $1/U_n:=\{z: 1/z\in U_n\}$ is a simply connected domain containing $0$ for each $n$, we say that $U_n\to U$ with respect to the Carath\'{e}odory topology seen from $\infty$, iff $1/U_n\to 1/U$ with respect to the Carath\'{e}odory topology seen from $0$. It is clear from this definition and the above arguments (or similar) that the following properties hold. \begin{lemma}\label{lem:cartfrominfinityprops} Suppose that $U_n\subseteq \mathbb{C}$ are simply connected domains such that $1/U_n$ is simply connected containing $0$ for each $n$. Then \begin{itemize} \item if $(U_n,K_n)\Rightarrow (U,K)$ jointly with respect to the product (Carath\'{e}odory seen from $\infty \times$ Hausdorff) topology, for some compact sets $K_n$ with $K_n\subseteq \mathbb{C}\setminus U_n$ for each $n$, then \corr{$K\subseteq \mathbb{C}\setminus U$} almost surely; \item if $(U_n, D_n)\Rightarrow (U,D)$ jointly with respect to the product (Carath\'{e}odory seen from $\infty \times$ Carath\'{e}odory seen from $0$) topology, for some simply connected domains $\corr{\mathbb{D}}\supseteq D_n\ni 0$ with $D_n\subseteq \mathbb{C}\setminus \corr{U_n}$ for each $n$, then $D\subseteq \mathbb{C}\setminus \corr{U}$ almost surely. \end{itemize} \end{lemma} \corr{\begin{proof} The first bullet point follows from Corollary \ref{cor:cart_inclusion} by considering $1/U_n,1/U$ and $1/K_n,1/K$. For the second, let us assume by Skorokhod embedding that $(U_n,D_n)\to (U,D)$ almost surely in the claimed topology. Then the compact sets $\partial D_n:=\bar{D_n}\setminus D_n\subset \bar{\mathbb{D}}$ are tight for the Hausdorff topology, and hence have some subsequential limit $\partial$. (The argument of) Corollary \ref{cor:cart_inclusion} implies that $\partial \subset \mathbb{C}\setminus U$ and $\partial \subset \mathbb{C}\setminus D$ almost surely.
Since $U$ is an open simply connected domain containing $\infty$ and $D$ is an open simply connected domain containing $0$, this implies that $D\subset \mathbb{C}\setminus U$ almost surely. \operatornameeratorname{e}nd{proof}} \subsubsection{Whole plane CLE and conclusion of the proofs} As mentioned previously, we would now like to use some kind of symmetry argument to prove Lemma \operatorname{Re}f{lem:convlooplaw}. However, the symmetry we wish to exploit is not present for CLE in the unit disk, and so we have to go through an argument using \operatornameeratorname{e}mph{whole plane} CLE instead. Whole plane CLE was first introduced in \cite{KW16} and is, roughly speaking, the local limit of CLE in (any) sequence of domains with size tending to $\infty$. The key symmetry property of whole plane CLE$_{\kappa'}$ that we will make use of is its invariance under applying the inversion map $z\mapsto 1/z$ (\cite{KW16,GMQ18}). More precisely: \begin{lemma}\label{lem:wpcle_props} Let $\Gamma^{\kappa'}$ be a whole plane $\mathbb{C}LE_{\kappa'}$ with ${\kappa'}\in [4,8)$. \begin{itemize} \item \operatornameeratorname{e}mph{(Inversion invariance)} The image of $\Gamma^{\kappa'}$ under $z\mapsto 1/z$ has the same law as $\Gamma^{\kappa'}$. \item \operatornameeratorname{e}mph{(Markov property)} Consider the collection of loops in $\Gamma^{\kappa'}$ that lie entirely inside $\mathbb{D}$ and surround $0$. Write $I_1^{\operatornameeratorname{e}ps}$ (with $\operatornameeratorname{e}ps=\operatornameeratorname{e}ps({\kappa'})$ as usual) for the connected component containing $0$ of the complement of the {outermost} loop in this collection. Write $\mathfrak{l}_2^\operatornameeratorname{e}ps$ for the second {outermost} loop in this collection. 
Then the image of $\mathfrak{l}_2^\operatornameeratorname{e}ps$ under the conformal map $I_1^\operatornameeratorname{e}ps\to \mathbb{D}$ sending $z$ to $0$ with positive derivative at $0$ has the same law as the outermost loop surrounding $0$ for a $\mathbb{C}LE_{\kappa'}$ in $\mathbb{D}$. \operatornameeratorname{e}nd{itemize} \operatornameeratorname{e}nd{lemma} \begin{proof} The inversion invariance is shown in \cite[Theorem 1.1]{KW16} for ${\kappa'}=4$ and \cite[Theorem 1.1]{GMQ18} for ${\kappa'}\in (4,8)$. The Markov property follows from \cite[Lemma 2.9]{GMQ18} when ${\kappa'}>4$ and \cite[Theorem 1]{KW16} when ${\kappa'}=4$. \operatornameeratorname{e}nd{proof}\\ Let us now state the convergence result that we will prove for whole plane CLE$_{\kappa'}$ as ${\kappa'}\downarrow 4$, and show how it implies Lemma \operatorname{Re}f{lem:convlooplaw}. For $\operatornameeratorname{e}ps>0$, we extend the above definitions and write $\mathfrak{l}_1^\operatornameeratorname{e}ps, \mathfrak{l}_2^\operatornameeratorname{e}ps$ for the largest and second largest whole plane $\mathbb{C}LE_{{\kappa'}}$ loops containing $0$, that are entirely contained in the unit disk. We let $I_i^\operatornameeratorname{e}ps$ be the connected component of $\mathbb{C}\setminus \mathfrak{l}_i^\operatornameeratorname{e}ps$ containing $0$ for $i=1,2$ and let $E_i^\operatornameeratorname{e}ps$ be the connected component containing $\infty$. When $\operatornameeratorname{e}ps=0$ we write $\mathfrak{l}_1,\mathfrak{l}_2$ for the corresponding loops of a whole plane $\mathbb{C}LE_4$, and $I_1,E_1, I_2,E_2$ for the corresponding domains containing $0$ and $\infty$. Note that in this case we have $\overline{I_i}=\mathbb{C}\setminus E_i$ and $\overline{E_i}=\mathbb{C}\setminus I_i$ for $i=1,2$. 
\begin{lemma}\label{lem:wholeplane} $(I_1^\operatornameeratorname{e}ps, E_1^\operatornameeratorname{e}ps, I_2^\operatornameeratorname{e}ps, E_2^\operatornameeratorname{e}ps)\mathbb{R}ightarrow (I_1, E_1, I_2, E_2)$ as $\operatornameeratorname{e}ps\to 0$, with respect to the product \\ Carath\'{e}odory (seen from $(0,\infty,0,\infty)$ in the four coordinates) topology. \operatornameeratorname{e}nd{lemma} \begin{proofof}{Lemma \operatorname{Re}f{lem:convlooplaw} given Lemma \operatorname{Re}f{lem:wholeplane}} Suppose that $(I_1^\operatornameeratorname{e}ps, \mathfrak{l}_1^\operatornameeratorname{e}ps)$ converges in law to $(I_1,\mathfrak{l})$ along some subsequence, with respect to the product (Carath\'{e}odory seen from 0 $\times$ Hausdorff) topology. By the above lemma, we can extend this convergence to the joint convergence of $(I_1^\operatornameeratorname{e}ps, \mathfrak{l}_1^\operatornameeratorname{e}ps, E_2^\operatornameeratorname{e}ps, I_2^\operatornameeratorname{e}ps)\to (I_1,\mathfrak{l},E_2, I_2)$. But then Corollary \operatorname{Re}f{cor:cart_inclusion} and Lemma \operatorname{Re}f{lem:cartfrominfinityprops} imply that $\mathfrak{l}\subseteq \mathbb{C}\setminus I_2=\overline{E_2}$ and $\mathfrak{l}\subseteq \mathbb{C}\setminus E_2= \overline{I_2}$ almost surely. This implies that $\mathfrak{l}\subseteq \mathfrak{l}_2=\partial(E_2)=\partial(I_2)$ almost surely. Moreover, it is not hard to see (using the definition of Hausdorff convergence) that $\mathfrak{l_2}\setminus \mathfrak{l}=\operatornameeratorname{e}mptyset$, else $\mathfrak{l}_2^\operatornameeratorname{e}ps$ would not disconnect $0$ from $\infty$ for small $\operatornameeratorname{e}ps$. So $\mathfrak{l}=\mathfrak{l}_2$ almost surely. Now consider, for each $\operatornameeratorname{e}ps$, the unique conformal map $g_1^\operatornameeratorname{e}ps:I_1^\operatornameeratorname{e}ps \to \mathbb{D}$ that sends $0\to 0$ and has $(g_1^\operatornameeratorname{e}ps)'(0)>0$. 
Then the above considerations imply that if $g_1^\operatornameeratorname{e}ps(\mathfrak{l}_2^\operatornameeratorname{e}ps)$ converges in law along some subsequence, with respect to the Hausdorff topology, then the limit must have the law of $g_1(\mathfrak{l}_2)$, where $g_1:I_1\to \mathbb{D}$ is defined in the same way as $g_1^\operatornameeratorname{e}ps$ but with $I^\operatornameeratorname{e}ps_1$ replaced by $I_1$. Since the law of $g_1^\operatornameeratorname{e}ps(\mathfrak{l}_2^\operatornameeratorname{e}ps)$ is the same as that of $\mathcal{L}_0^\operatornameeratorname{e}ps$ for every $\operatornameeratorname{e}ps$ and the law of $g_1(\mathfrak{l}_2)$ has the law of $\mathcal{L}_0$, this proves Lemma \operatorname{Re}f{lem:convlooplaw}.\operatornameeratorname{e}nd{proofof}\\ \begin{proofof}{Lemma \operatorname{Re}f{lem:keyforcleloopconv} and Proposition \operatorname{Re}f{prop:cleloopconv}} Combining Lemmas \operatorname{Re}f{cor:loopconvinclusion} and \operatorname{Re}f{lem:convlooplaw} yields Lemma \operatorname{Re}f{lem:keyforcleloopconv}. As explained previously, this implies Proposition \operatorname{Re}f{prop:cleloopconv}. \operatornameeratorname{e}nd{proofof}\\ So, we are left only to prove Lemma \operatorname{Re}f{lem:wholeplane}, concerning whole plane $\mathbb{C}LE$. We will build up to this with a sequence of lemmas: first proving convergence of nested $\mathbb{C}LE$ loops in very large domains, then transferring this to whole plane CLE, and finally appealing to inversion invariance to obtain the result. \begin{lemma}\label{lem:nestedcleconv} Fix $R>1$. 
For ${\kappa'}\in (4,8)$ and a $\mathbb{C}LE_{\kappappa'}$ in $R\mathbb{D}$, denote by $(l_i^\operatornameeratorname{e}ps)_{i\ge 1}$ the sequence of nested loops containing $0$, starting with the second smallest loop to \operatornameeratorname{e}mph{fully} enclose the unit disk (set equal to the boundary of $R\mathbb{D}$ if only one or no loops in $R\mathbb{D}$ actually surround $\mathbb{D}$) {and such that $l_i^\operatornameeratorname{e}ps$ surrounds $l_{i+1}^\operatornameeratorname{e}ps$ for all $i$}. Write $(b_i^\operatornameeratorname{e}ps)_{i\ge 1}$ for the connected components containing $0$ of the complements of the $(l_i^\operatornameeratorname{e}ps)_{i\ge 1}$. Then $(b_i^\operatornameeratorname{e}ps)_{i\ge 1}$ converges in law to its CLE$_4$ counterpart as $\operatornameeratorname{e}ps \to 0$, with respect to the product Carath\'{e}odory topology viewed from $0$. \operatornameeratorname{e}nd{lemma} \begin{proof} By Corollary \operatorname{Re}f{cor:convfullbranch} and scale invariance of CLE, together with the iterative nature of the construction of nested loops, we already know that the sequence of nested loops in $R\mathbb{D}$ containing $0$, starting from the outermost one, converges as $\operatornameeratorname{e}ps\to 0$, with respect to the product Carath\'{e}odory topology viewed from $0$. Taking a coupling where this convergence holds a.s., it suffices to prove that the index of the smallest loop containing the unit disk also converges a.s. This is a straightforward consequence of the kernel theorem - Lemma \operatorname{Re}f{lem:kernel} - plus the fact that the smallest $\mathbb{C}LE_4$ loop in $R\mathbb{D}$ that contains $\mathbb{D}$ actually contains $(1+r)\mathbb{D}$ for some strictly positive $r$ a.s. \operatornameeratorname{e}nd{proof} \begin{lemma} \label{lem:nestedwpcleconv} The statement of the above lemma holds true if we replace the CLEs in $R\mathbb{D}$ with whole plane versions. 
\operatornameeratorname{e}nd{lemma} \begin{proof} For fixed $\kappappa\in [4,8)$, let $\Gamma^\mathbb{C}$, $\Gamma^{R\mathbb{D}}$ denote whole plane CLE$_{\kappa'}$ and $\mathbb{C}LE_{\kappappa'}$ on $R\mathbb{D}$ respectively. The key to this lemma is Theorem 9.1 in \cite{MWW15}, which states (in particular) that $\Gamma^{R\mathbb{D}}$ rapidly converges to $\Gamma^\mathbb{C}$ in the following sense. For some $C,\alpha > 0$, $\Gamma^{R\mathbb{D}}$ and $\Gamma^\mathbb{C}$ can be coupled so that for any $r>0$ and $R>r$, with probability at least $1-C(R/r)^{-\alpha}$, there is a conformal map $\varphi$ from some $D\supset (R/r)^{1/4}\mathbb{D}$ to $D'\supset (R/r)^{1/4}\mathbb{D}$, which maps the nested loops of $\Gamma^{R\mathbb{D}}$ - starting with the smallest containing $r\mathbb{D}$ - to the corresponding nested loops of $\Gamma^\mathbb{C}$, and has low distortion in the sense that $|\varphi'(z)-1|\le C(R/r)^{-\alpha}$ on $R^{1/4}\mathbb{D}$. In fact, it is straightforward to see that $C$ and $\alpha$ (which in principle depend on $\kappappa$) may be chosen uniformly for $\kappappa\in [4,6]$ (say). Indeed, {it follows from the proof in \cite{MWW15} that} they depend only on the law of the log conformal radius of the outermost loop containing $0$ for a $\mathbb{C}LE_{\kappa'}$ in $\mathbb{D}$, and this varies continuously in $\kappappa$, {\cite{SSW09}}. 
Hence, the result follows by letting $R\to \infty$ in Lemma \operatorname{Re}f{lem:nestedcleconv} {and noting that the second smallest loop containing $\mathbb{D}$ is contained in $r\mathbb{D}$ with arbitrarily high probability as $r\to \infty$, uniformly in $\kappappa$.} \operatornameeratorname{e}nd{proof} \begin{proofof}{Lemma \operatorname{Re}f{lem:wholeplane}} Lemmas \operatorname{Re}f{lem:nestedwpcleconv} and \operatorname{Re}f{lem:wpcle_props} (inversion invariance) imply that $(I_1^\operatornameeratorname{e}ps,I_2^\operatornameeratorname{e}ps)\mathbb{R}ightarrow (I_1,I_2)$ and $(E_1^\operatornameeratorname{e}ps, E_2^\operatornameeratorname{e}ps)\mathbb{R}ightarrow (E_1, E_2)$ as $\operatornameeratorname{e}ps\to 0$. This ensures that $(I_1^\operatornameeratorname{e}ps, E_1^\operatornameeratorname{e}ps, I_2^\operatornameeratorname{e}ps, E_2^\operatornameeratorname{e}ps)$ is tight in $\operatornameeratorname{e}ps$, so we need only prove that if $(I_1, \hat{E}_1, I_2, \hat{E}_2)$ is a subsequential limit of $(I_1^\operatornameeratorname{e}ps, E_1^\operatornameeratorname{e}ps, I_2^\operatornameeratorname{e}ps, E_2^\operatornameeratorname{e}ps)$, then $\hat{E}_1=E_1=\mathrm{int}(\mathbb{C}\setminus I_1)$ and $\hat{E}_2=E_2=\mathrm{int}(\mathbb{C}\setminus I_2)$ almost surely. Note that $(\hat{E}_1,\hat{E}_2)$ has the same law as $(E_1, E_2)$, and since $I_1^\operatornameeratorname{e}ps\subseteq \mathbb{C}\setminus E_1^\operatornameeratorname{e}ps$ for all $\operatornameeratorname{e}ps$, \corr{Lemma \operatorname{Re}f{lem:cartfrominfinityprops}} implies that $I_1\subseteq \mathbb{C}\setminus \hat{E}_1$. In other words $ \hat{E}_1\subseteq E_1$ almost surely. Then because $\hat{E}_1$ and $E_1$ have the same law, we may deduce that they are equal almost surely. Similarly we see that $\hat{E}_2=E_2$ almost surely. 
\operatornameeratorname{e}nd{proofof} \subsubsection{Conclusion} Recall that for $z\in \mathbb{D}$, $(\mathcal{B}_{z,i}^\operatornameeratorname{e}ps,\mathcal{L}_{z,i}^\operatornameeratorname{e}ps)_{i\ge 1}$ (resp. $(\mathcal{B}_{z,i},\mathcal{L}_{z,i})_{i\ge 1}$) denotes the sequence of nested $\mathbb{C}LE_{\kappappa'}$ (resp. $\mathbb{C}LE_4$) bubbles and loops containing $z$. By the Markov property and iterative nature of the construction, it is immediate from Proposition \operatorname{Re}f{prop:cleloopconv} that: \begin{corollary} \label{cor:cleloopconv} For fixed $z\in \mathcal{Q}$ $$({\mathbf{D}}^\operatornameeratorname{e}ps_z, (\mathcal{L}^\operatornameeratorname{e}ps_{z,i})_{i\ge 1}, (\mathcal{B}^\operatornameeratorname{e}ps_{z,i})_{i\ge 1}) \mathbb{R}ightarrow ({\mathbf{D}}_z, (\mathcal{L}_{z,i})_{i\ge 1}, (\mathcal{B}_{z,i})_{i\ge 1})$$ as $\operatornameeratorname{e}ps\downarrow 0$, with respect to the product topology generated by ($\mathcal{D}_z$ $\times$ $\prod$ Hausdorff $\times$ $\prod$ Carath\'{e}odory viewed from $z$) convergence. \operatornameeratorname{e}nd{corollary} \section{The uniform space-filling SLE$_4$} \label{sec:conv_order} In this section we show that the ordering on points (with rational co-ordinates) in the disk, induced by space-filling $\SLE_{\kappa'}$ with ${\kappa'}>4$, converges to a limiting ordering as ${\kappa'}\downarrow 4$. We call this the uniform space-filling SLE$_4$.\footnote{This name is partially inspired from the fact that the process is constructed via a uniform CLE$_4$ exploration, and partly since, every time the domain of exploration is split into two components, the components are ordered uniformly at random.} Nonetheless, we can describe explicitly the law of this ordering, which for any two fixed points comes down to the toss of a fair coin. 
{As for $\kappa' > 4$, there would be other ways to define a space-filling SLE$_4$ process, by considering different explorations of CLE$_4$.} Let us now recall some notation in order to properly state the result. For $\eps\in (0,2-\sqrt{2})$ and $z,w\in \mathcal{Q}$, we define $\mathcal{O}_{z,w}^\eps$ to be the indicator function of the event that the space-filling $\SLE_{\kappa'}$ $\eta^\eps$ hits $z$ before $w$ (see Section \ref{sec:sf_sle}). By convention we set this equal to $1$ when $z=w$. To describe the limit as ${\kappa'}\downarrow 4$, we define $\mathcal{O}=(\mathcal{O}_{z,w})_{z,w\in \mathcal{Q}}$ to be a collection of random variables, coupled with $(\mathbf{D}_z)_{z\in\mathcal{Q}}$ such that \emph{conditionally given $(\mathbf{D}_z)_{z\in\mathcal{Q}}$}: \begin{itemize} \itemsep0em \item $\mathcal{O}_{z,z}=1$ for all $z\in \mathcal{Q}$ a.s.; \item $\mathcal{O}_{z,w}$ is a Bernoulli($\frac{1}{2}$) random variable for all $z,w\in\mathcal{Q}$ with $z\ne w$; \item $\mathcal{O}_{z,w}=1-\mathcal{O}_{w,z}$ for all $z,w\in\mathcal{Q}$ with $z\ne w$ a.s.; \item for all $z,w_1,w_2\in \mathcal{Q}$ with $z\ne w_1, w_2$, if $\mathbf{D}_z$ separates $z$ from $w_2$ at the same time as it separates $z$ from $w_1$ then $\mathcal{O}_{z,w_1}=\mathcal{O}_{z,w_2}$, otherwise $\mathcal{O}_{z,w_1}$ and $\mathcal{O}_{z,w_2}$ are independent. \end{itemize} \begin{lemma} \label{lem:defo}There is a unique joint law on $((\mathbf{D}_z)_{z\in \mathcal{Q}},\mathcal{O})$ satisfying the above requirements, and such that the marginal law of $(\mathbf{D}_z)_{z\in \mathcal{Q}}$ is that of a branching uniform $\CLE_4$ exploration. With this law, $\mathcal{O}$ a.s.\ defines an order on any finite subset of $\mathcal{Q}$ by declaring that $z\preceq w$ iff $\mathcal{O}_{z,w}=1$.
\operatornameeratorname{e}nd{lemma} We will prove the lemma in just a moment. The main result of this section is the following. \begin{proposition}\label{prop:convbranchingsleorder} $(({\mathbf{D}}^{\operatornameeratorname{e}ps}_z)_{z\in \mathcal{Q}},(\mathcal{O}^\operatornameeratorname{e}ps_{z,w})_{z,w\in \mathcal{Q}})$ converges to $(({\mathbf{D}}_z)_{z\in \mathcal{Q}},(\mathcal{O}_{z,w})_{z,w\in \mathcal{Q}})$, in law as $\operatornameeratorname{e}ps\downarrow 0$, with respect to the product topology $\left(\prod_{\mathcal{Q}} \mathcal{D}_z \,\times \, \prod_{\mathcal{Q}\times \mathcal{Q}} \text{discrete}\right)$, where $(\mathcal{O}_{z,w})_{z,w\in \mathcal{Q}}$ is as defined in Lemma \operatorname{Re}f{lem:defo}. \operatornameeratorname{e}nd{proposition} \begin{proofof}{Lemma \operatorname{Re}f{lem:defo}} The main observation is that if a joint law $((\mathbf{D}_z)_{z\in \mathcal{Q}},\mathcal{O})$ as in the lemma exists, then for all $z,w,y\in \mathcal{Q}$ we a.s.\ have \begin{equation}\label{order_prop}\{\mathcal{O}_{z,w}=1\}\cap \{\mathcal{O}_{w,y}=1\}\mathbb{R}ightarrow \{\mathcal{O}_{z,y}=1\}.\operatornameeratorname{e}nd{equation} To verify this, we assume that $z,w,y$ are distinct (else the statement is trivial) with $\mathcal{O}_{z,w}=1$ and $\mathcal{O}_{w,y}=1$. Since $\mathcal{O}_{w,z}=1-\mathcal{O}_{z,w}=0$ this implies that $y$ and $z$ are \operatornameeratorname{e}mph{not} separated from $w$ by $\mathbf{D}_w$ at the same time. If $\mathbf{D}_w$ separates $z$ from $w$ strictly before separating $y$ from $w$, then $\mathbf{D}_z$ separates $y$ and $w$ from $z$ at the same time, so $\mathcal{O}_{z,y}=\mathcal{O}_{z,w}=1$. If $\mathbf{D}_w$ separates $y$ from $w$ strictly before separating $z$ from $w$, then $\mathbf{D}_y$ separates $z$ and $w$ from $y$ at the same time, so $\mathcal{O}_{z,y}=1-\mathcal{O}_{y,z}=1-\mathcal{O}_{y,w}=\mathcal{O}_{w,y}=1$. In either case it must be that $\mathcal{O}_{z,y}=1$. 
We now show why this implies that for any $\{z_1, \cdots, z_k\}$ with $z_i\in \mathcal{Q}$ distinct, there exists a unique conditional law on $(\mathcal{O}_{z_i,z_j})_{1\le i,j\le k}$ given $(\mathbf{D}_z)_{z\in \mathcal{Q}}$, satisfying the requirements of the lemma. We argue by induction on the number of points. Indeed, suppose it is true with $1\le k \le n-1$ for some $n$ and take $\{z_1,\cdots, z_n\}$ in $\mathcal{Q}$ distinct. We construct the conditional law of $(\mathcal{O}_{z_i,z_j})_{1\le i,j\le n}$ given $(\mathbf{D}_z)_{z\in \mathcal{Q}}$ as follows. \begin{itemize} \item To define $(\mathcal{O}_{z_1,z_i})_{1\le i \le n}$: \begin{itemize}\item partition the indices $\{2,\cdots, n\}$ into equivalence classes $\{C_1,\dots, C_K\}$ such that $i\sim j$ iff $\mathbf{D}_{z_1}$ separates $z_1$ from $z_i$ and $z_j$ at the same time; \item for each equivalence class sample an independent Bernoulli$(1/2)$ random variable; \item set $\mathcal{O}_{z_1,z_i}$ to be the random variable associated with class $[i]$ for every $i$. \end{itemize} \item Given $(\mathcal{O}_{z_1,z_i})_{1\le i \le n}$ and $(\mathbf{D}_z)_{z\in \mathcal{Q}}$, define $\mathcal{O}_{z_i,z_j}$ with $[i]\ne [j]$ by setting it equal to $\mathcal{O}_{z_1,z_j}$ if $z_i$ and $z_1$ are separated from $z_j$ at the same time, or $\mathcal{O}_{z_1,z_i}$ if $z_j$ and $z_1$ are separated from $z_i$ at the same time. \item For each $1\le l\le K$ consider the connected component $U_l\subset \mathbb{D}$ in the branching $\CLE_4$ exploration that contains the points $z_i$ with $[i]=C_l$ when they are separated from $z_1$. The $\CLE_4$ explorations inside these components are mutually independent, independent of the $\CLE_4$ exploration before this separation time, and each has the same law as $(\mathbf{D}_z)_{z\in \mathcal{Q}}$ after mapping to the unit disk.
Thus since each equivalence class contains strictly less than $n$ points, using the induction hypothesis, we can define $(\mathcal{O}_{z_i,z_j})_{i\ne j, [i]=[j]=C_l}$ for $1\le l\le K$ such that: \begin{itemize} \item the collections for different $l$ are mutually independent; \item $(\mathcal{O}_{z_i,z_j})_{i\ne j, [i]=[j]=C_l}$ for each $l$ is independent of the CLE$_4$ exploration outside of $U_l$, and after conformally mapping everything to the unit disk, is coupled with the exploration inside $U_l$ as in the statement of Lemma \ref{lem:defo}. \end{itemize} \end{itemize} Using the induction hypothesis, it is straightforward to see that this defines a conditional law on $(\mathcal{O}_{z_i,z_j})_{1\le i\ne j\le n}$ given $(\mathbf{D}_z)_{z\in \mathcal{Q}}$ that satisfies the conditions of the lemma. Moreover, note that the first two bullet points above, together with \eqref{order_prop}, define the law of $(\mathcal{O}_{z_1,z_j})_{1\le j\le n}$ and $(\mathcal{O}_{z_i,z_j})_{[i]\ne [j]}$ (satisfying the requirements) uniquely. Combining with the uniqueness in the induction hypothesis, it follows easily that the conditional law of $(\mathcal{O}_{z_i,z_j})_{1\le i\ne j\le n}$ given $(\mathbf{D}_z)_{z\in \mathcal{Q}}$ (satisfying the requirements) is unique. Consequently, given $(\mathbf{D}_z)_{z\in \mathcal{Q}}$, there exists a unique conditional law on the product space $\{0,1\}^{\mathcal{Q}\times \mathcal{Q}}$ equipped with the product $\sigma$-algebra, such that if $\mathcal{O}=(\mathcal{O}_{z,w})_{z,w\in \mathcal{Q}}$ has this law then it satisfies the conditions above Lemma \ref{lem:defo}. This concludes the existence and uniqueness statement of the lemma. The property \eqref{order_prop} implies that $\mathcal{O}$ does a.s.\ define an order on any finite subset of $\mathcal{Q}$.
\operatornameeratorname{e}nd{proofof}\\ In the coming subsections we will prove Proposition \operatorname{Re}f{prop:convbranchingsleorder}. Since tightness of all the random variables in question is immediate (either by definition or from our previous work) it suffices to characterize any limiting law. We begin in Section \operatorname{Re}f{sec:twopoints} by showing this {for the order of two points}; see just below for an outline of the strategy. Then, we will prove that the \operatornameeratorname{e}mph{time} at which they are separated by the $\SLE_{\kappa'}({\kappa'}-6)$ converges (for the $-\log \mathbb{C}R$ parameterization with respect to either of the points). This is important for characterizing joint limits, when there are three or more points being considered. It also turns out to be non-trivial, due to pathological behavior that cannot be ruled out when one only knows convergence of the SLE branches in the spaces $\mathcal{D}_z$. We conclude the proof in a third subsection, and finally combine this with the results of Section \operatorname{Re}f{sec:conv_clesle} to summarize the ``Euclidean'' part of this article in Proposition \operatorname{Re}f{prop:cle-conv}. \subsection{Convergence of order for two points}\label{sec:twopoints} In this section we show that for two distinct points $z,w\in \mathbb{D}$, the law of the order in which they are visited by the space-filling SLE$_{\kappa'}$ $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps$, converges to the result of a fair coin toss as ${\kappa'}\downarrow 4$. That is, $\mathcal{O}_{z,w}^\operatornameeratorname{e}ps$ converges to a Bernoulli$(1/2)$ random variable as $\operatornameeratorname{e}ps\downarrow 0$. 
The rough outline of the proof is as follows. Recall that $\eta^\eps$ is determined by an $\SLE_{\kappa'}({\kappa'}-6)$ branching tree, in which $\eta^\eps_z$ denotes the SLE$_{\kappa'}({\kappa'}-6)$ branch towards $z$ (parameterized according to minus log conformal radius as seen from $z$). If we consider the time $\sigma^\eps_{z,w}$ at which $\eta^\eps_z$ separates $z$ and $w$, then for every $\eps>0$, $\mathcal{O}_{z,w}^\eps$ is actually measurable with respect to $\eta^\eps_z([0,\sigma^\eps_{z,w}])$. So what we are trying to show is that this measurability turns to independence in the $\eps\downarrow 0$ limit. This means that we will not get very far if we consider the conditional law of $\mathcal{O}_{z,w}^\eps$ given $\eta^\eps_z([0,\sigma^\eps_{z,w}])$, so instead we have to look at times just before $\sigma_{z,w}^\eps$. Namely, we will consider the times $\sigma_{z,w,\delta}^\eps$ at which $w$ is first sent to within distance $\delta$ of the boundary by the Loewner maps associated with $\eta^\eps_z$. We will show that for any \emph{fixed} $\delta\in (0,1)$, the conditional probability that $\mathcal{O}_{z,w}^\eps=1$, given $\eta_z^\eps([0,\sigma_{z,w,\delta}^\eps])$, converges to $1/2$ as $\eps\to 0$. Knowing this for every $\delta$ allows us to reach the desired conclusion.
To show that these conditional probabilities do tend to $1/2$ for fixed $\delta$, we apply the Markov property at time $\sigma_{z,w,\delta}^\operatornameeratorname{e}ps$. This tells us that after mapping $(\mathbf{D}^\operatornameeratorname{e}ps_z)_{\sigma_{z,w,\delta}^\operatornameeratorname{e}ps}$ to the unit disc, the remainder of $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_z$ evolves as a radial $\SLE_{\kappa'}({\kappa'}-6)$ with a force point somewhere on the unit circle. And we know the law of this curve: initially it evolves as a chordal $\SLE_{\kappa'}$ targeted at the force point, and after the force point is swallowed, it evolves as a radial $\SLE_{\kappa'}({\kappa'}-6)$ in the to be discovered domain with force point starting adjacent to the tip. So we need to show that for such a process, the behavior is ``symmetric'' in an appropriate sense. In fact, we have to deal with two scenarios, according to whether the images of $z$ and $w$ are separated or not when the force point is swallowed. If they are separated, our argument becomes a symmetry argument for chordal $\SLE_{\kappa'}$. If they are not, our argument becomes a symmetry argument for space-filling $\SLE_{\kappa'}$. For a more detailed outline of the strategy, and the bulk of the proof, see Lemma \operatorname{Re}f{lem:1/2}. At this point, let us just record the required symmetry property of space-filling $\SLE_{\kappa'}$ in the following lemma. \begin{lemma}\label{lem:pre-1/2} Let $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps$ be a space-filling $\SLE_{{\kappa'}(\operatornameeratorname{e}ps)}$ in $\mathbb{D}$, as above. Then for any $x\in \mathbb{D}$: $$\mathbb{P}(\operatornameeratorname{e}ta^\operatornameeratorname{e}ps \text{ hits } 0 \text{ before } x) \to \frac{1}{2} \text{ as } \operatornameeratorname{e}ps\to 0.$$ \operatornameeratorname{e}nd{lemma} \begin{proof} For this we use a conformal invariance argument. 
Namely, we notice that by conformal invariance of $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps$, applying the map $$z\mapsto \frac{1-\bar{x}}{1-x}\frac{z-x}{1-\bar{x}z}$$ from $\mathbb{D}$ to $\mathbb{D}$ that sends $1$ to $1$ and $x$ to $0$, we have $$ \mathbb{P}[\operatornameeratorname{e}ta^\operatornameeratorname{e}ps \text{ hits } 0 \text{ before } x] = \mathbb{P}[\operatornameeratorname{e}ta^\operatornameeratorname{e}ps \text{ hits } \hat{x} \text{ before } 0]= 1- \mathbb{P}[\operatornameeratorname{e}ta^\operatornameeratorname{e}ps \text{ hits } 0\text{ before } \hat{x} ]$$ where $\hat{x}=-x(1-\bar{x})(1-x)^{-1}$ is the image of $0$ under the conformal map, and $|\hat{x}|=|x|$. Hence it suffices to show that $$ \mathbb{P}[\operatornameeratorname{e}ta^\operatornameeratorname{e}ps \text{ hits } 0 \text{ before } x] - \mathbb{P}[\operatornameeratorname{e}ta^\operatornameeratorname{e}ps \text{ hits } 0 \text{ before } \hat{x}]\to 0$$ as $\operatornameeratorname{e}ps\to 0$. By rotational invariance, if we write $\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_{\theta}$ for a space-filling SLE$_{\kappa'}$ starting at $\operatornameeratorname{e}^{i\theta}$, then it is enough to show that $$ \mathbb{P}[\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_{\theta} \text{ hits } 0 \text{ before } |x|]-\mathbb{P}[\operatornameeratorname{e}ta^\operatornameeratorname{e}ps_{0} \text{ hits } 0 \text{ before } |x|]\to 0$$ as $\operatornameeratorname{e}ps\to 0$, for any $\theta\in[0,2\pi]$. However, this is easily justified, because we can couple an $\SLE_{\kappappa'}({\kappa'}-6)$ from $1$ to $0$ and another from $\operatornameeratorname{e}^{i\theta}$ to $0$, so that they successfully couple (i.e., coincide for all later times) before $0$ is separated from $|x|$ with arbitrarily high probability (uniformly in $\theta$) as ${\kappa'}\downarrow 4$. 
This follows from Lemma \operatorname{Re}f{lem:uniform_equation}, target invariance of the SLE$_{\kappa'}({\kappa'}-6)$ and \operatornameeratorname{e}qref{eq:tente}; i.e., because in an arbitrarily small amount of time as ${\kappa'}\downarrow 4$, the $\SLE_{\kappappa'}({\kappa'}-6)$ will have swallowed every point on $\partial \mathbb{D}$. \operatornameeratorname{e}nd{proof}\\ Now we proceed with the set-up for the main result of this section (Proposition \operatorname{Re}f{prop:order_simple} below). Recall that ${\mathbf{D}}_z\in {\mathcal{D}}$ is the sequence of domains formed by the branch of the uniform CLE$_4$ exploration towards $z$ in $\mathbb{D}$. For $w\ne z$, we write $\sigma_{z,w}$ for the first time that ${\mathbf{D}}_z$ separates $z$ from $w$ and let $\mathcal{O}_{z,w}$ be a Bernoulli random variable (taking values $\{0,1\}$ each with probability $1/2$) that is independent of $\{({\mathbf{D}}_z)_t \, ; \, t\in [0,\sigma_{z,w}]\}$. We define elements $${\mathrm{D}}^\operatornameeratorname{e}ps_{z,w}=\{ ({\mathbf{D}}^\operatornameeratorname{e}ps_{z})_{t\wedge \sigma_{z,w}^\operatornameeratorname{e}ps}\, ; \, t\ge 0\} \text{ and } {\mathrm{D}}_{z,w}=\{ ({\mathbf{D}}_{z})_{t\wedge \sigma_{z,w}}\, ; \, t\ge 0\}$$ of $\mathcal{D}$. These are, respectively, the domain sequences formed by the $\SLE_{\kappa'}({\kappa'}-6)$ and the uniform $\mathbb{C}LE_4$ exploration branches towards $z$, stopped when $z$ and $w$ become separated. By definition, they are parameterized such that $-\log \mathbb{C}R(0;({\mathrm{D}}_{z,w}^\operatornameeratorname{e}ps)_{t})=t\wedge \sigma_{z,w}^\operatornameeratorname{e}ps$ for all $t$. \begin{proposition} \label{prop:order_simple} Fix $z\ne w\in \mathcal{Q}$. 
Then if $({\mathbf{D}},\mathcal{O})$ is a subsequential limit in law of $({\mathbf{D}}^\operatornameeratorname{e}ps_z,\mathcal{O}^\operatornameeratorname{e}ps_{z,w})$ (with respect to the product $\mathcal{D}_z$ $ \times$ discrete topology), $({\mathbf{D}},\mathcal{O})$ must satisfy the following property. If ${{\mathrm{D}}}$ is equal to ${\mathbf{D}}$ stopped at the first time that $w$ is separated from $z$, then $$({{\mathrm{D}}},\mathcal{O}) \overset{(law)}{=}({\mathrm{D}}_{z,w},\mathcal{O}_{z,w}).$$ \operatornameeratorname{e}nd{proposition} \noindent Note that this \operatornameeratorname{e}mph{does not} yet imply that the times at which $z$ and $w$ are separated converge.\\ To set up for the proof of this proposition, we define for $\operatornameeratorname{e}ps,\delta>0$, ${\sigma}^\operatornameeratorname{e}ps_{z,w,\delta}$ to be the first time $t$ that, under the conformal map $g_t[{\mathrm{D}}_z^\operatornameeratorname{e}ps]$, the image of $w$ is at distance $\delta$ from $\partial \mathbb{D}$. See Figure \operatorname{Re}f{fig:almost_sep} for an illustration. Define $\sigma_{z,w,\delta}$ in the same way for $\operatornameeratorname{e}ps=0$. Write ${\mathrm{D}}_{z,w,\delta}^\operatornameeratorname{e}ps$ and ${\mathrm{D}}_{z,w,\delta}$ for the same things as ${\mathrm{D}}_{z,w}^\operatornameeratorname{e}ps$ and ${\mathrm{D}}_{z,w}$, but with the time now cut off at $\sigma_{z,w,\delta}^\operatornameeratorname{e}ps$ and $\sigma_{z,w,\delta}$ respectively. \begin{lemma}\label{lem:Dzw} \begin{enumerate}[(a)] \item $({\mathrm{D}}_{z,w,\delta}^\operatornameeratorname{e}ps,\sigma_{z,w,\delta}^\operatornameeratorname{e}ps) \mathbb{R}ightarrow ({\mathrm{D}}_{z,w,\delta},\sigma_{z,w,\delta})$ as $\operatornameeratorname{e}ps\to 0$ for every fixed $\delta>0$. 
\item $({\mathrm{D}}_{z,w,\delta},\sigma_{z,w,\delta})\mathbb{R}ightarrow ({\mathrm{D}}_{z,w},\sigma_{z,w})$ as $\delta\to 0$ \operatornameeratorname{e}nd{enumerate} \operatornameeratorname{e}nd{lemma} \begin{proof} For (a) we use that ${\mathbf{D}}^\operatornameeratorname{e}ps_{z}\mathbb{R}ightarrow {\mathbf{D}}_z$ in $\mathcal{D}_z$. Taking a coupling $({\mathbf{D}}_z,({\mathbf{D}}^{\operatornameeratorname{e}ps}_z)_{\operatornameeratorname{e}ps>0})$ such that this convergence is almost sure, it is clear from the definition of convergence in $\mathcal{D}_z$ that, under this coupling, $({\mathrm{D}}_{z,w,\delta}^\operatornameeratorname{e}ps,\sigma_{z,w,\delta}^\operatornameeratorname{e}ps)\to ({\mathrm{D}}_{z,w,\delta},\sigma_{z,w,\delta})$ almost surely for every $\delta>0$. Statement (b) holds because $\sigma_{z,w,\delta}\to \sigma_{z,w}$ a.s.\ as $\delta\to 0$. Indeed, $\sigma_{z,w,\delta}$ is almost surely increasing in $\delta$ and bounded above by $\sigma_{z,w}$ so must have a limit $\sigma^*\le \sigma_{z,w}$ as $\delta\to 0$. On the other hand, $w$ cannot be mapped anywhere at positive distance from the boundary under $g_{\sigma^*}[{\mathbf{D}}_z]$, so it must be that $\sigma^*\ge \sigma_{z,w}$. \operatornameeratorname{e}nd{proof}\\ Thus we can reduce the proof of Proposition \operatorname{Re}f{prop:order_simple} to the following lemma. \begin{lemma}\label{lem:cond_exp_ssl} For any continuous bounded function $F$ with respect to $\mathcal{D}_z$, and any fixed $\delta>0$, we have that \[ \mathbb{E}[\mathcal{O}^\operatornameeratorname{e}ps_{z,w} F({\mathrm{D}}^\operatornameeratorname{e}ps_{z,w,\delta})] \to \frac{1}{2} \mathbb{E}[F({\mathrm{D}}_{z,w,\delta})]\] as $\operatornameeratorname{e}ps\to 0$. \operatornameeratorname{e}nd{lemma} \begin{proofof}{Proposition \operatorname{Re}f{prop:order_simple} given Lemma \operatorname{Re}f{lem:cond_exp_ssl}} Consider a subsequential limit as in Proposition \operatorname{Re}f{prop:order_simple}. 
Write $\widetilde{{\mathrm{D}}}_\delta$ for ${\mathbf{D}}$ stopped at the first time that $w$ is sent within distance $\delta$ of $\partial \mathbb{D}$ under the Loewner flow. Then it is clear (by taking a coupling where the convergence holds a.s.) that $(\widetilde{{\mathrm{D}}}_\delta, \mathcal{O})$ is equal to the limit in law of $({\mathrm{D}}^\eps_{z,w,\delta}, \mathcal{O}_{z,w}^\eps)$ as $\eps\to 0$ along the subsequence. On the other hand, Lemma \ref{lem:cond_exp_ssl} implies that the law of such a limit is that of ${\mathrm{D}}_{z,w,\delta}$ together with an independent Bernoulli random variable. Indeed, any continuous bounded function with respect to the product topology on $\mathcal{D}_z \times \{0,1\}$ is of the form $({\mathrm{D}},x)\to \mathbbm{1}_{\{x=1\}} F({\mathrm{D}})+\mathbbm{1}_{\{x=0\}} G({\mathrm{D}})$ for $F,G$ bounded and continuous with respect to $\mathcal{D}_z$. Moreover, $\mathbbm{1}_{\{x=0\}}G=G-\mathbbm{1}_{\{x=1\}}G$ and we already know that $\mathbb{E}[G({\mathrm{D}}_{z,w,\delta}^\eps)] \to \mathbb{E}[G({\mathrm{D}}_{z,w,\delta})]$ as $\eps \to 0$. So $(\tilde{\mathrm{D}}_\delta,\mathcal{O})$ has the law of ${\mathrm{D}}_{z,w,\delta}$ plus an independent Bernoulli random variable for each $\delta>0$. Combining with (b) of Lemma \ref{lem:Dzw} yields the proposition. \end{proofof} \\ The proof of Lemma \ref{lem:cond_exp_ssl} will take up the remainder of this subsection. An important ingredient is the following result of \cite{KS17}, about the convergence of $\SLE_{\kappa'}$ to $\SLE_4$ as ${\kappa'}\downarrow 4$. \begin{theorem}[Theorem 1.10 of \cite{KS17}]\label{KS} Chordal $\SLE_{\kappa'}$ between two boundary points in the disk converges in law to chordal $\SLE_4$ as ${\kappa'}\downarrow 4$. 
This is with respect to supremum norm on curves viewed up to time reparameterization. \operatornameeratorname{e}nd{theorem} \begin{proofof}{Lemma \operatorname{Re}f{lem:cond_exp_ssl}} Since $F$ is bounded, subsequential limits of $\mathbb{E}[\mathcal{O}^\operatornameeratorname{e}ps_{z,w} F({\mathrm{D}}_{z,w,\delta}^\operatornameeratorname{e}ps)]$ always exist. Therefore, we only need to show that such a limit must be equal to $(1/2) \mathbb{E}[F({\mathrm{D}}_{z,w,\delta})]$. \begin{figure} \centering \includegraphics[width=\textwidth]{almost_sep} \caption{\operatornameeratorname{e}mph{The SLE$_{\kappa'}({\kappa'}-6)$ branch $\operatornameeratorname{e}ta_z^\operatornameeratorname{e}ps$, run up to time $\sigma_{z,w,\delta}^\operatornameeratorname{e}ps$. This is the first time that under the Loewner map, $w$ is sent within distance $\delta$ of the boundary. The future of the curve has image $\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps$ under this map, and is an SLE$_{\kappa'}({\kappa'}-6)$ starting from $x_1=\operatornameeratorname{e}ta_z^\operatornameeratorname{e}ps(\sigma_{z,w,\delta}^\operatornameeratorname{e}ps)$ with a force point at $x_2\in \partial \mathbb{D}$. $z$ is visited before $w$ by the original space-filling $\SLE_{\kappa'}$ iff when $\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps$ separates $0$ and $w'$ (the image of $w$), the component containing $0$ is ``monocolored''.}} \label{fig:almost_sep} \operatornameeratorname{e}nd{figure} For this, we apply the map $g_{\sigma^\operatornameeratorname{e}ps_{z,w,\delta}}[{\mathbf{D}}_z^\operatornameeratorname{e}ps]$: recall that this is the unique conformal map from $({\mathbf{D}}_z^\operatornameeratorname{e}ps)_{\sigma_{z,w,\delta}^\operatornameeratorname{e}ps}$ to $\mathbb{D}$ that sends $z$ to $0$ and has positive real derivative at $z$, see Figure \operatorname{Re}f{fig:almost_sep}. We then use the Markov property of $\SLE_{\kappappa'}({\kappa'}-6)$. 
This tells us that conditionally on ${\mathrm{D}}^\eps_{z,w,\delta}$, the image of $\eta^\eps_z$ under this map is that of an $\SLE_{\kappa'}({\kappa'}-6)$ started at some $x_1\in \partial \mathbb{D}$ with a force point at $x_2\in \partial \mathbb{D}$ (where $x_1,x_2$ are measurable with respect to ${\mathrm{D}}^\eps_{z,w,\delta}$). Let us call this curve $\widetilde{\eta}^\eps$. Let $w'$ be the image of $w$ under $g_{\sigma^\eps_{z,w,\delta}}[{\mathbf{D}}_z^\eps]$, which is also measurable with respect to ${\mathrm{D}}^\eps_{z,w,\delta}$ and has $|w'|=1-\delta$ a.s.\ Then the conditional expectation of $\mathcal{O}^\eps_{z,w}$ given ${\mathrm{D}}_{z,w,\delta}^\eps$ can be written as a probability for $\widetilde{\eta}^\eps$. Namely, it is just the probability that when $\widetilde{\eta}^\eps$ first separates $w'$ and $0$, the component containing $0$ either has boundary made up entirely of the left hand side of $\widetilde{\eta}^\eps$ and the clockwise arc from $x_1$ to $x_2$, or the right hand side of $\widetilde{\eta}^\eps$ and the complementary counterclockwise arc. We denote this event for $\widetilde{\eta}^\eps$ by $\mathcal{A}^\eps$. Therefore, by dominated convergence, Lemma \ref{lem:cond_exp_ssl} follows from Lemma \ref{lem:1/2} stated and proved below. 
\end{proofof} \begin{lemma}\label{lem:1/2} Let $\widetilde{\eta}^\eps$ be an $\SLE_{\kappa'}({\kappa'}-6)$ started at some $x_1\in \partial \mathbb{D}$ with a force point at $x_2\in \partial \mathbb{D}$. Fix $w'\in\mathbb{D}$. Let $\mathcal{A}^\eps$ be the event that when $\widetilde{\eta}^\eps$ first separates $w'$ and $0$, the component containing $0$ either has boundary made up entirely of the left hand side of $\widetilde{\eta}^\eps$ and the clockwise arc from $x_1$ to $x_2$, or the right hand side of $\widetilde{\eta}^\eps$ and the complementary counterclockwise arc. Then \begin{equation} \label{eq:1/2} \mathbb{P}(\mathcal{A}^\eps) \to \frac{1}{2} \text{ as } \eps\to 0 \; \textrm{(equivalently as ${\kappa'}\downarrow 4$)}.\end{equation} \end{lemma} Another way to describe the event $\mathcal{A}^\eps$ is the following. If the clockwise boundary arc from $x_1$ to $x_2$ together with the left hand side of $\widetilde{\eta}^\eps$ is colored red, and the counterclockwise boundary arc together with the right hand side of $\widetilde{\eta}^\eps$ is colored blue (as in Figures \ref{fig:almost_sep} and \ref{fig:almost_sep_2}) then $\mathcal{A}^\eps$ is the event that when $0$ and $w'$ are separated, the component containing $0$ is ``monocolored''. 
\\ \operatornameeratorname{e}mph{Outline for the proof of Lemma \operatorname{Re}f{lem:1/2}.} Note that until the first time that $0$ is separated from $x_2$, $\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps$ has the law (up to time reparameterization) of a chordal $\SLE_{\kappappa'}$ from $x_1$ to $x_2$ in $\mathbb{D}$: see Lemma \operatorname{Re}f{lem:radial_chordal}. Importantly, we know by Theorem \operatorname{Re}f{KS} that this converges to chordal $\SLE_4$ as ${\kappa'} \downarrow 4$. This is the main ingredient going into the proof, for which the heuristic is as follows. If $\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps$ is very close to a chordal SLE$_4$, then after some small initial time it should not hit the boundary of $\mathbb{D}$ again until getting very close to $x_2$. At this point either $w'$ and $0$ will be on the ``same side of the curve" (scenario on the right of Figure \operatorname{Re}f{fig:almost_sep_2}) or they will be on ``different sides'' (scenario on the left of Figure \operatorname{Re}f{fig:almost_sep_2}). \begin{itemize} \item In the latter case (left of Figure \operatorname{Re}f{fig:almost_sep_2}), note that $\widetilde{\operatornameeratorname{e}ta}$ is very unlikely to return anywhere near to $0$ or $w'$ before swallowing the force point at $x_2$. Hence, whether or not $\mathcal{A}^\operatornameeratorname{e}ps$ occurs depends only on whether the curve goes on to hit the boundary ``just to the left'' of $x_2$, or ``just to the right''. Indeed, hitting on one side will correspond to $0$ being in a monocolored red bubble when it is separated from $w'$, meaning that $\mathcal{A}^\operatornameeratorname{e}ps$ will occur, while hitting on the other side will correspond to $w'$ being in a monocolored blue bubble, and it will not. By the Markov property and symmetry, we will argue that each of these happen with (conditional) probability close to $1/2$. 
\item In the former case (right of Figure \ref{fig:almost_sep_2}), $\widetilde{\eta}$ will go on to swallow the force point $x_2$ before separating $0$ and $w'$, with high probability as ${\kappa'}\downarrow 4$. Once this has occurred, $\widetilde{\eta}^\eps$ will continue to evolve in the cut-off component containing $0$ and $w'$, as an $\SLE_{\kappa'}({\kappa'}-6)$ with force point initially adjacent to the tip. But then by mapping to the unit disk again, the conditional probability of $\mathcal{A}^\eps$ becomes the probability that a space-filling $\SLE_{\kappa'}$ visits one particular point before another. This converges to $1/2$ as ${\kappa'}\downarrow 4$ by Lemma \ref{lem:pre-1/2}. \end{itemize} \begin{figure} \centering \includegraphics[width=\textwidth]{almost_sep_2} \caption{\emph{{Illustration of Lemma~\ref{lem:1/2}.} The two scenarios that can occur when the force point $x_2$ is swallowed by $\widetilde{\eta}^\eps$. On the left, $0$ and $w'$ are on opposite sides of the curve (there is also an analogous scenario when $0$ is on the ``blue side'' and $w'$ is on the ``red side''). If this happens, we are interested in whether $\widetilde{\eta}^\eps$ hits the blue or the red part of $\partial \mathbb{D}$ first. On the right, they are on the same side of the curve and we are interested in what happens after $x_2$ is swallowed.}} \label{fig:almost_sep_2} \end{figure} \begin{proofof}{Lemma \ref{lem:1/2}} Let us now proceed with the details. 
For $u>0$ small, let $\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps_u$ be $\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps$ run until the first entry time $T^\operatornameeratorname{e}ps_u$ of $\mathbb{D} \cap B_{x_2}(u)$. By Theorem \operatorname{Re}f{KS}, the probability that $\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps$ separates $0$ or $w'$ from $x_2$ before time $T^\operatornameeratorname{e}ps_u$ tends to $0$ as $\operatornameeratorname{e}ps\to 0$ for any fixed $u<|x_2-x_1|$. We write $E_{u,\text{b}}^\operatornameeratorname{e}ps$ for this event. We also {fix} a $u'>0$, chosen such that $x_1,0$ and $w'$ are contained in the closure of $\mathbb{D} \setminus B_{x_2}(u')$. Again from the convergence to SLE$_4$ we can deduce that \begin{equation}\label{eq:star} \mathbb{P}\left(\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps \text{ revisits } \mathbb{D} \setminus B_{x_2}(u') \text{ after time }T^\operatornameeratorname{e}ps_u \right)\to 0 \text{ as } u\to 0, \text{ uniformly in } \operatornameeratorname{e}ps . \operatornameeratorname{e}nd{equation} The point of this is that $\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps$ cannot ``change between the configurations in Figure \operatorname{Re}f{fig:almost_sep_2}'' without going back into $\mathbb{D}\setminus B_{x_2}(u')$. 
Write: \begin{itemize} \itemsep0em \item $E_{u,\text{l}}^\operatornameeratorname{e}ps$ for the intersection of $(E_{u,\text{b}}^\operatornameeratorname{e}ps)^c$ and the event that \corr{$\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps_u\cup \overline{B_{x_2}(u)}$} separates $0$ and $w'$ in $\mathbb{D}$, with $0$ on the left of $\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps_u$;\item $E_{u,\text{r}}^\operatornameeratorname{e}ps$ for the same thing but with left replaced by right; \item $E_{u,\text{s}}^\operatornameeratorname{e}ps$ for the intersection of $(E_{u,\text{b}}^\operatornameeratorname{e}ps)^c$ and the event that \corr{$\widetilde{\operatornameeratorname{e}ta}_u^\operatornameeratorname{e}ps\cup \overline{B_{x_2}(u)}$} does not separate $0$ and $w'$ in $\mathbb{D}$. \operatornameeratorname{e}nd{itemize} Then we can decompose \begin{align*} \mathbb{P}(\mathcal{A}^\operatornameeratorname{e}ps) & = & \mathbb{E}[\mathbb{P}(\mathcal{A}^\operatornameeratorname{e}ps \, | \, E_{u,\text{b}}^\operatornameeratorname{e}ps )\mathbbm{1}_{E_{u,\text{b}}^\operatornameeratorname{e}ps}+\mathbb{P}(\mathcal{A}^\operatornameeratorname{e}ps \, | \, E_{u,\text{l}}^\operatornameeratorname{e}ps )\mathbbm{1}_{E_{u,\text{l}}^\operatornameeratorname{e}ps}+\mathbb{P}(\mathcal{A}^\operatornameeratorname{e}ps \, | \,E_{u,\text{r}}^\operatornameeratorname{e}ps )\mathbbm{1}_{E_{u,\text{r}}^\operatornameeratorname{e}ps}+\mathbb{P}(\mathcal{A}^\operatornameeratorname{e}ps \, | \, E_{u,\text{s}}^\operatornameeratorname{e}ps)\mathbbm{1}_{E_{u,\text{s}}^\operatornameeratorname{e}ps}] \nonumber \\ & = & \underset{\textcircled{1}}{\mathbb{E}[{\mathcal{A}}^\operatornameeratorname{e}ps \mathbbm{1}_{E_{u,\text{b}}^\operatornameeratorname{e}ps}]} + \underset{\textcircled{2}}{\mathbb{E}[\mathbb{P}(\mathcal{A}^\operatornameeratorname{e}ps \, | \, E_{u,\text{l}}^\operatornameeratorname{e}ps )\mathbbm{1}_{E_{u,\text{l}}^\operatornameeratorname{e}ps}]} + 
\underset{\textcircled{3}}{\mathbb{E}[\mathbb{P}(\mathcal{A}^\operatornameeratorname{e}ps \, | \,E_{u,\text{r}}^\operatornameeratorname{e}ps )\mathbbm{1}_{E_{u,\text{r}}^\operatornameeratorname{e}ps}]} +\underset{\textcircled{4}}{\mathbb{E}[\mathbb{P}(\mathcal{A}^\operatornameeratorname{e}ps \, | \, E_{u,\text{s}}^\operatornameeratorname{e}ps)\mathbbm{1}_{E_{u,\text{s}}^\operatornameeratorname{e}ps}]} \operatornameeratorname{e}nd{align*} By the observations of the previous paragraph, $\mathbb{P}(E_{u,\text{b}}^\operatornameeratorname{e}ps)\to 0$ as $\operatornameeratorname{e}ps\to 0$ for any fixed $u$, and therefore also \begin{equation} \label{1to 0}\textcircled{1} \to 0 \text{ as } \operatornameeratorname{e}ps\to 0 \text{ for any fixed } u. \operatornameeratorname{e}nd{equation} Let us now describe what is going on with the terms $\textcircled{2},\textcircled{3}$ and $\textcircled{4}$. The term $\textcircled{2}$ corresponds to the left scenario of Figure \operatorname{Re}f{fig:almost_sep_2}, and the term $\textcircled{3}$ corresponds to the same scenario, but when $0$ and $w'$ lie on opposite sides of the curve to those illustrated in the figure. We will show that: \begin{equation}\label{left_scenario} \lim_{u\to 0} \lim_{\operatornameeratorname{e}ps\to 0} \, (\textcircled{2} + \textcircled{3}) = \frac{1}{2} \mathbb{P}(\SLE_4 \text{ from } x_1 \text{ to } x_2 \text{ in } \mathbb{D} \text{ separates } w' \text{ and } 0)=: \frac{p}{2} \operatornameeratorname{e}nd{equation} The term $\textcircled{4}$ corresponds to the scenario on the right of Figure \operatorname{Re}f{fig:almost_sep_2}. 
We will show that: \begin{equation}\label{right_scenario} \lim_{u\to 0}\lim_{\operatornameeratorname{e}ps \to 0} \, \textcircled{4} = \frac{1}{2}(1-p)=\frac{1}{2}\mathbb{P}(\SLE_4 \text{ from } x_1 \text{ to } x_2 \text{ in } \mathbb{D} \text{ does not separate } w' \text{ and } 0).\operatornameeratorname{e}nd{equation} Combining \operatornameeratorname{e}qref{left_scenario}, \operatornameeratorname{e}qref{right_scenario}, \operatornameeratorname{e}qref{1to 0} and the decomposition $\mathbb{P}(\mathcal{A}^\operatornameeratorname{e}ps)=\textcircled{1}+\textcircled{2}+\textcircled{3}+\textcircled{4}$ gives \operatornameeratorname{e}qref{eq:1/2}, and thus completes the proof. So all that remains is to show \operatornameeratorname{e}qref{left_scenario} and \operatornameeratorname{e}qref{right_scenario}. \operatornameeratorname{e}mph{Proof of \operatornameeratorname{e}qref{left_scenario}.} First, by \operatornameeratorname{e}qref{eq:star}, we can pick $u$ small enough such that the differences \begin{align*} & \left(\textcircled{2} - \mathbb{E}[\mathbb{P}(\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps\corr{|_{[T_u^\operatornameeratorname{e}ps,\infty)}} \text{ hits the clockwise arc between } x_1 \text{ and } x_2 \text{ first }\, | \, E_{u,\text{l}}^\operatornameeratorname{e}ps )\, \mathbbm{1}_{E_{u,\text{l}}^\operatornameeratorname{e}ps}] \right) \text{ and } \\ & \left(\textcircled{3} - \mathbb{E}[\mathbb{P}(\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps\corr{|_{[T_u^\operatornameeratorname{e}ps,\infty)}} \text{ hits the counterclockwise arc between } x_1 \text{ and } x_2 \text{ first } \, | \, E_{u,\text{r}}^\operatornameeratorname{e}ps )\,\mathbbm{1}_{E_{u,\text{r}}^\operatornameeratorname{e}ps}]\right) \operatornameeratorname{e}nd{align*} are arbitrarily small, uniformly in $\operatornameeratorname{e}ps$. 
All we are doing here is using the fact that if $u$ is small enough, $\widetilde\operatornameeratorname{e}ta^\operatornameeratorname{e}ps$ will not return anywhere close to $0$ or $w'$ after time $T_u^\operatornameeratorname{e}ps$. This allows us to reduce the problem to estimating conditional probabilities for chordal $\SLE_{\kappa'}$. To estimate these probabilities (the conditional probabilities in the displayed equations above) we can use Theorem \operatorname{Re}f{KS}, \corr{plus symmetry. In particular, Theorem \operatorname{Re}f{KS} implies that for a chordal SLE$_{\kappa'}$ curve on $\mathbb{H}$ from $0$ to $\infty$, the probability that it hits $[R,\infty)$ before $(-\infty,-L]$ for any fixed $L,R\in (0,\infty)$ can be made arbitrary close to the probability that it hits $[\max(L,R),\infty)$ before $(-\infty,-\max(L,R)]$ as ${\kappa'}\downarrow 4$. This is because SLE$_4$ does not hit the boundary apart from at the end points and the convergence is in the uniform topology. Since the probability that chordal SLE$_{\kappa'}$ in $\mathbb{H}$ from $0$ to $\infty$ hits $[\max(L,R),\infty)$ before $(-\infty,-\max(L,R)]$ is $1/2$ for every ${\kappa'}$ by symmetry, we see that the probability of hitting $[R,\infty)$ before $(-\infty,-L]$ converges to $1/2$ as ${\kappa'} \downarrow 4$.} We use this to observe, by conformally mapping to $\mathbb{H}$ that $$\mathbb{P}\left(\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps\corr{|_{[T_u^\operatornameeratorname{e}ps,\infty)}} \text{ hits the clockwise arc between } x_1 \text{ and } x_2 \text{ first } \, | \, \widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps([0,T_u^\operatornameeratorname{e}ps])\right)\to \frac{1}{2} $$ almost surely as $ \operatornameeratorname{e}ps\to 0$. Using this along with dominated convergence, we obtain \operatornameeratorname{e}qref{left_scenario}. 
\operatornameeratorname{e}mph{Proof of \operatornameeratorname{e}qref{right_scenario}.} Write $E^\operatornameeratorname{e}ps$ for the event that $\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps$ swallows the force point $x_2$ before separating $0$ and $w'$. Then we can rewrite \textcircled{4} as \begin{equation} \mathbb{E}[\mathcal{A}^\operatornameeratorname{e}ps (\mathbbm{1}_{E^\operatornameeratorname{e}ps_{u,s}}-\mathbbm{1}_{E^\operatornameeratorname{e}ps})]+\mathbb{E}[\mathcal{A}^\operatornameeratorname{e}ps \mathbbm{1}_{E^\operatornameeratorname{e}ps}]. \operatornameeratorname{e}nd{equation} Applying \operatornameeratorname{e}qref{eq:star} shows that the first term tends to $0$ as $u\to 0$, uniformly in $\operatornameeratorname{e}ps$. Let us now show that the second tends to $(1/2)(1-p)$ as $\operatornameeratorname{e}ps\to 0$. To do this, we condition on $\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps$ run up to the time $T^\operatornameeratorname{e}ps_0$ that the force point $x_2$ is swallowed. Conditioned on this initial segment we can use the Markov property of $\SLE_{\kappa'}({\kappa'}-6)$ to describe the future evolution of $\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps$. Indeed, it is simply that of a radial $\SLE_{\kappappa'}({\kappa'}-6)$ started from $\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps(T_0^\operatornameeratorname{e}ps)\in \partial \mathbb{D}$ and targeted towards $0$, with force point located infinitesimally close to the starting point. 
Viewing the evolution of $\widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps$ after time $T_0^\operatornameeratorname{e}ps$ as one branch of a space-filling $\SLE_{\kappappa'}$ we then have \begin{equation*}\mathbb{E}[\mathcal{A}^\operatornameeratorname{e}ps \mathbbm{1}_{E^\operatornameeratorname{e}ps}] = \mathbb{E}[\mathbb{P}(\text{space-filling SLE}_{\kappa'} \text{ started from } \widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps(T_0^\operatornameeratorname{e}ps) \text{ hits } 0 \text{ before } w') \mathbbm{1}_{E^\operatornameeratorname{e}ps}] \operatornameeratorname{e}nd{equation*} which we further decompose as \[ \frac{1}{2} \mathbb{P}(E^\operatornameeratorname{e}ps) + \mathbb{E}\left[\left(\mathbb{P}(\text{space-filling SLE}_{\kappa'} \text{ started from } \widetilde{\operatornameeratorname{e}ta}^\operatornameeratorname{e}ps(T_0^\operatornameeratorname{e}ps) \text{ hits } 0 \text{ before } w')-1/2\right) \mathbbm{1}_{E_\operatornameeratorname{e}ps}\right]. \] Since the first term above tends to $(1/2)(1-p)$ as $\operatornameeratorname{e}ps\to 0$, it again suffices by dominated convergence (and by applying a rotation) to show that for any $x\in \mathbb{D}$: $$\mathbb{P}(\operatornameeratorname{e}ta^\operatornameeratorname{e}ps \text{ hits } 0 \text{ before } x) \to \frac{1}{2} \text{ as } \operatornameeratorname{e}ps\to 0.$$ This is precisely the statement of Lemma \operatorname{Re}f{lem:pre-1/2}. Thus we conclude the proof of \operatornameeratorname{e}qref{right_scenario}, and therefore Lemma \operatorname{Re}f{lem:1/2}. \operatornameeratorname{e}nd{proofof} \subsection{Convergence of separation times} We now want to prove that for $z\ne w$ the \operatornameeratorname{e}mph{actual separation times} $\sigma_{z,w}^\operatornameeratorname{e}ps$ converge to the separation time $\sigma_{z,w}$ in law (jointly with the exploration) as $\operatornameeratorname{e}ps\to 0$. The difficulty is as follows. 
Suppose we are on a probability space where $\eta^\eps_z$ converges a.s.\ to $\eta_z$. Then we can deduce (by Lemma \ref{lem:Dzw}) that any limit of $\sigma_{z,w}^\eps$ must be greater than or equal to $\sigma_{z,w}$. But it still could be the case that $z$ and $w$ are ``almost separated'' at some sequence of times that converge to $\sigma_{z,w}$ as $\eps\downarrow 0$, but that the $\eta_z^\eps$ then go on to do something else for a macroscopic amount of time before coming back to finally separate $z$ and $w$. (Note that in this situation the $\eta_z^\eps$ would be creating ``bottlenecks'' at the almost-separation times, so it would not contradict Proposition \ref{prop:order_simple}.) The main result of this subsection is the following. \begin{proposition}\label{prop:jointDsigma} For any $z\ne w \in \mathcal{Q}$ \begin{equation} \label{sep_time_conv}(\mathbf{D}_z^\eps,\sigma_{z,w}^\eps)\Rightarrow (\mathbf{D}_z,\sigma_{z,w})\end{equation} as $\eps\to 0$, with respect to Carath\'{e}odory convergence in $\mathcal{D}$ in the first co-ordinate, and convergence in $\mathbb{R}$ in the second. \end{proposition} \begin{remark}\label{rmk:sep_time_tight} It is easy to see that $\sigma_{z,w}^\eps$ is tight in $\eps$ for any fixed $z\ne w\in \mathbb{D}$. For example, this follows from Corollary \ref{cor:cleloopconv}, which implies that minus the log conformal radius, seen from $z$, of the first $\CLE_{{\kappa'}}$ loop containing $z$ and not $w$, is tight. 
Since $\sigma_{z,w}^\operatornameeratorname{e}ps$ is bounded above by this minus log conformal radius, tightness of $\sigma_{z,w}^\operatornameeratorname{e}ps$ follows. \operatornameeratorname{e}nd{remark} There is one situation where convergence of the separation times is already easy to see from our work so far. Namely, when $z$ and $w$ are separated (in the limit) at a time when a CLE$_4$ loop has just been drawn. More precisely: \begin{lemma}\label{lem:septime_endofloop} Suppose that $\operatornameeratorname{e}ps_n\downarrow 0$ is such that $$(\mathbf{D}_{z}^{\operatornameeratorname{e}ps_n},\mathbf{D}_w^{\operatornameeratorname{e}ps_n}, \sigma_{z,w}^{\operatornameeratorname{e}ps_n}, \sigma_{w,z}^{\operatornameeratorname{e}ps_n},\mathcal{O}_{z,w}^{\operatornameeratorname{e}ps_n})\mathbb{R}ightarrow (\mathbf{D}_z, \mathbf{D}_w^*, \sigma_{z,w}^*,\sigma_{w,z}^*,\mathcal{O}^*) \text{ as } n\to \infty$$ (where at this point we know that $\mathbf{D}_z,\mathbf{D}_w^*$ have the same \operatornameeratorname{e}mph{marginal} laws as $\mathbf{D}_z,\mathbf{D}_w$, but not necessarily the same joint law). Then on the event that $\mathbf{D}_z$ separates $w$ from $z$ at a time $\sigma_{z,w}$ when a $\mathbb{C}LE_4$ loop $\mathcal{L}$ is completed, we have that almost surely: \begin{compactitem} \item $\sigma_{z,w}^*=\sigma_{z,w}$; \item $\mathbf{D}_w^*$ is equal to $\mathbf{D}_z$ (modulo time reparameterization), up to the time $\sigma_{w,z}$ that $z$ is separated from $w$; \item $\sigma_{w,z}^*=\sigma_{w,z}$; and \item conditionally on the above event occurring, $\mathcal{O}^*$ is independent of $\mathbf{D}_z,\mathbf{D}_w^*$ and has the law of a Bernoulli$(\frac{1}{2})$ random variable. 
\operatornameeratorname{e}nd{compactitem} \operatornameeratorname{e}nd{lemma} \begin{proof} Without loss of generality, by switching the roles of $z$ and $w$ if necessary and by the Markov property of the explorations, it suffices to consider the case that $\mathcal{L}=\mathcal{L}_z$ is the outermost $\mathbb{C}LE_4$ loop (generated by $\mathbf{D}_z$) containing $z$. By Skorokhod embedding together with Corollary \operatorname{Re}f{rmk:convfullbranch} and Proposition \operatorname{Re}f{prop:cleloopconv}, we may assume that we are working on a probability space where the convergence assumed in the lemma holds almost surely, jointly with the convergence $\mathcal{L}_z^{\operatornameeratorname{e}ps_n} \to \mathcal{L}_z$ (in the Hausdorff sense), $\mathcal{B}_z^{\operatornameeratorname{e}ps_n}=(\mathbf{D}_{z}^{\operatornameeratorname{e}ps_n})_{\tau_z^{\operatornameeratorname{e}ps_n}}\to \mathcal{B}_z=(\mathbf{D}_z)_{\tau_z}=\mathrm{int}(\mathcal{L}_z)$ (in the Carath\'{e}odory sense) and $(\tau_{0,z}^{\operatornameeratorname{e}ps_n},\tau_z^{\operatornameeratorname{e}ps_n}) \to (\tau_{0,z},\tau_z)$. (Recall the definitions of these times from Section \operatorname{Re}f{def:tauz}). We may also assume that the convergence $\sigma_{z,w,\delta}^{\operatornameeratorname{e}ps_n}\to \sigma_{z,w,\delta}$ holds almost surely as $n\to \infty$ for all rational $\delta>0$. Now we restrict to the event $E$ that $\mathbf{D}_z$ separates $z$ from $w$ at time $\tau_z$, so that in particular $w$ is at positive distance from $\mathcal{L}_z\cup (\mathbf{D}_z)_{\tau_z}=\overline{(\mathbf{D}_z)_{\tau_z}}$.
The Hausdorff convergence $\mathcal{L}_z^{\operatornameeratorname{e}ps_n} \to \mathcal{L}_z$ thus implies that $w\in \mathbb{D}\setminus \mathcal{B}_z^{\operatornameeratorname{e}ps_n}$ for all $n$ large enough (i.e., $w$ is outside of the first $\mathbb{C}LE_{{\kappa'}(\operatornameeratorname{e}ps_n)}$ loop containing $z$), and therefore that $\sigma_{z,w}^{\operatornameeratorname{e}ps_n}\le\tau^{\operatornameeratorname{e}ps_n}_z$ for all $n$ large enough (i.e., separation occurs no later than this loop closure time). Since $\sigma_{z,w}^*$ is defined to be the almost sure limit of $\sigma_{z,w}^{\operatornameeratorname{e}ps_n}$ as $n\to \infty$, and we have assumed that $\tau_z^{\operatornameeratorname{e}ps_n}\to \tau_z$ almost surely, this implies that $\sigma_{z,w}^*\le \tau_z$ almost surely on the event $E$. On the other hand, we know that $\sigma_{z,w}^{\operatornameeratorname{e}ps_n}\ge \sigma_{z,w,\delta}^{\operatornameeratorname{e}ps_n}$ and $\sigma_{z,w,\delta}^{\operatornameeratorname{e}ps_n}\to \sigma_{z,w,\delta}$ as $n\to \infty$ for all rational positive $\delta$, so that $\sigma_{z,w}^*\ge \sigma_{z,w,\delta}$ for all $\delta$ and therefore $\sigma_{z,w}^*\ge \lim_{\delta\to 0} \sigma_{z,w,\delta}=\sigma_{z,w}=\tau_z$ almost surely. Together this implies that $\sigma_{z,w}=\tau_z=\sigma_{z,w}^*$ on the event $E$. Next, observe that by the same argument as in the penultimate sentence above, we have $\sigma_{w,z}^*\ge \sigma_{w,z}$ with probability one. Moreover, we saw that on the event $E$, $w\in \mathbb{D}\setminus \mathcal{B}_z^{\operatornameeratorname{e}ps_n}$ for all $n$ large enough.
But we also have that $\sigma_{z,w}^{\operatornameeratorname{e}ps_n}\to \tau_z$, so that $\sigma_{z,w}^{\operatornameeratorname{e}ps_n}>\tau_{0,z}^{\operatornameeratorname{e}ps_n}$ and therefore $w\in (\mathbf{D}^{\operatornameeratorname{e}ps_n}_{z,w})_{\tau_{0,z}^{\operatornameeratorname{e}ps_n}}\setminus \mathcal{B}_z^{\operatornameeratorname{e}ps_n}$ for all $n$ large enough. Hence, $$ \sigma_{w,z}^*=\lim_n \sigma_{w,z}^{\operatornameeratorname{e}ps_n} \le \lim_n -\log \mathbb{C}R(w,(\mathbf{D}^{\operatornameeratorname{e}ps_n}_{z,w})_{\tau_{0,z}^{\operatornameeratorname{e}ps_n}}\setminus \mathcal{B}_z^{\operatornameeratorname{e}ps_n})=-\log \mathbb{C}R(w,(\mathbf{D}_{z})_{\corr{\tau_{0,z}}}\setminus \mathcal{B}_z)=\sigma_{w,z}.$$ Combining the two inequalities above gives the third bullet point of the lemma, and since $\mathbf{D}_{w,z}^{\operatornameeratorname{e}ps_n}$ and $\mathbf{D}_{z,w}^{\operatornameeratorname{e}ps_n}$ agree up to time parameterization until $z$ and $w$ are separated for every $n$, we also obtain the second bullet point. For the final bullet point, if we write $\mathbf{D}_{z,w}$ for $\mathbf{D}_z$ stopped at time $\sigma_{z,w}$, we already know from the previous subsection that the law of $\mathcal{O}^*$ given $\mathbf{D}_{z,w}$ is fair Bernoulli. Moreover, since $\mathcal{O}^{\operatornameeratorname{e}ps_n}_{z,w}$ and $(g_{\sigma_{z,w}^{\operatornameeratorname{e}ps_n}}[\mathbf{D}_{z}^{\operatornameeratorname{e}ps_n}]((\mathbf{D}_z^{\operatornameeratorname{e}ps_n})_{s+\sigma_{z,w}^{\operatornameeratorname{e}ps_n}}) \, ; \, s\ge 0)$ are independent for every $n$, it follows that $\mathcal{O}^*$ is independent of $(g_{\sigma_{z,w}^*}[\mathbf{D}_{z}]((\mathbf{D}_z)_{s+\sigma_{z,w}^*})\, ; \,s\ge 0)$. 
So in general (i.e., without restricting to the event $E$) we can say that, given $(g_{\sigma_{z,w}^*}[\mathbf{D}_{z}]((\mathbf{D}_z)_{s+\sigma_{z,w}^*}) \, ; \, s\ge 0)$ and $((\mathbf{D}_z)_t \, ; \, t\le \sigma_{z,w})$, $\mathcal{O}^*$ has the conditional law of a Bernoulli$(1/2)$ random variable. Since the event $E$ (that $\sigma_{z,w}=\tau_z$) is measurable with respect to $((\mathbf{D}_z)_t \, ; \, t\le \sigma_{z,w})$, and we have already seen that $\sigma_{z,w}=\sigma_{z,w}^*$ on this event, we deduce the final statement of the lemma. \operatornameeratorname{e}nd{proof} \begin{proofof}{Proposition \operatorname{Re}f{prop:jointDsigma}} By tightness (Remark \operatorname{Re}f{rmk:sep_time_tight}), and since we already know the convergence in law of $(\mathbf{D}_z^{\operatornameeratorname{e}ps}, (\sigma^\operatornameeratorname{e}ps_{z,w,\delta})_{\delta>0})$ to $(\mathbf{D}_z,(\sigma_{z,w,\delta})_{\delta>0})$, it suffices to prove that any joint subsequential limit in law $(\mathbf{D}_z,(\sigma_{z,w,\delta})_{\delta>0},\sigma^*_{z,w})$ of $(\mathbf{D}_z^{\operatornameeratorname{e}ps}, (\sigma^\operatornameeratorname{e}ps_{z,w,\delta})_{\delta>0}, \sigma_{z,w}^\operatornameeratorname{e}ps)$ has $\sigma^*_{z,w}=\sigma_{z,w}$ almost surely. So let us assume that we have such a subsequential limit (along some sequence $\operatornameeratorname{e}ps_n\downarrow 0$) and that we are working on a probability space where the convergence holds almost surely. As remarked previously, since $\sigma_{z,w}^{\operatornameeratorname{e}ps_n}\ge \sigma_{z,w,\delta}^{\operatornameeratorname{e}ps_n}$ for each $\delta>0$ and $\lim_\delta\lim_n\sigma_{z,w,\delta}^{\operatornameeratorname{e}ps_n}=\lim_\delta \sigma_{z,w,\delta}=\sigma_{z,w}$, we already know that $\sigma_{z,w}^*\ge \sigma_{z,w}$ almost surely.
So we just need to prove that $\mathbb{P}(\sigma_{z,w}+s\le\sigma_{z,w}^*)=0$, or alternatively, that $\lim_{\delta\to 0} \mathbb{P}(\sigma_{z,w,\delta}+s\le \sigma_{z,w}^*)=0$ for any $s>0$ fixed. Since $\sigma_{z,w,\delta}$ and $\sigma_{z,w}^*$ are the almost sure limits of $\sigma^{\operatornameeratorname{e}ps_n}_{z,w,\delta}$ and $\sigma_{z,w}^{\operatornameeratorname{e}ps_n}$ as $n\to \infty$, it is sufficient to prove that for each $s>0$ $$\limsup_{\delta\to 0}\limsup_{\operatornameeratorname{e}ps\to 0} \mathbb{P}(\sigma_{z,w,\delta}^{\operatornameeratorname{e}ps}+s\le \sigma_{z,w}^{\operatornameeratorname{e}ps})=0.$$ The strategy of the proof is to use Lemma \operatorname{Re}f{lem:septime_endofloop} to say that (when $\delta$ and $\operatornameeratorname{e}ps$ are small), $\operatornameeratorname{e}ta_z^\operatornameeratorname{e}ps$ will separate lots of $\mathbb{C}LE_{\kappa'}$ loops from $z$ during the time interval $[\sigma_{z,w,\delta}^\operatornameeratorname{e}ps,\sigma_{z,w,\delta}^\operatornameeratorname{e}ps+s]$. Then we will argue that this is very unlikely to happen during the time interval $[\sigma_{z,w,\delta}^\operatornameeratorname{e}ps,\sigma_{z,w}^\operatornameeratorname{e}ps]$, which means that $\sigma_{z,w}^\operatornameeratorname{e}ps<\sigma_{z,w,\delta}^\operatornameeratorname{e}ps+s$ with high probability. More precisely, let us assume from now on that $s>0$ is fixed, and write $\mathcal{S}_r$ for the collection of faces (squares) of $r\mathbb{Z}^2$ that intersect $\mathbb{D}$. We write $\widetilde S_{\delta,r}^\operatornameeratorname{e}ps$ for the event that there exists $S\in \mathcal{S}_r$ that is separated by $\operatornameeratorname{e}ta_z^\operatornameeratorname{e}ps$ from $z$ during the interval $[\sigma_{z,w,\delta}^\operatornameeratorname{e}ps,\sigma_{z,w,\delta}^\operatornameeratorname{e}ps +s]$ \operatornameeratorname{e}mph{and} such that $z$ is visited by the space-filling $\SLE_{\kappa'}$ before $S$. 
We write $S_{\delta,r}^\operatornameeratorname{e}ps$ for the same event but with the interval $[\sigma_{z,w,\delta}^\operatornameeratorname{e}ps, \sigma_{z,w}^\operatornameeratorname{e}ps]$ instead. So if the event $\{\sigma_{z,w,\delta}^\operatornameeratorname{e}ps+s\le \sigma_{z,w}^\operatornameeratorname{e}ps\}$ occurs, then either $S_{\delta,r}^\operatornameeratorname{e}ps$ occurs or $\widetilde S_{\delta,r}^\operatornameeratorname{e}ps$ does not. Hence, for any $r>0$: $$\limsup_{\delta\to 0}\limsup_{\operatornameeratorname{e}ps\to 0} \mathbb{P}(\sigma_{z,w,\delta}^{\operatornameeratorname{e}ps}+s\le \sigma_{z,w}^{\operatornameeratorname{e}ps})\le \limsup_{\delta\to 0} \limsup_{\operatornameeratorname{e}ps\to 0} \mathbb{P}(S_{\delta,r}^\operatornameeratorname{e}ps)+ \limsup_{\delta\to 0}\limsup_{\operatornameeratorname{e}ps\to 0}\mathbb{P}((\widetilde S_{\delta,r}^\operatornameeratorname{e}ps)^c).$$ \corr{We will show that \begin{equation} \label{eq:kexp} \liminf_{\delta\downarrow 0}\liminf_{\operatornameeratorname{e}ps\downarrow 0}\mathbb{P}(\widetilde S_{\delta,r}^\operatornameeratorname{e}ps)\to 1 \text{ as } r\to 0, \operatornameeratorname{e}nd{equation}and that for any $r>0$, \begin{equation} \label{eqn:bottleneck} \lim_{\delta\downarrow 0} \lim_{\operatornameeratorname{e}ps\downarrow 0} \mathbb{P}(S_{\delta,r}^\operatornameeratorname{e}ps) =0. \operatornameeratorname{e}nd{equation} Let us start with \operatornameeratorname{e}qref{eq:kexp}. First, Lemma \operatorname{Re}f{lem:septime_endofloop} tells us that since many $S\in \mathcal{S}_r$ will be separated from $z$ by the CLE$_4$ exploration during the time interval $[\sigma_{z,w},\sigma_{z,w}+s]$ as $r\downarrow 0$, the same will be true for the space-filling SLE$_{\kappa'}$ on the time interval $[\sigma_{z,w,\delta}^\operatornameeratorname{e}ps, \sigma_{z,w,\delta}^\operatornameeratorname{e}ps+s]$ when $\operatornameeratorname{e}ps, \delta$ are small.
More precisely, for any fixed $k\in \mathbb{N}$, $\delta>0$, the lemma implies that $$\liminf_{\operatornameeratorname{e}ps\downarrow 0} \mathbb{P}(\operatornameeratorname{e}ta_z^\operatornameeratorname{e}ps([\sigma_{z,w,\delta}^\operatornameeratorname{e}ps,\sigma_{z,w,\delta}^\operatornameeratorname{e}ps+s]) \text{ separates } k \text{ squares in } \mathcal{S}_r \text{ from } z) \ge p_{\delta,k,r} $$where $p_{\delta,k,r}$ is the probability that $\mathbf{D}_z$ disconnects at least $k$ squares in $\mathcal{S}_r$ from $z$ by distinct $\mathbb{C}LE_4$ loops during the time interval $[\sigma_{z,w,\delta},\sigma_{z,w,\delta}+s]$. Moreover, since $\sigma_{z,w,\delta}\to \sigma_{z,w}$ as $\delta\to 0$ almost surely, $\liminf_{\delta\downarrow 0} p_{\delta,k,r}$ is equal to the probability $p_{k,r}$ that $\mathbf{D}_z$ disconnects at least $k$ squares in $\mathcal{S}_r$ from $z$ by distinct $\mathbb{C}LE_4$ loops during the time interval $[\sigma_{z,w},\sigma_{z,w}+s]$. Note that since $s$ is positive (and fixed), $p_{k,r}\to 1$ as $r\to 0$ for any fixed $k$. This is almost exactly what we need. However, recall that although $\tilde{S}_{\delta,r}^\operatornameeratorname{e}ps$ only requires one $S\in \mathcal{S}_r$ to be disconnected from $z$ by $\operatornameeratorname{e}ta_z^\operatornameeratorname{e}ps([\sigma_{z,w,\delta}^\operatornameeratorname{e}ps,\sigma_{z,w,\delta}^\operatornameeratorname{e}ps+s])$, it also requires that this $z$ is visited by the space filling SLE$_{\kappa'}$ before $S$. This is why we ask for $k$ squares to be separated, because then by Lemma \operatorname{Re}f{lem:septime_endofloop}, whether they are visited before or after $z$ converges to a sequence of independent coin tosses. 
Namely, for any $k\in \mathbb{N}$, \begin{align*} \liminf_{\delta\downarrow 0}\liminf_{\operatornameeratorname{e}ps\downarrow 0}\mathbb{P}(\widetilde S_{\delta,r}^\operatornameeratorname{e}ps) & \ge (1-2^{-k}) \liminf_{\delta\downarrow 0}\liminf_{\operatornameeratorname{e}ps\downarrow 0} \mathbb{P}(\operatornameeratorname{e}ta_z^\operatornameeratorname{e}ps([\sigma_{z,w,\delta}^\operatornameeratorname{e}ps,\sigma_{z,w,\delta}^\operatornameeratorname{e}ps+s]) \text{ separates } k \text{ squares in } \mathcal{S}_r \text{ from } z) \\ & \ge (1-2^{-k})\liminf_{\delta\downarrow 0} p_{\delta,k,r} \\ & \ge (1-2^{-k}) p_{k,r}. \operatornameeratorname{e}nd{align*} The $\liminf$ as $r\to 0$ of the left-hand side above is therefore greater than or equal to $(1-2^{-k})\lim_{r\to 0}p_{k,r}=(1-2^{-k})$ for every $k$. Since $k$ was arbitrary this concludes the proof of \operatornameeratorname{e}qref{eq:kexp}. Hence, to conclude the proof of the proposition, it suffices to justify \operatornameeratorname{e}qref{eqn:bottleneck}.} Although this is a statement purely about SLE, it turns out to be somewhat easier to prove using the connection with Liouville quantum gravity in \cite{DMS14}. Thus we postpone the proof of \operatornameeratorname{e}qref{eqn:bottleneck} to Section \operatorname{Re}f{sec:order_proof}, at which point we will have introduced the necessary objects and stated the relevant theorem of \cite{DMS14}. Let us emphasise that this proof will rely only on \cite{DMS14} and basic properties of Liouville quantum gravity (and could be read immediately by someone already familiar with the theory) so it is safe from now on to treat Proposition \operatorname{Re}f{prop:jointDsigma} as being proved. 
\operatornameeratorname{e}nd{proofof}\\ \subsection{Convergence of the partial order: proof of Proposition \operatorname{Re}f{prop:convbranchingsleorder}} Recall that Proposition \operatorname{Re}f{prop:convbranchingsleorder}, stated at the very beginning of Section \operatorname{Re}f{sec:conv_order}, asserts the joint convergence of the branching $\SLE_{\kappa'}$ and the collection of order variables to the limit $$((\mathbf{D}_z)_{z\in \mathcal{Q}}, (\mathcal{O}_{z,w})_{z,w\in \mathcal{Q}})$$ defined in Lemma \operatorname{Re}f{lem:defo}. Completing the proof is now simply a case of putting together our previous results. \begin{proofof}{Proposition \operatorname{Re}f{prop:convbranchingsleorder}} The following three claims are the main ingredients. \begin{itemize} \item[\textbf{Claim 1:}] $({\mathbf{D}}_z^\operatornameeratorname{e}ps)_{z\in \mathcal{Q}}\mathbb{R}ightarrow ({\mathbf{D}}_z)_{z\in \mathcal{Q}}$. \noindent \textit{Proof:} This follows from Corollary \operatorname{Re}f{cor:convfullbranch}, Proposition \operatorname{Re}f{prop:jointDsigma} and the fact that for every $\operatornameeratorname{e}ps$ and $z,w\in \mathcal{Q}$, ${\mathbf{D}}_z^\operatornameeratorname{e}ps$ and ${\mathbf{D}}_w^\operatornameeratorname{e}ps$ agree (up to time change) until $z$ and $w$ are separated, and then evolve independently. \item[\textbf{Claim 2:}] For any $z, w\in \mathcal{Q}$, \corr{$({\mathbf{D}}_z^\operatornameeratorname{e}ps, {\mathbf{D}}_w^\operatornameeratorname{e}ps, \mathcal{O}_{z,w}^\operatornameeratorname{e}ps)\mathbb{R}ightarrow ({\mathbf{D}}_z, {\mathbf{D}}_w, \mathcal{O}_{z,w})$}. 
\noindent \textit{Proof:} \corr{As usual, due to tightness, it is enough to show that any subsequential limit $({\mathbf{D}}_z^*, {\mathbf{D}}_w^*, \mathcal{O}^*)$ of $({\mathbf{D}}_z^\operatornameeratorname{e}ps, {\mathbf{D}}_w^\operatornameeratorname{e}ps, \mathcal{O}_{z,w}^\operatornameeratorname{e}ps)$, along a sequence $\operatornameeratorname{e}ps_n\downarrow 0$, has the correct joint distribution. In fact, we may assume that $$({\mathbf{D}}_z^{\operatornameeratorname{e}ps_n}, {\mathbf{D}}_w^{\operatornameeratorname{e}ps_n}, \sigma_{z,w}^{\operatornameeratorname{e}ps_n},\sigma_{w,z}^{\operatornameeratorname{e}ps_n},\mathcal{O}_{z,w}^{\operatornameeratorname{e}ps_n})\mathbb{R}ightarrow ({\mathbf{D}}_z^*, {\mathbf{D}}_w^*,\sigma_{z,w}^*,\sigma_{w,z}^*, \mathcal{O}^*)$$ and verify the same statement, where by Proposition \operatorname{Re}f{prop:jointDsigma} and Claim 1 above, we already know that $$(\mathbf{D}_z^*,\mathbf{D}_w^*,\sigma_{z,w}^*,\sigma_{w,z}^*)\overset{(d)}{=} (\mathbf{D}_z,\mathbf{D}_w,\sigma_{z,w},\sigma_{w,z})$$ (in particular, $\mathbf{D}_z^*$ and $\mathbf{D}_w^*$ agree up to time reparametrization until $z$ and $w$ are separated at times $\sigma_{z,w}^*$, $\sigma_{w,z}^*$). Now, Proposition \operatorname{Re}f{prop:order_simple} implies that, given ${\mathbf{D}}_z^*$ and ${\mathbf{D}}_w^*$ stopped at times $\sigma_{z,w}^*,\sigma_{w,z}^*$ respectively, the conditional law of $\mathcal{O}^*$ is fair Bernoulli. 
On the other hand, since $$\mathcal{O}^{\operatornameeratorname{e}ps_n}_{z,w}\, , \, (g_{\sigma_{z,w}^{\operatornameeratorname{e}ps_n}}[\mathbf{D}_{z}^{\operatornameeratorname{e}ps_n}]((\mathbf{D}_z^{\operatornameeratorname{e}ps_n})_{s+\sigma_{z,w}^{\operatornameeratorname{e}ps_n}}) \, ; \, s\ge 0) \text{ and } (g_{\sigma_{w,z}^{\operatornameeratorname{e}ps_n}}[\mathbf{D}_{w}^{\operatornameeratorname{e}ps_n}]((\mathbf{D}_w^{\operatornameeratorname{e}ps_n})_{s+\sigma_{w,z}^{\operatornameeratorname{e}ps_n}}) \, ; \, s\ge 0)$$ are mutually independent for every $n$, it follows that $\mathcal{O}^*$ is independent of $$(g_{\sigma_{z,w}^*}[\mathbf{D}^*_{z}]((\mathbf{D}_z)_{s+\sigma_{z,w}^*})\, ; \,s\ge 0)\, , \, (g_{\sigma_{w,z}^*}[\mathbf{D}^*_{w}]((\mathbf{D}_w)_{s+\sigma_{w,z}^*})\, ; \,s\ge 0).$$ This provides the claim. } \item[\textbf{Claim 3:}] For any $z,w\in \mathcal{Q}$, $(({\mathbf{D}}_y^\operatornameeratorname{e}ps)_{y\in \mathcal{Q}}, \mathcal{O}^\operatornameeratorname{e}ps_{z,w})\mathbb{R}ightarrow (({\mathbf{D}}_y)_{y\in \mathcal{Q}}, \mathcal{O}_{z,w})$. \noindent \textit{Proof:} \corr{The same argument as for Claim 2 above extends directly to this slightly more general setting (we omit the details).} \operatornameeratorname{e}nd{itemize} With Claim 1 in hand (and the argument proving Lemma \operatorname{Re}f{lem:defo}) all we need to show is that for any subsequential limit in law $(({\mathbf{D}}_z)_{z\in \mathcal{Q}}, (\mathcal{O}^*_{z,w})_{z,w\in \mathcal{Q}})$ of $(({\mathbf{D}}_z^\operatornameeratorname{e}ps)_{z\in \mathcal{Q}}, (\mathcal{O}^\operatornameeratorname{e}ps_{z,w})_{z,w\in \mathcal{Q}})$ as $\operatornameeratorname{e}ps\to 0$, the \operatornameeratorname{e}mph{conditional law} of $(\mathcal{O}^*_{z,w})_{z,w\in \mathcal{Q}}$ given $({\mathbf{D}}_z)_{z\in \mathcal{Q}}$ satisfies the bullet points above Lemma \operatorname{Re}f{lem:defo}. 
That is: (a) $\mathcal{O}^*_{z,z}=1$ for all $z\in \mathcal{Q}$; (b) $\mathcal{O}^*_{z,w}=1-\mathcal{O}^*_{w,z}$ for all $z,w\in \mathcal{Q}$ distinct; (c) $\mathcal{O}^*_{z,w}$ is (conditionally) Bernoulli$(1/2)$ for any such $z,w$; and (d) for all $z,w_1,w_2\in \mathcal{Q}$ with $z\ne w_1, w_2$, if ${\mathbf{D}}_z$ separates $z$ from $w_1$ at the same time as it separates $z$ from $w_2$ then $\mathcal{O}^*_{z,w_1}=\mathcal{O}^*_{z,w_2}$; otherwise $\mathcal{O}^*_{z,w_1}$ and $\mathcal{O}^*_{z,w_2}$ are (conditionally) independent. Observe that (a) and (b) follow by definition of the $\mathcal{O}_{z,w}^\operatornameeratorname{e}ps$, and (c) follows from Claim 3 above. The first case of (d) also follows by definition, and the second follows from the definition of $\mathcal{O}_{z,w_1}^\operatornameeratorname{e}ps, \mathcal{O}_{z,w_2}^\operatornameeratorname{e}ps$ together with the branching property of $({\mathbf{D}}_z^\operatornameeratorname{e}ps)_{z\in \mathcal{Q}}$ and the convergence of the separation times.
\operatornameeratorname{e}nd{proofof} \subsection{Joint convergence of SLE, CLE and the order variables} The results of Sections \operatorname{Re}f{sec:conv_clesle} and \operatorname{Re}f{sec:conv_order} give the final combined result: \begin{proposition} \begin{eqnarray*} & (({\mathbf{D}}^\operatornameeratorname{e}ps_z)_{z\in \mathcal{Q}},(\mathcal{L}^\operatornameeratorname{e}ps_{z,i})_{z\in \mathcal{Q}, i\ge 1}, (\mathcal{B}^\operatornameeratorname{e}ps_{z,i})_{z\in \mathcal{Q}, i\ge 1},(\mathcal{O}^\operatornameeratorname{e}ps_{z,w})_{z,w\in \mathcal{Q}} ) & \\ & \mathbb{R}ightarrow & \\ & (({\mathbf{D}}_z)_{z\in \mathcal{Q}},(\mathcal{L}_{z,i})_{z\in \mathcal{Q}, i\ge 1}, (\mathcal{B}_{z,i})_{z\in \mathcal{Q}, i\ge 1},(\mathcal{O}_{z,w})_{z,w\in \mathcal{Q}} )& \operatornameeratorname{e}nd{eqnarray*} as $\operatornameeratorname{e}ps\downarrow 0$, with respect to the product topology $$\prod_\mathcal{Q} \mathcal{D}_z \times \prod_{\mathcal{Q}\times \mathbb{N}} \text{Hausdorff} \times \prod_{\mathcal{Q}\times \mathbb{N}} \text{Carath\'{e}odory viewed from } z \times \prod_{\mathcal{Q}\times\mathcal{Q}} \text{discrete}.$$ \label{prop:cle-conv} \operatornameeratorname{e}nd{proposition} \begin{proof} Since we know that all the individual elements in the above tuples converge, the laws are tight in $\operatornameeratorname{e}ps$. Combining Proposition \operatorname{Re}f{prop:convbranchingsleorder} and Corollary \operatorname{Re}f{cor:cleloopconv} \corr{(in particular, using that $(\mathcal{L}_{z,i})_{z\in \mathcal{Q}, i\ge 1}, (\mathcal{B}_{z,i})_{z\in \mathcal{Q}, i\ge 1}$ are deterministic functions of $({\mathbf{D}}_z)_{z\in \mathcal{Q}}$)} ensures that any subsequential limit has the correct law. \operatornameeratorname{e}nd{proof} \section{Liouville quantum gravity and mating of trees} \subsection{Liouville quantum gravity}\label{sec:LQG} Let $D\subset \mathbb{C}$ be a {simply connected} domain with harmonically non-trivial boundary. 
For $f,g\in C^\infty(D)$ define the Dirichlet inner product by \operatornameeratorname{e}qbn (f,g)_\nabla = \frac{1}{2\pi} \int_D \nabla f(z) \cdot \nabla g(z)\, d^2\hspace{-0.05cm}z. \operatornameeratorname{e}qen Let $H(D)$ be the Hilbert space closure of the subspace of functions $f\in C^\infty(D)$ for which $(f,f)_\nabla<\infty$, where we identify two functions that differ by a constant. Letting $(f_n)$ be an orthonormal basis for $H(D)$, the free boundary GFF $h$ on $D$ is defined by \operatornameeratorname{e}qbn h = \sum_{n=1}^{\infty} \alpha_n f_n, \operatornameeratorname{e}qen where $(\alpha_n)$ is a sequence of i.i.d.\ standard normal random variables \corr{and the convergence is almost sure in the space of generalized functions modulo constants}. The free boundary GFF is only defined modulo additive constant here, but we remark that there are several natural ways to fix the additive constant\corr{, for example, by requiring that testing the field against a fixed test function gives zero. If this is done in an arbitrary way (i.e., picking some arbitrary test function in the previous sentence) the resulting field almost surely lives in the space $H^{-1}_{\text{loc}}(D)$: this is the space of generalized functions whose restriction to any bounded \nina{domain} $U\subset D$ is an element of the Sobolev space $H^{-1}(U)$.} See \cite{SheGFF,Ber16} for more details. Let $\mathcal{S}=\mathbb{R}\times(0,\pi)$ denote the infinite strip. By, for example, \cite[Lemma 4.3]{DMS14}, $H(\mathcal{S})$ has an orthogonal decomposition $H(\mathcal{S})=H_1(\mathcal{S})\operatornamelus H_2(\mathcal{S})$, where $H_1(\mathcal{S})$ is the subspace of $H(\mathcal{S})$ consisting of functions (modulo constants) which are constant on vertical lines of the form $u+[0,\operatornameeratorname{i} \pi]$ and $H_2(\mathcal{S})$ is the subspace of $H(\mathcal{S})$ consisting of functions which have mean zero on all such vertical lines.
This leads to a decomposition $h=h_1+h_2$ of the free boundary GFF $h$ on $\mathcal{S}$, where $h_1$ (resp.\ $h_2$) is the projection of $h$ onto $H_1(\mathcal{S})$ (resp.\ $H_2(\mathcal{S})$). We call $h_2$ the \operatornameeratorname{e}mph{lateral component} of $h$. Now let $D\subset\mathbb{C}$ be as before, and let $\mathfrak h$ be an instance of the free-boundary Gaussian free field (GFF) on $D$ with the additive constant fixed in an arbitrary way. Set $h=\mathfrak h+f$, where $f$ is a (possibly random) continuous function on $D$. For $\delta>0$ and $z\in D$ let $h_\delta(z)$ denote the average of $h$ on the circle $\partial B_\delta(z)$ if $B_\delta(z)\subset D$; otherwise set $h_\delta(z)=0$. For $\gamma\in (\sqrt{2},2)$ and $\operatornameeratorname{e}ps=2-\gamma$ the field $h$ induces an area measure $\mu_h^\operatornameeratorname{e}ps$ on $D$, which is defined by the following limit in probability for any bounded open set $A\subseteq D$: $$ \mu_h^\operatornameeratorname{e}ps(A) = \lim_{\delta\rightarrow 0} (2\operatornameeratorname{e}ps)^{-1}\int_A \operatornameeratorname{e}xp\Big(\gamma h_\delta(z)\Big)\delta^{\gamma^2/2} \, d^2\hspace{-0.05cm}z. $$ Note that the definitions for $\operatornameeratorname{e}ps>0$ differ by a factor of $2\operatornameeratorname{e}ps$ from the definitions normally found in the literature. This is natural in the context of this paper, where we will be concerned with taking $\operatornameeratorname{e}ps\downarrow 0$ (see below). 
Indeed, for $\gamma=2$ (which will correspond to the limit as $\operatornameeratorname{e}ps\downarrow 0$) we define: $$ \mu_{h}(A) = \lim_{\delta\rightarrow 0} \int_A \Big(-h_\delta(z) +\log(1/\delta)\Big)\operatornameeratorname{e}xp(2h_\delta(z))\delta^2 \, d^2\hspace{-0.05cm}z.$$ If $f$ extends continuously to $\partial D$, boundary measures $\nu^\operatornameeratorname{e}ps_h$ and $\nu_h$ can be defined similarly by \begin{align*} \nu_{h}^{\operatornameeratorname{e}ps}(A) & = \lim_{\delta\rightarrow 0}\, (2\operatornameeratorname{e}ps)^{-1}\int_A \operatornameeratorname{e}xp\Big(\frac{\gamma}{2}h_\delta(z)\Big)\delta^{\gamma^2/4} \, dz,\\ \nu_{h}(A) & = \lim_{\delta\rightarrow 0}\, \int_A \Big(-\frac{h_\delta(z)}{2} +\log(1/\delta)\Big)\, \delta \, \operatornameeratorname{e}xp(h_\delta(z)) \, dz. \operatornameeratorname{e}nd{align*} See \cite{DS11, Ber17, Pow18chaos} for proofs of these facts. A pair $(D,h)$ defines a so-called \operatornameeratorname{e}mph{$\gamma$-Liouville quantum gravity (LQG) surface}. More precisely, a $\gamma$-LQG surface is an equivalence class of pairs $(D,h)$ where {$D$ is as above and $h$ is a distribution, and} we define two pairs $(D_1,h_1)$ and $(D_2,h_2)$ to be equivalent if there is a conformal map $\phi:D_1\to D_2$ such that \begin{equation}\label{eq:coc} h_1 = h_2\circ\phi+Q_\gamma \log|\phi'|,\qquad Q_\gamma:=2/\gamma+\gamma/2. \operatornameeratorname{e}nd{equation} With this definition, {if $h_1,h_2$ are absolutely continuous with respect to a GFF plus a continuous function we have} $\mu_{h_2}^\operatornameeratorname{e}ps=\phi_*(\mu_{h_1}^\operatornameeratorname{e}ps)$ and $\nu_{h_2}^\operatornameeratorname{e}ps=\phi_*(\nu_{h_1}^\operatornameeratorname{e}ps)$ for $\operatornameeratorname{e}ps\in (0,2-\sqrt{2})$. The analogous identities also hold for $\operatornameeratorname{e}ps=0$.
The \operatornameeratorname{e}mph{LQG disk} is an LQG surface of special interest, since it arises in scaling limit results concerning random planar maps, for example, \cite{BMdisk,GMdisk}. The following is our definition of the unit boundary length $\gamma$-LQG disk in the subcritical case. Our field is equal to $-2\gamma^{-1}\log(2\operatornameeratorname{e}ps)$ plus the field defined in, e.g.\ \cite{DMS14}: this is because we want it to have boundary length $1$ for our definition of $\nu_h^\operatornameeratorname{e}ps$ (which is $(2\operatornameeratorname{e}ps)^{-1}$ times the usual one). \begin{definition}[unit boundary length $\gamma$-LQG disk for $\gamma\in (\sqrt{2},2)$] \label{def:disk} Let $h_2$ be a field on the strip $\mathcal{S}=\mathbb{R}\times (0,\pi)$ with the law of the lateral component of a free boundary GFF on $\mathcal{S}$. Let $h_1^\operatornameeratorname{e}ps$ be a function on $\mathcal{S}$ such that $h^\operatornameeratorname{e}ps_1(s+\operatornameeratorname{i} y)=\mathcal{B}^\operatornameeratorname{e}ps_s$, where: \begin{itemize} \item[(i)] $(\mathcal{B}^\operatornameeratorname{e}ps_s)_{s\geq 0}$ has the law of $B_{2s}-(2/\gamma-\gamma/2)s$ conditioned to be negative for all time, for $B$ a standard Brownian motion started from $0$; \item[(ii)] $(\mathcal{B}^\operatornameeratorname{e}ps_{-s})_{s\geq 0}$ is independent of $(\mathcal{B}^\operatornameeratorname{e}ps_s)_{s\geq 0}$ and satisfies $(\mathcal{B}^\operatornameeratorname{e}ps_{-s})_{s\geq 0}\operatornameeratorname{e}qD(\mathcal{B}^\operatornameeratorname{e}ps_s)_{s\geq 0}$.
\operatornameeratorname{e}nd{itemize} Set $h_{\operatorname s}^\operatornameeratorname{e}ps=h_1^\operatornameeratorname{e}ps+h_2$ and let ${\widehat h^\operatornameeratorname{e}ps}$ be the distribution on $\mathcal{S}$ whose law is given by \operatornameeratorname{e}qb h_{\operatorname s}^\operatornameeratorname{e}ps-2\gamma^{-1}\log \nu^\operatornameeratorname{e}ps_{h_{\operatorname s}^\operatornameeratorname{e}ps}(\partial \mathcal{S}) \qquad \text{reweighted\,\,by\,\,} \nu^\operatornameeratorname{e}ps_{h^\operatornameeratorname{e}ps_{\operatorname s}}(\partial \mathcal{S})^{4/\gamma^2-1}. \label{eq:reweight} \operatornameeratorname{e}qe Then the surface defined by $(\mathcal{S},{\widehat h^\operatornameeratorname{e}ps})$ has the law of a unit boundary length $\gamma$-LQG disk. \operatornameeratorname{e}nd{definition} See \cite[Definition 2.4 and Remark 2.5]{HS19} for a proof that the above does correspond to $-2\gamma^{-1}\log(2\operatornameeratorname{e}ps)$ + the unit boundary length disk of \cite{DMS14}. Note that (see for example \cite[Lemma 4.20]{DMS14}) $\nu_{h_{\operatorname s}}^\operatornameeratorname{e}ps(\partial \mathcal{S})$ is finite for each fixed $\operatornameeratorname{e}ps>0$, so that the above definition makes sense. In fact, we can say something stronger, {namely Lemma \operatorname{Re}f{lem:tail} just below. 
We remark that the power $1/17$ in the lemma has not been optimized.}
\begin{lemma}\label{lem:tail}
There exists $C\in (0,\infty)$ not depending on $\eps\in (0,2-\sqrt{2})$ such that
$$\mathbbm P[ \nu_{h_{\operatorname s}^\eps}^\eps(\partial \mathcal{S})>x]\le Cx^{-1/17} \text{ for all } x\ge 1.$$
\corr{Moreover, for any fixed $x$, $\mathbbm P[ \nu_{h_{\operatorname s}^\eps}^\eps(((-\infty,-K)\cup(K,\infty))\times \i \{0,\pi\})>x]\to 0$ as $K\to \infty$, uniformly in $\eps$.}
\corr{Finally,} if $h_{\operatorname s}$ is defined in the same way as $h_{\operatorname s}^\eps$ above but instead letting $(\mathcal{B}_s)_{s\geq 0}$ have the law of $(-\sqrt{2})$ times a 3-dimensional Bessel process, then we also have that
$$\mathbbm P[ \nu_{h_{\operatorname s}}(\partial \mathcal{S})>x]\le Cx^{-1/17} \text{ for all } x\ge 1.$$
\end{lemma}
\begin{proof}
Let us first deal with the subcritical measures. In this case, we write
$$b_k^\eps=\nu_{h_2}^\eps([k,k+1]\times \{0,\i\pi\})$$
for $k\in \mathbb{Z}$. Then the law of $b_k^\eps$ does not depend on $k$ since the law of $h_2$ is translation invariant, see for example \cite[Remark 5.48]{Ber16}. Furthermore, by \cite[Theorem 1.1]{REMY}, $\mathbb{E}((b_0^\eps)^q)$ is uniformly bounded in $\eps$ for any $q<1$. (The result of \cite{REMY} shows uniform boundedness of the moment for a field that differs from $h_2$ in $[0,1]\times \{0\}$ or $[0,1]\times \{\i \pi\}$ by a centered Gaussian function with uniformly bounded variance.)
Letting $a_k^\eps=\sup_{s\in[k,k+1]} e^{(\gamma/2) \mathcal{B}^\eps_s}$ we then have that
$$\nu_{h_{\operatorname s}^\eps}^\eps(\partial \mathcal{S})\le \sum_{k\in \mathbb{Z}} a_k^\eps b_k^\eps.$$
Thus, since $\sum_{k\in\mathbb{Z}} (|k|\vee 1)^{-2}<10$, a union bound gives
\eqb
\begin{split}
\mathbbm P[ \nu_{h_{\operatorname s}^\eps}^\eps(\partial \mathcal{S})>x] \leq \sum_{k\in\mathbb{Z}} \left( \mathbbm P[ a_k^\eps > x^{1/2}(|k|\vee 1)^{-4} ]+ \mathbbm P[ b_k^\eps > 0.1x^{1/2}(|k|\vee 1)^{2} ]\right).
\end{split}
\label{eq18}
\eqe
Taking $q=3/4$ (for example), using the uniform bound on $\mathbb{E}((b_k^\eps)^q)$ and applying Chebyshev's inequality gives that $ \sum_{k\in \mathbb{Z}} \mathbbm P[ b_k^\eps > 0.1x^{1/2}(|k|\vee 1)^{2} ] \le c_0 x^{-3/8}$ for some universal constant $c_0$.
Furthermore, since $\mathcal{B}^\eps$ is stochastically dominated by $(-\sqrt{2})$ times a three dimensional Bessel process, see \cite[Lemma 12.4]{KRV18}, we have that for $(Z(t))_{t\geq 0}$ such a process and $(W(t))_{t\geq 0}$ a standard linear Brownian motion:
\eqbn
\begin{split}
\mathbbm P[ a_k^\eps > x^{1/2}(|k|\vee 1)^{-4} ] &\leq \mathbbm P\Big[ \inf_{s\in[k,k+1]} Z(s) < \gamma^{-1} \log\big(x^{-1/2}(|k|\vee 1)^{4}\big) \Big]\\
&\leq \mathbbm P\Big[ \inf_{s\in[k,k+1]} |W(s)| < \gamma^{-1} \log\big(x^{-1/2}(|k|\vee 1)^{4}\big) \Big]^3
\end{split}
\eqen
for all $x$ and $k${, where, to obtain the second inequality, we used that $Z\eqD|(W_1,W_2,W_3)|$ for $W_1,W_2,W_3$ independent copies of $W$.} The probability on the right side is 0 if $|k|\leq x^{1/8}$ and otherwise it is bounded above by $c_1|k|^{-1/2} \gamma^{-1} \log\big(x^{-1/2}(|k|\vee 1)^{4}\big)$ where $c_1$ is another universal constant. Therefore, for a final universal constant $c_2>0$,
$$ \sum_{k\in\mathbb{Z}}\mathbbm P[ a_k^\eps > x^{1/2}(|k|\vee 1)^{-4} ] \leq 2\sum_{k\in\mathbb{Z}\,:\,|k|>x^{1/8}} \Big(c_1|k|^{-1/2} \gamma^{-1} \log\big(x^{-1/2}(|k|\vee 1)^{4}\big)\Big)^3 \leq c_2 x^{-1/17}.$$
\corr{The same bounds yield the second statement of the lemma}. \corr{Finally,} exactly the same proof works in the case of the critical measure, using \cite[Section 1.1.1]{REMY} to see that $b_k=\nu_{h_2}([k,k+1])$ has a finite $q$th moment, which does not depend on $k$ by translation invariance.
\end{proof}\\
We may now define the critical unit boundary length LQG disk as follows.
\begin{definition}[unit boundary length 2-LQG disk]\label{def:critical-disk}
Letting $h_{\operatorname s}$ be as in Lemma \ref{lem:tail} we define the unit boundary length 2-LQG disk to be the surface $(\mathcal{S}, \widehat h)$, where
$$\widehat h:=h_{\operatorname s}-\log \nu_{h_{\operatorname s}}(\partial \mathcal{S}).$$
\end{definition}
{Note that $\nu_{h_{\operatorname s}}(\partial \mathcal{S})$ is finite by Lemma \ref{lem:tail}.}
\begin{remark}
Readers may have previously encountered the above as the definition of a quantum disk with \emph{two marked boundary points}. A quantum surface with $k$ marked points is an equivalence class of $(D,h,x_1,\dots, x_k)$ with $x_1,\dots, x_k\in \overline{D}$, using the equivalence relation described by \eqref{eq:coc}, but with the additional requirement that $\phi$ maps marked points to marked points. In this paper we will use Definitions \ref{def:disk} and \ref{def:critical-disk} to define specific equivalence class representatives of quantum disks, but we will always consider them as quantum surfaces without any marked points. That is, we will consider their equivalence classes under the simple relation \eqref{eq:coc}.
\end{remark}
The following lemma says that the subcritical disk converges to the critical disk as $\eps\downarrow 0$ (equivalently, $\gamma\uparrow 2$).
{We say that a sequence of measures $(\bar\mu_n)_{n\in\mathbb{N}}$ on a metric space $E$ (equipped with the Borel $\sigma$-algebra) converges weakly to a measure $\bar\mu$ if for all $A\subseteq E$ such that $\bar\mu(\partial A)=0$ we have $\bar\mu_n(A)\rightarrow \bar\mu(A)$.}
\begin{lemma}\label{lem:disk-conv}
For $\eps>0$ let $\widehat{h}^\eps$ be the field of Definition \ref{def:disk} and $\widehat{h}$ be the field of Definition \ref{def:critical-disk}. Then $(\widehat{h}^\eps,\mu^\eps_{\widehat{h}^\eps},\nu^\eps_{\widehat{h}^\eps})\Rightarrow (\widehat{h},\mu_{\widehat{h}},\nu_{\widehat{h}})$, where the first coordinate is equipped with the \corr{$H^{-1}_{\mathrm{loc}}(\mathcal{S})$} topology and the second and third coordinates are equipped with the weak topology of measures on $\mathcal{S}$ and $\partial \mathcal{S}$ respectively. \label{prop:disk-conv}
\end{lemma}
\begin{proof}
To conclude it is sufficient to prove the following, for an arbitrary sequence $\eps_n\downarrow 0$:
\begin{enumerate}[(i)]
\item we have convergence in law along the sequence $\epn$ if we replace $\widehat h$ by $h_{\operatorname s}$, and $\widehat h^\epn$ by $h_{\operatorname s}^\epn$ for every $n$; and
\item there exists a coupling of the $(\nu_{h^\epn_{\operatorname s}})$ such that $\nu_{h^\epn_{\operatorname s}}^\epn(\partial \mathcal{S})^{4/\gamma^2-1}\rightarrow 1$ in $L^1$ as $n\to \infty$.
\end{enumerate}
To see (i), first observe that the processes $\mathcal{B}^\eps$ converge to $\mathcal{B}$ in law as $\eps\to 0$, with respect to the topology of uniform convergence on compacts of time. \corr{Indeed for any fixed $\delta>0$, if $T_\delta^\eps$ (resp.\ $T_\delta$) is the first time that $\mathcal{B}^\eps$ (resp.\ $\mathcal{B}$) hits $-\delta$, it is easy to see that $\mathcal{B}^\eps(\cdot+T_\delta^\eps)$ converges to $\mathcal{B}(\cdot+T_\delta)$ in law in the specified topology as $\eps\to 0$: a consequence of the fact that the drift coefficient in $\mathcal{B}^\eps$ goes to $0$, and by applying the Markov property at time $T_\delta^\eps, T_\delta$. Moreover, $T_\delta, T_\delta^\eps$ converge to $0$ in probability as $\delta\to 0$, uniformly in $\eps$: this is true since $T_\delta,T_\delta^\eps$ are stochastically dominated by their counterparts for non-conditioned (drifted) Brownian motion, and the result plainly holds for the non-conditioned versions. Combining these observations yields the assertion.} We may therefore couple ${h_{\operatorname s}^\epn}$ and $h_{\operatorname s}$ so that their lateral components are identical, and the components that are constant on vertical lines converge a.s.\ on compacts as $n\to \infty$.
For this coupling, the result of \cite{APS18two} implies that
\eqb
\nu^\epn_{h_{\operatorname s}^\epn}(A)\to \nu_{h_{\operatorname s}}(A) \text{ and } \mu^\epn_{h_{\operatorname s}^\epn}(U)\to \mu_{h_{\operatorname s}}(U)\label{eq:aps18}
\eqe
in probability as $n\to \infty$, for any bounded subsets $A\subset \partial \mathcal{S}$ and $U\subset \mathcal{S}$. More precisely \cite[Section 4.1.1--4.1.2]{APS18two} proves that $\nu_{h}^\epn(A)\to \nu_h(A)$, when $h$ is a specific field on $\mathcal{S}$ that differs from $h_{\operatorname s}$ by a bounded continuous function on $A$ (similarly for $\mu$). Since adding a continuous function $f$ to $h$ modifies the boundary measure locally by $\exp((\gamma/2)f)$ and the bulk measure by $\exp(\gamma f)$ we deduce \eqref{eq:aps18}. To conclude that
$$(h_{\operatorname s}^\epn,\nu^\epn_{h_{\operatorname s}^\epn}, \mu^\epn_{h_{\operatorname s}^\epn})\to (h_{\operatorname s},\nu_{h_{\operatorname s}},\mu_{h_{\operatorname s}})$$
in probability for this coupling (with the correct topology), and thus complete the proof of (i), it remains to show that $\nu_{h_{\operatorname s}^\epn}^\epn(\partial \mathcal{S})\to \nu_{h_{\operatorname s}}(\partial \mathcal{S})$ and $\mu_{h_{\operatorname s}^\epn}^\epn(\mathcal{S})\to \mu_{h_{\operatorname s}}( \mathcal{S})$ in probability as $n\to \infty$.
For this, \corr{we use the second assertion of Lemma \ref{lem:tail}} \corr{together with the fact that } $\nu_{h_{\operatorname s}}(\partial\mathcal{S})=\lim_{K\to \infty} \nu_{h_{\operatorname s}}((-K,K)\times \i \{0,\pi\})$ by definition. Combining with \eqref{eq:aps18} yields the desired conclusion for the boundary measures. A similar argument can be applied for the bulk measures, where we may use, for example \cite[Theorem 1.2]{AG19} or \cite[Theorem 1.2]{ARS} to get the uniform $q$th moment bound for $q<1$ as in the proof of Lemma \ref{lem:tail}. For (ii), first observe that
$$\nu_{h^{\eps_n}_{\operatorname s}}^{\eps_n}(\partial \mathcal{S})^{4/\gamma^2-1}\Rightarrow 1$$
in law since
$$4/\gamma^2-1\rightarrow 0 \text{ and } \nu_{h^{\eps_n}_{\operatorname s}}^{\eps_n}(\partial \mathcal{S})\rightarrow\nu_{h_{\operatorname s}}(\partial \mathcal{S}).$$
Furthermore, Lemma \ref{lem:tail} gives the uniform integrability of $\nu_{h^\eps_{\operatorname s}}^\eps(\partial \mathcal{S})^{4/\gamma^2-1}$ in $\eps$. Combining these two results we get (ii).
\end{proof}
\begin{remark}\label{rmk:disk-conv3}
We reiterate that $\mu_{\widehat{h}}(\mathcal{S})<\infty$ and $\nu_{\widehat{h}}(\partial\mathcal{S})=1$ almost surely. Moreover, we have the convergence $\mu_{\widehat h^\eps}^\eps(\mathcal{S})\Rightarrow \mu_{\widehat h}(\mathcal{S})<\infty$ as $\eps\to 0$.
\end{remark}
\begin{remark}\label{rmk:disk-conv2}
For $b>0$ we define the $b$-boundary length disk to be a surface with the law of $(\mathcal{S},h^b)$, where $h^b=h+2\gamma^{-1}\log(b)$ for $h$ as in Definition \ref{def:disk} or \ref{def:critical-disk}. Lemma \ref{prop:disk-conv} also holds if we assume all the disks are $b$-boundary length disks.
\end{remark}
The fields that appear in the statement of our main theorem are defined as follows.
\begin{definition}
We define fields $h^\eps$ (resp. $h$) to be parameterizations of unit boundary length $\gamma$-LQG disks (resp. the 2-LQG disk) by $\mathbb{D}$ instead of $\mathcal{S}$. More specifically we take $\phi:\mathbb{D}\to \mathcal{S}$ to be the conformal map from $\mathbb{D}$ to $\mathcal{S}$ that sends $1,-1,\i$ to $+\infty,-\infty,\i\pi$, respectively. Then we set
$$h^\eps=\widehat h^\eps \circ\phi+Q_\gamma \log|\phi'| \text{ and } h=\widehat h \circ\phi+2\log|\phi'|,$$
where $\widehat h^\eps$ (resp. $\widehat{h}$) is the field in the strip $ \mathcal{S}$ corresponding to Definition \ref{def:disk} (resp. Definition \ref{def:critical-disk}). \label{conv:disk}
\end{definition}
\begin{remark}\label{rmk:disk-conv}
Lemma \ref{lem:disk-conv} clearly also implies the convergence
$$(h^\eps,\mu^\eps_{h^\eps}, \nu_{h^\eps}^\eps)\Rightarrow (h,\mu_h,\nu_h)$$
as $\eps\to 0$ (with respect to $\corr{H^{-1}_{\mathrm{loc}}(\mathbb{D})}$ convergence in the first coordinate, and weak convergence of measures on $\mathbb{D},\partial \mathbb{D}$ in the final coordinates).
\end{remark}
In fact, it implies the convergence of various embeddings of quantum disks. Of particular use to us will be the following:
\begin{lemma}\label{cor:disc_mapped}
Suppose that for each $\eps$, $\widehat{h}^\eps$ is as in Remark \ref{rmk:disk-conv2} for some $b>0$ and that $\widetilde{h}^\eps$ is defined by: choosing a point $z^\eps$ from $\mu^\eps_{\widehat{h}^\eps}$ in $\mathcal{S}$; defining $\psi^\eps:\mathcal{S}\to \mathbb{D}$ conformal such that $\psi^\eps(z^\eps)=0$ and $(\psi^\eps)'(z^\eps)>0$; and setting
$$\widetilde{h}^\eps:= \widehat{h}^\eps \circ (\psi^\eps)^{-1}\corr{+Q_\gamma\log |((\psi^\eps)^{-1})'|.}$$
Suppose similarly that $(\widetilde{h}, \widetilde{\mu})$ is defined by: taking the field $\widehat{h}$ in Remark \ref{rmk:disk-conv2} with the same $b>0$, picking a point $z$ from $\mu_{\widehat{h}}$; taking $\psi:\mathcal{S}\to \mathbb{D}$ conformal with $\psi'(z)>0$ and $\psi(z)=0$; and setting
$$\widetilde{h}=\widehat{h}\circ\psi^{-1} \corr{+2\log|(\psi^{-1})'|}\, , \, \widetilde{\mu}=\mu_{\widetilde{h}}.$$
Then as $\eps\to 0$, we have that
$$(\widetilde h^\eps,\mu^\eps_{\widetilde h^\eps})\Rightarrow (\widetilde h, \widetilde \mu).$$
Moreover, for any $m>0$
\begin{equation}
\label{eq:nomasstoboundary}
\mathbb{P}(\mu^\eps_{\widetilde{h}^\eps}( \mathbb{D}\setminus (1-\delta) \mathbb{D})>m) \to 0 \text{ as } \delta\to 0
\end{equation}
uniformly in
$\eps$. This convergence is also uniform over $b\in[0,C]$ for any $0<C<\infty$.
\end{lemma}
\begin{proof}
We assume that $b=1$; the result for other $b$ and the uniform convergence in \eqref{eq:nomasstoboundary} follows immediately from the definition in Remark \ref{rmk:disk-conv2}. The proof then follows from Lemma \ref{lem:disk-conv}. We take a coupling where the convergence is almost sure: in particular, the fields $\widehat h^\eps$ converge almost surely to $\widehat h$ in $H^{-1}_{\corr{\mathrm{loc}}}(\mathcal{S})$ and the measures $\mu_{\widehat h^\eps}^\eps$ converge weakly almost surely to $\mu_{\widehat h}$ in $\mathcal{S}$. This means that we can sample a sequence of $z^\eps$ from the $\mu_{\widehat h^\eps}^\eps$ and $z$ from $\mu_{\widehat h}$, such that $z^\eps\to z\in \mathcal{S}$ almost surely. Since $z\in \mathcal{S}$ is at positive distance from $\partial \mathcal{S}$, this implies that the conformal maps $\psi^\eps$ converge to $\psi$ almost surely on compacts of $\mathcal{S}$ and therefore that $\widetilde h^\eps \to \widetilde h$ in $H^{-1}_{\corr{\mathrm{loc}}}(\mathbb{D})$ and $\mu_{\widetilde h^\eps}^\eps$ converges weakly to $\widetilde \mu$. Finally, \eqref{eq:nomasstoboundary} follows from the convergence proved above, and the fact that it holds for the limit measure $\mu_{\widetilde h}$.
\end{proof}
{Later, we will also need to consider fields obtained from the field $\widetilde h^\eps$ of Lemma \ref{cor:disc_mapped} via a random rotation.
For this purpose, we record the following remark.}
\begin{remark}\label{rmk:changing_lengths}
Suppose that $h_n$ are a sequence of fields coupled with some rotations $\theta_n$ such that $\bar{h}_n=h_n\circ \theta_n-2\gamma_n^{-1}\log \nu_{h_n}(\partial \mathbb{D})$ has the law of $\widetilde h^{\eps_n}$ from Lemma \ref{cor:disc_mapped} with $b=1$, for some $\epn\downarrow 0$, $\gamma_n=\gamma(\eps_n)$. Suppose further that $(h_n, \nu_{h_n}(\partial \mathbb{D}),\mu_{h_n}(\mathbb{D}))\Rightarrow (h,\nu^*, \mu^*)$ in $H^{-1}_{\corr{\mathrm{loc}}}(\mathbb{D})\times \mathbb{R} \times \mathbb{R}$ as $n\to \infty$. Then $\nu^*=\nu_h(\partial \mathbb{D})$ and $\mu^*=\mu_h(\mathbb{D})$ almost surely. Indeed, $(h_n, \nu_{h_n}(\partial \mathbb{D}),\mu_{h_n}(\mathbb{D}),\theta_n,\bar{h}_n)$ is tight in $n$, and any subsequential limit $(h,\nu^*,\mu^*,\theta,\bar{h})$ has $(h,\nu^*,\mu^*)$ coupled as above. Since $\mu_{h_n}(A)=(\nu_{h_n}(\partial \mathbb{D}))^2\mu_{\bar{h}_n}(\theta_n^{-1}(A))$ for every $n$ and $A\subset \mathbb{D}$ it follows from Lemma \ref{cor:disc_mapped} that $\mu^*=(\nu^*)^2\mu_{\bar{h}}(\mathbb{D})$ and $\nu_{\bar{h}}(\partial \mathbb{D})=1$ a.s. On the other hand, it is not hard to see that $\bar{h}$ must be equal to $h\circ\theta-\log \nu^*$ a.s., which implies the result.
\end{remark}
\subsection{Mating of trees}
Mating of trees theory, \cite{DMS14}, provides a powerful encoding of LQG and SLE in terms of Brownian motion. We will state the version in the unit disk $\mathbb{D}$ below. Let $\alpha\in(-1,1)$ and let $Z^{(c)}$ be $c$ times a standard planar Brownian motion with correlation $\alpha$, started from $(1,0)$ or $(0,1)$.
Condition on the event that $Z$ first leaves the first quadrant at the origin $(0,0)$; this is a zero probability event but can be made sense of via a limiting procedure, see for example \cite[Proposition 4.2]{AG19}. We call the resulting conditioned process (restricted until the time at which the process first leaves the first quadrant) a \emph{Brownian cone excursion with correlation $\alpha$}. Note that we use the same terminology for the resulting process for any $c$ and either choice of $(1,0)$ or $(0,1)$ for the starting point. To state the mating-of-trees theorem (disk version) we first introduce some notation. Let $(\mathbb{D},h^\eps)$ denote a unit boundary length $\gamma$-LQG disk for $\gamma\in(\sqrt{2},2)$, embedded as described in Definition \ref{conv:disk}. Let $\eta^\eps$ denote a space-filling SLE$_{\kappa'}$ in $\mathbb{D}$, starting and ending at 1, which is independent of $h^\eps$. \corr{Recall that this is defined from a branching SLE$_{\kappa'}$ as described in Section \ref{sec:sf_sle}, where the branch targeted towards $z\in \mathbb{D}$ is denoted by $\eta_z^\eps$ (one can obtain $\eta_z^\eps$ from $\eta^\eps$ by deleting time intervals on which $\eta^\eps$ is exploring regions of $\mathbb{D}$ that have been disconnected from $z$).} Parametrize $\eta^\eps$ by the area measure induced by $h^\eps$.
Let $Z^\eps=(L^\eps,R^\eps)$ denote the process started at $(0,1)$ and ending at $(0,0)$ which encodes the evolution of the left and right boundary lengths of $\eta^\eps$: see Figure \ref{fig:spacefilling_bl}.
\begin{figure}
\centering
\includegraphics[width=\textwidth]{spacefilling_bl}
\caption{The left figure is an illustration of the branch of a space-filling SLE$_{\kappa'}$ (${\kappa'}>4$) towards some point $z\in \mathbb{D}$, and stopped at some time before it reaches $z$. The space-filling SLE itself will fill in the monocolored components that are separated from $z$ as it creates them, so if $t$ is equal to the total $\gamma$-LQG area of the grey shaded region on the right figure, then the space-filling SLE has visited precisely this grey region at time $t$. We then define the left (resp.\ right) boundary length of the space-filling SLE at time $t$ to be the $\gamma$-LQG boundary length of the red (resp.\ blue) curve shown on the right figure.}
\label{fig:spacefilling_bl}
\end{figure}
{The following theorem follows essentially from \cite{DMS14}. For precise statements, see \cite[Theorem 2.1]{FDMOT} for the law of $Z^\eps$ and see \cite[Theorem 7.3]{FDMOT} for the law of the monocolored components.}
\begin{theorem}[\cite{DMS14,FDMOT}]
In the setting above, $Z^\eps$ has the law of a Brownian cone excursion with correlation $-\cos(\pi\gamma^2/4)$. The pair $(h^\eps,\eta^\eps)$ is measurable with respect to the $\sigma$-algebra generated by $Z^\eps$.
Furthermore, if $z$ is sampled from $\mu_{h^\eps}^\eps$ renormalized to be a probability measure, then the monocolored complementary components of $\eta^\eps_z$ define independent $\gamma$-LQG disks conditioned on their $\gamma$-LQG boundary lengths {and areas}, i.e., if we condition on the ordered sequence of boundary lengths {and areas} of the monocolored domains $U$ disconnected from $z$ by $\eta^\eps_z$ then the corresponding LQG surfaces $(U,h^\eps|_U)$ are independent $\gamma$-LQG disks with the given boundary lengths and areas. \label{thm:MOT}
\end{theorem}
\begin{remark}\label{rmk:varZ}
In fact, we now know from \cite{ARS} that the variance $c^2$ of the Brownian motion from which the law of $Z^\eps$ can be constructed is equal to $1/(\eps\sin(\pi\gamma^2/4))$, where $\gamma=\gamma(\eps)=2-\eps$. In particular, the variance is of order $\eps^{-2}$.
\end{remark}
For each fixed $z\in\mathbb{D}$ there is a natural parametrization of $\eta^\eps_z$ called its \emph{quantum natural parametrization} which is defined in terms of $Z^\eps$ as follows. First define $\mathfrak t=\inf\{ t\geq 0\,:\,\eta^\eps(t)=z \}$ to be the time at which $\eta^\eps$ first hits $z$.
Then let $\mathcal{I}^{\eps,\mathfrak t}$ denote the set of $s\in[0,\mathfrak t]$ for which we \emph{cannot} find a cone excursion $J\subset[0,\mathfrak t]$ \corr{(that is, $J=[t_1,t_2]\subset [0,\mathfrak t]$ such that $(X^\eps_s,Y^\eps_s)\ge (X^\eps_{t_2},Y^\eps_{t_2})$ on $J$, and either $X^\eps_{t_1}=X^\eps_{t_2}$ or $Y^\eps_{t_1}=Y^\eps_{t_2}$)} such that $s\in J$. We call the times in $\mathcal{I}^{\eps,\mathfrak t}$ \emph{ancestor-free times relative to time $\mathfrak t$.} It is possible to show (see \cite[Section 1.4.2]{DMS14}) that the local time of $\mathcal{I}^{\eps,\mathfrak t}$ is well-defined.\footnote{This local time (and the corresponding local time for $\eps=0$ defined below) is only defined up to a deterministic multiplicative constant. We fix this constant in the proof of Lemma \ref{lem:BM-conv}.} Let $(\ell^{\eps,\mathfrak t}_t)_{t\geq 0}$ denote the increasing function describing the local time of $\mathcal{I}^{\eps,\mathfrak t}$ such that $\ell^{\eps,\mathfrak t}_0=0$ and $\ell^{\eps,\mathfrak t}_t= \ell^{\eps,\mathfrak t}_{\mathfrak t}$ for $t\geq \mathfrak t$. Then let $T^{\eps,\mathfrak t}_t$ for $t\in[0,\ell_{\mathfrak t}^{\eps,\mathfrak t}]$ denote the right-continuous inverse of $\ell^{\eps,\mathfrak t}$.
\begin{definition}[Quantum natural parametrization]
With the above definitions
$$(\eta^\eps_{z}(T^{\eps,\mathfrak t}_t))_{t\in[0,\ell_{\mathfrak t}^{\eps,\mathfrak t}]}$$
defines a parametrization of $\eta^\eps_z$ which is called its quantum natural parametrization. \label{def:QNT}
\end{definition}
\subsection{Convergence of the mating of trees Brownian functionals}\label{sec:Bfs}
Let $Z^\eps$ be the process from Theorem \ref{thm:MOT} and let $X^\eps=(A^\eps,B^\eps)$, where
$$ A^\eps_t=a_\eps(L^\eps_t+R^\eps_t),\qquad B^\eps_t=R^\eps_t-L^\eps_t,\qquad a_\eps^2=\frac{1+\cos(\pi\gamma^2/4)}{1-\cos(\pi\gamma^2/4)},\qquad t\geq 0. $$
\begin{figure}[h]
\centering
\includegraphics[width=\textwidth]{cone}
\caption{\corr{The transformation from $Z^\eps$ to $X^\eps$.}}
\end{figure}
Note that $a_\eps=\eps\pi/2+o(\eps)$ and that $X^\eps$ is an uncorrelated Brownian excursion with variance {$2(1+\cos(\pi\gamma^2/4))(\eps \sin(\pi\gamma^2/4))^{-1}=\pi+o(\eps)$} in the cone $\{z\in \mathbb{C}: \arg(z)\in [-\pi/2+\tan^{-1}(a_\eps),\pi/2-\tan^{-1}(a_\eps))\}$, starting from $(a_\eps,1)$ and ending at the origin.
Also define the processes $\widehat X^{\eps,\mathfrak t}=(\widehat A^{\eps,\mathfrak t},\widehat B^{\eps,\mathfrak t})$ for each $\mathfrak t<\mu^\eps(\mathbb{D})$, by setting
$$\widehat X^{\eps,\mathfrak t}_t= X^{\eps}_{T^{\eps,\mathfrak t}_t}\, ; \, t> 0. $$
We will prove in this subsection that all the quantities defined above have a joint limit in law as $\eps\downarrow 0$. Namely, let us consider an uncorrelated Brownian excursion $X=(A,B)$ in the right half plane from $(0,1)$ to $(0,0)$; {the process can for example be constructed via a limiting procedure where we condition a standard planar Brownian motion from $(0,1)$ to $(0,0)$ on first leaving $\{z\,:\,\operatorname{Re}(z)>-\delta \}$ at a point $\widehat z$ where $|\operatorname{Im}(\widehat z)|<\delta$}. For $\mathfrak{t}$ less than the total duration of $X$, let $\mathcal{I}^{\mathfrak t}\subset[0,\mathfrak t]$ denote the set of times at which $A$ has a backward running infimum relative to time $\mathfrak t$, i.e., $s\in\mathcal{I}^{\mathfrak t}$ if $A_u>A_s$ for all $u\in(s,\mathfrak t]$. Let $(\ell^{\mathfrak t}_t)_{t\geq 0}$ denote the increasing function describing the local time of $\mathcal{I}^{\mathfrak t}$ such that $\ell^{\mathfrak t}_0=0$ and $\ell^{\mathfrak t}_t= \ell^{\mathfrak t}_{\mathfrak t}$ for $t\geq \mathfrak t$.
Then let $T^{\mathfrak t}$ denote the right-continuous inverse of $\ell^{\mathfrak t}$, and define $\widehat X^{\mathfrak t}=(\widehat A^{\mathfrak t},\widehat B^{\mathfrak t})$ by $\widehat X^{\mathfrak t}_t= X_{T^{\mathfrak t}_t}$.\\
We set
$$\mathfrak{be}^\eps=(X^\eps,(\mathcal{I}^{\eps,\mathfrak t})_{\mathfrak t}, (\ell^{\eps,\mathfrak t})_{\mathfrak t}, (T^{\eps,\mathfrak t})_{\mathfrak t}, (\widehat X^{\eps,\mathfrak t})_{\mathfrak t} )$$
and
$$\mathfrak{be}=(X,(\mathcal{I}^{\mathfrak t})_{\mathfrak t}, (\ell^{\mathfrak t})_{\mathfrak t}, (T^{\mathfrak t})_{\mathfrak t}, (\widehat X^{\mathfrak t})_{\mathfrak t} )$$
where the indexing is over $\mathfrak t\in \mathbb{R}_+\cap \mathbb{Q}$. Then we have the following convergence.
\begin{lemma}\label{lem:BM-conv}
$\mathfrak{be}^\eps\Rightarrow \mathfrak{be}$ as $\eps\downarrow 0${, where we use the Hausdorff topology on the second coordinate and the Skorokhod topology on the remaining coordinates.}
\end{lemma}
\begin{proof}
First we consider the infinite volume case where $X^\eps$ is a two-sided planar Brownian motion started from $0$, with the same variance and covariance as before, namely variance $2(1+\cos(\pi\gamma^2/4))(\eps \sin(\pi\gamma^2/4))^{-1}=\pi+o(\eps)$ and covariance $0$.
In this infinite volume setting we define $(\mathcal{I}^{\eps,{\mathfrak t}})_{\mathfrak t}, (\ell^{\eps,{\mathfrak t}})_{\mathfrak t}, (T^{\eps,{\mathfrak t}})_{\mathfrak t}, (\widehat X^{\eps,{\mathfrak t}})_{\mathfrak t}$ similarly to before, such that for $\eps\in (0,2-\sqrt{2})$, $\mathcal{I}^{\eps,{\mathfrak t}}\subset(-\infty,{\mathfrak t})$ is the set of ancestor-free times relative to time ${\mathfrak t}$, $\ell^{\eps,{\mathfrak t}}:\mathbb{R}\to(-\infty,0]$ is an increasing process given by the local time of $\mathcal{I}^{\eps,{\mathfrak t}}$ satisfying $\ell^{\eps,{\mathfrak t}}_s\equiv 0$ for $s\geq {\mathfrak t}$, $T^{\eps,{\mathfrak t}}:(-\infty,0)\to(-\infty,0)$ is the right-inverse of $\ell^{\eps,{\mathfrak t}}$, and $\widehat X^{\eps,{\mathfrak t}}_s=X^{\eps}_{T^{\eps,{\mathfrak t}}_s}$. We make a similar adaptation of the definition to the infinite volume setting for $\eps=0$; in particular, $X$ is ($\sqrt{\pi}$ times) a {standard} uncorrelated two-sided planar Brownian motion. By translation invariance in law of $X^\eps$ and $X$, and since $X^\eps$ and $X$ determine the rest of the objects in question, it is sufficient to show convergence for ${\mathfrak t}=0$.
\nina{First we claim that for all $\eps\in[0,2-\sqrt{2})$ we can sample $\mathcal{I}^{\eps,0}$ by considering a PPP in the second quadrant with intensity $dx\times y^{-\alpha(\eps)}dy$ for $\alpha(\eps)=1+2/(2-\eps)^2=1+2/\gamma^2$, such that the points $(x,y)$ of this PPP are in bijection with the complementary components of $\mathcal{I}^{\eps,0}$ with $y$ representing the length of the component and $x$ representing the relative ordering of the components. (In the case $\eps=0$, $\mathcal{I}^{0,0}$ refers to $\mathcal{I}^0$.) For $\eps=0$ the claim follows since $A$ restricted to the complementary components of $\mathcal{I}^0$ has law given by the Brownian excursion measure. For $\eps\in(0,2-\sqrt{2})$ the claim follows from \cite{DMS14}: It is explained in \cite[Section 1.4.2]{DMS14} that $\mathcal{I}^{\eps,0}$ has the law of the zero set of some Bessel process, which verifies the claim modulo the formula for $\alpha(\eps)$. The dimension of $\mathcal{I}^{\eps,0}$ is $2/\gamma^2$ \cite[Table 1 and Example 2.3]{GHM-KPZ}, and we get the formula for $\alpha(\eps)$ by adding 1 to this number.} \nina{Next} we argue that the marginal law of $\mathcal{I}^{\eps,0}$ converges to the marginal law of $\mathcal{I}^{0}$. \nina{Consider the definition of these sets via PPP as described in the previous paragraph.} Since $\lim_{\eps\rightarrow 0}\alpha(\eps)=\alpha(0)=3/2$, the PPP for $\eps\in (0,2-\sqrt{2})$ converge in law to the PPP for $\eps=0$ on all sets bounded away from $y=0$.
\nina{This implies that for any compact interval $I$ we have convergence in law of $\mathcal{I}^{\eps,0}\cap I$ to $\mathcal{I}^{0}\cap I$ for the Hausdorff distance}. \nina{Now} we will argue that if $\widetilde\mathcal{I}^{\eps,0}\subset(-\infty,0)$ denotes the backward running infima of $A^\eps$ relative to time 0, then \eqbn (X^\eps,\mathcal{I}^{\eps,0},\widetilde\mathcal{I}^{\eps,0})\Rightarrow (X,\mathcal{I}^{0},\mathcal{I}^{0}). \eqen Since $(X^\eps,\nina{\widetilde\mathcal{I}^{\eps,0}})\Rightarrow (X,\mathcal{I}^{0})$ and $\nina{\mathcal{I}^{\eps,0}}\Rightarrow \mathcal{I}^{0}$, we only need to prove that for any a.s.\ subsequential limit $(X,\mathcal{I}^{0},\widetilde\mathcal{I}^{0})$ we have $\mathcal{I}^0=\widetilde\mathcal{I}^0$ a.s.
Observe that $\widetilde\mathcal{I}^{\eps,0}\subset\mathcal{I}^{\eps,0}$ \nina{since $\widetilde\mathcal{I}^{\eps,0}$ denotes the backward running infima of $A^\eps$, $\mathcal{I}^{\eps,0}$ denotes the set of ancestor-free times of $A^\eps$ relative to time 0, and a time which is a backward running infimum of $A^\eps$ relative to time 0 cannot be inside a cone excursion, hence it is ancestor-free.} \nina{The observation $\widetilde\mathcal{I}^{\eps,0}\subset\mathcal{I}^{\eps,0}$} implies that $\widetilde\mathcal{I}^{0}\subset\mathcal{I}^{0}$ a.s.\ \nina{in any subsequential limit $(X,\mathcal{I}^{0},\widetilde\mathcal{I}^{0})$.} Since $\widetilde\mathcal{I}^{0}\eqD\mathcal{I}^{0}$, this implies that $\mathcal{I}^0=\widetilde\mathcal{I}^0$ a.s. Next we will argue that $(\mathcal{I}^{\eps,0},\ell^{\eps,0},T^{\eps,0}) \Rightarrow (\mathcal{I}^{0},\ell^{0},T^{0})$, assuming we choose the multiplicative constant consistently when defining $\ell^{\eps,0}$ and $\ell^{0}$. The convergence result follows again from the construction of $\mathcal{I}^{\eps,0}$ and $\mathcal{I}^0$ via a PPP, since the $x$ coordinate of the PPP defines the local time (modulo multiplication by a deterministic constant).
Using that $(\mathcal{I}^{\eps,0},\ell^{\eps,0},T^{\eps,0}) \Rightarrow (\mathcal{I}^{0},\ell^{0},T^{0})$, that $\mathcal{I}^{\eps,0}$ and $\mathcal{I}^{0}$ determine the other two elements in this tuple, and that $(X^\eps,\mathcal{I}^{\eps,0})\Rightarrow(X,\mathcal{I}^{0})$, we get $$ (X^\eps,\mathcal{I}^{\eps,0},\ell^{\eps,0},T^{\eps,0}) \Rightarrow (X,\mathcal{I}^{0},\ell^{0},T^{0}). $$ We conclude that the lemma holds in the infinite volume setting by using that $$\widehat X^{\eps,0}_s=X^{\eps}_{T^{\eps,0}_s} \text{ and }\widehat X_s=X_{T^{0}_s}.$$ To conclude the proof we will transfer from the infinite volume setting to the finite volume setting. Let us start by recalling that there is a natural infinite measure $\theta_\eps$ on Brownian excursions in the cone $\mathcal{C}_\eps:=\{z\in \mathbb{C}: \arg(z)\in (-\pi/2+\tan^{-1}(a_\eps),\pi/2-\tan^{-1}(a_\eps))\}$ which is uniquely characterized (modulo multiplication by a constant) by the following property. Let $X^\eps$ be as in the previous paragraph, let $\delta>0$ and let $J_\eps=[t_1,t_2]\subset\mathbb{R}_-$ be the interval with largest left end point $t_1$ of length at least $\delta$ during which $X^\eps$ makes an excursion in the cone $\mathcal{C}_\eps$.
Here a cone excursion in $\mathcal{C}_\eps$ is a path starting at $(ba_\eps,b)+z_0$ for some $b>0$ and $z_0\in\mathbb{C}$, ending at $z_0$, and otherwise staying inside $z_0+\mathcal{C}_\eps$. Define \begin{equation}\label{XYeq} Y^\eps_t=(X^\eps_{t+t_1}-X^\eps_{t_2}) \end{equation} for $t\in[0,t_2-t_1]$ so that $Y^\eps$ is a path that starts at $(ba_\eps,b)$ for some $b>0$, ends at the origin, and otherwise stays inside $\mathcal{C}_\eps$. Then $Y^\eps$ has law $\theta_\eps$ restricted to excursions of length at least $\delta$. (Here and in the rest of the proof, when we work with a non-probability measure of finite mass, we will often assume that it has been renormalized to be a probability measure.) See \cite{Shi85}. The measure $\theta_\eps$ allows a disintegration $\theta_\eps=\int_0^\infty \theta_\eps^b\,db$, where a path sampled from $\theta_\eps^b$ a.s.\ starts at $(ba_\eps,b)$. Furthermore, for $b,b'>0$, a path sampled from $\theta_\eps^b$ and rescaled by $b'/b$ so it starts at $(b'a_\eps,b')$ (and with Brownian scaling of time), has law $\theta_\eps^{b'}$. Finally, an excursion sampled from $\theta_\eps^1$ is equal in law to the excursion in the statement of the lemma. See \cite{AG19}. Let us now use these facts to complete the proof.
We define a function $f^\eps$ such that for $X^\eps$ a two-sided planar Brownian motion as above we have $f^\eps(X^\eps)=((\mathcal{I}^{\eps,\mathfrak t})_{\mathfrak t}, (\ell^{\eps,\mathfrak t})_{\mathfrak t}, (T^{\eps,\mathfrak t})_{\mathfrak t}, (\widehat X^{\eps,\mathfrak t})_{\mathfrak t} )$ a.s. For $Y^\eps$ a Brownian cone excursion in $\mathcal{C}_\eps$ starting at $(a_\eps,1)$ we define $f^\eps(Y^\eps)$ such that $(Y^\eps,f^\eps(Y^\eps))$ is equal in law to the tuple $\mathfrak{be}^\eps$ in the theorem statement. We also extend the definition of $f^\eps$ to the case of Brownian excursions $Y^\eps$ in $\mathcal{C}_\eps$ starting at $(ba_\eps,b)$ for general $b>0$ in the natural way. Now let $Y^\eps$ be coupled with $X^\eps$ as in \eqref{XYeq} for some fixed $\delta>0$, and let $E^\eps$ be the event that $Y^\eps$ starts at $(ba_\eps,b)$ for $b\in [1,2]$. Define $f,E$ similarly for $\eps=0$.
We claim that \eqb (X^\eps,f^\eps(X^\eps),Y^\eps,f^\eps(Y^\eps),E^\eps) \Rightarrow (X,f(X),Y,f(Y),E) \label{eq:finite-infinite} \eqe as $\eps\to 0$. In fact, this claim is immediate since if $(X^\eps,f^\eps(X^\eps))$ converges to $(X,f(X))$ then (by convergence of $\mathcal{I}^{\eps,0}$) we also have convergence of the interval $J_\eps$, which further gives convergence of $ (Y^\eps,f^\eps(Y^\eps),E^\eps)$ to $(Y,f(Y),E)$. With $Y^\eps$ as in the previous paragraph let $\widetilde Y^\eps$ denote a random variable which is obtained by conditioning on $E^\eps$ and then applying a Brownian rescaling of $Y^\eps$ so that $\widetilde Y^\eps$ starts at $(a_\eps,1)$. We get from \eqref{eq:finite-infinite} that $(\widetilde Y^\eps, f^\eps(\widetilde Y^\eps)) \Rightarrow (\widetilde Y,f(\widetilde Y))$. Note that if we condition the excursions in the statement of the lemma to have duration at least $\delta$, then these have exactly the same laws as $(\widetilde Y^\eps, f^\eps(\widetilde Y^\eps),\widetilde Y,f(\widetilde Y))$ conditioned to have duration at least $\delta$.
Thus the lemma follows upon taking $\delta\to 0$, since the probability that the {considered} excursions {have} duration at least $\delta$ tends to $1$, uniformly in $\eps$. \end{proof} \subsection{Proof of \eqref{eqn:bottleneck}}\label{sec:order_proof} \corr{Let us first recall the statement of \eqref{eqn:bottleneck}. We have fixed $z,w\in \mathbb{D}$, and as usual, $\eta^\eps$ denotes a space filling SLE$_{\kappa'}$ in $\mathbb{D}$, while $\eta^\eps_z$ denotes the branch in the associated branching SLE$_{\kappa'}$ towards $z$, parameterized by $-\log$ conformal radius seen from $z$. For $\delta>0$, we have defined the times $\sigma_{z,w,\delta}^\eps$ at which $w$ is first sent to within distance $\delta$ of $\partial \mathbb{D}$ by the Loewner maps associated with $\eta^\eps_z$, and $\sigma_{z,w}^\eps=\sigma_{z,w,0}^\eps$ to be the first time that $z$ and $w$ are separated by $\eta^\eps_z$. For $r>0$, we denote the collection of faces (squares) of $r\mathbb{Z}^2$ that intersect $\mathbb{D}$ by $\mathcal{S}_r$. Finally, we write $S_{\delta,r}^\eps$ for the event that there exists $S\in \mathcal{S}_r$ that is separated by $\eta_z^\eps$ from $z$ during the interval $[\sigma_{z,w,\delta}^\eps, \sigma_{z,w}^\eps]$ \emph{and} such that $z$ is visited by the space-filling $\SLE_{\kappa'}$ $\eta^\eps$ before $S$.
The statement of \eqref{eqn:bottleneck} is then that \begin{equation*} \lim_{\delta\downarrow 0} \lim_{\eps\downarrow 0} \mathbb{P}(S_{\delta,r}^\eps) =0. \end{equation*} } The mating of trees theorem (Theorem \ref{thm:MOT}) together with the convergence proved in the previous subsection now make the proof of this statement reasonably straightforward. Indeed, in plain language, it says that the probability of an $\SLE_{\kappa'}({\kappa'}-6)$ branch almost separating two points $z$ and $w$ (where ``almost'' is encoded by a small parameter $\delta$) but then going on to separate a bicolored component of macroscopic size from $z$ at some time $t$ \emph{strictly} before separating $z$ from $w$, goes to $0$ as $\delta\to 0$, uniformly in ${\kappa'}$. The idea is to couple this SLE with an independent $\gamma$-LQG disk and note that if the event mentioned above were to occur, then the component $U$ containing $z$ and $w$ at time $t$ would have a small ``bottleneck'' and hence define a very strange distribution of $\gamma$-LQG mass when viewed as a $\gamma$-LQG surface. On the other hand, if we sample several points from the $\gamma$-LQG area measure on the disk, then one of these is likely to be in the bicolored component separated from $z$ and $w$ at time $t$. So the mating of trees theorem says that $U$ should really look like a quantum disk, and in particular, have a rather well behaved distribution of $\gamma$-LQG mass {without bottlenecks}. This contradiction will lead us to the proof of \eqref{eqn:bottleneck}. Let us now get on with the details. For $\eps\in(0,2-\sqrt{2})$ we consider a CLE$_{{\kappa'}}$ exploration alongside an \emph{independent} unit boundary length quantum disk $h^\eps$ as in Definition \ref{conv:disk}.
We write $\mu^\eps$ for its associated LQG area measure and let $y^\eps$ be a point in $\mathbb{D}$ sampled from $\mu^\eps$ normalized to be a probability measure. We let $z\in \mathcal{Q}$ be fixed. \begin{corollary}\label{cor:nobottleneck} Consider the event $A^\eps_{\delta,m,v}$ that: \begin{itemize}\item $\mathcal{O}_{z,y^\eps}^\eps=1$ (i.e., the component containing $z$ when $y^\eps$ and $z$ are separated is monocolored); \item when ${\mathrm{D}}_{z,y^\eps}^\eps$ (this monocolored component) is mapped to $\mathbb{D}$, with a point in the interior chosen proportionally to $\mu^\eps|_{{\mathrm{D}}_{z,y^\eps}^\eps}$ sent to $0$, the resulting quantum mass of $\mathbb{D}\setminus (1-10\delta)\mathbb{D}$ is greater than $m$. \end{itemize} Then for every $m$ we have that \[ \lim_{\delta\to 0}\limsup_{\eps\to 0}\mathbb{P}(A_{\delta,m,v}^\eps)=0.\] \end{corollary} \begin{proof} Theorem \ref{thm:MOT} says that the monocolored components separated from $y^\eps$ by $\eta^\eps_{y^\eps}$ are quantum disks conditionally on their boundary lengths and areas. Moreover, we know that the total mass of the original disk $h^\eps$ converges in law to something a.s.\ finite as $\eps\to 0$, by Lemma \ref{lem:disk-conv} and Remark \ref{rmk:disk-conv3}.
Recalling the definition of $\widehat{B}$ from Section \ref{sec:Bfs}, we also know that the largest quantum boundary length among all monocolored components separated from $y^\eps$ has law given by the largest jump of $\widehat{B}^{\mathfrak t}$, for $\mathfrak t$ chosen uniformly in $(0,\mu^\eps(\mathbb{D}))$. Indeed, if $\mathfrak t$ corresponds to $y^\eps$ as in the paragraph above Definition \ref{def:QNT}, then $\mathfrak{t}$ is a uniform time in $(0,\mu^\eps(\mathbb{D}))$ and the jumps of $\widehat{B}^{\mathfrak t}$ are precisely the quantum boundary lengths of the monocolored components disconnected from $y^\eps$. By Lemma \ref{lem:BM-conv} we may deduce that the law of this largest jump converges to something a.s.\ finite as $\eps\to 0$. Thus, by choosing $N,L$ large enough, we may work on an event with arbitrarily high probability (uniformly in $\eps$) where there are fewer than $N$ monocolored components separated from $y^\eps$ with mass $\ge m$, and where they all have $\nu^\eps$ boundary length less than $L$. Lemma \ref{cor:disc_mapped} then provides the result. \end{proof}\\ We also need one more elementary property of radial Loewner chains to assist with the proof of \eqref{eqn:bottleneck}. \begin{lemma}\label{lem:radial_flow_out} Consider the image $(g_t(z))_{t\ge 0}$ of a point $z\in \mathbb{D}$ under the radial Loewner flow $(g_t)_{t\ge 0}=(g_t[\mathbf{D}])_{t\ge 0}$ corresponding to $\mathbf{D}\in\mathcal{D}$. Then with probability one, $|g_t(z)|$ is a non-decreasing function of time (until the point $z$ is swallowed).
\end{lemma} \begin{proof} From the radial Loewner equation one can compute directly that, until the point $z$ is swallowed, $$\partial_t (|g_t(z)|^2) = 2 |g_t(z)| \Re \big(\frac{W_t+g_t(z)}{W_t-g_t(z)}\big).$$ Since $\Re((1+x)/(1-x))>0$ for any $x\in \mathbb{D}$, the right-hand side above must be positive. \end{proof}\\ \begin{proofof}{\eqref{eqn:bottleneck}} Fix $r>0$ and suppose that $\mathbb{P}(S_{\delta,r}^\eps)\ge a$ for some $a>0$. Recall that $S_{\delta,r}^\eps$ is the event that there exists $S\in \mathcal{S}_r$ that is separated by $\eta_z^\eps$ from $z$ during the interval $[\sigma_{z,w,\delta}^\eps, \sigma_{z,w}^\eps]$ {and} such that the disconnected component containing $z$ is monocolored. Let $h^\eps, \mu^\eps,y^\eps$ be as above Corollary \ref{cor:nobottleneck}, and let $a'=\inf_{\eps> 0}\min_{S\in \mathcal{S}_r}\mathbb{P}(y^\eps\in S)$. Then $a'$ is strictly positive, due to the convergence result Lemma \ref{conv:disk}, plus the fact that $\min_{S\in \mathcal{S}_r}\mathbb{P}(y\in S)>0$ when $y$ is picked from the critical LQG area measure for a critical unit boundary length disk. By independence, we then have $\mathbb{P}(E_\delta^\eps)\ge aa'$, where $E_{\delta}^\eps$ is the event that $\sigma^\eps_{z,y^\eps}\in [\sigma_{z,w,\delta}^\eps,\sigma_{z,w}^\eps]$ and $\mathcal{O}_{z,y^\eps}^\eps=1$.
We can also choose $v,m$ small enough and $K$ large enough that on an event $F_{m,v,K}^\eps$ with probability $\ge 1-aa'/2$, uniformly in $\eps$: \begin{itemize} \itemsep0em \item $B_z(v)\subset l_z^\eps$ (resp.\ $B_w(v)\subset l_w^\eps$) where $l_z^\eps$ (resp.\ $l_w^\eps$) is the first nested $\CLE_{{\kappa'}}$ bubble containing $z$ (resp.\ $w$) that is entirely contained in $B_z(|z-w|/3)$ (resp.\ $B_w(|z-w|/3)$); \item $B_z(v)$ and $B_w(v)$ have $\mu$-mass greater than or equal to $m$; \item if we map $l_z^\eps$ (resp.\ $l_w^\eps$) to $\mathbb{D}$ with $z$ (resp.\ $w$) sent to $0$, then the images of $B_z(v)$ and $B_w(v)$ are contained in $(1/2)\mathbb{D}$; \item $\mu^\eps(\mathbb{D})\le K$. \end{itemize} Again this is possible because such $v,m,K$ can be chosen when $\eps=0,{\kappa'}=4$, and we can appeal to the convergence results Proposition \ref{prop:cleloopconv} and Lemma \ref{conv:disk}. Note that on the event $F_{v,m,K}^\eps$: \begin{itemize} \itemsep0em \item[(i)] $B_w(v)$ and $B_z(v)$ are contained in $({\mathbf{D}}_{z}^\eps)_t$ for all $t\in(\sigma^\eps_{z,w,\delta},\sigma^\eps_{z,w})$; \item[(ii)] for any $t\in(\sigma^\eps_{z,w,\delta},\sigma^\eps_{z,w})$ and conformal map sending $({\mathbf{D}}_{z}^\eps)_t$ to $\mathbb{D}$ with $z'\in B_z(v)$ sent to $0$, the image of $B_w(v)$ is contained in a $10\delta$ neighbourhood of $\partial \mathbb{D}$.
\end{itemize} Point (ii) follows because any such conformal map can be written as the composition of a conformal map $({\mathbf{D}}_{z}^\eps)_t$ to $\mathbb{D}$ sending $z$ to 0, and then a conformal map from $\mathbb{D}\to \mathbb{D}$ sending the image of $z'$, which lies in $(1/2)\mathbb{D}$, to $0$. By Lemma \ref{lem:radial_flow_out}, $w$ is sent to distance $\le \delta$ from the boundary by the first of these two maps. The third bullet point in the definition of $F_{v,m,K}$ then implies that the whole of $B_w(v)$ is actually sent within distance $4\delta$ of $\partial \mathbb{D}$. Distortion estimates near the boundary for the second conformal map allow one to deduce (ii). To finish the proof, we consider the event $E_\delta^\eps \cap F_{m,v,K}^\eps$ which has probability $\ge aa'/2$ by construction. Conditionally on this event, if we sample a point from $\mathbf{D}_{z,y^\eps}^\eps$ according to the measure $\mu^\eps$, then this point will lie in $B_z(v)$ with conditional probability $\ge m/K$. If this happens, then upon mapping to the unit disk with this point sent to the origin, a set of $\mu^\eps$ mass $\ge m$ (namely $B_w(v)$) will necessarily be sent to $\mathbb{D}\setminus (1-10\delta)\mathbb{D}$ (see point (ii) above). Note that $m/K$ is a function $c(a)$ of $a$ only (and in particular does not depend on $\eps,\delta$). So in summary, if $\mathbb{P}(S_{\delta,r}^\eps)\ge a$, then $\mathbb{P}(A_{\delta, m,v}^\eps)>aa'c(a)$ for some $m(a),v(a),c(a)$ depending only on $a$, where $A_{\delta,m,v}^\eps$ is as in Corollary \ref{cor:nobottleneck}.
This means that if \eqref{eqn:bottleneck} does not hold, then $\lim_{\delta\to 0} \limsup_{\eps\to 0} \mathbb{P}(A^\eps_{\delta, m,v})>0$ for some $m,v$. This contradicts Corollary \ref{cor:nobottleneck}, and hence \eqref{eqn:bottleneck} is proved. \end{proofof} \section{Mating of trees for $\kappa=4$ and joint convergence of CLE, LQG, and Brownian motions as $\kappa'\downarrow 4$} \label{sec:joint-conv} Before stating the main theorems, let us briefly take stock of the progress so far. Recall that to each $\eps\in (0,2-\sqrt{2})$ we associate ${\kappa'}={\kappa'}(\eps)=16/(2-\eps)^2$, and write $(\mathbf{D}_z^\eps)_{z\in \mathcal{Q}}$ for the SLE$_{\kappa'}(\kappa'-6)$ branches from 1 to $z$ in a branching SLE$_{\kappa'}$ in $\mathbb{D}$. These are generated by curves $(\eta^\eps_z)_{z\in \mathcal{Q}}$, so that $(\mathbf{D}_z^\eps)_t$ is the connected component of $\mathbb{D}\setminus \eta_z^\eps$ containing $z$ for every $z$ and $t$. Recall that this branching SLE defines a nested CLE$_{\kappa'}$ which we denote by $\Gamma^{\eps}$, and a space-filling $\SLE_{\kappa'}$ which we denote by $\eta^\eps$. {The space-filling SLE$_{\kappa'}$} $\eta^\eps$ then determines an order on the points in $\mathcal{Q}$: for $z,w\in \mathcal{Q}$ we denote by $\mathcal{O}_{z,w}^\eps$ the random variable that is $1$ if $z$ is visited before $w$ by $\eta^\eps$ (or $z=w$) and $0$ otherwise.
We combine these and set $$\mathfrak{cle}^\eps=((\mathbf{D}^\eps_z)_z,\Gamma^\eps,(\mathcal{O}_{z,w}^{\eps})_{z,w})$$ for each $\eps$, where $z,w$ are indexed by $\mathcal{Q}$. When ${\kappa'}=4$ we have analogous objects. We write $\Gamma$ for a nested CLE$_4$ in $\mathbb{D}$, and we assume that $\Gamma$ is coupled with a branching uniform $\CLE_4$ exploration that explores its loops. We write $\mathbf{D}_z$ for the branch towards each $z\in \mathcal{Q}$ in this exploration. Finally, we define a collection of independent coin tosses $(\mathcal{O}_{z,w})_{z,w\in \mathcal{Q}}$ as described at the start of Section \ref{sec:conv_order}. Combining these, we set $$ \mathfrak{cle}=((\mathbf{D}_z)_z,\Gamma,(\mathcal{O}_{z,w})_{z,w}).$$ The processes $\mathbf{D}^\eps_z,\mathbf{D}_z$ are each parameterized by $-\log $ conformal radius seen from $z$, and equipped with the topology of $\mathcal{D}_z$ for every $z\in \mathcal{Q}$. {The loop ensembles} $\Gamma^\eps,\Gamma$ are equipped with the topology of Hausdorff convergence for the countable collection of loops surrounding each $z\in\mathcal{Q}$. We also consider, for each $\eps$, a unit boundary length Liouville quantum gravity disk as in Definition \ref{conv:disk}, {\emph{independent of}} $\mathfrak{cle}^\eps$, and write $$ \mathfrak{lqg}^\eps=(\mu^\eps_{h^\eps},\nu^\eps_{h^\eps},h^\eps)$$ for the associated area measure, boundary length measure and field. We denote by $$\mathfrak{lqg}=(\mu_h,\nu_h,h)$$ its critical counterpart, which we also sample independently of $\mathfrak{cle}$.
We equip the fields with the $H^{-1}(\mathbb{D})$ topology, and the measures with the weak topology for measures on $\mathbb{D}$ and $\partial \mathbb{D}$ respectively. Then by Remark \ref{rmk:disk-conv}, Proposition \ref{prop:cle-conv}, and the independence of $\mathfrak{cle}^\eps$ and $\mathfrak{lqg}^\eps$ (resp.\ $\mathfrak{cle}$ and $\mathfrak{lqg}$), we have that: \begin{proposition} \label{prop:p1} $(\mathfrak{cle}^\eps,\mathfrak{lqg}^\eps)\Rightarrow (\mathfrak{cle},\mathfrak{lqg})$ as $\eps\to 0$. \end{proposition} Additionally, for every $\eps\in (0,2-\sqrt{2})$ by the mating of trees theorem, Theorem \ref{thm:MOT}, $(\mathfrak{cle}^\eps,\mathfrak{lqg}^\eps)$ determines a collection of Brownian observables $$\mathfrak{be}^\eps=(X^\eps,(\mathcal{I}^{\eps,\mathfrak t})_{\mathfrak t}, (\ell^{\eps,\mathfrak t})_{\mathfrak t}, (T^{\eps,\mathfrak t})_{\mathfrak t}, (\widehat X^{\eps,\mathfrak t})_{\mathfrak t} )$$ as explained in Section \ref{sec:Bfs}. Recall that $X^\eps$ is $\sqrt{\pi}$ times an uncorrelated Brownian excursion in the cone $\{z\in \mathbb{C}: \arg(z)\in (-\pi/2+\tan^{-1}(a_\eps),\pi/2-\tan^{-1}(a_\eps))\}$, starting from $(a_\eps,1)$ and ending at the origin, where $a_\eps=\sqrt{(1+\cos(\pi\gamma^2/4))/(1-\cos(\pi\gamma^2/4))}=\pi\eps/2+o(\eps)$. The indexing of the above processes is over $\mathfrak{t}\in \mathbb{R}_+\cap \mathbb{Q}$.
If we also write $$\mathfrak{be}=(X,(\mathcal{I}^{\mathfrak t})_{\mathfrak t}, (\ell^{\mathfrak t})_{\mathfrak t}, (T^{\mathfrak t})_{\mathfrak t}, (\widehat X^{\mathfrak t})_{\mathfrak t} ),$$ for a tuple with law as described in Section \ref{sec:Bfs}, then by Lemma \ref{lem:BM-conv} we have that: \begin{proposition} \label{prop:p2} $\mathfrak{be}^\epsilon\Rightarrow \mathfrak{be}$ as $\epsilon\to 0$. \end{proposition} Here, $\mathcal{I}^{\epsilon,\mathfrak t},\mathcal{I}^{\mathfrak t}$ are equipped with the Hausdorff topology, and the stochastic processes in the definition of $\mathfrak{be}^\epsilon, \mathfrak{be}$ are equipped with the Skorokhod topology.\\ We now wish to describe the \emph{joint} limit of $(\mathfrak{cle}^\epsilon, \mathfrak{lqg}^\epsilon, \mathfrak{be}^\epsilon)$ as $\epsilon\to 0$. For this, we first need to introduce a little notation. For {$z, w\in\mathcal{Q}$, $z\ne w$,} we can consider the first time $\sigma_{z,w}^\epsilon$ (defined by $\mathfrak{cle}^\epsilon$) at which $z$ and $w$ are in different complementary components of $\mathbb{D}\setminus \eta_z^\epsilon$. We let $U^\epsilon=U^\epsilon(z,w)\subset\mathbb{D}$ denote the component which is visited first by the space-filling SLE$_{\kappa'}$ $\eta^\epsilon$. We say that $U^\epsilon=U^\epsilon(z,w)$ is the \emph{monocolored} component when $z$ and $w$ are separated.
Let us define $$\mathfrak{U}^\epsilon_z:=\{U\subset \mathbb{D}: U=U^\epsilon(z,w) \text{ for some } w\ne z \text{ with } \mathcal{O}^\epsilon_{z,w}=0\}$$ to be the set of monocolored components separated from $z$ by $\eta_z^\epsilon$. Note that these are naturally ordered, according to the order that they are visited by $\eta^\epsilon$. In fact, we may also associate orientations to the elements of $\mathfrak U_z^\epsilon$: we say that $U\in \mathfrak U_z^\epsilon$ is ordered clockwise (resp.\ counterclockwise) if the boundary of $U$ is visited by $\eta_z^\epsilon$ in a clockwise (resp.\ counterclockwise) order, and in this case we write $\mathrm{sgn}(U)=-1$ (resp.\ $+1$). \begin{remark}\label{rmk:MOT} For $\epsilon\in (0,2-\sqrt{2})$, by Theorem \ref{thm:MOT} and the definitions above, we have that: \begin{compactitem} \item the duration of $Z^\epsilon$ is equal to $\mu_{h^\epsilon}^\epsilon(\mathbb{D})$, hence $X^\epsilon_t=0$ for all $t\ge \mu^\epsilon_{h^\epsilon}(\mathbb{D})$ almost surely; \item for $z\in \mathcal{Q}$, the time $t_z^\epsilon$ at which $\eta^\epsilon$ visits $z$ is almost surely given by $\mu_{h^\epsilon}^\epsilon(\cup_{U\in \mathfrak{U}_{z}^\epsilon} U)=\sum_{U\in\mathfrak{U}_{z}^\epsilon} \mu_{h^\epsilon}^\epsilon(U)$; \item the ordered $\nu_{h^\epsilon}^\epsilon$ boundary lengths of the components of $\mathfrak{U}_z^\epsilon$
are almost surely equal to the ordered jumps of $(\widehat B^{\epsilon,t_{z}^\epsilon})$, and the sign of each jump is equal to the sign of the corresponding element of $\mathfrak{U}_z^\epsilon$; \item the ordered $\mu^\epsilon_{h^\epsilon}$ masses of the components of $\mathfrak{U}_z^\epsilon$ are almost surely equal to the ordered jumps of $T^{\epsilon,t_z^\epsilon}$. \end{compactitem} \end{remark} We can also define analogous objects associated with the CLE$_4$ exploration: if $z$ and $w$ are separated at time $\sigma_{z,w}$ by the $\mathrm{CLE}_4$ exploration branch towards $z$, and $\mathcal{O}_{z,w}=1$ we set $U(z,w)=(\mathbf{D}_z)_{\sigma_{z,w}}$; if $\mathcal{O}_{z,w}={0}$ we set $U(z,w)=(\mathbf{D}_w)_{\sigma_{w,z}}$. {The set} $\mathfrak{U}_z$ is then defined in exactly the same way. Note that in this case the elements of $\mathfrak{U}_z$ are ordered by declaring that $U$ comes before $U'$ iff $U=U(z,w)$ and $U'=U(z,w')$ for $w\ne w'$ such that $\mathcal{O}_{w',w}=0$. We now say that $U\in \mathfrak U_z$ is ordered clockwise (resp.\ counterclockwise) if there is an even (resp.\ odd) number of loops which enclose $U$, and write $\mathrm{sgn}(U)=-1$ (resp.\ $+1$).
The main ingredient that will allow us to describe the joint limit {of $(\mathfrak{cle}^\epsilon, \mathfrak{lqg}^\epsilon, \mathfrak{be}^\epsilon)$} is the following: \begin{proposition} \label{prop:p3} Given $(\mathfrak{cle}^\epsilon, \mathfrak{lqg}^\epsilon)$, denote by $z^\epsilon$ a point sampled from $\mu^\epsilon_{h^\epsilon}$ in $\mathbb{D}$ (normalised to be a probability measure) and given $(\mathfrak{cle}, \mathfrak{lqg})$, denote by $z$ a point sampled in the same way from $\mu_{h}$. For given $\delta>0$, write $(U_1^{\epsilon},\dots, U_{N^\epsilon}^\epsilon)$ for the ordered components of $\mathfrak{U}_{z^\epsilon}^{\epsilon}$ with $\mu^\epsilon_{h^\epsilon}$ area $\ge \delta$, and define $(U_1,\dots, U_N)$ similarly for the ordered components of $\mathfrak{U}_{z}$ with $\mu_h$ area $\ge \delta$. Suppose that $w_i^\epsilon$ for $1\le i \le N^\epsilon$ (resp.\ $w_i$ for $1\le i \le N$) are sampled from $\mu^\epsilon_{h^\epsilon}|_{U_i^\epsilon}$ (resp.\ $\mu_h|_{U_i}$) normalized to be probability measures, and $g_i^\epsilon:U_i^\epsilon \to \mathbb{D}$ (resp.\ $g_i:U_i\to \mathbb{D}$) are the conformal maps that send $w_i^\epsilon$ to $0$ (resp.\ $w_i$ to $0$) with positive real derivative at $w_i^\epsilon$ (resp.\ $w_i$). Set $\mathrm{sgn}(U_i^\epsilon)=w_i^\epsilon=0$ (resp.\ $\mathrm{sgn}(U_i)=w_i=0$) and $g_i^\epsilon(h^\epsilon)$ (resp.\ $g_i(h)$) to be the $0$ function for $i>N^\epsilon$ (resp.\ $i>N$).
Then $$(\mathfrak{cle}^\epsilon, \mathfrak{lqg}^\epsilon, z^\epsilon,(\mathrm{sgn}(U_i^\epsilon))_{i\ge 1}, (w_i^\epsilon)_{i\ge 1}, (g_i^\epsilon(h^\epsilon))_{i\ge 1}) \Rightarrow (\mathfrak{cle},\mathfrak{lqg},z,(\mathrm{sgn}(U_i))_{i\ge 1}, (w_i)_{i\ge 1}, (g_i(h))_{i\ge 1})$$ as $\epsilon\to 0$.\footnote{with respect to the Euclidean topology in the third coordinate, and the topology in the final coordinates defined such that $((s_i^n)_{i\ge 1}, (w_i^n)_{i\ge 1}, (h^n_i)_{i\ge 1})\to ((s_i)_{i\ge 1}, (w_i)_{i\ge 1}, (h_i)_{i\ge 1})$ as $n\to \infty$ iff the number of non-zero components on the left hand side is equal to the number $N_n$ of non-zero components on the right hand side for all $n$ large enough, and the first $N$ components converge in the product discrete $\times$ Euclidean $\times$ $H^{-1}(\mathbb{D})$ topology.} The fields $g_i^\epsilon(h^\epsilon)$ and $g_i(h)$ above are defined using the change of coordinates formula \eqref{eq:coc}. \end{proposition} In other words, the ordered and signed sequence of monocolored quantum surfaces separated from $z^{\epsilon_n}$ converges almost surely, as a sequence of quantum surfaces {(see above \eqref{eq:coc})} to the ordered sequence of monocolored quantum surfaces separated from $z$ as $n\to \infty$. From this, we can deduce our main theorem. \begin{theorem}\label{thm_main} $(\mathfrak{cle}^\epsilon, \mathfrak{lqg}^\epsilon, \mathfrak{be}^\epsilon)$ converges jointly in law to {a tuple} $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be})$ as $\epsilon\downarrow 0$.
In the limiting tuple, $\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be}$ have {marginal} laws as above, $\mathfrak{cle}$ and $\mathfrak{lqg}$ are independent, and $(\mathfrak{cle}, \mathfrak{lqg})$ determines $\mathfrak{be}$. Furthermore, we have the following explicit description of the correspondence between $(\mathfrak{cle},\mathfrak{lqg})$ and $\mathfrak{be}$ in the limit. Suppose that $z\in\mathbb{D}$ is sampled from the critical Liouville measure $\mu_h$ normalized to be a probability measure. Then \begin{itemize} \item $X_t=0$ for all $ t\ge \mu_h(\mathbb{D})$ almost surely and the conditional law of \begin{equation}\label{eq:tz}t_z:= \mu_h\left( \cup_{U\in\mathfrak{U}_z} U \right)\end{equation} given $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be})$ is uniform on $(0,\mu_h(\mathbb{D}))$, \item $X_{t_z}=(A_{t_z},B_{t_z})$ satisfies {the following for a deterministic constant $c>0$}: \begin{equation}\label{eq:AB} A_{t_z}={c}\liminf_{\delta\to 0} \delta N_\delta \text{ and } B_{t_z}=1+\sum_{U\in \mathfrak U_z} \mathrm{sgn}(U) \nu_h(\partial U)\end{equation} almost surely, where for $\delta>0$, $N_\delta$ is the number of domains $U\in \mathfrak{U}_z$ such that $\nu_h(\partial U)\in (\delta/2,\delta)$, \item the ordered collection $(\mu_h(U),\mathrm{sgn}(U)\nu_h(\partial U))_{U\in \mathfrak{U}_z}$ is almost surely equal to the ordered collection of jumps of $(T^{t_z},\widehat B^{t_z})$ (where $(T^{t_z},\widehat B^{t_z})$ are defined from $\mathfrak{be}$ as in Section \ref{sec:Bfs}). \end{itemize} \end{theorem} Notice that \begin{equation}\label{eq:Al}A_{t_z}=\widehat A_{\ell^{t_z}_{t_z}}=\ell^{t_z}_{t_z}\end{equation} is the limit as $\epsilon\to 0$ of the total length of the $\mathrm{SLE}_{\kappa'}({\kappa'}-6)$ branch towards $z$ in the quantum natural parameterization.
We can therefore view $A_{t_z}$ as a limiting ``quantum natural distance'' of $z$ from the boundary of the disk. In a similar vein, we record in Table \ref{table1} some of the correspondences between the $\mathrm{CLE}_4$ decorated critical LQG disk with order variables $(\mathfrak{cle}, \mathfrak{lqg})$ and the Brownian excursion $\mathfrak{be}$, where $z,w$ are points sampled from the critical LQG measure $\mu_h$ in the bulk. \begin{table*}[htbp]\centering \caption{Correspondences between the Brownian excursion $\mathfrak{be}$ and the $\mathrm{CLE}_4$-decorated critical LQG disk with order variables $(\mathfrak{cle},\mathfrak{lqg})$.} \label{table1} \begin{tabular}{@{}l l@{}} \toprule $\mathfrak{be}$ & $(\mathfrak{cle}, \mathfrak{lqg})$ \\ \midrule duration of $X$ & $\mu_h(\mathbb{D})$ \\ $\{t_w<t_z\}$ & $\{\mathcal{O}_{w,z}=1\}=$``$w$ ordered before $z$'' \\ $t_z$ & $\mu_h(\overline{\{w\in \mathcal{Q}: \mathcal{O}_{w,z}=1\}})=$``quantum area of points ordered before $z$'' \\ $A_{t_z}$ & quantum natural distance of $z$ from $\partial \mathbb{D}$ \\ jumps of $\widehat B^{t_z}$ & LQG boundary lengths of ``components ordered before $z$''\\ sign of jump & parity of $\#\{\mathrm{CLE}_4 \text{ loops surrounding component}\}$ \\ jumps of $T^{t_z}$ & LQG areas of ``components ordered before $z$''\\ CRT encoded by $A$ & $\mathrm{CLE}_4$ exploration branches parameterized by quantum natural distance \\ \bottomrule \end{tabular} \end{table*} \begin{proofof}{Theorem \ref{thm_main} given Proposition \ref{prop:p3}} Since we know the marginal convergence of each component of $(\mathfrak{cle}^\epsilon, \mathfrak{lqg}^\epsilon, \mathfrak{be}^\epsilon)$, we know that the triple is tight in $\epsilon$. Thus our task is to characterize any subsequential limit $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be})$ of $(\mathfrak{cle}^\epsilon, \mathfrak{lqg}^\epsilon,\mathfrak{be}^\epsilon)$.
Note that Proposition \ref{prop:p1} already tells us that $(\mathfrak{cle}, \mathfrak{lqg})$ are independent, and Proposition \ref{prop:p2} tells us that the marginal law of $\mathfrak{be}$ is that of a Brownian half plane excursion plus associated observables. To characterize the law of $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be})$ we will prove that if $z\in \mathbb{D}$ is sampled according to $\mu_h$ in $\mathbb{D}$, conditionally independently of the rest of $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be})$ then: \begin{enumerate}[(i)] \item the duration of $X$ is equal to $\mu_h(\mathbb{D})$ almost surely; \item $t_z$ defined by \eqref{eq:tz} is conditionally uniform on $(0,\mu_h(\mathbb{D}))$ given $(\mathfrak{cle}, \mathfrak{lqg},\mathfrak{be})$; \item the ordered collection $(\mu_h(U),\mathrm{sgn}(U)\nu_h(\partial U))_{U\in \mathfrak{U}_z}$ is almost surely equal to the ordered collection of jumps of $(T^{t_z},\widehat B^{t_z})$ (defined from $\mathfrak{be}$ as in Section \ref{sec:Bfs}); and \item $A_{t_z}, B_{t_z}$ satisfy \eqref{eq:AB} almost surely. \end{enumerate} Let us remark already that the above claim is enough to complete the proof of the theorem. Indeed, suppose that $(\mathfrak{cle}, \mathfrak{lqg},\mathfrak{be})$ is a subsequential limit in law of $(\mathfrak{cle}^\epsilon, \mathfrak{lqg}^\epsilon, \mathfrak{be}^\epsilon)$ as $\epsilon\to 0$ and let $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be}, \mathfrak{be}')$ be coupled so that $(\mathfrak{cle},\mathfrak{lqg},\mathfrak{be})$ is equal in law to $(\mathfrak{cle},\mathfrak{lqg}, \mathfrak{be}')$, while $\mathfrak{be},\mathfrak{be}'$ are conditionally independent given $\mathfrak{cle}, \mathfrak{lqg}$.
Further sample $z$ from $\mu_h$ in $\mathbb{D}$, conditionally independently of the rest of $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be},\mathfrak{be}')$, so that (i)-(iv) hold for $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be}, z)$ and for $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be}', z)$ (with $X,A,B$ replaced by their counterparts $X',A',B'$ for $\mathfrak{be}'$). Then by (i)-(ii) and since $X(\mathfrak{be})$, $X(\mathfrak{be}')$ are almost surely continuous, if $\mathbb{P}(\mathfrak{be}\ne \mathfrak{be}')$ were strictly positive then $\mathbb{P}(X(\mathfrak{be})_{t_z}\ne X(\mathfrak{be}')_{t_z})$ would be strictly positive as well. This would contradict (iii) and (iv), so we conclude that $\mathfrak{be}=\mathfrak{be}'$ almost surely. This means that $\mathfrak{be}$ is determined by $(\mathfrak{cle}, \mathfrak{lqg})$, and the explicit description in the statement of the {theorem} also follows immediately. \corr{The same argument implies that the law of any subsequential limit is unique. More concretely, suppose that $\epsilon_n$, ${\epsilon}'_n$ are two sequences tending to $0$ as $n\to \infty$, such that $(\mathfrak{cle}^{\epsilon_n},\mathfrak{lqg}^{\epsilon_n},\mathfrak{be}^{\epsilon_n})\Rightarrow (\mathfrak{cle}, \mathfrak{lqg},\mathfrak{be})$ and $(\mathfrak{cle}^{\epsilon'_n},\mathfrak{lqg}^{\epsilon'_n},\mathfrak{be}^{\epsilon'_n})\Rightarrow (\mathfrak{cle}',\mathfrak{lqg}',\mathfrak{be}')$ as $n\to \infty$.
Then we can also take a joint subsequential limit of $(\mathfrak{cle}^{\epsilon_n},\mathfrak{lqg}^{\epsilon_n},\mathfrak{be}^{\epsilon_n},\mathfrak{cle}^{\epsilon'_n},\mathfrak{lqg}^{\epsilon'_n},\mathfrak{be}^{\epsilon'_n})$; call it $(\mathfrak{cle},\mathfrak{lqg},\mathfrak{be},\mathfrak{cle}',\mathfrak{lqg}',\mathfrak{be}')$ where necessarily $\mathfrak{cle}=\mathfrak{cle}'$ and $\mathfrak{lqg}=\mathfrak{lqg}'$, since we already know the convergence $(\mathfrak{cle}^\epsilon,\mathfrak{lqg}^\epsilon)\Rightarrow (\mathfrak{cle},\mathfrak{lqg})$. Repeating the argument of the previous paragraph gives that $\mathfrak{be}=\mathfrak{be}'$ almost surely. In particular, the marginal law of $(\mathfrak{cle}',\mathfrak{lqg}',\mathfrak{be}')$ is the same as that of $(\mathfrak{cle},\mathfrak{lqg},\mathfrak{be})$.} So we are left to justify the above claim. To this end, let \begin{equation}\label{E:triple} (\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be}) \end{equation} be a subsequential limit, along some subsequence of $\epsilon$.
By Proposition \ref{prop:p3} and passing to a further subsequence if necessary we may extend this to the convergence \begin{gather}(\mathfrak{cle}^{\epsilon_n},\mathfrak{lqg}^{\epsilon_n}, z^{\epsilon_n}, \mathfrak{be}^{\epsilon_n},\big((\mathrm{sgn}(U_i^{\corr{\epsilon_n,\delta}}))_{i\ge 1}, (g_i^{\corr{\epsilon_n,\delta}}(h^{\epsilon_n}))_{i\ge 1}\big)_{\delta\in \mathbbm Q\cap (0,1)} )\nonumber \\ \Rightarrow \nonumber \\(\mathfrak{cle}, \mathfrak{lqg},z,\mathfrak{be}, \big((\mathrm{sgn}(U_i^{\corr{\delta}}))_{i\ge 1}, (g_i^{\corr{\delta}}(h))_{i\ge 1}\big)_{\delta \in \mathbbm Q \cap (0,1)})\label{eq:ssmain}\end{gather} along some $\epsilon_n\downarrow 0$, where for every $\delta {\in \mathbbm Q \cap (0,1)}$ the joint law of \corr{$$(\mathfrak{cle}^{\epsilon_n},\mathfrak{lqg}^{\epsilon_n}, z^{\epsilon_n}, \mathfrak{be}^{\epsilon_n},\big((\mathrm{sgn}(U_i^{\corr{\epsilon_n,\delta}}))_{i\ge 1}, (g_i^{\corr{\epsilon_n,\delta}}(h^{\epsilon_n}))_{i\ge 1}\big)_{\delta\in \mathbbm Q\cap (0,1)} ) \text{ and }(\mathfrak{cle}, \mathfrak{lqg}, z, ((\mathrm{sgn}(U_i^{\corr{\delta}}))_{i\ge1}, (g_i^{\corr{\delta}}(h))_{i\ge 1}))$$} are as in Proposition \ref{prop:p3} \corr{(now with the dependence on $\delta$ indicated for clarity)} and the joint law of $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be})$ is the one assumed in \eqref{E:triple}.
Note that the conditional law of $z$ given $(\mathfrak{cle},\mathfrak{lqg},\mathfrak{be})$ is that of a sample from $\mu_h$, since the same is true at every approximate level and since $\mu^{\epsilon_n}_{h^{\epsilon_n}}$ converges as part of $\mathfrak{lqg}^{\epsilon_n}$. We next argue that the convergence \eqref{eq:ssmain} necessarily implies the joint convergence \begin{gather}(\mathfrak{cle}^{\epsilon_n},\mathfrak{lqg}^{\epsilon_n}, z^{\epsilon_n}, \mathfrak{be}^{\epsilon_n},\big((\mathrm{sgn}(U_i^{\corr{\epsilon_n,\delta}}))_{i\ge 1},(g_i^{\corr{\epsilon_n,\delta}}(h^{\epsilon_n}))_{i\ge 1}, (\mu^{\epsilon_n}_{h^{\epsilon_n}}(U_i^{\corr{\epsilon_n,\delta}}))_{i\ge 1}, (\nu_{h^{\epsilon_n}}^{\epsilon_n}(\partial U_i^{\corr{\epsilon_n,\delta}}))_{i\ge 1 }\big)_{\delta\in \mathbbm Q\cap (0,1)}) \nonumber \\ \Rightarrow \nonumber \\ (\mathfrak{cle}, \mathfrak{lqg},z, \mathfrak{be}, \big((\mathrm{sgn}(U_i^{\corr{\delta}}))_{i\ge 1}, (g_i^{\corr{\delta}}(h))_{i\ge 1}, (\mu_h(U_i^{\corr{\delta}}))_{i\ge 1 }, (\nu_{h}(\partial U^{\corr{\delta}}_i))_{i\ge 1 }\big)_{\delta\in \mathbbm Q\cap (0,1)})\label{eq:ssmain2}\end{gather} as $n\to \infty$, where the initial components are exactly as in \eqref{eq:ssmain}.
Indeed, we know that the tuple on the left is tight in $n$, {because the first six terms are tight by above and both $(\mu^{\epsilon_n}_{h^{\epsilon_n}}(U_i^{\corr{\epsilon_n,\delta}}))_{i\ge 1}$ and $(\nu_{h^{\epsilon_n}}^{\epsilon_n}(\partial U_i^{\corr{\epsilon_n,\delta}}))_{i\ge 1 }$ are sequences with only a tight number of non-zero terms, and with all non-zero terms bounded by convergent quantities in $(\mathfrak{lqg}^{\epsilon_n},\mathfrak{be}^{\epsilon_n})$.} On the other hand, for any fixed $\delta$, $i$ and $n$, $$\mu_{h^{\epsilon_n}}^{\epsilon_n}(U_i^{\corr{\epsilon_n,\delta}})=\mu_{g_i^{\corr{\epsilon_n,\delta}}(h^{\epsilon_n})}^{\epsilon_n}(\mathbb{D}) \text{ and } \nu_{h^{\epsilon_n}}^{\epsilon_n}(\partial U_i^{\corr{\epsilon_n,\delta}})=\nu_{g_i^{\corr{\epsilon_n,\delta}}(h^{\epsilon_n})}^{\epsilon_n}(\partial \mathbb{D}),$$ so by Theorem \ref{thm:MOT}, $(g_i^{\corr{\epsilon_n,\delta}}(h^{\epsilon_n}), \mu_{h^{\epsilon_n}}^{\epsilon_n}(U_i^{\corr{\epsilon_n,\delta}}), \nu_{h^{\epsilon_n}}^{\epsilon_n}(\partial U_i^{\corr{\epsilon_n,\delta}}))$ is a sequence of $\gamma(\epsilon_n)$-quantum disks together with their quantum boundary lengths and areas.
We can therefore apply Remark \ref{rmk:changing_lengths} to deduce that any subsequential limit in law $(g_i(h),\mu^*,\nu^*)$ of \\ $(g_i^{\corr{\epsilon_n,\delta}}(h^{\epsilon_n}), \mu_{h^{\epsilon_n}}^{\epsilon_n}(U_i^{\corr{\epsilon_n,\delta}}), \nu_{h^{\epsilon_n}}^{\epsilon_n}(\partial U_i^{\corr{\epsilon_n,\delta}}))$ must be equal to $$(g_i^{\corr{\delta}}(h),\mu_{g_i^{\corr{\delta}}(h)}(\mathbb{D}),\nu_{g_i^{\corr{\delta}}(h)}(\partial \mathbb{D}))=(g_i^{\corr{\delta}}(h),\mu_h(U_i^{\corr{\delta}}),\nu_h(\partial U_i^{\corr{\delta}})).$$ This concludes the proof of \eqref{eq:ssmain2}. So to summarize, if we have any subsequential limit $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be})$ of $(\mathfrak{cle}^\epsilon,\mathfrak{lqg}^\epsilon,\mathfrak{be}^\epsilon)$ we can couple it with $z$ (whose conditional law given $(\mathfrak{cle},\mathfrak{lqg},\mathfrak{be})$ is that of a sample from $\mu_h$) and with $(U_i,g_i)_{i\ge 1}$ for every positive $\delta\in \mathbbm Q$, such that the joint convergence \eqref{eq:ssmain2} holds along some subsequence $\epsilon_n\downarrow 0$. By Skorokhod embedding we may assume that this convergence is almost sure, and so just need to prove that (i)-(iv) hold for the limit. This essentially follows from Remark \ref{rmk:MOT} and the convergence of the final coordinates in \eqref{eq:ssmain2}; we give the details for each point below.
\begin{enumerate}[(i)] \item This holds since $X^{\epsilon_n}_t=0$ for all $t\ge \mu^{\epsilon_n}_{h^{\epsilon_n}}(\mathbb{D})$ almost surely for every $n$, and $(\mu^{\epsilon_n}_{h^{\epsilon_n}}(\mathbb{D}),X^{\epsilon_n}) \to (\mu_h(\mathbb{D}),X)$ almost surely. \item The convergence of the areas in \eqref{eq:ssmain2} implies that $$t_{z^{\epsilon_n}}^{\epsilon_n}=\sum_{U\in\mathfrak{U}_{z^{\epsilon_n}}^{\epsilon_n}} \mu^{\epsilon_n}_{h^{\epsilon_n}}(U)$$ converges almost surely to $t_z$ defined in \eqref{eq:tz} along the subsequence $\epsilon_n\downarrow 0$. On the other hand, $t_{z^{\epsilon_n}}^{\epsilon_n}$ is conditionally uniform on $(0,\mu^{\epsilon_n}_{h^{\epsilon_n}}(\mathbb{D}))$ given $(\mathfrak{cle}^{\epsilon_n}, \mathfrak{lqg}^{\epsilon_n}, \mathfrak{be}^{\epsilon_n})$ for every $n$. \item The ordered collection of jumps of $(T^{{\epsilon_n},t^{\epsilon_n}_{z^{\epsilon_n}}},\widehat B^{{\epsilon_n},t_{z^{\epsilon_n}}^{\epsilon_n}})$ converges almost surely to the ordered collection of jumps of $(T^{t_z},\widehat B^{t_z})$ on the one hand, by definition of the convergence $(\mathfrak{be}^{\epsilon_n},z^{\epsilon_n})\to (\mathfrak{be},z)$ (and by considering a sequence $z^n\in \mathcal{Q}$ converging to $z$). On the other hand, they are equal to the ordered collection $(\mu^{\epsilon_n}_{h^{\epsilon_n}}(U),\mathrm{sgn}(U)\nu_{h^{\epsilon_n}}^{\epsilon_n}(\partial U))_{U\in \mathfrak{U}_{z^{\epsilon_n}}^{\epsilon_n}}$ for every $n$.
Since this latter collection converges almost surely to the ordered collection $(\mu_h(U),\mathrm{sgn}(U)\nu_h(\partial U))_{U\in \mathfrak{U}_z}$, we obtain (iii). \item This follows from (iii) and the fact that the marginal law of $X=(A,B)$ is that of a Brownian excursion in the right half plane. Specifically, the first coordinate of $X$ at a given time $t$ can a.s.\ be recovered from the jumps of its inverse local time at backwards running infima with respect to time $t$, see \eqref{eq:Al}, and the second coordinate can also be recovered from the collection of its signed jumps when reparameterized by this inverse local time. When $t=t_z$, the values are recovered exactly using the formula \eqref{eq:AB} after using (iii) to translate between $(\mu_h(U),\mathrm{sgn}(U)\nu_h(\partial U))_{U\in \mathfrak{U}_z}$ and $(T^{t_z},\widehat B^{t_z})$. \end{enumerate} \end{proofof} \subsection{Proof of Proposition \ref{prop:p3}} \corr{In this subsection, $\delta$ is fixed, so we omit it from the notation (just as in the statement of Proposition \ref{prop:p3}).} Since the convergence of $\mu_{h^\epsilon}^\epsilon$ to $\mu_h$ is included in the convergence of $(\mathfrak{cle}^\epsilon, \mathfrak{lqg}^\epsilon)$ to $(\mathfrak{cle}, \mathfrak{lqg})$ it is clear (for example by working on a probability space where the convergence holds almost surely) that $(\mathfrak{cle}^\epsilon, \mathfrak{lqg}^\epsilon, z^\epsilon)\Rightarrow (\mathfrak{cle},\mathfrak{lqg}, z)$ as $\epsilon\to 0$. From here, the proof proceeds via the following steps.
\begin{enumerate}[(1)] \item The tuples on the left hand side in Proposition \ref{prop:p3} are tight in $\epsilon$,{ so we may take a subsequential limit $(\mathfrak{cle}, \mathfrak{lqg}, z, (s_i)_{i\ge 1}, (w_i)_{i\ge 1}, (h_i)_{i\ge 1})$ (that we will work with for the remainder of the proof)}. \item $w_i\in \mathbb{D}\setminus \Gamma$ \corr{(i.e.\ $w_i$ is not on any nested CLE$_4$ loop)} for all $i$ a.s. \item If $\widetilde g_i:U(z,w_i) {\to} \mathbb{D}$ are conformal with $\widetilde g_i(w_i)=0$ and $\widetilde g_i'(w_i)>0$, then $h_i=\widetilde g_i(h)$ for each $i$ a.s.\footnote{Once we have point (5), it follows that these are equal to the $(g_i)_{i=1}^N$.} \item Given $(\mathfrak{cle},\mathfrak{lqg},z)$, the $w_i$ are conditionally independent and distributed according to $\mu_h$ in each $U(z,w_i)$. \item $\{U\in \mathfrak{U}_z: \mu_h(U)\ge \delta\}=\{U(z,w_i)\}_{i\ge 1}$ a.s., where the set on the left is ordered as usual. \item $s_i=\mathrm{sgn}(U(z,w_i))$ for each $i$ a.s. \end{enumerate} These clearly suffice for the proposition. \\ \begin{proofof}{(1)} Tightness of the first five components follows from the fact that $(\mathfrak{cle}^\epsilon, \mathfrak{lqg}^\epsilon, z^\epsilon)\Rightarrow (\mathfrak{cle},\mathfrak{lqg}, z)$ as $\epsilon\to 0$, plus the tightness of the quantum boundary lengths in $\mathfrak{U}_z^\epsilon$ (recall that these converge when $\mathfrak{be}^\epsilon$ converges). To see the tightness of $(g_i^\epsilon(h^\epsilon))_{i\ge 1}$ we note that there are at most $\mu^\epsilon_{h^\epsilon}(\mathbb{D})/\delta$ non-zero terms, where $\mu^\epsilon_{h^\epsilon}(\mathbb{D})$ is tight in $\epsilon$.
Moreover, each non-zero $g_i^\epsilon(h^\epsilon)$ has the law of $\widetilde{h}^\epsilon\circ \theta^\epsilon+a^\epsilon$, where $\widetilde{h}^\epsilon$ is as in Lemma \ref{cor:disc_mapped}, $\theta^\epsilon$ are random rotations (which automatically form a tight sequence in $\epsilon$) and $a^\epsilon$ are some tight sequence of real numbers. This implies the result by Lemma \ref{cor:disc_mapped}. \end{proofof}\\ \begin{proofof}{(2)} Suppose that $(y_j^\epsilon)_{j\ge 1}$ are sampled conditionally independently according to $\mu^\epsilon_{h^\epsilon}$ in $\mathbb{D}$, normalized to be a probability measure. Then $(\mathfrak{cle}^\epsilon, \mathfrak{lqg}^\epsilon, (y_j^\epsilon)_{j\ge 1})\Rightarrow (\mathfrak{cle}, \mathfrak{lqg}, (y_j)_{j\ge 1})$ where the $(y_j)_{j\ge 1}$ are sampled conditionally independently from $\mu_h$ and almost surely all lie in $\mathbb{D}\setminus \Gamma$. On the other hand, since $\mathfrak{cle}^\epsilon$ and $\mathfrak{lqg}^\epsilon$ are independent, one can sample $(w_i^\epsilon)_{i\ge 1}$ by taking $(\mathfrak{cle}^\epsilon,\mathfrak{lqg}^\epsilon,(y_j^\epsilon)_{j\ge 1})$ and then setting $w_i^\epsilon=y_j^\epsilon$ for each $i$, with $j=\min\{k:y_k^\epsilon\in U_i^\epsilon\}$.
\operatornameeratorname{e}nd{proofof}\\ \begin{proofof}{(3)} By Skorokhod's theorem, we may work on a probability space where we have the almost sure convergence \begin{equation}\label{eq:sko}(\mathfrak{cle}^\operatornameeratorname{e}pn, \mathfrak{lqg}^\operatornameeratorname{e}pn, z^\operatornameeratorname{e}pn,(\mathrm{sgn}(U_i^\operatornameeratorname{e}pn))_{i}, (w_i^\operatornameeratorname{e}pn)_{i}, (g_i^\operatornameeratorname{e}pn(h^\operatornameeratorname{e}pn))_{i})\to (\mathfrak{cle}, \mathfrak{lqg}, z, (s_i)_{i}, (w_i)_{i}, (h_i)_{i})\operatornameeratorname{e}nd{equation} along a sequence $\operatornameeratorname{e}pn\downarrow 0$. It is then natural to expect, since the $w_i^\operatornameeratorname{e}pn$ converge a.s.\ to the $w_i$ and $\mathfrak{cle}^\operatornameeratorname{e}pn$ converges a.s.\ to $\mathfrak{cle}$, that the maps $g_i^\operatornameeratorname{e}pn$ will converge to $\widetilde g_i$ described in (3). Since $h^\operatornameeratorname{e}pn$ also converges a.s.\ to $h$ (as part of the convergence $\mathfrak{lqg}^\operatornameeratorname{e}pn\to \mathfrak{lqg}$) it therefore follows that $h_i$ will a.s.\ be equal to \corr{$\widetilde g_i(h)$} for each $i$. This is the essence of the proof. However, one needs to take a little care with the statement concerning the convergence $g_i^{\operatornameeratorname{e}pn} \to \widetilde g_i$, since the domains $U_i^\operatornameeratorname{e}pn$ and $U(z,w_i)$ are defined in terms of points that are \operatornameeratorname{e}mph{not} necessarily in $\mathcal{Q}$, while the convergence of $\mathfrak{cle}^\operatornameeratorname{e}ps\to \mathfrak{cle}$ is stated in terms of pairs of points in $\mathcal{Q}$. To carry out the careful argument, let us fix $i\ge 1$. Since $w_i\in \mathbb{D}\setminus \Gamma$ a.s.\ by (2), there exists $r>0$ and $y\in \mathcal{Q}$ such that $B(y,r)\subset B(w_i,2r)\subset U(z,w_i)=(\mathbf{D}_{w_i})_{\sigma_{w_i,z}}$.
By taking $r$ smaller if necessary, we can also find $x\in \mathcal{Q}$ with $B(x,r)\subset B(z,2r)\subset (\mathbf{D}_z)_{\sigma_{z,w_i}}$. Note that $\mathcal{O}_{z,w_i}=\mathcal{O}_{x,y}=0$ by definition. Due to the almost sure convergence $z^\operatornameeratorname{e}pn\to z$, $w^\operatornameeratorname{e}pn_i \to w_i$, and $\mathfrak{cle}^\operatornameeratorname{e}pn\to \mathfrak{cle}$ it then follows that $U^\operatornameeratorname{e}pn(z^\operatornameeratorname{e}pn,w_i^\operatornameeratorname{e}pn)=U^\operatornameeratorname{e}pn(x, y)=(\mathbf{D}_y^\operatornameeratorname{e}pn)_{\sigma^\operatornameeratorname{e}pn_{y,x}}$, and $\mathcal{O}^\operatornameeratorname{e}pn_{x,y}=\mathcal{O}^\operatornameeratorname{e}pn_{z^\operatornameeratorname{e}pn,w_i^\operatornameeratorname{e}pn}=0$ for all $n$ large enough. Moreover, we know that the maps $f^\operatornameeratorname{e}pn:\mathbb{D} \to U^\operatornameeratorname{e}pn(z^\operatornameeratorname{e}pn, w_i^\operatornameeratorname{e}pn)=(\mathbf{D}^\operatornameeratorname{e}pn_{y})_{\sigma_{y,x}}$ with $f^\operatornameeratorname{e}pn(0)=y$, $(f^\operatornameeratorname{e}pn)'(0)>0$ converge on compacts of $\mathbb{D}$ to $f:\mathbb{D}\to U(x,y)=(\mathbf{D}_y)_{\sigma_{y,x}}$ sending $0$ to $y$ and with $f'(0)>0$. On the other hand, $(\widetilde g_i)^{-1}=f\circ \phi$ where $\phi:\mathbb{D}\to \mathbb{D}$ sends $0\mapsto f^{-1}(w_i)$ and has $\phi'(0)>0$, and $(g_i^\operatornameeratorname{e}pn)^{-1}=f^\operatornameeratorname{e}pn\circ \phi^\operatornameeratorname{e}pn$ for each $\operatornameeratorname{e}pn$, where $\phi^\operatornameeratorname{e}pn:\mathbb{D}\to \mathbb{D}$ has $\phi^\operatornameeratorname{e}pn(0)=(f^\operatornameeratorname{e}pn)^{-1}(w_i^\operatornameeratorname{e}pn)$ and $(\phi^\operatornameeratorname{e}pn)'(0)>0$. 
Since $w_i^\operatornameeratorname{e}pn\to w_i$ almost surely, and the $w_i^\operatornameeratorname{e}pn$ are uniformly close to $y$ and bounded away from the boundary of $U^\operatornameeratorname{e}pn(x,y)$, this implies that $(g_i^{\operatornameeratorname{e}pn})^{-1}$ converges to $\widetilde g_i^{-1}$ uniformly on compacts of $\mathbb{D}$. In turn, this implies that $h_i$ restricted to any compact of $\mathbb{D}$ is equal to $\widetilde g_i(h)$, which verifies that $h_i=\widetilde g_i(h)$ a.s. \operatornameeratorname{e}nd{proofof}\\ \begin{proofof}{(4)} For this it suffices to prove that for each $i$, $$(\mathfrak{cle}^\operatornameeratorname{e}pn, \mathfrak{lqg}^\operatornameeratorname{e}pn, z^\operatornameeratorname{e}pn, w_i^\operatornameeratorname{e}pn, g_i^\operatornameeratorname{e}pn(h^\operatornameeratorname{e}pn),\mu^\operatornameeratorname{e}pn_{g_i^\operatornameeratorname{e}pn(h^\operatornameeratorname{e}pn)})\mathbb{R}ightarrow (\mathfrak{cle}, \mathfrak{lqg}, z, w_i, h_i,\mu_{h_i})$$ as $n\to \infty$, where the convergence of the final components is in the sense of weak convergence for measures on $\mathbb{D}$. Note that if we work on a space where all but the last components converge a.s., as in (3), then the proof of (3) shows that $h_i=\widetilde g_i(h)$ and that $(g_i^\operatornameeratorname{e}pn)^{-1}\to (\widetilde g_i)^{-1}$ a.s.\ when restricted to compact subsets of $\mathbb{D}$. This implies the a.s.\ convergence of the measures \corr{$\mu^\operatornameeratorname{e}pn_{g_i^\operatornameeratorname{e}pn(h^\operatornameeratorname{e}pn)}$ to $\mu_{h_i}$} when restricted to compact subsets of $\mathbb{D}$.
On the other hand, $\mu_{g_i^\operatornameeratorname{e}pn(h^\operatornameeratorname{e}pn)}(\mathbb{D})$ is a tight sequence in $n$, and by Remark \operatorname{Re}f{rmk:changing_lengths}, any subsequential limit $(\mathfrak{cle}, \mathfrak{lqg}, z, w_i, h_i, m)$ of $(\mathfrak{cle}^\operatornameeratorname{e}pn, \mathfrak{lqg}^\operatornameeratorname{e}pn, z^\operatornameeratorname{e}pn, w_i^\operatornameeratorname{e}pn, g_i^\operatornameeratorname{e}pn(h^\operatornameeratorname{e}pn),\mu^\operatornameeratorname{e}pn_{g_i^\operatornameeratorname{e}pn(h^\operatornameeratorname{e}pn)}(\mathbb{D}))$ has $m=\mu_{h_i}(\mathbb{D})$ a.s. Combining these observations yields the result. \operatornameeratorname{e}nd{proofof}\\ \begin{proofof}{(5)} As in (3) we assume that we are working on a probability space where we have almost sure convergence along a sequence $\operatornameeratorname{e}pn\downarrow 0$, so we need to show that the limiting domains $U(z,w_i)$ are precisely the elements of $\mathfrak U_z$ that have $\mu_h$ area greater than or equal to $\delta$. The same argument as for (4) gives that each $U(z,w_i)$ is a component of $\mathfrak{U}_z$ with $\mu_h$ area greater than or equal to $\delta$. So it remains to show that they are the only such elements of $\mathfrak U_z$. For this, suppose that $U\in \mathfrak{U}_z$ has $\mu_h(U)\ge \delta$. Then $\mu_h(U)=\delta+r$ for some $r>0$ with probability $1$. Choosing $w\in \mathcal{Q}$, $a>0$ such that $U=U(z,w)\supset B(w,a)$ it is easy to see that $U(z,w)$ is the a.s.\ Carath\'{e}odory limit seen from $w$ of $U^\operatornameeratorname{e}pn(z^\operatornameeratorname{e}pn, w)$ as $\operatornameeratorname{e}pn\to 0$. 
Using the convergence of $\mu^\operatornameeratorname{e}pn_{h^\operatornameeratorname{e}pn}$ to $\mu_h$ and Corollary \operatorname{Re}f{cor:cart_inclusion}, we therefore see that $\lim_n \mu_{h^\operatornameeratorname{e}pn}^\operatornameeratorname{e}pn(U^\operatornameeratorname{e}pn(z^\operatornameeratorname{e}pn,w))\ge \mu_h(U(z,w))=\delta+r$ and so $U^\operatornameeratorname{e}pn(z^\operatornameeratorname{e}pn, w)=U_i^\operatornameeratorname{e}pn=U^\operatornameeratorname{e}pn(z^\operatornameeratorname{e}pn,w_i^\operatornameeratorname{e}pn)$ for some $i$ and all $n$ large enough. From here we may argue as in the proof of (3) to deduce that the Carath\'{e}odory limit of $U^\operatornameeratorname{e}pn(z^\operatornameeratorname{e}pn,w_i^\operatornameeratorname{e}pn)$ is equal to $U(z,w_i)$. Thus, since $U=U(z,w)$ is the Carath\'{e}odory limit of $U^\operatornameeratorname{e}pn(z^\operatornameeratorname{e}pn,w)$ which is equal to $U^\operatornameeratorname{e}pn(z^\operatornameeratorname{e}pn,w_i^\operatornameeratorname{e}pn)$ for all $n$ large enough, we conclude that $U=U(z,w_i)$. The fact that the orders of the collections in (3) coincide follows from the convergence of the order variables as part of $\mathfrak{cle}^\operatornameeratorname{e}ps\to \mathfrak{cle}$ (and the argument we have now used several times that allows one to transfer from $z^\operatornameeratorname{e}ps, w_i^\operatornameeratorname{e}ps$ to points in $\mathcal{Q}$: we omit the details). \operatornameeratorname{e}nd{proofof}\\ \begin{proofof}{(6)} Let us work under almost sure convergence as in the proof of (3), fix $i\ge 1$ and define $x,y,r$ as in the proof of (3).
By Proposition \operatorname{Re}f{prop:convbranchingsleorder}, we know that $\sigma^\operatornameeratorname{e}pn_{y,x}\to \sigma_{y,x}$ almost surely as $n\to \infty$, and that $\mathrm{sgn}(U_i^\operatornameeratorname{e}pn)$ is determined by the {number of loops nested around $y$ which $\mathbf{D}^\operatornameeratorname{e}pn_y$ discovers} before \operatornameeratorname{e}mph{or at} time $\sigma^\operatornameeratorname{e}pn_{y,x}$ (see the definition of CLE loops from the space-filling/branching $\SLE_{\kappa'}$ in Section \operatorname{Re}f{sec:sletocle}). If $\sigma_{y,x}$ occurs between two such times for $\mathbf{D}_y$, it is clear from the a.s.\ convergence of $\sigma^\operatornameeratorname{e}pn_{y,x}$ and $\mathbf{D}_{y}^\operatornameeratorname{e}pn$ that the number of loop closure times for $\mathbf{D}^{\operatornameeratorname{e}pn}_y$ occurring before or at $\sigma^\operatornameeratorname{e}pn_{y,x}$ converges to the number of loop closure times for $\mathbf{D}_{y}$ occurring before or at time $\sigma_{y,x}$. If $\sigma_{y,x}$ is a loop closure time for $\mathbf{D}_y$, the result follows from Lemma \operatorname{Re}f{lem:septime_endofloop}. \operatornameeratorname{e}nd{proofof} \subsection{Discussion and outlook} The results obtained above open the road to several very natural questions related to the critical mating of trees picture. We will describe some of those below. Roughly, they can be stated as follows: \begin{enumerate} \item Can one obtain a version of critical mating of trees where there is bi-measurability between the decorated LQG surface and the pair of Brownian motions (with possibly additional information included)? \item There is an interesting relation to growth-fragmentation processes studied in \cite{ADS}. Can one combine these two points of view in a fruitful way? \item The Brownian motion $A$ encodes a distance of each point to the boundary, and in particular between any CLE$_4$ loop and the boundary.
What is its relation to the CLE$_4$ metric introduced in \cite{SWW19}? \item Can one prove convergence of observables in critical FK-decorated random planar maps towards the observables in the critical mating-of-trees picture? \operatornameeratorname{e}nd{enumerate} Let us finally mention that there are also other interesting questions in the realm of critical LQG, e.g.\ the behaviour of height-functions on top of critical planar maps, which are certainly worth exploring too. \subsubsection{Measurability}\label{sec:meas} In the subcritical mating of trees, i.e., when $\kappappa' > 4$, $\gamma < 2$ and we consider the coupling $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be})$ described in the introduction or in Section 5 (for simplicity without subscripts), \cite{DMS14} proves that in the infinite-volume setting the pair $(\mathfrak{cle}, \mathfrak{lqg})$ determines $\mathfrak{be}$ and vice-versa. In particular, $(\mathfrak{cle}, \mathfrak{lqg})$ can be obtained from $\mathfrak{be}$ via a measurable map. This result is extended to the finite volume case of LQG disks in \cite{AG19}. By contrast, some of this measurability gets lost when we consider our critical setting. The easier direction to consider is whether $(\mathfrak{cle}, \mathfrak{lqg})$ determine $\mathfrak{be}$. In the subcritical case this comes basically from the construction, and it does not matter what we really mean by $\mathfrak{cle}$: the nested CLE$_{\kappappa'}$, the space-filling SLE$_{\kappappa'}$ and the radial exploration tree of CLE$_{\kappappa'}$  are all measurable with respect to one another. This, however, gets more complicated in the critical case. First, the question of whether the nested CLE$_4$ determines the uniform exploration tree of CLE$_4$ is already not straightforward; this is a theorem of an unpublished work \cite{SWW19}. 
Moreover, the nested CLE$_4$ no longer determines the space-filling exploration from Section \operatorname{Re}f{sec:conv_order}: indeed, we saw that to go from the uniform exploration tree to the ordering on points, some additional order variables are needed. These order variables are, however, the only missing information when going from $(\mathfrak{cle}, \mathfrak{lqg})$ to $\mathfrak{be}$: the conclusion of Theorem \operatorname{Re}f{thm_main} is that when we include the order variables in $\mathfrak{cle}$ (in other words consider the space-filling exploration) then indeed $\mathfrak{be}$ is measurable with respect to $(\mathfrak{cle}, \mathfrak{lqg})$. In the converse direction, things are trickier. In the coupling considered in this paper, $\mathfrak{be}$ does not determine the pair $(\mathfrak{cle}, \mathfrak{lqg})$; however, we conjecture that $(\mathfrak{cle}, \mathfrak{lqg})$ is determined modulo a countable number of ``rotations''. Informally, one can think of these rotations as follows: a rotation is an operation where we stop the CLE$_4$ exploration at a time when the domain of exploration is split into two domains $D$ and $D'$, we consider the LQG surfaces $(D,h)$ and $(\mathbb{D}\setminus D,h)$, and we conformally weld these two surfaces together differently. The field and loop ensemble $(\widehat\mathfrak{cle}, \widehat\mathfrak{lqg})$ of the new surface will be different from the pair $(\mathfrak{cle}, \mathfrak{lqg})$ of the original surface, but their law is unchanged if we choose the new welding appropriately (e.g.\ if we rotate by a fixed amount of LQG length), and $\mathfrak{be}$ is pathwise unchanged. Therefore performing such a rotation gives us two different pairs $(\mathfrak{cle}, \mathfrak{lqg})$ and $(\widehat\mathfrak{cle}, \widehat\mathfrak{lqg})$ with the same law, and which are associated with the same $\mathfrak{be}$. We believe that these rotations are the only missing part needed to obtain measurability in this coupling.
In fact, by considering a different CLE$_4$ exploration, where loops are pinned in a predetermined way (e.g.\ where all loops are pinned to some trunk, such as in e.g.\ \cite{lehmkuehler21}), one could imagine obtaining a different coupling of $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be})$, where $\mathfrak{be}$ does determine $(\mathfrak{cle}, \mathfrak{lqg})$. \subsubsection{Growth fragmentation}\label{secGF} We saw below the statement of Theorem \operatorname{Re}f{thm_main} how certain observables in the Brownian excursion $\mathfrak{be}$ map to observables (e.g., quantum boundary lengths and areas of discovered $\mathbb{C}LE$ loops) in $(\mathfrak{cle}, \mathfrak{lqg})$, when we restrict to a single uniform CLE$_4$ exploration branch. Given the definition of the branching $\mathbb{C}LE_4$ exploration (recall that the explorations towards any two points coincide exactly until they are separated by the discovered loops and then evolve independently) this is one way to define an entire branching process from the Brownian excursion. In fact, this embedded branching process was already described completely, and independently, in an earlier work of A\"{i}dekon and Da Silva \cite{ADS}. Namely, given $X=(A,B)$ with law as in Theorem \operatorname{Re}f{thm_main}, one can consider for any $a\ge 0$ the countable collection of excursions of $X$ to the right of the vertical line with horizontal component $a$. Associated with each such excursion is a total displacement (the difference between the vertical coordinate of the start and end points) and a sign (depending on which of these coordinates is larger). In \cite{ADS}, the authors prove that if one considers the evolution of these signed displacements as $a$ increases, then one obtains a signed \operatornameeratorname{e}mph{growth fragmentation} process with completely explicit law. 
The fact that this process is a growth fragmentation means, roughly speaking, that it can be described by the evolving ``mass'' of a family of cells: the mass of the initial cell evolves according to a positive self-similar Markov process, and every time this mass has a jump, a new cell with exactly this mass is introduced into the system. Each such new cell initiates an independent cell system with the same law. In the setting of signed growth fragmentations, masses may be both positive and negative. In the coupling $(\mathfrak{cle}, \mathfrak{lqg}, \mathfrak{be})$, such a growth fragmentation is therefore naturally embedded in $\mathfrak{be}$. It corresponds to a parameterization of the branching uniform $\mathbb{C}LE_4$ exploration by quantum natural distance from the boundary (i.e., by the value of the $A$ component), and branching occurs whenever components of the disk become disconnected in the exploration. At any given time, the absolute mass of a fragment is equal to the quantum boundary length of the corresponding component, and the sign of the fragment is determined by the number of $\mathbb{C}LE_4$ loops that surround this component. {Let us also mention that growth fragmentations in the setting of CLE on LQG were also studied in \cite{MSWfrag,MSWfrag2}, and coincide with the growth fragmentations obtained as scaling limits from random planar map explorations in \cite{BBCK}. Taking $\kappappa \rightarrow 4$ in these settings (either $\kappappa \uparrow 4$ in \cite{MSWfrag} or $\kappappa \downarrow 4$ in \cite{MSWfrag2}) is also very natural and would give other insights about $\kappappa =4$ than those obtained in this paper. Lehmkuehler takes this approach in \cite{lehmkuehler21}.} \subsubsection{Link with the conformally invariant metric on $\mathbb{C}LE_4$} Recall the uniform CLE$_4$ exploration from Section 2.1.5, which was introduced by Werner and Wu \cite{WW13}. 
Werner and Wu interpret the time $t$ at which a loop $\mathcal{L}$ of the CLE$_4$ $\Gamma$ is added, with the time parameterization \operatornameeratorname{e}qref{pppcle}, as the distance of $\mathcal{L}$ to the boundary $\partial\mathbb{D}$; we refer to it here as the \operatornameeratorname{e}mph{\corr{CLE$_4$ exploration} distance} of $\mathcal{L}$ to $\partial\mathbb{D}$. In an unpublished work, Sheffield, Watson, and Wu \cite{SWW19} prove that this distance is the distance as measured by a conformally invariant metric on $\Gamma\cup\{\partial\mathbb{D} \}$. This metric is conjectured to be the limit of the adjacency metric on CLE$_{\kappa'}$ loops as ${\kappa'}\downarrow 4$. It is also argued in \cite{SWW19} that the uniform exploration of $\Gamma$ is determined by $\Gamma$. Our process $A$ also provides a way to measure the distance of a CLE$_4$ loop $\mathcal{L}$ to $\partial\mathbb{D}$, as we previously discussed below \operatornameeratorname{e}qref{eq:Al} in the case of a point. Namely, for an arbitrary point $z$ enclosed by $\mathcal{L}$ define \begin{equation} t(\mathcal{L}):= \mu_h\left( \cup_{U\in\mathfrak{U}_z} U \setminus \operatorname{int}(\mathcal{L}) \right), \operatornameeratorname{e}nd{equation} where $\operatorname{int}(\mathcal{L})\subset\mathbb{D}$ is the domain enclosed by $\mathcal{L}$. It is not hard to see that $t(\mathcal{L})$ does not depend on the choice of $z$. We call $A_{t(\mathcal{L})}$ the \operatornameeratorname{e}mph{quantum natural distance} of $\mathcal{L}$ to $\partial\mathbb{D}$. Note that $A_{t(\mathcal{L})}$ can also be defined similarly as in \operatornameeratorname{e}qref{eq:AB} by counting the number of CLE$_4$ loops of length in $(\delta/2,\delta)$ that are encountered before $\mathcal{L}$ in the CLE$_4$ exploration and then sending $\delta\rightarrow 0$ while renormalizing appropriately. 
We remark that, in contrast to the \corr{CLE$_4$ exploration distances}, we do \operatornameeratorname{e}mph{not} expect that the quantum natural distances to the boundary defined here correspond to a conformally invariant metric on $\Gamma$. It is natural to conjecture that the \corr{CLE$_4$ exploration} distance and the quantum natural distance are related via a Lamperti type transform \operatornameeratorname{e}qb A_{t(\mathcal{L})}=c_0\int_0^T \nu_h(\partial D_t)\,dt. \label{eq:lamperti} \operatornameeratorname{e}qe for some deterministic constant $c_0>0$, where $T$ is the \corr{CLE$_4$ exploration} distance of a loop $\mathcal{L}$ from $\partial \mathbb{D}$ and for $t\in [0,T)$, $D_t$ is the connected component containing $\mathcal{L}$ of $\mathbb{D}$ minus the loops at \corr{CLE$_4$ exploration} distance less than $t$ from $\partial \mathbb{D}$. This is natural \nina{since the distances are invariant under the application of a conformal map (where the field $h$ is modified as in \operatornameeratorname{e}qref{eq:coc})}, since the CLE$_4$ exploration is \operatornameeratorname{e}mph{uniform} for both distances (so if two loops $\mathcal{L},\mathcal{L}'$ have \corr{CLE$_4$ exploration} distance $t,t'$, respectively, to $\partial\mathbb{D}$ then $t<t'$ if and only if $A_{t(\mathcal{L})}<A_{t(\mathcal{L}')}$), and since the left and right sides of \operatornameeratorname{e}qref{eq:lamperti} transform similarly upon adding a constant $c$ to the field $h$ (namely, both sides are multiplied by $e^{c}$). Proving or disproving \operatornameeratorname{e}qref{eq:lamperti} is left as an open problem. We remark that several earlier papers \cite{Sha16,Sh16,Ben18,HS19,GM19} have proved uniqueness of lengths or distances in LQG via an axiomatic approach, with axioms of a rather similar flavor to the above, but these proofs do not immediately apply to our setting. 
\subsubsection{Discrete models} The mating of trees approach to Liouville quantum gravity coupled with CLE is inspired by certain random walk encodings of random planar maps decorated by statistical physics models. The first such encoding is the hamburger/cheeseburger bijection of Sheffield~\cite{She16HC} for random planar maps decorated by the critical Fortuin–Kasteleyn random cluster model (FK-decorated planar map). In the FK-decorated planar map each configuration is a planar map with an edge subset, whose weight is assigned according to the critical FK model with parameter $q>0$. Sheffield encodes this model by five-letter words whose symbol set consists of hamburger, cheeseburger, hamburger order, cheeseburger order, and fresh order. The fraction $p$ of fresh orders within all orders is given by $\sqrt q=\frac{2p}{1-p}$. As we read the word, a hamburger (resp.\ cheeseburger) will be consumed by either a hamburger (resp.\ cheeseburger) order or a fresh order, in a last-come-first-serve manner. In this setting, the discrete analog of our Brownian motion $(A,B)$ is the net change in the burger count and the burger discrepancy since time zero, which we denote by $(\mathcal{C}_n,\mathcal{D}_n)$. It was proved in~\cite{She16HC} that $\operatornameeratorname{e}ps(\mathcal{C}_{t/\operatornameeratorname{e}ps^2},\mathcal{D}_{t/\operatornameeratorname{e}ps^2})$ converges in law to $(B^1_{t}, B^2_{\alpha t})$, where $B^1,B^2$ are independent standard one-dimensional Brownian motions and $\alpha=\max\{1-2p, 0\}$. When $p\in (0,\frac12)$, the correlation of $(B^1_{t}+B^2_{\alpha t},B^1_{t}- B^2_{\alpha t})$ is the same as for the left and right boundary length processes of space filling $\SLE_{\kappa'}$ decorated $\gamma$-LQG (cf.\ Theorem \operatorname{Re}f{thm:MOT}) where $q=2+ 2\cos (8\pi/\kappappa')$ and $\gamma^2=16/\kappappa'$. 
This is consistent with the conjecture that under these parameter relations, LQG coupled with $\mathbb{C}LE$ (equivalently, space filling $\SLE$) is the scaling limit of the FK-decorated planar map for $q\in (0,4)$. Indeed, based on the Brownian motion convergence in~\cite{She16HC}, it was shown in~\cite{GMS19,GS-FK2,GS-FK3} that geometric quantities such as loop lengths and areas converge as desired. When $q=4$ and $p=\frac12$, we have $B^2_{\alpha t}=0$, just as in the $\kappappa'\downarrow 4$ limit of LQG coupled with $\mathbb{C}LE$, where the correlation of the left and right boundary length processes tend to 1. We believe that the process $(\operatornameeratorname{e}ps\mathcal{C}_{t/\operatornameeratorname{e}ps^2}, \mathrm{Var}[\mathcal{D}_{\operatornameeratorname{e}ps^{-2}}]^{-1} \mathcal{D}_{t/\operatornameeratorname{e}ps^2})$ converges in law to $(B^1_{t}, B^2_{t})$; moreover, based on this convergence and results in our paper, it should be possible to extract the convergence of the loop lengths and areas for FK decorated planar map to the corresponding observables in critical LQG coupled with $\mathbb{C}LE_4$. We leave this as an open question. It would also be very interesting to identify the order of the normalization $\mathrm{Var}[\mathcal{D}_{\operatornameeratorname{e}ps^{-2}}]^{-1}$, which is related to the asymptotic of the partition function of the FK-decorated planar map with $q=4$. Another model of decorated random planar maps that is believed to converge (after uniformisation) to CLE decorated LQG is the O($n$) loop model, where the critical case $\kappappa=4$ corresponds to $n=2$. It is therefore also interesting to ask whether our Brownian half plane excursion $\mathfrak{be}$ can be obtained as a scaling limit of a suitable boundary length exploration process in this discrete setting. 
In fact, a very closely related question was considered in \cite{BCM18}, where the authors identify the scaling limit of the perimeter process in peeling explorations of infinite volume critical Boltzmann random planar maps (see \cite{BBG} for the relationship between these maps and the O(2) model). Modulo finite/infinite volume differences, this scaling limit---which is a Cauchy process---corresponds to a single ``branch'' in our Brownian motion (see Section \operatorname{Re}f{secGF}). \operatornameeratorname{e}nd{document}
\begin{document} \date \today \maketitle In contrast with knots, whose properties depend only on their extrinsic topology in $S^3$, there is a rich interplay between the intrinsic structure of a graph and the extrinsic topology of all embeddings of the graph in $S^3$. For example, it was shown in \cite{CG} that every embedding of the complete graph $K_7$ in $S^3$ contains a non-trivial knot. Later in \cite{fl} it was shown that for every $m\in \mathbb N$, there is a complete graph $K_n$ such that every embedding of $K_n$ in $S^3$ contains a knot $Q$ (i.e., $Q$ is a subgraph of $K_n$) such that $|a_2(Q)|\geq m$, where $a_2$ is the second coefficient of the Conway polynomial of $Q$. More recently, in \cite{fmn} it was shown that for every $m\in \mathbb N$, there is a complete graph $K_n$ such that every embedding of $K_n$ in $S^3$ contains a knot $Q$ whose minimal crossing number is at least $m$. Thus there are arbitrarily complicated knots (as measured by $a_2$ and the minimal crossing number) in every embedding of a sufficiently large complete graph in $S^3$. In light of these results, it is natural to ask whether there is a graph such that every embedding of that graph in $S^3$ contains a composite knot. Or more generally, is there a graph such that every embedding of the graph in $S^3$ contains a satellite knot? Certainly, $K_7$ is not an example of such a graph since Conway and Gordon \cite{CG} exhibit an embedding of $K_7$ containing only the trefoil knot. In this paper we answer this question in the negative. In particular, we prove that every graph has an embedding in $S^3$ such that every non-trivial knot in that embedding is hyperbolic. Our theorem implies that every graph has an embedding in $S^3$ which contains no composite or satellite knots. By contrast, for any particular embedding of a graph we can add local knots within every edge to get an embedding such that every knot in that embedding is composite. Let $G$ be a graph. 
There is an odd number $n$, such that $G$ is a minor of $K_n$. We will show that for every odd number $n$, there is an embedding of $K_n$ in $S^3$ such that every non-trivial knot in that embedding of $K_n$ is hyperbolic. It follows that there is an embedding of $G$ in $S^3$ which contains no non-trivial non-hyperbolic knots. Let $n$ be a fixed odd number. We begin by constructing a preliminary embedding of $K_n$ in $S^3$ as follows. Let $h$ be a rotation of $S^3$ of order $n$ with fixed point set $\alpha\cong S^1$. Let $V$ denote the complement of an open regular neighborhood of the fixed point set $\alpha$. Let $v_1$, \dots, $v_n$ be points in $V$ such that for each $i$, $h(v_i)=v_{i+1}$ (throughout the paper we shall consider our subscripts mod $n$). These $v_i$ will be the vertices of the preliminary embedding of $K_n$. \begin{definition} By a {\bf solid annulus} we shall mean a 3-manifold with boundary which can be parametrized as $D\times I$ where $D$ is a disk. We use the term {\bf the annulus boundary} of a solid annulus $D\times I$ to refer to the annulus $\partial D\times I$. The {\bf ends} of $D\times I$ are the disks $D\times \{0\}$ and $D\times \{1\}$. If $A$ is an arc in a solid annulus $W$ with one endpoint in each end of $W$, and $A$ co-bounds a disk in $W$ together with an arc in $\partial W$, then we say that $A$ is a {\bf longitudinal arc} of $W$. \end{definition} As follows, we embed the edges of $K_n$ as simple closed curves in the quotient space $S^3/h=S^3$. Observe that since $V$ is a solid torus, $V'=V/h$ is also a solid torus. Let $D'$ denote a meridional disk for $V'$ which does not contain the point $v=v_1/h$. Let $W'$ denote the solid annulus $\mathrm{cl}(V'-D')$ with ends $D'_+$ and $D'_-$. 
Since $n$ is odd, we can choose unknotted simple closed curves $S_1$, \dots, $S_{\frac{n-1}{2}}$ in the solid torus $V'$ such that each $S_i$ contains $v$ and has winding number $n+i$ in $V'$, the $S_i$ are pairwise disjoint except at $v$, and for each $i$, $W'\cap S_i$ is a collection of $n+i$ untangled longitudinal arcs (see Figure \ref{twists}). \begin{figure}\label{twists} \end{figure} We define two additional simple closed curves $J'$ and $C'$ in $V'$ whose intersections with $W'$ are illustrated in Figure \ref{twists} as follows. First, choose a simple closed curve $J'$ in $V'$, whose intersection with $W'$ is a longitudinal arc which is disjoint from and untangled with $S_1\cup\dots \cup S_{\frac{n-1}{2}}$. Next we let $C'$ be the unknotted simple closed curve in  $W'-(S_1\cup\dots \cup S_{\frac{n-1}{2}} \cup J')$ whose projection is illustrated in Figure \ref{twists}.  In particular, $C'$ contains one half twist between $J'$ and the set of arcs of $S_1\cup\dots \cup S_{\frac{n-1}{2}}$ which do not contain $v$, another half twist between those arcs of $S_1\cup\dots \cup S_{\frac{n-1}{2}}$ and the set of arcs containing $v$, and $r$ full-twists between each of the individual  arcs of  $S_i$ and $S_{i+1}$ containing $v$.  We will determine the value of $r$ later. Each of the $\frac{n-1}{2}$ simple closed curves $S_1$, \dots, $S_\frac{n-1}{2}$ lifts to a simple closed curve consisting of $n$ consecutive edges of $K_n$. The vertices $v_1$, \dots, $v_n$ together with these $\frac{n(n-1)}{2}$ edges give us a preliminary embedding $\Gamma_1$ of $K_n$ in $S^3$. Lift the meridional disk $D'$ of the solid torus $V'$ to $n$ disjoint meridional disks $D_1$, \dots, $D_n$ of the solid torus $V$. Lift the simple closed curve $C'$ to $n$ disjoint simple closed curves $C_1$, \dots, $C_n$, and lift the simple closed curve $J'$ to $n$ consecutive arcs $J_1$, \dots, $J_n$ whose union is a simple closed curve $J$.
The closures of the components of $V-(D_1\cup \dots \cup D_n)$ are solid annuli, which we denote by $W_1$, \dots, $W_n$. The subscripts of all of the lifts are chosen consistently so that for each $i$, $v_i\in W_i$, $C_i\cup J_i\subseteq W_i$, and $D_i$ and $D_{i+1}$ are the ends of the solid annulus $W_i$. For each $i$, the pair $(W_i-(C_i \cup J_i),(W_i-(C_i \cup J_i))\cap \Gamma_1)$ is homeomorphic to $(W'-(C' \cup J'),(W'-(C' \cup J'))\cap(S_1\cup\dots \cup S_{\frac{n-1}{2}}))$. For each $i$, the solid annulus $W'$ contains $n+i-1$ arcs of $S_i$ which are disjoint from $v$. Hence each edge of the embedded graph $\Gamma_1$ meets each solid annulus $W_i$ in at least one arc not containing $v_i$. Let $\kappa$ be a simple closed curve in $\Gamma_1$. For each $i$, we let $k_i$ denote the set of those arcs of $\kappa\cap W_i$ which do not contain $v_i$, and let $e_i$ denote either the single arc of $\kappa\cap W_i$ which does contain $v_i$ or the empty set if $v_i$ is not on $\kappa$. Observe that since $\kappa$ is a simple closed curve, it contains at least three edges of $\Gamma_1$; and as we observed above, each edge of $\kappa$ contains at least one arc of $k_i$. Thus for each $i$, $k_i$ contains at least three arcs. Either $e_i$ is empty, the endpoints of $e_i$ are in the same end of the solid annulus $ W_i$, or the endpoints of $e_i$ are in different ends of $W_i$. We illustrate these three possibilities for $(W_i,C_i \cup J_i\cup k_i\cup e_i)$ in Figure \ref{WicapK} as forms a), b) and c) respectively. The number of full-twists represented by the labels $t$, $u$, $x$, or $z$ in Figure \ref{WicapK} is some multiple of $r$ depending on the particular simple closed curve $\kappa$.
\begin{figure} \caption{The forms of $(W_i,C_i \cup J_i\cup k_i\cup e_i)$.} \label{WicapK} \end{figure} For each of the forms of $(W_i,C_i \cup J_i\cup k_i\cup e_i)$ illustrated in Figure \ref{WicapK}, we will associate an additional arc and an additional collection of simple closed curves as follows (illustrated in Figure \ref{curves}). Let the arc $B_i$ be the core of a solid annulus neighborhood of the union of the arcs $k_i$ in $W_i$ such that $B_i$ is disjoint from $J_i$, $C_i$, and $e_i$. Let the simple closed curve $Q$ be obtained from $C_i$ by removing the full twists $z$, $x$, $t$, and $u$. Let $Z$, $X$, $T$, and $U$ be unknotted simple closed curves which wrap around $Q$ in place of $z$, $x$, $t$, and $u$ as illustrated in Figure \ref{curves}. \begin{figure} \caption{The forms of $W_i$ with associated simple closed curves and the arc $B_i$.} \label{curves} \end{figure}   For each $i$, let $M_i$ denote an unknotted solid torus in $S^3$  obtained by gluing together two identical copies of $W_i$ along $D_i$ and $D_{i+1}$,  making sure that the end points of the arcs of $J_i$, $B_i$, and $e_i$ match  up with their counterparts in the second copy to get simple closed curves $j$, $b$, and $E$ respectively in $M_i$.  Thus $M_i$ has a $180^{\circ}$  rotational symmetry around a horizontal line which goes through the center of the figure and  the end points of both copies of $J_i$, $B_i$, and $e_i$. Recall that in form a), $e_i$ is the empty set, and hence so is $E$. Let $Q_1$ and $Q_2$, $X_1$ and $X_2$, $Z_1$ and $Z_2$, $T_1$ and $T_2$, and $U_1$ and $U_2$ denote the doubles of the unknotted simple closed curves $Q$, $X$, $Z$, $T$, $U$ respectively. Let $Y$ denote the core of the solid torus $\mathrm{cl}(S^3-M_i)$. We associate to Form a) of Figure \ref{curves} the link $L=Q_1\cup Q_2\cup j\cup b\cup Y$. We associate to Form b) of Figure \ref{curves} the link $L= Q_1\cup Q_2\cup j\cup b\cup Y\cup E\cup X_1\cup X_2\cup Z_1\cup Z_2$. 
We associate to Form c) of Figure \ref{curves} the link $L= Q_1\cup Q_2\cup j\cup b\cup Y\cup E\cup T_1\cup T_2\cup U_1\cup U_2$. Figure \ref{link} illustrates the three forms of the link $L$. \begin{figure} \caption{The possible forms of the link $L$.} \label{link} \end{figure} The software program SnapPea (available at http://www.geometrygames.\linebreak org/SnapPea/index.html) can be used to determine whether or not a given knot or link in $S^3$ is hyperbolic, and if so SnapPea estimates the hyperbolic volume of the complement. We used SnapPea to verify that each of the three forms of the link $L$ illustrated in Figure \ref{link} is hyperbolic. A 3-manifold is unchanged by doing Dehn surgery on an unknot if the boundary slope of the surgery is the reciprocal of an integer (though such surgery may change a knot or link in the manifold). According to Thurston's Hyperbolic Dehn Surgery Theorem \cite{BP,Th}, all but finitely many Dehn fillings of a hyperbolic link complement yield a hyperbolic manifold. Thus there is some $r\in \mathbb{N}$ such that for any $m\geq r$, if we do Dehn filling with slope $\frac{1}{m}$ along the components $X_1$, $X_2$, $Z_1$, $Z_2$ of the link $L$ in form b) or along the components $T_1$, $T_2$, $U_1$, $U_2$ of the link $L$ in form c), then we obtain a hyperbolic link $\overline{Q}_1\cup \overline{Q}_2\cup j\cup b\cup Y \cup E$, where the simple closed curves $\overline{Q}_1$ and $\overline{Q_2}$ are obtained by adding $m$ full twists to $Q_1$ and $Q_2$ in place of each of the surgered curves. We fix the value of $r$ according to the above paragraph, and this is the value of $r$ that we use in Figure \ref{twists}. Recall that the number of twists $x$, $z$, $u$, and $t$ in the simple closed curves $C_i$ in Figure \ref{WicapK} are each a multiple of $r$. Thus the particular simple closed curves $C_i$ are determined by our choice of $r$ together with our choice of the simple closed curve $\kappa$. 
Now we do Dehn fillings along $X_1$ and $X_2$ with slope $\frac{1}{x}$, along $Z_1$ and $Z_2$ with slope $\frac{1}{z}$, along $U_1$ and $U_2$ with slope $\frac{1}{u}$, and along $T_1$ and $T_2$ with slope $\frac{1}{t}$. Since $x$, $z$, $u$, and $t$ are each greater than or equal to $r$, the link $\overline{Q}_1\cup \overline{Q}_2\cup j\cup b\cup Y \cup E$ that we obtain will be hyperbolic. In form a), $E$ is the empty set and the link $Q_1\cup Q_2\cup j\cup b\cup Y \cup E$ was already seen to be hyperbolic using SnapPea. In this case, we do no surgery and we let the simple closed curves $\overline{Q}_1=Q_1$ and $\overline{Q}_2=Q_2$. It follows that each form of $M_i-(\overline{Q}_1\cup \overline{Q}_2\cup j\cup b\cup E)$ is a hyperbolic 3-manifold. Observe that $M_i-(\overline{Q}_1\cup \overline{Q}_2\cup j\cup b\cup E)$ is the double of $W_i-(C_i\cup J_i\cup B_i\cup e_i)$. Now that we have fixed $C_i$, we let $N(C_i)$, $N(J_i)$, $N(B_i)$, and $N(e_i)$ be pairwise disjoint regular neighborhoods of $C_i$, $J_i$, $B_i$, and $e_i$ respectively in the interior of each of the forms of the solid annulus $W_i$ (illustrated in Figure \ref{WicapK}). We choose $N(B_i)$ such that it contains the union of the arcs $k_i$. Note that in form a) $e_i$ is the empty set and hence so is $N(e_i)$. Let $N(k_i)$ denote a collection of pairwise disjoint regular neighborhoods one containing each arc of $k_i$ such that $N(k_i)\subseteq N(B_i)$. Let $V_i=\mathrm{cl}(W_i-(N(C_i) \cup N(J_i)\cup N(B_i)\cup N(e_i)))$, let $\Delta=\mathrm{cl}(N(B_i)-N(k_i))$, and let $V_i'=V_i\cup\Delta$. Since $N(B_i)$ is a solid annulus, it has a product structure $D^2\times I$. Without loss of generality, we assume that each of the components of $N(k_i)$ respects the product structure of $N(B_i)$. Thus $\Delta=F\times I$ where $F$ is a disk with holes. \begin{definition} Let $X$ be a 3-manifold. A sphere in $X$ is said to be {\bf essential} if it does not bound a ball in $X$. 
A properly embedded disk $D$ in $X$ is said to be {\bf essential} if $\partial D$ does not bound a disk in $\partial X$. A properly embedded annulus is said to be {\bf essential} if it is incompressible and not boundary parallel. A torus in $X$ is said to be {\bf essential} if it is incompressible and not boundary parallel. \end{definition} \begin{lemma}\label{L:annuli} For each $i$, $V_i'$ contains no essential torus, sphere, or disk whose boundary is in $D_i\cup D_{i+1}$. Also, any incompressible annulus in $V_i'$ whose boundary is in $D_i\cup D_{i+1}$ is either boundary parallel or can be expressed as $\sigma\times I$ (possibly after a change in parameterization of $\Delta$), where $\sigma$ is a non-trivial simple closed curve in $D_i\cap\Delta$. \end{lemma} \begin{proof} Since $k_i$ contains at least three disjoint arcs, $F$ is a disk with at least three holes. Let $\beta$ denote the double of $\Delta$ along $\Delta\cap(D_i\cup D_{i+1})$. Then $\beta=F\times S^1$. Now it follows from Waldhausen \cite{Wa} that $\beta$ contains no essential sphere or properly embedded disk, and any incompressible torus in $\beta$ can be expressed as $\sigma\times S^1$ (after a possible change in parameterization of $\beta$) where $\sigma$ is a non-trivial simple closed curve in $D_i\cap\Delta$. Let $\nu$ denote the double of $V_i$ along $V_i\cap(D_i\cup D_{i+1})$. Observe that $\nu\cup \beta$ is the double of $V_i'$ along $V_i'\cap (D_i\cup D_{i+1})$. Now the interior of $\nu$ is homeomorphic to $M_i-(\overline{Q}_1\cup \overline{Q}_2\cup j\cup b\cup E)$. Since we saw above that $M_i-(\overline{Q}_1\cup \overline{Q}_2\cup j\cup b\cup E)$ is hyperbolic, it follows from Thurston \cite{Th,Th2} that $\nu$ contains no essential sphere or torus, or properly embedded disk or annulus. 
We see as follows that $\nu\cup\beta$ contains no essential sphere and any essential torus in $\nu\cup\beta$ can be expressed (after a possible change in parameterization of $\beta$) as $\sigma\times S^1$, where $\sigma$ is a non-trivial simple closed curve in $D_i\cap\Delta$. Let $\tau$ be an essential sphere or torus in $\nu\cup\beta$, and let $\gamma$ denote the torus $\nu\cap \beta$. By doing an isotopy as necessary, we can assume that $\tau$ intersects $\gamma$ in a minimal number of disjoint simple closed curves. Suppose there is a curve of intersection which bounds a disk in the essential surface $\tau$. Let $c$ be an innermost curve of intersection on $\tau$ which bounds a disk $\delta$ in $\tau$. Then $\delta$ is a properly embedded disk in either $\nu$ or $\beta$. Since neither $\nu$ nor $\beta$ contains a properly embedded essential disk or an essential sphere, there is an isotopy of $\tau$ which removes $c$ from the collection of curves of intersection. Thus by the minimality of the number of curves in $\tau\cap\gamma$, we can assume that none of the curves in $\tau\cap\gamma$ bounds a disk in $\tau$. Suppose that $\tau$ is an essential sphere in $\nu\cup\beta$. Since none of the curves in $\tau\cap\gamma$ bounds a disk in $\tau$, $\tau$ must be contained entirely in either $\nu$ or $\beta$. However, we saw above that neither $\nu$ nor $\beta$ contains any essential sphere. Thus $\tau$ cannot be an essential sphere, and hence must be an essential torus. Since $\tau\cap\gamma$ is minimal, if $\tau\cap\nu$ is non-empty, then the components of $\tau$ in $\nu$ are all incompressible annuli. However, we saw above that $\nu$ contains no essential annuli. Thus $\tau\cap\nu$ is empty. Since $\nu$ contains no essential torus, the essential torus $\tau$ must be contained in $\beta$. Hence $\tau$ can be expressed (after a possible change in parameterization of $\beta$) as $\sigma\times S^1$, where $\sigma$ is a non-trivial simple closed curve in $D_i\cap\Delta$. 
Now we consider essential surfaces in $V_i'$. Suppose that $V_i'$ contains an essential sphere $S$. Since $\nu\cup\beta$ contains no essential sphere, $S$ bounds a ball $B$ in $\nu\cup \beta$. Now the ball $B$ cannot contain any of the boundary components of $\nu\cup \beta$. Thus $B$ cannot contain either $D_i$ or $D_{i+1}$. Since $S$ is disjoint from $D_i\cup D_{i+1}$, it follows that $B$ must be disjoint from $D_i\cup D_{i+1}$. Thus $B$ is contained in $V_i'$. Hence $V_i'$ cannot contain an essential sphere. We see as follows that $V_i'$ cannot contain an essential disk whose boundary is in $D_i\cup D_{i+1}$. Let $\epsilon$ be a disk in $V_i'$ whose boundary is in $D_i\cup D_{i+1}$. Let $\epsilon'$ denote the double of $\epsilon$ in $\nu\cup\beta$. Then $\epsilon'$ is a sphere which meets $D_i\cup D_{i+1}$ in the simple closed curve $\partial \epsilon$. Since $\nu\cup \beta$ contains no essential sphere, $\epsilon'$ bounds a ball $B$ in $\nu\cup \beta$. It follows that $B$ cannot contain any of the boundary components of $\nu\cup \beta$. Thus $B$ cannot contain any of the boundary components of $D_i\cup D_{i+1}$. Therefore, $D_i\cup D_{i+1}$ intersects the ball $B$ in a disk bounded by $\partial\epsilon$. Hence the simple closed curve $\partial\epsilon$ bounds a disk in $(D_i\cup D_{i+1})\cap V_i'$, and therefore the disk $\epsilon$ was not essential in $V_i'$. Thus, $V_i'$ contains no essential disk whose boundary is in $D_i\cup D_{i+1}$. Now suppose that $V_i'$ contains an essential torus $T$. Suppose that $T$ is not essential in $\nu\cup \beta$. Then either $T$ is boundary parallel or $T$ is compressible in $\nu\cup\beta$. However, $T$ cannot be boundary parallel in $\nu\cup\beta$ since $T\subseteq V_i'$. Thus $T$ must be compressible in $\nu\cup\beta$. Let $\delta$ be a compression disk for $T$ in $\nu\cup\beta$. 
Since $V_i'$ contains no essential sphere or essential disk whose boundary is in $D_i\cup D_{i+1}$, we can use an innermost disk argument to push $\delta$ off of $D_i\cup D_{i+1}$. Hence $T$ is compressible in $V_i'$, contrary to our initial assumption. Thus $T$ must be essential in $\nu\cup \beta$. It follows that $T$ has the form $\sigma\times S^1$, where $\sigma\subseteq D_i\cap \Delta$. However, since $\nu\cup \beta$ is the double of $V_i'$, the intersection of $\sigma\times S^1$ with $V_i'$ is an annulus $\sigma\times I$. In particular, $V_i'$ cannot contain $\sigma\times S^1$. Therefore, $V_i'$ cannot contain an essential torus. Suppose that $V_i'$ contains an incompressible annulus $\alpha$ whose boundary is in $D_i\cup D_{i+1}$. Let $\tau$ denote the double of $\alpha$ in $\nu\cup\beta$. Then $\tau$ is a torus. If $\tau$ is essential in $\nu\cup\beta$, then we saw above that $\tau$ can be expressed as $\sigma\times S^1$ (after a possible change in parameterization of $\beta$) where $\sigma$ is a non-trivial simple closed curve in $D_i\cap\Delta$. In this case, $\alpha$ can be expressed as $\sigma\times I$. On the other hand, if $\tau$ is inessential in $\nu\cup \beta$, then either $\tau$ is parallel to a component of $\partial (\nu\cup\beta)$, or $\tau$ is compressible in $\nu\cup\beta$. If $\tau$ is parallel to a boundary component of $\nu\cup\beta$, then $\alpha$ is parallel to the annulus boundary component of $W_i$, $N(J_i)$, $N(e_i)$, $N(B_i)$, or one of the boundary components of $N(k_i)$. Thus we suppose that the torus $\tau$ is compressible in $\nu\cup\beta$. In this case, it follows from an innermost loop outermost arc argument that either the annulus $\alpha$ is compressible in $V_i'$ or $\alpha$ is $\partial$-compressible in $V_i'$. Since we assumed $\alpha$ was incompressible in $V_i'$, $\alpha$ must be $\partial$-compressible in $V_i'$. 
Now according to a lemma of Waldhausen \cite{Wa}, if a 3-manifold contains no essential sphere or properly embedded essential disk, then any annulus which is incompressible but boundary compressible must be boundary parallel. We saw above that $V_i'$ contains no essential sphere or essential disk whose boundary is in $D_i\cup D_{i+1}$. Since the boundary of the incompressible annulus $\alpha$ is contained in $D_i\cup D_{i+1}$, it follows from Waldhausen's Lemma that $\alpha$ is boundary parallel in $V_i'$. \end{proof} It follows from Lemma \ref{L:annuli} that for any $i$, any incompressible annulus in $V_i'$ whose boundary is in $D_i\cup D_{i+1}$ is either parallel to an annulus in $D_i$ or $D_{i+1}$ or co-bounds a solid annulus in the solid annulus $W_i$ with ends in $D_i\cup D_{i+1}$. Recall that $\kappa$ is a simple closed curve in $\Gamma_1$ such that $\kappa\cap W_i= k_i\cup e_i$. Also $J=J_1\cup\dots\cup J_n$. Let $N(\kappa)$ and $N(J)$ be regular neighborhoods of the simple closed curves $\kappa$ and $J$ respectively, such that for each $i$, $N(\kappa)\cap W_i=N(k_i)\cup N(e_i)$, and $N(J)\cap W_i=N(J_i)$. Recall that $V=W_1\cup \dots \cup W_n$. Thus $\mathrm{cl}(V-(N(C_1)\cup \dots\cup N(C_n) \cup N(J) \cup N(\kappa)))=V_1'\cup\dots\cup V_n'$. \begin{prop}\label{prop:notori} $H=\mathrm{cl}(V-(N(C_1)\cup \dots\cup N(C_n) \cup N(J) \cup N(\kappa)))$ contains no essential sphere or torus. \end{prop} \begin{proof} Suppose that $S$ is an essential sphere in $H$. Without loss of generality, $S$ intersects the collection of disks $D_1\cup\dots\cup D_n$ transversely in a minimal number of simple closed curves. By Lemma \ref{L:annuli}, for each $i$, $V_i'$ contains no essential sphere or essential disk whose boundary is in $D_i\cup D_{i+1}$. Thus the sphere $S$ cannot be entirely contained in one $V_i'$. Let $c$ be an innermost curve of intersection on $S$. Then $c$ bounds a disk $\delta$ in some $V_i'$. 
However, since the number of curves of intersection is minimal, $\delta$ must be essential, contrary to Lemma \ref{L:annuli}. Hence $H$ contains no essential sphere. Suppose $T$ is an incompressible torus in $H$. We show as follows that $T$ is parallel to some boundary component of $H$. Without loss of generality, the torus $T$ intersects the collection of disks $D_1\cup\dots\cup D_n$ transversely in a minimal number of simple closed curves. By Lemma \ref{L:annuli}, for each $i$, $V_i'$ contains no essential torus, essential sphere, or essential disk whose boundary is in $D_i\cup D_{i+1}$. Thus the torus $T$ cannot be entirely contained in one $V_i'$. Also, by the minimality of the number of curves of intersection, we can assume that if $V_i'\cap T$ is nonempty, then it consists of a collection of incompressible annuli in $V_i'$ whose boundary components are in $D_i\cup D_{i+1}$. Furthermore, by Lemma \ref{L:annuli}, each such annulus is either boundary parallel or is contained in $N(B_i)$ and can be expressed (after a possible change in parameterization of $N(B_i)$) as $\sigma_i\times I$ for some non-trivial simple closed curve $\sigma_i$ in $D_i\cap \Delta$. If some annulus component of $V_i'\cap T$ is parallel to an annulus in $D_i\cup D_{i+1}$, then we could remove that component by an isotopy of $T$. Thus we can assume that each annulus in $V_i'\cap T$ is parallel to the annulus boundary component of one of the solid annuli $W_i$, $N(J_i)$, or $N(e_i)$, or can be expressed as $\sigma_i\times I$. In any of these cases the annulus co-bounds a solid annulus in $W_i$ with ends in $D_i\cup D_{i+1}$. Consider some $i$, such that $V_i'\cap T$ is non-empty. Hence it contains an incompressible annulus $A_i$ which has one of the above forms. 
By the connectivity of the torus $T$, either there is an incompressible annulus $A_{i+1}\subseteq V_{i+1}'\cap T$ such that $A_i$ and $A_{i+1}$ share a boundary component, or there is an incompressible annulus $A_{i-1}\subseteq V_{i-1}'\cap T$, such that $A_i$ and $A_{i-1}$ share a boundary component, or both. We will assume, without loss of generality, that there is an incompressible annulus $A_{i+1}\subseteq V_{i+1}'\cap T$ such that $A_i$ and $A_{i+1}$ share a boundary component. Now it follows that $A_i$ co-bounds a solid annulus $F_i$ in $W_i$ with ends in $D_i\cup D_{i+1}$, and $A_{i+1}$ co-bounds a solid annulus $F_{i+1}$ in $W_{i+1}$ together with two disks in $D_{i+1}\cup D_{i+2}$. Hence the solid annuli $F_i$ and $F_{i+1}$ meet in one or two disks in $D_{i+1}$. We consider several cases where $A_i$ is parallel to some boundary component of $V_i'$. Suppose that $A_i$ is parallel to the annulus boundary component of the solid annulus $N(J_i)$. Then the solid annulus $F_i$ contains $N(J_i)$ and is disjoint from the arcs $k_i$ and $e_i$. Now the arcs $J_i$ and $J_{i+1}$ share an endpoint contained in $F_i\cap F_{i+1}$, and there is no endpoint of any arc of $k_i$ or $e_i$ in $F_i\cap F_{i+1}$. It follows that the solid annulus $F_{i+1}$ contains the arc $J_{i+1}$ and contains no arcs of $k_{i+1}$. Hence by Lemma \ref{L:annuli}, the incompressible annulus $A_{i+1}$ must be parallel to $\partial N(J_{i+1})$. Continuing from one $V_i'$ to the next, we see that in this case, $T$ is parallel to $\partial N(J)$. Suppose that $A_i$ is parallel to the annulus boundary component of the solid annulus $\partial N(e_i)$ or one of the solid annuli in $\partial N(k_i)$. Using an argument similar to the above paragraph, we see that $A_{i+1}$ is parallel to the annulus boundary component of the solid annulus $\partial N(e_{i+1})$ or one of the solid annuli in $\partial N(k_{i+1})$. Continuing as above, we see that in this case $T$ is parallel to $\partial N(\kappa)$. 
Suppose that the annulus $A_i$ is parallel to the annulus boundary component of the solid annulus $W_i$. Then the solid annulus $F_i$ contains all of the arcs of $J_i$, $k_i$, and $e_i$. It follows as above that the solid annulus $F_{i+1}$ contains the arc $J_{i+1}$ and some arcs of $k_{i+1}\cup e_{i+1}$. Thus by Lemma \ref{L:annuli}, $A_{i+1}$ must be parallel to the annulus boundary component of the solid annulus $W_{i+1}$. Continuing in this way, we see that in this case $T$ is parallel to $\partial V$. Thus we now assume that no component of any $V_i'\cap T$ is parallel to an annulus boundary component of $V_i'$. Hence if any $V_i'\cap T$ is non-empty, then by Lemma \ref{L:annuli}, $V_i'\cap T$ consists of disjoint incompressible annuli in $N(B_i)$ which can each be expressed (after a possible re-parametrization of $N(B_i)$) as $\sigma_i\times I$ for some non-trivial simple closed curve $\sigma_i\subseteq D_i\cap \Delta$. Choose $i$ such that $V_i'\cap T$ is non-empty. Since $N(B_i)$ is a solid annulus, there is an innermost incompressible annulus $A_i$ of $N(B_i)\cap T$. Now $A_i$ bounds a solid annulus $F_i$ in $N(B_i)$, and $F_i$ contains more than one arc of $k_i$. Since $A_i$ is innermost in $N(B_i)$, $\mathrm{int}(F_{i})$ is disjoint from $T$. Now there is an incompressible annulus $A_{i+1}$ in $V_{i+1}'\cap T$, such that $A_i$ and $A_{i+1}$ meet in a circle in $D_{i+1}$. Furthermore, this circle bounds a disk in $D_{i+1}$ which is disjoint from $T$, and by our assumption is contained in $N(B_i)$. Thus by Lemma \ref{L:annuli}, the incompressible annulus $A_{i+1}$ has the form $\sigma_{i+1}\times I$ for some non-trivial simple closed curve $\sigma_{i+1}\subseteq D_{i+1}\cap \Delta$ . Thus $A_{i+1}$ bounds a solid annulus $F_{i+1}$ in $N(B_{i+1})$, and $\mathrm{int}(F_{i+1})$ is also disjoint from $T$. 
We continue in this way considering consecutive annuli to conclude that for every $j$, every component $A_j$ of $T\cap V_j'$ is an incompressible annulus which bounds a solid annulus $F_j$ whose interior is disjoint from $T$. Recall that $V=W_1\cup \dots \cup W_n$ is a solid torus. Let $Q$ denote the component of $V-T$ which is disjoint from $\partial V$. Then $Q$ is the union of the solid annuli $F_j$. Since some $F_i$ contains some arcs of $k_i$, the simple closed curve $\kappa$ must be contained in $Q$. Recall that the simple closed curve $\kappa$ contains at least three vertices of the embedded graph $\Gamma_1$. Also each vertex of $\kappa$ is contained in some arc $e_j$. Since each such $e_j\subseteq \kappa\subseteq Q$, some component $F_j$ of $Q\cap W_j$ contains the arc $e_j$. By our assumption, for any $V_i'\cap T$ which is non-empty, $V_i'\cap T$ consists of disjoint incompressible annuli in $N(B_i)$. In particular, $V_j'\cap T\subseteq N(B_j)$. Now the annulus boundary of $F_j$ is contained in $N(B_j)$, and hence $F_j\subseteq N(B_j)$. But this is impossible since $e_j\subseteq F_j$ and $e_j$ is disjoint from $N(B_j)$. Hence our assumption that no component of any $V_i'\cap T$ is parallel to an annulus boundary component of $V_i'$ is wrong. Thus, as we saw in the previous cases, $T$ must be parallel to a boundary component of $H$. Therefore $H$ contains no essential torus. \end{proof} Recall that the value of $r$, the simple closed curves, and the manifold $H$, all depend on the particular choice of simple closed curve $\kappa$. In the following theorem, we do not fix a particular $\kappa$, so none of the above are fixed. \begin{thm} Every graph can be embedded in $S^3$ in such a way that every non-trivial knot in the embedded graph is hyperbolic. \end{thm} \begin{proof} Let $G$ be a graph, and let $n\geq 3$ be an odd number such that $G$ is a minor of the complete graph on $n$ vertices $K_n$. 
Let $\Gamma_1$ be the embedding of $K_n$ given in our preliminary construction. Then, $\Gamma_1$ contains at most finitely many simple closed curves, $\kappa_1$, \dots, $\kappa_m$. For each $\kappa_j$, we use Thurston's Hyperbolic Dehn Surgery Theorem \cite{BP, Th} to choose an $r_j$ in the same manner that we chose $r$ after we fixed a particular simple closed curve $\kappa$. Now let $R=\mathrm{max}\{r_1,\dots,r_m\}$, and let $R$ be the value of $r$ in Figure \ref{twists}. This determines the simple closed curves $C_1$, \dots, $C_n$. Let $P=\mathrm{cl}(V-(N(C_1)\cup \dots\cup N(C_n) \cup N(J )))$ where $V$ and $J$ are given in our preliminary construction. Then the embedded graph $\Gamma_1\subseteq P$. For each $j=1$, \dots $m$, let $H_j=\mathrm{cl}(P-N(\kappa_j))$. It follows from Proposition \ref{prop:notori} that each $H_j$ contains no essential sphere or torus. Since each $H_j$ has more than three boundary components, no $H_j$ can be Seifert fibered. Hence by Thurston's Hyperbolization Theorem \cite{Th2}, every $H_j$ is a hyperbolic manifold. We will glue solid tori $Y_1$, \dots, $Y_{n+2}$ to $P$ along its $n+2$ boundary components $\partial V$, $\partial N(C_1)$, \dots, $\partial N(C_n)$, and $\partial N(J)$ to obtain a closed manifold $\overline{P}$ as follows. For each $j$, any gluing of solid tori along the boundary components of $P$ defines a Dehn filling of $H_j=\mathrm{cl}(P-N(\kappa_j))$ along all of its boundary components except $\partial N(\kappa_j)$. Since each $H_j$ is hyperbolic, by Thurston's Hyperbolic Dehn Surgery Theorem \cite{BP,Th}, all but finitely many such Dehn fillings of $H_j$ result in a hyperbolic 3-manifold. Furthermore, since $P$ is obtained by removing solid tori from $S^3$, for any integer $q$, if we attach the solid tori $Y_1$, \dots, $Y_{n+2}$ to $P$ with slope $\frac{1}{q}$, then $\overline{P}=S^3$. In this case each $H_j\cup Y_1\cup \dots\cup Y_{n+2}$ is the complement of a knot in $S^3$. 
There are only finitely many $H_j$'s, and for each $j$, only finitely many slopes $\frac{1}{q}$ are excluded by Thurston's Hyperbolic Dehn Surgery Theorem. Thus there is some integer $q$ such that if we glue the solid tori $Y_1$, \dots, $Y_{n+2}$ to any of the $H_j$ along $\partial N(C_1)$, \dots, $\partial N(C_{n})$, $\partial N(J)$, $\partial V$ with slope $\frac{1}{q}$, then we obtain the complement of a hyperbolic knot in $S^3$. Let $\Gamma_2$ denote the re-embedding of $\Gamma_1$, obtained as a result of gluing the solid tori $Y_1$, \dots, $Y_{n+2}$ to the boundary components of $P$ with slope $\frac{1}{q}$. Now $\Gamma_2$ is an embedding of $K_n$ in $S^3$ such that every non-trivial knot in $\Gamma_2$ is hyperbolic. Now there is a minor $G'$ of the embedded graph $\Gamma_2$ which is an embedding of our original graph $G$, such that every non-trivial knot in $G'$ is hyperbolic. \end{proof} \small \end{document}
\begin{document} \date{} \title{Some identities for determinants of structured matrices} \author{Estelle L. Basor \thanks{Supported in part by the National Science Foundation DMS-9970879.}\\ Department of Mathematics\\ California Polytechnic State University\\ San Luis Obispo, CA 93407 \and Torsten Ehrhardt \thanks{Research was in part done at MSRI and supported by National Science Foundation DMS-9701755.}\\ Fakult\"at f\"ur Mathematik\\ Technische Universit\"at Chemnitz\\ 09107 Chemnitz, Germany} \maketitle \begin{abstract} In this paper we establish several relations between the determinants of the following structured matrices: Hankel matrices, symmetric Toeplitz + Hankel matrices and Toeplitz matrices. Using known results for the asymptotic behavior of Toeplitz determinants, these identities are used in order to obtain Fisher-Hartwig type results on the asymptotics of certain skewsymmetric Toeplitz determinants and certain Hankel determinants. \end{abstract} \section{Introduction} In this paper we prove identities that involve the determinants of several types of structured matrices such as Hankel matrices, symmetric Toeplitz + Hankel matrices and skewsymmetric Toeplitz matrices. After having established these identities we show how they can be used in order to obtain asymptotic formulas for these determinants. Let us first recall the underlying notation. Given a sequence $\{a_n\}_{n=-\infty}^\infty$ of complex numbers, we associate the formal Fourier series \begin{eqnarray} a(t)&=&\sum_{n=-\infty}^\infty a_nt^n,\qquad t\in{\mathbb T}. 
\end{eqnarray} The $N\times N$ Toeplitz and Hankel matrices with the (Fourier) symbol $a$ are defined by \begin{equation} T_N(a) \;\;=\;\; \left(a_{j-k}\right)_{j,k=0}^{N-1}, \qquad H_N(a) \;\;=\;\; \left(a_{j+k+1}\right)_{j,k=0}^{N-1}.\label{f.THn} \end{equation} Usually $a$ represents an $L^1$-function defined on the unit circle, in which case the numbers $a_n$ are the Fourier coefficients, \begin{eqnarray} a_n &=& \frac{1}{2\pi}\int_{-\pi}^{\pi}a(e^{i\theta}) e^{-in\theta}\,d\theta,\qquad n\in{\mathbb Z}. \end{eqnarray} To a given symbol $a$ we associate the symbol $\tilde{a}(t):=a(t^{-1})$. The symbol $a$ is called even (odd) if $\tilde{a}(t)=\pm a(t)$, i.e., $a_{-n}=\pm a_{n}$. For our purposes it is important to define another type of Hankel matrix. Given a function $b\in L^1[-1,1]$ with moments defined by \begin{eqnarray} b_n &=& \frac{1}{\pi}\int_{-1}^1b(x)(2x)^{n-1}\,dx,\qquad n\ge1, \end{eqnarray} the $N\times N$ Hankel matrices generated by the (moment) symbol $b$ are defined by \begin{eqnarray} H_N[b] &=& \left(b_{1+j+k}\right)_{j,k=0}^{N-1}. \end{eqnarray} We indicate the difference in the definition by using the notation $H_N(\cdot)$ and $H_N[\cdot]$. The function $b$ is called even if $b(x)=b(-x)$. Our motivation to prove in the following sections identities for the above mentioned determinants comes from several problems. The best known problem, called the Fisher-Hartwig conjecture, concerns the asymptotics of the determinants of Toeplitz matrices for singular symbols. One would like to be able to compute the asymptotics of the determinant of $T_N(a)$ when the symbol $a$ has jump discontinuities, zeros, or other singularities of a certain form. A history of this problem and many known results and applications can be found in \cite{BS} or \cite{E}. In section five of this paper we prove some Fisher-Hartwig type results for certain skewsymmetric Toeplitz matrices. 
Another interesting problem is to compute asymptotically the determinants of the matrices $$T_N(a) + H_N(a)$$ where the symbol $a$ also has singularities. The interest in these asymptotics, especially in the case where $a$ is even, arose in random matrix theory (see \cite{BE1} and the references therein). The determination of these asymptotics will be done in a forthcoming paper \cite{BE2}. Finally, Hankel matrices defined by the moments of a function given on a line segment play an important role in orthogonal polynomial theory and again in random matrix theory. We refer the reader to \cite{Sz} for orthogonal polynomial connections and to \cite{M} for a general account of random matrix theory. In section five we prove two results for the asymptotics of the determinants of the Hankel moment matrices. These results allow the function $b$ to have jump discontinuities, but require the function to be even. The paper is organized as follows. Sections 2, 3, and 4 contain all the linear algebra type results which prove the exact identities for the various types of matrices and are self-contained. The asymptotic results are contained in section 5 and use the results of the previous sections and some already known results for Toeplitz matrices. \section{Hankel determinants versus symmetric Toeplitz + Hankel determinants} We begin with a preliminary result which will allow us to show the relationship with symmetric Toeplitz plus Hankel matrices and the Hankel moment matrices. \begin{proposition}\label{p1.1} Let $\{a_n\}_{n=-\infty}^\infty$ be a sequence of complex numbers such that $a_n=a_{-n}$ and let $\{b_n\}_{n=1}^\infty$ be a sequence defined by \begin{eqnarray}\label{f.b} b_n &=& \sum_{k=0}^{n-1} {n-1\choose k}(a_{1-n+2k}+a_{2-n+2k}). 
\end{eqnarray} Define the one-sided infinite matrices \begin{equation}\label{f.AB} A\;=\;\left(a_{j-k}+a_{j+k+1}\right)_{j,k=0}^\infty,\qquad B\;=\;\left(b_{j+k+1}\right)_{j,k=0}^\infty, \end{equation} and the upper triangular one-sided infinite matrix \begin{eqnarray}\label{f.D} D &=& \left(\begin{array}{cccc} \xi(0,0) & \xi(1,1) & \xi(2,2) & \dots \\ & \xi(1,0) & \xi(2,1) & \dots \\ && \xi(2,0) & \\ 0 &&& \ddots \end{array}\right) \quad\mbox{ where }\quad \xi(n,k)\;=\;{n\choose [\frac{k}{2}]}. \end{eqnarray} Then $B=D^TAD$. \end{proposition} \begin{proof} The assertion is equivalent to the statement that for all $n,m\ge0$ the following identity holds: \begin{eqnarray}\label{f1.b1} b_{n+m+1} &=& \sum_{j=0}^{n}\sum_{k=0}^{m} (a_{n-j-m+k}+a_{n-j+m-k+1})\xi(n,j)\xi(m,k), \end{eqnarray} where $b_{n+m+1}$ is given by \begin{eqnarray}\label{f1.b2} b_{n+m+1} &=& \sum_{r=0}^{n+m}{n+m\choose r}(a_{2r-n-m}+a_{2r-n-m+1}). \end{eqnarray} In order to prove this identity it is sufficient to prove that for each $s\geq0$ the terms $a_{s}=a_{-s}$ occur as many times in (\ref{f1.b1}) as in (\ref{f1.b2}). In fact, $a_s$ and $a_{-s}$ occurs in (\ref{f1.b1}) exactly $N_1+N_2+N_3$ times if $s\ge1$ and $N_1=N_2$ times if $s=0$, where \begin{eqnarray} N_1 &=& \sum_{\sumarr{0\le j\le n}{0\le k\le m}{j-k=n-m-s}} {n\choose [\frac{j}{2}]}{m\choose [\frac{k}{2}]} \;\;=\;\;\sum_{\sumarr{0\le j\le n}{m+1\le k\le 2m+1}{j+k=n+m-s+1}} {n\choose [\frac{j}{2}]}{m\choose [\frac{k}{2}]}, \nonumber\\ N_2 &=& \sum_{\sumarr{0\le j\le n}{0\le k\le m}{j-k=n-m+s}} {n\choose [\frac{j}{2}]}{m\choose [\frac{k}{2}]} \;\;=\;\;\sum_{\sumarr{n+1\le j\le 2n+1}{0\le k\le m}{j+k=n+m-s+1}} {n\choose [\frac{j}{2}]}{m\choose [\frac{k}{2}]}, \nonumber\\ N_3 &=& \sum_{\sumarr{0\le j\le n}{0\le k\le m}{j+k=n+m+1-s}} {n\choose [\frac{j}{2}]}{m\choose [\frac{k}{2}]}.\nonumber \end{eqnarray} In the expression for $N_1$ we have made a change of variables $k\mapsto 2m+1-k$ and in $N_2$ a change of variables $j\mapsto 2n+1-j$. 
Hence it follows that \begin{eqnarray} N_1+N_2+N_3 &=& \sum_{{j,k\ge0 \atop j+k=n+m+1-s}} {n\choose [\frac{j}{2}]}{m\choose [\frac{k}{2}]}.\nonumber \end{eqnarray} Moreover, $N_1=N_2=\frac{N_1+N_2+N_3}{2}$ for $s=0$ since then $N_3=0$. On the other hand, $a_s$ and $a_{-s}$ occurs in (\ref{f1.b2}) exactly $M_1+M_2$ times if $s\ge1$ and $M_1=M_2$ times if $s=0$, where $$ M_1\;\;=\;\; {n+m\choose[\frac{n+m+s}{2}]},\qquad M_2\;\;=\;\; {n+m\choose[\frac{n+m-s}{2}]}. $$ Thus we are done as soon as we have shown that $M_1+M_2=N_1+N_2+N_3$ for each $s\ge0$. We distinguish two cases. If $n+m+1-s$ is even, then we substitute $j\mapsto2j$, $k\mapsto 2k$, and $j\mapsto2j+1$, $k\mapsto2k+1$ in the above expression for $N_1+N_2+N_3$ and arrive at \begin{eqnarray} N_1+N_2+N_3 &=& \sum_{{j,k\ge0 \atop 2j+2k=n+m+1-s}} {n\choose j}{m\choose k} + \sum_{{j,k\ge0 \atop 2j+2k=n+m-1-s}} {n\choose j}{m\choose k}.\nonumber\\ &=& {n+m\choose\frac{n+m+1-s}{2}}+ {n+m\choose\frac{n+m-1-s}{2}} \;\;=\;\;M_1+M_2.\nonumber \end{eqnarray} If $n+m+1-s$ is odd, then we substitute $j\mapsto2j$, $k\mapsto 2k+1$, and $j\mapsto2j+1$, $k\mapsto2k$ in the expression for $N_1+N_2+N_3$ and obtain \begin{eqnarray} N_1+N_2+N_3 &=& 2\sum_{{j,k\ge0 \atop 2j+2k=n+m-s}} {n\choose j}{m\choose k}\nonumber\\ &=& 2{n+m\choose\frac{n+m-s}{2}} \;\;=\;\;M_1+M_2,\nonumber \end{eqnarray} which also completes the proof. \end{proof} \begin{theorem}\label{t1.2} Let $\{a_n\}_{n=-\infty}^\infty$ and $\{b_n\}_{n=1}^\infty$ fulfill the assumptions of the previous proposition. For $N\ge1$ define the matrices \begin{equation}\label{f.ABN} A_N\;=\;\left(a_{j-k}+a_{j+k+1}\right)_{j,k=0}^{N-1},\qquad B_N\;=\;\left(b_{j+k+1}\right)_{j,k=0}^{N-1}. \end{equation} Then $\det A_N=\det B_N$. \end{theorem} \begin{proof} $A_N$ and $B_N$ are the $N\times N$ sections of the infinite matrices $A$ and $B$ of the previous proposition. Let $D_N$ be the $N\times N$ sections of the infinite matrix $D$. 
Because of the triangular structure of $D$, it follows that $B_N=D_N^TA_ND_N$. Noting that the entries on the diagonal of $D$ are equal to $\xi(n,0)=1$, we obtain the desired assertion. \end{proof} The previous theorem shows the connection between the determinants of a symmetric Toeplitz + Hankel matrix on the one hand and a Hankel determinant on the other hand. We now express this relationship by using the standard notation for these matrices. \begin{theorem}\label{t1.3} Let $a\in L^1({\mathbb T})$ be an even function, and define $b\in L^1[-1,1]$ by \begin{eqnarray}\label{f1.10} b(\cos\theta) &=& a(e^{i\theta}) \sqrt{\frac{1+\cos\theta}{1-\cos\theta}}. \end{eqnarray} Then $\det(T_N(a)+H_N(a))=\det H_N[b]$. \end{theorem} \begin{proof} The moments of $b$ are given by \begin{eqnarray} b_n &=& \frac{1}{\pi}\int_{-1}^1b(x)(2x)^{n-1}\,dx\nonumber\\ &=& \frac{1}{\pi}\int_{0}^\pi a(e^{i\theta})(1+\cos\theta)(2\cos\theta)^{n-1}\, d\theta\nonumber\\ &=& \frac{1}{2\pi}\int_{-\pi}^{\pi} a(e^{i\theta})(1+e^{-i\theta})(e^{i\theta}+e^{-i\theta})^{n-1}\, d\theta\nonumber\\ &=& \frac{1}{2\pi}\int_{-\pi}^{\pi}a(e^{i\theta}) \left(\sum_{k=0}^{n-1}(e^{i(n-1-2k)\theta}+e^{i(n-2-2k)\theta}) {n-1\choose k}\right)\,d\theta\nonumber\\ &=& \sum_{k=0}^{n-1}{n-1\choose k}\left(a_{-n+1+2k}+a_{-n+2+2k}\right).\nonumber \end{eqnarray} Here we have made a change of variables $x=\cos\theta$ and written $(e^{i\theta}+e^{-i\theta})^{n-1}$ using the binomial formula. With regard to (\ref{f.b}) and Theorem \ref{t1.2} this completes the proof. \end{proof} In regard to relation (\ref{f1.10}) we remark that $b\in L^1[-1,1]$ if and only if $a(e^{i\theta})(1+\cos\theta)\in L^1({\mathbb T})$. Thus at this point we have shown that if $a$ and $b$ satisfy the relation (\ref{f1.10}), then $$\det H_N[b] = \det(T_N(a)+H_N(a)).$$ But actually more can be done in the case that the symbol $a$ satisfies a quarter wave symmetry property. 
Then, in fact, certain Hankel moment determinants can be written as Toeplitz determinants. The symbol $b(x)\in L^1[-1,1]$ of these Hankel determinants is of the form \begin{eqnarray}\label{f1.11} b(x)&=& b_0(x)\sqrt{\frac{1+x}{1-x}} \end{eqnarray} where $b_0(-x)=b_0(x)$ for all $x\in[-1,1]$. We first begin with the following auxiliary result. In what follows, let $W_N$ stand for the matrix acting on ${\mathbb C}^N$ by $$ W_N:(x_0,x_1,\dots,x_{N-1})\mapsto (x_{N-1},\dots,x_1,x_0), $$ and let $I_N$ denote the $N\times N$ identity matrix. \begin{proposition}\label{p1.4new} Let $a\in L^1({\mathbb T})$ and assume that $a(-t)=a(t^{-1})=a(t)$. Define \begin{eqnarray} d(e^{i\theta})&=& a(e^{i\theta/2}). \end{eqnarray} Then $\det(T_{N}(a)+H_N(a))=\det T_N(d)$. \end{proposition} \begin{proof} Note first that $d(t)$ is well defined since $a(t)=a(-t)$. Moreover, $a_{2n+1}=0$ and $a_{2n}=d_n$. By rearranging rows and columns of $T_N(a)+H_N(a)$ in an obvious way, it is easily seen that this matrix is similar to \begin{eqnarray} \left(\begin{array}{cc}\left(a_{2j-2k}\right)_{j,k=0}^{N_1-1}&0\\ 0& \left(a_{2j-2k}\right)_{j,k=0}^{N_2-1}\end{array}\right)+ \left(\begin{array}{cc}0&\left(a_{2j+2k+2}\right)_{j=0,\hspace{1.4ex}k=0}^{N_1-1,N_2-1}\\ \left(a_{2j+2k+2}\right)_{j=0,\hspace{1.4ex}k=0}^{N_2-1,N_1-1}&0\end{array}\right)\nonumber \end{eqnarray} where $N_1=\left[\frac{N+1}{2}\right]$ and $N_2=\left[\frac{N-1}{2}\right]$. This matrix equals $$\left(\begin{array}{cc} T_{N_1}(d)&H_{N_1,N_2}(d)\\H_{N_2,N_1}(d)&T_{N_2}(d)\end{array} \right),$$ where $H_{N_1,N_2}(d)$ and $H_{N_2,N_1}(d)$ are Hankel matrices of size $N_1\times N_2$ and $N_2\times N_1$, respectively. Multiplying the last matrix from the left and the right with the diagonal matrix ${\rm diag}\,(W_{N_1},I_{N_2})$ we obtain the matrix $T_N(d)$. Notice in this connection that $d_{n}=d_{-n}$ since $a(t^{-1})=a(t)$. 
\end{proof} \begin{corollary}\label{c1.5} Let $b\in L^1[-1,1]$ and suppose (\ref{f1.11}) holds with $b_0(-x)=b_0(x)$ for all $x\in[-1,1]$. Define the function \begin{eqnarray} d(e^{i\theta})&=&b_0(\cos\frac{\theta}{2}). \end{eqnarray} Then $\det H_N[b]=\det T_{N}(d)$. \end{corollary} \begin{proof} Since $b_0(x)=b_0(-x)$ it follows from definition (\ref{f1.10}) that $a(-t)=a(t^{-1})=a(t)$. Now we can apply Theorem \ref{t1.3} and Proposition \ref{p1.4new} in order to obtain the identity $\det H_N[b]=\det (T_N(a)+H_N(a))=\det T_N(d)$. \end{proof} Concerning the previous corollary, we wish to emphasize that the function $d$ is even, and hence the matrices $T_N(d)$ are symmetric. \section{Symmetric Toeplitz + Hankel determinants versus skewsymmetric Toeplitz determinants} The main result of this section has been established in \cite[Lemma 18]{Kra} and proved in \cite[Lemma 1]{Gor} and \cite[Proof of Thm.~7.1(a)]{Stem}. We give a slightly simplified and self-contained proof here. \begin{theorem}\label{t2.3} Let $\{a_n\}_{n=-\infty}^{\infty}$ be a sequence of complex numbers such that $a_{-n}=a_{n}$. Let $c_{n}$ be defined by \begin{eqnarray}\label{f.cn} c_{n} &=& \sum_{k=-n+1}^n a_{k}\quad\mbox{ for } n>0, \end{eqnarray} and put $c_0=0$ and $c_{-n}=-c_{n}$. Then $\det T_{2N}(c)=(\det(T_N(a)+H_N(a)))^2$. \end{theorem} \begin{proof} First of all we multiply the matrix $T_{2N}(c)$ from the left and right with ${\rm diag}\,(W_N,I_N)$. We obtain the matrix $$ \left(\begin{array}{cc} T_N(\tilde{c})&H_N(\tilde{c})\\H_N(c)&T_N(c)\end{array}\right) \;\;=\;\; \left(\begin{array}{cc}-T_N(c)&-H_N(c)\\H_N(c)&T_N(c)\end{array}\right) $$ by observing that $\tilde{c}=-c$. 
Next we claim that \begin{eqnarray} &&\hspace*{-5ex} \left(\begin{array}{cc}T_N(1-t)&0\\T_N(t)&I_N\end{array}\right) \left(\begin{array}{cc}-T_N(c)&-H_N(c)\\H_N(c)&T_N(c)\end{array}\right) \left(\begin{array}{cc}T_N(1-t^{-1})&T_N(t^{-1})\\0&I_N\end{array}\right)\nonumber\\ &=& \left(\begin{array}{cc}I_N&0\\0&T_N(1+t)\end{array}\right) \left(\begin{array}{cc}X_N&-T_N(a)-H_N(a)\\T_N(a)+H_N(a)&0\end{array}\right) \left(\begin{array}{cc}I_N&0\\0&T_N(1+t^{-1})\end{array}\right)\nonumber \end{eqnarray} with a certain matrix $X_N$. If we take the determinant of this equation, we obtain the desired determinant identity. In order to prove the above matrix identity it suffices to show that the following three equations hold: \begin{eqnarray} T_N(c)-T_N(t)T_N(c)T_N(t^{-1})+H_N(c)T_N(t^{-1})-T_N(t)H_N(c) \;\;=\;\;0,\hspace*{-24ex}&&\label{f2.I}\\[1ex] -T_N(t)T_N(c)T_N(1-t^{-1})+H_N(c)T_N(1-t^{-1}) &=& T_N(1+t)\left(T_N(a)+H_N(a)\right),\label{f2.II}\\[1ex] T_N(1-t)T_N(c)T_N(t^{-1})+T_N(1-t)H_N(c)&=& \left(T_N(a)+H_N(a)\right)T_N(1+t^{-1}).\qquad\label{f2.III} \end{eqnarray} Notice that (\ref{f2.III}) can be obtained from (\ref{f2.II}) by passing to the transpose. Moreover, by employing (\ref{f2.I}) equation (\ref{f2.II}) reduces to \begin{eqnarray} T_N(1-t)\left(T_N(c)+H_N(c)\right) &=& T_N(1+t)\left(T_N(a)+H_N(a)\right).\label{f2.IV} \end{eqnarray} Let us first prove (\ref{f2.I}). We introduce the $N\times1$ column vectors $e_0=(1,0,0,\dots,0)^T$ and $\gamma_N=(0,c_1,c_2,\dots,c_{N-1})^T$. Then $$ T_N(c)-T_N(t)T_N(c)T_N(t^{-1}) \;\;=\;\; \gamma_Ne_0 ^T-e_0\gamma_N^T \;\;=\;\; T_N(t)H_N(c)-H_N(c)T_N(t^{-1}), $$ whence indeed (\ref{f2.I}) follows. Next we remark that from the definition of the sequences $\{a_n\}_{n=-\infty}^\infty$ and $\{c_n\}_{n=-\infty}^\infty$ it follows that $c_n-c_{n-1}=a_n+a_{n-1}$ for all $n\in{\mathbb Z}$. 
Introducing the column vectors $\hat{\gamma}_N=(c_1,\dots,c_N)^T$, $\alpha_N=(a_0,\dots,a_{N-1})^T$ and $\hat{\alpha}_{N}=(a_1,\dots,a_N)^T$, it can be readily verified that \begin{eqnarray} T_N(1-t)T_N(c) &=& \left(c_{j-k}-c_{j-k-1}\right)_{j,k=0}^{N-1} -e_0\hat{\gamma}_N^T,\nonumber\\ T_N(1+t)T_N(a) &=& \left(a_{j-k}+a_{j-k-1}\right)_{j,k=0}^{N-1} -e_0\hat{\alpha}_N^T,\nonumber\\ T_N(1-t)H_N(c) &=& \left(c_{j+k+1}-c_{j+k}\right)_{j,k=0}^{N-1} +e_0\gamma_N^T,\nonumber\\ T_N(1+t)H_N(a) &=& \left(a_{j+k+1}+a_{j+k}\right)_{j,k=0}^{N-1} -e_0\alpha_N^T.\nonumber \end{eqnarray} Using the above relation $c_n-c_{n-1}=a_n+a_{n-1}$, it follows that \begin{eqnarray} T_N(1-t)T_N(c)-T_N(1+t)T_N(a) &=& -e_0\hat{\gamma}^T_N+e_0\hat{\alpha}^T_N\nonumber\\ T_N(1+t)H_N(a)-T_N(1-t)H_N(c) &=&-e_0\alpha^T_N-e_0\gamma^T_N\nonumber. \end{eqnarray} Since $\hat{\gamma}_N-\gamma_N=\hat{\alpha}_N+\alpha_N$ by the same relation, this implies equation (\ref{f2.IV}). \end{proof} The results of this theorem are not easy to rephrase by using the classical notation for Toeplitz and Hankel matrices. Consider, for instance, the simplest case where $a(t)\equiv1$. Then $c_n={\rm sign}(n)$ which are not the Fourier coefficients of an $L^1$-function. For more information on how one can nevertheless express the relationship between the symbols $a$ and $c$, and how the asymptotics for certain of the above determinants can be determined we refer to \cite{BE2}. \section{Hankel determinants versus skewsymmetric Toeplitz determinants} The results of the previous two sections allow us to establish an identity between Hankel determinants and determinants of skewsymmetric Toeplitz matrices. The next theorem is an additional needed ingredient for the identity. \begin{theorem} Let $\{c_n\}_{n=-\infty}^{\infty}$ be a sequence of complex numbers such that $c_{-n}=-c_n$ for all $n\in{\mathbb Z}$. 
Define numbers $\{b_n\}_{n=1}^\infty$ by \begin{eqnarray}\label{f.bn-cn} b_n &=& \sum_{k=0}^{\left[\frac{n}{2}\right]} \left\{{n-1 \choose k}-{n-1 \choose k-1}\right\}c_{n-2k}. \end{eqnarray} Moreover, define the matrices $$ B_N=\left(b_{j+k+1}\right)_{j,k=0}^{N-1},\qquad C_{2N}=\left(c_{j-k}\right)_{j,k=0}^{2N-1}. $$ Then $\det C_{2N}=(\det B_N)^2$. \end{theorem} \begin{proof} In formula (\ref{f.cn}) the numbers $c_n$ are defined in terms of the numbers $a_{-n+1},\dots,a_{n}$. By a simple inspection of this formula, it is easy to see that for any given sequence $\{c_n\}_{n=-\infty}^\infty$ there exists a sequence $\{a_n\}_{n=-\infty}^\infty$ such that (\ref{f.cn}) and $a_{n}=a_{-n}$ holds for all positive $n$. Now let us define the numbers $b_n$ not by (\ref{f.bn-cn}) but by (\ref{f.b}). Then with $B_{N}$ and $C_{2N}$ defined as above it follows from Theorem \ref{t1.2} and Theorem \ref{t2.3} that $\det C_{2N}=(\det B_N)^2$. It remains to show that (\ref{f.bn-cn}) holds. Indeed, we have that \begin{eqnarray} \lefteqn{\sum_{k=0}^{\left[\frac{n}{2}\right]} \left\{{ n-1 \choose k}-{n-1 \choose k-1}\right\} c_{n-2k} \;\;=\;\; \sum_{k=0}^{\left[\frac{n}{2}\right]} \left\{{n-1 \choose k}-{n-1 \choose k-1}\right\} \sum_{j=-n+2k+1}^{n-2k} a_j} \hspace*{15ex}\nonumber\\ &=& \sum_{-n+2k+1\le j\le n-2k\atop0\le 2k\le n} \left\{{n-1 \choose k}-{n-1 \choose k-1}\right\}a_j\nonumber\\ &=& \sum_{j=-n+1}^{n} \sum_{k=0}^{\min\left\{\left[\frac{n-j}{2}\right], \left[\frac{n+j-1}{2}\right]\right\}} \left\{{n-1 \choose k}-{n-1 \choose k-1}\right\}a_j\nonumber\\ &=& \sum_{j=-n+1}^{n} {n-1\choose\min\left\{\left[\frac{n-j}{2}\right], \left[\frac{n+j-1}{2}\right]\right\}}a_j\nonumber\\ &=& \sum_{j=-n+1}^n{n-1\choose\left[\frac{n-j}{2}\right]}a_j \;\;=\;\; \sum_{k=0}^{n-1}{n-1\choose k}(a_{2k+1-n}+a_{2k+2-n}).\nonumber \end{eqnarray} By formula (\ref{f.b}) this is equal to $b_n$. \end{proof} We again express the above relationship in terms of the standard notation. 
\begin{theorem}\label{t3.2} Let $b\in L^1[-1,1]$ and define $c\in L^1({\mathbb T})$ by \begin{eqnarray}\label{f3.15} c(e^{i\theta})&=& i\,{\rm sign}(\theta)\,b(\cos\theta),\qquad -\pi<\theta<\pi. \end{eqnarray} Then $\det T_{2N}(c)=(\det H_N[b])^2$. \end{theorem} \begin{proof} Obviously, $c(e^{-i\theta})=-c(e^{i\theta})$. Hence $c_{-n}=-c_{n}$. It is sufficient to verify formula (\ref{f.bn-cn}) for the Fourier coefficients and moments. First of all, \begin{eqnarray} c_n &=& \frac{1}{\pi}\int_0^{\pi}b(\cos\theta)\sin(n\theta)\,d\theta. \nonumber\end{eqnarray} Hence \begin{eqnarray} b_n &=& \frac{1}{\pi}\int_0^{\pi}b(\cos\theta) \left(\sum_{k=0}^{\left[\frac{n}{2}\right]} \left\{{n-1 \choose k}-{n-1 \choose k-1}\right\}\sin((n-2k)\theta) \right)d\theta.\nonumber \end{eqnarray} The expression in the big braces equals (by a change of variables $k\mapsto n-k$ in the second part of the sum) \begin{eqnarray} \lefteqn{ \sum_{k=0}^{\left[\frac{n}{2}\right]}{n-1 \choose k}\sin((n-2k)\theta) -\sum_{k=n-\left[\frac{n}{2}\right]}^n {n-1 \choose n-k-1}\sin((2k-n)\theta) }\hspace{10ex} \nonumber\\ &=& \sum_{k=0}^{n-1} {n-1 \choose k}\sin((n-2k)\theta) \;\;=\;\; (2\cos\theta)^{n-1}\sin\theta.\nonumber \end{eqnarray} Hence \begin{eqnarray} b_n &=& \frac{1}{\pi}\int_0^\pi b(\cos\theta)(2\cos\theta)^{n-1}\sin\theta\,d\theta.\nonumber \end{eqnarray} Now it is easy to see that $b_n$ are the moments of the function $b$. \end{proof} Regarding relation (\ref{f3.15}) we remark that $c\in L^1({\mathbb T})$ if and only if $b(x)/\sqrt{1-x^2}\in L^1[-1,1]$. At this point we have three main identities for Hankel moment determinants, one which follows from Theorem 2.3, one which follows from Corollary 2.5 and finally one which follows from the previous theorem. If we desire to find the asymptotics of the determinants of the Hankel moment matrices it is clear that the corresponding asymptotics for Toeplitz matrices need to be derived. 
In particular, in light of Theorem 4.2 and formula (4.22), it is desirable to compute the asymptotics of the Toeplitz determinant $\det T_{2N}(c)$, where $c$ satisfies $c(e^{-i\theta})=-c(e^{i\theta})$, which accordingly implies that the Toeplitz matrices are skewsymmetric. Note from this it follows that $\det T_{2N+1}(c)=0$ for all $N$. However, this implies that a single asymptotic formula for the determinants, such as the one given in the classical Szeg\"{o} limit theorem, or the more general Fisher-Hartwig formulas would not make sense here. In the following section we nevertheless compute the asymptotics of such Toeplitz determinants in some cases and raise a conjecture about more general cases. \section{Asymptotics of certain skewsymmetric Toeplitz determinants and Hankel determinants} Our goal in this section is to consider Toeplitz determinants with generating function $c(e^{i\theta}) = \chi(e^{i\theta})a(e^{i\theta})$ where $a$ is an even function and \begin{eqnarray} \chi(e^{i\theta}) &=& i\,{\rm sign}(\theta), \qquad -\pi<\theta<\pi. \end{eqnarray} Let $t_{\beta}(e^{i\theta})$ stand for the function \begin{eqnarray} t_{\beta}(e^{i\theta}) &=& e^{i\beta(\theta-\pi)}, \qquad 0<\theta<2\pi. \end{eqnarray} This function has a single jump at $t=1$ whose size is determined by the parameter $\beta$. In the following proposition we assume that $a$ is not necessarily an even function but satisfies instead a rotation symmetry condition. \begin{proposition}\label{p3.3} Assume that $a\in L^1({\mathbb T})$ satisfies the relation $a(-t)=a(t)$ for $t\in{\mathbb T}$. Define the functions \begin{eqnarray} d(e^{i\theta})=a(e^{i\theta/2}),\qquad d_1(e^{i\theta})=t_{-1/2}(e^{i\theta})d(e^{i\theta}),\qquad d_2(e^{i\theta})=t_{1/2}(e^{i\theta})d(e^{i\theta}).\nonumber \end{eqnarray} Then $\det T_{2N}(a)=(\det T_N(d))^2$ and $\det T_{2N}(\chi a)=\det T_{N}(d_1)\det T_{N}(d_2)$. 
\end{proposition} \begin{proof} {}From the assumptions $a(t)=a(-t)$ it follows that the Fourier coefficients $a_{2n+1}$ are zero. Hence $T_{2N}(a)$ has a checkered pattern, and rearranging rows and columns it is easily seen that $T_{2N}(a)$ is similar to the matrix ${\rm diag}\,(T_N(d),T_N(d))$. The Fourier coefficients $c_{2n}$ of $c(t)=\chi(t)a(t)$ are equal to zero. By rearranging the rows and columns of $T_{2N}(\chi a)$ in the same way as above it becomes apparent that $T_{2N}(\chi a)$ is similar to a matrix $$ \left(\begin{array}{cc}0&D_2\\D_1&0\end{array}\right)\quad\mbox{ where } D_{1}=\left(c_{2(j-k)+1}\right)_{j,k=0}^{N-1}\mbox{ and } D_{2}=\left(c_{2(j-k)-1}\right)_{j,k=0}^{N-1}. $$ {}From the identity \begin{eqnarray}\label{f4.23} \chi(e^{i\theta}) \;\;=\;\; t_{-1/2}(e^{i\theta})t_{1/2}(e^{i(\theta-\pi)}) \;\;=\;\; -t_{1/2}(e^{i\theta})t_{-1/2}(e^{i(\theta-\pi)}) \end{eqnarray} it follows that $d_1(e^{i\theta})=e^{-i\theta/2}c(e^{i\theta/2})$ and $d_2(e^{i\theta})=-e^{i\theta/2}c(e^{i\theta/2})$. Hence $D_1=T_{N}(d_1)$ and $D_2=-T_N(d_2)$. Since $\det T_{2N}(c)=(-1)^N \det D_1 \det D_2$, this completes the proof. \end{proof} Hence we have reduced the computation of $\det T_{2N}(\chi a)$ to the Toeplitz determinants $T_N(d_1)$ and $T_N(d_2)$, for which in the case of piecewise continuous functions it is possible to apply the Fisher-Hartwig conjecture under certain assumptions. The following result, which is taken from \cite{E}, makes this explicit. Therein $G(\cdot)$ is the Barnes $G$-function \cite{WW}, $d_{0,\pm}$ are the Wiener-Hopf factors of the function $d_0$, \begin{eqnarray} d_{0,\pm}(e^{i\theta})&=&\exp\left(\sum_{k=1}^\infty [\log d_0]_{\pm k}e^{\pm ik\theta}\right), \end{eqnarray} and \begin{eqnarray} d_\pm(e^{i\theta})&=& d_{0,\pm}(e^{i\theta})\prod_{r=1}^R \Big(1-e^{\pm i(\theta-\theta_r)}\Big)^{\pm\beta_r} \end{eqnarray} are the generalized Wiener-Hopf factors of $d$. 
\begin{proposition} Let \begin{eqnarray}\label{f4.25} d(e^{i\theta}) &=& d_0(e^{i\theta})\prod_{r=1}^R t_{\beta_r}(e^{i(\theta-\theta_r)}), \end{eqnarray} where $d_0$ is an infinitely differentiable nonvanishing function with winding number zero, $\theta_1,\dots,\theta_R\in(0,2\pi)$ are distinct numbers, and $\beta_1,\dots,\beta_R$ are complex parameters satisfying $|\mathrm{Re}\,\beta_r|<1/2$ for all $r=1,\dots,R$. Then \begin{eqnarray}\label{f3.26} \frac{\det T_N(t_{-1/2}d)}{\det T_N(d)} &\sim& N^{-1/4}G(1/2)G(3/2)d_+(1)^{-1/2}d_-(1)^{1/2}, \qquad N\to\infty,\nonumber\\ \frac{\det T_N(t_{1/2}d)}{\det T_N(d)} &\sim& N^{-1/4}G(1/2)G(3/2)d_+(1)^{1/2}d_-(1)^{-1/2}, \qquad N\to\infty.\nonumber \end{eqnarray} Moreover, \begin{eqnarray} \det T_N(d) &\sim& F^NN^{\Omega}E,\qquad N\to\infty, \end{eqnarray} where $F=\exp\left(\frac{1}{2\pi}\int_{0}^{2\pi} \log d_0(e^{i\theta})\,d\theta\right)$, $\Omega=-\sum\limits_{r=1}^R\beta_r^2$, and $E$ is another constant. \end{proposition} (The constant $E$ is quite complicated, so in the interest of brevity, we omit the exact formula from this paper and refer to \cite{BT,BS,E} for an explicit representation.) The previous propositions yield the following results. We keep the same notation. \begin{corollary}\label{c4.3} Let $d$ be a function of the form (\ref{f4.25}) and assume that the same conditions as above are fulfilled. Let $a(e^{i\theta})=d(e^{2i\theta})$. Then \begin{eqnarray}\label{f4.29} \frac{\det T_{2N}(\chi a)}{\det T_{2N}(a)} &\sim& N^{-1/2}G^2(1/2)G^2(3/2),\qquad N\to\infty, \end{eqnarray} and \begin{eqnarray} \det T_{2N}(a) &\sim& F^{2N}N^{2\Omega}E^2,\qquad N\to\infty. \end{eqnarray} \end{corollary} The following corollary gives an asymptotic formula for the determinants of Hankel moment matrices in the special case where the symbol is even. \begin{corollary}\label{c4.4} Let $b \in L^1[-1,1]$ such that $b(-x)= b(x).$ Define $d(e^{i\theta}) = b(\cos(\theta/2))$ and suppose that $d$ is of the form (\ref{f4.25}). 
Then \begin{eqnarray} \det H_N[b] &\sim& F^N N^{\Omega -1/4}G(1/2)G(3/2)E,\qquad N\to\infty. \end{eqnarray} \end{corollary} \begin{proof} Define $a(e^{i\theta})=b(\cos\theta)$. Then Theorem \ref{t3.2} implies that $(\det H_N[b])^2=\det T_{2N}(\chi a)$. Since $b(x)=b(-x)$ the function $d$ is well defined and $a(e^{i\theta})=d(e^{2i\theta})$. Now the formula follows from Corollary \ref{c4.3} and by taking square roots. \end{proof} The interesting point in Corollary \ref{c4.3} is that the asymptotic limit of (\ref{f4.29}) does not depend on the underlying function $a$. We remark that we have proved this limit relation for certain piecewise continuous functions $a$ subject to the condition $a(-t)=a(t)$. Our primary goal was however to determine the limit for certain functions $a$ satisfying the relation $a(t^{-1})=a(t)$. Our conjecture is that the asymptotic limit is given by the above expression in general also for those functions. In order to support this hypothesis we resort to the generalization of the Fisher-Hartwig conjecture, which has not yet been proved, but is strongly suggested by examples. Since $\det T_{2N+1}(\chi a)=0$ for all $N$ (under the assumption $a(t^{-1})=a(t)$), the asymptotics of $T_N(\chi a)$ can only be described by the generalized but not the original conjecture. The crucial observation is that one has several possibilities for representing $\chi a$ in a form like (\ref{f4.25}). Indeed, from (\ref{f4.23}) it follows that $$ \chi(e^{i\theta}) a(e^{i\theta}) \;\;=\;\; t_{-1/2}(e^{i\theta})t_{1/2}(e^{i(\theta-\pi)})a(e^{i\theta}) \;\;=\;\; -t_{1/2}(e^{i\theta})t_{-1/2}(e^{i(\theta-\pi)})a(e^{i\theta}), $$ tacitly assuming that $a$ admits also a representation of the form (\ref{f4.25}) with appropriate properties. 
Then the generalized conjecture predicts \cite{BT,E} that \begin{eqnarray} \det T_{N}(\chi a) &\sim& \det T_N(t_{-1/2}(e^{i\theta}))\det T_N(t_{1/2}(e^{i(\theta-\pi)})) \det T_N(a) E_1\nonumber\\&& +(-1)^N\det T_N(t_{1/2}(e^{i\theta}))\det T_N(t_{-1/2}(e^{i(\theta-\pi)})) \det T_N(a) E_2,\nonumber \end{eqnarray} where $E_1$ and $E_2$ are the ``correlation'' constants \begin{eqnarray} E_1 &=& E(t_{-1/2}(e^{i\theta}),t_{1/2}(e^{i(\theta-\pi)})) E(t_{-1/2}(e^{i\theta}),a)E(t_{1/2}(e^{i(\theta-\pi)}),a) \nonumber\\ E_2 &=& E(t_{1/2}(e^{i\theta}),t_{-1/2}(e^{i(\theta-\pi)})) E(t_{1/2}(e^{i\theta}),a)E(t_{-1/2}(e^{i(\theta-\pi)}),a) \nonumber \end{eqnarray} with $E(\cdot,\cdot)$ defined by \begin{eqnarray} E(b,c) &=& \exp\left(\lim_{r\to1-0}\sum_{k=1}^\infty\Big( k[\log h_rb_+]_k[\log h_rc_-]_{-k}+ k[\log h_rb_-]_{-k}[\log h_rc_+]_k\Big)\right),\nonumber \end{eqnarray} $h_rb_\pm$ and $h_rc_\pm$ denoting the harmonic extensions of the Wiener-Hopf factors of $b_\pm$ and $c_\pm$. {}From all this it follows that \begin{eqnarray} \frac{\det T_{2N}(\chi a)}{\det T_{2N}(a)} &\sim& (2N)^{-1/2}G^2(1/2)G^2(3/2)(E_1+E_2),\nonumber \end{eqnarray} where a straightforward computation of the constants gives \begin{eqnarray} E_1 &=& 2^{-1/2}\left(\frac{a_+(-1)a_-(1)}{a_-(-1)a_+(1)}\right)^{1/2},\nonumber\\ E_2 &=& 2^{-1/2}\left(\frac{a_+(-1)a_-(1)}{a_-(-1)a_+(1)}\right)^{-1/2}.\nonumber \end{eqnarray} The assumption that $a(t^{-1})=a(t)$ implies that $a_-(t)=\gamma a_+(t^{-1})$ with a certain constant $\gamma\neq0$. Hence \begin{eqnarray} E_1=E_2=2^{-1/2},\nonumber \end{eqnarray} which leads to the conjecture that \begin{eqnarray} \frac{\det T_{2N}(\chi a)}{\det T_{2N}(a)} &\sim& N^{-1/2}G^2(1/2)G^2(3/2),\qquad N\to\infty. \end{eqnarray} Using Theorem \ref{t3.2} we arrive at a conjecture for the Hankel moment matrices: \begin{eqnarray} \frac{\det H_{N}[b]}{\sqrt{\det T_{2N}(a)}} &\sim& N^{-1/4}G(1/2)G(3/2),\qquad N\to\infty, \end{eqnarray} where $a(e^{i\theta})=b(\cos\theta)$. 
We remark that this formula is in accordance with Corollary \ref{c4.4}. We end this section by noting one other result that follows from our identities and Corollary \ref{c1.5}. This result applies to Hankel moment matrices with a special case of Jacobi weights and computes the asymptotics for $\det H_N[b]$ where $b$ is of the form $b_0(x)\sqrt{\frac{1+x}{1-x}}$ with an even function $b_0$. \begin{corollary} Suppose $b \in L^1[-1,1]$ is of the above form with an even function $b_0$. Let $d(e^{i\theta})= b_0(\cos(\theta/2))$ and suppose that $d$ is of the form (\ref{f4.25}). Then $$\det H_N[b] \sim F^N N^{\Omega}E ,\qquad N\to \infty.$$ \end{corollary} \end{document}
\begin{document} \thanks{Acknowledgements: The authors thank the Banff International Research Station (BIRS) and the Women in Numbers 4 (WIN4) workshop for the opportunity to initiate this collaboration. The first author is grateful for the support of National Science Foundation grant DMS-1449679, and the Simons Foundation.} \title{Quantum modular forms and singular combinatorial series with distinct roots of unity} \author{Amanda Folsom, Min-Joo Jang, Sam Kimport, and Holly Swisher} \maketitle \begin{abstract} Understanding the relationship between mock modular forms and quantum modular forms is a problem of current interest. Both mock and quantum modular forms exhibit modular-like transformation properties under suitable subgroups of $\textnormal{SL}_2(\mathbb Z)$, up to nontrivial error terms; however, their domains (the upper half-plane $\mathbb H$, and the rationals $\mathbb Q$, respectively) are notably different. Quantum modular forms, originally defined by Zagier in 2010, have also been shown to be related to the diverse areas of colored Jones polynomials, meromorphic Jacobi forms, partial theta functions, vertex algebras, and more. In this paper we study the $(n+1)$-variable combinatorial rank generating function $R_n(x_1,x_2,\dots,x_n;q)$ for $n$-marked Durfee symbols. These are $n+1$ dimensional multisums for $n>1$, and specialize to the ordinary two-variable partition rank generating function when $n=1$. The mock modular properties of $R_n$ when viewed as a function of $\tau\in\mathbb H$, with $q=e^{2\pi i \tau}$, for various $n$ and fixed parameters $x_1, x_2, \cdots, x_n$, have been studied in a series of papers. Namely, by Bringmann and Ono when $n=1$ and $x_1$ a root of unity; by Bringmann when $n=2$ and $x_1=x_2=1$; by Bringmann, Garvan, and Mahlburg for $n\geq 2$ and $x_1=x_2=\dots=x_n=1$; and by the first and third authors for $n\geq 2$ and the $x_j$ suitable roots of unity ($1\leq j \leq n$). 
The quantum modular properties of $R_1$ readily follow from existing results. Here, we focus our attention on the case $n\geq 2$, and prove for any $n\geq 2$ that the combinatorial generating function $R_n$ is a quantum modular form when viewed as a function of $x \in \mathbb Q$, where $q=e^{2\pi i x}$, and the $x_j$ are suitable distinct roots of unity. \end{abstract} \section{Introduction and Statement of results}\label{intro} \subsection{Background} A \emph{partition} of a positive integer $n$ is any non-increasing sum of positive integers that adds to $n$. Integer partitions and modular forms are beautifully and intricately linked, due to the fact that the generating function for the partition function $p(n):= \# \{\mbox{partitions of } n \}$, is related to Dedekind's eta function $\eta(\tau)$, a weight $\frac12$ modular form defined by \begin{align}\label{def_eta} \eta(\tau) := q^{\frac{1}{24}}\prod_{n=1}^\infty (1-q^n). \end{align} Namely, \begin{equation}\label{p-eta} 1 + \sum_{n=1}^\infty p(n)q^n = \frac{1}{(q;q)_{\infty}} = q^{\frac{1}{24}}\eta(\tau)^{-1}, \end{equation} where here and throughout this section $q:=e^{2\pi i \tau}$, $\tau \in \mathbb{H}:= \{x + i y \ | \ x \in \mathbb R, y \in \mathbb R^+\}$ the upper half of the complex plane, and the $q$-Pochhammer symbol is defined for $n\in\mathbb N_0\cup\{\infty\}$ by $$(a)_n=(a;q)_n:=\prod_{j=1}^n (1-aq^{j-1}).$$ In fact, the connections between partitions and modular forms go much deeper, and one example of this is given by the combinatorial rank function. Dyson \cite{Dyson} defined the {\em rank} of a partition to be its largest part minus its number of parts, and the \emph{partition rank function} is defined by \[ N(m,n) := \# \{\mbox{partitions of } n \mbox{ with rank equal to } m \}. \] For example, $N(7,-2)=2$, because precisely 2 of the 15 partitions of $n=7$ have rank equal to $-2$; these are $2+2+2+1$, and $3+1+1+1+1$. 
Partition rank functions have a rich history in the areas of combinatorics, $q$-hypergeometric series, number theory and modular forms. As one particularly notable example, Dyson conjectured that the rank could be used to combinatorially explain Ramanujan's famous partition congruences modulo 5 and 7; this conjecture was later proved by Atkin and Swinnerton-Dyer \cite{AtkinSD}. It is well-known that the associated two variable generating function for $N(m,n)$ may be expressed as a $q$-hypergeometric series \begin{align}\label{rankgenfn} \sum_{m=-\infty}^\infty \sum_{n=0}^\infty N(m,n) w^m q^n = \sum_{n=0}^\infty \frac{q^{n^2}}{(wq;q)_n(w^{-1}q;q)_n} =: R_1(w;q),\end{align} noting here that $N(m,0)=\delta_{m0}$, where $\delta_{ij}$ is the Kronecker delta function. Specializations in the $w$-variable of the rank generating function have been of particular interest in the area of modular forms. For example, when $w= 1$, we have that \begin{equation}\label{r1mock1} R_1(1;q) = 1+ \sum_{n=1}^\infty p(n) q^n = q^{\frac{1}{24}}\eta^{-1}(\tau) \end{equation} thus recovering \eqref{p-eta}, which shows that the generating function for $p(n)$ is (essentially\footnote{Here and throughout, as is standard in this subject for simplicity's sake, we may slightly abuse terminology and refer to a function as a modular form or other modular object when in reality it must first be multiplied by a suitable power of $q$ to transform appropriately. }) the reciprocal of a weight $\frac12$ modular form. If instead we let $w=-1$, then \begin{equation}\label{r1mock2} R_1(-1;q) = \sum_{n=0}^\infty \frac{q^{n^2}}{(-q;q)_n^2} =: f(q). \end{equation} The function $f(q)$ is not a modular form, but one of Ramanujan's original third order mock theta functions. Mock theta functions, and more generally mock modular forms and harmonic Maass forms have been major areas of study. 
In particular, understanding how Ramanujan's mock theta functions fit into the theory of modular forms was a question that persisted from Ramanujan's death in 1920 until the groundbreaking 2002 thesis of Zwegers \cite{Zwegers1}: we now know that Ramanujan's mock theta functions, a finite list of curious $q$-hypergeometric functions including $f(q)$, exhibit suitable modular transformation properties after they are \emph{completed} by the addition of certain nonholomorphic functions. In particular, Ramanujan's mock theta functions are examples of \emph{mock modular forms}, the holomorphic parts of \emph{harmonic Maass forms}. Briefly speaking, harmonic Maass forms, originally defined by Bruinier and Funke \cite{BF}, are nonholomorphic generalizations of ordinary modular forms that in addition to satisfying appropriate modular transformations, must be eigenfunctions of a certain weight $k$-Laplacian operator, and satisfy suitable growth conditions at cusps (see \cite{BFOR, BF, OnoCDM, ZagierB} for more). Given that specializing $R_1$ at $w=\pm 1$ yields two different modular objects, namely an ordinary modular form and a mock modular form as seen in \eqref{r1mock1} and \eqref{r1mock2}, it is natural to ask about the modular properties of $R_1$ at other values of $w$. Bringmann and Ono answered this question in \cite{BO}, and used the theory of harmonic Maass forms to prove that upon specialization of the parameter $w$ to complex roots of unity not equal to $1$, the rank generating function $R_1$ is also a mock modular form. (See also \cite{ZagierB} for related work.) \begin{theoremno}[\cite{BO} Theorem 1.1] If $0<a<c$, then $$q^{-\frac{\ell_c}{24}}R_1(\zeta_c^a;q^{\ell_c}) + \frac{i \sin\left(\frac{\pi a}{c}\right) \ell_c^{\frac{1}{2}}}{\sqrt{3}} \int_{-\overline{\tau}}^{i\infty} \frac{\Theta\left(\frac{a}{c};\ell_c \rho\right)}{\sqrt{-i(\tau + \rho)}} d\rho $$ is a harmonic Maass form of weight $\frac{1}{2}$ on $\Gamma_c$. 
\end{theoremno} \noindent Here, $\zeta_c^a := e^{\frac{2\pi ia}{c}}$ is a $c$-th root of unity, $\Theta\left(\frac{a}{c};\ell_c\tau\right)$ is a certain weight $3/2$ cusp form, $\ell_c:=\textnormal{lcm}(2c^2,24)$, and $\Gamma_c$ is a particular subgroup of $\textnormal{SL}_2(\mathbb Z)$. In this paper, as well as in prior work of two of the authors \cite{F-K}, we study the related problem of understanding the modular properties of certain combinatorial $q$-hypergeometric series arising from objects called $n$-marked Durfee symbols, originally defined by Andrews in his notable work \cite{Andrews}. To understand $n$-marked Durfee symbols, we first describe Durfee symbols. For each partition, the Durfee symbol catalogs the size of its Durfee square, as well as the lengths of the columns to the right and the lengths of the rows beneath the Durfee square. For example, below we have the partitions of $4$, followed by their Ferrers diagrams with any element belonging to their Durfee squares marked by a square $(\sqbullet)$, followed by their Durfee symbols.
\[ \begin{array}{ccccc} 4 & 3+1 & 2+2 & 2+1+1 & 1+1+1+1 \\ \begin{array}{lllll} \sqbullet & \bullet & \bullet & \bullet \end{array} & \begin{array}{lll} \sqbullet & \bullet & \bullet \\ \bullet & & \end{array} & \begin{array}{ll} \sqbullet & \sqbullet \\ \sqbullet & \sqbullet \end{array} & \begin{array}{ll} \sqbullet & \bullet \\ \bullet & \\ \bullet & \end{array} & \begin{array}{l}\sqbullet \\ \bullet \\ \bullet \\ \bullet \end{array} \\ \hspace{2mm} \left( \begin{array}{lll} 1 & 1 & 1 \\ & & \end{array} \right)_1 \hspace{2mm} & \hspace{2mm} \left( \begin{array}{ll} 1 & 1 \\ 1 & \end{array} \right)_1 \hspace{2mm} & \hspace{2mm} \left( \begin{array}{l} \\ \end{array} \right)_2 \hspace{2mm} & \hspace{2mm} \left( \begin{array}{ll} 1 & \\ 1 & 1 \end{array} \right)_1 \hspace{2mm} & \hspace{2mm} \left( \begin{array}{lll} & & \\ 1 & 1 & 1 \end{array} \right)_1 \hspace{2mm}\\ \end{array} \] Andrews defined the {\em rank} of a Durfee symbol to be the length of the partition in the top row, minus the length of the partition in the bottom row. Notice that this gives Dyson's original rank of the associated partition. Andrews refined this idea by defining $n$-marked Durfee symbols, which use $n$ copies of the integers. For example, the following is a $3$-marked Durfee symbol of $55$, where $\alpha^j,\beta^j$ indicate the partitions in their respective columns. \[ \left( \begin{array}{cc|ccc|c} 4_3 & 4_3 & 3_2 & 3_2 & 2_2 & 2_1 \\ & 5_3 & & 3_2 & 2_2 & 2_1 \end{array} \right)_5 =: \left( \begin{array}{c|c|c} \alpha^3 & \alpha^2 & \alpha^1 \\ \beta^3 & \beta^2 & \beta^1 \end{array} \right)_5 \] Each $n$-marked Durfee symbol has $n$ ranks, one defined for each column. Let $\rm{len}(\pi)$ denote the length of a partition $\pi$.
Then the $n$th rank is defined to be $\rm{len}(\alpha^n) - \rm{len}(\beta^n)$, and each $j$th rank for $1\leq j <n$ is defined by $\rm{len}(\alpha^j) - \rm{len}(\beta^j) -1$. Thus the above example has $3$rd rank equal to $1$, $2$nd rank equal to $0$, and $1$st rank equal to $-1$. Let $\mathcal{D}_n(m_1,m_2,\dots, m_n;r)$ denote the number of $n$-marked Durfee symbols arising from partitions of $r$ with $i$th rank equal to $m_i$. In \cite{Andrews}, Andrews showed that the $( n+1)$-variable rank generating function for Durfee symbols may be expressed in terms of certain $q$-hypergeometric series, analogous to (\ref{rankgenfn}). To describe this, for $n\geq 2$, define {\small{\begin{align}\label{rkorigdef} &R_n({\boldsymbol{x}};q) := \\ & \nonumber \mathop{\sum_{m_1 > 0}}_{m_2,\dots,m_n \geq 0} \!\!\!\!\!\!\!\! \frac{q^{(m_1 + m_2 + \dots + m_n)^2 + (m_1 + \dots + m_{n-1}) + (m_1 + \dots + m_{n-2}) + \dots + m_1}}{(x_1q;q)_{m_1} \!\left(\frac{q}{x_1};q\right)_{m_1} \!\!\!\!(x_2 q^{m_1};q)_{m_2 + 1} \!\!\left(\frac{q^{m_1}}{x_2};q\right)_{m_2+1} \!\!\!\!\!\!\!\!\!\!\cdots(x_n q^{m_1 + \dots + m_{n-1}};q)_{m_n+1} \!\!\left(\!\frac{q^{m_1 + \dots + m_{n-1}}}{x_n};q\!\right)_{\! m_n+1}},\end{align}}}where ${\boldsymbol{x}} = {\boldsymbol{x}}_n := (x_1,x_2,\dots,x_n).$ For $n=1$, the function $R_1(x;q)$ is defined as the $q$-hypergeometric series in (\ref{rankgenfn}). In what follows, for ease of notation, we may also write $R_1({\boldsymbol{x}};q)$ to denote $R_1(x;q)$, with the understanding that ${\boldsymbol{x}} := x$. In \cite{Andrews}, Andrews established the following result, generalizing (\ref{rankgenfn}). 
\begin{theoremno}[\cite{Andrews} Theorem 10] For $n\geq 1$ we have that \begin{align}\label{durfgenand1}\sum_{m_1,m_2,\dots,m_n = -\infty}^\infty \sum_{r=0}^\infty \mathcal{D}_n(m_1,m_2,\dots,m_n;r)x_1^{m_1}x_2^{m_2}\cdots x_n^{m_n}q^r = R_n({\boldsymbol{x}};q).\end{align} \end{theoremno} When $n=1$, one recovers Dyson's rank, that is, $\mathcal D_1(m_1;r)=N(m_1,r)$, so that \eqref{durfgenand1} reduces to \eqref{rankgenfn} in this case. The mock modularity of the associated two variable generating function $R_1(x_1;q)$ was established in \cite{BO} as described in the Theorem above. When $n=2$, the modular properties of $R_2(1,1;q)$ were originally studied by Bringmann in \cite{Bri1}, who showed that \[R_2(1,1;q) := \frac{1}{(q;q)_\infty}\sum_{m\neq 0} \frac{(-1)^{m-1}q^{3m(m+1)/2}}{(1-q^m)^2}\] is a \emph{quasimock theta function}. In \cite{BGM}, Bringmann, Garvan, and Mahlburg showed more generally that $R_{n}(1,1,\dots,1;q)$ is a quasimock theta function for $n\geq 2$. (See \cite{Bri1, BGM} for precise details of these statements.) In \cite{F-K}, two of the authors established the automorphic properties of $R_n\left({\boldsymbol{x}};q\right)$, for more arbitrary parameters ${\boldsymbol{x}} = (x_1,x_2,\dots,x_n)$, thereby treating families of $n$-marked Durfee rank functions with additional singularities beyond those of $R_n(1,1,\dots,1;q)$. We point out that the techniques of Andrews \cite{Andrews} and Bringmann \cite{Bri1} were not directly applicable in this setting due to the presence of such additional singularities. These singular combinatorial families are essentially mixed mock and quasimock modular forms. To precisely state a result from \cite{F-K} along these lines, we first introduce some notation, which we also use for the remainder of this paper. Namely, we consider functions evaluated at certain length $n$ vectors ${\boldsymbol{\zeta_n}}$ of roots of unity defined as follows (as in \cite{F-K}). 
In what follows, we let $n$ be a fixed integer satisfying $n\geq 2$. Suppose for $1\leq j \leq n$, $\alpha_j \in \mathbb Z$ and $\beta_j \in \mathbb N$, where $\beta_j \nmid \alpha_j, \beta_j \nmid 2\alpha_j$, and that $\frac{\alpha_{r}}{\beta_{r}} \pm \frac{\alpha_{s}}{\beta_{s}} \not\in\mathbb Z$ if $1\leq r\neq s \leq n$. Let \begin{align} \notag {\boldsymbol{\alpha_n}} &:= \Big( \frac{\alpha_{1}}{\beta_{1}},\frac{\alpha_{2}}{\beta_{2}},\dots,\frac{\alpha_{n}}{\beta_{n}} \Big) \in \mathbb Q^n \\ \label{zetavec} {\boldsymbol{\zeta_n}} &:=\big(\zeta_{\beta_{1}}^{\alpha_{1}},\zeta_{\beta_{2}}^{\alpha_{2}},\dots,\zeta_{\beta_{n}}^{\alpha_{n}}\big) \in \mathbb C^n. \end{align} \begin{remark} We point out that the dependence of the vector $\boldsymbol{\zeta_n}$ on $n$ is reflected only in the length of the vector, and not (necessarily) in the roots of unity that comprise its components. In particular, the vector components may be chosen to be $m$-th roots of unity for different values of $m$. \end{remark} \begin{remark} The conditions stated above for $\boldsymbol{\zeta_n}$, as given in \cite{F-K}, do not require $\gcd(\alpha_j, \beta_j) = 1$. Instead, they merely require that $\frac{\alpha_j}{\beta_j} \not\in \frac{1}{2}\mathbb Z$. Without loss of generality, we will assume here that $\gcd(\alpha_j, \beta_j) = 1$. Then, requiring that $\beta_j \nmid 2\alpha_j$ is the same as saying $\beta_j \neq 2$. \end{remark} In \cite{F-K}, the authors proved that (under the hypotheses for $\boldsymbol{\zeta_n}$ given above) the completed nonholomorphic function \begin{equation}\label{Ahat} \widehat{\mathcal A}(\boldsymbol{\zeta_n};q) = q^{-\frac{1}{24}}R_n(\boldsymbol{\zeta_n};q) + \mathcal A^-(\boldsymbol{\zeta_n};q) \end{equation} transforms like a modular form. 
Here the nonholomorphic part $\mathcal A^-$ is defined by \begin{equation}\label{def_A-} \mathcal A^-(\boldsymbol{\zeta_n};q) := \frac{1}{\eta(\tau)}\sum_{j=1}^{n} (\zeta_{2\beta_j}^{-3\alpha_j}-\zeta_{2\beta_j}^{-\alpha_j})\frac{\mathscr{R}_3\left(\frac{\alpha_j}{\beta_j},-2\tau;\tau\right)}{\Pi_{j}^\dag({\boldsymbol{\alpha_n}})}, \end{equation} where $\mathscr{R}_3$ is defined in \eqref{AminusDef}, and the constant $\Pi_j^{\dag}$ is defined in \cite[(4.2), with $n\mapsto j$ and $k\mapsto n$]{F-K}. Precisely, we have the following special case of a theorem established by two of the authors in \cite{F-K}. \begin{theoremno}[\cite{F-K} Theorem 1.1] If $n\geq 2$ is an integer, then $ \widehat{\mathcal A}\!\left( {\boldsymbol{\zeta_n}};q \right)$ is a nonholomorphic modular form of weight $1/2$ on $\Gamma_{n}$ with character $\chi_\gamma^{-1}$. \end{theoremno} Here, the subgroup $\Gamma_{n}\subseteq \textnormal{SL}_2(\mathbb Z)$ under which $\widehat{\mathcal A}({\boldsymbol{\zeta_n}};q)$ transforms is defined by \begin{align} \label{def_Gammangroup} \Gamma_{n}:=\bigcap_{j=1}^{n} \Gamma_0\left(2\beta_j^2\right)\cap \Gamma_1(2\beta_j), \end{align} and the Nebentypus character $\chi_\gamma$ is given in Lemma \ref{ETtrans}. \subsection{Quantum modular forms} In this paper, we study the quantum modular properties of the $(n+1)$-variable rank generating function for $n$-marked Durfee symbols $R_n({\boldsymbol{x}};q)$. Loosely speaking, a quantum modular form is similar to a mock modular form in that it exhibits a modular-like transformation with respect to the action of a suitable subgroup of $\textnormal{SL}_2(\mathbb Z)$; however, the domain of a quantum modular form is not the upper half-plane $\mathbb H$, but rather the set of rationals $\mathbb Q$ or an appropriate subset.
The formal definition of a quantum modular form was originally introduced by Zagier in \cite{Zqmf} and has been slightly modified to allow for half-integral weights, subgroups of $\operatorname{SL_2}(\mathbb{Z})$, etc.\ (see \cite{BFOR}). \begin{defn} \label{qmf} A weight $k \in \frac{1}{2} \mathbb{Z}$ quantum modular form is a complex-valued function $f$ on $\mathbb{Q}$, such that for all $\gamma = \sm abcd \in \operatorname{SL_2}(\mathbb{Z})$, the functions $h_\gamma: \mathbb{Q} \setminus \gamma^{-1}(i\infty) \rightarrow \mathbb{C}$ defined by \begin{equation*} h_\gamma(x) := f(x)-\varepsilon^{-1}(\gamma) (cx+d)^{-k} f\left(\frac{ax+b}{cx+d}\right) \end{equation*} satisfy a ``suitable" property of continuity or analyticity in a subset of $\mathbb{R}$. \end{defn} \begin{remark} The complex numbers $\varepsilon(\gamma)$, which satisfy $|\varepsilon(\gamma)|=1$, are such as those appearing in the theory of half-integral weight modular forms. \end{remark} \begin{remark} We may modify Definition \ref{qmf} appropriately to allow transformations on subgroups of $\operatorname{SL_2}(\mathbb{Z})$. We may also restrict the domains of the functions $h_\gamma$ to be suitable subsets of $\mathbb{Q}$. \end{remark} The subject of quantum modular forms has been widely studied since the time of origin of the above definition. For example, quantum modular forms have been shown to be related to the diverse areas of Maass forms, Eichler integrals, partial theta functions, colored Jones polynomials, meromorphic Jacobi forms, and vertex algebras, among other things (see \cite{BFOR} and references therein). In particular, the notion of a quantum modular form is now known to have direct connection to Ramanujan's original definition of a mock theta function. 
Namely, in his last letter to Hardy, Ramanujan examined the asymptotic difference between mock theta and modular theta functions as $q$ tends towards roots of unity $\zeta$ radially within the unit disk (equivalently, as $\tau$ approaches rational numbers vertically in the upper half plane, with $q=e^{2\pi i \tau}, \tau \in \mathbb H$), and we now know that these radial limit differences are equal to special values of quantum modular forms at rational numbers (see \cite{BFOR, BR, FOR}). \subsection{Results} \label{sec_results} On one hand, exploring the quantum modular properties of the rank generating function for $n$-marked Durfee symbols $R_n$ in (\ref{durfgenand1}) is a natural problem given that two of the authors have established automorphic properties of this function on $\mathbb H$ (see \cite[Theorem 1.1]{F-K} above), that $\mathbb Q$ is a natural boundary to $\mathbb H$, and that there has been much progress made in understanding the relationship between quantum modular forms and mock modular forms recently \cite{BFOR}. Moreover, given that $R_n$ is a vast generalization of the two variable rank generating function in \eqref{rankgenfn} - both a combinatorial $q$-hypergeometric series and a mock modular form - understanding its automorphic properties in general is of interest. On the other hand, there is no reason to a priori expect $R_n$ to converge on $\mathbb Q$, let alone exhibit quantum modular properties there. Nevertheless, we establish quantum modular properties for the rank generating function for $n$-marked Durfee symbols $R_n$ in this paper. For the remainder of this paper, we use the notation $$\mathcal V_{n}(\tau) := \mathcal V({\boldsymbol{\zeta_n}};q),$$ where $\mathcal V$ may refer to any one of the functions $\widehat{\mathcal A}, \mathcal A^-, R_n,$ etc. 
Moreover, we will write \begin{equation}\label{rel_AR} \mathcal A_{n}(\tau) = q^{-\frac{1}{24}} R_n(\boldsymbol{\zeta_n};q) \end{equation} for the holomorphic part of $\widehat{\mathcal A}$; from \cite[Theorem 1.1]{F-K} above, we have that this function is a mock modular form of weight $1/2$ with character $\chi_\gamma^{-1}$ (see Lemma \ref{Chi_gammaForm}) for the group $\Gamma_n$ defined in (\ref{def_Gammangroup}). Here, we will show that $\mathcal{A}_n$ is also a quantum modular form, under the action of a subgroup $\Gamma_{\boldsymbol{\zeta_n}} \subseteq \Gamma_{n}$ defined in \eqref{eqn:GroupDefn}, with quantum set \begin{equation}\label{qSetDef} Q_{\boldsymbol{\zeta_n}} := \left\{\frac{h}{k}\in \mathbb Q\; \middle\vert\; \begin{aligned} & \ h\in\mathbb Z, k\in\mathbb N, \gcd(h,k) = 1, \ \beta_j \nmid k\ \forall\ 1\le j\le n,\\&\left\vert \frac{\alpha_j}{\beta_j}k - \left[\frac{\alpha_j}{\beta_j}k\right]\right\vert > \frac{1}{6}\ \forall\ 1\le j\le n\end{aligned} \right\},\end{equation} where $[x]$ is the closest integer to $x$. \begin{remark}\label{rmk:closest_int} For $x \in \frac12 + \mathbb Z$, different sources define $[x]$ to mean either $x-\frac12$ or $x+\frac12$. 
The definition of $Q_{\boldsymbol{\zeta_n}}$ involving $[ \cdot ]$ is well-defined for either of these conventions in the case of $x\in \frac12 + \mathbb Z,$ as $\vert x - [x]\vert = \frac{1}{2}$.\end{remark} To define the exact subgroup under which $\mathcal A_n$ transforms as a quantum automorphic object, we let \begin{equation}\label{def_ell}\ell = \ell(\boldsymbol{\zeta_n}):= \begin{cases} 6\left[\text{lcm}(\beta_1, \dots, \beta_{n})\right]^2 &\text{ if } 3 \nmid \beta_j \text{ for all } 1\leq j \leq n, \\ 2\left[\text{lcm}(\beta_1, \dots, \beta_{n})\right]^2 &\text{ if } 3 \mid \beta_j \text{ for some } 1\leq j \leq n, \end{cases}\end{equation} and let $S_\ell:=\left(\begin{smallmatrix}1 & 0 \\ \ell & 1 \end{smallmatrix}\right)$, $T:=\left(\begin{smallmatrix}1 & 1 \\ 0 & 1 \end{smallmatrix}\right)$. We then define the group generated by these two matrices as \begin{equation} \label{eqn:GroupDefn} \Gamma_{\boldsymbol{\zeta_n}}:= \langle S_\ell, T \rangle. \end{equation} We now state our first main result, which proves that $\mathcal A_n(x),$ and hence $e(-\frac{x}{24})R_n(\boldsymbol{\zeta_n};e(x))$ is a quantum modular form on $Q_{\boldsymbol{\zeta_n}}$ with respect to $\Gamma_{\boldsymbol{\zeta_n}}$. Here and throughout we let $e(x):=e^{2\pi ix}$. \begin{theorem}\label{thm_main_N0} Let $n \geq 2$. For all $\gamma = \left(\begin{smallmatrix}a & b \\ c & d \end{smallmatrix}\right) \in \Gamma_{\boldsymbol{\zeta_n}}$, and $x\in Q_{\boldsymbol{\zeta_n}}$, \[H_{n,\gamma}(x) := \mathcal{A}_n(x) - \chi_\gamma (c x+ d)^{-\frac12}\mathcal{A}_n(\gamma x) \] is defined, and extends to an analytic function in $x$ on $\mathbb{R} - \{-\frac{d}{c}\}$.
In particular, for the matrix $S_\ell$, \begin{multline}\notag H_{n,S_\ell}(x) = \frac{\sqrt{3}}{2} \sum_{j=1}^{n}\frac{(\zeta_{2\beta_j}^{\alpha_j} - \zeta_{2\beta_j}^{3\alpha_j})}{\displaystyle\Pi^\dag_j( {\boldsymbol{\alpha_n}})} \left[\sum_\pm \zeta_6^{\pm1}\int_{\frac{1}{\ell}}^{i\infty}\frac{g_{\pm\frac13+\frac12,-\frac{3\alpha_j}{\beta_j}+\frac12}(3\rho)}{\sqrt{-i(\rho+x)}}d\rho \right] \\ +\sum_{j=1}^{n}\frac{(\zeta_{2\beta_j}^{-3\alpha_j} - \zeta_{2\beta_j}^{-\alpha_j})}{\displaystyle\Pi^\dag_j( {\boldsymbol{\alpha_n}})} (\ell x+1)^{-\frac12}\zeta_{24}^{-\ell}\mathcal{E}_1\left(\frac{\alpha_j}{\beta_j},\ell;x\right), \end{multline} where the weight $3/2$ theta functions $g_{a,b}$ are defined in \eqref{def_gab}, and $\mathcal E_1$ is defined in \eqref{def_mathcalE}. \end{theorem} \begin{remark} As mentioned above, the constants $\Pi^\dagger_j$ are defined in \cite[(4.2)]{F-K}. With the exception of replacing $n\mapsto j$ and $k\mapsto n$, we have preserved the notation for these constants from \cite{F-K}. \end{remark} \begin{remark} Our results apply to any $n\geq 2$, as the quantum modular properties in the case $n=1$ readily follow from existing results. Namely, proceeding as in the proof of Theorem \ref{qsProof}, one may determine a suitable quantum set for the normalized rank generating function in \cite[Theorem 1.1]{BO}. Using \cite[Theorem 1.1]{BO}, a short calculation shows that the error to modularity (with respect to the nontrivial generator of $\Gamma_c$) is a multiple of $$\int_{x}^{i\infty} \frac{\Theta(\frac{a}{c};\ell_c \rho)}{\sqrt{-i(\tau+\rho)}}d\rho$$ for some $x\in\mathbb Q$. When viewed as a function of $\tau$ in a subset of $\mathbb R$, this integral is analytic (e.g., see \cite{LZ, Zqmf}). One could also establish the quantum properties of a non-normalized version of $R_1$ by rewriting it in terms of the Appell-Lerch sum $A_3$, and proceeding as in the proof of Theorem \ref{thm_main_N0}. 
In this case, $R_1(\zeta_1;q)$ (where $\zeta_1=e(\alpha_1/\beta_1)$) converges on the quantum set $Q_{\zeta_1}$, where this set is defined by letting $n=1$ in \eqref{qSetDef}. The interested reader may also wish to consult \cite{CLR} for general results on quantum properties associated to mock modular forms. \end{remark} \begin{remark} In a forthcoming joint work \cite{FJKS}, we extend Theorem \ref{thm_main_N0} to hold for the more general vectors of roots of unity considered in \cite{F-K}, i.e., those with repeated entries. Allowing repeated roots of unity introduces additional singularities, and the modular completion of $R_n$ is significantly more complicated. This precludes us from proving the more general case in the same way as the restricted case we address here. \end{remark} \section{Preliminaries}\label{prelim} \subsection{Modular, mock modular and Jacobi forms} A special ordinary modular form we require is Dedekind's $\eta$-function, defined in (\ref{def_eta}). This function is well known to satisfy the following transformation law \cite{Rad}. \begin{lemma}\label{ETtrans} For $\gamma=\sm{a}{b}{c}{d} \in \textnormal{SL}_2(\mathbb Z)$, we have that \begin{align*} \eta\left(\gamma\tau\right) = \chi_\gamma(c\tau + d)^{\frac{1}{2}} \eta(\tau), \end{align*} where $$\chi_\gamma := \begin{cases} e\left(\frac{b}{24}\right), & \textnormal{ if } c=0, d=1, \\ \sqrt{-i} \ \omega_{d,c}^{-1}e\left(\frac{a+d}{24c}\right), & \textnormal{ if } c>0,\end{cases}$$ with $\omega_{d,c} := e(\frac12 s(d,c))$. Here the Dedekind sum $s(m,t)$ is given for integers $m$ and $t$ by $$s(m,t) := \sum_{j \!\!\!\mod t} \left(\!\!\left(\frac{j}{t}\right)\!\!\right)\left(\!\!\left(\frac{mj}{t}\right)\!\!\right),$$ where $((x)) := x-\lfloor x \rfloor - 1/2$ if $x\in \mathbb R\setminus \mathbb Z$, and $((x)):=0$ if $x\in \mathbb Z$. \end{lemma} The following gives a useful expression for $\chi_\gamma$ (see \cite[Ch. 4, Thm. 
2]{Knopp}): \begin{equation}\label{Chi_gammaForm} \chi_\gamma = \left\{ \begin{array}{ll} \big(\frac{d}{|c|} \big)e\left(\frac{1}{24}\left( (a+d)c - bd(c^2-1) - 3c \right)\right) & \mbox{ if } c \equiv 1 \pmod{2}, \\ \big( \frac{c}{d} \big) e\left(\frac{1}{24}\left( (a+d)c - bd(c^2-1) + 3d - 3 - 3cd \right)\right) & \mbox{ if } d\equiv 1\pmod{2}, \end{array}\right. \end{equation} where $\big(\frac{\alpha}{\beta}\big)$ is the generalized Legendre symbol. We require two additional functions, namely the Jacobi theta function $\vartheta(u;\tau)$, an ordinary Jacobi form, and a nonholomorphic modular-like function $R(u;\tau)$ used by Zwegers in \cite{Zwegers1}. In what follows, we will also need certain transformation properties of these functions. \begin{proposition} \label{thetaTransform} For $u \in\mathbb C$ and $\tau\in\mathbb{H}$, define \begin{equation}\label{thetaDef}\vartheta(u;\tau) := \sum_{\nu\in\frac{1}{2} + \mathbb Z} e^{\pi i \nu^2\tau + 2\pi i \nu\left(u + \frac{1}{2}\right)}.\end{equation} Then $\vartheta$ satisfies \begin{enumerate} \item $\vartheta(u+1; \tau) = -\vartheta(u; \tau),$\\ \item $\vartheta(u + \tau; \tau) = -e^{-\pi i \tau - 2\pi i u}\vartheta(u; \tau),$\\ \item $\displaystyle \vartheta(u; \tau) = - i e^{\pi i \tau/4}e^{-\pi i u} \prod_{m=1}^\infty (1-e^{2\pi i m\tau})(1-e^{2\pi i u}e^{2\pi i \tau(m-1)})(1 - e^{-2\pi i u}e^{2\pi i m\tau}).$ \end{enumerate} \end{proposition} The nonholomorphic function $R(u;\tau)$ is defined in \cite{Zwegers1} by \[ R(u;\tau):=\sum_{\nu\in\frac12+\mathbb Z} \left\{\operatorname{sgn}(\nu)-E\left(\left(\nu+\frac{\operatorname{Im}(u)}{\operatorname{Im}(\tau)}\right)\sqrt{2\operatorname{Im}(\tau)}\right)\right\}(-1)^{\nu-\frac12}e^{-\pi i\nu^2\tau-2\pi i\nu u}, \] where \[ E(z):=2\int_0^ze^{-\pi t^2}dt. \] The function $R$ transforms like a (nonholomorphic) mock Jacobi form as follows. 
\begin{proposition}[Propositions 1.9 and 1.10, \cite{Zwegers1}]\label{Rtransform} The function $R$ satisfies the following transformation properties: \begin{enumerate} \item $R(u+1;\tau) = -R(u; \tau),$\\ \item $R(u; \tau) + e^{-2\pi i u - \pi i\tau}R(u+\tau; \tau) = 2e^{-\pi i u - \pi i \tau/4}$,\\ \item $R(u;\tau) = R(-u;\tau)$, \\ \item $R(u;\tau+1)=e^{-\frac{\pi i}{4}} R(u;\tau)$,\\ \item $\frac{1}{\sqrt{-i\tau}} e^{\pi i u^2/\tau} R\left(\frac{u}{\tau};-\frac{1}{\tau}\right)+R(u;\tau)=h(u;\tau),$ where the Mordell integral is defined by \begin{align}\label{def_hmordell} h(u;\tau):=\int_\mathbb R \frac{e^{\pi i\tau t^2-2\pi ut}}{\cosh \pi t} dt. \end{align} \end{enumerate} \end{proposition} Using the functions $\vartheta$ and $R$, Zwegers defined the nonholomorphic function \begin{align}\label{AminusDef} \mathscr R_3 (u, v;\tau) :=& \frac{i}{2} \sum_{j=0}^{2} e^{2\pi i j u} \vartheta(v + j\tau + 1; 3\tau) R(3u - v - j\tau - 1; 3\tau)\\ =& \frac{i}{2} \sum_{j=0}^{2} e^{2\pi i j u} \vartheta(v + j\tau; 3\tau) R(3u - v - j\tau; 3\tau),\nonumber \end{align} where the equality of the two expressions in \eqref{AminusDef} is justified by Proposition \ref{thetaTransform} and Proposition \ref{Rtransform}. This function is used to complete the level three Appell function (see \cite{Zwegers2} or \cite{BFOR}) \begin{align*} A_3(u, v; \tau) := e^{3\pi i u} \sum_{n\in\mathbb Z} \frac{(-1)^n q^{3n(n+1)/2}e^{2\pi i nv}}{1 - e^{2\pi i u}q^n},\end{align*} where $u,v \in \mathbb C$, as \begin{align*}\label{def_A3hat} \widehat{A}_3(u, v; \tau) := A_3(u, v;\tau) + \mathscr R_3(u, v;\tau). \end{align*} This completed function transforms like a (non-holomorphic) Jacobi form, and in particular satisfies the following elliptic transformation.
\begin{theorem}[{\cite[Theorem 2.2]{Zwegers2}}]\label{completeAtransform} For $n_1, n_2, m_1, m_2\in\mathbb Z$, the completed level $3$ Appell function $\widehat{A}_3$ satisfies \[\widehat{A}_3(u + n_1\tau + m_1, v + n_2\tau + m_2; \tau) = (-1)^{n_1 + m_1}e^{2\pi i (u(3n_1 - n_2) - vn_1)}q^{3n_1^2/2 - n_1n_2}\widehat{A}_3(u, v; \tau).\] \end{theorem} The following relationship between the Appell series $A_3$ and the combinatorial series $R_n$ is proved in \cite{F-K}. \begin{props}[{\cite[Proposition 4.2]{F-K}}] Under the hypotheses given above on $\boldsymbol{\zeta_n}$, we have that \[ R_n(\boldsymbol{\zeta_n};q) =\frac{1}{(q)_\infty} \sum_{j=1}^n\left(\zeta_{2\beta_j}^{-3\alpha_j}-\zeta_{2\beta_j}^{-\alpha_j}\right)\frac{A_3\left(\frac{\alpha_j}{\beta_j},-2\tau;\tau\right)}{\Pi^\dag_j( {\boldsymbol{\alpha_n}})}. \] \end{props} We also note that \begin{align*} \widehat{\mathcal A}_n\left( \tau \right) = \frac{1}{\eta(\tau)}\sum_{j=1}^{n} (\zeta_{2\beta_j}^{-3\alpha_j}-\zeta_{2\beta_j}^{-\alpha_j})\frac{\widehat{A}_3\left(\frac{\alpha_j}{\beta_j},-2\tau;\tau\right)}{\Pi_{j}^\dag({\boldsymbol{\alpha_n}})}. \end{align*} In addition to working with the Appell sum $\widehat{A}_3$, we also make use of additional properties of the functions $h$ and $R$. In particular, Zwegers also showed how under certain hypotheses, these functions can be written in terms of integrals involving the weight $3/2$ modular forms $g_{a,b}(\tau)$, defined for $a,b\in\mathbb R$ and $\tau \in \mathbb H$ by \begin{align}\label{def_gab} g_{a,b}(\tau) := \sum_{\nu \in a + \mathbb Z} \nu e^{\pi i \nu^2\tau + 2\pi i \nu b}. \end{align} We will make use of the following results. 
\begin{proposition}[{\cite[Proposition 1.15 (1), (2), (4), (5)]{Zwegers1}}]\label{prop_Zg} The function $g_{a,b}$ satisfies: \begin{enumerate} \item[(1)] $g_{a+1,b}(\tau)= g_{a,b}(\tau)$, \\ \item[(2)] $g_{a,b+1}(\tau)= e^{2\pi ia} g_{a,b}(\tau)$,\\ \item[(3)] $g_{a,b}(\tau+1)= e^{-\pi ia(a+1)}g_{a,a+b+\frac12}(\tau)$,\\ \item[(4)] $g_{a,b}(-\frac{1}{\tau})= i e^{2\pi iab} (-i\tau)^{\frac32}g_{b,-a}(\tau)$. \end{enumerate} \end{proposition} \begin{theorem}[{\cite[Theorem 1.16 (2)]{Zwegers1}}]\label{thm_Zh2} Let $\tau \in \mathbb H$. For $a,b \in (-\frac12,\frac12)$, we have $$h(a\tau-b;\tau) = - e\left(\tfrac{a^2\tau}{2} - a(b+\tfrac12)\right) \int_{0}^{i\infty} \frac{g_{a+\frac12,b+\frac12}(\rho)}{\sqrt{-i(\rho+\tau)}}d\rho.$$ \end{theorem} \section{The quantum set}\label{quantumSet} We call a subset $S \subseteq \mathbb Q$ a {\em quantum set} for a function $F$ with respect to the group $G\subseteq \textnormal{SL}_2(\mathbb Z)$ if both $F(x)$ and $F(Mx)$ exist (are non-singular) for all $x\in S$ and $M\in G$. In this section, we will show that $Q_{\boldsymbol{\zeta_n}}$ as defined in \eqref{qSetDef} is a quantum set for $\mathcal{A}_n$ with respect to the group $\Gamma_{\boldsymbol{\zeta_n}}$. Recall that $Q_{\boldsymbol{\zeta_n}}$ is defined as \begin{align*} Q_{\boldsymbol{\zeta_n}} := \left\{\frac{h}{k}\in \mathbb Q\; \middle\vert\; \begin{aligned} & \ h\in\mathbb Z, k\in\mathbb N, \gcd(h,k) = 1, \ \beta_j \nmid k\ \forall\ 1\le j\le n,\\&\left\vert \frac{\alpha_j}{\beta_j}k - \left[\frac{\alpha_j}{\beta_j}k\right]\right\vert > \frac{1}{6}\ \forall\ 1\le j\le n\end{aligned} \right\},\end{align*} where $[x]$ is the closest integer to $x$ (see Remark \ref{rmk:closest_int}). Moreover, recall that the ``holomorphic part'' we consider (see \S \ref{sec_results}) is $\mathcal A_n(\tau) = q^{-\frac{1}{24}} R_n(\boldsymbol{\zeta_n}; q)$. 
To show that $Q_{\boldsymbol{\zeta_n}}$ is a quantum set for $\mathcal A_n(\tau)$, we must first show that the multi-sum defining $R_n(\boldsymbol{\zeta_n}; \zeta_k^h)$ converges for $\frac{h}{k}\in Q_{\boldsymbol{\zeta_n}}$. In what follows, as in the definition of $Q_{\boldsymbol{\zeta_n}}$, we take $h\in\mathbb Z$, $k\in\mathbb N$ such that $\gcd(h,k) = 1$. We start by addressing the restriction that for $\frac{h}{k}\in Q_{\boldsymbol{\zeta_n}}$, $\beta_j\nmid k$ for all $1 \le j \le n$. \begin{lemma} For $\frac{h}{k}\in \mathbb Q$, all summands of $R_n(\boldsymbol{\zeta_n}; \zeta_k^h)$ are finite if and only if $\beta_j \nmid k$ for all $1\le j \le n$.\end{lemma} \begin{proof} Examining the multi-sum $R_n(\boldsymbol{\zeta_n}; \zeta_k^h)$, we see that all terms are a power of $\zeta_k^h$ divided by a product of factors of the form $1 - \zeta_{\beta_j}^{\pm\alpha_j} \zeta_k^{hm}$ for some integer $m\ge 1$. Therefore, to have each summand be finite, it is enough to ensure that $1 - \zeta_{\beta_j}^{\pm\alpha_j} \zeta_k^{hm} \neq 0$ for all $m\ge 1$ and for all $1\le j \le n$. For ease of notation in this proof, we will omit the subscripts for $\alpha_j$ and $\beta_j$. If $1 - \zeta_{\beta}^{\pm\alpha} \zeta_k^{hm} = 0$ for some $m\in\mathbb N$, we have that \[\pm\frac{\alpha}{\beta} + \frac{hm}{k} \in\mathbb Z.\] Let $K = \textnormal{lcm}(\beta, k) = \beta\beta^\prime = kk^\prime$. Then $\pm\frac{\alpha}{\beta} + \frac{hm}{k} \not\in\mathbb Z$ is the same as $\pm\alpha\beta^\prime + hmk^\prime \not\in K\mathbb Z$. Since $K = kk^\prime$, if $k^\prime \nmid \alpha\beta^\prime$, this is always true and we do not have a singularity. However, since $K = \beta\beta^\prime = kk^\prime$, if $k^\prime \vert \alpha\beta^\prime$, then $\frac{\beta\beta^\prime}{k} \vert \alpha\beta^\prime$. This implies that $\beta \vert \alpha k$ and that $\beta \vert k$ since $\gcd(\alpha, \beta) = 1$.
Therefore, if $\beta\nmid k$, it is always the case that $k^\prime\nmid \alpha\beta^\prime$, so for all $m\in\mathbb N$, \[\pm\frac{\alpha}{\beta} + \frac{hm}{k} \not\in\mathbb Z.\] \end{proof} Now that we have shown that all summands in $R_n(\boldsymbol{\zeta_n}; \zeta_k^h)$ are finite for $\frac{h}{k}\in Q_{\boldsymbol{\zeta_n}}$, we will show that the sum converges. \begin{theorem}\label{qsProof} For $\boldsymbol{\zeta_n}$ as in \eqref{zetavec}, if $\frac{h}{k} \in Q_{\boldsymbol{\zeta_n}}$, then $R_n(\boldsymbol{\zeta_n}; \zeta_k^h)$ converges and can be evaluated as a finite sum. In particular, we have that: \begin{multline} \label{eqn_Rnconvsum} R_n(\boldsymbol{\zeta_n}; \zeta_k^h) = \prod_{j=1}^n \frac{1}{1 - ((1-x_j^k)(1-x_j^{-k}))^{-1}}\\ \times \!\!\!\!\! \sum_{\substack{0 < m_1\le k\\ 0 \le m_2, \dots, m_n < k}} \frac{\zeta_k^{h[(m_1 + m_2 + \dots + m_n)^2 + (m_1 + \dots + m_{n-1}) + (m_1 + \dots + m_{n-2}) + \dots + m_1]}}{(x_1\zeta_k^h;\zeta_k^h)_{m_1} \left(\frac{\zeta_k^h}{x_1};\zeta_k^h\right)_{m_1} (x_2 \zeta_k^{hm_1};\zeta_k^h)_{m_2 + 1} \left(\frac{\zeta_k^{hm_1}}{x_2};\zeta_k^h\right)_{m_2+1}} \\ \times \frac{1}{(x_3 \zeta_k^{h(m_1 + m_2)};\zeta_k^h)_{m_3 + 1}\!\!\left(\frac{\zeta_k^{h(m_1 + m_2)}}{x_3};\zeta_k^h\!\right)_{\!m_3 + 1} \!\!\!\!\!\!\!\!\!\!\cdots(x_n \zeta_k^{h(m_1 + \dots + m_{n-1})};\zeta_k^h)_{ m_n+1} \!\!\left(\!\frac{\zeta_k^{h(m_1 + \dots + m_{n-1})}}{x_n};\zeta_k^h\!\right)_{\! m_n+1} }, \end{multline} where $\boldsymbol{\zeta_n} = (x_1, x_2, \dots, x_n)$. \end{theorem} \begin{proof}[Proof of Theorem \ref{qsProof}] We start by taking $\frac{h}{k} \in Q_{\boldsymbol{\zeta_n}}$, and write $\zeta = \zeta_k^h$. For ease of notation, we will use $x_j$ to denote the $j$-th component in $\boldsymbol{\zeta_n}$, so $x_j = e^{2\pi i \alpha_j/\beta_j}$. Further, for clarity of argument, we will carry out the proof in the case of $n = 2$, with comments throughout about how the proof follows for $n > 2$.
We have that \begin{align} \nonumber R_2((x_1,x_2); \zeta) =& \sum_{\substack{m_1 > 0\\ m_2\ge 0}} \frac{\zeta^{(m_1+m_2)^2 + m_1}}{(x_1\zeta;\zeta)_{m_1}(x_1^{-1}\zeta;\zeta)_{m_1}(x_2\zeta^{m_1};\zeta)_{m_2+1}(x_2^{-1}\zeta^{m_1};\zeta)_{m_2+1}}\\ \label{sumRearranged3} =&\sum_{M_1, M_2 \ge 0} \frac{1}{(1 - x_1^k)^{M_1} (1 - x_1^{-k})^{M_1} (1-x_2^k)^{M_2}(1-x_2^{-k})^{M_2}}\\ \label{sumRearranged2} &\times \sum_{\substack{0 < s_1 \le k\\ 0\le s_2 < k}}\frac{\zeta^{(s_1+s_2)^2+s_1}}{(x_1\zeta;\zeta)_{s_1}(x_1^{-1}\zeta;\zeta)_{s_1}(x_2\zeta^{s_1};\zeta)_{s_2+1}(x_2^{-1}\zeta^{s_1};\zeta)_{s_2+1}}, \end{align} where we have let $m_j = s_j + M_j k$ for $0 < s_1 \le k$, $0 \le s_2 < k$, and $M_j\in\mathbb N_0$, and have used the fact that \[ (x\zeta^r;\zeta)_{s+Mk} = (1 - x^k)^M \; (x\zeta^r; \zeta)_s ,\] which holds for any $M, r, s\in\mathbb N_0$. (We note that for $n > 2$, we proceed as above, additionally taking $0 \le s_j \le k-1$ for $j > 2$.) The second sum in \eqref{sumRearranged2} is a finite sum, as desired. For the first sum in (\ref{sumRearranged3}) we notice that we in fact have the product of two geometric series, each of the form \[\sum_{M_j\ge 0} \left(\frac{1}{ (1-x_j^k)( 1 - x_j^{-k})}\right)^{M_j}.\] By definition, we have $x_j = \cos\theta_j + i\sin\theta_j$ where $\theta_j = \frac{2\pi\alpha_j}{\beta_j}$. Therefore, this sum converges if and only if \begin{align*} \vert 1 - x_j^k\vert \vert 1 - x_j^{-k}\vert =2 - 2\cos(k\theta_j) > 1 \iff \cos(k\theta_j) < \frac{1}{2}. \end{align*} For $\cos(k\theta_j) < \frac{1}{2}$, it must be that $k\theta_j = r + 2\pi M$ where $-\pi < r \le \pi$, $\vert r \vert > \frac{\pi}{3}$, and $M \in\mathbb Z$. This is equivalent to saying \[\left\vert \frac{\alpha_j}{\beta_j}k - \left[\frac{\alpha_j}{\beta_j}k\right]\right\vert > \frac{1}{6}\ \ \forall\ 1\le j\le n,\] as in the definition of $Q_{\boldsymbol{\zeta_n}}$ in \eqref{qSetDef}.
Therefore, we see that for $\frac{h}{k}\in Q_{\boldsymbol{\zeta_n}}$, $R_2((x_1, x_2); \zeta)$ converges to the claimed expression in \eqref{eqn_Rnconvsum}. We note that by Abel's theorem, having shown convergence of $R_2((x_1, x_2); \zeta)$, we have that $R_2((x_1, x_2); q)$ converges to $R_2((x_1, x_2); \zeta)$ as $q\to\zeta$ radially within the unit disc. As noted, the above argument extends to $n > 2$. Letting $m_j = s_j + M_j k$ with $0 < s_1 \le k$ and $0 \le s_j < k$ for $j \ge 2$, rewriting as in \eqref{eqn_Rnconvsum}, and then summing the resulting geometric series gives the desired exact formula for $R_n(\boldsymbol{\zeta_n}; \zeta)$. \end{proof} To complete the argument that $Q_{\boldsymbol{\zeta_n}}$ is a quantum set for $R_n(\boldsymbol{\zeta_n}; \zeta)$ with respect to $\Gamma_{\boldsymbol{\zeta_n}}$, it remains to be seen that $R_n(\boldsymbol{\zeta_n}; \xi)$ converges, where $\xi = e^{2\pi i \gamma(\frac{h}{k})}$ for $\frac{h}{k}\in Q_{\boldsymbol{\zeta_n}}$ and $\gamma\in \Gamma_{\boldsymbol{\zeta_n}}$, defined in \eqref{eqn:GroupDefn}. For the convenience of the reader, we recall from \eqref{def_ell} and \eqref{eqn:GroupDefn} that \begin{align*} \Gamma_{\boldsymbol{\zeta_n}} := \left\langle \left(\begin{matrix} 1 & 1\\ 0 & 1\end{matrix}\right), \left(\begin{matrix} 1 & 0\\ \ell & 1\end{matrix}\right)\right\rangle,\end{align*} where \[\ell = \ell_{\beta} := \begin{cases} 6\left[\text{lcm}(\beta_1, \dots, \beta_{k})\right]^2 &\text{ if $\forall j$, $3\nmid \beta_j$}\\ 2\left[\text{lcm}(\beta_1, \dots, \beta_{k})\right]^2 &\text{ if $\exists j$, $3 \mid \beta_j$.}\end{cases}\] The convergence of $R_n(\boldsymbol{\zeta_n}; \xi)$ is a direct consequence of the following lemma.
\begin{lemma}\label{setClosed} The set $Q_{\boldsymbol{\zeta_n}}$ is closed under the action of $\Gamma_{\boldsymbol{\zeta_n}}$.\end{lemma} \begin{proof} Since $\Gamma_{\boldsymbol{\zeta_n}}$ is given as a set with two generators, it is enough to show that $Q_{\boldsymbol{\zeta_n}}$ is closed under the action of each of those generators. Let $\frac{h}{k}\in Q_{\boldsymbol{\zeta_n}}$. Then $\sm{1}{1}{0}{1}\frac{h}{k} = \frac{h + k}{k}$. Note that $\gcd(h+k, k) = \gcd(h,k) = 1$ and we already know that $k$ satisfies the conditions in the definition of $Q_{\boldsymbol{\zeta_n}}$. Therefore, $\sm{1}{1}{0}{1}\frac{h}{k}\in Q_{\boldsymbol{\zeta_n}}$. Under the action of $\sm{1}{0}{\ell}{1}$, we have \[\left(\begin{array}{cc}1 & 0 \\ \ell & 1\end{array}\right)\frac{h}{k} = \frac{h}{h\ell + k}.\] We first note that $\gcd(h, h\ell + k) = \gcd(h,k) = 1$, and $\beta_j \nmid (h\ell + k)$ as $\beta_j \mid \ell$ and $\beta_j \nmid k$. It remains to check that \[\left\vert \frac{\alpha_j}{\beta_j}(h\ell + k) - \left[\frac{\alpha_j}{\beta_j}(h\ell + k)\right]\right\vert > \frac{1}{6}\ \forall\ 1\le j\le n.\] We have that \begin{align}\nonumber \left\vert \frac{\alpha_j}{\beta_j} (h \ell + k) - \left[\frac{\alpha_j}{\beta_j}(h\ell + k)\right]\right\vert &= \left\vert \frac{\alpha_j h\ell}{\beta_j} + \frac{\alpha_j}{\beta_j}k - \left[\frac{\alpha_j h\ell}{\beta_j} + \frac{\alpha_j}{\beta_j}k\right]\right\vert\\ &= \left\vert\frac{\alpha_j}{\beta_j} k - \left[\frac{\alpha_j}{\beta_j} k\right]\right\vert > \frac{1}{6},\label{closestIntSimplification} \end{align} where we can simplify as in \eqref{closestIntSimplification} since, by definition of $\ell$, $\frac{\alpha_j\ell}{\beta_j} \in\mathbb Z$. Thus, $Q_{\boldsymbol{\zeta_n}}$ is closed under the action of $\Gamma_{\boldsymbol{\zeta_n}}$. \end{proof} \section{Proof of Theorem \ref{thm_main_N0}}\label{n0proof} We now prove Theorem \ref{thm_main_N0}.
Our first goal is to establish that $H_{n,\gamma}$ is analytic in $x$ on $\mathbb{R} - \{\frac{-c}{d}\}$ for all $x\in Q_{\boldsymbol{\zeta_n}}$ and $\gamma = \left(\begin{smallmatrix}a & b \\ c & d \end{smallmatrix}\right) \in \Gamma_{\boldsymbol{\zeta_n}}$. As shown in Section \ref{quantumSet}, we have that $\mathcal A_n(x)$ and $\mathcal A_n(\gamma x)$ are defined for all $x\inQ_{\boldsymbol{\zeta_n}}$ and $\gamma\in \Gamma_{\boldsymbol{\zeta_n}}$. Note that it suffices to consider only the generators $S_\ell$ and $T$ of $\Gamma_{\boldsymbol{\zeta_n}}$, since $$H_{n,\gamma \gamma'}(\tau)= H_{n,\gamma'}(\tau) + \chi_{\gamma'}(C\tau+D)^{-\frac12} H_{n,\gamma}(\gamma'\tau)$$ for $\gamma = \left(\begin{smallmatrix}a & b \\ c & d \end{smallmatrix}\right)$ and $\gamma' = \left(\begin{smallmatrix}A & B \\ C & D \end{smallmatrix}\right)$. First, consider $\gamma = T$. Then by definition, $\chi_T=\zeta_{24}$, and so $H_{n,T}(x) = \mathcal A_n(x) - \zeta_{24} \mathcal A_n(x+1)$. When we map $x \mapsto x +1$, $q=e^{2\pi i x}$ remains invariant. Then since the definition of $R_{n}(x)$ in \eqref{rkorigdef} can be expressed as a series only involving integer powers of $q$, it is also invariant. Thus $$\mathcal A_n(x+1)=e^{\frac{ -2\pi i (x+1)}{24}}R_{n}(x) = \zeta_{24}^{-1}\mathcal A_n(x),$$ and so $H_{n,T}(x)=0$. We now consider the case $\gamma = S_\ell$. In this case using \eqref{Chi_gammaForm} we calculate that $\chi_{S_\ell}=\zeta_{24}^{-\ell}$. Thus, $$H_{n,S_\ell}(x) = \mathcal A_n(x) - \zeta_{24}^{-\ell}(\ell x +1)^{-\frac12} \mathcal A_n(S_\ell x).$$ From the modularity of $\widehat{\mathcal A}_n$ we have that $\widehat{\mathcal A}_n(x) = \zeta_{24}^{-\ell}(\ell x +1)^{-\frac12}\widehat{\mathcal A}_n(S_\ell x)$. Thus \eqref{Ahat} and \eqref{rel_AR} give that \begin{equation}\label{eq:HviaA-} H_{n,S_\ell}(x) =-\mathcal A_n ^-(x)+\zeta_{24}^{-\ell}(\ell x +1)^{-\frac12}\mathcal A_n^-(S_\ell x), \end{equation} where $\mathcal A_n^-$ is defined in \eqref{def_A-}. 
Using the Jacobi triple product identity from Proposition \ref{thetaTransform} item (3), we can simplify the theta functions to get that $\vartheta\left(-2\tau ;3\tau\right) = iq^{-\frac23}\eta(\tau)$, $\vartheta\left(-\tau ;3\tau\right) = iq^{-\frac16}\eta(\tau)$, and $\vartheta\left(0;3\tau\right) = 0$. Thus, \begin{align*} \mathscr{R}_3\left( \frac{\alpha_j}{\beta_j},-2\tau ; \tau \right) = - \frac12 q^{-\frac23} \eta(\tau) \sum_{\delta=0}^1 e\left(\frac{\alpha_j}{\beta_j} \delta \right) q^{\frac{\delta}{2}} R\left(\frac{3\alpha_j}{\beta_j} + (2-\delta)\tau ; 3\tau \right). \end{align*} Using Proposition \ref{Rtransform} item (2), we can rewrite $$R\left(\frac{3\alpha_j}{\beta_j} + 2\tau ; 3\tau \right) = 2e\left(\frac{3\alpha_j}{2\beta_j} \right) q^{\frac58} - e \left(\frac{3\alpha_j}{\beta_j} \right) q^{\frac12} R\left(\frac{3\alpha_j}{\beta_j} - \tau; 3\tau \right),$$ so that \begin{multline}\notag \sum_{\delta=0}^1 e\left(\frac{\alpha_j}{\beta_j} \delta \right) q^{\frac{\delta}{2}} R\left(\frac{3\alpha_j}{\beta_j} + (2-\delta)\tau ; 3\tau \right) = \\ 2e\left(\frac{3\alpha_j}{2\beta_j} \right) q^{\frac58} + e \left(\frac{2\alpha_j}{\beta_j} \right) q^{\frac12} \sum_{\pm} \pm e \left(\mp \frac{\alpha_j}{\beta_j} \right)R\left(\frac{3\alpha_j}{\beta_j} \pm \tau; 3\tau \right). \end{multline} Thus we see that \begin{multline}\label{eq:F-} \mathcal A_n^-(\tau) = -\frac12 \sum_{j=1}^{n}\frac{(\zeta_{2\beta_j}^{-3\alpha_j} - \zeta_{2\beta_j}^{-\alpha_j})}{\displaystyle\Pi^\dag_{j} ( {\boldsymbol{\alpha_k}})} e \left(\frac{2\alpha_j}{\beta_j} \right) q^{-\frac16} \sum_{\pm} \pm e \left(\mp \frac{\alpha_j}{\beta_j} \right) R\left(\frac{3\alpha_j}{\beta_j} \pm \tau; 3\tau \right) \\ -q^{-\frac{1}{24}} \sum_{j=1}^{n}\frac{(\zeta_{2\beta_j}^{-3\alpha_j} - \zeta_{2\beta_j}^{-\alpha_j})}{\displaystyle\Pi^\dag_{j} ( {\boldsymbol{\alpha_k}})} e \left(\frac{3\alpha_j}{2\beta_j} \right). 
\end{multline} Now to compute $\mathcal A_n^-(S_\ell \tau)$ we first define \begin{align*} F_{\alpha,\beta}(\tau):= q^{-\frac16} \sum_{\pm} \pm e \left(\mp \frac{\alpha}{\beta} \right) R\left(\frac{3\alpha}{\beta} \pm \tau; 3\tau \right). \end{align*} Then by \eqref{eq:HviaA-} and \eqref{eq:F-} we can write \begin{multline*} H_{n,S_\ell}(\tau) = \frac12 \sum_{j=1}^{n}\frac{(\zeta_{2\beta_j}^{-3\alpha_j} - \zeta_{2\beta_j}^{-\alpha_j})}{\displaystyle\Pi^\dag_{j} ( {\boldsymbol{\alpha_k}})} e \left(\frac{2\alpha_j}{\beta_j} \right) \left[ F_{\alpha_j,\beta_j}(\tau) - \zeta_{24}^{-\ell}(\ell \tau +1)^{-\frac12}F_{\alpha_j,\beta_j}(S_\ell \tau) \right] \\ +\sum_{j=1}^{n}\frac{(\zeta_{2\beta_j}^{-3\alpha_j} - \zeta_{2\beta_j}^{-\alpha_j})}{\displaystyle\Pi^\dag_{j} ( {\boldsymbol{\alpha_k}})} (\ell\tau+1)^{-\frac12}\zeta_{24}^{-\ell}\mathcal E_1\left(\frac{\alpha_j}{\beta_j},\ell;\tau\right), \end{multline*} where \begin{equation}\label{def_mathcalE} \mathcal E_1\left(\frac{\alpha}{\beta},\ell;\tau\right):=(\ell\tau+1)^{\frac12} \zeta_{24}^\ell q^{-\frac{1}{24}}e\left(\frac32 \frac{\alpha}{\beta} \right) - e\left(\frac{-S_\ell\tau}{24} \right)e\left(\frac32 \frac{\alpha}{\beta} \right). \end{equation} Thus in order to prove that $H_{n,S_\ell}(x)$ is analytic on $\mathbb{R} - \{\frac{-1}{\ell}\}$ it suffices to show that for each $1\leq j \leq n$, \begin{align*} G_{\alpha_j,\beta_j }(\tau) := F_{\alpha_j,\beta_j}(\tau) - \zeta_{24}^{-\ell}(\ell \tau +1)^{-\frac12}F_{\alpha_j,\beta_j}(S_\ell \tau) \end{align*} is analytic on $\mathbb{R} - \{\frac{-1}{\ell}\}$. We establish this in Proposition \ref{prop_Habanalytic} below. \begin{proposition} \label{prop_Habanalytic} Fix $1\leq j \leq n$ and set $(\alpha, \beta) := (\alpha_j, \beta_j)$. 
With notation and hypotheses as above, we have that \begin{align*} G_{\alpha,\beta}(\tau) = \sqrt{3}\sum_{\pm}\mp e\left(\mp\frac16\right) \int_{\frac{1}{\ell}}^{i\infty}\frac{g_{\pm\frac13 + \frac12, \frac12-3\frac{\alpha}{\beta}}(3\rho)}{\sqrt{-i(\rho+\tau)}}d\rho, \end{align*} which is analytic on $\mathbb R - \left \{\frac{-1}{\ell}\right \}$. \end{proposition} \begin{proof} Fix $1\leq j \leq n$ and set $(\alpha, \beta) := (\alpha_j, \beta_j)$. Define $m := \left[\frac{3\alpha}{\beta} \right] \in \mathbb{Z}$, $r\in (-\frac12, \frac12)$ so that $\frac{3\alpha}{\beta} =m + r$. We note that $r\neq \pm \frac12$ since $\beta\neq 2$. Using Proposition \ref{Rtransform} (1), we have that \begin{equation}\label{eq:F_jtau} F_{\alpha,\beta}(\tau) = q^{-\frac16} \sum_{\pm} \pm e \left(\frac{\mp r}{3} \right) e \left( \frac{\mp m}{3} \right) (-1)^{m} R\left( \pm \tau + r; 3\tau \right). \end{equation} Letting $\tau_\ell := -\frac{1}{\tau} - \ell$ we have $S_\ell \tau = \frac{-1}{\tau_\ell}$. Using Proposition \ref{Rtransform} (5) with $u=\frac{r}{3} \tau_\ell \mp \frac13$ and $\tau \mapsto \frac{\tau_\ell}{3}$ we see that \begin{multline}\label{eq:h1} R\left(r \mp \frac{1}{\tau_\ell} ; \frac{-3}{\tau_\ell} \right) = \\ \sqrt{ \frac{-i\tau_\ell}{3} } \cdot e\left(-\frac12\left(\frac{r\tau_\ell}{3} \mp \frac13\right)^2\left(\frac{3}{\tau_\ell}\right)\right)\left[h\left(\frac{r\tau_\ell}{3} \mp \frac13; \frac{\tau_\ell}{3} \right) - R\left(\frac{r\tau_\ell}{3} \mp \frac13; \frac{\tau_\ell}{3} \right) \right]. \end{multline} Using Proposition \ref{Rtransform} parts (1) and (4) we see that $R\left(\frac{r\tau_\ell}{3} \mp \frac13; \frac{\tau_\ell}{3} \right) = \zeta_{24}^\ell R\left( \frac{-r}{3\tau} \mp \frac13 ; \frac{-1}{3\tau} \right)$. 
Then using Proposition \ref{Rtransform} (5) with $u=\mp \tau - r$ and $\tau \mapsto 3\tau$ we obtain that \[ R\left(\frac{r\tau_\ell}{3} \mp \frac13; \frac{\tau_\ell}{3} \right) = \zeta_{24}^\ell \sqrt{-i(3\tau)} \cdot e\left(\frac{-\left(\mp\tau - r\right)^2}{6\tau} \right) \left[h\left( \mp\tau - r; 3\tau\right) - R\left( \mp\tau - r ; 3\tau \right) \right], \] which together with \eqref{eq:F_jtau} and \eqref{eq:h1} gives \begin{multline} \notag F_{\alpha,\beta}(S_\ell \tau) = \\ e\left(\frac{1}{6\tau_\ell} \right) \sum_{\pm} \pm e \left(\frac{\mp r}{3} \right) e \left( \frac{\mp m}{3} \right) (-1)^{m} \sqrt{ \frac{-i\tau_\ell}{3} } \cdot e\left(-\frac12\left(\frac{r\tau_\ell}{3} \mp \frac13\right)^2\left(\frac{3}{\tau_\ell}\right)\right) \cdot \\ \left[h\left(\frac{r\tau_\ell}{3} \mp \frac13; \frac{\tau_\ell}{3} \right) - \zeta_{24}^\ell \sqrt{-i(3\tau)} \cdot e\left(\frac{-\left(\mp\tau - r\right)^2}{6\tau}\right) \left[h\left( \mp\tau - r; 3\tau\right) - R\left( \mp\tau - r ; 3\tau \right)\right] \right]. \end{multline} By the definition of $r$ and $\ell$ we have that $\frac{r^2\ell}{6} \in \mathbb{Z}$. 
Simplifying thus gives that \begin{multline*} F_{\alpha,\beta}(S_\ell \tau) = \sum_{\pm} \pm (-1)^{m} e\left( \frac{\mp m }{3}\right) e\left(\frac{r^2}{6\tau} \right) \sqrt{\frac{-i\tau_\ell}{3}} h\left( \frac{r\tau_\ell}{3} \mp \frac13 ; \frac{\tau_\ell}{3}\right) \\ - \sum_{\pm} \pm (-1)^{m} e\left( \frac{\mp m}{3}\right) e\left(\frac{\mp r}{3} \right) q^{-\frac16} \zeta_{24}^\ell (\ell\tau + 1)^{\frac12} \cdot h\left(\mp \tau - r ; 3\tau \right) \\ + \sum_{\pm} \pm (-1)^{m} e\left( \frac{\mp m}{3}\right) e\left(\frac{\mp r}{3} \right) q^{-\frac16} \zeta_{24}^\ell (\ell\tau + 1)^{\frac12} \cdot R\left(\mp \tau - r ; 3\tau \right), \end{multline*} and so using Proposition \ref{Rtransform} (3) and the fact that $h(u;\tau)=h(-u;\tau)$ which comes directly from the definition of $h$ in \eqref{def_hmordell}, we see that \begin{multline}\notag G_{\alpha,\beta}(\tau) = q^{-\frac16} \sum_{\pm}\pm (-1)^{m} e\left( \frac{\mp m}{3}\right) e\left(\frac{\mp r}{3} \right)h\left(\pm \tau + r ; 3\tau \right) \\ - \sum_{\pm}\pm (-1)^{m} e\left( \frac{\mp m}{3}\right) e\left(\frac{r^2}{6\tau}\right) \zeta_{24}^{-\ell} \sqrt{\frac{i}{3\tau}} \cdot h\left( \frac{r\tau_\ell}{3} \mp \frac13 ; \frac{\tau_\ell}{3}\right). \end{multline} We now use Theorem \ref{thm_Zh2} to convert the $h$ functions into integrals. Letting $a=\frac{\pm 1}{3}$, $b=-r$, and $\tau \mapsto 3\tau$ gives that \[ h\left(\pm \tau + r ; 3\tau \right) = -q^{\frac16} \zeta_6^{\mp 1} e\left(\frac{\pm r}{3} \right) \int_{0}^{i\infty} \frac{g_{\pm\frac13 + \frac12, \frac12 -r}(z) dz}{\sqrt{-i(z+3\tau)}}. 
\] Letting $a=r$, $b=\frac{\pm 1}{3}$, and $\tau \mapsto \frac{\tau_\ell}{3}$ gives that \[h\left( \frac{r\tau_\ell}{3} \mp \frac13 ; \frac{\tau_\ell}{3}\right) = -e\left(\frac{-r^2}{6\tau} \right) e\left(\frac{\mp r}{3} \right) e\left(\frac{-r}{2} \right) \int_{0}^{i\infty} \frac{g_{r + \frac12, \pm\frac13 + \frac12}(z) dz}{\sqrt{-i\left(z+\frac{\tau_\ell}{3} \right)}}.\] Thus \begin{multline}\notag G_{\alpha,\beta}(\tau) = -\sum_{\pm} \pm \zeta_6^{\mp1}(-1)^{m} e\left( \frac{\mp m}{3}\right) \int_{0}^{i\infty} \frac{g_{\pm\frac13 + \frac12, \frac12 -r}(z) dz}{\sqrt{-i(z+3\tau)}} \\ +\sum_{\pm} \pm \zeta_{24}^{-\ell}(-1)^{m} e\left( \frac{\mp m}{3}\right)e\left(\frac{\mp r}{3} \right) e\left(\frac{-r}{2} \right) \sqrt{\frac{i}{3\tau}} \int_{0}^{i\infty} \frac{g_{r+ \frac12, \pm\frac13 + \frac12}(z) dz}{\sqrt{-i\left(z+\frac{\tau_\ell}{3} \right)}}. \end{multline} By a simple change of variables (let $z=\frac{\ell}{3} - \frac{1}{z}$) we can write \begin{equation} \label{eq:int_convert} \int_{0}^{i\infty} \frac{g_{r + \frac12, \pm\frac13 + \frac12}(z) dz}{\sqrt{-i\left(z+\frac{\tau_\ell}{3} \right)}} = -\sqrt{-3\tau} \int_{\frac{3}{\ell}}^{0} \frac{g_{r + \frac12, \pm\frac13 + \frac12}\left(\frac{\ell}{3} - \frac{1}{z} \right) dz}{z^{\frac32}\sqrt{-i(z+3\tau)}}. \end{equation} Moreover, using Proposition \ref{prop_Zg} we can convert \begin{multline} \label{eq:g_convert} g_{r + \frac12, \pm\frac13 + \frac12}\left(\frac{\ell}{3} - \frac{1}{z} \right) = \zeta_{24} ^\ell \cdot g_{r-\frac12, \pm\frac13 + \frac12}\left( \frac{-1}{z}\right) \\ = -\zeta_{24} ^\ell e\left(\frac18 \right) e\left(\frac{\mp1}{6} \right) e\left(\frac{\pm r}{3} \right) e\left(\frac{r}{2} \right) z^{\frac32} \cdot g_{\pm\frac13 + \frac12, \frac12 - r}(z). 
\end{multline} Thus by \eqref{eq:int_convert} and \eqref{eq:g_convert} we have that \begin{align} G_{\alpha,\beta}(\tau) &=- \sum_{\pm} \pm \zeta_6^{\mp1}(-1)^{m} e\left( \frac{\mp m}{3}\right) \int_{0}^{i\infty} \frac{g_{\pm\frac13 + \frac12, \frac12 -r}(z) dz}{\sqrt{-i(z+3\tau)}} \notag \\ &\hspace{.5in}- \sum_{\pm} \pm \zeta_6^{\mp1}(-1)^{m} e\left( \frac{\mp m}{3}\right) \int_{\frac{3}{\ell}}^{0}\frac{g_{\pm\frac13 + \frac12, \frac12 -r}(z) dz}{\sqrt{-i(z+3\tau)}} \notag \\ &= -\sum_{\pm} \pm \zeta_6^{\mp1}(-1)^{m} e\left( \frac{\mp m}{3}\right) \int_{\frac{3}{\ell}}^{i\infty}\frac{g_{\pm\frac13 + \frac12, \frac12 -r}(z) dz}{\sqrt{-i(z+3\tau)}}. \label{Hab_integral} \end{align} To complete the proof, one can deduce from Proposition \ref{prop_Zg} (2) that for $m\in\mathbb Z$, \begin{align*} g_{a,b}(\tau)=e(m a)g_{a,b-m}(\tau). \end{align*} Applying this to \eqref{Hab_integral} with a direct calculation gives us \[ G_{\alpha,\beta}(\tau) = \sqrt{3}\sum_{\pm}\mp e\left(\mp\frac16\right) \int_{\frac{1}{\ell}}^{i\infty}\frac{g_{\pm\frac13 + \frac12, \frac12-3\frac{\alpha}{\beta}}(3z)}{\sqrt{-i(z+\tau)}}dz, \] which is analytic on $\mathbb{R} - \{\frac{-1}{\ell}\}$ as desired. \end{proof} \section{Conclusion} We have proven that when we restrict to vectors $\boldsymbol{\zeta_n}$ which contain distinct roots of unity, the mock modular form $q^{-\frac{1}{24}} R_n(\boldsymbol{\zeta_n};q)$ is also a quantum modular form. When we consider the more general case where roots of unity in $\boldsymbol{\zeta_n}$ are allowed to repeat, the situation is significantly more complicated. In this setting, as shown in \cite{F-K}, the nonholomorphic completion of $q^{-\frac{1}{24}} R_n(\boldsymbol{\zeta_n};q)$ is not modular, but is instead a sum of two (nonholomorphic) modular forms of different weights. We will address this more general case in a forthcoming paper \cite{FJKS}. \end{document}
\begin{document} \begin{abstract} We study new statistics on permutations that are variations on the descent and the inversion statistics. In particular, we consider the \emph{alternating descent set} of a permutation $\sigma = \sigma_1\sigma_2\cdots\sigma_n$ defined as the set of indices $i$ such that either $i$ is odd and $\sigma_i > \sigma_{i+1}$, or $i$ is even and $\sigma_i < \sigma_{i+1}$. We show that this statistic is equidistributed with the \emph{$3$-descent set} statistic on permutations $\tilde{\sigma} = \sigma_1\sigma_2\cdots\sigma_{n+1}$ with $\sigma_1=1$, defined to be the set of indices $i$ such that the triple $\sigma_i \sigma_{i+1} \sigma_{i+2}$ forms an odd permutation of size $3$. We then introduce Mahonian inversion statistics corresponding to the two new variations of descents and show that the joint distributions of the resulting descent-inversion pairs are the same. We examine the generating functions involving \emph{alternating Eulerian polynomials}, defined by analogy with the classical Eulerian polynomials $\sum_{\sigma\in\mathfrak S_n} t^{\des(\sigma)+1}$ using alternating descents. For the alternating descent set statistic, we define the generating polynomial in two non-commutative variables by analogy with the $ab$-index of the Boolean algebra $B_n$, and make observations about it. By looking at the number of alternating inversions in alternating (down-up) permutations, we obtain a new $q$-analog of the Euler number $E_n$ and show how it emerges in a $q$-analog of an identity expressing $E_n$ as a weighted sum of Dyck paths. \end{abstract} \title{Variations on Descents and Inversions in Permutations} \section{Introduction}\label{intro} Specifying the descent set of a permutation can be thought of as giving information on how the elements are ordered locally, namely, which pairs of consecutive elements are ordered properly and which are not, the latter constituting the descents. 
The original idea that became the starting point of this research was to generalize descent sets to indicators of relative orders of $k$-tuples of consecutive elements, the next simplest case being $k=3$. In this case there are $6$ possible relative orders, and thus the analog of the descent set enumerator $\Psi_n(\aaa,\bbb)$, also known as the $ab$-index of the Boolean algebra $B_n$, would involve $6$ non-commuting variables. In order to defer overcomplication, to keep the number of variables at $2$, and to stay close to classical permutation statistics, we can divide triples of consecutive elements into merely ``proper'' or ``improper'', defined as having the relative order of an even or an odd permutation of size $3$, respectively. We call the improper triples \emph{$3$-descents}, and denote the set of positions at which $3$-descents occur in a permutation $\sigma$ by $D_3(\sigma)$. Computing the number of permutations with a given $3$-descent set $S$ yields a few immediate observations. For example, the number of permutations $\sigma\in\mathfrak S_n$ with~$D_3(\sigma)$ equal to a fixed subset $S\subseteq [n-2]$ is divisible by $n$. This fact becomes clear upon the realization that $D_3(\sigma)$ is preserved when the elements of $\sigma$ are cyclically shifted, so that $1$ becomes $2$, $2$ becomes $3$, and so on. As a result, it makes sense to focus on the set $\tilde{\mathfrak S}_n$ of permutations of $[n]$ with the first element equal to $1$. A second, less trivial observation arising from early calculations is that the number of permutations in $\tilde{\mathfrak S}_n$ whose $3$-descent set is empty is the Euler number $E_{n-1}$. This second observation follows from the equidistribution of the statistic $D_3$ on the set $\tilde{\mathfrak S}_{n+1}$ with another variation on the descent set statistic, this time on $\mathfrak S_n$, which we call the \emph{alternating descent set} (Theorem \ref{bijection_alt3}). 
It is defined as the set of positions $i$ at which the permutation has an \emph{alternating descent}, which is a regular descent if $i$ is odd or an ascent if $i$ is even. Thus the alternating descent set $\hat{D}(\sigma)$ of a permutation $\sigma$ is the set of places where $\sigma$ deviates from the alternating pattern. Many of the results in this paper that were originally motivated by the generalized descent statistic $d_3(\sigma) = |D_3(\sigma)|$ are actually given in terms of the alternating descent statistic $\hat{d}(\sigma) = |\hat{D}(\sigma)|$. We show that the alternating Eulerian polynomials, defined as $\hat{A}_n(t) := \sum_{\sigma\in\mathfrak S_n} t^{\hat{d}(\sigma)+1}$ by analogy with the classical Eulerian polynomials, have the generating function $$ \sum_{n\geq 1} \hat{A}_n(t)\cdot {u^n\over n!} = {t\left(1-h\bigl({u(t-1)}\bigr)\right) \over h\bigl(u(t-1)\bigr)-t} $$ where $h(x) = \tan x + \sec x$, so that the difference with the classical formula (\ref{des_inv_gf}) below (specialized at $q=1$) is only in that the exponential function is replaced by tangent plus secant (Theorem \ref{final_F}). A similar parallel becomes apparent in our consideration of the analog of the well known identity \begin{equation}\label{eulerian_identity} {A_n(t)\over (1-t)^{n+1}} = \sum_{m\geq 1} m^n t^m \end{equation} for $\hat{A}_n(t)$. Given a formal power series $f(x) = 1 + \sum_{n\geq 1} a_n x^n/n!$, we define the symmetric function $$ g_{f,n} := \sum_{\gamma\models n} {n\choose \gamma} \cdot a_{\gamma_1} a_{\gamma_2}\cdots \cdot M_\gamma, $$ where $\gamma$ runs over all compositions of $n$, and $$ M_\gamma := \sum_{i_1 < i_2 < \cdots} x_{i_1}^{\gamma_1} x_{i_2}^{\gamma_2}\cdots . 
$$ Then (\ref{eulerian_identity}) can be written as $$ {A_n(t)\over (1-t)^{n+1}} = \sum_{m\geq 1} g_{\exp,n}(1^m)\cdot t^m, $$ and we have $$ {\hat{A}_n(t)\over (1-t)^{n+1}} = \sum_{m\geq 1} g_{\tan+\sec,n}(1^m)\cdot t^m, $$ where $1^m$ denotes setting the variables $x_1$, $x_2$, \ldots, $x_m$ to $1$ and the remaining variables to $0$ (Proposition \ref{gfn_substitution}). In Section \ref{cd-stuff} we discuss the generating function $\hat{\Psi}(\aaa,\bbb)$ for the number of permutations in $\mathfrak S_n$ with a given alternating descent set $S\subseteq [n-1]$, denoted $\hat{\beta}_n(S)$, which is analogous to the generating polynomial $\Psi_n(\aaa,\bbb)$ for the regular descent set statistic mentioned earlier. The polynomial $\Psi_n(\aaa,\bbb)$ can be expressed as the $cd$-index $\Phi_n(\ccc,\ddd)$ of the Boolean algebra $B_n$, where $\ccc=\aaa+\bbb$ and $\ddd=\aaa\bbb+\bbb\aaa$. We show that $\hat{\Psi}_n$ can also be written in terms of~$\ccc$ and $\ddd$ as $\hat{\Phi}_n(\ccc,\ddd) = \Phi_n(\ccc,\ \ccc^2-\ddd)$ (Proposition \ref{hat_Phi}), and that the sum of absolute values of the coefficients of this $(\ccc,\ddd)$-polynomial, which is the evaluation $\Phi_n(1,2)$, is the $n$-th term of a notable combinatorial sequence counting permutations in $\mathfrak S_n$ with no consecutive descents and no descent at the end (Theorem \ref{sum_coefficients}). This sequence has properties relevant to this work; in particular, the logarithm of the corresponding exponential generating function is an odd function, which is a crucial property of both $e^x$ and $\tan x + \sec x$ that emerges repeatedly in the derivations of the results mentioned above. We discuss the similarities with Euler numbers and alternating permutations in Section \ref{SWG_permutations}. It is natural to wonder if the variations of descents introduced thus far can be accompanied by corresponding variations of inversions. 
For alternating descents it seems reasonable to consider \emph{alternating inversions} defined in a similar manner as pairs of indices $i<j$ such that either $i$ is odd and the elements in positions $i$ and $j$ form a regular inversion, or else $i$ is even and these two elements do \emph{not} form a regular inversion. As for $3$-descents, we define the accompanying \emph{$3$-inversion statistic}, where a $3$-inversion is defined as the number of pairs of indices $(i,j)$ such that $i+1<j$ and the elements in positions $i$, $i+1$, and $j$, taken in this order, constitute an odd permutation of size $3$. Let $\hat{\imath}(\sigma)$ and $i_3(\sigma)$ be the number of alternating inversions and $3$-inversions of a permutation $\sigma$, respectively. We find that the joint distribution of the pair $(\hat{d}, \hat{\imath})$ of statistics on the set $\mathfrak S_n$ is identical to the distribution of the pair~$(d_3,i_3)$ of statistics on the set $\tilde{\mathfrak S}_{n+1}$ (Theorem \ref{main_bivariate_identity}). Stanley \cite{StanleyBinomialPosets} derived a generating function for the joint distribution of the classical descent and inversion statistics on $\mathfrak S_n$: \begin{equation}\label{des_inv_gf} 1 + \sum_{n\geq 1} \sum_{\sigma\in\mathfrak S_n} t^{d(\sigma)} q^{\inv(\sigma)}\cdot {u^n\over [n]_q!} = {1-t\over \Exp_q\bigl(u(t-1)\bigr) - t}, \end{equation} where $\Exp_q(x) = \sum_{n\geq 0} q^{n\choose 2} x^n/[n]_q!$, and $d(\sigma)$ and $\inv(\sigma)$ denote the number of descents and inversions of $\sigma$, respectively. (Another good reference on the subject is a recent paper \cite{ShareshianWachs} of Shareshian and Wachs.) 
It would be nice to produce an analog of the generating function (\ref{des_inv_gf}) for these descent-inversion pairs, but this task appears to be challenging, and it is not even clear what form such a generating function should have, as the $q$-factorials in the denominators of (\ref{des_inv_gf}) are strongly connected to $q$-binomial coefficients, which have a combinatorial interpretation of the number of inversions in a permutation obtained by concatenating two increasing runs of fixed size. Nevertheless the bivariate polynomial $\hat{A}_n(t,q) := \sum_{\sigma\in\mathfrak S_n} t^{\hat{d}(\sigma)} q^{\hat{\imath}(\sigma)}$ seems to be of interest, and in Section \ref{Euler-q-analog} we direct our attention to the $q$-polynomials that result if we set $t=0$. This special case concerns up-down permutations and, more precisely, their distribution according to the number of alternating inversions. For down-up permutations this distribution is essentially the same, the only difference being the order of the coefficients in the $q$-polynomial, and for our purposes it turns out to be more convenient to work with down-up permutations, so we use the distribution of $\hat{\imath}$ on them to define a $q$-analog $\hat{E}_n(q)$ of Euler numbers. The formal definition we give is $$ \hat{E}_n(q) := q^{-\lfloor n^2/4 \rfloor} \sum_{\sigma\in\Alt_n} q^{\hat{\imath}(\sigma)}, $$ where $\Alt_n$ is the set of down-up permutations of $[n]$. The polynomial $\hat{E}_n(q)$ is monic with constant term equal to the Catalan number $c_{\lfloor n/2 \rfloor}$ (Proposition \ref{hatEq_facts}), which hints at the possibility to express $\hat{E}_n(q)$ as the sum of $c_{\lfloor n/2 \rfloor}$ ``nice'' polynomials with constant term $1$. We discover such an expression in the form of a $q$-analog of a beautiful identity that represents $E_n$ as the sum of weighted Dyck paths of length~$2\lfloor n/2 \rfloor$. 
In this identity we imagine Dyck paths as starting at $(0,0)$ and ending at $(2\lfloor n/2 \rfloor, 0)$. We set the weight of an up-step to be the level at which that step is situated (the steps that touch the ``ground'' are at level $1$, the steps above them at level $2$, and so on) and the weight of a down-step to be either the level of the step (for even $n$) or one plus the level of the step (for odd $n$). We set the weight of the path to be the product of the weights of all its steps. The sum of the weights taken over all $c_{\lfloor n/2\rfloor}$ paths then equals $E_n$, and if we replace the weight of a step with the $q$-analog of the respective integer, we obtain $\hat{E}_n(q)$ (Theorem \ref{weighted_path_identity}). The original $q=1$ version of the above identity provides a curious connection between Catalan and Euler numbers. A notable difference between these numbers is in the generating functions: one traditionally considers the ordinary generating function for the former and the exponential one for the latter. An interesting and hopefully solvable problem is to find a generating function interpolating between the two, and a potential solution could be to use the above $q$-analog $\hat{E}_n(q)$ of Euler numbers to write $$ H(q,x) := \sum_{n\geq 0} \hat{E}_n(q)\cdot {x^n\over [n]_q!}, $$ so that $H(1,x) = \tan x + \sec x$ and $$ H(0,x) = \sum_{n\geq 0} c_{\lfloor n/2\rfloor} x^n = {(1+x)\left(1-\sqrt{1-4x^2}\right)\over 2x^2}. $$ \section*{Acknowledgments} This paper is part of the author's Ph.D.\ thesis. I would like to thank Pavlo Pylyavskyy for his ideas and conversations that led to this work. I am also grateful to Richard Stanley and Alex Postnikov for helpful discussions. \section{Variations on the descent statistic}\label{descents} Let $\mathfrak S_n$ be the set of permutations of $[n]=\{1,\ldots,n\}$, and let $\tilde{\mathfrak S}_{n}$ be the set of permutations $\sigma_1\sigma_2\cdots \sigma_{n}$ of $[n]$ such that $\sigma_1=1$. 
For a permutation $\sigma=\sigma_1\cdots \sigma_n$, define the \emph{descent set} $D(\sigma)$ of $\sigma$ by $D(\sigma)=\{i\ |\ \sigma_i>\sigma_{i+1}\} \subseteq [n-1]$, and set $d(\sigma)=|D(\sigma)|$. We say that a permutation $\sigma$ has a \emph{$3$-descent} at position $i$ if the permutation $\sigma_i\sigma_{i+1}\sigma_{i+2}$, viewed as an element of $\mathfrak S_3$, is odd. Let $D_3(\sigma)$ be the set of positions at which a permutation $\sigma$ has a $3$-descent, and set $d_3(\sigma)=|D_3(\sigma)|$. An important property of the $3$-descent statistic is the following. \begin{lemma}\label{cyclic_shift} Let $\omega^c_n$ be the cyclic permutation $(2\ 3\ \ldots\ n\ 1)$, and let $\sigma\in\mathfrak S_n$. Then $D_3(\sigma)=D_3(\sigma\omega^c_n)$. \end{lemma} \begin{proof} Multiplying $\sigma$ on the right by $\omega^c_n$ replaces each $\sigma_i<n$ by $\sigma_i+1$, and the element of $\sigma$ equal to $n$ by $1$. Thus the elements of the triples $\sigma_i\sigma_{i+1}\sigma_{i+2}$ that do not include $n$ maintain their relative order under this operation, and in the triples that include $n$, the relative order of exactly two pairs of elements is altered. Thus the $3$-descent set of $\sigma$ is preserved. \end{proof} \begin{corollary}\label{corollary_cyclic_shift} For all $i,j,k,\ell\in [n]$ and $B\subseteq [n-2]$, the number of permutations $\sigma\in\mathfrak S_n$ with $D_3(\sigma)=B$ and $\sigma_i=j$ is the same as the number of permutations with $D_3(\sigma)=B$ and $\sigma_k=\ell$. \end{corollary} \begin{proof} The set $\mathfrak S_n$ splits into orbits of the form $\{\sigma,\sigma\omega^c_n, \sigma(\omega^c_n)^2,\ldots,\sigma(\omega^c_n)^{n-1}\}$, and each such subset contains exactly one permutation with a $j$ in the $i$-th position for all $i,j\in [n]$. \end{proof} Next, we define another variation on the descent statistic.
We say that a permutation $\sigma=\sigma_1\cdots\sigma_n$ has an \emph{alternating descent} at position $i$ if either $\sigma_i>\sigma_{i+1}$ and $i$ is odd, or else if $\sigma_i<\sigma_{i+1}$ and $i$ is even. Let $\hat{D}(\sigma)$ be the set of positions at which $\sigma$ has an alternating descent, and set $\hat{d}(\sigma)=|\hat{D}(\sigma)|$. Our first result relates the last two statistics by asserting that the $3$-descent sets of permutations in $\tilde{\mathfrak S}_{n+1}$ are equidistributed with the alternating descent sets of permutations in $\mathfrak S_n$. \begin{theorem}\label{bijection_alt3} Let $B\subseteq [n-1]$. The number of permutations $\sigma\in\tilde{\mathfrak S}_{n+1}$ with $D_3(\sigma)=B$ is equal to the number of permutations $\omega\in\mathfrak S_n$ with $\hat{D}(\omega)=B$. \end{theorem} \begin{proof_}{Proof (by Pavlo Pylyavskyy, private communication).} We construct a bijection between $\tilde{\mathfrak S}_{n+1}$ and $\mathfrak S_n$ mapping permutations with $3$-descent set $B$ to permutations with alternating descent set $B$. Start with a permutation $\sigma \in \tilde{\mathfrak{S}}_{n+1}$. We construct the corresponding permutation $\omega$ in $\mathfrak{S}_n$ by the following procedure. Consider $n+1$ points on a circle, and label them with numbers from $1$ to $n+1$ in the clockwise direction. For convenience, we refer to these points by their labels. For $1\leq i \leq n$, draw a line segment connecting $\sigma_{i}$ and $\sigma_{i+1}$. The segment $\sigma_{i}\sigma_{i+1}$ divides the circle into two arcs. Define the sequence $C_1$, \ldots, $C_n$, where $C_i$ is one of the two arcs between $\sigma_{i}$ and $\sigma_{i+1}$, according to the following rule. Choose $C_1$ to be the arc between $\sigma_1$ and $\sigma_2$ corresponding to going from $\sigma_1$ to $\sigma_2$ in the clockwise direction.
For $i>1$, given the choice of $C_{i-1}$, let $C_i$ be the arc between $\sigma_{i}$ and $\sigma_{i+1}$ that either \emph{contains} or \emph{is contained in} $C_{i-1}$. The choice of such an arc is always possible and unique. Let $\ell(i)$ denote how many of the $i$ points $\sigma_1, \ldots, \sigma_{i}$, including $\sigma_{i}$, are contained in $C_i$. Now, construct the sequence of permutations $\omega^{(i)}=\omega^{(i)}_1 \ldots \omega^{(i)}_i\in\mathfrak S_i$, $1\leq i \leq n$, as follows. Let $\omega^{(1)} = \ell(1)$. Given $\omega^{(i-1)}$, set $\omega^{(i)}_i = \ell(i)$, and let $\omega^{(i)}_1\ldots \omega^{(i)}_{i-1}$ be the permutation obtained from $\omega^{(i-1)}$ by adding $1$ to all elements which are greater than or equal to $\ell(i)$. Finally, set $\omega=\omega^{(n)}$. Next, we argue that the map $\sigma \mapsto \omega$ is a bijection. Indeed, from the subword $\omega_1 \ldots \omega_i$ of $\omega$ one can recover $\ell(i)$ since $\omega_i$ is the $\ell(i)$-th smallest element of the set $\{\omega_1,\ldots,\omega_i\}$. Then one can reconstruct one by one the arcs $C_i$ and the segments connecting $\sigma_{i}$ and $\sigma_{i+1}$ as follows. If $\ell(i)>\ell(i-1)$ then $C_i$ contains $C_{i-1}$, and if $\ell(i) \leq \ell(i-1)$ then $C_i$ is contained in $C_{i-1}$. Using this observation and the number $\ell(i)$ of the points $\sigma_1, \ldots, \sigma_{i}$ contained in $C_i$, one can determine the position of the point $\sigma_{i+1}$ relative to the points $\sigma_1,\ldots, \sigma_{i}$. It remains to check that $D_3(\sigma)=\hat{D}(\omega)$. Observe that $\sigma$ has a $3$-descent in position $i$ if and only if the triple of points $\sigma_i, \sigma_{i+1}, \sigma_{i+2}$ on the circle is oriented counterclockwise. Also, observe that $\omega_i>\omega_{i-1}$ if and only if $C_{i-1}\subset C_i$.
Finally, note that $C_{i-1} \subset C_i \supset C_{i+1}$ or $C_{i-1} \supset C_i \subset C_{i+1}$ if and only if triples $\sigma_{i-1}, \sigma_{i}, \sigma_{i+1}$ and $\sigma_{i}, \sigma_{i+1}, \sigma_{i+2}$ have the same orientation. We now show by induction on $i$ that $i\in D_3(\sigma)$ if and only if $i\in \hat{D}(\omega)$. From the choice of $C_1$ and $C_2$, it follows that $C_1\subset C_2$ if and only if $\sigma_3>\sigma_2$, and hence $\omega$ has an (alternating) descent at position $1$ if and only if $\sigma_1\sigma_2\sigma_3 = 1\sigma_2\sigma_3$ is an odd permutation. Suppose the claim holds for $i-1$. By the above observations, we have $\omega_{i-1} < \omega_i > \omega_{i+1}$ or $\omega_{i-1} > \omega_i < \omega_{i+1}$ if and only if the permutations $\sigma_{i-1}\sigma_{i}\sigma_{i+1}$ and $\sigma_{i} \sigma_{i+1} \sigma_{i+2}$ have the same sign. In other words, $i-1$ and $i$ are either both contained or both not contained in $\hat{D}(\omega)$ if and only if they are either both contained or both not contained in $D_3(\sigma)$. It follows that $i\in D_3(\sigma)$ if and only if $i\in\hat{D}(\omega)$. \end{proof_} An important special case of Theorem \ref{bijection_alt3} is $B=\varnothing$. A permutation $\sigma\in\mathfrak S_n$ has $\hat{D}(\sigma)=\varnothing$ if and only if it is an \emph{alternating (up-down)} permutation, i.e. $\sigma_1 < \sigma_2 > \sigma_3 < \cdots$. The number of such permutations of size $n$ is the \emph{Euler number} $E_n$. Thus we get the following corollary: \begin{corollary}\label{corollary_Euler} (a) The number of permutations in $\tilde{\mathfrak S}_{n+1}$ with no $3$-descents is $E_n$. \vskip4pt \noindent (b) The number of permutations in $\mathfrak S_{n+1}$ with no $3$-descents is $(n+1)E_n$. \end{corollary} \begin{proof} Part (b) follows from Corollary \ref{corollary_cyclic_shift}: for each $j\in [n+1]$, there are $E_n$ permutations in $\mathfrak S_{n+1}$ beginning with $j$. 
\end{proof} Permutations with no $3$-descents can be equivalently described as simultaneously avoiding \emph{generalized} patterns $132$, $213$, and $321$ (meaning, in this case, triples of \emph{consecutive} elements with one of these relative orders). Corollary \ref{corollary_Euler}(b) appears in the paper \cite{KitaevMansour} of Kitaev and Mansour on simultaneous avoidance of generalized patterns. Thus the above construction yields a bijective proof of their result. \section{Variations on the inversion statistic}\label{inversions} In this section we introduce analogs of the inversion statistic on permutations corresponding to the $3$-descent and the alternating descent statistics introduced in Section \ref{descents}. First, let us recall the standard inversion statistic. For $\sigma\in\mathfrak S_n$, let $a_i$ be the number of indices $j>i$ such that $\sigma_i > \sigma_j$, and set $\code(\sigma) = (a_1,\ldots,a_{n-1})$ and $\inv(\sigma) = a_1+\cdots + a_{n-1}$. For a permutation $\sigma\in\mathfrak S_n$ and $i\in [n-2]$, let $c^3_i(\sigma)$ be the number of indices $j>i+1$ such that $\sigma_i \sigma_{i+1} \sigma_j$ is an odd permutation, and set $\code_3(\sigma)=(c^3_1(\sigma),c^3_2(\sigma),\ldots,c^3_{n-2}(\sigma))$. Let $C_k$ be the set of $k$-tuples $(a_1,\ldots,a_k)$ of non-negative integers such that $a_i \leq k+1-i$. Clearly, $\code_3(\sigma)\in C_{n-2}$. \begin{lemma}\label{cyclic_shift_inversions} Let $\omega^c_n$ be the cyclic permutation $(2\ 3\ \ldots\ n\ 1)$, and let $\sigma\in\mathfrak S_n$. Then $\code_3(\sigma)=\code_3(\sigma\omega^c_n)$. \end{lemma} \begin{proof} The proof is analogous to that of Lemma \ref{cyclic_shift}. \end{proof} \begin{proposition}\label{code_3} The restriction $\code_3 : \tilde{\mathfrak S}_n \rightarrow C_{n-2}$ is a bijection. \end{proposition} \begin{proof} Since $|\tilde{\mathfrak S}_n|=|C_{n-2}| = (n-1)!$, it suffices to show that the restriction of $\code_3$ to $\tilde{\mathfrak S}_n$ is surjective. 
We proceed by induction on $n$. The claim is trivial for $n=3$. Suppose it is true for $n-1$, and let $(a_1,\ldots,a_{n-2})\in C_{n-2}$. Let $\tau$ be the unique permutation in $\tilde{\mathfrak S}_{n-1}$ such that $\code_3(\tau)=(a_2,\ldots,a_{n-2})$. For $1\leq\ell\leq n$, let $\ell*\tau$ be the permutation in $\mathfrak S_n$ beginning with $\ell$ such that the relative order of the last $n-1$ elements of $\ell*\tau$ is the same as that of the elements of $\tau$. Setting $\ell=n-a_1$ we obtain $\code_3(\ell*\tau) = (a_1,\ldots,a_{n-2})$ since $\ell\ 1\ m$ is an odd permutation if and only if $\ell < m$, and there are exactly $a_1$ elements of $\ell*\tau$ that are greater than $\ell$. Finally, by Lemma \ref{cyclic_shift_inversions}, the permutation $\sigma = (\ell*\tau)(\omega^c_n)^{1-\ell}\in \tilde{\mathfrak S}_{n}$ satisfies $\code_3(\sigma)=(a_1,\ldots,a_{n-2})$. \end{proof} Let $i_3(\sigma) = c^3_1(\sigma) + c^3_2(\sigma) + \cdots + c^3_{n-2}(\sigma)$. An immediate consequence of Proposition \ref{code_3} is that $i_3(1*\sigma)$ is a \emph{Mahonian statistic} on permutations $\sigma\in\mathfrak S_{n}$: \begin{corollary}\label{code3_mahonian} We have $$ \sum_{\sigma\in\mathfrak S_n} q^{i_3(1*\sigma)} = (1+q)(1+q+q^2)\cdots (1+q+q^2+\cdots+q^{n-1}). $$ \end{corollary} For a permutation $\sigma\in\mathfrak S_n$ and $i\in [n-1]$, define $\hat{c}_i(\sigma)$ to be the number of indices $j>i$ such that $\sigma_i>\sigma_j$ if $i$ is odd, or the number of indices $j>i$ such that $\sigma_i<\sigma_j$ if $i$ is even. Set $\hat{\code}(\sigma) = (\hat{c}_1(\sigma), \ldots, \hat{c}_{n-1}(\sigma))\in C_{n-1}$ and $\hat{\imath}(\sigma) = \hat{c}_1(\sigma) + \cdots + \hat{c}_{n-1}(\sigma)$. \begin{proposition}\label{hat_code} The map $\hat{\code} : \mathfrak S_n \rightarrow C_{n-1}$ is a bijection.
\end{proposition} \begin{proof} The proposition follows easily from the fact that if $\code(\sigma) = (a_1,\ldots, a_{n-1})$ is the standard inversion code of $\sigma$, then $\hat{\code}(\sigma)=(a_1,n-2-a_2,a_3,n-4-a_4,\ldots)$. Since the standard inversion code is a bijection between $\mathfrak S_n$ and $C_{n-1}$, so is $\hat{\code}$. \end{proof} \begin{corollary}\label{hat_code_mahonian} We have $$ \sum_{\sigma\in\mathfrak S_n} q^{\hat{\imath}(\sigma)} = (1+q)(1+q+q^2)\cdots (1+q+q^2+\cdots+q^{n-1}). $$ \end{corollary} Another way to deduce Corollary \ref{hat_code_mahonian} is via the bijection $\sigma \leftrightarrow \sigma^\vee$, where $$ \sigma^\vee = \sigma_1 \sigma_3 \sigma_5 \cdots \sigma_6 \sigma_4 \sigma_2. $$ \begin{proposition}\label{sigma_check} We have $\hat{\imath}(\sigma) = \inv(\sigma^\vee)$. \end{proposition} \begin{proof} It is easy to verify that a pair $(\sigma_i, \sigma_j)$, $i<j$, contributes to $\hat{\imath}(\sigma)$ if and only if it contributes to $\inv(\sigma^\vee)$. \end{proof} Next, we prove a fundamental relation between the variants of the descent and the inversion statistics introduced thus far. \begin{theorem}\label{main_bivariate_identity} We have $$ \sum_{\sigma\in \tilde{\mathfrak S}_{n+1}} t^{d_3(\sigma)} q^{i_3(\sigma)} = \sum_{\omega\in \mathfrak S_n} t^{\hat{d}(\omega)} q^{\hat{\imath}(\omega)}. $$ \end{theorem} \begin{proof} The theorem is a direct consequence of the following proposition. \begin{proposition}\label{equal_codes} If $\code_3(\sigma) = \hat{\code}(\omega)$ for some $\sigma\in\tilde{\mathfrak S}_{n+1}$ and $\omega\in\mathfrak S_n$, then $D_3(\sigma)=\hat{D}(\omega)$. \end{proposition} \begin{proof} The alternating descent set of $\omega$ can be obtained from $\hat{\code}(\omega)$ as follows: \begin{lemma}\label{hatD_from_hatcode} For $\omega\in\mathfrak S_n$, write $(a_1,\ldots,a_{n-1})=\hat{\code}(\omega)$, and set $a_n=0$. Then $\hat{D}(\omega)=\{i\in [n-1]\ |\ a_i+a_{i+1} \geq n-i\}$.
\end{lemma} \begin{proof} Suppose $i$ is odd; then if $\omega_i>\omega_{i+1}$, i.e. $i\in\hat{D}(\omega)$, then for each $j>i$ we have $\omega_i>\omega_j$ or $\omega_{i+1}<\omega_j$ or both, so $a_i+a_{i+1}$ is not smaller than $n-i$, which is the number of elements of $\omega$ to the right of $\omega_i$; if on the other hand $\omega_i<\omega_{i+1}$, i.e. $i\notin\hat{D}(\omega)$, then for each $j>i$, at most one of the inequalities $\omega_i>\omega_j$ and $\omega_{i+1}<\omega_j$ holds, and neither inequality holds for $j=i+1$, so $a_i+a_{i+1}\leq n-i-1$, which is the number of elements of $\omega$ to the right of $\omega_{i+1}$. The case of even $i$ is analogous. \end{proof} We now show that the $3$-descent set of $\sigma$ can be obtained from $(a_1,\ldots,a_{n-1})$ in the same way. \begin{lemma}\label{D3_from_code3} For $\sigma\in\tilde{\mathfrak S}_{n+1}$, write $(a_1,\ldots,a_{n-1})=\code_3(\sigma)$, and set $a_n=0$. Then $D_3(\sigma)=\{i\in [n-1]\ |\ a_i+a_{i+1} \geq n-i\}$. \end{lemma} \begin{proof} Let $B=D_3(\sigma)$, and let $\sigma' = \sigma (\omega_{n+1}^c)^{1-\sigma_i} \in \mathfrak S_{n+1}$. Then $\sigma'_i=1$, and by Lemmas \ref{cyclic_shift} and \ref{cyclic_shift_inversions}, we have $D_3(\sigma')=D_3(\sigma)=B$ and $\code_3(\sigma')=\code_3(\sigma)$. Suppose that $1=\sigma'_i<\sigma'_{i+1}<\sigma'_{i+2}$. Then $i\notin B$, and for each $j>i+2$, at most one of the permutations $\sigma_i'\sigma'_{i+1} \sigma'_{j} = 1\sigma'_{i+1}\sigma'_j$ and $\sigma'_{i+1} \sigma'_{i+2} \sigma'_j$ is odd, because $1 \sigma'_{i+1} \sigma'_{j}$ is odd if and only if $\sigma'_{i+1} > \sigma'_j$, and $\sigma'_{i+1} \sigma'_{i+2} \sigma'_j$ is odd if and only if $\sigma'_{i+1} < \sigma'_j < \sigma'_{i+2}$. Hence $a_i+a_{i+1}$ is at most $n-1-i$, which is the number of indices $j\in [n+1]$ such that $j>i+2$. Now suppose that $1=\sigma'_i < \sigma'_{i+1} > \sigma'_{i+2}$. 
Then $i\in B$, and for each $j>i+2$, at least one of the permutations $\sigma'_i \sigma'_{i+1} \sigma'_{j} = 1 \sigma'_{i+1} \sigma'_j$ and $\sigma'_{i+1} \sigma'_{i+2} \sigma'_{j}$ is odd, because $\sigma'_{i+1} > \sigma'_j$ makes $1 \sigma'_{i+1} \sigma'_{j}$ odd, and $\sigma'_{i+1} < \sigma'_j$ makes $\sigma'_{i+1} \sigma'_{i+2} \sigma'_j$ odd. Thus each index $j>i+1$ contributes to at least one of $a_i$ and $a_{i+1}$, so $a_i+a_{i+1} \geq n-i$, which is the number of indices $j\in [n+1]$ such that $j > i+1$. \end{proof} Proposition \ref{equal_codes} follows from Lemmas \ref{hatD_from_hatcode} and \ref{D3_from_code3}. \end{proof} Combining the results of the above discussion, we conclude that both polynomials of Theorem \ref{main_bivariate_identity} are equal to $$ \sum_{(a_1,\ldots,a_{n-1})\in C_{n-1}} t^{|\hat{D}(a_1,\ldots,a_{n-1})|}\ q^{a_1+\cdots + a_{n-1}}, $$ where $\hat{D}(a_1,\ldots,a_{n-1}) = \{i\in [n-1]\ |\ a_i+a_{i+1} \geq n-i\}$. \end{proof} Note that the bijective correspondence $$ \sigma\in\mathfrak S_n \xrightarrow{\ \ \hat{\code}\ \ } c\in C_{n-1} \xrightarrow{\ \ (\code_3)^{-1}\ \ } \omega\in\tilde{\mathfrak S}_{n+1} $$ satisfying $\hat{D}(\sigma) = D_3(\omega)$ yields another bijective proof of Theorem \ref{bijection_alt3}. Besides the inversion statistic, the most famous Mahonian statistic on permutations is the \emph{major index}. For $\sigma \in \mathfrak S_n$, define the major index of $\sigma$ by $$ \maj(\sigma) = \sum_{i\in D(\sigma)} i. $$ Our next result reveals a close relation between the major index and the $3$-inversion statistic $i_3$. \begin{proposition}\label{i3_bmaj} For $\sigma\in \mathfrak S_n$, write $\sigma^r = \sigma'_n\cdots \sigma'_2\sigma'_1$, where $\sigma'_i = n+1-\sigma_i$. Then $$i_3(1*\sigma) = \maj(\sigma^r).$$ \end{proposition} \begin{proof} Let $\sigma = 1*\omega \in \tilde{\mathfrak S}_{n+1}$. Let $D(\sigma) = \{b_1 < \cdots < b_d\}$.
Write $\sigma = \tau^{(1)} \tau^{(2)} \cdots \tau^{(d+1)}$, where $\tau^{(k)} = \sigma_{b_{k-1}+1} \sigma_{b_{k-1}+2} \cdots \sigma_{b_k}$ and $b_0=0$ and $b_{d+1}=n+1$. In other words, we split $\sigma$ into ascending runs between consecutive descents. Fix an element $\sigma_j$ of $\sigma$, and suppose $\sigma_j \in \tau^{(k)}$. We claim that there are exactly $k-1$ indices $i < j-1$ such that $\sigma_i \sigma_{i+1} \sigma_j$ is an odd permutation. For each ascending run $\tau^{(\ell)}$, $\ell<k$, there is at most one element $\sigma_i\in\tau^{(\ell)}$ such that $\sigma_i < \sigma_j < \sigma_{i+1}$, in which case $\sigma_i\sigma_{i+1}\sigma_j$ is odd. There is no such element in $\tau^{(\ell)}$ if and only if the first element $\sigma_{b_{\ell-1}+1}$ of $\tau^{(\ell)}$ is greater than $\sigma_j$, or the last element $\sigma_{b_\ell}$ of $\tau^{(\ell)}$ is smaller than $\sigma_j$. In the former case we have $\sigma_{b_{\ell}-1} > \sigma_{b_{\ell}} > \sigma_j$, so $\sigma_{b_{\ell}-1} \sigma_{b_{\ell}} \sigma_j$ is odd, and in the latter case, $\sigma_j > \sigma_{b_\ell} > \sigma_{b_\ell + 1}$, so $\sigma_{b_\ell} \sigma_{b_\ell + 1} \sigma_j$ is odd. Thus we obtain a one-to-one correspondence between the $k-1$ ascending runs $\tau^{(1)}$, \ldots, $\tau^{(k-1)}$ and elements $\sigma_i$ such that $\sigma_i\sigma_{i+1}\sigma_j$ is an odd permutation. We conclude that for each $\tau^{(k)}$, there are $(k-1)\cdot (b_k - b_{k-1})$ odd triples $\sigma_i \sigma_{i+1} \sigma_j$ with $\sigma_j\in\tau^{(k)}$, and hence $$ i_3(\sigma) = \sum_{k=1}^{d+1} (k-1)\cdot (b_k - b_{k-1}) = $$ $$ = (b_{d+1} - b_d) + (b_{d+1} - b_d + b_d - b_{d-1}) + (b_{d+1} - b_d + b_d - b_{d-1} + b_{d-1} - b_{d-2}) + \cdots = $$ $$ = \sum_{m=1}^d (n+1-b_m). $$ We have $D(\omega) = \{b_1-1, b_2-1, \ldots, b_d-1\}$, from where it is not hard to see that $D(\omega^r) = \{n+1-b_d, n+1-b_{d-1}, \ldots, n+1-b_1\}$. The proposition follows.
\end{proof} Observe that for a permutation $\pi$ with $\pi^r = \pi'_m \cdots \pi'_1$, the triple $\pi_{i} \pi_{i+1} \pi_{i+2}$ is odd if and only if the triple $\pi_{i+2} \pi_{i+1} \pi_i$ is even, which in turn is the case if and only if the triple $\pi'_{i+2}\pi'_{i+1}\pi'_i$ of consecutive elements of $\pi^r$ is odd. Thus $d_3(\pi) = d_3(\pi^r)$, and we obtain the following corollary. \begin{corollary}\label{other_bivariate_identity} We have $$ \sum_{\sigma\in\tilde{\mathfrak S}_{n+1}} t^{d_3(\sigma)} q^{i_3(\sigma)} = \sum_{\omega\in\mathfrak S_n} t^{d_3(\omega\circ (n+1))} q^{\maj(\omega)}, $$ where $\omega\circ (n+1)$ is the permutation obtained by appending $(n+1)$ to $\omega$. \end{corollary} \begin{proof} To deduce the identity from Proposition \ref{i3_bmaj}, write $\sigma = 1*\pi$ and set $\omega = \pi^r$, so that $\omega\circ (n+1) = \sigma^r$. \end{proof} In the language of permutation patterns, the statistic $i_3(\sigma)$ can be defined as the total number of occurrences of generalized patterns $13$-$2$, $21$-$3$, and $32$-$1$ in $\sigma$. (An occurrence of a generalized pattern $13$-$2$ in a permutation $\sigma = \sigma_1 \sigma_2 \cdots$ is a pair of indices $(i,j)$ such that $i +1 < j$ and $\sigma_i$, $\sigma_{i+1}$, and $\sigma_{j}$ have the same relative order as $1$, $3$, and $2$, that is, $\sigma_{i} < \sigma_{j} < \sigma_{i+1}$, and the other two patterns are defined analogously.) In \cite{BabsonSteingrimsson} Babson and Steingr\'{i}msson mention the Mahonian statistic $\mathrm{STAT}(\sigma)$, which is defined as $i_3(\sigma)$ (treated in terms of the aforementioned patterns) plus $d(\sigma)$. In the permutation $\sigma\circ (n+1)$, where $\sigma\in\mathfrak S_n$, the descents of $\sigma$ and the last element $n+1$ constitute all occurrences of the pattern $21$-$3$ involving $n+1$, and hence $i_3\bigl(\sigma\circ (n+1)\bigr) = \mathrm{STAT}(\sigma)$.
\section{Variations on Eulerian polynomials}\label{Eulerian-polynomials} Having introduced two new descent statistics, it is natural to look at the analog of the Eulerian polynomials representing their common distribution on $\mathfrak S_n$. First, recall the definition of the classical $n$-th Eulerian polynomial: $$ A_n(t) := \sum_{\sigma\in\mathfrak S_n} t^{d(\sigma)+1} = \sum_{k=1}^n A(n,k)\cdot t^k, $$ where $A(n,k)$ is the number of permutations in $\mathfrak S_n$ with $k-1$ descents. There is a well-known formula for the exponential generating function for Eulerian polynomials: \begin{equation}\label{egf_eulerian} E(t,u) = \sum_{n\geq 1} A_n(t) \cdot {u^n\over n!} = {t(1-e^{u(t-1)})\over e^{u(t-1)}-t}. \end{equation} In this section we consider analogs of Eulerian numbers and polynomials for our variations of the descent statistic. Define the \emph{alternating Eulerian polynomials} $\hat{A}_n(t)$ by $$ \hat{A}_n(t) := \sum_{\sigma\in\mathfrak S_n} t^{\hat{d}(\sigma)+1} = \sum_{k=1}^n \hat{A}(n,k)\cdot t^k, $$ where $\hat{A}(n,k)$ is the number of permutations in $\mathfrak S_n$ with $k-1$ alternating descents. Our next goal is to find an expression for the exponential generating function $$ F(t,u) := \sum_{n\geq 1} \hat{A}_n(t)\cdot {u^n\over n!}. $$ We begin by deducing a formula for the number of permutations in $\mathfrak S_n$ with a given alternating descent set. For $S\subseteq [n-1]$, let $\hat{\beta}_n(S)$ be the number of permutations $\sigma\in\mathfrak S_n$ with $\hat{D}(\sigma)=S$, and let $\hat{\alpha}_n(S) = \sum_{T\subseteq S} \hat{\beta}_n(T)$ be the number of permutations $\sigma\in\mathfrak S_n$ with $\hat{D}(\sigma) \subseteq S$. 
For $S = \{s_1 < \cdots < s_k\} \subseteq [n-1]$, let $\co(S)$ be the composition $(s_1, s_2 - s_1, s_3-s_2, \ldots, s_k - s_{k-1}, n-s_k)$ of $n$, and for a composition $\gamma =(\gamma_1, \ldots, \gamma_\ell)$ of $n$, let $S_\gamma$ be the subset $\{\gamma_1, \gamma_1+\gamma_2, \ldots, \gamma_1 + \cdots + \gamma_{\ell-1}\}$ of $[n-1]$. Also, define $$ {n \choose \gamma} := {n\choose \gamma_1,\ldots,\gamma_\ell} = {n!\over \gamma_1!\cdots \gamma_\ell!} $$ and $$ {n \choose \gamma}_E := {n\choose \gamma_1,\ldots,\gamma_\ell} \cdot E_{\gamma_1} \cdots E_{\gamma_\ell}. $$ \begin{lemma}\label{alternating_descent_set} We have $$ \hat{\alpha}_n(S) = {n \choose \co(S)}_E $$ and $$ \hat{\beta}_n(S) = \sum_{T\subseteq S} (-1)^{|S-T|} {n\choose \co(T)}_E. $$ \end{lemma} \begin{proof} Let $S=\{s_1 < \cdots < s_k\}\subseteq [n-1]$. Set $s_0=0$ and $s_{k+1}=n$ for convenience. The alternating descent set of a permutation $\sigma\in\mathfrak S_n$ is contained in $S$ if and only if for all $1 \leq i \leq k+1$, the subword $\tau_i=\sigma_{s_{i-1}+1} \sigma_{s_{i-1}+2} \cdots \sigma_{s_i}$ forms either an up-down (if $s_{i-1}$ is even) or a down-up (if $s_{i-1}$ is odd) permutation. Thus to construct a permutation $\sigma$ with $\hat{D}(\sigma) \subseteq S$, one must choose one of the ${n\choose s_1-s_0, s_2-s_1,\ldots,s_{k+1}-s_{k}} = {n\choose \co(S)}$ ways to distribute the elements of $[n]$ among the subwords $\tau_1$, \ldots, $\tau_{k+1}$, and then for each $i\in [k+1]$, choose one of the $E_{s_i-s_{i-1}}$ ways of ordering the elements within the subword $\tau_i$. The first equation of the lemma follows. The second equation is obtained from the first via the inclusion-exclusion principle. 
\end{proof} Now consider the sum \begin{equation}\label{summation1} \sum_{S\subseteq [n-1]} {n\choose \co(S)}_E x^{|S|} = \sum_{S\subseteq [n-1]} \hat{\alpha}_n(S)\cdot x^{|S|} = \sum_{\sigma\in\mathfrak S_n} \left(\sum_{T\supseteq \hat{D}(\sigma)} x^{|T|}\right) \end{equation} (a permutation $\sigma$ contributes to $\hat{\alpha}_n(T)$ whenever $T \supseteq \hat{D}(\sigma)$). The right hand side of (\ref{summation1}) is equal to \begin{eqnarray} \nonumber \sum_{\sigma\in\mathfrak S_n} \sum_{T\supseteq \hat{D}(\sigma)} x^{\hat{d}(\sigma) + |T-\hat{D}(\sigma)|} &=& \sum_{\sigma\in\mathfrak S_n} x^{\hat{d}(\sigma)} \sum_{i=0}^{n-1-\hat{d}(\sigma)} {n-1-\hat{d}(\sigma)\choose i}\ x^i \\ &=& \sum_{\sigma\in\mathfrak S_n} x^{\hat{d}(\sigma)} (1+x)^{n-1-\hat{d}(\sigma)}, \label{summation2} \end{eqnarray} as there are ${n-1-\hat{d}(\sigma)\choose i}$ subsets of $[n-1]$ containing $\hat{D}(\sigma)$. Continuing with the right hand side of (\ref{summation2}), we get \begin{equation}\label{summation3} {(1+x)^n\over x} \cdot\sum_{\sigma\in\mathfrak S_n} \left({x\over 1+x}\right)^{\hat{d}(\sigma)+1} = {(1+x)^n\over x} \cdot\hat{A}_n \left({x\over 1+x}\right). \end{equation} Combining equations (\ref{summation1})--(\ref{summation3}), we obtain \begin{equation}\label{summation4} \sum_{n\geq 1} \left(\sum_{S\subseteq [n-1]} {n\choose \co(S)}_E x^{|S|}\right)\cdot {y^n\over n!} = {1\over x}\cdot \sum_{n\geq 1} \hat{A}_n \left({x\over 1+x}\right) \cdot {y^n(1+x)^n\over n!} . \end{equation} Since $S \mapsto \co(S)$ is a bijection between subsets of $[n-1]$ and the set of compositions of $n$, the left hand side of (\ref{summation4}) is \begin{equation}\label{summation5} \sum_{n\geq 1} \left( \sum_{\gamma} {E_{\gamma_1} \cdots E_{\gamma_\ell}\over \gamma_1!
\cdots \gamma_\ell!}\cdot x^{\ell-1} \right)\cdot y^n = {1\over x} \cdot \sum_{\ell\geq 1} x^\ell\cdot \left(\sum_{i\geq 1} {E_i y^i \over i!}\right)^\ell, \end{equation} where the inside summation in the left hand side is over all compositions $\gamma=(\gamma_1,\ldots,\gamma_\ell)$ of $n$. Applying the well-known formula $\sum_{j\geq 0} E_j y^j/ j! = \tan y + \sec y$, the right hand side of (\ref{summation5}) becomes \begin{equation}\label{summation6} {1\over x}\cdot \sum_{\ell\geq 1} x^\ell (\tan y + \sec y - 1)^\ell = {1\over x} \cdot \left({1\over 1 - x (\tan y + \sec y - 1)} -1 \right). \end{equation} Now set $t={x\over 1+x}$ and $u = y(1+x)$. Equating the right hand sides of (\ref{summation4}) and (\ref{summation6}), we obtain \begin{equation}\label{summation7} F(t,u) = \sum_{n\geq 1} \hat{A}_n(t)\cdot {u^n\over n!} = \left({1\over 1 - x (\tan y + \sec y - 1)} -1 \right). \end{equation} Finally, applying the inverse substitution $x = {t\over 1-t}$ and $y=u(1-t)$ and simplifying yields an expression for $F(t,u)$: \begin{eqnarray} \nonumber F(t,u)&=&{x(\tan y + \sec y -1) \over 1 - x(\tan y + \sec y -1)}\\ \nonumber &=& {t\over 1-t}\cdot \left({\tan y + \sec y - 1 \over 1 - {t\over 1-t} \cdot (\tan y + \sec y - 1)}\right) \\ &=& {t\cdot \bigl(\tan (u(1-t)) + \sec (u(1-t)) - 1\bigr) \over 1 - t\cdot\bigl(\tan (u(1-t)) + \sec (u(1-t))\bigr)}. \label{almost_final_F} \end{eqnarray} Using the property $(\tan z + \sec z)(\tan (-z) + \sec (-z))=1$, we can rewrite the above expression for $F(t,u)$ as follows: \begin{theorem}\label{final_F} We have $$ F(t,u) = {t\cdot\bigl( 1 - \tan(u(t-1)) - \sec(u(t-1))\bigr) \over \tan(u(t-1)) + \sec(u(t-1)) - t}. $$ \end{theorem} Thus $F(t,u)$ can be expressed by replacing the exponential function in the formula~(\ref{egf_eulerian}) for $E(t,u)$ by tangent plus secant. In fact, omitting the Euler numbers and working with standard multinomial coefficients gives a proof of (\ref{egf_eulerian}).
A basic result on Eulerian polynomials is the identity \begin{equation}\label{A_over_identity} {A_n(t)\over (1-t)^{n+1}} = \sum_{m\geq 1} m^n t^m. \end{equation} Our next result is a similar identity involving alternating Eulerian polynomials. For a partition $\lambda$ of $n$ with $r_i$ parts equal to $i$, define $$ z_\lambda := 1^{r_1}\cdot r_1! \cdot 2^{r_2}\cdot r_2! \cdot \cdots. $$ \begin{theorem}\label{f-eulerian} Let $$ \hat{f}_n(m) = \sum_{\lambda} {n!\over z_\lambda}\cdot {E_{\lambda_1-1} E_{\lambda_2-1} \cdots \over (\lambda_1-1)! (\lambda_2-1)! \cdots} \cdot m^{\ell(\lambda)}, $$ where the sum is over all partitions $\lambda=(\lambda_1,\lambda_2,\ldots,\lambda_{\ell(\lambda)})$ of $n$ into odd parts. Then $$ {\hat{A}_n(t)\over (1-t)^{n+1}} = \sum_{m\geq 1} \hat{f}_n(m) t^m. $$ \end{theorem} \begin{proof} Let us consider the generating function $$ G(t,u) := \sum_{n\geq 1} {\hat{A}_n(t)\over (1-t)^{n+1}}\cdot {u^n\over n!}. $$ Then, by (\ref{almost_final_F}), we have \begin{equation}\label{G_expression} G(t,u) = {1\over 1-t} \cdot F\left(t,\ {u\over 1-t}\right) = {t\cdot(\tan u + \sec u - 1)\over(1-t)\bigl(1-t\cdot(\tan u + \sec u)\bigr)}. \end{equation} Define $$ H(m,u) := \sum_{n\geq 1} \hat{f}_n(m) \cdot {u^n\over n!}. $$ This series can be rewritten as follows: \begin{equation}\label{H_expression_1} H(m,u) = \sum_{n\geq 1} {\hat{f}_n(m)\over n!} \cdot u^n = -1+\prod_{i\geq 0}\left(\sum_{j\geq 0} {\left({E_{2i} mu^{2i+1}\over (2i+1)!}\right)^j\over j!} \right). \end{equation} Indeed, for each $i$, the index $j$ in the summation is the number of parts equal to $2i+1$ in a partition of $n$ into odd parts, and it is not hard to check that the contribution of $j$ parts equal to $2i+1$ to the appropriate terms of $\hat{f}_n(m)/n!$ is given by the expression inside the summation on the right. We subtract $1$ to cancel out the empty partition of $0$ counted by the product on the right but not by $H(m,u)$.
Continuing with the right hand side of (\ref{H_expression_1}), we get \begin{eqnarray} \nonumber H(m,u)+1 &=& \prod_{i\geq 0} \exp\left({E_{2i} mu^{2i+1}\over (2i+1)!}\right)\\ &=& \exp\left(m\sum_{i\geq 0}\left({E_{2i}u^{2i+1}\over (2i+1)!}\right)\right). \label{H_expression_2} \end{eqnarray} The sum appearing in the right hand side of (\ref{H_expression_2}) is the antiderivative of $\sec u = \sum_{i\geq 0} E_{2i} u^{2i} / (2i)!$ that vanishes at $u=0$; this antiderivative is $\ln(\tan u + \sec u)$. Therefore $$ H(m,u) +1= (\tan u + \sec u)^m. $$ Hence we have \begin{equation}\label{H_expression_3} \sum_{m\geq 1} H(m,u)\cdot t^m = {(\tan u + \sec u)\cdot t \over 1 - (\tan u + \sec u)\cdot t} - {1\over 1-t}. \end{equation} It is straightforward to verify that the right hand sides of (\ref{G_expression}) and (\ref{H_expression_3}) agree, and thus \begin{equation}\label{final_G_H} \sum_{n\geq 1} {\hat{A}_n(t)\over (1-t)^{n+1}}\cdot {u^n\over n!} = G(t,u) = \sum_{m\geq 1} H(m,u) t^m = \sum_{m,n\geq 1} \hat{f}_n(m) t^m\cdot {u^n\over n!}. \end{equation} Equating the coefficients of $u^n/n!$ on both sides of (\ref{final_G_H}) completes the proof of the theorem. \end{proof} In the terminology of \cite[Sec.\ 4.5]{StanleyEC1}, Theorem \ref{f-eulerian} states that the polynomials $\hat{A}_n(t)$ are the \emph{$\hat{f}_n$-Eulerian polynomials}. \section{Eulerian polynomials and symmetric functions} \label{Eulerian_sym} The results of the previous section can be tied to the theory of symmetric functions. Let us recall some basics. For a composition $\gamma = (\gamma_1, \gamma_2, \ldots, \gamma_k)$, the \emph{monomial quasisymmetric function} $M_\gamma(x_1, x_2, \ldots)$ is defined by $$ M_\gamma := \sum_{1\leq i_1 < \cdots < i_k} x_{i_1}^{\gamma_1} x_{i_2}^{\gamma_2} \cdots x_{i_k}^{\gamma_k}. $$ Let $\pi(\gamma)$ denote the partition obtained by rearranging the parts of $\gamma$ in non-increasing order. 
Then for a partition $\lambda$, the \emph{monomial symmetric function} $m_\lambda(x_1, x_2, \ldots)$ is defined as $$ m_\lambda := \sum_{\gamma\ :\ \pi(\gamma)=\lambda} M_\gamma. $$ Let $f(x)$ be a function given by the formal power series $$ f(x) = 1 + \sum_{n\geq 1} {a_n x^n\over n!}. $$ Define the symmetric function $g_{f,n}(x_1,x_2,\ldots)$ by $$ g_{f,n} := \sum_{\gamma \models n} {n\choose \gamma} \cdot a_{\gamma_1} a_{\gamma_2} \cdots \cdot M_\gamma = \sum_{\lambda \vdash n} {n\choose \lambda} \cdot a_{\lambda_1} a_{\lambda_2} \cdots \cdot m_\lambda, $$ where by $\gamma\models n$ and $\lambda\vdash n$ we mean that $\gamma$ and $\lambda$ are a composition and a partition of $n$, respectively. This function can be thought of as the generating function for numbers like $\alpha_n(S)$ or $\hat{\alpha}_n(S)$ (the number of permutations $\sigma\in\mathfrak S_n$ with $D(\sigma)\subseteq S$ or $\hat{D}(\sigma)\subseteq S$, respectively). Our first step is to express $g_{f,n}$ in terms of the \emph{power sum symmetric functions} $p_k(x_1,x_2,\ldots) = \sum x_i^k$. Consider the generating function \begin{equation}\label{Gf_definition} G_f(x_1,x_2,\ldots; u) := \sum_{n\geq 0} g_{f,n}\cdot {u^n\over n!}. \end{equation} Then we have \begin{equation}\label{Gf_expression_1} G_f = \sum_{n\geq 0} \sum_{\gamma\models n} {a_{\gamma_1} a_{\gamma_2} \cdots\over \gamma_1! \gamma_2! \cdots}\cdot M_\gamma u^n = \prod_{i\geq 1} f(x_i u). \end{equation} Now let us write \begin{equation}\label{log_f} \ln(f(x)) = \sum_{n\geq 1} {b_n x^n\over n!}. \end{equation} Then from (\ref{Gf_expression_1}) we have \begin{equation}\label{Gf_expression_2} \ln G_f = \sum_{i\geq 1} \ln(f(x_i u)) = \sum_{n\geq 1} b_n p_n(x_1, x_2, \ldots)\cdot {u^n\over n!}. 
\end{equation} Since the power sum symmetric functions $p_\lambda = p_{\lambda_1} p_{\lambda_2}\cdots$, with $\lambda$ ranging over all partitions of positive integers, form a basis for the ring of symmetric functions, the transformation $p_n \mapsto b_n p_n u^n/(n-1)!$, where $u$ is regarded as a scalar, extends to a homomorphism of this ring. Applying this homomorphism to the well-known identity $$ \exp \sum_{n\geq 1} {1\over n}\cdot p_n = \sum_{\lambda}z_\lambda^{-1} p_\lambda, $$ where $\lambda$ ranges over all partitions of positive integers, we obtain from (\ref{Gf_expression_2}) that \begin{eqnarray} \nonumber G_f&=& \exp \sum_{n\geq 1} {1\over n}\cdot\left({b_n p_n u^n\over (n-1)!}\right)\\ &=&\sum_{\lambda}z_{\lambda}^{-1} \cdot {b_{\lambda_1} b_{\lambda_2}\cdots\over (\lambda_1-1)!(\lambda_2-1)!\cdots} \cdot p_\lambda u^{|\lambda|}. \label{Gf_final} \end{eqnarray} Comparing the coefficients of $u^n$ in (\ref{Gf_definition}) and (\ref{Gf_final}), we conclude the following: \begin{proposition}\label{gfn_expression} For a function $f(x)$ with $f(0) = 1$ and $\ln(f(x)) = \sum_{n\geq 1} b_n x^n / n!$ we have $$ g_{f,n} = \sum_{\lambda\vdash n} {n!\over z_\lambda} \cdot {b_{\lambda_1}b_{\lambda_2}\cdots\over (\lambda_1 - 1)! (\lambda_2 - 1)!\cdots} \cdot p_\lambda. $$ \end{proposition} Two special cases related to earlier discussion are $f(x) = e^x$ and $f(x) = \tan x + \sec x$. For $f(x) = e^x$, we have $b_1 = 1$, $b_2 = b_3 = \cdots = 0$, and hence $g_{f,n} = p_1^n$. In the case of $f(x)= \tan x+\sec x$, we have $$ b_i = \left\{ \begin{array}{ll} E_{i-1} & \mbox{if $i$ is odd}, \\ 0 & \mbox{if $i$ is even}, \end{array} \right. $$ thus the coefficient at $p_\lambda$ in the expression of Proposition \ref{gfn_expression} coincides with the coefficient in the term for $\lambda$ in the definition of the polynomial $\hat{f}_n(m)$ of Theorem~\ref{f-eulerian}. 
These observations lead to the following restatements of the classical identity~(\ref{A_over_identity}) and Theorem~\ref{f-eulerian}. \begin{proposition}\label{gfn_substitution} Let $g(1^m)$ denote the evaluation of $g(x_1,x_2,\ldots)$ at $x_1 = x_2 = \cdots = x_m = 1$, $x_{m+1} = x_{m+2} = \cdots = 0$. Then $$ {A_n(t)\over (1-t)^{n+1}} = \sum_{m\geq 1} g_{\exp,n}(1^m) \cdot t^m $$ and $$ {\hat{A}_n(t)\over(1-t)^{n+1}}= \sum_{m\geq 1}g_{\tan+\sec,n}(1^m)\cdot t^m. $$ \end{proposition} \begin{proof} We have $p_i(1^m) = m$, and hence $p_\lambda(1^m) = m^{\ell(\lambda)}$. \end{proof} It is an interesting problem to prove Proposition \ref{gfn_substitution} without referring to the results of Section \ref{Eulerian-polynomials}. Observe that for $\gamma = (\gamma_1, \gamma_2, \ldots, \gamma_k) \models n$, we have $M_\gamma(1^m) = {m \choose k}$, the number of monomials $x_{i_1}^{\gamma_1} x_{i_2}^{\gamma_2} \cdots x_{i_k}^{\gamma_k}$ where $1 \leq i_1 < \cdots < i_k \leq m$, which are the monomials in the definition of $M_\gamma$ that evaluate to $1$. It would also be of interest to relate the observations of this section to Schur functions. One possibility is to consider the following generalization of the \emph{complete homogeneous symmetric function}. Let $\varphi_f$ be the homomorphism of the ring of symmetric functions defined by $p_n \mapsto b_n p_n / (n-1)!$, where the $b_i$'s are as in equation (\ref{log_f}). Let $$ h_{f,n} := \sum_{\lambda \vdash n} z_\lambda^{-1} \varphi_f(p_\lambda). $$ For $f(x) = (1-x)^{-1}$, the homomorphism $\varphi_f$ is the identity, and $h_{f,n}$ is the standard complete homogeneous symmetric function $h_n$, defined to be the sum of all monomials in $x_1$, $x_2$, \ldots, of degree $n$. Then (\ref{Gf_final}) becomes $$ G_f = \sum_{n\geq 0} h_{f,n} u^n $$ (we do not really need $u$ here because of homogeneity).
We can define the generalized Schur function $s_{f,\lambda}$, where $\lambda = (\lambda_1,\lambda_2,\ldots) \vdash n$, by the \emph{Jacobi-Trudi identity} $$ s_{f,\lambda} :=\det\Bigl[\ h_{f,\ \lambda_i -i+j}\ \Bigr]_{1\leq i,j\leq n}\ , $$ where $h_{f,0} = 1$ and $h_{f,k} = 0$ for $k<0$ (see \cite[Sec.\ 7.16]{StanleyEC2}). What can be said about $s_{f,\lambda}$ for $f(x)=e^x$ and $f(x) = \tan x + \sec x$? \section{The alternating Eulerian numbers}\label{alt_eulerian_numbers} In this section we give a recurrence relation that allows one to construct a triangle of alternating Eulerian numbers $\hat{A}(n,k)$ introduced in Section \ref{Eulerian-polynomials}. (Recall that $\hat{A}(n,k)$ denotes the number of permutations in $\mathfrak S_n$ with $k-1$ alternating descents.) The first few rows of this triangle are given in Table~\ref{alt_eulerian_triangle}. \begin{table}[h!] \begin{center} \begin{tabular}{ccccccccccccccc} &&&&&&1&&&&&&\\[6pt] &&&&&1&&1&&&&&\\[6pt] &&&&2&&2&&2&&&&\\[6pt] &&&5&&7&&7&&5&&&\\[6pt] &&16&&26&&36&&26&&16&&\\[6pt] &61&&117&&182&&182&&117&&61&\\[6pt] 272&&594&&1056&&1196&&1056&&594&&272 \end{tabular} \end{center} \caption{Triangle of alternating Eulerian numbers}\label{alt_eulerian_triangle} \end{table} The following lemma provides a way to compute alternating Eulerian numbers given the initial condition $\hat{A}(n,1) = E_n$. \begin{lemma}\label{alt_Eulerian_recursion} For $n \geq k\geq 0$ we have \begin{eqnarray} \nonumber && \sum_{i=0}^n\sum_{j=0}^k{n\choose i}\cdot\hat{A}(i,j+1) \cdot\hat{A}(n-i,k-j+1)\\ && = (n+1-k)\hat{A}(n, k+1) + (k+1) \hat{A}(n, k+2). \label{hatA_recursion} \end{eqnarray} \end{lemma} \begin{proof} First, suppose that $k$ is even.
The left hand side of the equation counts the number of ways to split the elements of $[n]$ into two groups of sizes $i$ and $n-i$, arrange the elements in the first and the second group so that the resulting permutations have $j$ and $k-j$ alternating descents, respectively, and write down the second permutation after the first to form a permutation of $[n]$. This permutation has either $k$ or $k+1$ alternating descents, depending on whether an alternating descent is produced at position $i$. For a permutation $\sigma\in\mathfrak S_n$ with exactly $k$ alternating descents, there are exactly $n+1-k$ ways to produce $\sigma$ by means of the above procedure, one for every choice of $i \in \{0,1,\ldots,n\}\setminus\hat{D}(\sigma)$. Similarly, for $\sigma\in\mathfrak S_n$ with exactly $k+1$ alternating descents, there are exactly $k+1$ ways to produce $\sigma$, one for every choice of $i\in\hat{D}(\sigma)$. The identity follows. As for odd $k$, the same argument is valid, except that the quantity $\hat{A}(n-i,k-j+1)$ in the left hand side should be interpreted as the number of ways to arrange the elements of the second group to form a permutation with $k-j$ alternating \emph{ascents}, which become alternating descents when the two permutations are concatenated. \end{proof} Recall the generating function $$ F(t,u) = \sum_{n,k\geq 1} \hat{A}(n,k)\cdot {t^{k-1} u^n\over n!} $$ introduced in Section \ref{Eulerian-polynomials}. An alternative way to express $F(t,u)$ and obtain the result of Theorem \ref{final_F} is by solving a partial differential equation arising from the recurrence of Lemma \ref{alt_Eulerian_recursion}. \begin{proposition}\label{hatA_PDE} The function $F(t,u)$ is the solution of the partial differential equation \begin{equation}\label{PDE} F^2-F=u\cdot {\partial F\over\partial u} + (1-t)\cdot{\partial F\over \partial t} \end{equation} with the initial condition $F(0,u) = \tan u + \sec u$.
\end{proposition} \begin{proof} Since $\hat{A}(n,0)=0$ for all $n$, the left hand side of (\ref{hatA_recursion}) is $n!$ times the coefficient of $t^k u^n$ in $\bigl(F(t,u)\bigr)^2$, which we denote by $[t^k u^n]F^2$. The right hand side of (\ref{hatA_recursion}) is \begin{eqnarray*} && n!\cdot\left( {\hat{A}(n,k+1)\over (n-1)!}+ {\hat{A}(n,k+1)\over n!} -{k\hat{A}(n,k+1)\over n!}+ {(k+1)\hat{A}(n,k+2)\over n!} \right)\\ &&=n!\cdot \Bigl( [t^k u^n] F_u + [t^k u^n] F - [t^{k-1} u^n] F_t + [t^k u^n] F_t \Bigr)\\ &&=n!\cdot [t^k u^n]\left( u F_u + F - t F_t + F_t \right), \end{eqnarray*} where $F_t$ and $F_u$ denote partial derivatives of $F$ with respect to $t$ and $u$. Equating the above with $n!\cdot [t^k u^n] F^2$ proves (\ref{PDE}). \end{proof} \section{The generating function for the alternating descent set statistic} \label{cd-stuff} Besides the generating polynomials for the alternating descent statistic, another natural generating function to consider is one counting permutations by their alternating descent set. We begin by stating some well-known facts about the analogous generating function for the classical descent set statistic. Fix a positive integer $n$. For a subset $S\subseteq [n-1]$, define the monomial $u_S$ in two non-commuting variables $\aaa$ and $\bbb$ by $u_S = u_1 u_2 \cdots u_{n-1}$, where $$ u_i = \left\{\begin{array}{ll}\aaa & \mbox{if $i\notin S$,}\\ \bbb & \mbox{if $i\in S$.}\end{array}\right. $$ Consider the generating function $$ \Psi_n(\aaa,\bbb) := \sum_{S\subseteq [n-1]} \beta_n(S) u_S, $$ where $\beta_n(S)$ is the number of permutations in $\mathfrak S_n$ with descent set $S$. The polynomial~$\Psi_n(\aaa,\bbb)$ is known as the \emph{$ab$-index of the Boolean algebra $B_n$}. A remarkable property of $\Psi_n(\aaa,\bbb)$ (and also of $ab$-indices of a wide class of posets, including face lattices of polytopes) is that it can be expressed in terms of the variables $\ccc = \aaa + \bbb$ and $\ddd = \aaa \bbb + \bbb \aaa$. 
The polynomial $\Phi_n(\ccc,\ddd)$ defined by $\Psi_n(\aaa,\bbb) = \Phi_n(\aaa+\bbb,\ \aaa\bbb+\bbb\aaa)$ is called the \emph{$cd$-index of $B_n$}. The polynomial $\Phi_n(\ccc,\ddd)$ has positive integer coefficients, for which several combinatorial interpretations have been found. Here we give one that will help establish a connection with the alternating descent set statistic. We proceed with a definition. \begin{definition}\label{simsun} A permutation is \emph{simsun} if, for all $k\geq 0$, removing $k$ largest elements from it results in a permutation with no consecutive descents. \end{definition} Let $\mathfrak SSS_n$ be the set of simsun permutations in $\mathfrak S_n$ whose last element is $n$. (Thus $\mathfrak SSS_n$ is essentially the set of simsun permutations of $[n-1]$ with an $n$ attached at the end.) It is known that $|\mathfrak SSS_n| = E_n$. For a permutation $\sigma\in\mathfrak SSS_n$, define the $(\ccc,\ddd)$-monomial $\cd(\sigma)$ as follows: write out the descent set of $\sigma$ as a string of pluses and minuses denoting ascents and descents, respectively, and then replace each occurrence of `` -- + '' by $\ddd$, and each remaining plus by $\ccc$. This definition is valid because a simsun permutation has no consecutive descents. For example, consider the permutation $423516 \in \mathfrak SSS_6$. Its descent set in the above notation is `` -- + + -- + '', and thus $\cd(423516) = \ddd\ccc\ddd$. The simsun permutations provide a combinatorial expression for the $cd$-index of~$B_n$: \begin{equation}\label{Phi_via_simsun} \Phi_n(\ccc,\ddd) = \sum_{\sigma\in\mathfrak SSS_n} \cd(\sigma). \end{equation} Now let us define the analog of $\Psi_n(\aaa,\bbb)$ for the alternating descent set statistic: $$ \hat{\Psi}_n(\aaa,\bbb):= \sum_{S\subseteq [n-1]} \hat{\beta}_n(S) u_S. 
$$ \begin{proposition}\label{hat_Phi} There exists a polynomial $\hat{\Phi}_n(\ccc,\ddd)$ such that $$\hat{\Phi}_n(\aaa+\bbb,\ \aaa\bbb+\bbb\aaa)=\hat{\Psi}_n(\aaa,\bbb),$$ namely, $\hat{\Phi}_n(\ccc,\ddd) = \Phi_n(\ccc,\ \ccc^2-\ddd)$. \end{proposition} \begin{proof} Note that $\hat{\Psi}_n(\aaa,\bbb)$ is the polynomial obtained from $\Psi_n(\aaa,\bbb)$ by switching the letters at even positions in all the $(\aaa,\bbb)$-monomials. For example, we have $\Psi_3(\aaa,\bbb) = \aaa\aaa + 2\aaa\bbb + 2\bbb\aaa + \bbb\bbb$, so $\hat{\Psi}_3(\aaa,\bbb) = \aaa\bbb + 2\aaa\aaa + 2\bbb\bbb + \bbb\aaa$. In terms of the variables $\ccc$ and $\ddd$, this operation corresponds to replacing $\ddd = \aaa\bbb+\bbb\aaa$ with $\aaa\aaa+\bbb\bbb = \ccc^2 - \ddd$, and $\ccc = \aaa+\bbb$ with either $\aaa+\bbb$ or $\bbb+\aaa$, which in any case is still equal to $\ccc$. \end{proof} The polynomial $\hat{\Phi}_n(\ccc,\ddd)$ has both positive and negative coefficients, but the polynomial~$\hat{\Phi}_n(\ccc,\ -\ddd) = \Phi_n(\ccc,\ \ccc^2+\ddd)$ has only positive coefficients. It would be nice to give a combinatorial interpretation for these coefficients similar to that of the coefficients of $\Phi_n(\ccc,\ddd)$, so that the coefficients of $\hat{\Phi}_n(\ccc,\ -\ddd)$ enumerate permutations of a certain kind according to some statistic. In what follows we show that the sum of the coefficients of $\hat{\Phi}_n(\ccc,\ -\ddd)$ is equal to the number of permutations containing no consecutive descents and not ending with a descent. Let $\mathcal R_n$ denote the set of such permutations of $[n]$. In working with the different kinds of permutations that have emerged thus far we use the approach of min-tree representation of permutations introduced by Hetyei and Reiner \cite{HetyeiReiner}. To a word $w$ whose letters are distinct elements of $[n]$, associate a labeled rooted planar binary tree according to the following recursive rule.
Let $m$ be the smallest letter of $w$, and write $w = w_1\circ m\circ w_2$, where $\circ$ denotes concatenation. Then form the tree $T(w)$ by labeling the root with $m$ and setting the left and the right subtrees of the root to be $T(w_1)$ and $T(w_2)$, respectively. To the empty word we associate the empty tree. Thus $T(w)$ is an increasing rooted planar binary tree, i.e. the distinction between left and right children is being made. For example, $T(423516)$ is the tree shown in Figure \ref{sample_tree}. \begin{figure} \caption{The tree $T(423516)$} \label{sample_tree} \end{figure} To get the word $w$ back from the tree $T(w)$, simply read the labels of the nodes of $T(w)$ in topological order. Next, we formulate some of the permutation properties from the above discussion in terms of the min-tree representation. \begin{lemma}\label{no_consecutive_descents} A permutation $\sigma$ has no consecutive descents if and only if the tree $T(\sigma)$ has no node whose only child is a left child, except maybe for the rightmost node in topological order. \end{lemma} \begin{proof} Write $\sigma = s_1 s_2 \cdots s_n$ and $T = T(\sigma)$. For convenience, we refer to the nodes of~$T$ by their labels. We have $s_i > s_{i+1}$ if and only if $s_{i+1}$ is an ancestor of $s_i$ in~$T$. Since $s_i$ and $s_{i+1}$ are consecutive nodes in the topological reading of $T$, it follows that $s_{i+1}$ is an ancestor of $s_i$ if and only if $s_i$ has no right child. Thus we have $s_i > s_{i+1} > s_{i+2}$ if and only if $s_{i+1}$ has no right child and $s_i$ is a descendant of $s_{i+1}$, i.e. $s_{i+1}$ has a lone left child. The proposition follows. \end{proof} \begin{proposition}\label{Rn_trees} A permutation $\sigma$ is in $\mathcal R_n$ if and only if the tree $T(\sigma)$ has no node whose only child is a left child. \end{proposition} \begin{proof} We have $s_{n-1} > s_n$ if and only if the rightmost node $s_n$ has a (lone) left child. 
The proposition now follows from Lemma \ref{no_consecutive_descents}. \end{proof} \begin{proposition}\label{simsun-trees} A permutation $\sigma$ is in $\mathfrak SSS_n$ if and only if the rightmost node of~$T(\sigma)$ is labeled $n$, no node has a lone left child, and for every node $s$ not on the rightmost path (the path from the root to the rightmost node) that has both a left child $t$ and a right child $u$, the inequality $t>u$ holds. \end{proposition} \begin{proof} If $T(\sigma)$ has a node $s$ not on the rightmost path whose left child $t$ is smaller than its right child $u$, then removing the elements of $\sigma$ that are greater than or equal to $u$ results in a permutation $\sigma'$ such that in $T(\sigma')$, the node $s$ has a lone left child $t$ and is not the rightmost node, meaning that $\sigma'$ contains a pair of consecutive descents, by Lemma \ref{no_consecutive_descents}. If on the other hand $T(\sigma)$ has no such node $s$, then removing the $k$ largest elements of $\sigma$ does not create any nodes with a lone left child except maybe for the rightmost node. \end{proof} One can see that for $\sigma = 423516$, the tree $T(\sigma)$ shown in Figure~\ref{sample_tree} satisfies all conditions of Proposition \ref{simsun-trees}, and hence $423516\in\mathfrak SSS_6$. Next, we consider the sum of coefficients of $\hat{\Phi}_n(\ccc,\ -\ddd)$. \begin{theorem}\label{sum_coefficients} The sum of coefficients of $\hat{\Phi}_n(\ccc,\ -\ddd)$ is $|\mathcal R_n|$. \end{theorem} \begin{proof} The sum of coefficients of $\hat{\Phi}_n(\ccc,\ -\ddd)$ is $\hat{\Phi}_n(1,-1) = \Phi_n(1,2)$, which equals $$ \sum_{\sigma\in\mathfrak SSS_n} 2^{d(\sigma)}, $$ where $d(\sigma)$ is the number of $\ddd$'s in $\cd(\sigma)$, or, equivalently, the number of descents of $\sigma$.
Since the descents of $\sigma$ correspond to nodes of $T(\sigma)$ that have no right child (except for the rightmost node, which corresponds to the last element of $\sigma$), it follows from Proposition \ref{Rn_trees} that the descents of a permutation $\sigma\in\mathcal R_n$ correspond to the leaves of $T(\sigma)$ minus the rightmost node. Thus for $\sigma\in\mathcal R_n$ we have that $d(\sigma)$ is the number of leaves in $T(\sigma)$ minus one, which equals the number of nodes of $T(\sigma)$ with two children. (The latter can be proved easily by induction.) For a min-tree $T$ and a node $s$ of $T$ with two children, let $F_s(T)$ be the tree obtained by switching the left and the right subtrees of the node $s$. (This operation is called the \emph{Foata-Strehl action} on the permutation encoded by $T$; see \cite{HetyeiReiner}.) For example, if $T$ is the tree $T(423516)$ shown above, then $F_2(T)$ is the tree shown in Figure \ref{sample_tree_2}. \begin{figure} \caption{The tree $F_2(T(423516))$} \label{sample_tree_2} \end{figure} Note that the action of $F_s$ preserves the set of nodes with two children and does not create any nodes with a lone left child if the original tree contained no such nodes. Hence the set $T(\mathcal R_n)$ is invariant under this action. Observe also that the operators $F_s$ commute and satisfy $F_s^2 = 1$. Thus these operators, viewed as operators on permutations corresponding to trees, split the set $\mathcal R_n$ into orbits of size $2^{d(\sigma)}$, where~$\sigma$ is any member of the orbit. It remains to show that each orbit contains exactly one permutation in $\mathfrak SSS_n$. Given $\sigma\in\mathcal R_n$, there is a unique, up to order, sequence of operators $F_s$, where $s$ is on the rightmost path, that, when applied to $T(\sigma)$, makes $n$ the rightmost node of the resulting tree. An example is shown in Figure \ref{sample_tree_3}.
\begin{figure} \caption{The action of $F_1$ and $F_2$ on a min-tree} \label{sample_tree_3} \end{figure}(One needs to find the closest ancestor of $n$ on the rightmost path and then apply the corresponding operator to bring the node $n$ closer to the rightmost path.) Once $n$ is the rightmost node, apply the operator $F_s$ to all nodes $s$ with two children for which the condition of Proposition~\ref{simsun-trees} is violated. We obtain a tree corresponding to a permutation in~$\mathfrak SSS_n$ in the orbit of $\sigma$. To see that each orbit contains only one member of~$\mathfrak SSS_n$, observe that the action of $F_s$ preserves the sequence of elements on the path from $1$ to $k$ for each $k$, and given the sequence of ancestors for each $k\in [n]$, there is a unique way of arranging the elements of $[n]$ to form a min-tree satisfying the conditions of Proposition \ref{simsun-trees}: first, set the path from $1$ to $n$ to be the rightmost path, and then set all lone children to be right children, and for all nodes with two children, set the greater element to be the left child. The proof is now complete. \end{proof} Table \ref{alt_descent_set_cd} lists the polynomials $\hat{\Phi}_n(\ccc,\ddd)$ for $n\leq 6$. \begin{table}[h!] $$ \begin{array}{l|l} n\ &\ \hat{\Phi}_n(\ccc,\ddd)\\ \hline 1\ &\ 1 \\[5pt] 2\ &\ \ccc\\[5pt] 3\ &\ 2\ccc^2 - \ddd \\[5pt] 4\ &\ 5\ccc^3 - 2 (\ccc\ddd + \ddd\ccc) \\[5pt] 5\ &\ 16\ccc^4 - 7 (\ccc^2\ddd + \ddd\ccc^2) - 5 \ccc\ddd\ccc + 4 \ddd^2 \\[5pt] 6\ &\ 61\ccc^5 - 26 (\ccc^3\ddd + \ddd\ccc^3) - 21 (\ccc\ddd\ccc^2 + \ccc^2\ddd\ccc) + 10 \ddd\ccc\ddd + 12 (\ccc\ddd^2 + \ddd^2\ccc) \end{array} $$ \caption{The polynomials $\hat{\Phi}_n(\ccc,\ddd)$} \label{alt_descent_set_cd} \end{table} \section{Shapiro-Woan-Getu permutations} \label{SWG_permutations} In this section we take a closer look at the class of permutations which we denoted by $\mathcal{R}_n$ in Section \ref{cd-stuff}. 
Recall that $\mathcal{R}_n$ is the set of permutations with no consecutive (double) descents and no descent at the end. They appear in the paper \cite{SWG} by Shapiro, Woan, and Getu (hence the section title), who call them \emph{reduced} permutations. The paper studies enumeration of permutations by the number of runs or slides, and in~\cite[Sec.~11.1]{PostnikovReinerWilliams} Postnikov, Reiner, and Williams put these results in the context of structural properties of permutohedra: for instance, the polynomial encoding the distribution of permutations in $\mathcal{R}_n$ by the number of descents is the $\gamma$-polynomial of the classical permutohedron. In Section \ref{cd-stuff}, we found the number $R_n$ of SWG permutations of size $n$ to be the sum of absolute values of coefficients of a $(\ccc,\ddd)$-polynomial that, when expanded in terms of $\aaa$ and $\bbb$, gave the generating function for the alternating descent set statistic. Shapiro, Woan, and Getu provide a generating function for $R_n$: $$ R(x) := \sum_{n\geq 0} R_n\cdot {x^n\over n!} = 1 + {2\tan(x\sqrt{3}/2)\over \sqrt{3} - \tan(x\sqrt{3}/2)} $$ (we put $R_0=1$). Observe that $R(x)R(-x) = 1$, a property that $R(x)$ shares with $e^x$ and $\tan x + \sec x$, which are the two fundamental generating functions in the analysis done in previous sections. There is a further resemblance with the Euler numbers $E_n$ if one looks at the logarithm of $R(x)$: \begin{equation}\label{ln_Rx} \ln(R(x)) =-x+2\sum_{n\geq 0}R_{2n}\cdot{x^{2n+1}\over{(2n+1)}!}. \end{equation} Comparing with \begin{equation}\label{ln_tan_sec} \ln(\tan x + \sec x) = \sum_{n\geq 0} E_{2n}\cdot {x^{2n+1}\over (2n+1)!}, \end{equation} we see that taking the logarithm has a similar effect on both $R(x)$ and $\tan x + \sec x$ of taking the even part and integrating, except that for $R(x)$ all coefficients excluding that of $x$ are doubled.
The fact that \begin{equation}\label{int_sec} \int \sec x\ dx = \ln(\tan x + \sec x) \end{equation} (omitting the arbitrary constant of integration) has been used in the proof of Theorem~\ref{f-eulerian}. This textbook integral formula can be proved combinatorially using the exponential formula for generating functions (see \cite[Sec.\ 5.1]{StanleyEC2}). Given an up-down permutation $\sigma$, divide $\sigma$ into blocks by the following procedure. Put the subword of $\sigma$ starting at the beginning of $\sigma$ and ending at the element equal to $1$ in the first block, and remove this block from $\sigma$. In the resulting word, find the \emph{maximum} element $m_2$ and put the subword consisting of initial elements of the word up to, and including,~$m_2$ in the second block, and remove the second block. In the remaining word, find the \emph{minimum} element $m_3$, and repeat until there is nothing left, alternating between cutting at the minimum and at the maximum element of the current word. For example, for $\sigma = 5 9 3 4 1 8 6 7 2$, the blocks would be $59341$, $8$, and $672$. Note that given the blocks one can uniquely recover the order in which they must be concatenated to form the original permutation $\sigma$. Indeed, the first block is the one containing $1$, the second block contains the largest element not in the first block, the third block contains the smallest element not in the first two blocks, and so on. Thus to construct an up-down permutation of size $n$ we need to divide the elements of $[n]$ into blocks of \emph{odd} size, then determine the order of concatenation using the above principle, and then arrange the elements of odd numbered blocks in up-down order and those of even numbered blocks in down-up order. There are $E_{k-1}$ ways to arrange the elements in a block of size $k$ for odd $k$, and $0$ ways for even $k$ since we do not allow blocks of even size.
Thus (\ref{ln_tan_sec}), which is equivalent to (\ref{int_sec}), follows from the exponential formula. This argument ``combinatorializes'' the proof of Theorem \ref{f-eulerian}. It would be nice to give a similar argument for reduced permutations $\mathcal{R}_n$. \begin{problem} Find a combinatorial proof of the formula (\ref{ln_Rx}) for $\ln(R(x))$. \end{problem} Another problem emerging from the results of Section \ref{cd-stuff} is the following. \begin{problem} Give a combinatorial interpretation of the coefficients of the polynomial $\hat{\Phi}_n(\ccc,\ -\ddd)$ by partitioning the set $\mathcal{R}_n$ into classes corresponding to the $F_{n-1}$ monomials. \end{problem} It is worth pointing out here that even though one can split $\mathcal{R}_n$ into $F_{n-1}$ classes corresponding to $(\ccc,\ddd)$-monomials by descent set, like it was done for simsun permutations in Section \ref{cd-stuff}, the resulting polynomial is different from $\hat{\Phi}_n(\ccc,\ -\ddd)$. There are a few hints on what the correct way to refine permutations in $\mathcal{R}_n$ could be. The coefficient of $\ccc^{n-1}$ in $\hat{\Phi}_n(\ccc,\ -\ddd)$ is the Euler number $E_{n}$, and the set $\mathcal{R}_n$ includes at least three kinds of permutations mentioned in this paper that are counted by $E_{n}$: alternating permutations ending with an ascent, simsun permutations, and permutations $\sigma\in\mathfrak S_{n}$ such that $\sigma \circ (n+1)$ has no $3$-descents. Values of $\hat{\Phi}_n$ for small $n$, including those listed in Table~\ref{alt_descent_set_cd}, present evidence that the common coefficient of $\ccc^{n-3}\ddd$ and $\ddd\ccc^{n-3}$ is $\hat{A}(n-1,2)$ (the number of permutations of size $n-1$ with exactly one alternating descent). 
\section{A $q$-analog of Euler numbers}\label{Euler-q-analog} Let $\hat{A}_n(t,q)$ denote the bivariate polynomial of Theorem \ref{main_bivariate_identity}: $$ \hat{A}_n(t,q) := \sum_{\sigma\in\mathfrak S_n} t^{\hat{d}(\sigma)} q^{\hat{\imath}(\sigma)}. $$ Then the alternating Eulerian polynomial $\hat{A}_n(t)$ is just the specialization $t\hat{A}_n(t,1)$. We also noted earlier (Corollary \ref{hat_code_mahonian}) that $$ \hat{A}_n(1,q) = [n]_q!, $$ the classical $q$-analog of the factorial defined by $[n]_q! := [1]_q[2]_q\cdots [n]_q$, where $[i]_q := 1+q+q^2+\cdots+q^{i-1}$. One can ask about other specializations of $\hat{A}_n(t,q)$, such as the ones with $t$ or $q$ set to $0$. Clearly, we have $\hat{A}_n(t,0) = 1$ because the only permutation~$\sigma\in\mathfrak S_n$ for which $\hat{\imath}(\sigma) = 0$ also satisfies $\hat{d}(\sigma) = 0$. The case of $t=0$ is more curious and is the subject of this section. We have $\hat{d}(\sigma) = 0$ if and only if $\sigma$ is an up-down permutation. Thus $\hat{A}_n(0,1) = E_n$, and the specialization $\hat{A}_n(0,q)$ gives a $q$-analog of the Euler number $E_n$ with coefficients encoding the distribution of the number of alternating inversions among up-down permutations. The following lemma is key in understanding this $q$-analog. \begin{lemma}\label{alt_inversion_criterion} For a permutation $\sigma\in\mathfrak S_n$, let $\hat{\code}(\sigma) = (\hat{c}_1,\hat{c}_2,\ldots,\hat{c}_{n-1})$. Then $\sigma$ is up-down (resp.,\ down-up) if and only if $\hat{c}_i + \hat{c}_{i+1} \leq n-1-i$ (resp.,\ $\hat{c}_i + \hat{c}_{i+1} \geq n-i$) for all $i$. \end{lemma} \begin{proof} This fact is just a special case of Lemma \ref{hatD_from_hatcode}. \end{proof} For various reasons it is more convenient to study the distribution of $\hat{\imath}$ on down-up, rather than up-down, permutations. 
The $q$-analog obtained this way from down-up permutations is essentially equivalent to $\hat{A}_n(0,q)$, the difference being the reverse order of coefficients and a power of $q$ factor. It follows from Lemma \ref{alt_inversion_criterion} that for a down-up permutation $\sigma\in\mathfrak S_n$, we have \begin{equation}\label{min_down_up_alt_inv} \hat{\imath}(\sigma) \geq (n-1) + (n-3) + (n-5) + \cdots = \left\lfloor {n^2\over 4} \right\rfloor. \end{equation} Therefore let $\Alt_n$ be the set of down-up permutations in $\mathfrak S_n$, and define $$ \hat{E}_n(q) := q^{-\lfloor n^2/4 \rfloor} \sum_{\sigma\in\Alt_n} q^{\hat{\imath}(\sigma)}. $$ The values of $\hat{E}_n(q)$ for small $n$ are given in Table \ref{hatEnq}. \begin{table}[t!] $$ \begin{array}{l|l} n & \hat{E}_n(q)\\\hline 0, 1, 2 & 1\\[3pt] 3 & 1+q\\[3pt] 4 & 2+2q+q^2\\[3pt] 5 & 2+5q+5q^2+3q^3+q^4\\[3pt] 6 & 5+12q+16q^2+14q^3+9q^4+4q^5+q^6\\[3pt] 7 & 5+21q+42q^2+56q^3+56q^4+44q^5+28q^6+14q^7+5q^8+q^9 \end{array} $$ \caption{The polynomials $\hat{E}_n(q)$ for $n\leq 7$} \label{hatEnq} \end{table} We have the following facts about $\hat{E}_n(q)$. \begin{proposition}\label{hatEq_facts} (a) The polynomial $\hat{E}_n(q)$ is monic and has degree $\left\lfloor {(n-1)^2\over 4}\right\rfloor$. \vskip4pt \noindent (b) $\hat{A}_n(0,q) = q^{\lfloor (n-1)^2/4 \rfloor}\cdot\hat{E}_n(q^{-1})$. \vskip4pt \noindent (c) $\hat{E}_n(0) = c_{\lfloor n/2 \rfloor}$, the $\lfloor n/2 \rfloor$-th Catalan number. \end{proposition} \begin{proof} (a) By Proposition \ref{hat_code}, the unique permutation $\sigma\in\mathfrak S_n$ with the maximum possible number of alternating inversions is the one for which $\hat{\code}(\sigma) = (n-1,n-2,\ldots,1)$. By Lemma \ref{alt_inversion_criterion}, or by simply realizing that $\sigma = n \circ 1 \circ (n-1) \circ 2 \circ \cdots$, one can see that $\sigma\in \Alt_n$. 
We have $\hat{\imath}(\sigma) = n(n-1)/2$, and thus the degree of $\hat{E}_n(q)$ is $n(n-1)/2 - \lfloor n^2/4 \rfloor = \lfloor (n-1)^2/4 \rfloor$. \vskip4pt (b) This identity is an algebraic restatement of an earlier observation. \vskip4pt (c) The constant term $\hat{E}_n(0)$ of $\hat{E}_n(q)$ is the number of permutations $\sigma\in\Alt_n$ with exactly $\lfloor n^2/4 \rfloor$ alternating inversions. By (\ref{min_down_up_alt_inv}), these are precisely the permutations in $\Alt_n$ satisfying $\hat{c}_{i} + \hat{c}_{i+1} = n-i$ for odd $i$. Let $\sigma\in\Alt_n$ be a permutation with this property. For $j\geq 1$, we have $\hat{c}_{2j} \geq n-2j - \hat{c}_{2j+1} = \hat{c}_{2j+2} - 1$. Thus $\hat{c}_2, \hat{c}_4, \ldots, \hat{c}_{2\lfloor n/2 \rfloor}$ is a strictly decreasing sequence of non-negative integers satisfying $\hat{c}_{2j} \leq n-2j$ (for convenience, let $\hat{c}_n = 0$). Reversing the sequence and reducing the $k$-th term by~$k-1$ for all $k$ yields a bijective correspondence with sequences of $\lfloor n/2 \rfloor$ non-negative integers whose $k$-th term does not exceed $k-1$, and it is well known that there are $c_{\lfloor n/2 \rfloor}$ such sequences. Since $\hat{c}_{2j-1}$ is uniquely determined by $\hat{c}_{2j}$, it follows that there are $c_{\lfloor n/2 \rfloor}$ permutations $\sigma\in\Alt_n$ with $\lfloor n^2/4 \rfloor$ alternating inversions. \end{proof} It is curious to note that the permutations in $\Alt_n$ with $\lfloor n^2/4 \rfloor$ alternating inversions can be characterized in terms of pattern avoidance, so that Proposition \ref{hatEq_facts}(c) follows from a result of Mansour \cite{Mansour} stating that the number of $312$-avoiding down-up permutations of size $n$ is $c_{\lfloor n/2 \rfloor}$. \begin{proposition}\label{alt_avoiding_312} A permutation $\sigma\in\Alt_n$ has $\hat{\imath}(\sigma) = \lfloor n^2/4 \rfloor$ if and only if $\sigma$ is $312$-avoiding. 
\end{proposition} The following lemma implies the above proposition and is useful in the later discussion as well. \begin{lemma}\label{alt_inversions_31-2} For a permutation $\sigma = \sigma_1\sigma_2\cdots \sigma_n\in\Alt_n$, the number $\hat{\imath}(\sigma)$ is equal to $\lfloor n^2/4\rfloor$ plus the number of occurrences of the generalized pattern $31$-$2$ (that is, the number of pairs of indices $i < j$ such that $\sigma_{i+1} < \sigma_j < \sigma_{i}$). \end{lemma} \begin{proof} For $i\in [n-1]$, define $$ S_i := \left\{ \begin{array}{ll} \{j\ |\ j>i\mbox{ and }\sigma_i>\sigma_j\}& \mbox{if $i$ is odd;}\\ \{j\ |\ j>i\mbox{ and }\sigma_i<\sigma_j\}& \mbox{if $i$ is even.} \end{array} \right. $$ Thus $\hat{c}_i = |S_i|$. Let $i$ be \emph{odd}. Then $\sigma_i > \sigma_{i+1}$, so $i+1 \in S_i$ and for every $j>i+1$, either $\sigma_j < \sigma_i$ or $\sigma_j > \sigma_{i+1}$, or both. Hence $\{i+1,i+2,\ldots,n\}\subseteq S_i\cup S_{i+1}$ and $\hat{c}_i + \hat{c}_{i+1} = n-i + |S_i\cap S_{i+1}|$. But $S_i\cap S_{i+1}$ is the set of indices $j>i+1$ such that $\sigma_{i+1} < \sigma_j < \sigma_{i}$, so $|S_i\cap S_{i+1}|$ is the number of occurrences of the pattern $31$-$2$ beginning at position $i$. Therefore the total number of alternating inversions is $\sum_{i\ odd}\ (n-i) = \lfloor n^2/4 \rfloor$ plus the total number of occurrences of $31$-$2$. \end{proof} \begin{proof_}{Proof of Proposition \ref{alt_avoiding_312}.} Suppose that a permutation $\sigma\in\Alt_n$ has exactly $\lfloor n^2/4 \rfloor$ alternating inversions but is not $312$-avoiding. Choose a triple $i < k < j$ such that $\sigma_k < \sigma_j < \sigma_i$ and the difference $k-i$ is as small as possible. Suppose that $k-i \geq 2$. If $\sigma_{k-1} < \sigma_j$, then we have $\sigma_{k-1} < \sigma_j < \sigma_i$, contradicting the choice of $i$, $k$, and $j$. If $\sigma_{k-1} > \sigma_j$, then we have $\sigma_k < \sigma_j < \sigma_{k-1}$, also contradicting the choice of $i$, $k$, and $j$.
Hence $k = i+1$, and we obtain a contradiction by Lemma \ref{alt_inversions_31-2}. \end{proof_} In view of Lemma \ref{alt_inversions_31-2}, we can write $\hat{E}_n(q)$ as $$ \hat{E}_n(q) = \sum_{\sigma\in\Alt_n} q^{\occp(\sigma)} $$ where $\occp(\sigma)$ is the number of occurrences of $31$-$2$ in $\sigma$. In what follows, we use this expression to show how a $q$-analog of a combinatorial identity representing the Euler number $E_n$ as a weighted sum of Dyck paths yields a refined identity of $\hat{E}_n(q)$. First, we need to introduce Dyck paths, which are perhaps the most famous combinatorial objects counted by Catalan numbers. A \emph{Dyck path} of length $2m$ is a continuous path consisting of line segments, or \emph{steps}, each of which connects an integer point $(x,y)$ with either $(x+1,y-1)$ or $(x+1,y+1)$, such that the path starts at~$(0,0)$, ends at $(2m,0)$, and never goes below the $x$-axis, that is, contains no point with a negative $y$-coordinate. The identity we are about to describe involves associating a certain weight with every step of a Dyck path, defining the weight of the entire path to be the product of the weights of the individual steps, and adding the weights of all Dyck paths of length $2m$ to obtain $E_{2m}$ or $E_{2m+1}$, or, in the case of the refined identity, $\hat{E}_{2m}(q)$ or $\hat{E}_{2m+1}(q)$. For a step in a Dyck path, define the \emph{level} of that step to be the $y$-coordinate of the highest point of the corresponding segment of the path. Given a Dyck path $\mathcal{D}$ of length $2m$, let $\ell(i)$ be the level of the $i$-th step of $\mathcal{D}$. Define $$ w^e_{\mathcal{D},i}(q) := [\ell(i)]_q $$ and $$ w^o_{\mathcal{D},i}(q) := \left\{ \begin{array}{ll} [\ell(i)]_q, & \mbox{if the $i$-th step is an up-step};\\ [\ell(i)+1]_q, & \mbox{if the $i$-th step is a down-step}. \end{array} \right. 
$$ As mentioned above, we set the weight of the entire path to be the product of step weights: \begin{eqnarray*} w^e_{\mathcal{D}}(q) &=& \prod_{i=1}^{2m} w^e_{\mathcal{D},i}(q);\\ w^o_{\mathcal{D}}(q) &=& \prod_{i=1}^{2m} w^o_{\mathcal{D},i}(q). \end{eqnarray*} \begin{theorem}\label{weighted_path_identity} We have $$ \sum_{\mathcal{D}} w^e_{\mathcal{D}}(q) = \hat{E}_{2m}(q) $$ and $$ \sum_{\mathcal{D}} w^o_{\mathcal{D}}(q) = \hat{E}_{2m+1}(q), $$ where both sums are taken over all Dyck paths of length $2m$. \end{theorem} For example, for $m=2$ there are two Dyck paths, shown in Figures \ref{paths1} and \ref{paths2} with step weights given by $w^e_{\mathcal{D},i}(q)$ and $w^o_{\mathcal{D},i}(q)$. \begin{figure} \caption{Weighted Dyck paths adding up to $\hat{E}_4(q)$} \label{paths1} \end{figure} \begin{figure} \caption{Weighted Dyck paths adding up to $\hat{E}_5(q)$} \label{paths2} \end{figure} From these weighted paths, we get $$ 1 + (1+q)^2 = 2 + 2q + q^2 = \hat{E}_4(q) $$ and $$ (1+q)^2 + (1+q)^2(1+q+q^2) = 2 + 5q + 5q^2 + 3q^3 + q^4 = \hat{E}_5(q). $$ In the classical case $q=1$, the identities of Theorem \ref{weighted_path_identity} are due to Fran\c{c}on and Viennot \cite{FranconViennot}, and are discussed in a broader context in the book \cite[Sec.\ 5.2]{GouldenJackson} by Goulden and Jackson. The proof of our identities is a refinement of the original argument. \vskip12pt \begin{proof_}{Proof of Theorem \ref{weighted_path_identity}.} Fix a positive integer $n>1$, and let $m = \lfloor n/2 \rfloor$. Recall that in Section \ref{cd-stuff} we associated to a permutation $\sigma \in \mathfrak S_n$ an increasing planar binary tree $T(\sigma)$ with vertex set $[n]$. Extending the argument in the proof of Lemma \ref{no_consecutive_descents}, we conclude that $\sigma$ is in $\Alt_n$ if and only if the tree $T(\sigma)$ has no vertices with a lone child, except for the rightmost vertex in the case of even $n$, which has a lone left child.
For $\sigma \in\Alt_n$, define the corresponding Dyck path $\mathcal{D}(\sigma)$ of length $2m$ as follows: set the $i$-th step of the path to be an up-step if vertex $i$ of $T(\sigma)$ has at least one child, and set the $i$-th step to be a down-step if vertex $i$ is a leaf of $T(\sigma)$.\ We leave it as an exercise for the reader to check that $\mathcal{D}(\sigma)$ is a valid Dyck path. Fix a Dyck path $\mathcal{D}$ of length $2m$. We claim that \begin{equation}\label{fixed_path_claim} \sum_{\sigma\in\Alt_n\ :\ \mathcal{D}(\sigma)=\mathcal{D}} q^{\occp(\sigma)} = \left\{\begin{array}{ll} w^e_{\mathcal{D}}(q), & \mbox{if $n$ is even};\\ w^o_{\mathcal{D}}(q), & \mbox{if $n$ is odd}. \end{array}\right. \end{equation} To prove the claim, consider for every $i$ the subtree $T_i(\sigma)$ obtained from $T(\sigma)$ by removing all vertices labeled with numbers greater than $i$. For the sake of clarity, one should imagine the ``incomplete'' tree $T_i(\sigma)$ together with ``loose'' edges indicating those edges with parent vertices in $T_i(\sigma)$ that appear when $T_i(\sigma)$ is completed to~$T(\sigma)$. For even $n$ one should also think of a loose edge directed to the right coming out of the rightmost vertex of every tree $T_i(\sigma)$ including $T_n(\sigma) = T(\sigma)$ --- this way the number of edges coming out of a vertex of $T_i(\sigma)$ is always $0$ or $2$. Observe that for $1\leq i \leq 2m$, the number of loose edges of $T_i(\sigma)$ is equal to $y_\mathcal{D}(i)+1$, where $y_\mathcal{D}(i)$ is the $y$-coordinate of the point of $\mathcal{D}$ whose $x$-coordinate is $i$. Indeed, $T_1(\sigma)$ has two loose edges, and $T_{i+1}(\sigma)$ is obtained from $T_i(\sigma)$ by attaching a non-leaf to a loose edge, thus increasing the number of loose edges by one, if the $i$-th step of $\mathcal{D}$ is an up-step, or by attaching a leaf to a loose edge, thus reducing the number of loose edges by one, if the $i$-th step is a down-step. 
Hence we can count the number of permutations $\sigma\in\Alt_n$ with $\mathcal{D}(\sigma)=\mathcal{D}$ by multiplying together the number of possibilities to attach a vertex labeled $i+1$ to $T_i(\sigma)$ to form $T_{i+1}(\sigma)$ for all $1\leq i\leq n-1$. The number of valid places to attach vertex $i+1$ is equal to the number of loose edges in $T_i(\sigma)$ unless $i+1$ is a leaf of $T(\sigma)$ and $n$ is even, in which case we have one fewer possibilities, because we are not allowed to make the rightmost vertex a leaf. Note that the level $\ell(i)$ of the $i$-th step of $\mathcal{D}$ is equal to $y_\mathcal{D}(i)$ if it is an up-step, or $y_\mathcal{D}(i)+1$ if it is a down-step. Comparing with the choice of step weights, we conclude that the number of possibilities to attach vertex $i+1$ is $w^e_{\mathcal{D},{i+1}}(1)$ if $n$ is even, or $w^o_{\mathcal{D},{i+1}}(1)$ if $n$ is odd. (For odd $n$ and $i=n-1$ the latter assertion makes no sense as $\mathcal{D}$ does not have an $n$-th step; however, there is just one way to attach the last vertex, so the counting argument is not affected.) The above computation proves the $q=1$ case of (\ref{fixed_path_claim}). To prove the general claim, we need to show that if there are $p$ possibilities to attach vertex $i+1$ to a loose edge of $T_i(\sigma)$, then the number of occurrences of the $31$-$2$ pattern ``induced'' by the attachment is $0$ for one of the possibilities, $1$ for another possibility, $2$ for another, and so on, up to $p-1$. Then choosing a place to attach vertex $i+1$ would correspond to choosing a term from $1 + q + q^2 + \cdots +q^{p-1} = [p]_q$, the weight of the $i$-th step of~$\mathcal{D}$, which is a factor in the total weight of~$\mathcal{D}$, and (\ref{fixed_path_claim}) would follow. It remains to specify which occurrences of $31$-$2$ in $\sigma$ are induced by which vertex of~$T(\sigma)$. Suppose there are $p$ possible places to attach vertex $i+1$. 
Order these places according to the topological order of tree traversal, and suppose we choose to put vertex $i+1$ in the $k$-th place in this order. Let $r_1$, $r_2$, \ldots, $r_{k-1}$ be the numbers of the vertices immediately following the first $k-1$ places in the topological order, and let~$a_j$ denote the label of the rightmost vertex of the eventual subtree of $T(\sigma)$ rooted at what is currently the $j$-th of these $k-1$ places. Although $a_j$ is not determined at the time vertex $i+1$ is attached, it is certain that $r_j < i+1 <a_j$ and that $a_j$ and $r_j$ will be consecutive elements of $\sigma$, with $i+1$ located somewhere to the right, resulting in an occurrence of $31$-$2$. Thus the choice to put vertex $i+1$ in the $k$-th available place induces $k-1$ occurrences of $31$-$2$, one for each $1\leq j \leq k-1$. It is not hard to check that each occurrence of $31$-$2$ is induced by some vertex of $T(\sigma)$, namely, the vertex corresponding to the rightmost element forming the pattern, in the way described above. \begin{figure} \caption{An intermediate tree $T_6(\sigma)$ and its completion $T(\sigma)$} \label{tree_construction} \end{figure} Let us illustrate the argument with an example. The left side of Figure \ref{tree_construction} shows the tree $T_6(\sigma)$ for some $\sigma \in \Alt_{10}$, with the four potential places for vertex $7$ marked A, B, C, and D. If vertex $7$ is put in position A, then it induces no occurrences of $31$-$2$. If it is put in position B, it induces one occurrence of $31$-$2$ as the triple $a5$-$7$ is created, where $a$ stands for the number of the rightmost vertex in the subtree rooted at A in the eventual tree. If vertex $7$ is put in position C, then in addition to the triple $a5$-$7$, one obtains a second $31$-$2$ triple $b1$-$7$. Finally, putting vertex $7$ in position D results in a third $31$-$2$ triple $c2$-$7$. (Here $b$ and $c$ are defined by analogy with $a$.) 
On the right side of Figure \ref{tree_construction} we have a possible completion of the tree on the left, which corresponds to the permutation $\sigma=10\ 5\ 8\ 1\ 4\ 3\ 7\ 2\ 9\ 6$. The theorem now follows by taking the sum of (\ref{fixed_path_claim}) over all Dyck paths $\mathcal{D}$ of length $2m$. \end{proof_} \end{document}
\begin{document} \title{Complete list of Bell inequalities with four binary settings} \author[1]{E. Zambrini Cruzeiro\thanks{[email protected]}} \author[1]{N. Gisin} \affil[1]{Group of Applied Physics, University of Geneva, 1211 Geneva, Switzerland} \date{} \setcounter{Maxaffil}{0} \renewcommand\Affilfont{\itshape\small} \maketitle \begin{abstract} We give the complete list of 175 facets of the local polytope for the case where Alice and Bob each choose their measurements from a set of four binary outcome measurements. For each inequality we compute the maximum quantum violation for qubits, the resistance to noise, and the minimal detection efficiency required for closing the detection loophole with maximally entangled qubit states, in the case where both detectors have the same efficiency (symmetric case). \end{abstract} \section{Introduction} Bell inequalities are central to the study of non-locality, but finding the complete list of Bell inequalities for a given Bell scenario can be a very difficult task \cite{Pitowski1989}. A Bell scenario is specified by a number of measurement settings and a number of measurement outcomes for each party. In the case of two parties with two measurement choices each (the simplest case), there is only one Bell inequality, the Clauser-Horne-Shimony-Holt (CHSH) inequality \cite{Clauser1969}. The local polytope has two facets, CHSH and positivity. If one allows both parties to choose between three binary outcome measurements, there is only one new relevant inequality besides CHSH. For four settings on each side, the number of facet inequalities grows to 175, where 169 of these inequalities genuinely use the four settings. The complete list of inequalities has not been known until recently, thanks to the research behind \cite{Brunner2008,Pal2009,Bancal2010}, but was never presented. Therefore one could find an almost complete list of the inequalities distributed in the literature. 
As a service to the community, we present the complete list in a single document. In addition, we study the basic quantum properties of these inequalities by computing the local and two-qubit quantum bounds, the state which attains the quantum bound, the resistance to noise for both the state that violates maximally the inequality and the maximally entangled state, and finally the minimum detector efficiency required to close the detection loophole assuming Alice and Bob's detectors have the same efficiency and the maximally entangled two-qubit state. In Section \ref{sec:one}, we review all Bell inequalities for scenarios with fewer binary-outcome measurements. In Section \ref{sec:two} we describe the computation of the local and quantum bounds, the resistance to noise and minimal detection efficiency to close the detection loophole, and present the main results. \section{Review of all Bell inequalities with less settings} \label{sec:one} In any bipartite Bell scenario, the statistics are fully specified by the joint probability distribution $p(ab|xy)$, where $a,b$ and $x,y$ are the outputs and inputs of Alice and Bob, respectively. For binary outcome scenarios XY22, where X is the number of settings of Alice and Y the number of settings of Bob, there are 4XY probability elements that specify $p(ab|xy)$. Some of these elements are not necessary to fully specify the statistics though, as they are not independent of each other due to the normalization and non-signalling conditions. Taking these into account, one finds that there are only 4XY-XY-X(Y-1)-Y(X-1)=XY+X+Y independent probability elements. Therefore one can fully specify the statistics of a binary outcome Bell test using XY+X+Y elements, this is the idea behind the Collins-Gisin (CG) notation \cite{Collins2004}. As we deal only with binary outcome measurement, in the rest of the paper we shall denote a XY22 scenario simply by XY. 
Using the CG notation, the probability distributions of 22 are specified by a table with the following elements: \begin{table}[!htbp] \begin{center} \label{tab:notation} \begin{tabular}{l|c c} & $p^B(0|0)$ & $p^B(0|1)$ \\ \hline $p^A(0|0)$ & $p(00|00)$ & $p(00|01)$\\ $p^A(0|1)$ & $p(00|10)$ & $p(00|11)$\\ \end{tabular}$\leq$ b \end{center} \end{table} \noindent where $p^A(a|x)$ and $p^B(b|y)$ are the marginals of Alice and Bob respectively. Such a table defines a joint probability distribution $p(ab|xy)$. One can also describe an inequality using such a table; in this case each element of the table instead represents the coefficient that multiplies the probability element indicated in the probability table. We introduce coefficients for the joint probability distribution $d_{xy}$, the marginals of Alice $c_x$ and those of Bob $e_y$. In general, a Bell inequality for an XY scenario can be written as \begin{equation} I=\sum_{xy}d_{xy}p(00|xy)+\sum_xc_xp^A(0|x)+\sum_ye_yp^B(0|y)\leq b \end{equation} In the 22 scenario the only relevant inequality is the CHSH inequality \begin{table}[!htbp] \begin{center} \label{tab:chsh} CHSH=\begin{tabular}{l|c c} & -1 & 0 \\ \hline -1 & 1 & 1\\ 0 & 1 & -1\\ \end{tabular}$\leq$ 0 \end{center} \end{table} As described in the introduction, even for the case of binary outcomes little is known. All tight Bell inequalities are known for the following cases: 22, X2 (X $\leq 3$), 33, 43, and 34. For the cases 22 and X2 (X $\leq 3$), there is only one Bell inequality \cite{Collins2004,Fine1982}: the CHSH inequality. The 33 scenario has one new inequality \cite{Collins2004}, \begin{table}[!htbp] \begin{center} \label{tab:I3322} $I_{3322}=$\begin{tabular}{l|c c c} & -1 & 0 & 0 \\ \hline -2 & 1 & 1 & 1\\ -1 & 1 & 1 & -1\\ 0 & 1 & -1 & 0 \end{tabular}$\leq$ 0 \end{center} \end{table} If Alice's third setting and Bob's first setting are not used, $I_{3322}$ reduces to CHSH.
Therefore in terms of minimal detection efficiency (symmetric) to close the detection loophole, $I_{3322}$ and CHSH perform the same. In the 34 scenario, there are three new inequivalent inequalities \begin{table}[!htbp] \centering \subfloat[][]{$I^{(1)}_{3422}=$\begin{tabular}{l|c c c c} & 1 & 0 & 0 & 1 \\ \hline 1 & -1 & -1 & 1 & -1\\ 1 & -1 & 1 & -1 & -1\\ -2 & 1 & 1 & 1 &-1\\ \end{tabular}$\leq$ 2} \qquad \subfloat[][]{$I^{(2)}_{3422}=$\begin{tabular}{l|c c c c} & -1 & 0 & -1 & 1 \\ \hline 0 & -1 & 0 & 1 & -1\\ 1 & 1 & -1 & 0 & -1\\ -1 & 1 & 1 & 1 & 0\\ \end{tabular}$\leq$ 1} \qquad \subfloat[][]{$I^{(3)}_{3422}=$\begin{tabular}{l|c c c c} & 0 & 0 & -1 & 2 \\ \hline 1 & -2 & 0 & 1 & -1\\ 0 & 1 & -1 & 1 & -1\\ -1 & 1 & 1 & 1 & -1\\ \end{tabular}$\leq$ 2} \label{tbl:I3422} \end{table} The optimal settings for $I_{3422}^{(1)}$ assuming the maximally entangled state are the same optimal settings of the Elegant Bell inequality \cite{Gisin2009,Gisin2017}. Alice's optimal settings are three orthogonal measurements X,Y and Z, while Bob's settings form the vertices of a regular tetrahedron on the Bloch sphere. With $I_{3422}^{(1)}$ one can have a larger quantum violation using partially entangled two-qubit states. In this case, the optimal settings of Alice are still X,Y,Z, but Bob's settings become an irregular tetrahedron. All the inequalities we present in this section can be lifted to the 44 scenario \cite{Pironio2005}, therefore we already know five inequalities of 44. These five liftings correspond to inequalities 1 to 5 in Table \ref{tab:one}. The first 31 inequalities of Table \ref{tab:one} are the 31 inequalities published in \cite{Brunner2008}, given in the same order. The list of coefficients in the CG notation for each inequality is provided separately as a file, and the inequalities are listed in it in the same order. 
\section{Bell inequalities with four settings for each party} \label{sec:two} We give the complete list of 175 facets of the local polytope in the case of four binary outcome measurements for both Alice and Bob. We present all the inequalities in a file, except the trivial one (positivity: $p(ab|xy)\geq 0$ for all $a,b,x,y$), in the case of four outcomes for Alice and Bob. The main results are given in Table \ref{tab:one}. Along with each inequality, we provide the local bound $L$, quantum bound $Q$, the resistance to noise $\lambda$, and the minimal detection efficiency $\eta$ required for closing the detection loophole in the symmetric case. Note that six of the 175 inequalities correspond to liftings: positivity, CHSH, $I_{3322}$, $I_{3422}^{(1)}$, $I_{3422}^{(2)}$ and $I_{3422}^{(3)}$. Therefore there are 169 inequalities which use the four settings on both sides. \subsection{Quantum violation} The local bound $L$ is computed by finding the optimal strategy using only shared randomness and local operations. Joint probability distributions $p(ab|xy)$ which are local in this sense can be decomposed in the following way \begin{equation} p(ab|xy)=\int q(\lambda)p^A(a|x)p^B(b|y)d\lambda \end{equation} where $\lambda$ is the shared random variable and $q(\lambda)$ is its probability distribution. The quantum bound $Q$ gives the maximal quantum violation for a two-qubit pure entangled state of the form \begin{equation} |\psi (\theta)\rangle=\cos\theta |00\rangle +\sin\theta |11\rangle \end{equation} Note that we compute the maximal quantum bound for qubits. Nevertheless, it is known that using higher-dimensional states one can in some cases achieve a better quantum bound \cite{Pal2010}. We optimize over projective non-degenerate von Neumann measurements. Each measurement setting of Alice (Bob) is described by a vector $\vec{a}_x$ ($\vec{b}_y$) on the Bloch sphere. 
One has \begin{equation} p(00|xy)=\mathrm{Tr}\left(A_x\otimes B_y |\psi (\theta)\rangle\langle\psi (\theta)|\right) \end{equation} where $A_x\coloneqq \frac{1+\vec{a}_x\cdot\vec{\sigma}}{2}$, and similarly for $B_y$. All the inequalities for scenarios 22, 32, 23 and 33 are maximally violated by maximally entangled qubit states. This is not the case for scenarios 34 and 44, where the maximal violation can be given by a partially entangled state. In the 44 scenario, we find inequalities which are maximally violated by partially entangled states for non-degenerate measurements, which can be in some cases very far from the maximally entangled state. An example of such an inequality is facet number 15, which is maximally violated by the state \begin{equation} \psi(\theta_{\mathrm{max}})=0.4018|00\rangle + 0.9157|11\rangle \end{equation} The violation is small compared to other inequalities of this table, and we see that the resistance to noise is bad. However, as described in \cite{Brunner2008}, using degenerate measurements this inequality ($I_{4422}^4$ in \cite{Brunner2008}) is maximally violated by the maximally entangled state. Indeed using degenerate measurements this inequality becomes equivalent to CHSH. \subsection{Resistance to noise} Let $|\psi\rangle$ be the state that maximally violates a specific bell inequality. Then the resistance to noise $1-\lambda$ is defined as the amount of white noise that can be mixed with $|\psi\rangle$ in order for the bell inequality not to be violated anymore. \begin{equation} \rho=\lambda|\psi\rangle\langle\psi | +(1-\lambda)\frac{\mathds{1}}{4} \end{equation} The best bipartite inequality XY for X,Y $\leq 4$ in terms of resistance to noise using qubits is by far CHSH. \subsection{Minimum detection efficiency for closing the detection loophole} There are different possible strategies to close the detection loophole. All of them involve preventing the attacker (Eve) from exploiting the non-detections. 
How should the experimenter handle a non-detection event? One possible strategy, which was implemented in \cite{Brunner2008}, is that Alice and Bob output $a=0$ or $b=0$ respectively, in order to deal with a non-detection. However, they could also output 1, giving four possibilities in total. We have also optimized the detection efficiencies over these no-click strategies. A Bell inequality with detector efficiency $\eta$ for both Alice and Bob can be written \cite{Brunner2007}: \begin{equation}\label{eq:polynomial} I_{\eta,\eta}=\eta^2 Q + \eta (1-\eta)(M_A+M_B)+(1-\eta)^2X\leq L \end{equation} where $M_A$ is the value that the Bell inequality yields when only Alice's detector fires, $M_B$ is the bound when only Bob's detector fires and $X$ accounts for when both detectors do not fire. We find that the smallest value for the symmetric detection efficiency using a maximally entangled two-qubit state is $82.14\%$, which is already the best result of \cite{Brunner2008}. This inequality, labeled $A_5$ in \cite{Brunner2008}, is number 8 on our table. \subsection{Correlation inequalities} In this section, we present the two inequalities of this list which can be cast into correlator-only form. A correlator $E(x,y)$ is defined as \begin{equation} E(x,y)=p(a=b|xy)-p(a\neq b|xy) \end{equation} CHSH for example can be put into full-correlation form \begin{equation} E(0,0)+E(0,1)+E(1,0)-E(1,1) \overset{L}{\leq} 2 \overset{Q}{\leq} 2\sqrt{2} \end{equation} where $L$ and $Q$ relate to the local and quantum bounds, respectively. We find that only three inequalities can be put into full-correlation form: inequalities 1 (CHSH), 10 and 11. Facet number 10 is the inequality named $AS_1$ in \cite{Brunner2008}, while facet number 11 is $AS_2$. These inequalities are the only ones which can be put in full-correlation form for the 44 scenario, which confirms the result of D. Avis et al. \cite{Avis2006}. 
\section{Conclusion} We have studied the complete list of Bell inequalities with four binary-outcome settings. We give the full list and a table with the local and quantum bounds of all inequalities, the two-qubit state that maximally violates each inequality, the resistance to noise and the minimal detection efficiency for maximally entangled qubit states to close the detection loophole in a Bell experiment where both detectors have the same efficiency. We find several inequalities which are maximally violated by partially entangled states, which is interesting for the study of nonlocality. It is also confirmed that the minimum detection efficiency is $82.14\%$, and is found for the inequality $A_5$ published in \cite{Brunner2008}. \fontsize{11}{16}\selectfont \section*{Added note} \textit{While this article was being written, the complete list of inequalities was presented in \cite{Oudot2018}, although the inequalities were not studied. In order not to confuse readers, we have adopted the same naming convention for the inequalities that is used in \cite{Oudot2018}.} \newgeometry{margin=0.3cm} \small \begin{longtabu}{|c|c|c|c|c|c|c|c|} \caption{Quantum properties of all Bell inequalities with four binary-outcome settings for both parties. For each inequality we indicate the name under which it can be found in the literature, we give the quantum state that achieves the largest violation $|\psi(\theta_{\mathrm{max}})\rangle=\cos\theta_{\mathrm{max}}|00\rangle +\sin\theta_{\mathrm{max}}|11\rangle$, and we give its resistance to noise $\lambda$. For the maximally entangled state, we provide the resistance to noise $\lambda_{\mathrm{ME}}$, as well as the detection efficiency $\eta$ required to close the symmetric detection loophole.
All quantities are computed for two-qubit systems and non-degenerate measurements.} \label{tab:one} \\ \hline \rowfont{\bfseries} \# & Name & L & Q & $\theta_{\mathrm{max}}/\pi$ & $\lambda$ & $\lambda_{\mathrm{ME}}$ & $\eta_{\mathrm{sym}}$\\ \hline 1 & CHSH & 1 & 1.2071 & 0.25 & \textbf{0.7071} & \textbf{0.7071} & 0.8284 \\ 2 & $I_{3322}$ & 1 & 1.25 & 0.25 & 0.8 & 0.8 & 0.8284 \\ 3 & $I_{4322}^1$ & 1 & 1.2361 & 0.2332 & 0.864 & 0.866 & 0.8761 \\ 4 & $I_{4322}^2$ & 1 & 1.2596 & 0.2251 & 0.828 & 0.8333 & 0.8685 \\ 5 & $I_{4322}^3$ & 1 & 1.4365 & 0.25 & 0.7746 & 0.7746 & 0.8514 \\ 6 & $I_{4422}^1$ & 1 & 1.197 & 0.2356 & 0.8988 & 0.9 & 0.8571 \\ 7 & $I_{4422}^2$ & 2 & 2.6214 & 0.2476 & 0.763 & 0.763 & 0.8443 \\ 8 & $A_5$ & 1 & 1.4353 & 0.2447 & 0.7751 & 0.7752 & \textbf{0.8214} \\ 9 & $A_6$ & 1 & 1.2321 & 0.25 & 0.8829 & 0.8829 & 0.8373 \\ 10 & $AS_1$ & 3 & 3.5412 & 0.25 & 0.7348 & 0.7348 & 0.8472 \\ 11 & $AS_2$ & 4 & 4.8785 & 0.25 & 0.74 & 0.74 & 0.8506 \\ 12 & $AII_1$ & 3 & 3.6056 & 0.2435 & 0.7676 & 0.7679 & 0.8323 \\ 13 & $AII_2$ & 2 & 2.5 & 0.25 & 0.8 & 0.8 & 0.8508 \\ 14 & $I_{4422}^3$ & 1 & 1.238 & 0.2257 & 0.863 & 0.866 & 0.8761 \\ 15 & $I_{4422}^4$ & 1 & 1.056 & \textbf{0.1315} & 0.9728 & 1 & 1 \\ 16 & $I_{4422}^5$ & 2 & 2.4365 & 0.25 & 0.7746 & 0.7746 & 0.8514 \\ 17 & $I_{4422}^6$ & 1 & 1.4495 & 0.25 & 0.8165 & 0.8165 & 0.8697 \\ 18 & $I_{4422}^7$ & 2 & 2.4548 & 0.2379 & 0.7937 & 0.7949 & 0.8405 \\ 19 & $I_{4422}^8$ & 3 & 3.4207 & 0.2456 & 0.856 & 0.8561 & 0.8893 \\ 20 & $I_{4422}^9$ & 2 & 2.4617 & 0.2352 & 0.8441 & 0.8455 & 0.8392 \\ 21 & $I_{4422}^{10}$ & 2 & 2.6139 & 0.2461 & 0.8175 & 0.8176 & 0.8458 \\ 22 & $I_{4422}^{11}$ & 3 & 3.6384 & 0.2444 & 0.779 & 0.7792 & 0.8474 \\ 23 & $I_{4422}^{12}$ & 3 & 3.6188 & 0.2404 & 0.7843 & 0.7849 & 0.8382 \\ 24 & $I_{4422}^{13}$ & 1 & 1.25 & 0.2069 & 0.8889 & 0.8889 & 0.8944 \\ 25 & $I_{4422}^{14}$ & 2 & 2.4103 & 0.238 & 0.8298 & 0.831 & 0.8523 \\ 26 & $I_{4422}^{15}$ & 1 & 1.25 & 0.25 & 0.8889 & 0.8889 & 0.8944 \\ 27 
& $I_{4422}^{16}$ & 1 & 1.2407 & 0.219 & 0.8791 & 0.8829 & 0.9009 \\ 28 & $I_{4422}^{17}$ & 3 & 3.6714 & 0.2488 & 0.7883 & 0.7883 & 0.8611 \\ 29 & $I_{4422}^{18}$ & 2 & 2.1812 & 0.2064 & 0.9508 & 0.9623 & 0.9575 \\ 30 & $I_{4422}^{19}$ & 3 & 3.4307 & 0.25 & 0.8745 & 0.8745 & 0.887 \\ 31 & $I_{4422}^{20}$ & 2 & 2.3056 & 0.25 & 0.9075 & 0.9231 & 0.899 \\ 32 & $J_{4422}^{44}$ & 1 & 1.4145 & 0.2279 & 0.8085 & 0.8122 & 0.8677 \\ 33 & $J_{4422}^{25}$ & 2 & 2.4459 & 0.2393 & 0.797 & 0.7977 & 0.8638 \\ 34 & $J_{4422}^{60}$ & 1 & 1.5923 & 0.25 & 0.7715 & 0.7715 & 0.8351 \\ 35 & $J_{4422}^{43}$ & 2 & 2.4414 & 0.2287 & 0.8192 & 0.8221 & 0.8788 \\ 36 & $J_{4422}^{69}$ & 2 & 2.4693 & 0.2423 & 0.8099 & 0.8104 & 0.8581 \\ 37 & $J_{4422}^{74}$ & 1 & 1.4332 & 0.2432 & 0.8219 & 0.8223 & 0.8603 \\ 38 & $J_{4422}^6$ & 2 & 2.4158 & 0.2407 & 0.8279 & 0.8284 & 0.8502 \\ 39 & $J_{4422}^1$ & 2 & 2.3871 & 0.2393 & 0.8378 & 0.8386 & 0.8583 \\ 40 & $J_{4422}^{26}$ & 3 & 3.6402 & 0.2436 & 0.7785 & 0.7788 & 0.8409 \\ 41 & $J_{4422}^{63}$ & 2 & 2.6035 & 0.2433 & 0.7885 & 0.789 & 0.8589 \\ 42 & $J_{4422}^{75}$ & 3 & 3.627 & 0.2405 & 0.7821 & 0.7827 & 0.8641 \\ 43 & $J_{4422}^{32}$ & 3 & 3.5902 & 0.2386 & 0.7922 & 0.7931 & 0.8565 \\ 44 & $J_{4422}^{23}$ & 3 & 3.609 & 0.2403 & 0.787 & 0.7875 & 0.8528 \\ 45 & $J_{4422}^{66}$ & 3 & 3.6186 & 0.2422 & 0.7843 & 0.7848 & 0.8451 \\ 46 & $J_{4422}^{81}$ & 3 & 3.5996 & 0.2397 & 0.7896 & 0.7904 & 0.8547 \\ 47 & $J_{4422}^{71}$ & 3 & 3.5823 & 0.2471 & 0.7944 & 0.7945 & 0.8574 \\ 48 & $J_{4422}^{88}$ & 2 & 2.616 & 0.2468 & 0.7851 & 0.7851 & 0.8454 \\ 49 & $J_{4422}^{33}$ & 3 & 3.6151 & 0.2423 & 0.7853 & 0.7857 & 0.8516 \\ 50 & $J_{4422}^{54}$ & 2 & 2.393 & 0.2307 & 0.8513 & 0.8539 & 0.8783 \\ 51 & $J_{4422}^{111}$ & 2 & 2.7576 & 0.2455 & 0.7674 & 0.7676 & 0.856 \\ 52 & $J_{4422}^{118}$ & 3 & 3.5283 & 0.2295 & 0.8255 & 0.8286 & 0.8829 \\ 53 & $J_{4422}^{14}$ & 3 & 3.6023 & 0.2424 & 0.8059 & 0.8063 & 0.8682 \\ 54 & $J_{4422}^{37}$ & 2 & 2.3909 & 0.2278 & 0.8648 
& 0.8671 & 0.8942 \\ 55 & $J_{4422}^{20}$ & 2 & 2.5356 & 0.2353 & 0.8236 & 0.8251 & 0.8624 \\ 56 & $N_{4422}^{10}$ & 2 & 2.5 & 0.25 & 0.8333 & 0.8333 & 0.8633 \\ 57 & $N_{4422}^4$ & 2 & 2.3956 & 0.2373 & 0.8634 & 0.8646 & 0.8922 \\ 58 & $J_{4422}^9$ & 2 & 2.3423 & 0.222 & 0.8796 & 0.8835 & 0.8927 \\ 59 & $J_{4422}^{82}$ & 2 & 2.7308 & 0.241 & 0.79 & 0.7908 & 0.8684 \\ 60 & $J_{4422}^{67}$ & 2 & 2.6731 & 0.2455 & 0.8034 & 0.8035 & 0.8651 \\ 61 & $J_{4422}^{51}$ & 3 & 3.6678 & 0.2419 & 0.8046 & 0.8051 & 0.8573 \\ 62 & $N_{4422}^7$ & 3 & 3.6244 & 0.2425 & 0.815 & 0.8155 & 0.8645 \\ 63 & $J_{4422}^2$ & 3 & 3.614 & 0.2353 & 0.8175 & 0.8189 & 0.8526 \\ 64 & $J_{4422}^{102}$ & 3 & 3.6651 & 0.2385 & 0.8053 & 0.8062 & 0.8433 \\ 65 & $J_{4422}^{87}$ & 3 & 3.6849 & 0.2419 & 0.8006 & 0.801 & 0.8545 \\ 66 & $J_{4422}^{83}$ & 3 & 3.6692 & 0.2414 & 0.8043 & 0.8048 & 0.8571 \\ 67 & $J_{4422}^{112}$ & 2 & 2.6248 & 0.2352 & 0.8149 & 0.8163 & 0.8607 \\ 68 & $J_{4422}^{94}$ & 2 & 2.5686 & 0.238 & 0.8287 & 0.8298 & 0.8707 \\ 69 & $J_{4422}^{24}$ & 3 & 3.6943 & 0.2432 & 0.7984 & 0.7987 & 0.8529 \\ 70 & $J_{4422}^{35}$ & 3 & 3.6933 & 0.2464 & 0.7987 & 0.7987 & 0.8379 \\ 71 & $J_{4422}^{36}$ & 3 & 3.6706 & 0.2393 & 0.8039 & 0.8048 & 0.8422 \\ 72 & $J_{4422}^{22}$ & 5 & 5.8156 & 0.2426 & 0.7862 & 0.7866 & 0.8637 \\ 73 & $J_{4422}^{61}$ & 5 & 5.8176 & 0.2495 & 0.7858 & 0.7858 & 0.8696 \\ 74 & $J_{4422}^{27}$ & 3 & 3.9643 & 0.25 & 0.7568 & 0.7568 & 0.8514 \\ 75 & $J_{4422}^{72}$ & 4 & 4.7878 & 0.2401 & 0.792 & 0.7929 & 0.8671 \\ 76 & $J_{4422}^{41}$ & 4 & 4.7596 & 0.2356 & 0.798 & 0.7993 & 0.8606 \\ 77 & $J_{4422}^{76}$ & 4 & 4.8291 & 0.2413 & 0.7835 & 0.784 & 0.8616 \\ 78 & $J_{4422}^{62}$ & 4 & 4.75 & 0.25 & 0.8 & 0.8 & 0.861 \\ 79 & $J_{4422}^{106}$ & 4 & 4.8382 & 0.2415 & 0.7816 & 0.7821 & 0.8604 \\ 80 & $J_{4422}^{126}$ & 4 & 4.8024 & 0.2441 & 0.789 & 0.7894 & 0.8649 \\ 81 & $J_{4422}^{77}$ & 4 & 4.8406 & 0.2408 & 0.7811 & 0.7817 & 0.8601 \\ 82 & $J_{4422}^{116}$ & 2 & 2.7652 & 0.2429 & 
0.7968 & 0.7972 & 0.8632 \\ 83 & $J_{4422}^{50}$ & 3 & 3.8556 & 0.2438 & 0.7781 & 0.7784 & 0.8503 \\ 84 & $J_{4422}^{16}$ & 4 & 4.5674 & 0.2305 & 0.8409 & 0.8434 & 0.8897 \\ 85 & $J_{4422}^{19}$ & 4 & 4.6742 & 0.25 & 0.8165 & 0.8165 & 0.8719 \\ 86 & $J_{4422}^4$ & 4 & 4.6862 & 0.2452 & 0.8138 & 0.814 & 0.8702 \\ 87 & $J_{4422}^{42}$ & 2 & 2.6012 & 0.2362 & 0.8331 & 0.8342 & 0.8777 \\ 88 & $J_{4422}^{90}$ & 4 & 4.8398 & 0.2449 & 0.7813 & 0.7815 & 0.8444 \\ 89 & $J_{4422}^{58}$ & 4 & 4.8814 & 0.2457 & 0.7729 & 0.773 & 0.8386 \\ 90 & $J_{4422}^{17}$ & 3 & 3.4288 & 0.2364 & 0.8749 & 0.8756 & 0.888 \\ 91 & $J_{4422}^{34}$ & 2 & 2.4075 & 0.2377 & 0.8804 & 0.8808 & 0.9016 \\ 92 & $J_{4422}^{121}$ & 3 & 3.5971 & 0.2274 & 0.834 & 0.8374 & 0.8713 \\ 93 & $J_{4422}^{59}$ & 2 & 2.427 & 0.2372 & 0.8754 & 0.876 & 0.8848 \\ 94 & $J_{4422}^{31}$ & 2 & 2.2133 & 0.2017 & 0.9336 & 0.9443 & 0.9462 \\ 95 & $J_{4422}^7$ & 2 & 2.3642 & 0.2304 & 0.8917 & 0.8942 & 0.8859 \\ 96 & $J_{4422}^8$ & 2 & 2.2657 & 0.1837 & 0.9186 & 0.9333 & 0.9254 \\ 97 & $J_{4422}^{30}$ & 2 & 2.2459 & 0.1832 & 0.9242 & 0.9443 & 0.937 \\ 98 & $J_{4422}^{11}$ & 2 & 2.2229 & 0.1865 & 0.9309 & 0.9428 & 0.9355 \\ 99 & $J_{4422}^{110}$ & 5 & 5.9457 & 0.2447 & 0.7746 & 0.7748 & 0.8602 \\ 100 & $J_{4422}^{70}$ & 5 & 5.9539 & 0.2411 & 0.7731 & 0.7737 & 0.8595 \\ 101 & $J_{4422}^{93}$ & 5 & 5.9627 & 0.2423 & 0.7715 & 0.7719 & 0.8585 \\ 102 & $J_{4422}^{107}$ & 5 & 5.934 & 0.2478 & 0.7768 & 0.7768 & 0.8584 \\ 103 & $J_{4422}^{45}$ & 3 & 3.7572 & 0.2378 & 0.811 & 0.812 & 0.8682 \\ 104 & $J_{4422}^{47}$ & 4 & 4.7645 & 0.2408 & 0.8096 & 0.8101 & 0.87 \\ 105 & $J_{4422}^{28}$ & 4 & 4.75 & 0.25 & 0.8125 & 0.8125 & 0.861 \\ 106 & $J_{4422}^{86}$ & 3 & 3.75 & 0.25 & 0.8125 & 0.8125 & 0.8571 \\ 107 & $J_{4422}^{48}$ & 4 & 4.745 & 0.2409 & 0.8135 & 0.8142 & 0.8697 \\ 108 & $J_{4422}^{15}$ & 3 & 3.6133 & 0.2433 & 0.8412 & 0.8417 & 0.8664 \\ 109 & $J_{4422}^{122}$ & 2 & 2.6275 & 0.2381 & 0.8382 & 0.8391 & 0.8732 \\ 110 & $N_{4422}^3$ 
& 3 & 3.6093 & 0.2425 & 0.8421 & 0.8427 & 0.8671 \\ 111 & $N_{4422}^2$ & 3 & 3.6135 & 0.2338 & 0.8412 & 0.8429 & 0.8673 \\ 112 & $J_{4422}^{129}$ & 3 & 4.0523 & 0.2434 & 0.7688 & 0.7692 & 0.8515 \\ 113 & $J_{4422}^{80}$ & 4 & 4.9051 & 0.2464 & 0.7945 & 0.7946 & 0.8618 \\ 114 & $J_{4422}^{64}$ & 4 & 4.9167 & 0.2466 & 0.7924 & 0.7926 & 0.8695 \\ 115 & $J_{4422}^{68}$ & 4 & 5.0179 & 0.2416 & 0.7747 & 0.7753 & 0.8586 \\ 116 & $J_{4422}^{78}$ & 3 & 3.8134 & 0.2406 & 0.8114 & 0.812 & 0.8705 \\ 117 & $S_{242}^{51}$ & 5 & 6.0135 & 0.2417 & 0.7754 & 0.776 & 0.853 \\ 118 & $J_{4422}^{99}$ & 3 & 3.8264 & 0.2388 & 0.809 & 0.8099 & 0.8587 \\ 119 & $S_{242}^{52}$ & 4 & 4.8704 & 0.2364 & 0.8008 & 0.8021 & 0.8456 \\ 120 & $J_{4422}^{124}$ & 4 & 4.941 & 0.2455 & 0.7881 & 0.7882 & 0.8474 \\ 121 & $J_{4422}^{21}$ & 4 & 4.5441 & 0.2397 & 0.8655 & 0.8662 & 0.8924 \\ 122 & $J_{4422}^{127}$ & 3 & 3.6147 & 0.2269 & 0.8506 & 0.8537 & 0.8683 \\ 123 & $J_{4422}^5$ & 3 & 3.5007 & 0.2404 & 0.8749 & 0.8754 & 0.8734 \\ 124 & $N_{4422}^6$ & 3 & 3.5971 & 0.2496 & 0.8543 & 0.8543 & 0.8546 \\ 125 & $J_{4422}^{46}$ & 5 & 5.9717 & 0.2436 & 0.7942 & 0.7946 & 0.8661 \\ 126 & $J_{4422}^{108}$ & 5 & 5.9676 & 0.2403 & 0.7949 & 0.7955 & 0.8581 \\ 127 & $J_{4422}^{89}$ & 5 & 6.0036 & 0.239 & 0.7889 & 0.7898 & 0.8631 \\ 128 & $J_{4422}^{96}$ & 3 & 3.9417 & 0.2413 & 0.7993 & 0.7999 & 0.8642 \\ 129 & $J_{4422}^{117}$ & 5 & 5.9721 & 0.2447 & 0.7941 & 0.7944 & 0.866 \\ 130 & $J_{4422}^{39}$ & 4 & 4.8265 & 0.2306 & 0.8194 & 0.8221 & 0.8793 \\ 131 & $J_{4422}^{53}$ & 4 & 4.7994 & 0.2428 & 0.8243 & 0.8246 & 0.8653 \\ 132 & $J_{4422}^{57}$ & 4 & 4.8053 & 0.2415 & 0.8232 & 0.8237 & 0.8646 \\ 133 & $J_{4422}^{55}$ & 3 & 3.7066 & 0.2393 & 0.8415 & 0.8422 & 0.8752 \\ 134 & $J_{4422}^{56}$ & 4 & 4.7491 & 0.2419 & 0.8335 & 0.834 & 0.872 \\ 135 & $J_{4422}^3$ & 4 & 4.7249 & 0.2448 & 0.838 & 0.8382 & 0.8647 \\ 136 & $J_{4422}^{49}$ & 4 & 4.8089 & 0.2392 & 0.8226 & 0.8234 & 0.8644 \\ 137 & $N_{4422}^9$ & 4 & 4.7399 & 0.2445 & 
0.8352 & 0.8354 & 0.8626 \\ 138 & $J_{4422}^{98}$ & 5 & 6.1497 & 0.2396 & 0.7767 & 0.7776 & 0.8643 \\ 139 & $J_{4422}^{85}$ & 4 & 4.9763 & 0.2362 & 0.8038 & 0.805 & 0.8694 \\ 140 & $J_{4422}^{79}$ & 5 & 6.0156 & 0.2396 & 0.7975 & 0.7982 & 0.8529 \\ 141 & $J_{4422}^{119}$ & 5 & 5.8489 & 0.2421 & 0.8249 & 0.8253 & 0.8714 \\ 142 & $J_{4422}^{125}$ & 5 & 6 & 0.25 & 0.8 & 0.8 & 0.8541 \\ 143 & $J_{4422}^{105}$ & 5 & 6.0742 & 0.2418 & 0.7883 & 0.7888 & 0.8465 \\ 144 & $J_{4422}^{65}$ & 4 & 5.111 & 0.2466 & 0.7826 & 0.7827 & 0.8484 \\ 145 & $J_{4422}^{113}$ & 3 & 3.8195 & 0.2399 & 0.83 & 0.8306 & 0.8698 \\ 146 & $J_{4422}^{101}$ & 5 & 6.0296 & 0.2429 & 0.7953 & 0.7956 & 0.8439 \\ 147 & $J_{4422}^{128}$ & 5 & 6.0096 & 0.2497 & 0.7985 & 0.7985 & 0.8531 \\ 148 & $N_{4422}^{11}$ & 3 & 3.4917 & 0.2246 & 0.8905 & 0.8947 & 0.8914 \\ 149 & $J_{4422}^{13}$ & 3 & 3.5629 & 0.2422 & 0.8766 & 0.8772 & 0.8615 \\ 150 & $J_{4422}^{91}$ & 7 & 8.2993 & 0.2422 & 0.7659 & 0.7663 & 0.858 \\ 151 & $J_{4422}^{92}$ & 4 & 5.0648 & 0.2405 & 0.7997 & 0.8002 & 0.8622 \\ 152 & $J_{4422}^{52}$ & 3 & 4.0999 & 0.2442 & 0.7944 & 0.7947 & 0.8586 \\ 153 & $J_{4422}^{115}$ & 3 & 3.9802 & 0.2394 & 0.8126 & 0.8134 & 0.8687 \\ 154 & $J_{4422}^{38}$ & 4 & 4.9295 & 0.2342 & 0.8205 & 0.8225 & 0.8693 \\ 155 & $J_{4422}^{40}$ & 6 & 6.902 & 0.2441 & 0.833 & 0.8333 & 0.883 \\ 156 & $J_{4422}^{84}$ & 4 & 5.067 & 0.2407 & 0.8083 & 0.8089 & 0.8697 \\ 157 & $J_{4422}^{95}$ & 5 & 5.9418 & 0.2401 & 0.8269 & 0.8277 & 0.8697 \\ 158 & $J_{4422}^{120}$ & 3 & 3.7931 & 0.2385 & 0.8502 & 0.8509 & 0.8899 \\ 159 & $J_{4422}^{18}$ & 5 & 5.7018 & 0.2416 & 0.8651 & 0.8653 & 0.8892 \\ 160 & $J_{4422}^{29}$ & 5 & 6.0246 & 0.2461 & 0.8145 & 0.8146 & 0.8515 \\ 161 & $J_{4422}^{114}$ & 5 & 6.0653 & 0.2463 & 0.8086 & 0.8087 & 0.8562 \\ 162 & $N_{4422}^8$ & 5 & 6.0189 & 0.2408 & 0.8154 & 0.816 & 0.8614 \\ 163 & $J_{4422}^{103}$ & 5 & 6.052 & 0.2385 & 0.8105 & 0.8114 & 0.8491 \\ 164 & $N_{4422}^1$ & 5 & 6.0076 & 0.2467 & 0.8171 & 0.8171 & 
0.8622 \\ 165 & $J_{4422}^{123}$ & 5 & 6.0641 & 0.2422 & 0.8088 & 0.8092 & 0.8475 \\ 166 & $J_{4422}^{10}$ & 3 & 3.3738 & 0.2138 & 0.9233 & 0.9281 & 0.9155 \\ 167 & $J_{4422}^{12}$ & 3 & 3.4198 & 0.25 & 0.9147 & 0.9147 & 0.8893 \\ 168 & $J_{4422}^{73}$ & 4 & 5.1205 & 0.2478 & 0.8091 & 0.8092 & 0.8714 \\ 169 & $J_{4422}^{100}$ & 6 & 7.2675 & 0.2416 & 0.7894 & 0.7899 & 0.856 \\ 170 & $N_{4422}^{12}$ & 7 & 8.325 & 0.2431 & 0.7905 & 0.7909 & 0.8665 \\ 171 & $J_{4422}^{97}$ & 4 & 5.1584 & 0.2477 & 0.8119 & 0.8119 & 0.8657 \\ 172 & $N_{4422}^5$ & 7 & 8.4377 & 0.2403 & 0.785 & 0.7857 & 0.8516 \\ 173 & $J_{4422}^{104}$ & 6 & 7.5876 & 0.2398 & 0.7836 & 0.7844 & 0.8574 \\ 174 & $J_{4422}^{109}$ & 9 & 10.7261 & 0.2417 & 0.7766 & 0.7771 & 0.8552 \\ \hline \end{longtabu} \end{document}
\begin{document}
\title{Gibbsian and non-Gibbsian properties of the generalized mean-field fuzzy Potts-model}
\author{Benedikt Jahnel \footnote{\scriptsize{Ruhr-Universit\"at Bochum, Fakult\"at f\"ur Mathematik, D-44801 Bochum, Germany, \newline \texttt{[email protected]}, \newline \texttt{www.ruhr-uni-bochum.de/ffm/Lehrstuehle/Kuelske/jahnel.html}}}\,,
Christof K\"ulske \footnote{\scriptsize{Ruhr-Universit\"at Bochum, Fakult\"at f\"ur Mathematik, D-44801 Bochum, Germany, \newline \texttt{[email protected]}, \newline \texttt{www.ruhr-uni-bochum.de/ffm/Lehrstuehle/Kuelske/kuelske.html}}}\,,
Elena Rudelli \footnote{\scriptsize{Ruhr-Universit\"at Bochum, Fakult\"at f\"ur Mathematik, D-44801 Bochum, Germany, \newline \texttt{[email protected]}}}\,
\\ and Janine Wegener \footnote{\scriptsize{Ruhr-Universit\"at Bochum, Fakult\"at f\"ur Mathematik, D-44801 Bochum, Germany, \newline \texttt{[email protected]}}}}
\maketitle
\begin{abstract}
We analyze the generalized mean-field $q$-state Potts model which is obtained by replacing the usual quadratic interaction function in the mean-field Hamiltonian by a higher power $z$. We first prove a generalization of the known limit result for the empirical magnetization vector of Ellis and Wang \cite{ElWa89} which shows that in the right parameter regime, the first-order phase-transition persists. Next we turn to the corresponding generalized fuzzy Potts model which is obtained by decomposing the set of the $q$ possible spin-values into $1<s<q$ classes and identifying the spins within these classes.
In extension of earlier work \cite{HK04} which treats the quadratic model we prove the following: The fuzzy Potts model with interaction exponent bigger than four (respectively bigger than two and smaller or equal four) is non-Gibbs if and only if its inverse temperature $\beta$ satisfies $\beta\geq \beta_c(r_*,z)$, where $\beta_c(r_*,z)$ is the critical inverse temperature of the corresponding Potts model and $r_*$ is the size of the smallest class which is greater or equal than two (respectively greater or equal than three). We also provide a dynamical interpretation considering sequences of fuzzy Potts models which are obtained by increasingly collapsing classes at finitely many times $t$ and discuss the possibility of a multiple in- and out of Gibbsianness, depending on the collapsing scheme.
\end{abstract}
\smallskip
\noindent \textbf{AMS 2000 subject classification:} 82B20, 82B26.
\smallskip
\noindent \textbf{Keywords:} Potts model, Fuzzy Potts model, Ellis-Wang Theorem, Gibbsian measures, non-Gibbsian measures, mean-field measures.
\section{Introduction}
Past years have seen a number of examples of measures which arise from local transforms of Gibbs measures which turned out to be non-Gibbs; for a general background see \cite{EFS, DEZ, KUL6}. Two particularly interesting types of transformations which were considered recently are time-evolutions \cite{ACD1, AFHR10} and local coarse-grainings \cite{B10, KULOP08, JaKu12}, both without geometry (mean-field) and with geometry. Very recently, in \cite{FHM13} there has even been considered a system of Ising spins on a large discrete torus with a Kac-type interaction subject to an independent spin-flip dynamics, using large deviation techniques (usually applied in the mean-field setting) for the empirical density allowing for a spatial structure with geometry. In the present paper we pick up a line of a mean-field analysis which was begun in \cite{HK04}.
The extension to exponents $z\geq 2$ is natural since it amounts to considering energy given by the number of $z$-cliques of equal color in the case of integer $z$, see \ref{Random cluster representation and $z$-clique variables}. In \cite{HK04} the mean-field Potts model was considered under a local coarse-graining. Here the local spin-space $\{1, \dots, q\}$ is decomposed into $1<s<q$ classes of sizes $r_1,\dots, r_s$. This map, performed at each site simultaneously, defines a coarse-graining map $T\colon\{1,\dots, q\}^N \rightarrow \{1,\dots, s\}^N$. The measures arising as images of the Potts mean-field measures for $N$ spins under $T$ constitute the so-called fuzzy Potts model. It was shown that non-Gibbsian behavior occurs if the temperature of the Potts model is small enough and precise transition-values between Gibbsian and non-Gibbsian images were given. We remark that the notion of a Gibbsian mean-field model is employed which considers as a defining property the existence and continuity of single-site probabilities. This notion is standard by now (see for example \cite{KUL1,KUL2, FHM13-,JaKu13}) and provides the natural counterpart of Gibbsianness for lattice systems for mean-field measures. Aim one of the paper is to generalize the mean-field Potts Hamiltonian, and analyse phase-transitions for the generalized mean-field Potts measures. Is there an analogue of the Ellis-Wang theorem \cite{ElWa89} and persistence of the first-order phase-transition? We show that this is indeed the case for $q>2$. For $q=2$ there is a threshold for the exponent such that for $2\leq z\leq4$ there is a phase-transition of second order, for $z>4$ the phase-transition is of first order. Aim two of the paper is to look at the Gibbsian properties of the resulting fuzzy model, obtained by application of the same map $T$ to the generalized mean-field Potts measure. Do we obtain the same characterization as for the standard mean-field Potts model?
The answer is yes, but with changes, which are inherited from the changed behavior of the Curie-Weiss model when the interaction exponent changes. The third aim is to reinterpret our results and introduce a dynamical point of view. In this view we consider decreasing finite sequences of decompositions ${\cal A}_t$ of the local state-space $\{1,\dots,q\}$, labelled by a discrete time $t=0,1,\dots,T$. We call these sequences collapsing schemes. As we move along $t$ we are interested in whole trajectories of fuzzy measures and what can be said about Gibbsianness here. Analogous questions have been studied for time-evolved Gibbs measures arising from stochastic spin dynamics and usually there is no multiple in- and out of Gibbsianness in these models. As we will see this may very well be the case here, depending on the collapsing scheme. Technically the paper rests on a detailed bifurcation analysis of the free energy, the first step being a reduction to a one-dimensional problem using an extension of the proof of \cite{ElWa89}. We find here the somewhat surprising fact that there is a triple point at $q=2,z=4$, with a transition from second-order to first-order phase-transition.
\section{The generalized Potts model}\label{The generalized Potts model}
For a positive integer $q$ and a real number $z\geq2$, the Gibbs measure $\pi^N_{\beta,q,z}$ for the $q$-state generalized Potts model on the complete graph with $N\in{\Bbb N}$ vertices at inverse temperature $\beta\geq0$ is the probability measure on $\{1,\dots, q\}^N$ which to each $\xi\in\{1,\dots,q\}^N$ assigns probability
\begin{equation}\label{Gen_Potts_Finite}
\begin{split}
\pi^N_{\beta,q,z}(\xi)=\frac{1}{Z^N_{\beta,q,z}}\exp(-NF_{\beta,q,z}(L_N^\xi))
\end{split}
\end{equation}
where $L_N^\xi=\frac{1}{N}\sum_{i=1}^N1_{\xi_i}$ is the empirical distribution of the configuration $\xi=(\xi_i)_{i\in N}$, $F_{\beta,q,z}\colon\mathcal{P}(\{1,\dots,q\})\to{\Bbb R}$, $F_{\beta,q,z}(\nu):=-\beta\sum_{i=1}^q\nu_i^z/z$ is the mean-field Hamiltonian of the generalized Potts model and $Z^N_{\beta,q,z}$ is the normalizing constant. Notice, the case $z=2$ is the standard Potts model; in particular the case $z=2$, $q=2$ refers to the Curie-Weiss-Ising model. We call the case $q=2$, $z\geq2$ the generalized Curie-Weiss-Ising model. The Ellis-Wang Theorem \cite{ElWa89} describes the limiting behaviour of the standard Potts model as the system size grows to infinity. Here we give a generalized version for interactions with $z\geq2$.
\begin{thm}(Generalized Ellis-Wang Theorem)\label{Generalized_Ellis_Wang}
Assume that $q\geq2$ and $z\geq2$. Then there exists a critical temperature $\beta_c(q,z)>0$ such that in the weak limit
\begin{equation}
\lim_{N\to\infty}\pi^N_{\beta,q,z}(L_N\in\cdot)
=\begin{cases}
\delta_{1/q(1,\dots,1)} & \text{ if } \beta<\beta_c(q,z)\\
\frac{1}{q}\sum^q_{i=1}\delta_{u(\beta,q,z)e_i+(1-u(\beta,q,z))/q(1,\dots, 1)} & \text{ if } \beta>\beta_c(q,z)
\end{cases}
\end{equation}
where $e_i$ is the unit vector in the $i$-th coordinate of ${\Bbb R}^q$ and $u(\beta,q,z)$ is the largest solution of the so-called mean-field equation
\begin{equation}\label{MFeqPhi}
u=\frac{1-\exp\bigl(\Delta_{\beta,q,z}(u)\bigr)}{1+\left(q-1\right)\exp\bigl(\Delta_{\beta,q,z}(u)\bigr)}
\end{equation}
with $\Delta_{\beta,q,z}(u):=-\frac{\beta}{q^{z-1}}\left[\bigl(1+(q-1)u\bigr)^{z-1}-\bigl(1-u\bigr)^{z-1}\right]$.
Further, for $(q,z)\in\{2\}\times[2,4]$ the function $\beta\mapsto u(\beta,q,z)$ is continuous. In the complementary case the function $\beta\mapsto u(\beta,q,z)$ is discontinuous at $\beta_c(q,z)$.
\end{thm}
For $q>2$ the above result is in complete analogy to the standard Potts model. For the generalized Curie-Weiss-Ising model ($q=2$) there is an important difference. It is a known fact that the standard Curie-Weiss-Ising model ($z=2$) has a second-order phase-transition. This is still true as long as $2\leq z\leq4$. But in case of the generalized Curie-Weiss-Ising model with $z>4$ the phase-transition is of first order.
\bigskip
In the analysis of the fuzzy Potts model the following result is useful.
\begin{prop}\label{q_Monotonicity_Of_Beta}
For the generalized Potts model the function $q\mapsto\beta_c(q,z)$ is increasing.
\end{prop}
\section{The generalized fuzzy Potts model}\label{The generalized fuzzy Potts model}
Consider the $q$-state generalized Potts model and let $s<q$ and $r_1,..., r_s$ be positive integers such that $\sum^s_{i=1}r_i=q$. For fixed $\beta>0$, $z\geq2$ and $N\in{\Bbb N}$ let $X$ be the $\{1,\dots, q\}^N$-valued random vector distributed according to the Gibbs measure $\pi^N_{\beta,q,z}$. Then define $Y$ as the $\{1,\dots, s\}^N$-valued random vector by
\begin{align*}
Y_i=
\begin{cases}
1\quad \text{if}\quad X_i\in\{1,..., r_1\},\\
2\quad \text{if}\quad X_i\in\{r_1+1,..., r_1+r_2\},\\
\vdots \quad\quad\quad \vdots\\
s\quad \text{if}\quad X_i\in\{q-r_s+1,..., q\}
\end{cases}
\end{align*}
for each $i\in\{1,\dots,N\}$. In other words, using the coarse-graining map $T\colon\{1,\dots,q\}^N\to\{1,\dots,s\}^N$ with $T(k)=l$ iff $\sum_{j=1}^{l_i-1}r_{j}<k_i\leq\sum_{j=1}^{l_i}r_{j}$ for all $i\in\{1,\dots,N\}$, we have $Y=T\circ X$. Let us denote by $\mu^N_{\beta,q,z,(r_1,...,r_s)}$ the distribution of $Y$ and call it the finite-volume fuzzy Potts measure. The vector $(r_1,..., r_s)$ we call the spin partition of the fuzzy Potts model.
\bigskip
In \cite{HK04} the notion of Gibbsianness for mean-field models is introduced. It is based on the continuity of the so-called mean-field specification as a function of the boundary condition. In analogy to the lattice situation a mean-field specification is a probability kernel that for every boundary measure is a measure on the single-site space. If it is discontinuous w.r.t.\ the boundary measure, it cannot constitute a Gibbs measure. The mean-field specification is obtained as the infinite-volume limit of the one-site conditional probabilities in finite volume. To be more specific we present the statement from \cite{HK04} applied to our situation without proof.
\begin{lem}
For $\mu^N_{\beta,q,z,(r_1,...,r_s)}$ the generalized fuzzy Potts model on $\{1,\dots,s\}$ there exists a probability kernel $Q^N_{\beta,q,z,(r_1,...,r_s)}\colon \{1,\dots,s\}\times \mathcal{P}(\{1,\dots,s\})\to[0,1]$ such that the single-site conditional expectations at any site $i$ can be written in the form
\begin{equation*}
\mu^N_{\beta,q,z,(r_1,...,r_s)}(Y_i=k|Y_{\{1,\dots,N\}\setminus i}=\eta)=Q^N_{\beta,q,z,(r_1,...,r_s)}(k|\bar\eta)
\end{equation*}
where $\bar\eta\in\mathcal{P}(\{1,\dots,s\})$ with $\bar\eta_l=\#(1\leq j\leq N,j\neq i,\eta_j=l)/(N-1)$ the fraction of sites for which the spin-values of the conditioning are in the state $l\in\{1,\dots,s\}$. Further $\mu^N_{\beta,q,z,(r_1,...,r_s)}$ is uniquely determined by $Q^N_{\beta,q,z,(r_1,...,r_s)}$.
\end{lem}
\begin{defn}
Assume for all $k\in\{1,\dots,s\}$ and $\nu_N\to\nu$, the infinite-volume limit $Q^N_{\beta,q,z,(r_1,...,r_s)}(k|\nu_N)\to Q^\infty_{\beta,q,z,(r_1,...,r_s)}(k|\nu)$ exists. We call the generalized fuzzy Potts model Gibbs if $\nu\mapsto Q^\infty_{\beta,q,z,(r_1,...,r_s)}(\cdot|\nu)$ is continuous. Otherwise we call it non-Gibbs.
\end{defn}
Theorem 1.2 in \cite{HK04} therefore describes properties of the limiting conditional probabilities in case of the fuzzy Potts model. Here we give a version of this theorem for the generalized fuzzy Potts model with exponent $z>2$.
\begin{thm}\label{Generalized_HaKu}
Consider the $q$-state generalized fuzzy Potts model at inverse temperature $\beta>0$ with exponent $z>2$ and spin partition $(r_1,..., r_s)$, where $1<s<q$ and $\sum_{i=1}^s{r_i=q}$. Denote by $\beta_c(r_k,z)$ the inverse critical temperature of the $r_k$-state generalized Potts model with the same exponent $z>2$.
Then
\begin{itemize}
\item[(i)] Suppose $2<z\leq4$ and $r_i\leq2$ for all $i\in\{1,\dots,s\}$. Then the limiting conditional probabilities exist and are continuous functions of the empirical distribution of the conditioning for all $\beta\geq0$.
\end{itemize}
Assume $z>4$ or that $r_i\geq3$ for some $i\in\{1,\dots,s\}$. Put $r_{*}:=\min\{r\geq 3,r=r_i \text{\ for some\ } i\in\{1,\dots, s\}\}$ and $r_{\#}:=\min\{r\geq 2,r=r_i \text{\ for some\ } i\in\{1,\dots, s\}\}$, then the following holds:
\begin{itemize}
\item[(ii)] If $z>4$ then
\begin{enumerate}
\item the limiting conditional probabilities exist and are continuous for all $\beta<\beta_c(r_{\#},z)$,
\item the limiting conditional probabilities are discontinuous for all $\beta\geq\beta_c(r_{\#},z)$; in particular they do not exist in points of discontinuity.
\end{enumerate}
\item[(iii)] If $2<z\leq 4$ then
\begin{enumerate}
\item the limiting conditional probabilities exist and are continuous for all $\beta<\beta_c(r_{*},z)$,
\item the limiting conditional probabilities are discontinuous for all $\beta\geq\beta_c(r_{*},z)$; in particular they do not exist in points of discontinuity.
\end{enumerate}
\end{itemize}
\end{thm}
\section{Dynamical Gibbs-non Gibbs transitions along collapsing schemes}
Consider the set of Potts spin values $\{1,\dots,q\}$ and denote by ${\cal A}=\{I_1,\dots,I_r\}$ a spin partition. Write $\mu_{\beta,q,z,{\cal A}}^N$ for the finite-volume fuzzy Potts Gibbs measure on $\{1,\dots,r\}^N$. With a partition ${\cal A}$ comes the $\sigma$-algebra $\sigma({\cal A})$ which is generated by it. It consists of the unions of sets in ${\cal A}$. Conversely a $\sigma$-algebra determines a partition. The set of $\sigma$-algebras over $\{1,\dots,q\}$ is partially ordered by inclusion.
Now let $({\cal A}_t)_{t=0,1,\dots, T}$ be a strictly decreasing sequence of partitions (a \textit{collapsing scheme}) with ${\cal A}_0=(\{1\},\dots, \{q\})$ being the finest one (consisting of $q$ classes), and ${\cal A}_T=(\{1,\dots,q\})$ being the coarsest one. $t$ can be considered as a time index. Moving along $t$ more and more classes are collapsed. Note that the finite sequence of $\sigma$-algebras generated by these partitions, $\sigma({\cal A}_T)\subset \sigma({\cal A}_{T-1}) \subset \dots \subset \sigma({\cal A}_0)$, is a filtration. Such a filtration can be depicted as a rooted tree with $q$ leaves which has $T$ levels. A level $i$ corresponds to a $\sigma$-algebra ${\cal F}_i$; the vertices at level $i$ are the sets in the partition corresponding to ${\cal F}_i$. A set in the partition at level $i$ is a parent of a set in the partition at level $i-1$ iff it contains the latter. We look at the corresponding sequence of increasingly coarse-grained models $(\mu_{\beta,q,z,{\cal A}_t}^N)_{t=0,\dots, T}$. What can be said about in and out of Gibbsianness along such a path? For a partition ${\cal A}$ and given exponent $z\geq2$ denote by $r_*({\cal A},z)$ the size of the smallest class in the non-Gibbsian region $(r,z)\in([2,\infty)\times[2,\infty))\setminus(\{2\}\times[2,4])$. The following corollary is a direct consequence of our main Theorem \ref{Generalized_HaKu} and Theorem 1.2 in \cite{HK04}.
\begin{cor}\label{Non_Gibbs_Cor}
The model is non-Gibbs at time $t\in \{1,2,\dots, T-1\}$ if and only if $\beta\geq\beta_c(r_*({\cal A}_t,z),z)$.
\end{cor}
Even though by Proposition \ref{q_Monotonicity_Of_Beta} $r\mapsto\beta_c(r,z)$ is increasing, it is quite possible to have collapsing schemes where $t\mapsto\beta_c(r_*({\cal A}_t,z),z)$ is not monotone for $t\in \{1,\dots, T\}$.
This is because $t\mapsto r_*({\cal A}_t,z)$ does not have to have monotonicity, as happens e.g.\ in the following example:
\begin{equation*}
\begin{split}
&{\cal A}_0=(\{1\},\{2\},\{3\},\{4\},\{5\})\\
&{\cal A}_1=(\{1,2\},\{3\},\{4\},\{5\})\\
&{\cal A}_2=(\{1,2,3\},\{4\},\{5\})\\
&{\cal A}_3=(\{1,2,3\},\{4,5\})\\
&{\cal A}_4=(\{1,2,3,4,5\})
\end{split}
\end{equation*}
with $(r_*({\cal A}_t,5))_{t=1,\dots, T-1}=(2,3,2)$. If $q$ is a power of two, and the collapsing scheme is chosen according to a binary tree, there is of course monotonicity, as e.g.\ in the following example
\begin{equation*}
\begin{split}
&{\cal A}_0=(\{1\},\{2\},\{3\},\{4\},\{5\},\{6\},\{7\},\{8\})\\
&{\cal A}_1=(\{1,2\},\{3,4\},\{5,6\},\{7,8\})\\
&{\cal A}_2=(\{1,2,3,4\},\{5,6,7,8\})\\
&{\cal A}_3=(\{1,2,3,4,5,6,7,8\})
\end{split}
\end{equation*}
with $(r_*({\cal A}_t,5))_{t=1,\dots, T-1}=(2,4)$.
\begin{defn}
Let us agree to call a collapsing scheme regular if and only if $(r_*({\cal A}_t,z))_{t=1,\dots, T-1}$ is increasing and $T\geq 2$ (meaning there is no immediate collapse).
\end{defn}
The following theorem is an immediate consequence of Corollary \ref{Non_Gibbs_Cor} and Proposition \ref{q_Monotonicity_Of_Beta}.
\begin{thm}
Consider the generalized $q$-state Potts model with interaction exponent bigger than $2$. For a regular collapsing scheme the following is true:
\begin{itemize}
\item[(i)] The model stays Gibbs forever iff $\beta <\beta_c(r_*({\cal A}_1,z),z)$.
\item[(ii)] It is non-Gibbs for all $t\in \{1,\dots, T-1\}$ iff $\beta \geq \beta_c(r_*({\cal A}_{T-1},z),z)$.
\item[(iii)] For $\beta\in (\beta_c(r_*({\cal A}_{1},z),z),\beta_c(r_*({\cal A}_{T-1},z),z)]$ there is a transition time $t_G\in \{2,\dots, T-1\}$ such that the model is non-Gibbs for $t \in \{1,\dots, t_G -1\}$ and Gibbs for $t \in \{t_G, \dots, T\}$.
\end{itemize}
\end{thm}
Note that the second temperature-regime of non-Gibbsianness contains temperatures which are strictly bigger than the critical temperature of the initial $q$-state Potts model. In the last regime there is an immediate out of Gibbsianness, then the model stays non-Gibbs for a while and becomes Gibbsian again at the transition time $t_G$. Also note that for general collapsing schemes there can be temperature regions for which multiple in and out of Gibbsianness will occur.
\begin{figure}[htbp]
\centering
\includegraphics[width=14cm]{Pix_Collapsing_Scheme_1.eps}
\caption{\scriptsize{Qualitative picture of the collapsing scheme $(r_*({\cal A}_t,5))_{t=1,\dots, 7}=(2,3,4,2,5,2,3,5)$. The gray area below the graph shows the non-Gibbsian temperature regime. Clearly the generalized Potts model with fixed temperature $1/\beta$ and the same exponent under fuzzyfication given by the collapsing scheme ${\cal A}_t$ can experience in and out of Gibbsianness multiple times.}}
\label{Collapsing scheme}
\end{figure}
\section{Proofs of statements presented in Section \ref{The generalized Potts model}}
\subsection{Proof of Theorem \ref{Generalized_Ellis_Wang}}
The empirical distribution $L_N$ obeys a large deviation principle with the relative entropy $I(\cdot|\alpha)$ as a rate function, where $\alpha$ is the equidistribution on $\{1,\dots,q\}$.
Together with Varadhan's lemma the question of finding the limiting distribution of $L_N$ under $\pi_{\beta,q,z}^N$ is equivalent to finding the global minimizers of the so-called free energy $\Gamma_{\beta,q,z}\colon \mathcal{P}(\{1,\dots,q\})\to{\Bbb R}$,
\begin{equation}
\begin{split}
\Gamma_{\beta,q,z}(\nu)&=F_{\beta,q,z}(\nu)+I(\nu|\alpha)=-\frac{\beta}{z}\sum_{i=1}^q\nu_i^z+\sum_{i=1}^q\nu_i\log(q\nu_i).
\end{split}
\end{equation}
For details on large deviation theory check \cite{DeZe10}. The proof of Theorem \ref{Generalized_Ellis_Wang} thus rests completely on the following theorem.
\begin{thm}\label{Minimizer_Theorem}
\begin{enumerate}
\item[(i)] Any global minimizer of $\Gamma_{\beta,q,z}$ with $z\geq2$ and $q\geq2$ must have the form
\begin{equation}
\bar{\nu}=
\begin{pmatrix}
\frac{1}{q}\bigl(1+\left(q-1\right)u\bigr)\\
\frac{1}{q}\left(1-u\right)\\
\vdots\\
\frac{1}{q}\left(1-u\right)
\end{pmatrix}
\quad \textup{\textit{with}} \quad u \in \left[0,1\right)
\end{equation}
or a point obtained from such a $\bar{\nu}$ by permutating the coordinates.
\item[(ii)] There exists a critical temperature $\beta_c(q,z)>0$ such that for $\beta<\beta_c(q,z)$, $\bar{\nu}$ is given in the above form with $u=0$, in other words $\bar{\nu}^T=(\frac{1}{q},\dots,\frac{1}{q})$.
If $\beta>\beta_c(q,z)$, then $\bar{\nu}$ is given in the above form with $u=u(\beta,q,z)$, where $u(\beta,q,z)$ is the largest solution of the mean-field equation
\begin{equation}\label{MFeqPhi2}
u=\frac{1-\exp\bigl(\Delta_{\beta,q,z}(u)\bigr)}{1+\left(q-1\right)\exp\bigl(\Delta_{\beta,q,z}(u)\bigr)}
\end{equation}
with $\Delta_{\beta,q,z}(u):=-\frac{\beta}{q^{z-1}}\left[\bigl(1+(q-1)u\bigr)^{z-1}-\bigl(1-u\bigr)^{z-1}\right]$.
\item[(iii)] The function $\beta\mapsto u(\beta,q,z)$ is discontinuous at $\beta_c(q,z)$ for all $z\geq2$ and $q\geq2$ except for the case $(q,z)\in\{2\}\times[2,4]$.
\end{enumerate}
\end{thm}
\bigskip
For the proof of part (i) of Theorem \ref{Minimizer_Theorem} we use the following remark and lemma.
\begin{rem}\label{Permutation_Invariance}
Due to the permutation invariance of the model it suffices to consider minimizers of $\Gamma_{\beta,q,z}$ with $\bar{\nu}_{k}\geq\bar{\nu}_{k+1}$ for all $k\in\{1,\dots, q-1\}$.
\end{rem}
\begin{lem}\label{Only_Two_Poss}
Let $\bar{\nu}\in\mathcal{P}(\{1,\dots,q\})$ be a minimizer of $\Gamma_{\beta,q,z}$ with $z\geq2$, $q\geq2$ and define the auxiliary function
\[
g(x):=\beta x^{z-1}-\log(qx)
\]
with $x\in(0,1]$. Let $\tilde{u}$ be the minimizer of $g$, given by
\begin{equation}\label{MinG}
\tilde{u}:=\frac{1}{\sqrt[z-1]{\beta(z-1)}},
\end{equation}
then the coordinates of $\bar{\nu}$ satisfy the following conditions:
\begin{enumerate}
\item[(i)] If $\bar{\nu}_1\leq\tilde{u}$, then $\bar{\nu}_k=\bar{\nu}_1$ for all $k \in\{2,\dots,q\}$ and any minimizer of $\Gamma_{\beta,q,z}$ has the form
\begin{equation*}
\bar{\nu}=(\frac{1}{q},\dots,\frac{1}{q})^T.
\end{equation*}
\item[(ii)] If $\bar{\nu}_1 > \tilde{u}$, then $\bar{\nu}_k \in\{\bar{\nu}_0,\bar{\nu}_1\}$ for all $k \in\{2,\dots,q\}$ with $\bar{\nu}_1>\bar{\nu}_0$ and $g(\bar{\nu}_0)=g(\bar{\nu}_1)$. In this case any minimizer of $\Gamma_{\beta,q,z}$ has the form
\begin{equation*}
\bar{\nu}=(\underbrace{\bar{\nu}_1,\dots,\bar{\nu}_1}_{{}l\text{ times}},\bar{\nu}_0,\dots,\bar{\nu}_0)^T \text{ with }\bar{\nu}_1=\frac{1-(q-l)\bar{\nu}_0}{l},
\end{equation*}
where $1\leq l\leq q$.
\end{enumerate}
\end{lem}
\textbf{Proof: }Since $\bar{\nu}$ is a minimizer, $\nabla \Gamma_{\beta,q,z}(\bar{\nu})=(c,\dots,c)^T$. In other words $-\beta \bar{\nu}_k^{z-1}+\log(q\bar{\nu}_k)+1=c$ for all $k\in\{1,\dots,q\}$ and hence
\begin{equation*}
g(\bar{\nu}_{1})=\beta\bar{\nu}_{1}^{z-1}-\log(q\bar{\nu}_1)=\beta \bar{\nu}_{k}^{z-1}-\log(q\bar{\nu}_k)=g(\bar{\nu}_{k})
\end{equation*}
for all $k\in\{1,\dots,q\}$. The function $g$ has the following properties: $\lim_{x\to0}g(x)=+\infty$; $g(1)=\beta-\log(q)$; $g'(x)=\beta(z-1)x^{z-2}-1/x$ and thus $g$ attains its unique extremal point in $\tilde u$; $g''(x)=\beta(z-1)(z-2)x^{z-3}+\frac{1}{x^2}>0$ and hence $g$ is strictly convex with global minimum attained in $\tilde u$. As a consequence $g$ is injective on $(0,\tilde u]$ and hence if $\bar{\nu}_1\leq\tilde u$, by Remark \ref{Permutation_Invariance} $\bar{\nu}_k\leq\bar{\nu}_1$ and thus from $g(\bar{\nu}_1)=g(\bar{\nu}_k)$ for all $k$ it follows $\bar{\nu}_k=\bar{\nu}_1$ for all $k$.
So $\bar{\nu}$ must be the equidistribution. If $\bar{\nu}_1>\tilde u$, since $g$ is strictly convex, $\lim_{x\to0}g(x)=+\infty$ and $g(\bar{\nu}_1)=g(\bar{\nu}_k)$, we have $\bar{\nu}_k\in\{\bar{\nu}_0,\bar{\nu}_1\}$ for all $k$ where $\bar{\nu}_0<\bar{\nu}_1$ such that $g(\bar{\nu}_0)=g(\bar{\nu}_1)$. Consequently, again by Remark \ref{Permutation_Invariance}, $\bar\nu$ must have the following form
\begin{equation}\label{Pre_Minimizers}
\bar{\nu}=(\underbrace{\bar{\nu}_1,\dots,\bar{\nu}_1}_{{}l\text{ times}},\bar{\nu}_0,\dots,\bar{\nu}_0)^T \text{ with }2\leq l \leq q.
\end{equation}
Since $\bar{\nu}$ is a probability measure, $l\bar{\nu}_1+(q-l)\bar{\nu}_0=1$ and hence $\bar{\nu}_1=(1-(q-l)\bar{\nu}_0)/l$. $\Box$
\bigskip
\textbf{Proof of Theorem \ref{Minimizer_Theorem} part (i): }
First note, for $\bar{\nu}\in\mathcal{P}(\{1,\dots,q\})$ a minimizer of $\Gamma_{\beta,q,z}$, $k\in\{2,\dots,q\}$ and
\[
D_{\bar{\nu}}^{k}:=\{\nu \in \mathcal{P}(\{1,\dots,q\}):\nu_x=\bar{\nu}_x\text{ for all }x\in\{2,\dots,q\} \setminus\{k\}\}
\]
of course $\min_{\nu\in D_{\bar{\nu}}^{k}}\Gamma_{\beta,q,z}(\nu)=\Gamma_{\beta,q,z}(\bar{\nu})$. Using this and the above Lemma \ref{Only_Two_Poss}, for fixed $k$ we can set $a\in[0,1]$ such that $\sum_{i\neq 1,k}\bar{\nu}_i=1-a$ where $\bar{\nu}$ is a minimizer. Hence $\nu_1+\nu_k=a$ and for $\nu\in D^{k}_{\bar{\nu}}$, $\Gamma_{\beta,q,z}(\nu)$ has to be minimized as a function of the variable $\nu_1$ alone.
We calculate
\begin{align*}
\frac{\partial \Gamma_{\beta,q,z}}{\partial \nu_1} &=-\beta(\nu_1^{z-1}-(a-\nu_1)^{z-1})+\log\frac{\nu_1}{a-\nu_1}
\end{align*}
and thus have to analyse the inequality
\begin{equation*}
h_l(x):=\beta(x^{z-1}-(a-x)^{z-1})\leq\log\frac{x}{a-x}=:h_r(x).
\end{equation*}
Notice $h_l$ and $h_r$ are both point symmetric at $x=a/2$ and $h_l(a/2)=0=h_r(a/2)$. In particular $a/2$ is a candidate for the minimum of $\nu_1\mapsto\Gamma_{\beta,q,z}(\nu_1,\bar\nu_2,\dots,\bar\nu_{k-1},a-\nu_1,\bar\nu_{k+1},\dots,\bar\nu_q)$ and if it is, $\nu_1=\nu_k$. By point symmetry it suffices to look at $h_l$ and $h_r$ on the set $[a/2,a]$. Requiring $h'_l(a/2)=h'_r(a/2)$ is equivalent to
\begin{equation*}
\frac{a}{2}=\frac{1}{\sqrt[z-1]{\beta(z-1)}}=\tilde u.
\end{equation*}
Let us collect some further properties of $h_l$ and $h_r$: Both functions are convex on $[a/2,a)$; $\lim_{x\to a}h_r(x)=\infty$ and $h_l(a)=\beta a^{z-1}<\infty$; $h''_l(a/2)=0=h''_r(a/2)$. Also
\begin{equation*}
\begin{split}
h'''_l(a/2)=2\beta(z-1)(z-2)(z-3)(a/2)^{z-4}\text{ and } h'''_r(a/2)=4(a/2)^{-3}
\end{split}
\end{equation*}
so if $a/2=\tilde u$ some minor calculations show $h'''_l(a/2)=h'''_r(a/2)$ iff $z=4$. In particular for $z<4$, $h'''_l(a/2)<h'''_r(a/2)$, and for $z=4$ higher orders show the graph of $h_l$ close to $a/2$ is lower than the one of $h_r$. That is why we have to distinguish two cases with several subcases each.
\bigskip
First let $2\leq z\leq4$. We show there is either one or no additional point $x\in(a/2,a]$ such that $h'_l(x)=h'_r(x)$. Let us write the temperature as a function of solutions of $h'_l(x)=h'_r(x)$,
\begin{equation}\label{Beta_For_h'}
\beta_{z,a}(x)=\frac{a}{(z-1)((a-x)x^{z-1}+x(a-x)^{z-1})}.
\varepsilonnd{equation} This function is strictly increasing, indeed $\beta'_{z,a}>0$ is equivalent to \betaegin{equation}\lambdaabel{Beta_Ableitung} a(z-1)-zx-(xz-a)(\varphirac{a-x}{x})^{z-2}<0. \varepsilonnd{equation} Setting $y=(a-x)/x$ we can write this equivalently as \betaegin{equation}\lambdaabel{Beta_Ableitung_Replaced} \betaegin{split} a(z-1)-z\varphirac{a}{y+1}-(\varphirac{a}{y+1}z-a)y^{z-2}&<0\cr (z-1)y-1-((z-1)-y)y^{z-2}&<0\cr \varepsilonnd{split} \varepsilonnd{equation} where $x\mapsto y$, $(a/2,a]\mapsto [0,1)$ is bijective. Notice $z-1>y$ and $y^{z-2}\gammaeq y^2$, hence \betaegin{equation*} \betaegin{split} (z-1)y&-1-((z-1)-y)y^{z-2}<(z-1)y-1-((z-1)-y)y^{2}\cr &=y^3-(z-1)(y^2-y)-1<y^3-3(y^2-y)-1=(y-1)^3<0.\cr \varepsilonnd{split} \varepsilonnd{equation*} But this is true and thus $\beta'_{z,a}>0$ and for every $\beta\lambdaeq\beta_{z,a}(a/2)=\varphirac{1}{z-1}(\varphirac{a}{2})^{1-z}$ there is no $x\in(a/2,a]$ with $h'_l(x)=h'_r(x)$, for every $\beta>\varphirac{1}{z-1}(\varphirac{a}{2})^{1-z}$ there is exactly one $x\in(a/2,a]$ with $h'_l(x)=h'_r(x)$. \betaigskip Subcase one, let $a/2\lambdaeq\tauilde u$. This is equivalent to $\beta\lambdaeq\varphirac{1}{z-1}(\varphirac{a}{2})^{1-z}$ and hence $h'_r>h'_l$ on $[a/2,a)$, in particluar there can not be a $x\in[a/2,a)$ such that $h_l(x)=h_r(x)$ and $h_r>h_l$ on $[a/2,a)$. Due to point symmetry $a/2$ is the unique global minimum of the free energy as a function of the first variable $\nu_1$ on $D^k_{\sigmaetminusr\nu}$. In particular $\nu_1\lambdaeq\tauilde u$ and thus by Lemma \rhoef{Only_Two_Poss} part (i), the free energy minimizer is the equidistribution. \betaigskip Subcase two, let $a/2>\tauilde u$. This is equivalent to $\beta>\varphirac{1}{z-1}(\varphirac{a}{2})^{1-z}$ and hence there is exactly one $x_1\in(a/2,a)$ such that $h'_l(x_1)=h_r'(x_1)$. Since $\lambdaim_{x\tauo a}h_l(x)<\lambdaim_{x\tauo a}h_r(x)$ there must be at least $x_+\in(a/2,a)$ such that $h_l(x_+)=h_r(x_+)$. 
If there were two different such points, for instance $x_+<x'_+$, then by the generalized mean value theorem there would exist $\xi_+<\xi'_+$ such that
\begin{equation}\label{Mean_Value_Argument}
\begin{split}
1=\frac{h_r(x'_+)-h_r(x_+)}{h_l(x'_+)-h_l(x_+)}=\frac{h'_r(\xi'_+)}{h'_l(\xi'_+)}\quad\text{ and }\quad 1=\frac{h_r(x_+)-h_r(a/2)}{h_l(x_+)-h_l(a/2)}=\frac{h'_r(\xi_+)}{h'_l(\xi_+)},
\end{split}
\end{equation}
in other words $h'_r(\xi'_+)=h'_l(\xi'_+)$ and $h'_r(\xi_+)=h'_l(\xi_+)$, a contradiction. Due to point symmetry, $a/2$ then is a local maximum and $x_+$ as well as $x_-:=a-x_+$ are global minima of the free energy as a function of the first variable $\nu_1$ on $D^k_{\bar\nu}$. By Remark \ref{Permutation_Invariance}, $\nu_1\geq\nu_k$, and since $x_+>x_-$ we have $\nu_1=x_+$ and $\nu_k=x_-$. In particular $\nu_1>\tilde u$ and thus, by Lemma \ref{Only_Two_Poss} part (ii), the free energy minimizer has the form
\begin{equation*}
\bar\nu=(\underbrace{x_+,\dots,x_+}_{l\text{ times}},x_-,\dots,x_-)^T \text{ with }2\leq l<k.
\end{equation*}
Moreover, if $l>1$, then $\nu_1+\nu_l=2x_+>a>2\tilde u$ and hence, by the same arguments as above, $\nu_1>\nu_l$, a contradiction.
\begin{figure}[htbp]
\centering
\includegraphics[width=4.5cm]{Pix_h_left_AND_h_right_1.eps}
\includegraphics[width=4.5cm]{Pix_h_left_AND_h_right_2.eps}
\includegraphics[width=4.5cm]{Pix_h_left_AND_h_right_3.eps}
\caption{\scriptsize{On the left side, $h_r$ and $h_l$ in the cases $2\leq z\leq4$ subcase one and $z>4$ subcase two. In the middle, $h_r$ and $h_l$ in the cases $2\leq z\leq4$ subcase two and $z>4$ subcase one. On the right side, $h_r$ and $h_l$ in the case $z>4$ subcase four.}}
\label{Point_Symmetric_Functions}
\end{figure}
\bigskip

Now let $z>4$.
We show that there are either one, two or no additional points $x\in(a/2,a]$ such that $h'_l(x)=h'_r(x)$. Let us look again at $\beta_{z,a}$ defined in \eqref{Beta_For_h'}. For $z>4$, $\beta_{z,a}$ has a local maximum at $a/2$, since $\beta'_{z,a}(a/2)=0$, which can easily be seen from equation \eqref{Beta_Ableitung}, and $\beta''_{z,a}(a/2)=-(z-4)(\frac{a}{2})^{-(z+1)}<0$. We show that there is only one solution of $\beta_{z,a}'(x)=0$ on $(a/2,a]$, which must be a global minimizer since $\lim_{x\to a}\beta_{z,a}(x)=\infty$. Indeed, from \eqref{Beta_Ableitung_Replaced} we see that requiring $\beta_{z,a}'$ to be zero is equivalent to the fixed point equation
\begin{equation*}
y=\frac{(z-y-1)y^{z-2}+1}{z-1}=:r_z(y)
\end{equation*}
having a unique solution on $[0,1)$. The r.h.s.\ has the following properties: $r_z(0)=1/(z-1)>0$; $r_z(1)=1$; $r'_z(1)>1$; and $r_z$ is convex, since $r''_z(y)=(z^2-(3+y)z+2)y^{z-3}>0$. Combining these properties gives the uniqueness of the fixed point and thus the uniqueness of the extremal value of $\beta_{z,a}$, which is a minimum that we want to call $\beta_0(z,a)$.
\bigskip

Subcase one: let $a/2\geq\tilde u$. This is equivalent to $\beta\geq\frac{1}{z-1}(\frac{a}{2})^{1-z}$ and hence, by the exact same arguments as in the case $z\leq4$ subcase two, the free energy minimizer has the form $\bar\nu=(x_+,x_-,\dots,x_-)^T$ with $x_+>x_-$.
\bigskip

Subcase two: let $a/2<\tilde u$ and $\beta<\beta_0(z,a)$. Then we are in a situation as in the case $z\leq4$ subcase one. In particular the free energy minimizer is the equidistribution.
\bigskip

Subcase three: let $a/2<\tilde u$ and $\beta=\beta_0(z,a)<\frac{1}{z-1}(\frac{a}{2})^{1-z}$.
In this case, there is exactly one $x_1\in(a/2,a]$ such that $h_l'(x_1)=h_r'(x_1)$, and hence by the mean value argument already presented in \eqref{Mean_Value_Argument} there cannot be more than one $x_+\in(a/2,a]$ such that $h_l(x_+)=h_r(x_+)$. If no such $x_+$ exists, we are in the same situation as in subcase two (right above). If such an $x_+$ exists, it must be a touching point of the graphs of $h_l$ and $h_r$, since otherwise, because of $\lim_{x\to a}h_l(x)<\lim_{x\to a}h_r(x)$, there would have to be another point $(a/2,a]\ni\bar x_+\neq x_+$ with $h_l(\bar x_+)=h_r(\bar x_+)$. If it is a touching point of $h_l$ and $h_r$, then the free energy as a function of the first entry cannot attain a minimum at $x_+$; instead it is a saddle point and the minimum is attained at $a/2$. Consequently the minimizing distribution of the free energy is the equidistribution.
\bigskip

Subcase four: let $a/2<\tilde u$ and $\beta_0(z,a)<\beta<\frac{1}{z-1}(\frac{a}{2})^{1-z}$. In this case we have exactly two points $x_1<x_2$ such that $h'_l(x_i)=h'_r(x_i)$ with $i\in\{1,2\}$, and again by the mean value argument \eqref{Mean_Value_Argument} there cannot be more than two points $x_+>x'_+$ with $h_l(x_+)=h_r(x_+)$ and $h_l(x'_+)=h_r(x'_+)$. If no such point or only one such point exists, we can apply the same arguments as in subcase three (right above) and the equidistribution is the free energy minimizer. If both points exist and both belong to touching points of the graphs of $h_l$ and $h_r$, then again the equidistribution must be the minimizer. The case that both points exist and only one is a touching point is impossible. Now if both points exist and belong to real intersections of the graphs of $h_l$ and $h_r$, then we have three local minima attained at $x_-<a/2<x_+$ with $x_-:=a-x_+$. Hence for $\nu_1$ the local minimizers $a/2$ and $x_+$ are competing to be the global minimizer.
If $a/2$ is the global minimizer, then by Lemma \ref{Only_Two_Poss} the free energy is minimized by the equidistribution. If $x_+$ is the global minimizer, then notice that if $x_+\leq\tilde u$, again by Lemma \ref{Only_Two_Poss}, $x_+=a/2$, which contradicts $x_+>a/2$. Hence $x_+>\tilde u$ and the free energy minimizer has the form
\begin{equation*}
\bar\nu=(\underbrace{x_+,\dots,x_+}_{l\text{ times}},x_-,\dots,x_-)^T \text{ with }2\leq l<k.
\end{equation*}
Moreover, if $l>1$, then $\nu_1+\nu_l=2x_+>2\tilde u$ and hence by subcase one $\nu_1>\nu_l$, a contradiction. Finally, in order to have the minimizers in the format given in the theorem, define $u\in[0,1)$ such that $(1-u)/q=x_-$. This is always possible since $0<x_-\leq 1/q$. Of course $x_+=(1+(q-1)u)/q$. $\Box$
\bigskip

For the proof of part (ii) of Theorem \ref{Minimizer_Theorem} we need the following lemmata.
\begin{lem}\label{Key_Lemma_Minimizer}
For $q>2$ and $z\geq2$ there exist two temperatures $0<\beta_0(q,z)<\beta_1(q,z)$ such that for $0<\beta<\beta_0$ the mean-field equation only has the trivial solution $u=0$. For $\beta_0<\beta<\beta_1$ the mean-field equation has two additional solutions $0<u_1<u_2<1$. Finally, for $\beta=\beta_0$ or $\beta\geq\beta_1$ there is only one additional solution $0<u_2<1$.
\end{lem}
\textbf{Proof: }Let us write the temperature as a function of positive solutions of the mean-field equation
\begin{equation}\label{Beta_Funktion}
\beta_{q,z}(u):=q^{z-1}\frac{\log(1+(q-1)u)-\log(1-u)}{(1+(q-1)u)^{z-1}-(1-u)^{z-1}}
\end{equation}
\begin{figure}[htbp]
\centering
\includegraphics[width=7cm]{Pix_BetaAsAFunction_1.eps}
\includegraphics[width=7cm]{Pix_BetaAsAFunction_2.eps}
\caption{\scriptsize{On the left side, $\beta_{q,z}$ for $q=3$ and $z=3,\dots,7$. The cup shape of the graphs is a common feature for the parameter regimes $(q,z)\in([2,\infty)\times[2,\infty])\setminus(\{2\}\times[2,4])$. On the right side, $\beta_{q,z}$ for $q=2$ and $z=2,\dots,4$. Here $\beta_{q,z}$ is strictly increasing, and this is a common feature for the parameter regimes $(q,z)\in\{2\}\times[2,4]$.}}
\label{Temperature_Function}
\end{figure}
and define $\lim_{u\to0}\beta_{q,z}(u)=\frac{q^{z-1}}{z-1}=:\beta_1$. Notice $\lim_{u\to0}\beta_{q,z}'(u)=-\frac{1}{2}(q-2)q^{z-1}<0$ and $\lim_{u\to1}\beta_{q,z}(u)=\infty=\lim_{u\to1}\beta_{q,z}'(u)$. We will show that $\beta_{q,z}$ has exactly one extremal point, attained at some $u_0\in(0,1)$. This must be a local and hence global minimum, which we want to call $\beta_0$. Let us calculate
\begin{equation*}
\begin{split}
&0=\beta_{q,z}'(u)=q^{z-1}\Bigl(\frac{q}{(1+(q-1)u)(1-u)[(1+(q-1)u)^{z-1}-(1-u)^{z-1}]}-\\
&\frac{[\log(1+(q-1)u)-\log(1-u)](z-1)[(1+(q-1)u)^{z-2}(q-1)+(1-u)^{z-2}]}{[(1+(q-1)u)^{z-1}-(1-u)^{z-1}]^2}\Bigr).
\end{split}
\end{equation*}
Replacing $v:=(1+\frac{qu}{1-u})^{z-1}$ we can write this equivalently as
\begin{equation*}
\begin{split}
q-1&=v^{\frac{1}{z-1}}\frac{\log v-v+1}{v-1-v\log v}=:F_z(v).
\end{split}
\end{equation*}
Notice $u\mapsto v$, $(0,1)\mapsto(1,\infty)$, is strictly increasing and bijective. It suffices to show that $F_z$ is bijective on $F_z^{-1}(1,\infty)$. First we have $\lim_{v\to1}F_z(v)=1$ and $\lim_{v\to\infty}F_z(v)=\infty$.
We show that $F_z$ is strictly increasing on $F_z^{-1}(1,\infty)$ and calculate
\begin{equation*}
\begin{split}
0=F_z'(v)=v^{\frac{2-z}{z-1}}\frac{(z-2)v\log^2 v+(v^2-1)\log v -(v-1)^2}{(z-1)(1-v+v\log v)^2},
\end{split}
\end{equation*}
which is equivalent to
\begin{equation*}
\begin{split}
z=\frac{(v^2-1)\log v-2v\log^2 v}{(v-1)^2-v\log^2 v}=:G(v).
\end{split}
\end{equation*}
Since $G(v)>4$ on $(1,\infty)$ (which we will see right below), for $2\leq z\leq4$ there are no extremal points of $F_z$ and in particular $F_z$ is bijective on $F_z^{-1}(1,\infty)$. Since $G(v)$ is also strictly increasing (which we will also see right below) and, for $z>4$, $\lim_{v\to1}F'_z(v)=\frac{4-z}{3(z-1)}<0$, there is exactly one extremal point of $F_z$, which must be a minimum. In particular that minimum is smaller than one and hence $F_z$ is bijective on $F_z^{-1}(1,\infty)$. To see that $G(v)>4$ and that $G$ is strictly increasing, use $\lim_{v\to1}G(v)=4$ and show $0<G'$, which is equivalent to
\begin{equation*}
\begin{split}
G_1(v):=(v-1)^3(v+1)-6v(v-1)^2\log v+3v(v^2-1)\log^2v-v(v^2+1)\log^3 v>0.
\end{split}
\end{equation*}
One way to see that this is true is to show strict convexity of $G_1$ and use $\lim_{v\to1}G_1(v)=\lim_{v\to1}G_1'(v)=0$ and $\lim_{v\to\infty}G_1(v)=\infty$. Here $G_1''>0$ is equivalent to
\begin{equation*}
\begin{split}
G_2(v):=4(v-1)^3-4(v-1)^2\log v+(v^2-1)\log^2 v-2v^2\log^3v>0
\end{split}
\end{equation*}
and again $\lim_{v\to1}G_2(v)=\lim_{v\to1}G_2'(v)=0$ and $\lim_{v\to\infty}G_2(v)=\infty$.
Now again we want to show strict convexity of $G_2$, but this is equivalent to
\begin{equation*}
\begin{split}
G_3(v):=1+4v-17v^2+12v^3+\log v-7v^2\log v-8v^2\log^2v-2v^2\log^3 v>0
\end{split}
\end{equation*}
and as before $\lim_{v\to1}G_3(v)=\lim_{v\to1}G_3'(v)=0$ and $\lim_{v\to\infty}G_3(v)=\infty$. Now again we want to show strict convexity of $G_3$, but this is equivalent to
\begin{equation*}
\begin{split}
G_4(v):=-1-71v^2+72v^3-74v^2\log v-34v^2\log^2v-4v^2\log^3v>0
\end{split}
\end{equation*}
and $\lim_{v\to1}G_4(v)=\lim_{v\to1}G_4'(v)=0$ and $\lim_{v\to\infty}G_4(v)=\infty$. Now as above we want to show strict convexity of $G_4$, but this is equivalent to
\begin{equation*}
\begin{split}
G_5(v):=54(v-1)-47\log v-13\log^2v-\log^3v>0
\end{split}
\end{equation*}
and now $\lim_{v\to1}G_5(v)=0$, $\lim_{v\to1}G_5'(v)=7$ and $\lim_{v\to\infty}G_5(v)=\infty$. Finally, the strict convexity of $G_5$ is equivalent to $21+20\log v+3\log^2v>0$. But this is true and hence the above cascade gives $0<G'$. This finishes the proof. $\Box$
\begin{lem}\label{Key_Lemma_Minimizer_Curie_Weiss_High_Exp}
For $q=2$ and $z>4$ there exist two temperatures $0<\beta_0(2,z)<\beta_1(2,z)$ such that for $0<\beta<\beta_0$ the mean-field equation only has the trivial solution $u=0$. For $\beta_0<\beta<\beta_1$ the mean-field equation has two additional solutions $0<u_1<u_2<1$. Finally, for $\beta=\beta_0$ or $\beta\geq\beta_1$ there is only one additional solution $0<u_2<1$.
\end{lem}
\textbf{Proof: }$\beta_{2,z}$ as defined in \eqref{Beta_Funktion} has the following properties: $\lim_{u\to0}\beta_{2,z}'(u)=0$; $\lim_{u\to0}\beta_{2,z}''(u)=2^{z-1}(4-z)/3<0$ and $\lim_{u\to1}\beta_{2,z}(u)=\infty=\lim_{u\to1}\beta_{2,z}'(u)$.
Define $\lim_{u\to0}\beta_{2,z}(u)=\frac{2^{z-1}}{z-1}=:\beta_1$. Using the exact same arguments as presented in the proof of Lemma \ref{Key_Lemma_Minimizer}, one can again show that $\beta_{2,z}$ has exactly one extremal point $\beta_0$, attained at some $u_0\in(0,1)$. As before, the indicated parameter regimes are an immediate consequence of this fact. $\Box$
\begin{lem}\label{Key_Lemma_Minimizer_Curie_Weiss_Low_Exp}
For $q=2$ and $2\leq z\leq4$ there exists only one temperature $0<\beta_1(2,z)$ such that for $0<\beta\leq\beta_1$ the mean-field equation only has the trivial solution $u=0$. For $\beta>\beta_1$ there is one additional solution $0<u_1<1$.
\end{lem}
\textbf{Proof: }$\beta_{2,z}$ as defined in \eqref{Beta_Funktion} has the following properties: $\lim_{u\to0}\beta_{2,z}'(u)=0$, $\lim_{u\to1}\beta_{2,z}(u)=\infty=\lim_{u\to1}\beta_{2,z}'(u)$; $\lim_{u\to0}\beta_{2,z}''(u)=2^{z-1}(4-z)/3>0$ for $z<4$ and $\lim_{u\to0}\beta_{2,4}''(u)=2^{z-1}(4-z)/3=0$; $\lim_{u\to0}\beta_{2,4}'''(u)=0$; $\lim_{u\to0}\beta_{2,4}''''(u)=64/5>0$. As a consequence, for $2\leq z\leq4$, $\beta_{2,z}$ has a local minimum at zero. We show that $\beta_{2,z}$ is strictly increasing. Indeed, $\beta_{2,z}'>0$ is equivalent to
\begin{equation*}
\begin{split}
F_z(v):=(v-1)(v^{\frac{1}{z-1}}+1)-(v+v^{\frac{1}{z-1}})\log v>0
\end{split}
\end{equation*}
for $v\in(1,\infty)$, where we made the one-to-one replacement $v=(\frac{1+u}{1-u})^{z-1}$. Notice that $z\mapsto F_z$ is strictly decreasing pointwise, since $\frac{d}{dz}F_z(v)<0$ is equivalent to $\log v<v-1$, which is of course true for all $v\in(1,\infty)$. Now, in order to show $F_4>0$, we again use a cascade of convex functions. First, $F_4(1)=0$, $F'_4(1)=0$ and $F''_4>0$ is equivalent to $G(v):=5-9v^{2/3}+4v+2\log(v)>0$.
Second, $G(1)=0$, $G'(1)=0$ and $G''>0$ is equivalent to $v>1$, but this is true. Consequently $\beta_1(2,z):=\lim_{u\to0}\beta_{2,z}(u)=2^{z-1}/(z-1)$. $\Box$
\bigskip

\textbf{Proof of Theorem \ref{Minimizer_Theorem} part (ii): } The above lemmata consider the temperature parameter as a function of positive solutions of the mean-field equation
\begin{equation*}
\beta_{q,z}(u)=q^{z-1}\frac{\log(1+(q-1)u)-\log(1-u)}{(1+(q-1)u)^{z-1}-(1-u)^{z-1}}.
\end{equation*}
This function is positive. In the parameter regimes considered in Lemma \ref{Key_Lemma_Minimizer} and Lemma \ref{Key_Lemma_Minimizer_Curie_Weiss_High_Exp}, $\beta_0$ is the unique global minimum of $\beta_{q,z}$ and $\beta_1=\lim_{u\to0}\beta_{q,z}(u)=\frac{q^{z-1}}{z-1}$. Let us connect this with the free energy as a function of $u$:
\begin{equation}\label{One_Input_Free_Energy}
\begin{split}
\Gamma_{\beta,q,z}(\bar{\nu})=&-\frac{\beta}{z}\bar{\nu}_1^{z}+\bar{\nu}_1\log(q\bar{\nu}_1)+ (q-1)(-\frac{\beta}{z}\bar{\nu}_2^{z}+\bar{\nu}_2\log(q\bar{\nu}_2))\\
=&\frac{1}{q}[(1+(q-1)u)\log(1+(q-1)u)+(q-1)(1-u)\log(1-u)]\\
&-\frac{\beta}{z}q^{-z}[(1+(q-1)u)^{z}+(q-1)(1-u)^{z}]=:k_{\beta,q,z}(u)
\end{split}
\end{equation}
\begin{figure}[htbp]
\centering
\includegraphics[width=7cm]{Pix_Free_Energy_2.eps}
\includegraphics[width=7cm]{Pix_Free_Energy_1.eps}
\caption{\scriptsize{On the left side, $k_{\beta,q,z}$ for $q=3$, $z=4$ and $\beta=3.8,\dots,9$. The fact that the shape of the graph changes from a single local minimum attained away from zero, to two local minima, and back again to a single local minimum attained at zero is a common feature in the parameter regimes $(q,z)\in([2,\infty)\times[2,\infty])\setminus(\{2\}\times[2,4])$. One can clearly see the first-order nature of the phase-transition. On the right side, $k_{\beta,q,z}$ for $q=2$, $z=4$ and $\beta=2.4,2.5,\dots,3.3$. The fact that the point where the global minimum is attained moves into zero from the right is a common feature for the parameter regimes $(q,z)\in\{2\}\times[2,4]$. This indicates a second-order phase-transition.}}
\label{Free_Energy}
\end{figure}
and its derivatives
\begin{equation}
\begin{split}
k_{\beta,q,z}'(u)=&-\frac{q-1}{q^{z}}\beta[(1+(q-1)u)^{z-1}-(1-u)^{z-1}]-\frac{q-1}{q}\log\frac{1-u}{1+(q-1)u},\\
k_{\beta,q,z}''(u)=&-\frac{q-1}{q^{z}}\beta(z-1)[(q-1)(1+(q-1)u)^{z-2}+(1-u)^{z-2}]\\
&+\frac{q-1}{(1-u)(1+(q-1)u)}.
\end{split}
\end{equation}
Notice $k_{\beta,q,z}'(0)=0$, and $k$ has a local minimum at zero iff $\beta<\frac{q^{z-1}}{z-1}=\beta_1$. Since also $\lim_{u\to1}k_{\beta,q,z}'(u)=+\infty$, we can assert the following:
\begin{enumerate}
\item If $\beta<\beta_0<\beta_1$ then at $u=0$ the free energy must attain its global minimum.
\item If $\beta\geq\beta_1$ then at zero there is a local maximum and, according to Lemma \ref{Key_Lemma_Minimizer} and Lemma \ref{Key_Lemma_Minimizer_Curie_Weiss_High_Exp}, there is exactly one more extremal point, but this must be a global minimum.
\item If $\beta=\beta_0<\beta_1$ the additional extremal point must be a saddle point, since if it were a local maximum, then there would have to be another local minimum and hence another extremal point, but the additional extremal point is the only one.
\item If $\beta_0<\beta<\beta_1$ then the two additional extremal points $u_1<u_2$ are either two saddle points or a local maximum (attained at $u_1$) and a local minimum (attained at $u_2$).
\end{enumerate}
Since $\frac{d}{d\beta}k_{\beta,q,z}(u)=-\frac{q^{-z}}{z}[(1+(q-1)u)^{z}+(q-1)(1-u)^{z}]<0$, the free energy decreases for every $u$ if $\beta$ increases.
Since $\frac{d}{du}\frac{d}{d\beta}k_{\beta,q,z}(u)=-\frac{q-1}{q^{z}}[(1+(q-1)u)^{z-1}-(1-u)^{z-1}]<0$, for larger $u$ this decrease is also strictly larger, and hence, for $\beta$ moving up from $\beta_0$ to $\beta_1$, $k_{\beta,q,z}(u_2)$ is going down faster than $k_{\beta,q,z}(0)$. Since for $\beta\geq\beta_1$, $u_2$ becomes the global minimum, and $k$ is continuous w.r.t.\ every parameter, there must be a $\beta_0<\beta_c\leq\beta_1$ where $k_{\beta,q,z}(0)=k_{\beta,q,z}(u_2)$, and indeed for $\beta>\beta_c$ the minimizer of the free energy $\Gamma_{\beta,q,z}$ is defined by the largest solution of the mean-field equation. In the parameter regime considered in Lemma \ref{Key_Lemma_Minimizer_Curie_Weiss_Low_Exp} the situation is simpler and we can set $\beta_0=\beta_1=\beta_c$. In particular:
\begin{enumerate}
\item If $\beta<\beta_c$ then at $u=0$ the free energy must attain its global minimum.
\item If $\beta>\beta_c$ then at zero there is a local maximum and, according to Lemma \ref{Key_Lemma_Minimizer_Curie_Weiss_Low_Exp}, there is exactly one more extremal point, but this must be a global minimum.
\end{enumerate}
$\Box$
\bigskip

\textbf{Proof of Theorem \ref{Minimizer_Theorem} part (iii): }In the cases $z\geq2$, $q>2$ and $z>4$, $q=2$ we have $\beta_0<\beta_c\leq\beta_1$ and $\lim_{\beta\searrow\beta_c}u(\beta,q,z)=u_2(q,z)>0=\lim_{\beta\nearrow\beta_c}u(\beta,q,z)$, where we used notation from the proof of part (ii) of Theorem \ref{Minimizer_Theorem} with $u_2(q,z)=u_2$. Hence $\beta\mapsto u(\beta,q,z)$ is discontinuous at $\beta_c$. In the case $2\leq z\leq4$, $q=2$ we have $\lim_{\beta\searrow\beta_c}u(\beta,q,z)=0=\lim_{\beta\nearrow\beta_c}u(\beta,q,z)$ by the monotonicity of $u\mapsto\beta_{q,z}(u)$, and hence $\beta\mapsto u(\beta,q,z)$ is continuous at $\beta_c$.
$\Box$
\subsection{Proof of Proposition \ref{q_Monotonicity_Of_Beta}}
It suffices to show $\partial_q\beta_c(q,z)\geq0$, where $\partial_q\beta_c$ stands for the partial derivative of $\beta_c$ in the direction $q$. Without restriction we consider $3\leq q\in\mathbb{R}$. We know that $\beta_c>0$ and the corresponding value $u_c\in(0,1)$ are solutions of the equations
\begin{equation}\label{ku=knull}
k_{\beta,q,z}(u)=k_{\beta,q,z}(0)=-\frac{\beta}{z}q^{1-z}\quad\text{ and }\quad k_{\beta,q,z}'(u)=0,
\end{equation}
where $k_{\beta,q,z}$ is given in \eqref{One_Input_Free_Energy}. The first condition is equivalent to
\begin{equation}\label{F}
\begin{split}
&F(\beta,q,u):=\beta f(q,u)+g(q,u):=\\
&-\frac{\beta}{z}q^{-z}[(1+(q-1)u)^z+(q-1)(1-u)^z-q]\\
&+\frac{1}{q}[(1+(q-1)u)\log(1+(q-1)u)+(q-1)(1-u)\log(1-u)]=0.
\end{split}
\end{equation}
The second condition is equivalent to
\begin{equation*}\label{G}
\begin{split}
G(\beta,q,u):=\partial_u F(\beta,q,u)=:\beta\partial_u f(q,u)+\partial_u g(q,u)=0.
\end{split}
\end{equation*}
Taking the derivative along a path of solutions, we get the two-dimensional system of equations
\begin{align*}
\frac{d}{dq}F(\beta(q),q,u(q))=\partial_\beta F(\beta,q,u)\partial_q\beta(q)+\partial_q F(\beta,q,u)+\partial_u F(\beta,q,u)\partial_q u(q)&=0\\
\frac{d}{dq}G(\beta(q),q,u(q))=\partial_\beta G(\beta,q,u)\partial_q\beta(q)+\partial_q G(\beta,q,u)+\partial_u G(\beta,q,u)\partial_q u(q)&=0,
\end{align*}
where we wrote for simplicity $\beta_c(q)=\beta_c(q,z)$.
This is equivalent to
\begin{align*}
\begin{pmatrix}
\partial_q\beta(q)\\
\partial_q u(q)
\end{pmatrix}
=-\begin{pmatrix}
\partial_\beta F(\beta,q,u)&\partial_u F(\beta,q,u)\\
\partial_\beta G(\beta,q,u)&\partial_u G(\beta,q,u)
\end{pmatrix}^{-1}
\begin{pmatrix}
\partial_q F(\beta,q,u)\\
\partial_q G(\beta,q,u)
\end{pmatrix},
\end{align*}
which leads to
\begin{align*}\label{dbq}
\partial_q\beta(q)=-\frac{\partial_u G(\beta,q,u)\partial_q F(\beta,q,u)-\partial_u F(\beta,q,u)\partial_q G(\beta,q,u)}{\partial_\beta F(\beta,q,u)\partial_u G(\beta,q,u)-\partial_\beta G(\beta,q,u)\partial_u F(\beta,q,u)}.
\end{align*}
Now we can use that for our solutions $G(\beta,q,u)=\partial_u F(\beta,q,u)=0$, and thus we have
\begin{equation*}\label{dbq_simp}
\partial_q\beta_c(q)=-\frac{\partial_q F(\beta,q,u)}{\partial_\beta F(\beta,q,u)}=-\frac{\partial_q F(\beta,q,u)}{f(q,u)}.
\end{equation*}
Notice $f(q,u)<0$, since $f(q,0)=0$ and
\begin{equation*}
\partial_u f(q,u)=q^{-z}(q-1)[(1-u)^{z-1}-(1+(q-1)u)^{z-1}]<0,
\end{equation*}
where we used $1-u<1+(q-1)u$. Hence it suffices to show
\begin{equation}\label{Fq}
\partial_q F(\beta,q,u)=\beta \partial_q f(q,u)+\partial_q g(q,u)\geq 0.
\end{equation}
A solution of \eqref{F} satisfies $\beta=-g(q,u)/f(q,u)$. Thus we can eliminate $\beta$ in \eqref{Fq} and show instead
\begin{equation}\label{inequ}
\partial_q f(q,u)g(q,u)-\partial_qg(q,u)f(q,u)\geq 0.
\end{equation}
It would be sufficient to show that \eqref{inequ} is true for solutions of \eqref{ku=knull}. Nevertheless, we will prove \eqref{inequ} for all $q\in\mathbb{R}_{+}$, $z\in\mathbb{R}_{+}$ and $u\in[0,1]$.
Multiplying \eqref{inequ} with $zq^{z+2}$, the inequality becomes
\begin{align}\label{inequ_bar}
0&\leq zq^{z+1}\partial_q f(q,u)\cdot qg(q,u)-q^2\partial_qg(q,u)\cdot zq^{z}f(q,u)\\
&=\bar{f}_q(q,u)\cdot\tilde{g}(q,u)+\bar{g}_q(q,u)\cdot\tilde{f}(q,u),\notag
\end{align}
with
\begin{align*}
\bar{f}_q(q,u):=&zq^{z+1}\partial_q f(q,u) =z(1-u)(1+(q-1)u)^{z-1}+(q(z-1)-z)(1-u)^{z}-q(z-1),\\
\tilde{g}(q,u):=&qg(q,u)=(1+(q-1)u)\log(1+(q-1)u)+(q-1)(1-u)\log(1-u),\\
\bar{g}_q(q,u):=&q^2\partial_qg(q,u)=qu-(1-u)[\log(1+(q-1)u)-\log(1-u)],\\
\tilde{f}(q,u):=&-zq^{z}f(q,u)=(1+(q-1)u)^{z}+(q-1)(1-u)^{z}-q.
\end{align*}
We have the following properties:
\begin{enumerate}
\item $u\mapsto\tilde{f}(q,u)\geq 0$ since $\tilde{f}(q,0)=0$ and $\partial_u\tilde{f}(q,u)=z(q-1)[(1+(q-1)u)^{z-1}-(1-u)^{z-1}]\geq 0$.
\item $u\mapsto\tilde{g}(q,u)\geq 0$ since $\tilde{g}(q,0)=0$ and $\partial_u\tilde{g}(q,u)=(q-1)[\log(1+(q-1)u)-\log(1-u)]\geq 0$.
\item $u\mapsto\bar{g}_q(q,u)\geq 0$ since $\bar{g}_q(q,0)=0$ and $\partial_u\bar{g}_q(q,u)=q-\frac{(q-1)(1-u)}{1+(q-1)u}+\log(1+(q-1)u)-\log(1-u)-1\geq 0$, since $q-1-(q-1)(1-u)/(1+(q-1)u)=q-q/(1+(q-1)u)>0$.
\end{enumerate}
The more involved function is $u\mapsto\bar{f}_q(q,u)$, since it can be positive and negative. For the problematic case we define the set of $u$'s where $\bar{f}_q(q,u)$ is negative, i.e.\ $[0, 1]\supset A_q:=\{u\in[0, 1]:\bar{f}_q(q,u)<0\}$. Of course \eqref{inequ_bar} is true on $[0, 1]\setminus A_q$. Hence we only have to show on $A_q$ the inequality
\begin{equation*}
0\leq\bar{f}_q(q,u)\frac{\tilde{g}(q,u)}{\bar{g}_q(q,u)}+\tilde{f}(q,u).
\end{equation*}
Notice that $\bar{g}_q(q,u)=0$ only for $u=0$, but $0\notin A_q$ since $\bar{f}_q(q,0)=0$. We eliminate the fraction by the estimate
\begin{equation*}
\frac{\tilde{g}(q,u)}{\bar{g}_q(q,u)}\leq (q-1).
\end{equation*}
To see that this is true we use the following equivalent expressions:
\begin{equation*}
\begin{split}
\tilde{g}(q,u)&\leq (q-1)\bar{g}_q(q,u)\\
(1+(q-1)u)\log(1+(q-1)u)&\leq(q-1)[qu-(1-u)\log(1+(q-1)u)]\\
\log(1+(q-1)u)&\leq(q-1)u.
\end{split}
\end{equation*}
Since $\bar{f}_q(q,u)$ is negative on $A_q$, we have $\bar{f}_q(q,u)(q-1)\leq \bar{f}_q(q,u)\frac{\tilde{g}(q,u)}{\bar{g}_q(q,u)}$ and all that is left to prove is
\begin{equation*}
0\leq\bar{f}_q(q,u)(q-1)+\tilde{f}(q,u).
\end{equation*}
Since $\bar{f}_q(q,0)(q-1)+\tilde{f}(q,0)=0$, it suffices to show
\begin{equation}\label{Final_Inequality}
\frac{d}{du}(\bar{f}_q(q,u)(q-1)+\tilde{f}(q,u))\geq 0.
\end{equation}
For simplicity let us write $A:=1-u$ and $B:=1+(q-1)u$; then \eqref{Final_Inequality} is true since the last of the following equivalent expressions is clearly true:
\begin{equation*}
\begin{split}
\partial_u (\bar{f}_q(q,u)(q-1)+\tilde{f}(q,u))&\geq 0\\
(z-q(z-1))A^{z-1}-A^{z-1}+(q-1)(z-1)AB^{z-2}&\geq 0\\
AB^{z-1}-A^{z-1}B&\geq 0.
\end{split}
\end{equation*}
$\Box$
\section{Proof of Theorem \ref{Generalized_HaKu}}
Please note that most of the calculations done in this section also work for more general differentiable interaction functions. We prepare the proof by two propositions.
\begin{prop}
For each finite $N$ we have the representation
\begin{equation}\label{Representation_Kernel}
Q^N_{\beta,q,z,(r_1,\dots,r_s)}(k|\nu)= \frac{r_kA(\beta_k,r_k,N_k)}{\sum_{l=1}^sr_lA(\beta_l,r_l,N_l)}
\end{equation}
with $N_l=(N-1)\nu_l$, $\beta_l=\beta (N_l/N)^{z-1}$ and $A(\beta,r,M)=\pi^{M}_{\beta,r,z}(\exp(\beta L_{M}^{\cdot}(1)^{z-1}))$.
\end{prop}
\textbf{Proof: }To compute the l.h.s.\ of \eqref{Representation_Kernel} starting from the generalized fuzzy Potts measure, because of permutation invariance we can set $i=1$ and write, for a fuzzy configuration $\eta$ on $\{2,\dots,N\}$,
\begin{equation*}
\mu^N_{\beta,q,z,(r_1,\dots,r_s)}(Y_1=k|Y_{\{2,\dots,N\}}=\eta)=\frac{1}{Z(\eta)}\sum_{\xi:T(\xi)=(k,\eta)}\pi^N_{\beta,q,z}(\xi),
\end{equation*}
where $Z(\eta)$ is a normalization constant. Parallel to the proof of Proposition 5.2 in \cite{HK04}, it suffices to consider
\begin{equation*}
\begin{split}
&\sum_{\xi:T(\xi)=(k,\eta)}\exp(\frac{\beta N}{z}\sum_{i=1}^q(L_N^\xi(i))^z)=\sum_{\xi:T(\xi)=(k,\eta)}\exp(\frac{\beta N}{zN^z}\sum_{i=1}^q(\sum_{j=1}^N1_{\xi_j=i})^z)\\
&=\sum_{\xi:T(\xi)=(k,\eta)}[\exp(\frac{\beta N}{zN^z}\sum_{i:T(i)=k}(1_{\xi_1=i}+\sum_{j\in\Lambda_k}1_{\xi_j=i})^z)\prod_{l\neq k}\exp(\frac{\beta N}{zN^z}\sum_{i:T(i)=l}(\sum_{j\in\Lambda_l}1_{\xi_j=i})^z)],
\end{split}
\end{equation*}
where we used $\Lambda_l:=\{j\in\{2,\dots,N\}:\eta_j=l\}$.
Dividing this expression by $\prod_{l=1}^s\sum_{\xi_{\Lambda_l}:T(\xi_{\Lambda_l})=l}\exp(\frac{\beta N}{zN^z}\sum_{i:T(i)=l}(\sum_{j\in\Lambda_l}1_{\xi_j=i})^z)$, which depends only on $\eta$, gives
\begin{equation*}
\begin{split}
&\frac{\sum_{\xi_1:T(\xi_1)=k}\sum_{\xi_{\Lambda_k}:T(\xi_{\Lambda_k})=k}\exp(\frac{\beta N}{zN^z}\sum_{i:T(i)=k}(1_{\xi_1=i}+\sum_{j\in\Lambda_k}1_{\xi_j=i})^z)}{\sum_{\xi_{\Lambda_k}:T(\xi_{\Lambda_k})=k}\exp(\frac{\beta N}{zN^z}\sum_{i:T(i)=k}(\sum_{j\in\Lambda_k}1_{\xi_j=i})^z)}\\
&=\sum_{\xi_1:T(\xi_1)=k} \pi^{|\Lambda_k|}_{\beta,r_k,z}(\exp(\beta N\sum_{i:T(i)=k}(\frac{|\Lambda_k|}{N}L_{|\Lambda_k|}^{\cdot}(i))^{z-1}\frac{1}{N}1_{\xi_1=i}+o(\frac{1}{N})))\\
&=r_k\pi^{|\Lambda_k|}_{\beta,r_k,z}(\exp(\beta(\frac{|\Lambda_k|}{N}L_{|\Lambda_k|}^{\cdot}(1))^{z-1}+o(1))),
\end{split}
\end{equation*}
where we used a Taylor expansion in the second-to-last line. Since we are only interested in the limiting behavior of $Q^N$ as the system grows, by a slight abuse of notation we can absorb the asymptotic constant $o(1)$ into the normalization constant, and hence the representation result follows.
$ \Box$ \betaegin{prop}\lambdaabel{Limiting_Cond_Prob} We have for boundary conditions $\nu^{(N)}\tauo\nu$, \betaegin{equation} \lambdaabel{limpropfp} \lambdaim_{N\tauo\infty}Q^N_{\beta,q,z,(r_1,...,r_s)}(k|\nu^{(N)})=\varphirac{C(\beta\nu_k^{z-1},r_k)}{\sigmaum_{l=1}^s{C(\beta\nu_l^{z-1},r_l)}} \varepsilonnd{equation} whenever $\nu_k^{z-1}\neq\beta_c(r_k,z)/\beta$ for all $r_k\gammaeq2$ and $z\gammaeq2$, where \betaegin{align} C(\beta\nu_k^{z-1},r_k):=\notag \betaegin{cases} r_k\varepsilonxp(\beta(\varphirac{\nu_k}{r_k})^{z-1})&,\beta\nu_k^{z-1}<\beta_c(r_k,z)\cr (r_k-1)\varepsilonxp(\beta\nu_k^{z-1}(\varphirac{1-u(\beta\nu_k^{z-1},r_k,z)}{r_k})^{z-1})+&,\beta\nu_k^{z-1}>\beta_c(r_k,z)\cr \hspace{0.2cm}\varepsilonxp(\beta\nu_k^{z-1}(\varphirac{(r_k-1)u(\beta\nu_k^{z-1},r_k,z)+1}{r_k})^{z-1}). \varepsilonnd{cases}\lambdaabel{C_genfuzzy} \varepsilonnd{align} As a reminder, $u(\beta\nu_k^{z-1},r_k,z)$ is the largest solution of the generalized mean-field equation \varepsilonqref{MFeqPhi}. \varepsilonnd{prop} \tauextbf{Proof: }The result is a direct consequence of the generalized Ellis-Wang Theorem \rhoef{Generalized_Ellis_Wang}. $ \Box$ \betaigskip \tauextbf{Proof of Theorem \rhoef{Generalized_HaKu}: } By Proposition \rhoef{Limiting_Cond_Prob}, for $2<z\lambdaeq4$ the points of discontinuity are precisely given by the values $\nu_k^{z-1}=\beta_c(r_k,z)/\beta$ for those $k\in\{1,\deltaots,s\}$ with $r_k\gammaeq3$ for which $\beta_c(r_k,z)/\beta<1$. In particular if $r_i\lambdaeq2$ for all $i\in\{1,\deltaots,s\}$ no such points exist, this gives part (i). By Proposition \rhoef{q_Monotonicity_Of_Beta} $\beta_c(r,z)$ is an increasing function of $r$, thus points of discontinuity can only be present if $\beta$ is at least larger or equal than the critical inverse temperature of the smallest class that can have a second-order phase-transition. 
By picking two different approximating sequences of boundary conditions $\nu^{(N)}_k\sigmaearrow\nu_k$ and $\tauilde\nu^{(N)}_k\nearrow\nu_k$ it is also clear that for those points of discontinuity the limit does not exist. This gives (ii) and (iii). $ \Box$ \sigmaection{Appendix} \sigmaubsection{Bifurcation analysis} We have seen that in different parameter regimes of the generalized Potts model different kinds of phase-transitions can appear. This is of course related to the appearance (and disappearance) of local minima and maxima in the free energy as a function of $u\in[0,1]$ that we called $k_{\beta,q,z}$ (see \varepsilonqref{One_Input_Free_Energy}). A complete picture of possible bifurcations for general potentials is presented in \cite{PoSt78}. In this appendix we want to at least provide some figures showing the bifurcation phenomena that can appear in the generalized Potts model in particular. \betaegin{figure}[h] \betaegin{center} \includegraphics[width=3.5cm]{Pix_Bifurcation_1klein.eps} \includegraphics[width=3.5cm]{Pix_Bifurcation_2klein.eps} \includegraphics[width=3.5cm]{Pix_Bifurcation_3klein.eps} \includegraphics[width=3.5cm]{Pix_Bifurcation_4klein.eps} \varepsilonnd{center} \caption{\sigmacriptsize{For $q=2$ the area left of the middle line is the phase-transition region. There is a triple point at $z=4$ where all extremal points fall in the same place, namely zero. Below $z=4$ there is the second-order phase-transition boundary and the three lines lie exactly on top of each other. Above $z=4$ there is a first-order phase-transition and the two additional lines right and left of the phase-transition boundary indicate bifurcation phenomena. To be more precise, the left line indicates where the local minimum at $u=0$ and the local maximum at $u_1\gammaeq0$ join. The right line indicates where the local maximum at $u_1>0$ and the local minimum at $u_2\gammaeq u_1$ join. Of course the phase-transition boundary must lie between these lines.
We give a schematic picture for this in Figure \rhoef{Bifurcation_Schematisch}. For $q=3,4,5$ the situation is simpler since no second-order phase-transition is present.}} \lambdaabel{Bifurcation} \varepsilonnd{figure} Note that we can compute exactly only the left bifurcation line in each image in Figure \rhoef{Bifurcation} and Figure \rhoef{Bifurcation_Schematisch}, via $(\varphirac{d}{du})^2k_{\beta,q,z}(u)|_{u=0}=0$ which is equivalent to $\varphirac{1}{\beta}=\varphirac{z-1}{q^{z-1}}$. The right line in each of the same images shows $\beta_0(q,z)$ as defined for example in Lemma \rhoef{Key_Lemma_Minimizer}, which we computed numerically. The middle line, showing $\beta_c(q,z)$, we also calculated numerically. \betaigskip On a computational level there is no reason not to assume $q$ to be continuous. In fact all our proofs work well with ${\Bbb R}\ni q\gammaeq2$. We already showed that the possibility of a second-order phase-transition disappears for $q>2$. This can also be seen in the bifurcation picture as indicated in Figure \rhoef{Bifurcation_Schematisch}. \betaegin{figure}[h] \betaegin{center} \includegraphics[width=7cm]{Pix_Bifurcation_Schematisch.eps} \includegraphics[width=7cm]{Pix_Bifurcation_2_2.eps} \varepsilonnd{center} \caption{\sigmacriptsize{On the left: A schematic indication for the bifurcation phenomena present in case of $q=2$ where the small graphs are prototypical representations of the shape of the free energy.
On the right: For $q>2$ the bifurcation lines do not join and the phase-transition boundary lies in an area where there are two local minima and one local maximum present.}} \lambdaabel{Bifurcation_Schematisch} \varepsilonnd{figure} \sigmaubsection{Random cluster representation and $z$-clique variables}\lambdaabel{Random cluster representation and $z$-clique variables} There is an equivalent notation for the Hamiltonian of the standard Potts model on the complete graph, namely $F_{\beta,q,2}(L_N^{\xii})=-\varphirac{\beta}{N^2}\sigmaum_{1\lambdaeq i<j\lambdaeq N}1_{\xii_i=\xii_j}-\varphirac{\beta}{N}$. For general integer valued exponents $z\gammaeq2$ an equivalent notation for the Hamiltonian is given by \betaegin{equation}\lambdaabel{Alt_Hamiltonian} F_{\beta,q,z}(L_N^{\xii})=-\varphirac{\beta (z-1)!}{N^z}\sigmaum_ {D\sigmaubset\{1,\deltaots,N\},|D|=z}1_{\xii|_D=c}+O(\varphirac{1}{N}) \varepsilonnd{equation} where $\xii|_D=c$ means that the given configuration $\xii$ has a constant $q$-coloring on the subset $D$ of size $z$. $N$ times the additional term is bounded by a constant as the system size grows and hence plays no role in the large deviation analysis and for the limiting Potts measure away from $\beta_c(q,z)$. We would like to describe now an extension of the well-known random cluster representation of the nearest-neighbor Potts model on a general graph with $N$ vertices to interactions between $z=2,3,4,\deltaots $ spins. Denote by $\Delta$ a subset of the set of subsets of vertices $\{1,\deltaots,N\}$ with $z$ sites. In other words $\Delta$ is a subset of the $z$-cliques. This defines a graph in the usual sense when we say that there is an edge between sites $i\neq j$ iff there exists $D\in\Delta$ with $i,j\in D$. 
We define the corresponding $\Delta$-Potts-Hamiltonian by \betaegin{equation}\lambdaabel{Alt_Hamiltonian-zwei} F_{\Delta}(\xii)=-\betaeta\sigmaum_ {D\sigmaubset \Delta}1_{\xii|_D=c} \varepsilonnd{equation} for a spin-configuration $\xi\in \{1,\deltaots, N\}$. In the limit away from $\beta_c(q,z)$ this corresponds to the generalized mean-field Potts measure for integer exponent $z$ when we take $\Delta$ to be the set of all subsets of $\{1,\deltaots,N\}$ with exactly $z$ elements. Let us now describe a random cluster representation for Gibbs measure corresponding to \varepsilonqref{Alt_Hamiltonian-zwei}. Given $\Delta$, define the probability measure on $\{1,\deltaots, q\}^N \tauimes \{0,1\}^\Delta$ by \betaegin{equation}\lambdaabel{Coupling} K(\sigmaigma,\omega)=C \prod_{D \in \Delta} \Bigl((1-p)1_{\omega(D)=0}+ p 1_{\omega(D)=1}1_D(\sigma)\Bigr) \varepsilonnd{equation} with $1_D(\sigma)$ the indicator of the event that $\sigma$ is constant on $D$ and $C$ the normalization. For $z=2$ this is the so-called \tauextit{Edwards-Sokal measure} presented in \cite{ES88}. Summing over the "clique-variables" $\omega$ we get the marginal distribution on $\{1,\deltaots, q\}^N$ \betaegin{equation*} \betaegin{split} \sigmaum_{\omega} K(\sigmaigma,\omega)&=C \sigmaum_{\omega}\prod_{D \in \Delta}\Bigl((1-p)1_{\omega(D)=0}+ p1_{\omega(D)=1}1_D(\sigma)\Bigr)\cr &=C\prod_{D \in \Delta}\Bigl((1-p)+ p 1_D(\sigma)\Bigr)=C\prod_{D \in \Delta}(1-p)^{1-1_D(\sigma)}. \varepsilonnd{split} \varepsilonnd{equation*} This equals the generalized Potts measure with Hamiltonian \varepsilonqref{Alt_Hamiltonian-zwei} for integer exponent $z$ when we put $p=1-e^{-\beta}$. 
Conversely, summing over $\sigma$ we get \betaegin{equation*} \betaegin{split} \sigmaum_{\sigma} K(\sigmaigma,\omega)&=C \sigmaum_{\sigma}\prod_{D \in \Delta}\Bigl((1-p)1_{\omega(D)=0}+ p1_{\omega(D)=1}1_D(\sigma)\Bigr)\cr &=C\prod_{D \in \Delta}(1-p)^{1-\omega(D)}p^{\omega(D)}q^{k(\omega)} \varepsilonnd{split} \varepsilonnd{equation*} where $k(\omega)$ is the number of connected components (in the sense that open $z$-subsets are called connected if they share at least one vertex) of the configuration $\omega\in\{0,1\}^\Delta$ also counting isolated elements of $\Delta$. We call this measure the \tauextit{generalized random cluster measure} (generalized RCM) assigning probability to configurations of $z$-cliques. More details for the case $z=2$ can be found for example in \cite{GHM00}. The case $q=1$ is independent percolation on $z$-clique variables since we declare each $z$-clique (subset of $z$ elements) independently to be open with probability $p$ and closed with probability $1-p$. For $q>1$ configurations additionally get $q$-dependent weights which give bias to configurations with many connected components. The coupling measure \varepsilonqref{Coupling} describes an intimate relation between the generalized Potts measure and the generalized RCM. For example let $C_1,\deltaots,C_k$ be a partition of $\{1,\deltaots,N\}$ given by the connected components of a configuration distributed according to the generalized mean-field RCM with parameters $q,z$ and $p=1-e^{-\varphirac{\beta (z-1)!}{N^{z-1}}}$. Then the empirical distribution under the generalized Potts measure with parameters $\beta,z$ and $q$ is given by \betaegin{equation*} L_N=\varphirac{1}{N}\sigmaum_{i=1}^k\alpha_i|C_i| \varepsilonnd{equation*} where the $\alpha_i$ are independent and equidistributed random variables on $\{\delta_1,\deltaots,\delta_q\}$ and we suppressed the additional term in the Hamiltonian \varepsilonqref{Alt_Hamiltonian}. 
Now let us consider the variance of the empirical distribution w.r.t the generalized Potts measure \betaegin{equation*} \betaegin{split} \tauext{Var}_{\pi^N_{\beta,q,z}}&[L_N(1)]={\Bbb E}_{\pi^N_{\beta,q,z}}[(L_N(1)-\varphirac{1}{q})^2]={\Bbb E}_{\tauext{RCM}}[(\sigmaum_{i=1}^k\alpha_i(1)\varphirac{|C_i|}{N}-\varphirac{1}{q})^2]\cr &={\Bbb E}_{\tauext{RCM}}[\sigmaum_{j,i=1}^k(\alpha_i(1)-\varphirac{1}{q})(\alpha_j(1)-\varphirac{1}{q})\varphirac{|C_i||C_j|}{N^2}] =\varphirac{q-1}{q^2}{\Bbb E}_{\tauext{RCM}}[\sigmaum_{i=1}^k(\varphirac{|C_i|}{N})^2].\cr \varepsilonnd{split} \varepsilonnd{equation*} We have ${\Bbb E}_{\tauext{RCM}}[\max_{i\in\{1,\deltaots,q\}}(\varphirac{|C_i|}{N})^2]\lambdaeq{\Bbb E}_{\tauext{RCM}}[\sigmaum_{i=1}^k(\varphirac{|C_i|}{N})^2]\lambdaeq {\Bbb E}_{\tauext{RCM}}[\max_{i\in\{1,\deltaots,q\}}(\varphirac{|C_i|}{N})]$ and hence $\tauext{Var}_{\pi^N_{\beta,q,z}}[L_N(1)]\tauo0$ iff $\max_{i\in\{1,\deltaots,q\}}(\varphirac{|C_i|}{N})\tauo0$ in probability w.r.t the RCM. In other words, phase-transition of the generalized Potts model is equivalent to percolation of the generalized RCM. The case $z=2$ has been studied in great detail in \cite{BGJ96}. Under the right scaling $p=\lambda/N$ the critical value $\lambda_c$ for percolation of the RCM equals the critical inverse temperature $\beta_c$ for phase-transition of the Potts model. We expect the same to be true for the generalized RCM and the generalized Potts measure (on a computational level even for $q$ non-integer valued) with $p=\lambda/N^{z-1}$. Notice that for the generalized RCM, the assumption of $q$ to be integer-valued can be abandoned. In \cite{B10} again for the case $z=2$ an interesting extension of the Potts measure (on the lattice) the so-called \tauextit{divide and color model} (DCM) is considered. 
The DCM is a probability measure on $\{1,\deltaots,s\}^{{\Bbb Z}^d}$ corresponding to the following two-step procedure: First pick a random edge configuration $\omega$ according to the $q$-biased RCM. Secondly assign spin $i\in\{1,\deltaots,s\}$ independently to every connected component of $\omega$ with probability $a_i$ where $\sigmaum_{i=1}^sa_i=1$. For integers $1<s<q$ and $a_i=k_i/q$ with $k_i\in{\Bbb N}$ and $\sigmaum_{i=1}^s k_i=q$ the fuzzy Potts model is contained as a special case. The main result is, that with the exception of the Potts model ($q=s$, $a_i\varepsilonquiv1/q$) the DCM is Gibbs only for large $p$. Notice that our result about loss of Gibbsianness of the fuzzy Potts model in the low temperature regime is again contained. \betaegin{thebibliography}{99} \betaegin{scriptsize} \betaibitem{B10} A.~B\'alint: Gibbsianness and non-Gibbsianness in divide and color models, The Annals of Probability, Vol. 38, No. 4, 1609-1638 (2010) \betaibitem{BGJ96}B.~Bollob\'as, G.R.~Grimmett and S.~Janson: The random-cluster process on the complete graph, Probability Theory and Related Fields 104, 283-317 (1996) \betaibitem{CU} J.-R.~Chazottes and E.~Ugalde: On the preservation of Gibbsianness under symbol amalgamation. In: Entropy of hidden Markov processes and connections to Dynamical Systems. Eds. B.~Marcus, K.~Petersen and T.~Weissman, Cambridge University Press (2011) \betaibitem{COM89} F.~Comets: Large deviation estimates for a conditional probability distribution. Applications to random interaction Gibbs measures. Prob. Theory Relat. Fields 80, 407-432 (1989) \betaibitem{DeZe10}A.~Dembo and O.~Zeitouni: Large Deviations Techniques and Applications (2nd. ed.), Stochastic Modelling and Applied Probability 38, Springer, Berlin (2010) \betaibitem{DOB}R.L.~Dobrushin: The description of a random field by means of conditional probabilities and conditions of its regularity, Theor. Prob. Appl. 
13, 197-224 (1968) \betaibitem{ES88}R.G.~Edwards and A.D.~Sokal: Generalization of the Fortuin-Kasteleyn-Swendsen-Wang representation and Monte Carlo algorithm, The Physical Review D 38, 2009-2012 (1988) \betaibitem{ELLIS85} R.S.~Ellis: Entropy, Large Deviations, and Statistical Mechanics, Reprint of the 1st ed. Springer-Verlag New York 1985, XVIII (2006) \betaibitem{ElWa89} R.S.~Ellis, K.W.~Wang: Limit Theorems for the empiricial vector of the Curie- Weiss-Potts model, Stoch. Proc. Appl. 35, 59-79 (1989) \betaibitem{EFS} A.C.D.~van Enter, R.~Fern\'andez, A.D.~Sokal: Regularity properties and pathologies of position-space renormalization-group transformations: Scope and limitations of Gibbsian theory, J. Stat. Phys. 72, 879-1167 (1993) \betaibitem{ACD1} A.C.D.~van Enter, R.~Fern\'andez, F.~den Hollander and F.~Redig: Possible Loss and recovery of Gibbsianness during the stochastic evolution of Gibbs Measures, Commun. Math. Phys. 226, 101-130 (2002) \betaibitem{AFHR10}A.C.D.~van Enter, R.~Fern\'andez, F.~den Hollander and F.~Redig: A large-deviation view on dynamical Gibbs-non-Gibbs transitions, Moscow Math. J. 10, 687-711 (2010) \betaibitem{ACD} A.C.D.~van Enter, C.~K\"ulske : Two connections between random systems and non-Gibbsian measures, J. Stat. Phys. 126, 1007-1024 (2007) \betaibitem{EK10} V.N.~Ermolaev and C.~K\"ulske: Low-temperature dynamics of the Curie-Weiss model: Periodic orbits, multiple histories and loss of Gibbsianness. Journal of Statistical Physics, 141(5):727756 (2010) \betaibitem{DEZ} R.~Fern\'andez: Gibbsianness and non-Gibbsianness in lattice random fields, Les Houches, LXXXIII (2005) \betaibitem{FHM13-}R.~Fern\'andez, F.~den Hollander and J.~Mart\'{\i}nez: Variational description of Gibbs-non-Gibbs dynamical transitions for the Curie-Weiss model. Comm. Math. Phys. 319, no. 
3, 703Ð730 (2013) \betaibitem{FHM13}R.~Fern\'andez, F.~den Hollander and J.~Mart\'{\i}nez: Variational description of Gibbs-non-Gibbs dynamical transitions for spin-flip systems with a Kac-type interaction, arXiv:1309.3667 (2013) \betaibitem{Geo} H.-O.~Georgii: Gibbs measures and phase transitions, volume 9 of de Gruyter Studies in Mathematics. Walter de Gruyter Co., Berlin, ISBN 0-89925-462-4 (1988) \betaibitem{GHM00}H.-O.~Georgii, O.~H\"aggstr\"om, C.~Maes: The random geometry of equilibrium phases, Phase Transitions and Critical Phenomena (Domb, C., Lebowitz, J. L., eds.), vol. 18, Academic Press, London, pp. 1-142 (2000) \betaibitem{H} O.~H\"aggstr\"om: Is the fuzzy Potts model Gibbsian? Ann. de l'Institut Henri Poincar\'e (B) Prob. and Stat. 39, 891-917 (2003) \betaibitem{HK04} O.~H\"aggstr\"om, C.~K\"ulske: Gibbs properties of the fuzzy Potts model on trees and in mean field, Markov Proc. Rel. Fields 10 No. 3, 477-506 (2004) \betaibitem{JaKu12} B.~Jahnel, C.~K\"ulske: A class of nonergodic interacting particle systems with unique invariant measure, to be published in the Annals of Applied Probability, arXiv:1208.5433 (2012) \betaibitem{JaKu13} B.~Jahnel, C.~K\"ulske: Synchronization for discrete mean-field rotators, arXiv:1308.1260 (2013) \betaibitem{KUL6}C.~K\"ulske: Non-Gibbsianness and phase transition in random lattice spin models, Markov. Proc. Rel. Fields 5, 357-383 (1999) \betaibitem{KUL1} C.~K\"ulske: Analogues of Non-Gibbsianness in Joint Measures of Disordered Mean Field Models, J. Stat. Phys., 112 (2003) \betaibitem{KUL2}C.~K\"ulske, A.~Le Ny: Spin-flip dynamics of the Curie-Weiss model: Loss of Gibbsianness with possibly broken symmetry, Commun. Math. Phys. 271, 431-454 (2007) \betaibitem{KULOP08} C.~K\"{u}lske, A. A.~Opoku: Continuous Spin Mean-Field models: Limiting kernels and Gibbs Properties of local transforms, J. Math. Phys. 49, 125215 (2008) \betaibitem{LEN1} A.~Le Ny: Gibbsian Description of Mean-Field Models. 
In: In and Out of Equilibrium, Eds. V.Sidoravicius, M.E. Vares, Birkh\"auser, Progress in Probability, vol 60, 463-480 (2008) \betaibitem{PoSt78} T.~Poston, I.~Stewart: Catastrophe Theory and its Applications, Surveys and reference works in mathematics. Pitman, London (1978) \betaibitem{Po} R.~B.~Potts: Some generalized order-disorder transformations, Math. Proc. Cambridge Phil. Soc. 48, 106--109 (1952) \varepsilonnd{scriptsize} \varepsilonnd{thebibliography} \varepsilonnd{document}
\begin{document} \title[PMP, (co)adjoint representation, geodesics]{Pontryagin maximum principle, (co)adjoint representation, and normal geodesics of left-invariant (sub-)Finsler metrics on Lie groups} \author{V.~N.~Berestovskii, I.~A.~Zubareva} \operatorname{ad}dress{Sobolev Institute of Mathematics, \newline Russia, 630090, Novosibirsk, Acad. Koptyug avenue, 4;\newline Novosibirsk State University,\newline Russia, 630090, Novosibirsk, Pirogova str., 1} \email{[email protected]} \operatorname{ad}dress{Sobolev Institute of Mathematics,\newline Russia, 644099, Omsk, Pevtsova str., 13} \email{i\[email protected]} \begin{abstract} On the ground of origins of the theory of Lie groups and Lie algebras, their (co)adjoint representations, and the Pontryagin maximum principle for the time-optimal problem are given an independent foundation for methods of geodesic vector field to search for normal geodesics of left-invariant (sub-)Finsler metrics on Lie groups and to look for the corresponding locally optimal controls in (sub-)\\Riemannian case, as well as some their applications. \noindent {\it Mathematics Subject Classification (2010):} 53C17, 53C22, 53C60, 49J15. \noindent {\it Keywords:} (co)adjoint representation, left-invariant (sub-)Finsler metric, left-invariant (sub-)Riemannian metric, Lie algebra, Lie group, mathematical pendulum, normal geodesic, optimal control. \end{abstract} \maketitle \section*{Introduction} An extensive geometric research subject is the class of homogeneous Riemannian manifolds which includes Lie groups with left-invariant Riemannian metrics \cite{BerNik12} and is a part of the class of homogeneous Finsler manifolds \cite{Deng12}. Every homogeneous Riemannian manifold is the image of some Lie group with a left-invariant Riemannian metric relative to a Riemannian submersion. After Gromov's 1980s papers, homogeneous sub-Finsler manifolds, in particular, sub-Riemannian manifolds were actively studied \cite{BR96}--- \cite{AS}. 
Their investigation is based on the Rashevsky--Chow theorem which states that any two points of a connected manifold can be joined by a piecewise smooth curve tangent to a given totally nonholonomic distribution \cite{R38}, \cite{C39}. An independent proof of a version of it for Lie groups with left-invariant sub-Finsler metrics is given in Theorem \ref{contr}. All homogeneous (sub-)Finsler manifolds are contained in the class of locally compact homogeneous spaces with intrinsic metric. This class is a complete metric space with respect to the Busemann-Gromov-Hausdorff metric introduced in \cite{BerGorb14}. Its everywhere dense subset is the class of Lie groups with left-invariant Finsler metrics. In addition, 1) each homogeneous locally compact space $M$ with intrinsic metric is the limit of some sequence of homogeneous manifolds $M_n$ with intrinsic metrics, connected by submetries \cite{Ber88}, \cite{Ber891}, \cite{Ber87}, \cite{BerGu00}; 2) every homogeneous manifold with intrinsic metric is the quotient space $G/H$ of some connected Lie group $G$ by its compact subgroup $H,$ equipped with a $G$-invariant Finsler or sub-Finsler metric $d$; in particular, it may be a Riemannian or sub-Riemannian metric \cite{Ber88}, \cite{Ber881}, \cite{Ber89}; 3) moreover, according to the form of the metric $d$, there exists a left-invariant Finsler, sub-Finsler, Riemannian or sub-Riemannian metric $\rho$ on $G$ such that the canonical projection $(G,\rho)\rightarrow (G/H,d)$ is a submetry \cite{Ber89}. The search for geodesics of homogeneous (sub)-Finsler manifolds is reduced to the case of Lie groups with left-invariant (sub)-Finsler metrics. The shortest arcs on Lie groups with left-invariant (sub)-Finsler metrics are optimal trajectories of the corresponding left-invariant time-optimal problem on Lie groups \cite{Ber88}. This permits applying the Pontryagin maximum principle (PMP) to their search \cite{PBGM}.
By this method, in \cite{Ber94} are found all geodesics and shortest arcs of an arbitrary sub-Finsler metric on the three-dimensional Heisenberg group. In \cite{Ber14} is proposed a search method of normal geodesics on Lie groups with left-invariant sub-Riemannian metrics. The method is applicable to Lie groups with left-invariant Riemannian metrics, since all their geodesics are normal. In this paper, to find geodesics of left-invariant (sub-)Finsler metrics on Lie groups and corresponding locally optimal controls in (sub-)Riemannian case we use the geodesic vector field method (Theorems \ref{main},\ref{vf}) and an improved version of method from \cite{Ber14}, applying (co)adjoint representations. The version is based on differential equations from Theorem \ref{kok} for controls, using only the structure constants of Lie algebras of Lie groups. An interesting feature of these two methods in (sub-)Riemannian case is that geodesics vector fields on Lie groups (their integral curves are geodesics, i.e., locally optimal trajectories) and locally optimal controls on Lie algebras of these Lie groups can be determined independently of each other, although there is a connection between them. Moreover, controls on different Lie algebras could be solutions of the same mathematical pendulum equation (see sections \ref{so3}--\ref{se2}). Analogues of Theorems \ref{hameq1} and \ref{main} (but for the last theorem is only along one geodesic) are proved in the book \cite{Jur97} on the basis of more complicated concepts and apparatus. Apparently, other researchers did not apply PMP {\it for the time-optimal problem} to find geodesics of left-invariant metrics on Lie groups. \section{Preliminaries} A smooth manifold $G$ which is a group with respect to an operation $\cdot$ is called the Lie group if the operations of multiplication and inversing are smooth maps. Smooth map of Lie groups that is a homomorphism is called a homomorphism of Lie groups. 
Monomorphisms, epimorphisms, and isomorphisms of Lie groups are defined in a similar way. A subgroup $H$ of a Lie group $G$ which is its smooth submanifold is called the Lie subgroup of the Lie group $G$. By E.Cartan's theorem, every closed subset $H$ of the Lie group $G$, which is its subgroup, is the Lie subgroup of the Lie group $G$ \cite{Ad}. The concept of the virtual Lie subgroup of a Lie group generalizes the concept of the Lie subgroup of a Lie group. A subgroup $H$ of a Lie group $G$ is called its virtual Lie subgroup, if $H$ admits the structure of the Lie group such that its topology base consists of connected components of open subsets of the induced topology and the inclusion map of $H$ in $G$ is an (injective) homomorphism of Lie groups. The left and the right shifts $l_g: h\in G\rightarrow g\cdot h,$ $ r_g: h\in G\rightarrow h\cdot g,$ $g,h\in G,$ of the Lie group $(G,\cdot)$ by an element $g$ are diffeomorphisms with the inverse shifts $l_{g^{-1}},$ $r_{g^{-1}},$ and their differentials $(dl_g)_h: T_hG\rightarrow T_{gh}G$ (respectively, $(dr_g)_h: T_hG\rightarrow T_{hg}G)$ are linear isomorphisms of tangent vector spaces to $G$ at corresponding points. A (smooth) vector field $V: G\rightarrow TG,\,\,V: g\in G\rightarrow T_gG$ on the Lie group $G$ such that $V\circ l_h = d(l_h)\circ V$ for all $h\in G,$ is called the left-invariant vector field on $G$. The right-invariant vector field on $G$ is defined in a similar way. Every left-invariant vector field on the Lie group $G$ has a form \begin{equation} \label{V} V(g)=(dl_g)_e(v),\quad v\in T_eG, \end{equation} where $e$ is the unit of the group $G$. A homomorphism of Lie groups $\phi: (\mathbb{R},+)\rightarrow (G,\cdot)$ is called the $1$--parameter subgroup of the Lie group $(G,\cdot)$. 
\label{onep} Every $1$--parameter subgroup $\phi(t), t\in \mathbb{R},$ of a Lie group $G$ is an integral curve of a left-invariant vector field $V$ on $G$ with formula (\ref{V}), where $v= (d\phi)_0(\overline{e}),$ and $\overline{e}\in T_0\mathbb{R}$ is the vector with the component 1. For a vector $v\in T_eG,$ we denote by $V_v$ and $\phi_v$ respectively the left-invariant vector field $V$ on $G,$ defined by (\ref{V}), and the $1$--parameter subgroup $\phi=\phi(t),$ $t\in\mathbb{R},$ in $G$ with condition $(d\phi)_0(\overline{e})=v$. The exponential map $\exp=\exp_G: T_eG\rightarrow G$ is defined by formula $v\in T_eG\rightarrow \phi_v(1).$ If $f: G\rightarrow H$ is a homomorphism of Lie groups then \begin{equation} \label{expc} f\circ \exp_G= \exp_H\circ (df)_e. \end{equation} For each vector $v\in T_eG,$ we have $(d\exp)_{0}(v)=v,$ where $0$ is zero of the tangent vector space $T_eG.$ As a result, there exist open neighborhoods $U$ of zero in $T_eG$ and $V$ of unit $e$ in $G$ such that $\exp: U\rightarrow V$ is a diffeomorphism. If $\dim(G)=n$ then after introduction of arbitrary Cartesian coordinates $(x_1,\dots, x_n)$ with zero origin $0$ in the tangent vector space $T_eG,$ it is naturally identified with $\mathbb{R}^n.$ Then $\exp^{-1}: V\rightarrow U\subset \mathbb{R}^n$ is a local chart (a coordinate system) on $G$ in the neighborhood $V$ of the point $e\in G.$ This coordinate system in $V$ is called {\it a coordinate system of the first kind}. A family of local charts $\exp^{-1}\circ l_{g^{-1}}: g\cdot V\rightarrow U\subset \mathbb{R}^n,$ $g\in G,$ sets a smooth structure on $G,$ identical with the initial smooth structure of the Lie group.
The group $\operatorname{GL}(n)=\operatorname{GL}(n,\mathbb{R})$ of all nondegenerate real squared $(n\times n)$-matrices is a Lie group relative to the global map that associates to each matrix $g\in \operatorname{GL}(n)$ its elements $g_{ij},$ $i,j=1,\dots n.$ Obviously, for every $g\in G$ the mapping $I(g): G\rightarrow G$ such that $$I(g)(h)= g\cdot h\cdot g^{-1}=(l_g \circ r_{g^{-1}})(h)= (r_{g^{-1}} \circ l_{g})(h)$$ is an automorphism of the Lie group $(G,\cdot),$ $I(g)(e)=e,$ and the differential $$(dI(g))_e: = dl_g\circ dr_{g^{-1}}: T_eG\rightarrow T_eG$$ is a nondegenerate linear map (i.e. an element of the Lie group $\operatorname{GL}(n)$ relative to some vector basis in $T_eG$, if $\dim G=n$), denoted with $\operatorname{Ad}(g)$. The calculation rule for the differential of composition gives $$\operatorname{Ad}(g_1\cdot g_2)= (dI(g_1\cdot g_2))_e= (d(I(g_1)\circ I(g_2)))_e= (dI(g_1))_e\circ (dI(g_2))_e= \operatorname{Ad}(g_1)\circ \operatorname{Ad}(g_2),$$ i.e., $\operatorname{Ad}: G\rightarrow \operatorname{GL}(n)$ is a homomorphism of Lie groups, called the adjoint representation of the Lie group $G$. By formula (\ref{expc}), \begin{equation} \label{igexp} I(g)\circ \exp= \exp\circ \operatorname{Ad}(g), g\in G, \end{equation} the kernel of the homomorphism $\operatorname{Ad}$ for a connected Lie group $G$ is the center of the Lie group $G,$ \begin{equation} \label{digexp} \operatorname{Ad}\circ \exp_G= \exp_{\operatorname{GL}(n)}\circ (d\operatorname{Ad})_e. \end{equation} Set $\mathfrak{g}:=T_eG$ for a Lie group $(G,\cdot),$ $\mathfrak{gl}(n):= T_E\operatorname{GL}(n)= M(n)$ for the Lie group $\operatorname{GL}(n),$ where $M(n)$ is the vector space of all real $(n\times n)$-matrices, $\operatorname{ad} =\operatorname{ad}_{\mathfrak{g}}:= (d\operatorname{Ad})_e;$ $L(X,Y)$ is the (real) vector space of linear maps from the real vector space $X$ to the real vector space $Y$; $B(X\times Y, Z)$ is the vector space of bilinear maps from $X\times Y$ to $Z$. 
It is clear that $$\operatorname{ad} \in L(\mathfrak{g}, L(\mathfrak{g},\mathfrak{g}))=B(\mathfrak{g}\times \mathfrak{g},\mathfrak{g}).$$ {\it A vector $[v,w]: =\operatorname{ad}(v)(w)\in \mathfrak{g},$ $v,w\in \mathfrak{g}$, is called the Lie bracket of vectors $v,w\in \mathfrak{g}.$ The pair $(\mathfrak{g},[\cdot,\cdot])$ is called the Lie algebra of the Lie group $(G,\cdot)$}. The definition implies that the Lie bracket operation is bilinear. It is clear that $$\frac{\partial}{\partial s}[\exp(tv)\exp(sw)\exp(-tv)](0)=\operatorname{Ad}(\exp(tv))(w),$$ \begin{equation} \label{bra} [v,w]= \frac{\partial}{\partial t}\left(\frac{\partial}{\partial s}[\exp(tv)\exp(sw)\exp(-tv)](0)\right)(0), \end{equation} The formula (\ref{bra}) and the bilinearity of the Lie bracket imply the skew symmetry of the Lie bracket and the triviality of the Lie algebra of any commutative Lie group; for a connected Lie group the converse statement is also true. It follows from formulae (\ref{expc}), (\ref{bra}) that if $f: G\rightarrow H$ is a homomorphism of Lie groups and $(\mathfrak{h},[\cdot,\cdot])$ is the Lie algebra of the Lie group $H$, then for any elements $v,w\in \mathfrak{g},$ $$(df)_e([v,w])=[(df)_e(v),(df)_e(w)].$$ In other words, the differential $(df)_e: \mathfrak{g}\rightarrow \mathfrak{h}$ is a homomorphism of Lie algebras $(\mathfrak{g},[\cdot,\cdot])$ and $(\mathfrak{h},[\cdot,\cdot])$ of Lie groups $G$ and $H.$ As a corollary, Lie algebras of locally isomorphic Lie groups are isomorphic (the converse statement is also true) and \begin{equation} \label{adgi} \operatorname{Ad}(g)([v,w])= [\operatorname{Ad}(g)(v),\operatorname{Ad}(g)(w)],\quad g\in G,\quad v,w\in \mathfrak{g}. 
\end{equation} The substitution $g=\exp(tu),$ $u\in \mathfrak{g},$ to this formula and the differentiation by $t$ at $t=0$ gives the following formula \begin{equation} \label{difalg} [u,[v,w]]= [[u,v],w]+ [v,[u,w]],\quad u,v,w\in (\mathfrak{g},[\cdot,\cdot]), \end{equation} which is equivalent by the skew symmetry of the Lie bracket to {\it the Jacobi identity} \begin{equation} \label{jak} [ u,[v,w]]+ [v,[w,u]]+ [w,[u,v]]=0. \end{equation} It is well-known that \begin{equation} \label{expgl} \exp_{\operatorname{GL}(n)}(A)= \exp A = \sum_{k=0}^{\infty}\frac{A^k}{k!},\quad A\in\mathfrak{gl}(n), \end{equation} which together with (\ref{bra}) imply \begin{equation} \label{slgl} [A,B]= AB - BA,\quad A,B\in (\mathfrak{gl}(n),[\cdot,\cdot]). \end{equation} \section{Theoretic results} \begin{definition} \label{gen} Let $(\mathfrak{l},[\cdot,\cdot])$ be a Lie algebra; $\mathfrak{p}, \mathfrak{q}\subset \mathfrak{l}$ are nonzero vector subspaces. By definition, $$[\mathfrak{p},\mathfrak{q}]= \{[v,w]: v\in \mathfrak{p}, w\in \mathfrak{q}\}.$$ If $\dim(\mathfrak{p})\geq 2$ then by definition, $$\quad\mathfrak{p}^1=\mathfrak{p},\quad \mathfrak{p}^{k+1}=[\mathfrak{p},\mathfrak{p}^k],\quad \mathfrak{p}_m= \sum_{k=0}^{m}\mathfrak{p}^k.$$ The vector subspace $\mathfrak{p}\subset \mathfrak{l}$ generates the Lie algebra $(\mathfrak{l},[\cdot,\cdot])$, if $\mathfrak{l}=\mathfrak{p}_m$ for some natural number $m;$ the smallest number $m:=s$ with such property is called the generation degree (of the algebra $(\mathfrak{l},[\cdot,\cdot])$ by the subspace $\mathfrak{p}$). 
\end{definition} It is clear that subsets from Definition \ref{gen} are vector subspaces of $\mathfrak{l}.$ \begin{definition} \label{adapt} Let us assume that the vector subspace $\mathfrak{p}\subset \mathfrak{l}$ generates the Lie algebra $(\mathfrak{l},[\cdot,\cdot]),$ $2\leq \dim(\mathfrak{p})< \dim(\mathfrak{l}),$ $s$ is the generation degree, $r_m,$ $m=1,\dots, s,$ are dimensions (ranks) of the spaces $\mathfrak{p}_m.$ Thus $2\leq r_1< r_2<\dots < r_s,$ $r_1=\dim(\mathfrak{p})=r,$ $r_s=\dim(\mathfrak{l})=n$. A basis $\{e_1,\dots, e_{r_s}\}$ of the Lie algebra $\mathfrak{l}$ is called adapted to the subspace $\mathfrak{p},$ if $\{e_1,\dots, e_{r_m}\}$ is a basis of the subspace $\mathfrak{p}_m$ for every $m=1,\dots, s$. \end{definition} Let $\{e_1,\dots,e_r\}$ be any basis of the vector subspace $\mathfrak{p}\subset \mathfrak{g},$ generating the Lie algebra $(\mathfrak{g},[\cdot,\cdot])$ of a Lie group $(G,\cdot).$ \begin{theorem} \label{contr} Let $(G,\cdot)$ be a connected Lie group and a vector subspace $\mathfrak{p}\subset \mathfrak{g}$ generates Lie algebra $(\mathfrak{g},[\cdot,\cdot]).$ Then the control system \begin{equation} \label{dyn} \dot{g}=(dl_g)(u),\quad u\in \mathfrak{p}, \end{equation} is controllable (attainable) by means of piecewise constant controls \begin{equation} \label{co} u=u(t)\in \mathfrak{p},\quad 0\leq t\leq T, \end{equation} where $u(t)=\pm e_j,$ $j=1,\dots, r,$ in the constancy segments of the control. In other words, for any elements $g_0,g_1\in G$ there exists a piecewise constant control (\ref{co}) of this type such that $g(T)=g_1$ for solution of the Cauchy problem $$\dot{g}(t)=dl_{g(t)}(u(t)), \quad g(0)=g_0.$$ \end{theorem} \begin{proof} We shall apply the notation from Definitions \ref{gen} and \ref{adapt}. 
Let us construct an adapted basis $\{e_1,\dots, e_n\}$ to the subspace $\mathfrak{p}$ of the Lie algebra $(\mathfrak{g},[\cdot,\cdot])$ by induction on $m=1,\dots, s.$ $m=1.$ First $r$ vectors of the basis coincide with vectors of the basis for the space $\mathfrak{p}^1=\mathfrak{p}$ chosen before Theorem \ref{contr}. $m=2.$ It is clear that we can take some vectors of the form $e_j=[e_{i_j},e_{k_j}]\in \mathfrak{p}^2,$ $j=r+1,\dots, r_2,$ where $i_j,$ $k_j$ are some of the numbers $1,\dots,r.$ Let us assume that vectors $e_1,\dots, e_{r_m}$ are constructed, where $2\leq m<s.$ Then we can take some vectors of the form $e_j=[e_{i_j},e_{k_j}]\in \mathfrak{p}^{m+1},$ $j=r_m+1,\dots, r_{m+1},$ where $i_j$ (respectively, $k_j$) are some of the numbers $1,\dots, r$ (respectively, $r_{m-1}+1,\dots, r_m$). As a result, each vector $e_j,$ where $r_{m-1}< j\leq r_m,$ $m=2,\dots, s,$ has the form \begin{equation} \label{vb} e_j=[e_{i_m(j)},[\dots, [e_{i_2(j)},e_{i_1(j)}]\dots ]],\quad 1\leq i_l(j)\leq r,\quad l=1,\dots, m. \end{equation} We claim that if every such vector $e_j$ is replaced by a vector $e'_j$ of the form \begin{equation} \label{nvb} e'_j= (\operatorname{Ad}(\exp(t_m e_{i_m(j)}))\circ \dots \circ \operatorname{Ad}(\exp(t_2 e_{i_2(j)})))(e_{i_1(j)}) \end{equation} with sufficiently small nonzero numbers $t_2,\dots, t_m$ (preserving vectors $e_1,\dots, e_r$), then we get again some basis in $\mathfrak{g}$ (not necessarily adapted to the subspace $\mathfrak{p}$).
Indeed, on the basis of formulae (\ref{nvb}), (\ref{digexp}), $$e'_j= (\exp(t_m\operatorname{ad}(e_{i_m(j)}))\circ \dots \circ \exp(t_2\operatorname{ad}(e_{i_2(j)})))(e_{i_1(j)})= $$ $$((E+t_m\operatorname{ad}(e_{i_m(j)})+O(t_m^2))\circ\dots \circ (E+t_2\operatorname{ad}(e_{i_2(j)})+O(t_2^2)))(e_{i_1(j)})=$$ $$e_{i_1(j)}+ t_2[e_{i_2(j)},e_{i_1(j)}]+ \dots +(t_m\dots t_2)[e_{i_m(j)},[\dots,[e_{i_2(j)},e_{i_1(j)}]\dots]]+\sum_{k=2}^m o(t_k).$$ We see from here and (\ref{vb}) that removing the last sum, we get a vector from $\mathfrak{p}_m$ that is equal to the vector $(t_m\dots t_2)e_j$ modulo the subspace $\mathfrak{p}_{m-1}$. This implies the statement from the previous paragraph. For simplicity, later on each such vector $e'_j$ is denoted by $e_j.$ On the ground of formulae (\ref{nvb}) and (\ref{igexp}), \begin{equation} \label{ige} \exp{(se_j)}=(I(\exp(t_m e_{i_m(j)}))\circ\dots\circ I(\exp(t_2 e_{i_2(j)})))(\exp(se_{i_1(j)})),\quad s\in \mathbb{R}. \end{equation} Let us show that the statement of Theorem \ref{contr} is true for elements $g_0=e$ and $g_1=\exp(se_j).$ For this, we apply a control $$u=u(\tau),\quad 0\leq \tau\leq |s|+ 2\sum_{k=2}^{m}|t_k|,$$ where $$u(\tau)=\operatorname{sgn}(t_l) e_{i_l(j)}, \quad \sum_{k=l}^m|t_k|-|t_l|\leq \tau\leq \sum_{k=l}^m|t_k|,\quad l=2,\dots, m,$$ $$u(\tau)=\operatorname{sgn}(s) e_{i_1(j)}, \quad \sum_{k=2}^m|t_k|\leq \tau\leq \sum_{k=2}^m|t_k|+ |s|,$$ $$u(\tau)=-\operatorname{sgn}(t_l) e_{i_l(j)},\quad \sum_{k=2}^m|t_k|+ |s|+ \sum_{k=2}^l|t_k|-|t_l|\leq \tau\leq \sum_{k=2}^m|t_k|+ |s|+ \sum_{k=2}^l|t_k|,$$ where $l=2,\dots, m.$ Then it follows from the definition of $I(g),$ $g\in G,$ and the equation (\ref{ige}) that the solution of the Cauchy problem for the system (\ref{dyn}) with $g(0)=e$ and with the given control $u=u(\tau)$ is a piecewise smooth curve $$g(\tau)=\exp\left(\left(\tau- \sum_{k=l}^m|t_k|+|t_l|\right)\operatorname{sgn}(t_l) e_{i_l(j)}\right), \quad \sum_{k=l}^m|t_k|-|t_l|\leq \tau\leq \sum_{k=l}^m|t_k|;$$
$$g(\tau)=\exp\left(\left(\tau - \sum_{k=2}^m|t_k|\right)\operatorname{sgn}(s) e_{i_1(j)}\right), \quad \sum_{k=2}^m|t_k|\leq \tau\leq \sum_{k=2}^m|t_k|+ |s|;$$ $$g(\tau)=\exp\left(-\left(\tau - \left(\sum_{k=2}^m|t_k|+ |s|+ \sum_{k=2}^l|t_k|-|t_l|\right)\right)\operatorname{sgn}(t_l) e_{i_l(j)}\right), $$ $$\sum_{k=2}^m|t_k|+ |s|+ \sum_{k=2}^l|t_k|-|t_l|\leq \tau\leq \sum_{k=2}^m|t_k|+ |s|+ \sum_{k=2}^l|t_k|,$$ where $ l=2,\dots, m.$ In addition, $g\left(|s|+ 2\sum_{k=2}^{m}|t_k|\right)=\exp(se_j).$ It follows from the proved assertions that for any collection $(s_1,\dots, s_n)\in \mathbb{R}^n$ the statement of Theorem \ref{contr} holds for elements $$g_0=e,\quad g_1=\Phi(s_1,\dots, s_n):= \exp(s_1e_1)\dots \exp(s_ne_n).$$ In addition, $$\frac{\partial \Phi}{\partial s_i}(0,\dots, 0)=e_i,\quad i=1,\dots, n.$$ Then on the ground of the inverse mapping theorem the map $\Phi$ is a diffeomorphism of some open neighborhood $W$ of zero $(0,\dots,0)$ in $\mathbb{R}^n$ onto some open neighborhood $V$ of the unit $e$ in $G.$ It follows from previously proved assertions that the statement of Theorem \ref{contr} holds for $g_0=e$ and any element $g_1\in V^k$, where $k$ is an arbitrary natural number, hence for any element $g_1\in \widetilde{W}:=\cup_{k=1}^{\infty}V^k.$ This set is nonempty, open and closed in $G.$ The first two properties are obvious; we shall prove that the set is closed. Set $$V_0:= V\cap V^{-1},\quad\mbox{where}\quad V^{-1}=\{g^{-1}: g\in V\}.$$ It is clear that $V_0$ is a symmetric neighborhood of the unit $e$ in $G,$ i.e., $V_0^{-1}=V_0.$ Let $g_1\in \overline{\widetilde{W}},$ where $\overline{\widetilde{W}}$ is the closure of $\widetilde{W}.$ Then $g_1V_0\cap \widetilde{W}\neq \emptyset,$ consequently, $g_1V_0\cap V^k\neq \emptyset$ for some $k,$ so there exists $g\in g_1V_0\cap V^k,$ $g=g_1v_0$ for some $v_0\in V_0.$ Then $$g_1=gv_0^{-1}\in gV_0\subset gV\subset V^{k}V=V^{k+1}\subset \widetilde{W}.$$ Therefore $\widetilde{W}$ is an open and closed set and $\widetilde{W}=G,$ because $G$ is connected.
Now if $g_0, g_1\in G$ then $g_0=l_{g_0}(e),$ $g_1=l_{g_0}((g_0)^{-1}g_1),$ and since the statement of Theorem \ref{contr} holds for elements $e$ and $(g_0)^{-1}g_1,$ then it holds for $g_0$ and $g_1.$ \end{proof} It follows from the proof of Theorem \ref{contr} that the triple $(V,\Phi^{-1},W)$ is a local chart in $G.$ The corresponding coordinate system is called {\it the coordinate system of the second kind}. Every left-invariant (sub-)Finsler metric $d=d_F$ on a connected Lie group $G$ with Lie algebra $(\mathfrak{g},[\cdot,\cdot])$ is defined by a subspace $\mathfrak{p}\subset \mathfrak{g}$, generating $\mathfrak{g}$, and some norm $F$ on $\mathfrak{p}.$ A distance $d(g,h)$ for $g,h\in G$ is defined as the infimum of lengths $\int_0^T|\dot{g}(t)|dt$ of piecewise smooth paths $g=g(t),$ $0\leq t\leq T,$ such that $dl_{g(t)^{-1}}\dot{g}(t)\in \mathfrak{p}$ and $g(0)=g,$ $g(T)=h;$ $T$ is not fixed, $|\dot{g}(t)|=F(dl_{g(t)^{-1}}\dot{g}(t)).$ The existence of such paths and, consequently, the finiteness of $d$ are guaranteed by Theorem \ref{contr}. Obviously, all three metric properties for $d$ are fulfilled. If $\mathfrak{p}=\mathfrak{g}$ then $d$ is a left-invariant Finsler metric on $G$; if $F(v)=\sqrt{\langle v, v\rangle},$ $v\in \mathfrak{p},$ where $\langle\cdot,\cdot\rangle$ is some scalar product on $\mathfrak{p},$ then $d$ is a left-invariant sub-Riemannian metric on $G,$ and $d$ is a left-invariant Riemannian metric, if additionally $\mathfrak{p}=\mathfrak{g}.$ The following statements were proved in \cite{Ber881}. The space $(G,d)$ is locally compact and complete. Then, in consequence of the S.E.~Cohn--Vossen theorem, the space $(G,d)$ is a geodesic space, i.e. for any elements $g,h\in G$ there exists a shortest arc $c=c(t),$ $0\leq t\leq T,$ in $(G,d),$ which joins them. This means that $c$ is a continuous curve in $G,$ whose length in the metric space $(G,d)$ is equal to $d(g,h).$ Therefore we can assume that $c$ is parameterized by arc length, i.e.
$T=d(g,h)$ and $d(c(t_1),c(t_2))=t_2-t_1$ if $0\leq t_1\leq t_2\leq d(g,h).$ Then $c=c(t),$ $0\leq t\leq d(g,h),$ is a Lipschitz curve relative to the smooth structure of the Lie group $G$. Therefore this curve is absolutely continuous. Then, in consequence of a well-known theorem from mathematical analysis, there exists a measurable, almost everywhere defined derivative function $\dot{c}(t),$ $0\leq t\leq d(g,h)$, and $c(t)=c(0)+ \int_0^t\dot{c}(\tau)d\tau,$ $0\leq t\leq T.$ \begin{theorem} \label{topt}\cite{Ber88} Every shortest arc $g=g(t),$ $0\leq t\leq T=d(g_0,g_1)$, in $(G,d)$ with $g(0)=g_0,$ $g(T)=g_1,$ is a solution of the time-optimal problem for the control system (\ref{dyn}) with compact control region $$U=\{u\in \mathfrak{p}: F(u)\leq 1\}$$ and indicated endpoints. \end{theorem} In consequence of Theorem \ref{topt}, one can apply the Pontryagin maximum principle \cite{PBGM} for the time-optimal problem from Theorem \ref{topt} and a covector function $\psi=\psi(t)\in T^{\ast}_{g(t)}G$ to find shortest arcs on the Lie group $G$ with left-invariant sub-Finsler metric $d.$ The function $\psi$ can be considered as a left-invariant $1$-form on $(G,\cdot)$ and therefore it is natural to identify it with a covector function $\psi(t)\in\mathfrak{g}^{\ast}=T_e^{\ast}G.$ Then every optimal trajectory $g(t),$ $0\leq t\leq T,$ is determined by some (piecewise continuous) optimal control $\overline{u}=\overline{u}(t)\in U,$ $0\leq t\leq T.$ Moreover, for some non-vanishing absolutely continuous function $\psi=\psi(t),$ $0\leq t\leq T,$ we have \begin{equation} \label{H} H=H(g,\psi,u)=\psi((dl_g)(u))=\psi(u), \end{equation} \begin{equation} \label{partial} \dot{g}=\frac{\partial H}{\partial \psi},\quad \dot{\psi}=-\frac{\partial H}{\partial g}, \end{equation} \begin{equation} \label{max} H(\tau):=H(\psi(\tau),\overline{u}(\tau))=\psi(\tau)(\overline{u}(\tau))=\max_{u\in U}\psi(\tau)(u) \end{equation} at continuity points $\tau$ of the optimal control
$\overline{u}=\overline{u}(t)$. \begin{definition} Later on, a parametrized curve $g = g(t)$, $t\in \mathbb{R},$ satisfying the PMP for the time-optimal problem is called an extremal for the problem from Theorem \ref{topt}. \end{definition} \begin{remark} \label{pos} For every extremal, $H(t)=\operatorname{const}:= M_0\geq 0,$ $t\in \mathbb{R},$ \cite{AS, PBGM}. \end{remark} \begin{definition} An extremal is called normal (abnormal), if $M_0 > 0$ ($M_0=0$). Every normal extremal is parameterized by arc length; proportionally changing $\psi=\psi(t),$ $t\in \mathbb{R},$ if it is necessary, one can assume that $M_0=1.$ Every normal extremal for a left-invariant (sub-)Riemannian metric on a Lie group is a geodesic, i.e. a locally shortest curve \cite{LS95}. \end{definition} \begin{theorem} \label{hameq}\cite{Ber14} The Hamiltonian system for the function $H$ on the Lie group $G=\operatorname{GL}(n)$ with the Lie algebra $\mathfrak{g}=\mathfrak{gl}(n)$ has the form \begin{equation} \label{sgo} g^{\prime}=g\cdot u,\quad g\in G, \quad u\in \frak{g}, \end{equation} \begin{equation} \label{hame0} \psi(v)^{\prime}= \psi([u,v]),\quad g\in G, \quad u,v\in \frak{g}. \end{equation} \end{theorem} \begin{proof} Each element $g\in G\subset \operatorname{GL}(n)\subset \mathbb{R}^{n^2}$ is defined by its standard matrix coordinates $g_{ij},$ $i,j=1,\dots, n,$ and $\psi$ is defined by its components $\psi_{ij}=\psi(e_{ij}),$ $i,j=1,\dots,n,$ where $e_{ij}\in \mathfrak{g}$ is a matrix having $1$ in the $i$th row and the $j$th column and 0 in all other places. In consequence of (\ref{H}), \begin{equation} \label{H1} H(\psi,g,u)=\sum_{i,j=1}^n\psi_{ij}\left(\sum_{l=1}^ng_{il}u_{lj}\right)= \sum_{l,j=1}^n(g^T\psi)_{lj}u_{lj}.
\end{equation} The variables $g_{ij},$ $\psi_{ij}$ must satisfy the Hamiltonian system of equations \begin{equation} \label{dx} g_{ij}^{\prime}=\frac{\partial H}{\partial \psi_{ij}}(\psi,g,u)=\sum_{l=1}^ng_{il}u_{lj}=(gu)_{ij}, \end{equation} \begin{equation} \label{dp} \psi_{ij}^{\prime}= -\frac{\partial H}{\partial g_{ij}}=-\sum_{m=1}^{n}\psi_{im}u_{jm}=-(\psi u^T)_{ij}. \end{equation} The formula (\ref{dx}) is a special case of the formula (\ref{sgo}). It is clear that $$\psi(v)=\psi(gv)=\sum_{i,j=1}^n\psi_{ij}\left(\sum_{l=1}^ng_{il}v_{lj}\right).$$ On the ground of formulae (\ref{dx}) and (\ref{dp}) we get from here that $$(\psi(v))^{\prime}=\sum_{i,j=1}^n \psi_{ij}^{\prime}\left(\sum_{l=1}^ng_{il}v_{lj}\right)+\sum_{i,j=1}^n\psi_{ij}\left(\sum_{l=1}^n g_{il}^{\prime}v_{lj}\right)=$$ $$-\sum_{i,j=1}^n\left(\sum_{m=1}^{n}\psi_{im}u_{jm}\sum_{l=1}^ng_{il}v_{lj}\right)+\sum_{i,j=1}^n\psi_{ij}\left(\sum_{l,m=1}^ng_{im}u_{ml}v_{lj}\right)=$$ $$-\sum_{i,j=1}^n\psi_{ij}\left(\sum_{l=1}^ng_{il}(vu)_{lj}\right)+\sum_{i,j=1}^n\psi_{ij}\left(\sum_{l=1}^ng_{il}(uv)_{lj}\right)= \sum_{i,j=1}^n\psi_{ij}(g[u,v])_{ij}=\psi([u,v]),$$ which proves the formula (\ref{hame0}). \end{proof} \begin{theorem} \label{hameq1}\cite{Ber14} The Hamiltonian system for the function $H$ on a Lie group $G$ with Lie algebra $\mathfrak{g}$ has the form \begin{equation} \label{sg} \dot{g}=dl_{g}(u),\quad g\in G, \quad u\in \frak{g}, \end{equation} \begin{equation} \label{hame} \psi(v)^{\prime}= \psi([u,v]),\quad g\in G, \quad u,v\in \frak{g}.
\end{equation} \end{theorem} \begin{proof} In consequence of Theorem \ref{hameq}, Theorem \ref{hameq1} holds for every matrix Lie group and for every Lie group $(G,\cdot),$ because it is known that $(G,\cdot)$ is locally isomorphic to some connected Lie subgroup (possibly virtual) of the Lie group $\operatorname{GL}(n)\subset \mathbb{R}^{n^2}.$ \end{proof} It follows from Theorem \ref{hameq1}, especially from (\ref{hame}), and Remark \ref{pos} that \begin{theorem} \label{norm} If $\dim(G)=3$, $\dim(\mathfrak{p})\geq 2$ in Theorem \ref{topt} then every extremal of the problem from Theorem \ref{topt} is normal. \end{theorem} The following lemma holds. \begin{lemma} \label{lem2}\cite{Ber17} Let $g=g(t)$, $t\in (a,b)$, be a smooth path in the Lie group $G$. Then \begin{equation} \label{pro} (g(t)^{-1})^{\prime}=-g(t)^{-1} g^{\prime}(t) g(t)^{-1}. \end{equation} \end{lemma} \begin{proof} Differentiating the identity $g(t) g(t)^{-1}=e$ with respect to $t$, we get $$0=(g(t)g(t)^{-1})^{\prime}=g^{\prime}(t) g(t)^{-1}+g(t)(g(t)^{-1})^{\prime},$$ whence the equality (\ref{pro}) follows immediately. \end{proof} \begin{theorem} \label{dad}\cite{Ber17} Let $\psi\in\mathfrak{g}^{\ast}=T^{\ast}_eG$ be a covector, $$\operatorname{Ad}^{\ast}\psi(g):=(\operatorname{Ad} g)^{\ast}(\psi)=\psi\circ \operatorname{Ad}(g), \quad g\in G,$$ an action of the coadjoint representation of the Lie group $G$ on $\psi$. Then $$(d(\operatorname{Ad}^{\ast}\psi)(w))(v)=((\operatorname{Ad} g_0)^{\ast}(\psi))([u,v]),$$ if $$u,v\in\frak{g},\quad w=dl_{g_0}(u)\in T_{g_0}G,\quad g_0\in G.$$ \end{theorem} \begin{proof} In the case of a matrix Lie group, $$\operatorname{Ad}(g)(v)=gvg^{-1},\quad dl_g(u)=gu,\quad u,v\in\mathfrak{g}, \quad g\in G.$$ We choose a smooth path $g=g(t)$, $t\in (-\varepsilon,\varepsilon)$, in the Lie group $G$ such that $g(0)=g_0$, $g^{\prime}(0)=w$.
Then by Lemma \ref{lem2}, $$(d(\operatorname{Ad}^{\ast}\psi)(w))(v)=(\psi(g(t)vg(t)^{-1}))^{\prime}(0)= \psi((g(t) vg(t)^{-1})^{\prime}(0))=$$ $$\psi(g^{\prime}(0)vg_0^{-1}+g_0 v(g(t)^{-1})^{\prime}(0))= \psi(g_0uvg_0^{-1}-g_0v(g_0^{-1}g^{\prime}(0)g_0^{-1}))=$$ $$\psi(g_0uvg_0^{-1}-g_0v(g_0^{-1}g_0ug_0^{-1}))= \psi(g_0uvg_0^{-1}-g_0vug_0^{-1})=$$ $$\psi(g_0[u,v]g_0^{-1})=((\operatorname{Ad} g_0)^{\ast}(\psi))([u,v]),$$ as required. \end{proof} It follows from Theorems \ref{hameq1} and \ref{dad} that \begin{theorem} \label{main} \cite{BerGich} 1. Any normal extremal $g=g(t):\,\mathbb{R}\rightarrow G$ (parameterized by arc length and with origin $e\in G$), of a left-invariant (sub-)Finsler metric $d$ on a Lie group $G$, defined by a norm $F$ on the subspace $\mathfrak{p}\subset\frak{g}$ with closed unit ball $U$, is a Lipschitz integral curve of the following vector field $$v(g)=dl_g(u(g)),\quad u(g)=\psi_0(\operatorname{Ad}(g)(w(g)))w(g),\quad w(g)\in U, $$ $$\psi_0(\operatorname{Ad}(g)(w(g)))=\max_{w\in U}\psi_0(\operatorname{Ad}(g)(w)),$$ where $\psi_0\in\frak{g}^{\ast}$ is some fixed covector with $\max_{v\in U}\psi_0(v)=1.$ 2. (Conservation law) In addition, $\psi(t)(g(t)^{-1}g^{\prime}(t))\equiv1$ for all $t\in \mathbb{R}$, where $\psi(t):=(\operatorname{Ad} g(t))^{\ast}(\psi_0)$.
\end{theorem} \begin{remark} Every extremal with origin $g_0$ is obtained by the left shift $l_{g_0}$ from some extremal with origin $e.$ \end{remark} \begin{remark} In the (sub-)Riemannian case, the vector $u(g)$ is characterized by the condition $\langle u(g),v\rangle=\psi_0(\operatorname{Ad}(g)(v))$ for all $v\in \mathfrak{p}.$ In the Riemannian case, every extremal is a normal geodesic, and we can assume that $\psi_0$ is a unit vector in $(\mathfrak{p}=\mathfrak{g},(\cdot,\cdot)),$ setting $\psi_0(v)=(\psi_0,v),$ $v\in \mathfrak{g}.$ Moreover, $\dot{g}(0)=\psi_0.$ \end{remark} \begin{corollary} \label{bi} Every geodesic of a biinvariant Riemannian metric on a Lie group with the unit origin is its $1$-parameter subgroup. \end{corollary} \begin{proof} This statement is a consequence of the right invariance of the vector field $v(g)=dl_{g}(\operatorname{Ad}(g^{-1})(\psi_0))=dr_{g}(\psi_0),$ since $ (\operatorname{Ad}(g^{-1})(\psi_0),\operatorname{Ad}(g^{-1})(\psi_0))\equiv 1, $ $$(\operatorname{Ad}(g)^{\ast}(\psi_0),v)=(\psi_0,\operatorname{Ad}(g)(v))=(\operatorname{Ad}(g^{-1})(\psi_0),v) \Rightarrow u(g)=\operatorname{Ad}(g^{-1})(\psi_0).$$ \end{proof} \begin{theorem} \label{vf} If $v(g_0)\neq 0,$ $g_0\in G,$ then an integral curve of the vector field $v(g), g\in G,$ with origin $g_0$ is a normal extremal parametrized proportionally to arc length with the proportionality factor $|dl_{g_0^{-1}}(v(g_0))|.$ \end{theorem} \begin{proof} Let $g(t),$ $t\in\mathbb{R},$ be an integral curve under consideration and set $\gamma=\gamma(t)=g_0^{-1}g(t),$ $t\in\mathbb{R}.$ Then $\gamma$ is an integral curve of the vector field $dl_{g_0^{-1}}v(g),$ $g\in G,$ with origin $e.$ Hence \begin{equation} \label{gam} \dot{\gamma}(t)=dl_{g_0^{-1}}\dot{g}(t)=dl_{g_0^{-1}}(dl_{g(t)}(u(g(t))))=dl_{\gamma(t)}(u(g(t))).
\end{equation} In addition, \begin{equation} \label{adp} \operatorname{Ad}(g(t))^{\ast}=\operatorname{Ad}(g_0\cdot \gamma(t))^{\ast}=\operatorname{Ad}(\gamma(t))^{\ast}\circ \operatorname{Ad}(g_0)^{\ast}. \end{equation} By definition, $$u(g(t))=\operatorname{Ad}(g(t))^{\ast}(\psi_0)(w(g(t)))w(g(t)), $$ $$\operatorname{Ad}(g(t))^{\ast}(\psi_0)(w(g(t)))=\max_{w\in U}\operatorname{Ad}(g(t))^{\ast}(\psi_0)(w),$$ which by (\ref{adp}) can be rewritten as $$u(g(t))=\operatorname{Ad}(\gamma(t))^{\ast}(\psi_0')(w(g(t)))w(g(t)),$$ $$ \operatorname{Ad}(\gamma(t))^{\ast}(\psi_0')(w(g(t)))=\max_{w\in U}\operatorname{Ad}(\gamma(t))^{\ast}(\psi_0')(w),$$ where $\psi_0'=\operatorname{Ad}(g_0)^{\ast}(\psi_0).$ As a result of this and (\ref{gam}), we see that $u(g(t))$ plays the role of $u(\gamma(t))$ for the constant covector $\psi_0'$ (instead of $\psi_0$). Due to point 2 of Theorem \ref{main} the curve $\gamma(t)$ is a normal extremal parameterized proportionally to arc length with the proportionality factor $|dl_{g_0^{-1}}(v(g_0))|.$ Then its left shift $g(t)=g_0\gamma(t)$ also has this property. \end{proof} \begin{remark} Theorem \ref{vf} holds for left-invariant Riemannian metrics on (connected) Lie groups. In this case, $v(g_0)\neq 0$ for all $g_0\in G.$ \end{remark} Let us choose a basis $\{e_1,\dots, e_n\}$ in $\mathfrak{g},$ assuming that $\{e_1,\dots, e_r\}$ is an orthonormal basis for the scalar product $\langle\cdot,\cdot\rangle$ on $\mathfrak{p}$ in case of a left-invariant (sub-)Finsler metric. Define a scalar product $\langle\cdot,\cdot\rangle$ on $\mathfrak{g},$ considering $\{e_1,\dots, e_n\}$ as its orthonormal basis. Then each covector $\psi\in \mathfrak{g}^{\ast}$ can be considered as a vector in $\mathfrak{g},$ setting $\psi(v)=\langle \psi,v\rangle$ for every $v\in \mathfrak{g}.$ If $\psi=\sum_{i=1}^{n}\psi_ie_i,$ $v=\sum_{k=1}^nv_ke_k,$ then $\psi(v)=\psi\cdot v,$ where $\psi$ and $v$ are the corresponding vector-row and vector-column, $\cdot$ is the matrix multiplication.
If $l:\mathfrak{g}\rightarrow \mathfrak{g}$ is a linear map, then we denote by $(l)$ its matrix in the basis $\{e_1,\dots, e_n\}.$ \begin{proposition} $$(\operatorname{Ad} g)^{\ast}(\psi)=\psi(\operatorname{Ad} g),\quad g\in G,\,\,\psi\in \mathfrak{g}^{\ast},$$ where on the right hand side of the equality $\psi$ indicates the corresponding vector-row. \end{proposition} \begin{proof} Obviously, the identity $$(\operatorname{Ad} g)^{\ast}(\psi)((\operatorname{Ad} g)^{-1}(v))=\psi(v)=\psi\cdot v$$ holds. Therefore, it is enough to verify that for matrix $A:=(\operatorname{Ad} g)$ $$(\psi A)(A^{-1}v)=\psi\cdot v.$$ But it is obvious. \end{proof} If $g(t),$ $t\in \mathbb{R},$ is a normal geodesic of a left-invariant (sub-)Riemannian metric $d$ on a Lie group $G,$ then $u(g(t))$ is the orthogonal projection onto $\mathfrak{p}$ of the vector $(\operatorname{Ad} g(t))^{\ast}(\psi_0)$ in the notation of Theorem \ref{main} for the scalar product $\langle\cdot,\cdot\rangle$ introduced above on $\mathfrak{g}.$ This fact and formula (\ref{hame}) imply \begin{theorem} \label{kok} Every normal parameterized by arc length geodesic of left-invariant (sub-)Riemannian metric on a Lie group $G$ issued from the unit is a solution of the following system of differential equations \begin{equation} \label{difur} \dot{g}(t)=dl_{g(t)}u(t),\,\,u(t)=\sum_{i=1}^r \psi_i(t)e_i,\,\,|u(0)|=1,\,\, \dot{\psi}_j(t)=\sum_{k=1}^{n}\sum_{i=1}^{r}c_{ij}^k\psi_i(t)\psi_k(t), \end{equation} where $ j=1,\dots, n,$ $c_{ij}^k$ are structure constants of Lie algebra $\mathfrak{g}$ in its basis $\{e_1,..., e_n\}.$ In Riemannian case, $r=n$. \end{theorem} \begin{corollary} \begin{equation} \label{unit} |\dot{g}(t)| = |u(t)|\equiv 1,\quad t\in \mathbb{R}. \end{equation} \end{corollary} \begin{proof} The first equality in (\ref{unit}) is a consequence of the first equality in (\ref{difur}) and left invariance of the scalar product. 
Therefore, due to the equality $|u(0)|=1$, it suffices to prove that $\frac{d}{dt}\langle u(t),u(t)\rangle=0.$ Now by (\ref{difur}), $$\frac{d}{dt}\langle u(t),u(t)\rangle=\left(\sum_{j=1}^r\psi_j(t)\psi_j(t)\right)'=2\sum_{j=1}^r\psi_j(t)\psi_j'(t)=\sum_{k=1}^n\sum_{i,j=1}^rc_{ij}^k\psi_i(t)\psi_j(t)\psi_k(t),$$ which is zero by the skew symmetry of $c_{ij}^k$ with respect to subscripts. \end{proof} \begin{remark} In fact, the same equations for $\dot{\psi}_j(t)$ from (\ref{difur}) in a different interpretation were obtained in \cite{GK95} as ``normal equations''. Their derivation there uses more complicated concepts and techniques. \end{remark} \section{Lie groups with left-invariant Riemannian metrics of constant negative curvature} The only Lie groups which do not admit left-invariant sub-Finsler metrics are commutative Lie groups and Lie groups $G_n,$ $n\geq 2$, consisting of parallel translations and homotheties (without rotations) of Euclidean space $E^{n-1}$ \cite{BerGorb14}, \cite{Ber89}. Up to isomorphisms, Lie groups $G_n$ can be described as connected Lie groups each of whose left-invariant Riemannian metrics has constant negative sectional curvature \cite{Miln76}. The group $G_n$, $n\geq 2$, is isomorphic to the group of real block matrices \begin{equation} \label{elem} g=(y,x):=\left(\begin{array}{cc} xE_{n-1} & y^{\prime}\\ 0 & 1 \end{array}\right), \end{equation} where $E_{n-1}$ is the unit matrix of order $n-1$, $y^{\prime}$ is a transposed $(n-1)$-vector-row $y$, $0$ is a zero $(n-1)$-vector-row, $x>0$. It is clear that in vector notation the group operations have the form \begin{equation} \label{prod} (y_1,x_1)\cdot (y_2,x_2)= x_1(y_2,x_2)+(y_1,0),\quad (y,x)^{-1}=x^{-1}(-y,1). \end{equation} Let $E_{ij}$, $i,j=1,\dots,n$, be an $(n\times n)$-matrix having $1$ in the $i$th row and the $j$th column and $0$ in all other places.
Matrices \begin{equation} \label{abc} e_i=E_{in},\,\,i=1,\dots,n-1,\quad e_n=\sum\limits_{k=1}^{n-1}E_{kk} \end{equation} constitute a basis of Lie algebra $\frak{g}_n$ of the Lie group $G_n$. In addition, $$[e_i,e_j]=0,\,\,i,j=1,\dots,n-1;\quad [e_n,e_i]=e_i,\,\,i=1,\dots,n-1$$ so all nonzero structure constants in the basis $\{e_1,\dots,e_n\}$ are equal \begin{equation} \label{strconst} c_{ni}^i=- c_{in}^i=1,\,\quad i=1,\dots,n-1. \end{equation} Let $(\cdot,\cdot)$ be a scalar product on $\frak{g}_n$ with the orthonormal basis $e_1,\dots,e_n$. Then we get left-invariant Riemannian metric $d$ on the Lie group $G_n$ of constant sectional curvature $-1$ \cite{Miln76}. On the ground of Theorem \ref{kok} and (\ref{strconst}), $\psi_i=\psi_i(t)$, $i=1,\dots,n$ are solutions of the Cauchy problem \begin{equation} \label{sist0} \left\{\begin{array}{c} \dot{\psi}_i(t)=\psi_i(t)\psi_n(t),\,\,i=1,\dots,n-1,\quad\dot{\psi}_n(t)=-\sum\limits_{i=1}^{n-1}\psi_i^2(t); \\ \psi_i(0)=\varphi_i,\,\,i=1,\dots,n,\quad \sum\limits_{i=1}^n\varphi_i^2=1. \end{array}\right. 
\end{equation} It follows from (\ref{sist0}) that $$\ddot{\psi}_n(t)=-2\psi_n(t)\sum\limits_{i=1}^{n-1}\psi_i^2(t)=2\psi_n(t)\dot{\psi}_n(t)= \left(\psi_n^2\right)^{\cdot}(t),$$ whence on the ground of initial data of the Cauchy problem (\ref{sist0}), it follows that $$\dot{\psi}_n(t)=\psi_n^2(t)-1,\quad \psi_n(0)=\varphi_n.$$ Solving this Cauchy problem, we find that $$\psi_n(t)=\frac{\varphi_n\operatorname{ch} t-\operatorname{sh} t}{\operatorname{ch} t-\varphi_n\operatorname{sh} t}.$$ Then on the base of (\ref{sist0}), for $i=1,\dots,n-1$, $$\ln|\psi_i(t)|=\int\limits_0^t\frac{\varphi_n\operatorname{ch}\tau-\operatorname{sh}\tau}{\operatorname{ch}\tau-\varphi_n\operatorname{sh}\tau}d\tau+\ln{|\varphi_i|}=-\ln{|\operatorname{ch} t -\varphi_n\operatorname{sh} t|}+\ln{|\varphi_i|},\quad \text{if }\varphi_i\neq 0,$$ so $$\psi_i(t)=\frac{\varphi_i}{\operatorname{ch} t-\varphi_n\operatorname{sh} t},\quad i=1,\dots,n-1,$$ and these formulae are true also when $\varphi_i=0$. Consequently, on the ground of (\ref{difur}), \begin{equation} \label{u} u(t)=\frac{1}{\operatorname{ch} t-\varphi_n\operatorname{sh} t}\left(\sum\limits_{i=1}^{n-1}\varphi_ie_i+\left(\varphi_n\operatorname{ch} t-\operatorname{sh} t\right)e_n\right). \end{equation} If $g\in G_n$ is defined by formula (\ref{elem}), $u=\sum\limits_{i=1}^nu_ie_i\in\frak{g}_n$, then \begin{equation} \label{gu} gu=\left(\begin{array}{cc} (xu_n)E_{n-1} & v\\ 0 & 0 \end{array}\right),\quad v=(xu_1,\dots,xu_{n-1})^T. 
\end{equation} Therefore on the base of Theorem \ref{kok} and (\ref{u}) in the notation (\ref{elem}), the normal geodesic $g=g(t)$, $t\in\mathbb{R}$, of the space $(G_n,d)$, parametrized by arc length, with $g(0)=e$ is a solution of the Cauchy problem \begin{equation} \label{coshi} \left\{\begin{array}{c} \dot{x}(t)=\frac{\varphi_n\operatorname{ch} t-\operatorname{sh} t}{\operatorname{ch} t-\varphi_n\operatorname{sh} t}x(t),\,\,\dot{y}_i(t)=\frac{\varphi_i}{\operatorname{ch} t-\varphi_n\operatorname{sh} t}x(t),\quad i=1,\dots,n-1, \\ x(0)=1,\quad y_i(0)=0,\,\,i=1,\dots,n-1. \end{array}\right. \end{equation} Solving the problem, we find \begin{equation} \label{xy} x(t)=\frac{1}{\operatorname{ch} t-\varphi_n\operatorname{sh} t},\quad y_i(t)=\int\limits_0^t\frac{\varphi_i\,d\tau}{(\operatorname{ch} \tau-\varphi_n\operatorname{sh} \tau)^2}=\frac{\varphi_i\operatorname{sh} t}{\operatorname{ch} t-\varphi_n\operatorname{sh} t}. \end{equation} This implies that \begin{equation} \label{expon} x(t)=e^{\pm t},\quad y_i(t)\equiv 0,\quad i=1,\dots, n-1,\quad\mbox{if}\quad \varphi_n = \pm 1. \end{equation} Let $\varphi_n^2<1$. Let us show that for any $t\in\mathbb{R}$, the equality \begin{equation} \label{u0} \sum\limits_{i=1}^{n-1}(y_i(t)-a_i)^2+x^2(t)=\sum\limits_{i=1}^{n-1}a_i^2+1 \end{equation} holds, where $a_i$, $i=1,\dots,n-1$, are real numbers such that \begin{equation} \label{ai} \sum\limits_{i=1}^{n-1}a_i\varphi_i=\varphi_n. \end{equation} We introduce a function $f(t)=\sum\limits_{i=1}^{n-1}(y_i(t)-a_i)^2+x^2(t)$. Due to the initial data in (\ref{coshi}), $f(0)=\sum\limits_{i=1}^{n-1}a_i^2+1$.
On the ground of (\ref{coshi}), (\ref{xy}) and last equation in (\ref{sist0}), we get $$\frac{1}{2}f^{\operatorname{pr}ime}(t)=\sum\limits_{i=1}^{n-1}(y_i(t)-a_i)\dot{y}_i(t)+x(t)\dot{x}(t)= \sum\limits_{i=1}^{n-1}\left(\frac{\varphi_i\operatorname{sh} t}{\operatorname{ch} t-\varphi_n\operatorname{sh} t}-a_i\right)\varphi_i+\frac{\varphi_n\operatorname{ch} t-\operatorname{sh} t}{\operatorname{ch} t-\varphi_n\operatorname{sh} t}=$$ $$\frac{\operatorname{sh} t\left(\sum\limits_{i=1}^{n-1}\varphi_i^2-1\right)+\varphi_n\operatorname{ch} t}{\operatorname{ch} t-\varphi_n\operatorname{sh} t}-\sum\limits_{i=1}^{n-1}a_i\varphi_i=\varphi_n-\sum\limits_{i=1}^{n-1}a_i\varphi_i=0.$$ Consequently, $f(t)\equiv f(0)$ and the equality (\ref{u0}) is proved. It is easy to check that the equality (\ref{ai}) holds for \begin{equation} \label{pai} a_i=\varphi_i\varphi_n/(1-\varphi_n^2),\quad i=1,\dots, n-1;\quad\mbox{moreover}\quad \sum\limits_{i=1}^{n-1}a_i^2+1 = \frac{1}{1-\varphi_n^2}. \end{equation} These numbers $a_i$ are obtained as halves of sums of limits $y_i(t)$ when $t\rightarrow +\infty$ and $t\rightarrow -\infty$, which are equal to $\varphi_i/(1-\varphi_n)$ and $-\varphi_i/(1+\varphi_n)$ respectively. Formulae (\ref{prod}) show that the group $G_n$ is a simply transitive isometry group of the famous Poincare's model of the Lobachevskii space $L^n$ in the half space $\mathbb{R}^n_+$ with metric $ds^2=(\sum_{k=1}^{n-1}dy_k^2+dx^2)/x^2$. 
The above results, including formulae (\ref{xy}), (\ref{expon}), (\ref{pai}), show that geodesics of the space $L^n$ in this model, passing through the point $(0,\dots,0,1),$ are semi-straights or semi-circles (with centers $(a_1,\dots,a_{n-1},0)$ and radii $1/{\sqrt{1-\varphi_n^2}}$, (\ref{pai})), orthogonal to the hyperplane $\mathbb{R}^{n-1}\times \{0\}.$ Since all other geodesics are obtained by left shifts on the group, in other words, by indicated parallel translations and homotheties of this model, then also all straights and semi-circles, orthogonal to the hyperplane $\mathbb{R}^{n-1}\times \{0\},$ are geodesics of the space $L^n.$ We got a well-known description of geodesics in this Poincare's model. Now let us look what the vector field method gives us for the problem. Every vector $\psi\in\frak{g}_n$ can be considered as a covector $\frak{g}^{\ast}$, setting $\psi(v)=(\psi,v)$ for $v\in\frak{g}_n$. Then any (co)vector $\psi_0$ from Theorem \ref{main} has a form $$\psi_0=\sum\limits_{i=1}^n\varphi_ie_i,\quad\sum\limits_{i=1}^n\varphi_i^2=1.$$ Let $w=\sum\limits_{i=1}^nw_ie_i\in\frak{g}_n$, $g\in G_n$ is defined by formula (\ref{elem}). 
It is easy to see that $$\operatorname{Ad}(g)(w)=gwg^{-1}=\sum\limits_{i=1}^{n-1}(w_ix-w_ny_i)e_i+w_ne_n,$$ $$(\psi_0,\operatorname{Ad}(g)(w))=\sum\limits_{i=1}^{n-1}(w_ix-w_ny_i)\varphi_i+w_n\varphi_n= x\sum\limits_{i=1}^{n-1}\varphi_iw_i+\left(\varphi_n-\sum\limits_{i=1}^{n-1}\varphi_iy_i\right)w_n.$$ It is clear that $$u(g)=x\sum\limits_{i=1}^{n-1}\varphi_ie_i+\left(\varphi_n-\sum\limits_{i=1}^{n-1}\varphi_iy_i\right)e_n,$$ $$v(g)=gu(g)=x\sum\limits_{i=1}^{n}u_ie_i=x^2\sum\limits_{i=1}^{n-1}\varphi_ie_i+x\left(\varphi_n-\sum\limits_{i=1}^{n-1}\varphi_iy_i\right)e_n.$$ Thus geodesic $g=g(t)$, $t\in\mathbb{R}$, with $g(0)=e$ is a solution of the Cauchy problem \begin{equation} \label{coshi2} \left\{\begin{array}{c} \dot{x}(t)=\left(\varphi_n-\sum\limits_{i=1}^{n-1}\varphi_iy_i(t)\right)x(t),\quad\dot{y}_i(t)=\varphi_ix^2(t),\,\,i=1,\dots,n-1, \\ x(0)=1,\quad y_i(0)=0,\,\,i=1,\dots,n-1. \end{array}\right. \end{equation} Dividing the first equation in (\ref{coshi2}) by $x(t),$ we get on the left hand side the derivative of the function $\ln x(t):= z(t).$ Differentiating both sides of the resulting equation and using the second equation in (\ref{coshi2}) and the equality $\sum\limits_{i=1}^n\varphi_i^2=1$, we get $$\ddot{z}(t)=-\sum\limits_{i=1}^{n-1}\varphi_i^2x^2(t)= -(1-\varphi_n^2)e^{2z(t)},\quad z(0)=0,\,\,\dot{z}(0)=\varphi_n.$$ If $\varphi_n=\pm 1$ then $\ddot{z}(t)\equiv 0$ and due to the initial data and the second equation in (\ref{coshi2}), we get $z(t)=\pm t,$ $x(t)=e^{\pm t},$ $y_i(t)\equiv 0,$ $i=1,\dots,n-1$. Let $0\leq \varphi_n^2 < 1.$ Let us multiply both sides of the resulting equation by $2\dot{z}.$ Then $$2\dot{z}\ddot{z}=-(1-\varphi_n^2)e^{2z}2\dot{z},\quad d(\dot{z})^2=-(1-\varphi_n^2)e^{2z}d{(2z)},\quad \dot{z}^2=-(1-\varphi_n^2)e^{2z}+ C.$$ Taking into account the initial conditions for $z(t),$ we get $C=1$ and $\dot{z}(t)^2=1-(1-\varphi_n^2)e^{2z(t)}.$ The expression on the right is positive for $t$ sufficiently close to zero. 
Therefore, with these $t,$ we get $$\dot{z}(t)=\pm\sqrt{ 1-(1-\varphi_n^2)e^{2z(t)}},$$ where the sign coincides with the sign of $\varphi_n,$ if $\varphi_n\neq 0.$ Separating variables, we get $$dt= \frac{\pm dz}{\sqrt{ 1-(1-\varphi_n^2)e^{2z}}}= \frac{\pm dz}{e^z\sqrt{1-\varphi_n^2}\sqrt{(e^{-2z}/(1-\varphi_n^2))-1}}=$$ $$ \frac{\mp d(e^{-z}/\sqrt{1-\varphi_n^2})}{\sqrt{(e^{-2z}/(1-\varphi_n^2))-1}}= \mp d\left(\operatorname{arch}\left(\frac{e^{-z}}{\sqrt{1-\varphi_n^2}}\right)\right),$$ $$\pm \operatorname{arch}\left(\frac{e^{-z}}{\sqrt{1-\varphi_n^2}}\right)=c-t, \quad c = \operatorname{arch}\left(\frac{1}{\sqrt{1-\varphi_n^2}}\right).$$ The applying $\operatorname{ch}$ to the left and right sides of the resulting equality gives $$\frac{e^{-z(t)}}{\sqrt{1-\varphi_n^2}}=\operatorname{ch} c\operatorname{ch} t- \operatorname{sh} c\operatorname{sh} t= \frac{\operatorname{ch} t -\varphi_n \operatorname{sh} t}{\sqrt{1-\varphi_n^2}}.$$ Consequently, when $ t $ are sufficiently close to zero, $$x(t)=e^{z(t)}=\frac{1}{\operatorname{ch} t- \varphi_n\operatorname{sh} t}.$$ Since the right sides of the system of differential equations (\ref{coshi2}) are real analytic, this equality is true for all $t\in \mathbb{R}.$ We obtain from this and the second system in (\ref{coshi2}) the same solutions $y_i(t),$ $t\in\mathbb{R},$ $i=1,\dots, n-1,$ as in (\ref{xy}). Using formulae (\ref{prod}) and (\ref{xy}) for $x=x(t)$, $y_i=y_i(t),$ we shall find a formula for distances between group elements, or, which is the same, between points of the Lobachevsky space in Poincare's model under consideration. 
We obtain from (\ref{xy}) $$\frac{1}{x}=\operatorname{ch} t-\varphi_n\operatorname{sh} t,\quad x=\frac{\operatorname{ch} t +\varphi_n\operatorname{sh} t}{\operatorname{ch}^2t-\varphi_n^2\operatorname{sh}^2t}=\frac{\operatorname{ch} t +\varphi_n\operatorname{sh} t}{1 + (1-\varphi_n^2)\operatorname{sh}^2t},$$ $$\sum_{i=1}^{n-1}(y_i/x)^2=\operatorname{sh}^2t\sum_{i=1}^{n-1}\varphi_i^2=(1-\varphi_n^2)\operatorname{sh}^2t,$$ $$\operatorname{ch} t+\varphi_n\operatorname{sh} t=\frac{x}{x^2}\left(x^2+\sum_{i=1}^{n-1}y_i^2\right)=\frac{1}{x}\left(x^2+\sum_{i=1}^{n-1}y_i^2\right),$$ $$\operatorname{ch} t=\frac{1}{2x}\left(1+x^2+\sum_{i=1}^{n-1}y_i^2\right),\quad d((0,1),(y,x))=\operatorname{arch}\left[\frac{1}{2x}\left(1+x^2+\sum_{i=1}^{n-1}y_i^2\right)\right].$$ Now by (\ref{prod}), the last formula, and left-invariance of metric $d$, $$(y_1,x_1)^{-1}(y_2,x_2)=x_1^{-1}(-y_1,1)(y_2,x_2)=(x_1^{-1}(y_2-y_1), x_1^{-1}x_2),$$ $$d((y_1,x_1),(y_2,x_2))=d((0,1), (x_1^{-1}(y_2-y_1), x_1^{-1}x_2))=$$ $$\operatorname{arch}\left[\frac{x_1}{2x_2}\left(1+ \frac{x_2^2}{x_1^2}+\frac{1}{x_1^2}\sum_{i=1}^{n-1}(y_{2,i}-y_{1,i})^2\right)\right]=$$ \begin{equation} \label{dist} \operatorname{arch}\left[\frac{1}{2x_1x_2}\left(x_1^2+ x_2^2+ \sum_{i=1}^{n-1}(y_{2,i}-y_{1,i})^2\right)\right]= d((y_1,x_1),(y_2,x_2)). \end{equation} \section{ The three--dimensional Heisenberg group} This Heisenberg group is a nilpotent Lie group of upper--triangular matrices \begin{equation} \label{heis} H=\left\{h=\left(\begin{array}{ccc} 1& x & z\\ 0& 1 & y \\ 0 & 0 & 1 \end{array}\right)\right\},\,\, x,y,z \in \mathbb{R}. \end{equation} It is easy to compute that \begin{equation} \label{hm} h^{-1}=\left(\begin{array}{ccc} 1& -x & xy- z\\ 0& 1 & -y \\ 0 & 0 & 1 \end{array}\right). \end{equation} Clearly, $H$ is naturally diffeomorphic to $\mathbb{R}^3$ and $H$ is a connected Lie group with respect to this differential structure. 
Matrices \begin{equation} \label{base} e_1=\left(\begin{array}{ccc} 0& 1 & 0\\ 0& 0 & 0 \\ 0 & 0 & 0 \end{array}\right), \quad e_2=\left(\begin{array}{ccc} 0& 0 & 0\\ 0& 0 & 1 \\ 0 & 0 & 0 \end{array}\right),\quad e_3=\left(\begin{array}{ccc} 0& 0 & 1\\ 0& 0 & 0 \\ 0 & 0 & 0 \end{array}\right) \end{equation} constitute a basis of Lie algebra $\mathfrak{h}$ of Heisenberg group $H$. In addition, $$[e_1,e_2]= e_1e_2- e_2e_1= e_3.$$ Hence the vector subspace $\mathfrak{p}\subset \mathfrak{h}$ with basis $\{e_1,e_2\}$ generates $\mathfrak{h}.$ Thus the triple $(H,\mathfrak{h},\mathfrak{p})$ satisfies all conditions of Theorems \ref{contr} and \ref{topt}. Let us search for all geodesics of the problem from Theorem \ref{topt}. They are all normal by Theorem \ref{norm}, and we can use Theorem \ref{main}. Let us define a scalar product $(\cdot,\cdot)$ on $\mathfrak{h}$ with orthonormal basis $\{e_1,e_2,e_3\}$. Then each vector $\psi\in \mathfrak{h}$ can be considered as a covector from $\mathfrak{h}^{\ast},$ if we set $\psi(v)=(\psi,v)$ for $v\in\mathfrak{h}.$ Then any (co)vector $\psi_0$ from Theorem \ref{main} has a form \begin{equation} \label{psn} \psi_0= \cos\xi e_1+ \sin\xi e_2+ \beta e_3,\quad \xi, \beta\in \mathbb{R}. 
\end{equation} Let $$v=\sum_{k=1}^2v_ke_k=\left(\begin{array}{ccc} 0& v_1 & 0\\ 0& 0 & v_2 \\ 0 & 0 & 0 \end{array}\right),\quad v\in \mathfrak{p},\,\, v_k\in\mathbb{R},\,\, k=1,2.$$ Using formulae (\ref{heis}), (\ref{hm}), we get $$Ad(h)(v)=hvh^{-1}= \left(\begin{array}{ccc} 0& v_1 & -yv_1+xv_2\\ 0& 0 & v_2 \\ 0 & 0 & 0 \end{array}\right), $$ $$(\psi_0,\operatorname{Ad}(h)(v))=\cos\xi v_1+\sin\xi v_2+ \beta( -yv_1+xv_2)=$$ $$(\cos\xi -\beta y)v_1+ (\sin\xi +\beta x)v_2.$$ It is clear that $$u(h)=(\cos\xi -\beta y)e_1+ (\sin\xi +\beta x)e_2$$ and so a geodesic is an integral curve of the vector field $$v(h)= hu(h)=(\cos\xi -\beta y)e_1+ (\sin\xi +\beta x)e_2+ x (\sin\xi +\beta x)e_3.$$ Therefore $h(t)$ is a solution of the Cauchy problem \begin{equation} \label{sist} \left\{\begin{array}{l} \dot{x}=\cos\xi -\beta y, \\ \dot{y}=\sin\xi +\beta x, \\ \dot{z}= x (\sin\xi +\beta x)(=x\dot{y}) \\ \end{array}\right. \end{equation} with initial data $x(0)=y(0)=z(0)=0$. Let us turn to {\it the coordinate system $\tilde{x},\tilde{y},\tilde{z}$ of the first kind} on the Lie group $H:$ $$\exp\left(\begin{array}{ccc} 0& x & z\\ 0& 0 & y \\ 0 & 0 & 0 \end{array}\right)=\left(\begin{array}{ccc} 1& x & z+(xy)/2\\ 0& 1 & y \\ 0 & 0 & 1 \end{array}\right). 
$$ Hence $\tilde{x}=x, \tilde{y}=y, \tilde{z}=z-(xy)/2.$ It is easy to see that for $\beta=0$ we get $$x(t)=(\cos\xi)t,\,\, y(t)=(\sin\xi)t,\,\, z(t)=\frac{1}{2}\cos\xi\sin\xi t^2,\,\,\tilde{z}(t)\equiv 0,\,\, t\in\mathbb{R},$$ and geodesic is a $1$--parameter subgroup $$g(t)=\exp(t(\cos\xi e_1 + \sin\xi e_2)),\,\, t\in\mathbb{R}.$$ If $\beta\neq 0$, the calculations are more difficult: $$\ddot{x}=-\beta\dot{y}=-\beta(\sin\xi + \beta x)=-\beta^2x-\beta\sin\xi,$$ $$x(t)=C_1\cos\beta t + C_2\sin\beta t -\frac{\sin\xi}{\beta}.$$ Since $x(0)=0,$ $\dot{x}(0)=\cos\xi$, then $C_1=(\sin\xi)/\beta,$ $C_2=(\cos\xi)/\beta,$ \begin{equation} \label{xt} x(t)=\frac{1}{\beta}(\sin\xi\cos \beta t + \cos\xi \sin\beta t-\sin\xi)=\frac{1}{\beta}(\sin(\xi+ \beta t)-\sin\xi); \end{equation} $$\ddot{y}=\beta\dot{x}=\beta(\cos\xi - \beta y)=-\beta^2y+\beta\cos\xi,$$ $$y(t)=C_1\cos\beta t + C_2\sin\beta t +\frac{\cos\xi}{\beta}.$$ Since $y(0)=0,$ $\dot{y}(0)=\sin\xi$, then $C_1=-(\cos\xi)/\beta,$ $C_2=(\sin\xi)/\beta,$ \begin{equation} \label{yt} y(t)=\frac{1}{\beta}(-\cos\xi\cos \beta t + \sin\xi\sin\beta t+\cos\xi)=\frac{1}{\beta}(-\cos(\xi+ \beta t)+\cos\xi), \end{equation} $$ \tilde{z}'= \dot{z}-\frac{(xy)'}{2}=x\dot{y}-\frac{1}{2}(\dot{x}y+x\dot{y})=\frac{1}{2}(x\dot{y}-\dot{x}y)=$$ $$\frac{1}{2\beta}[(\sin(\xi+ \beta t)-\sin\xi)\sin(\xi+\beta t)-\cos(\xi+\beta t)(-\cos(\xi+ \beta t)+\cos\xi)]=$$ $$\frac{1}{2\beta}[1-(\sin\xi\cdot\sin(\xi+\beta t)+\cos(\xi+\beta t)\cos\xi)]=\frac{1}{2\beta}(1-\cos\beta t)= \tilde{z}'.$$ Since $\tilde{z}(0)=0$ then \begin{equation} \label{zt} \tilde{z}(t)= \frac{1}{2\beta}\left(t-\frac{\sin\beta t}{\beta}\right), t\in\mathbb{R}. 
\end{equation} It follows from equalities (\ref{xt}), (\ref{yt}), ($\ref{zt}$) that the projection of geodesic $g=g(t)$ onto the plane $x,y$ is {\it a circle with radius $1/|\beta|$ and center $(1/\beta)(-\sin\xi,\cos\xi)$, $T=2\pi/|\beta|$ is a circulation period}, while $\tilde{z}(t),$ $t\in\mathbb{R},$ does not depend on the parameter $\xi.$ Therefore, if we fix $\beta\neq 0$ then for different $\xi$ all geodesic segments $g(\beta,\xi,t), 0\leq t\leq 2\pi/|\beta|,$ start at $e$ and finish at the same point. It follows from the existence of the shortest arcs, Theorem \ref{topt}, PMP and our calculations that if $\beta=0$ (respectively, $\beta\neq 0$) then every segment (respectively, the length of such segment is less or equal to $T=2\pi/|\beta|$) of these geodesics is a shortest arc. There is no other geodesic or shortest arc except indicated above and their left shifts. \section{Controls for left-invariant sub-Riemannian metrics on $SO(3)$} \label{so3} It is well known that every two--dimensional vector subspace $\mathfrak{p}$ of Lie algebra $(\mathfrak{so}(3),[\cdot,\cdot])$ of the Lie group $SO(3)$ generates $\mathfrak{so}(3).$ Moreover, there exists a basis $\{e_1,e_2\}$ of the space $\mathfrak{p}$ such that $[e_2,e_3]=e_1,$ $[e_3,e_1]=e_2$ for the vector $e_3=[e_1,e_2]$. Let $(\cdot,\cdot)$ be a scalar product on $\mathfrak{so}(3)$ with orthonormal basis $\{e_1,e_2,e_3\}.$ Then if a scalar product $\langle\cdot,\cdot\rangle$ on $\mathfrak{p}$ defines a left-invariant sub-Riemannian metric $d$ on the Lie group $G=SO(3),$ then there exists a basis $\{v,w\}$ in $\mathfrak{p}$ that is orthonormal relative to $\langle\cdot,\cdot\rangle,$ orthogonal relative to $(\cdot,\cdot),$ and such that $(v,v)=a^2\leq b^2=(w,w),$ $[v,w]=(ab)e_3,$ where $0< a\leq b.$ Let $v,w$ be new vectors $e_1,e_2.$ Then \begin{equation} \label{e1e2e3} [e_1,e_2]=(ab)e_3,\,\,[e_3,e_1]=(b/a)e_2,\,\,[e_2,e_3]=(a/b)e_1,\,\, 0< a\leq b. 
\end{equation} It follows from (\ref{e1e2e3}) that all nonzero structure constants are $$c_{12}^3=- c_{21}^3= ab,\,\, c_{31}^2=- c_{13}^2=b/a,\,\,c_{23}^1=-c_{32}^1= a/b.$$ Let $g(t)$, $t\in\mathbb{R}$, be a geodesic of the space $(SO(3),d)$, parametrized by arclength, and $g(0)=e$. On the ground of Theorem \ref{kok}, $$g^{\prime}(t)=g(t)u(t),\quad u(t)=\psi_1(t)e_1+\psi_2(t)e_2,$$ where \begin{equation} \label{psi} \psi^{\prime}_1(t)=-ab\psi_2(t)\psi_3(t),\quad \psi^{\prime}_2(t)=ab\psi_1(t)\psi_3(t),\quad \psi^{\prime}_3(t)=\frac{a^2-b^2}{ab}\psi_1(t)\psi_2(t). \end{equation} Since $|u(t)|\equiv 1$, we can write $\psi_1(t)=\cos\xi(t)$, $\psi_2(t)=\sin\xi(t)$, and (\ref{psi}) is written as $$-\sin\xi(t)\dot{\xi}(t)=-ab\sin\xi(t)\psi_3(t), \quad \cos\xi(t)\dot{\xi}(t)=ab\cos\xi(t)\psi_3(t),$$ $$ \psi'_3(t)=\frac{a^2-b^2}{ab}\cos\xi(t)\sin\xi(t).$$ Then $\psi_3(t)=\frac{1}{ab}\xi^{\prime}(t)$ and $\xi=\xi(t)$ is a solution of the differential equation \begin{equation} \label{xid} \xi^{\prime\prime}(t)=\frac{a^2-b^2}{2}\sin 2\xi(t). \end{equation} If $a=b$ then $\xi''(t)=0,$ $\xi'(t)=\operatorname{const}=\beta.$ In this case the geodesics are obtained from the geodesics for $a=b=1$ by replacing the parameter $s$ with the parameter $t=s/a.$ Geodesics, shortest arcs, the distance $d,$ the cut locus and conjugate sets for geodesics in the case $a=b=1$ are found in papers \cite{BZ15} and \cite{BZ151}. The case $0<a<b$ is reduced to the case $a^2-b^2=-1$ by a proportional change of the metric $d$. Then the variable $\omega(t):=2\xi(t)$ allows us to rewrite the equation as the mathematical pendulum equation \begin{equation} \label{omega} \omega^{\prime\prime}(t)=-\sin\omega(t).
\end{equation} In \cite{BS16}, I.Yu.~Beschastnyi and Yu.L.~Sachkov studied geodesics of left-invariant sub-Riemannian metrics on the Lie group $SO(3)$ and gave estimates for the cut time and the metric diameter. Under replacement $b^2-a^2$ by $a^2$ and $\xi$ by $\psi,$ the equation (\ref{xid}) coincides with the equation (2.4) from their paper, obtained by another method. \section{To search for geodesics of a sub-Riemannian metric on $SH(2)$} The Lie group $SH(2)$ consists of all matrices of a form \begin{equation} \label{matr3} g=\left(\begin{array}{cc} A& v\\ 0& 1\end{array}\right);\quad A=\left(\begin{array}{cc} \operatorname{ch}\varphi & \operatorname{sh}\varphi\\ \operatorname{sh}\varphi & \operatorname{ch}\varphi \end{array}\right),\quad v=\left(\begin{array}{c} x\\ y \end{array}\right) \in \mathbb{R}^2. \end{equation} It is not difficult to see that \begin{equation} \label{inv} g^{-1}= \left(\begin{array}{cc} A & v\\ 0& 1 \end{array}\right)^{-1}=\left(\begin{array}{cc} A^{-1}& -A^{-1}v\\ 0& 1 \end{array}\right). \end{equation} Clearly, matrices \begin{equation} \label{abc} e_1=\left(\begin{array}{ccc} 0& 1 & 0\\ 1& 0 & 0 \\ 0 & 0 & 0 \end{array}\right),\quad e_2=\left(\begin{array}{ccc} 0& 0 & 1\\ 0& 0 & 0 \\ 0 & 0 & 0 \end{array}\right),\quad e_3=\left(\begin{array}{ccc} 0& 0 & 0\\ 0& 0 & 1 \\ 0 & 0 & 0 \end{array}\right) \end{equation} constitute a basis of Lie algebra $\mathfrak{sh}(2).$ In addition, \begin{equation} \label{abca} [e_1,e_2]=e_3,\quad [e_2,e_3]=0,\quad [e_1,e_3]=e_2. \end{equation} Let us define a scalar product $\langle\cdot,\cdot\rangle$ on $\mathfrak{sh}(2)$ with orthonormal basis $\{e_1,\,e_2,\,e_3\}$ and the subspace $\mathfrak{p}$ with orthonormal basis $\{e_1,\,e_2\}$ generating Lie algebra $\mathfrak{sh}(2)$. Thus a left-invariant sub-Riemannian metric $d$ is defined on the Lie group $SH(2).$ Let us take a (co)vector $\psi_0= \cos\alpha e_1+ \sin\alpha e_2+\beta e_3\in\frak{sh}(2)$. 
We calculate $$\psi_g(w)=\langle\psi_g,w\rangle=\langle\psi_0,gwg^{-1}\rangle,\quad g\in SH(2),\,\,w= w_1e_1+w_2e_2\in\mathfrak{p}.$$ $$gwg^{-1}=\tiny{\left(\begin{array}{ccc} \operatorname{ch}\varphi & \operatorname{sh}\varphi & x\\ \operatorname{sh}\varphi & \operatorname{ch}\varphi & y \\ 0 & 0 & 1 \end{array}\right) \left(\begin{array}{ccc} 0 & w_1 & w_2 \\ w_1 & 0 & 0 \\ 0 & 0 & 0 \end{array}\right) \left(\begin{array}{ccc} \operatorname{ch}\varphi & -\operatorname{sh}\varphi & -x\operatorname{ch}\varphi+y\operatorname{sh}\varphi\\ -\operatorname{sh}\varphi & \operatorname{ch}\varphi & x\operatorname{sh}\varphi-y\operatorname{ch}\varphi \\ 0 & 0 & 1 \end{array}\right)}$$ $$=w_1e_1+(-w_1y+w_2\operatorname{ch}\varphi)e_2+ (-w_1x+w_2\operatorname{sh}\varphi)e_3,$$ $$\psi_g(w)=w_1\cos\alpha+(-w_1y+w_2\operatorname{ch}\varphi)\sin\alpha+(-w_1x+ w_2\operatorname{sh}\varphi)\beta=$$ $$w_1(\cos \alpha - y\sin\alpha-\beta x)+ w_2(\operatorname{ch}\varphi\sin\alpha + \beta\operatorname{sh}\varphi).$$ Therefore, $$u(g)=(\cos\alpha-y\sin\alpha-\beta x)e_1+(\sin\alpha\operatorname{ch}\varphi+\beta\operatorname{sh}\varphi)e_2,$$ $$v(g)=gu(g)=\tiny{\left(\begin{array}{ccc} \operatorname{ch}\varphi & \operatorname{sh}\varphi & x\\ \operatorname{sh}\varphi & \operatorname{ch}\varphi & y \\ 0 & 0 & 1 \end{array}\right)\left(\begin{array}{ccc} 0 & \cos\alpha-y\sin\alpha-\beta x & \sin\alpha\operatorname{ch}\varphi+\beta\operatorname{sh}\varphi\\ \cos\alpha-y\sin\alpha-\beta x & 0 & 0 \\ 0 & 0 & 0 \end{array}\right)}$$ $$=\tiny{\left(\begin{array}{ccc} \operatorname{sh}\varphi(\cos\alpha-y\sin\alpha-\beta x) & \operatorname{ch}\varphi(\cos\alpha-y\sin\alpha-\beta x) & \operatorname{ch}\varphi(\sin\alpha\operatorname{ch}\varphi+\beta\operatorname{sh}\varphi) \\ \operatorname{ch}\varphi(\cos\alpha-y\sin\alpha-\beta x) & \operatorname{sh}\varphi(\cos\alpha-y\sin\alpha-\beta x) & \operatorname{sh}\varphi(\sin\alpha\operatorname{ch}\varphi+\beta\operatorname{sh}\varphi) \\ 0 & 0 & 0
\end{array}\right)}.$$ Hence the integral curves of the vector field $v(g),$ $g\in SH(2),$ satisfy the system of differential equations \begin{equation} \label{sistem} \left\{\begin{array}{l} \dot{\varphi}=\cos\alpha-y\sin\alpha-\beta x, \\ \dot{x}=\operatorname{ch}\varphi(\sin\alpha\operatorname{ch}\varphi+\beta\operatorname{sh}\varphi), \\ \dot{y}=\operatorname{sh}\varphi(\sin\alpha\operatorname{ch}\varphi+\beta\operatorname{sh}\varphi). \\ \end{array}\right. \end{equation} The geodesic $g(t),$ $t\in\mathbb{R}$, with $g(0)=e$ is a solution of this system with initial data $\varphi(0)=x(0)=y(0)=0$. In this case, $|u(g(t))|\equiv 1$, i.e. \begin{equation} \label{m1} g(t)\in M_1=\{(\sin\alpha\operatorname{ch}\varphi+\beta\operatorname{sh}\varphi)^2+(\cos\alpha-y\sin\alpha-\beta x)^2=1\}\subset SH(2). \end{equation} Therefore there exists a differentiable function $\gamma=\gamma(t)$ such that \begin{equation} \label{s0} \cos\frac{\gamma}{2}=\sin\alpha\operatorname{ch}\varphi+\beta\operatorname{sh}\varphi,\quad\sin\frac{\gamma}{2}=\cos\alpha-y\sin\alpha-\beta x. \end{equation} Since $\varphi(0)=x(0)=y(0)=0,$ we can assume that $\gamma(0)=\pi-2\alpha$. On the ground of (\ref{s0}) the system (\ref{sistem}) is written in the form \begin{equation} \label{sistem1} \left\{\begin{array}{l} \dot{\varphi}=\sin{\frac{\gamma}{2}}, \\ \dot{x}=\cos{\frac{\gamma}{2}}\operatorname{ch}\varphi, \\ \dot{y}= \cos{\frac{\gamma}{2}}\operatorname{sh}\varphi. \end{array}\right.
\end{equation} Differentiating the first and the second equalities in (\ref{s0}) and using (\ref{sistem1}), we get $$-\frac{\dot{\gamma}}{2}\sin\frac{\gamma}{2}= (\sin\alpha\operatorname{sh}\varphi+\beta\operatorname{ch}\varphi)\dot{\varphi}= \sin{\frac{\gamma}{2}}\left(\sin\alpha\operatorname{sh}\varphi+\beta\operatorname{ch}\varphi\right),$$ $$\frac{\dot{\gamma}}{2}\cos\frac{\gamma}{2}=-\dot{y}\sin\alpha-\beta\dot{x}= -\cos{\frac{\gamma}{2}}\left(\sin\alpha\operatorname{sh}\varphi+\beta\operatorname{ch}\varphi\right),$$ whence $$\dot{\gamma}=-2(\sin\alpha\operatorname{sh}\varphi+ \beta\operatorname{ch}\varphi),\quad \dot{\gamma}(0)=-2\beta.$$ Consequently, on the ground of the first equality in (\ref{s0}) and (\ref{sistem1}) $$\ddot{\gamma}=-2(\sin\alpha\operatorname{ch}\varphi+\beta\operatorname{sh}\varphi)\dot{\varphi}=-2\cos\frac{\gamma}{2}\sin\frac{\gamma}{2}=-\sin\gamma.$$ We got the mathematical pendulum equation. In paper \cite{BSB14} this equation together with equations (\ref{sistem1}) are obtained by another method replacing $\varphi$ with $z.$ \section{To search for geodesics of a sub-Riemannian metric on $SE(2)$} \label{se2} The Lie group $SE(2)$ is isomorphic to the group of matrices of a form \begin{equation} \label{matr4} \left(\begin{array}{cc} A& v\\ 0& 1\end{array}\right); \quad A=\left(\begin{array}{cc} \cos\varphi & -\sin\varphi\\ \sin\varphi & \cos\varphi \end{array}\right),\quad v=\left(\begin{array}{c} x\\ y \end{array}\right) \in \mathbb{R}^2. \end{equation} The same formula (\ref{inv}) is true. 
It is clear that matrices \begin{equation} \label{abc2} e_1=\left(\begin{array}{ccc} 0& -1 & 0\\ 1& 0 & 0 \\ 0 & 0 & 0 \end{array}\right),\quad e_2=\left(\begin{array}{ccc} 0& 0 & 1\\ 0& 0 & 0 \\ 0 & 0 & 0 \end{array}\right),\quad e_3=\left(\begin{array}{ccc} 0& 0 & 0\\ 0& 0 & 1 \\ 0 & 0 & 0 \end{array}\right) \end{equation} constitute a basis of Lie algebra $\mathfrak{se}(2).$ In addition, \begin{equation} \label{abca2} [e_1,e_2]=e_3,\quad [e_1,e_3]=-e_2,\quad [e_2,e_3]= 0. \end{equation} Let us define a scalar product $\langle\cdot,\cdot\rangle$ on $\mathfrak{se}(2)$ with orthonormal basis $\{e_1,\,e_2,\,e_3\}$ and the subspace $\mathfrak{p}$ with orthonormal basis $\{e_1,\,e_2\}$ generating Lie algebra $\mathfrak{se}(2)$. Thus a left-invariant sub-Riemannian metric $d$ is defined on the Lie group $SE(2)$ (see \cite{Ber941}--\cite{S10} and other papers). Let us take a (co)vector $\psi_0= \cos\alpha e_1+ \sin\alpha e_2+\beta e_3\in\frak{se}(2)$. We calculate $$\psi_g(w)=\langle\psi_g,w\rangle=\langle\psi_0,gwg^{-1}\rangle,\quad g\in SE(2),\,\,w= w_1e_1+w_2e_2\in\mathfrak{p}.$$ $$gwg^{-1}=\tiny{\left(\begin{array}{ccc} \cos\varphi & -\sin\varphi & x\\ \sin\varphi & \cos\varphi & y \\ 0 & 0 & 1 \end{array}\right) \left(\begin{array}{ccc} 0 & -w_1 & w_2 \\ w_1 & 0 & 0 \\ 0 & 0 & 0 \end{array}\right) \left(\begin{array}{ccc} \cos\varphi & \sin\varphi & -x\cos\varphi-y\sin\varphi\\ -\sin\varphi & \cos\varphi & x\sin\varphi-y\cos\varphi \\ 0 & 0 & 1 \end{array}\right)}$$ $$=w_1e_1+(w_1y+ w_2\cos\varphi)e_2+(-w_1x+ w_2\sin\varphi)e_3,$$ $$\psi_g(w)=w_1\cos\alpha+(w_1y+w_2\cos\varphi)\sin\alpha+(-w_1x+ w_2\sin\varphi)\beta=$$ $$w_1(\cos\alpha+ y\sin\alpha- \beta x) + w_2(\sin\alpha\cos\varphi + \beta\sin\varphi).$$ Consequently, $$u(g)=(\cos\alpha + y\sin\alpha-\beta x)e_1+(\sin\alpha\cos\varphi+\beta\sin\varphi)e_2,$$ $$v(g)=gu(g)=\tiny{\left(\begin{array}{ccc} \cos\varphi & -\sin\varphi & x\\ \sin\varphi & \cos\varphi & y \\ 0 & 0 & 1
\end{array}\right)\left(\begin{array}{ccc} 0 & -\cos\alpha-y\sin\alpha+\beta x & \sin\alpha\cos\varphi+\beta\sin\varphi\\ \cos\alpha+y\sin\alpha-\beta x & 0 & 0 \\ 0 & 0 & 0 \end{array}\right)}$$ $$=\tiny{\left(\begin{array}{ccc} \sin\varphi(\beta x-\cos\alpha-y\sin\alpha) & \cos\varphi(\beta x-\cos\alpha-y\sin\alpha) & \cos\varphi(\sin\alpha\cos\varphi+\beta\sin\varphi) \\ \cos\varphi(\cos\alpha+y\sin\alpha-\beta x) & \sin\varphi(\beta x-\cos\alpha-y\sin\alpha) & \sin\varphi(\sin\alpha\cos\varphi+\beta\sin\varphi) \\ 0 & 0 & 0 \end{array}\right)}.$$ Hence the integral curves of the vector field $v(g),$ $g\in SE(2),$ satisfy the system of differential equations \begin{equation} \label{sisteme} \left\{\begin{array}{l} \dot{\varphi}=\cos\alpha+y\sin\alpha-\beta x, \\ \dot{x}=\cos\varphi(\sin\alpha\cos\varphi+\beta\sin\varphi), \\ \dot{y}=\sin\varphi(\sin\alpha\cos\varphi+\beta\sin\varphi). \\ \end{array}\right. \end{equation} The geodesic $g(t),$ $t\in\mathbb{R}$, with $g(0)=e$ is a solution of this system with initial data $\varphi(0)=x(0)=y(0)=0$. In this case, $|u(g(t))|\equiv 1$, i.e. \begin{equation} \label{m12} g(t)\in M_1=\{(\sin\alpha\cos\varphi+\beta\sin\varphi)^2+(\cos\alpha+y\sin\alpha-\beta x)^2=1\}\subset SE(2). \end{equation} Therefore there exists a differentiable function $\omega=\omega(t)=2\xi(t)$ such that \begin{equation} \label{s00} \sin\frac{\omega(t)}{2}=\sin\alpha\cos\varphi+\beta\sin\varphi,\quad\cos\frac{\omega(t)}{2}=\cos\alpha+y\sin\alpha-\beta x. \end{equation} Given the equality $\varphi(0)=x(0)=y(0)=0$, we can assume that $\omega(0)=2\xi(0)=2\alpha$. On the ground of formula (\ref{s00}) the system (\ref{sisteme}) is written in the form \begin{equation} \label{sisteme1} \left\{\begin{array}{l} \dot{\varphi}=\cos{\frac{\omega}{2}}, \\ \dot{x}=\sin{\frac{\omega}{2}}\cos\varphi, \\ \dot{y}= \sin{\frac{\omega}{2}}\sin\varphi. \end{array}\right.
\end{equation} Differentiating the first and the second equalities in (\ref{s00}) and using (\ref{sisteme1}), we get $$\frac{\dot{\omega}}{2}\cos\frac{\omega}{2}=-\left(\sin\alpha\sin\varphi-\beta\cos\varphi\right)\dot{\varphi}= -\cos\frac{\omega}{2}\left(\sin\alpha\sin\varphi-\beta\cos\varphi\right),$$ $$-\frac{\dot{\omega}}{2}\sin\frac{\omega}{2}=\dot{y}\sin\alpha-\beta\dot{x}= \sin{\frac{\omega}{2}}\left(\sin\alpha\sin\varphi-\beta\cos\varphi\right),$$ whence \begin{equation} \label{init} \dot{\omega}=2(\beta\cos\varphi-\sin\alpha\sin\varphi),\quad \dot{\omega}(0)=2\dot{\xi}(0)=2\beta. \end{equation} Differentiating the last equality, we get in view of formulae (\ref{s00}) and (\ref{sisteme1}) \begin{equation} \label{pendulum} \ddot{\omega}=-2(\beta\sin\varphi+\sin\alpha\cos\varphi)\dot{\varphi}=-2\sin\frac{\omega}{2}\cos\frac{\omega}{2}= -\sin\omega. \end{equation} We again obtain the mathematical pendulum equation. \end{document}
\begin{document} AMS 517.977.5 \begin{center} {\Large \bf Equivalent substitution in the control theory} \end{center} \begin{center} {\bf I.M. Proudnikov} \end{center} \begin{center} {\em pim\underline{ }[email protected]}, ph.num. +79203011393 \end{center} In this paper a system of differential equations with a control is considered. We study the problem of finding an optimal control that gives an infimum for an optimized functional. The system of differential equations is replaced by two systems with the upper and lower envelopes of a function on the right-hand side of the initial system of differential equations. The optimized functional is replaced by its lower envelope. All replacements are done in a region of attainability. The necessary conditions of optimality are sufficient for the substituted system. The rules for evaluation of the attainability set with the help of positively definite functions are given in the second part of the paper. {\bf Key words.} Optimal control, optimal trajectories, convex functions, lower and upper convex envelopes, attainability set, convex analysis, linear and convex functions. \normalsize \section{Introduction} A system of differential equations whose right-hand side includes a control $u$ is considered. The problem is formulated as follows: find a control $u$ from a set $U$ for which a given functional $J(\cdot)$ attains its optimum. The rules for an equivalent replacement of our system by a simpler system are given, for which the functional $J(\cdot)$ attains the same optimum. Moreover, the necessary optimality conditions turn into sufficient optimality conditions. All replacements are done in a region of attainability and consist in construction of the upper concave or lower convex envelopes of the optimized functional $J(\cdot)$ and of the functions on the right-hand side of the system. A method for construction of the envelopes is given in the Appendix.
Many specialists in different areas are interested in evaluations of a region of attainability (see \cite{chernousko}--\cite{kurganckivalyi2}). The author suggests a new method for evaluation of a region of attainability. This method is based on the study of differential equations of the first order with $n-1$ constants, where $n$ is the order of the system of differential equations. The solutions of the differential equations of the first order are the projections of the solutions of the system of differential equations on a chosen direction $g$ (see further). Changing the constants, the direction $g$, and solving the differential equation of the first order, we can obtain a sufficiently precise evaluation of a region of attainability. Consider the following general problem of the control theory. Suppose we have a system of differential equations \begin{equation} \dot{x} (t) = \varphi (x (t), u (t)), \, \, \, \, \, x (0) = x_0, \label{eqsub1} \end{equation} $\varphi(\cdot)=(\varphi_1(\cdot), \varphi_2(\cdot), \cdots, \varphi_n(\cdot))^*$, and an optimized functional has the form \begin{equation} J (u) = \int^T_0 f (x (\tau), u (\tau)) d \tau \rightarrow \inf_{u}, \label{eqsub2} \end{equation} where $ x (t) \in \mathbb{R}^n $, $ u (t) $ takes values in $ U \subset \mathbb{R}^r$, where $U$ is a convex compact set in $ \mathbb{R}^r $, $ t \in [0, T] $. We assume that the function $ f (\cdot, \cdot): \mathbb{R}^n \times \mathbb{R}^r \rightarrow \mathbb{R} $ is continuous and the function $ \varphi (\cdot, \cdot) $ is a Lipschitz one in all arguments in totality, so that the system (\ref{eqsub1}) satisfies the conditions of uniqueness for a solution with the given initial values. We consider autonomous systems of differential equations; this does not restrict the generality of consideration since, if in the non-autonomous case $ \varphi (\cdot) $ is Lipschitz in all arguments uniformly in $t$, then all argumentations made below will be true as well.
We have to find an optimal control $ u (\cdot) $ that is a piecewise continuously differentiable vector-function from $ KC^1 [0, T] $ with values in $ U $. We will assume that the derivatives $ u'(\cdot) $, where they exist, are bounded in the norm, i.e. $$ \| u'(t) \| \leq C \, \, \, \forall t \in \aleph_u [0, T], $$ uniformly in $u(\cdot)$. Here $ \aleph_u [0, T] $ is the set of points in $ [0, T] $ where the derivatives $ u'(\cdot) $ exist. In this case pointwise convergence of a sequence $ \{u_k (\cdot) \} $ on $ [0, T] $ is equivalent to uniform convergence of the functions $ u_k (\cdot) $ on a set of continuity and, of course, is equivalent to convergence in the metric $ \rho $ of the space $KC^1 [0, T] $. The metric of $KC^1 [0, T] $ is equal, by definition, to the metric of the space $ C [0, T] $, i.e. $$ \rho (u_1 (t), u_2 (t)) \stackrel{\mathrm{def}}{=} \max_{t \in [0, T]} \| u_1 (t) - u_2 (t) \|. $$ The function $ u (\cdot) $ is defined on the segment $ [0, T] $. We are looking for $ u (\cdot) $ for which a solution to the system (\ref{eqsub1}) gives an infimum for the functional $ J (\cdot) $. In the beginning we replace the optimization problem (\ref{eqsub2}) by the following problem \begin{equation} J (u, t) = \int^t_0 f (x (\tau), u (\tau)) d \tau \longrightarrow \mbox{inf} \,_{u \in KC^1 [0, T], t \in [0, T]}. \label{eqsub2b} \end{equation} Let us include into consideration all the functions $u(t) \in KC^1 [0, T]$ resulting from pointwise convergence. It is obvious that all limit functions belong to a closed, bounded set of functions defined on $ [0, T] $, which we denote by ${\cal KC}^1 [0, T] $. Two functions from the set ${\cal KC}^1 [0, T] $ are equivalent (equal) if these functions are equal on a set of full measure. It is clear that all measurable functions on $[0,T]$ belong to ${\cal KC}^1 [0, T] $.
We will solve the optimization problem (\ref{eqsub2b}) formulated above on the set ${\cal KC}^1 [0, T] $, i.e. \begin{equation} J (u, t) \longrightarrow \mbox{inf} \,_{{u \in{\cal KC}^1 [0, T]},{t \in [0, T]}}. \label{eqsub2a} \end{equation} The problem is that an optimal control does not always exist. For this reason a generalized control (lower or upper semicontinuous) is considered. As an example, consider the following system of differential equations $$ \dot{x} (t) = u, \,\,\,\, x(0)=0, $$ and the optimized functional is defined as $$ J (u) = \int^1_0 ((1-u^2)^2 + x^2) d \tau \rightarrow \inf_{u \in{\cal KC}^1 [0,1]}. $$ This problem does not have an optimal control $ u (\cdot) $ in the set of piecewise continuously differentiable functions on $[0,1] $, but it has an optimizing sequence of controls $ \{ u_k (\cdot)\} $ that are piecewise continuous functions with values $ \pm 1 $. It is easy to see that the optimizing sequence $\{ x_k (\cdot) \}$, corresponding to the sequence of controls $ \{ u_k (\cdot)\} $, has the limit $ x \equiv 0 $ on $ [0,1] $. The control $u(\cdot) \equiv 0$, which corresponds to the solution $x\equiv 0$, cannot be obtained as the pointwise limit of $ \{ u_k (\cdot)\} $, and it is not an optimal control. The right-hand side of the equation (\ref{eqsub1}) can be very complex, and the solution of this equation can often be found only approximately, using numerical methods. Optimization of the function $ J (\cdot) $ is also not easy if it has a complex form. But optimization of the lower convex envelope (LCE) of $ J (\cdot) $ is easier. Moreover, a global optimum point does not disappear if we construct the lower convex envelope of our optimized functional. In addition, the construction of the lower convex envelope of $ J (\cdot) $, which we denote by $ \tilde{J} (\cdot) $, turns it into a lower weakly semicontinuous function.
It means that $$ \underline{\lim}_{u_k \rightarrow u} J (u_k) \geq J (u) $$ for any sequence $ \{u_k \} $ converging to $u$ weakly. This requirement is important for weak convergence of an optimizing sequence to a solution of the problem (\ref{eqsub2}). We propose here a method of equivalent substitution with the help of which we can overcome these difficulties. Namely, we suggest a replacement of the right-hand side of the equation (\ref{eqsub1}) by another function with a simpler structure. The search for solutions of the system (\ref{eqsub1}) (numerical or not) becomes simpler. The principle of equivalent replacement claims that, although we have another function with a simpler structure, the function $\tilde J (\cdot) $ attains the same infimum on the set of piecewise continuously differentiable functions and on its closure. At the same time the new optimized functional $ \tilde J (\cdot) $ becomes lower semicontinuous. This idea is different from the idea of relaxation \cite{cesary} because we take the convex (concave) envelopes of the functions on the right hand side of our system of the differential equations for all points from the region of attainability. Taking into consideration the information about the replacements of the functions $ \varphi (\cdot) $ and $ J (\cdot) $, we can conclude that searching for an optimal control and an optimal trajectory becomes easier and the new optimization problem is equivalent to the initial one in the sense of finding an optimal control. In this case the conditions of optimality become necessary and sufficient. For the first time the author used the idea of replacing a function with its lower convex approximation for finding its optimal points in \cite{lowapp1}--\cite{lowapp3}. \section{The principle of equivalent replacement} Let us consider the same system of differential equations (\ref{eqsub1}), and the optimized functional (\ref{eqsub2a}).
We rewrite the system (\ref{eqsub1}) and (\ref{eqsub2a}) in the following form \begin{equation} \left \{ \begin{array}{ll} \dot{x} (t) = \varphi (x (t), u (t)) , \\ \dot{y} (t) = f (x (t), u (t)) \end{array} \right. \label{eqsub3} \end{equation} with the initial conditions $ x (0) = x_0, \, y (0) = 0 $. The optimization problem (\ref{eqsub2a}) is replaced by another optimization problem \begin{equation} y (t, u) \longrightarrow \inf_{{u \in{\cal KC}^1 [0, T]},{t \in [0, T]}}. \label{eqsub4} \end{equation} Any solution of (\ref{eqsub3}) is a solution of the integral equations \begin{equation} \left \{ \begin{array}{ll} x (t) = \int^t_0 \varphi (x (\tau), u (\tau))d\tau + x_0, \\ y (t) = \int_0^t f (x (\tau), u (\tau))d \tau, \\ u (t) \in{{\cal KC}^1 [0, T]}, \, \, t \in [0, T], \end{array} \right. \label{eqsub5} \end{equation} where $ u (\cdot) $ is a piecewise continuously differentiable control. Unite all solutions of (\ref{eqsub5}) in one set $ D (t) $ for $t \in [0,T]$ $$ D (t) = \{(x, y, z) \mid x= x (t) = \int^t_0 \varphi (x (\tau), u (\tau)) d \tau + x_0, \, \, $$ \begin{equation} y=y (t) = \int_0^t f (x (\tau), u (\tau)) d \tau,{} z = u (t) \in{\cal KC}^1 [0, T] \}, \end{equation} which is called the set of attainability for the systems (\ref{eqsub1}) and (\ref{eqsub2a}) at time $ t \in [0, T] $. It is easy to see that the optimization problem (\ref{eqsub4}) is equivalent to the following optimization problem \begin{equation} L (x, y, z) = y \longrightarrow \inf_{(x, y, z) \in{\bigcup_{t \in [0, T]}} D (t)}. \label{eqsub6} \end{equation} The function $ L (x, y, z) = y $ is linear in the coordinates $ (x, y, z) $. (The function $ L (\cdot, \cdot, \cdot) $ depends only on the coordinate $ y $.) It is well known that any linear function reaches its maximum or minimum on the boundary of any compact set on which the maximum or minimum is looked for.
Since the set of solutions of (\ref{eqsub1}) in accordance with the assumptions is bounded on $ [0, T] $, the set of the vector-valued functions $u(\cdot)$ is closed and bounded in ${\cal KC}^1 [0, T] $. Then $ D (t) $ is closed and bounded for any $ t \in [0, T] $ in the metrics $ \rho $ of the space ${\cal KC}^1 [0, T] $. Indeed, if $u_k \xrightarrow{\rho}{ } u \in U $, then, as it was mentioned above, there is uniform convergence of $ u_k (\cdot) $ to $ u (\cdot) $ on $ [0, T] \backslash e $, where $e$ is a set of any small measure. Then convergence in measure \cite{kolmogoroffomin} holds. The convergences $$ \varphi (x (\tau), u_k (\tau)) \rightarrow_k \varphi (x (\tau), u (\tau)) \, \, \, \forall \tau \in [0, T], $$ and $$ f (x (\tau), u_k (\tau)) \rightarrow_k f (x (\tau), u (\tau)) \, \, \, \forall \tau \in [0, T]. $$ follow from continuity of the functions $ \varphi (\cdot, \cdot) $, $ f (\cdot, \cdot) $ in all variables. Uniform convergence of the integrals $$ \int^t_0 \varphi (x (\tau), u_k (\tau)) d \tau \rightarrow_k \int^t_0 \varphi (x (\tau, u (\tau)) d \tau $$ and $$ \int^t_0 f (x (\tau), u_k (\tau)) d \tau \rightarrow_k \int^t_0 f (x (\tau), u (\tau)) d \tau. $$ in $ t \in [0, T] $ and $ k $ follows from Egorov's theorem \cite{vulich}. Indeed, otherwise the sequences $ \{T_k \} $ and $ \{u_k (\cdot) \} $ exist, for that and for some $ \varepsilon> 0 $, the inequalities $$ \mid \int^{t_{k}}_0 \varphi (x (\tau), u_k (\tau)) d \tau - \int^{t_{k}}_0 \varphi (x (\tau, u (\tau)) d \tau \mid> \varepsilon $$ and $$ \mid \int^{t_{k}}_0 f (x (\tau), u_k (\tau)) d \tau - \int^{t_{k}}_0 f (x (\tau), u (\tau)) d \tau \mid> \varepsilon. $$ hold. The integrals can be considered as functions of $ t $. According to Egorov's theorem for any small $ \deltata> 0 $ there is a set $ e $ with measure $ \mu (e) <\deltata $ , that the integrals, as the functions of $ t $, will converge uniformly in $ k $ on a set $ [0, T] \backslash \, e $. 
Since the integrals are absolutely continuous, the integrals over the set $e$ with measure $\mu(e)<\delta $ will be arbitrarily small if $ \delta $ is also arbitrarily small. As a result, we come to a contradiction with the existence of $\varepsilon $, for which the inequalities, written above, are true. Thus we have proved the following theorem. \begin{thm} The set $ D (t) $ is closed and bounded in ${\cal KC}^1 [0, T] $ for any $ t \in [0, T] $ in the metric $ \rho $ of the space $ KC^1 [0, T] $. \end{thm} Consider a sequence of the functions defined on $ [0, T] $, \begin{equation} x_{k +1} (t) = \int^{t}_0 \varphi (x_{k} (\tau), u_k (\tau)) d \tau. \label{eqsub7} \end{equation} The sequence $ \{x_k (\cdot) \} $ converges on $ [0, T] $ uniformly in $ k $, if the sequence $ u_k (\cdot) $ converges in the metric $ \rho $ to the function $ u (\cdot) $ a.e. on $ [0, T] $. Let us prove this fact. Indeed, we know from what was said above that the functions $ u_k (\cdot) $ converge to $ u (\cdot) $ uniformly on $ [0, T] $. We replace the control $ u_k (\cdot) $ by the control $ u (\cdot) $ in (\ref{eqsub7}). The difference between the original value of the integral (\ref{eqsub7}) and the new value of the same integral can be evaluated in the following way. According to the inequality $$ \mid \varphi (x_{k} (\tau), u_k (\tau)) - \varphi (x_{k} (\tau), u (\tau)) \mid \leq L \mid u_k (\tau) - u (\tau) \mid \, \, \, \forall \tau \in [0, T], $$ where $ L $ is a Lipschitz constant of the function $ \varphi (\cdot, \cdot) $, the above-mentioned difference is arbitrarily small for large $ k $ as well. Indeed, we have $$ \mid \int_0^t \varphi (x_{k} (\tau), u_k (\tau)) d \tau - \int_0^t \varphi (x_{k} (\tau), u (\tau)) d \tau \mid \leq L \int_0^t \mid u_k (\tau) - u (\tau) \mid d \tau \, \, \, \, \forall t \in [0, T] $$ and the right hand side of this inequality is arbitrarily small for large $k$. We will use the following result.
\begin{lem} \cite{tricomi}, \cite{krasnov}. The sequence $ \{x_k (\cdot, u) \}, k = 1,2, \dots $ converges uniformly for $u \in U $ and $ k $ to a solution of (\ref{eqsub1}). \end{lem} Summing up everything mentioned above, we can conclude about uniform convergence on $ [0, T] $ of the solutions $ x_k (\cdot) $ of (\ref{eqsub1}) for $ u = u_k(\cdot) $ to a solution $ x (\cdot) $ of the same system (\ref{eqsub1}) with the control $ u (\cdot) $ as $ k \rightarrow \infty $. \begin{lem} The sequence $ \{x_k (\cdot) \}, k = 1,2, \dots, $ defined by (\ref{eqsub7}), converges uniformly on $ [0, T] $ in $k$ to a solution $ x (\cdot) $ of (\ref{eqsub1}). \label{lemequivsubst1} \end{lem} \begin{rem} Lemma \ref{lemequivsubst1} is also valid for the case when $ u_k \rightarrow u $ in the metric $ \rho_1 $ of the space $ L_1 [0, T] $, i.e. $$ \rho_1 (u_k, u) = \int^T_0 \mid u_k (\tau) - u (\tau) \mid d \tau. $$ \end{rem} The problem (\ref{eqsub6}) has a solution if the functional (\ref{eqsub2}) is lower semicontinuous. It will be shown how to make it lower semicontinuous. If there is a solution to the problem (\ref{eqsub6}) on the set of piecewise continuously differentiable functions $ KC^1 [0, T] $, then we have a solution to the problem \begin{equation} L (x, y, z) = y \longrightarrow \inf_{y \in \mathrm{co} \bigcup_{t \in [0, T]} D (t)}, \label{eqsub8} \end{equation} where $ \mathrm{co} $ is the symbol of taking the convex hull. We introduce a set of attainability (or an attainability set) for the time $ T $, which, by definition, is \begin{equation} D_T = \overline{\mathrm{co}} \, \cup_{t \in [0, T]} \, D (t), \label{eqsub8a} \end{equation} where $ \overline{\mathrm{co}} $ means the closed convex hull.
It is easy to see that for an arbitrary $$ (x_k (t_k), y (t_k), u_k(t_k)) \in D (t_k) $$ such as \[(x_k (t_k), y (t_k), u_k(t_k)) \rightarrow_k (x (t), y (t), u(t) ) \] and \[t_k \in [0, T], \, t \in [0, T], \, \, \, t_k \rightarrow_k t, \] the inclusion \[(x (t), y (t), u(t)) \in D (t) \] will be true. Therefore, the closure in (\ref{eqsub8a}) can be removed and definition of the set $ D_T $ can be given as the following \begin{equation} D_T ={\begin{equation}gin{center}o} \, \cup_{t \in [0, T]} \, \lambdabel{eqsub8c} D (t). \end{equation} Moreover, the problems (\ref{eqsub6}) and (\ref{eqsub8}) are equivalent that means: if one of them has a solution, then the other one has a solution as well and these solutions are equal to each other. In addition, since projections of the set $ D (t), t \in [0, T] $ on the axes $ x, y $ are closed and bounded, and, hence, compact in the corresponding finite-dimensional spaces and $D(\cdot)$ is continuous in $ t $ as the set-valued mapping, then $ \inf $ in (\ref{eqsub8}) can be replaced by $ \min $ and the problem (\ref{eqsub8}) can be rewritten in the following way \begin{equation} L (x, y, z) = y \longrightarrow \min_{y \in D_T}. \lambdabel{eqsub8b} \end{equation} But a global optimal point of the problem (\ref{eqsub8b}) will not change if we replace the functions $\varphi_i(\cdot, \cdot), i \in 1:n,$ by their upper concave and lower convex envelopes and $f(\cdot)$ by its lower convex envelope constructed on a set of attainability for the time $ T $. Further we will understand under taking the upper concave and lower convex envelopes of $\varphi(\cdot, \cdot)$ the similar operations for all coordinates of $\varphi(\cdot, \cdot)$. Indeed, take two arbitrary points $ (x_1 (t), y_1 (t), u_1) $ and $ (x_2 (t), y_2 (t), u_2), $ $ t \in [0, T] $ from the set $ D (t) $. Consider a combination with nonnegative coefficients $ \alpha_1 $, $ \alpha_2 $, $ \alpha_1 + \alpha_2 = 1 $. 
Then, the point $ (\alpha_1 x_1 (t) + \alpha_2 x_2 (t), \alpha_1 y_1 (t) + \alpha_2 y_2 (t), \alpha_1 u_1 + \alpha_2 u_2) $ will belong to the set $ \begin{equation}gin{center}o D (t) $, if we replace the functions $ \varphi (\cdot, \cdot) $ and $ f (\cdot, \cdot) $ by the following: $$ \tilde{\varphi} (\alpha_1x_1 (\tau) + \alpha_2 x_2 (\tau), \alpha_1 u_1 + \alpha_2 u_2) = \alpha_1 \varphi (x_1 (\tau), u_1) + \alpha_2 \varphi (x_2 (\tau), u_2) $$ and $$ \tilde{f} (\alpha_1x_1 (\tau) + \alpha_2 x_2 (\tau), \alpha_1 u_1 + \alpha_2 u_2) = \alpha_1 f (x_1 (\tau), u_1) + \alpha_2 f (x_2 (\tau), u_2). $$ But this construction, performed for all points of the regions $ D (t), t \in [0, T], $ it just means that we construct the lower and upper convex envelopes of the function $ \varphi (\cdot, \cdot) $ and the lower convex envelope of the functions $f(\cdot, \cdot)$ in the attainability set for the time $ T $, i.e. in $D_T$. Indeed, it follows from the above formula (\ref{eqsub8c}) for $D_T$ that the function $L(\cdot, \cdot, \cdot)$ reaches its minimum (\ref{eqsub8b}) on some $D(\tau), \tau \in [0,T]$. Consequently, we can construct the lower convex or upper concave envelopes of the functions $\varphi(\cdot)$ and $f(\cdot, \cdot)$ in all region $D_T$. Denoted by $ \tilde{J} (\cdot, \cdot) $ a new optimization function obtained after the replacement of the function $ f (\cdot, \cdot) $ by $ \tilde{f} (\cdot, \cdot) $ in $ D_T \, $ $$ \tilde{J} (u, t) = \int^t_0 \tilde{f} (x (\tau), u (\tau)) d \tau. $$ It is clear that $ \tilde{J} (\cdot, \cdot) $ takes the same optimal value in the attainability set $ D_T $, that the functional $ (\ref{eqsub2a}) $ $ J (\cdot, \cdot) $ takes for the system (\ref{eqsub1}). 
Replace the system $ (\ref{eqsub1}) $ by the system \begin{equation} \dot{x} (t) = - \varphi (x (t), u (t)), \, \, \, \, \, x (0) =-x_0, \label{eqsub9} \end{equation} and the optimized functional by the functional \begin{equation} J (u, t) = \int^t_0 f (-x (\tau), u (\tau)) d \tau. \label{eqsub10} \end{equation} It is easy to see that the minimum or the maximum of the functional $ J (\cdot, \cdot) $ did not change. Hence, the problems $ (\ref{eqsub1}) $, $ (\ref{eqsub2a}) $ and $ (\ref{eqsub9}) $, $ (\ref{eqsub10}) $ are replaceable. So ``convexification'' of the function $ \varphi (\cdot, \cdot) $, in contrast to the procedure of ``convexification'' of the function $ f (\cdot, \cdot) $, should be as follows: \begin{enumerate} \item Construction of the lower convex envelope (LCE) of the function $ \varphi_i (\cdot, \cdot) $ in the variables $ (x, u) $ for each $i \in 1:n$ from the attainability set for the time $T$, i.e. $ D_T $, which we denote by $ \tilde{\varphi}_{1i} (\cdot, \cdot) $. The LCE of $ \varphi_i (\cdot, \cdot) $ is the biggest convex function that does not exceed $ \varphi_i (\cdot, \cdot) $ in $D_T$. \item Construction of the upper concave envelope (UCE) of the function $ \varphi_i (\cdot, \cdot) $ (or, equivalently, we construct the lower convex envelope for the function $ - \varphi_i (\cdot, \cdot) $ and after that take minus of this function) in the variables $ (x, u) $ for each $i \in 1:n$ from the attainability set for the time $T$, i.e. $ D_T $, which we denote by $ \tilde{\varphi}_{2i} (\cdot, \cdot) $. The UCE of $\varphi_i(\cdot, \cdot)$ is the smallest concave function that is not less than $\varphi_i(\cdot,\cdot)$ in $D_T$.
\item Let us replace the system $ (\ref{eqsub1}) $, $ (\ref{eqsub2a}) $ by two systems of the equations: \begin{equation} \dot{x} (t) = \tilde{\varphi}_1 (x (t), u (t)), \, \, \, \, \, x (0) = x_0, \, \, u (\cdot) \in{\cal KC}^1 [0, T] \label{eqsub11} \end{equation} with the optimization function $ \tilde{J} (u, t) $ and \begin{equation} \dot{x} (t) = \tilde{\varphi}_2 (x (t), u (t)), \, \, \, \, \, x (0) = x_0, \, \, u (\cdot) \in{\cal KC}^1 [0, T] \label{eqsub12} \end{equation} with the same optimization function $ \tilde{J} (u,t) $; \item Let us find among the solutions of $ (\ref{eqsub11}) $ and $ (\ref{eqsub12}) $ those that give the smallest value of the functional $ \tilde{J} (u, t) $ in $ D_T $. \end{enumerate} We obtain the following result. \begin{thm} There are solutions among the solutions of $ (\ref{eqsub11}) $ and $ (\ref{eqsub12}) $ that deliver a minimum (maximum) in $u(\cdot) \in {\cal KC}^1 [0, T]$ and $t \in [0,T]$ for the functional $$ \tilde{J} (u, t) = \int^t_0 \tilde{f} (x (\tau), u (\tau)) d \tau, $$ which coincides with an infimum (supremum) of the functional $ J (u, t) $ (see (\ref{eqsub2a})). Moreover, necessary conditions for the minimum (maximum) are also sufficient conditions. \end{thm} \begin{rem} The set $ D (T) $ is not necessarily compact, although its projections on the axes $ x, y $ are compact. That is why we are able to pass to the problem $$ L (x, y, z) = y \longrightarrow \min_{y \in D_T}, $$ if the problem (\ref{eqsub6}) has a solution. The last one coincides with the formulation of Mazur's theorem. It asserts that for any weakly convergent sequence $ \{u_k (\cdot) \} \subset L_p ([0, T]) $, $ u_k (\cdot) \longrightarrow u (\cdot), $ one can choose, for each $ k $, convex combinations of its elements that converge almost everywhere on $ [0, T] $, as $ k \rightarrow \infty $, to some $ u (\cdot) \in L_p [0, T] $.
In our case, there exists a sequence $ \{u_k (\cdot) \} \in {\cal KC}^1 [0, T] $, the convex hull of which will converge to an optimal control $ u (\cdot) \in {\cal KC}^1 [0, T] $. The sequence of the solutions $ \{x_k (\cdot) \} $, corresponding to the controls $ u_k (\cdot)$, will converge to an optimal solution $ x (\cdot) $, corresponding to the control $ u (\cdot) $, provided that the solutions have been calculated to the problems with the modified right-hand side. \end{rem} \begin{equation}gin{rem} The rules for construction of LCE and UCE are given in Appendix. \end{rem} \begin{equation}gin{rem} In many cases we have to construct only LCE or UCE for the function $ \varphi(\cdot)$. \end{rem} Return back to the initial problem (\ref{eqsub2}) with the fixed time $T$. Consider a set $$ D(t) = \{ (x,y,z) \mid x=x(t)= \int^t_0 \varphi(x(\tau),u(\tau))d\tau+ x_0, $$ $$ y= y(T)= \int_0^T f(x(\tau),u(\tau))d\tau, {} z=u(t) \in {\cal KC}^1 [0,T] \}, $$ that is called the set of attainability of the system (\ref{eqsub1}),(\ref{eqsub2}) at time $t$. Let us introduce a set of attainability for the time $T$ for the system (\ref{eqsub1}),(\ref{eqsub2}) that is by definition \begin{equation} D_T= \overlineine{\begin{equation}gin{center}o} \, \cup_{t \in [0,T]} \, D(t). \lambdabel{eqsub12a} \end{equation} As above it is possible to prove that we can remove the closure in (\ref{eqsub12a}) and write $$ D_T= {\begin{equation}gin{center}o} \, \cup_{t \in [0,T]} \, D(t). $$ The optimization problem can be reformulated in the form \begin{equation} L(x,y,z)=y \longrightarrow \inf(\sup)_{(x, y, z) \in D_T}. \lambdabel{eqsub13a} \end{equation} The problems (\ref{eqsub1}), (\ref{eqsub2}) and (\ref{eqsub13a}) are equivalent which means if one has a solution, then another one has a solution and these solutions are the same. 
Moreover, as soon as the projections of the sets $D(t), t \in [0,T],$ on the axes $x,y$ are closed, bounded and continuous as set-valued mappings, then we can write $\min, \max$ instead of $\inf, \sup$ if a solution of (\ref{eqsub13a}) exists. We come to the following result. \begin{thm} There are some solutions among the solutions of $ (\ref{eqsub11}) $ and $ (\ref{eqsub12}) $ that deliver a minimum (maximum) in $u(\cdot) \in {\cal KC}^1 [0, T]$ and $t \in [0,T]$ to the functional $$ \tilde{J} (u) = \int^T_0 \tilde{f} (x (\tau), u (\tau)) d \tau, $$ which coincides with an infimum (supremum) of the functional $ J (u) $ (see (\ref{eqsub2})), where $ \tilde{f} (\cdot, \cdot) $ is the LCE of the function $ {f} (\cdot, \cdot) $. Moreover, the necessary conditions for the minimum (maximum) are also the sufficient conditions. \end{thm} Consider some examples. It is clear that an equivalent replacement of one system by another can be applied to a differential system without control $u$. Example 1. Consider the differential equation $$ \dot{x} (t) = \varphi (x (t)) = \left \{ \begin{aligned} (x-1)^2, \mbox{ if $ x \geq 0 $} \\ (x +1)^2, \mbox{ if $ x <0 $} \\ \end{aligned} \right. $$ with the initial condition $ x (0) = 0 $. The optimized functional is given by $$ f (x (t)) = x^2 (t) \rightarrow \min \, \, \, \mbox{for $ t \in (- \infty, + \infty) $}. $$ The general solution of the differential equation for $ x \geq 0 $ has the form $$ x (t) = - \frac{1}{t + c} +1, $$ which tends to $ 1 $ as $ t \rightarrow \infty $. The general solution of the differential equation for $ x <0 $ is given by $$ x (t) = - \frac{1}{t-c} -1, $$ which tends to $ -1 $ as $ t \rightarrow \infty $. In order to meet the initial condition we have to put $ c = 1$.
The projection of the attainability set $ D_{(- \infty, + \infty)} $ on the $ OX $ axis is the interval $ (-1, +1).$ It is clear that the function $ f (\cdot) $ takes its minimum at $ x = 0 $. But we get the same solution if instead of the function $ \varphi (\cdot) $ we take its lower convex envelope, namely, the function $$ \tilde{\varphi} (x) = \begin{cases} (x-1)^2, & \mbox{if $ x \geq 1 $} \\ 0, & \mbox{if $ -1 \leq x \leq 1 $} \\ (x +1)^2, & \mbox{if $ x <-1 $}. \\ \end{cases} $$ Example 2. The same example, but $$ {J} (x, u) = \int^t_0 x^2 (\tau) d \tau, $$ which we minimize for $ t \in [0,1] $. The equation of the solution is $$ x (t) = - \frac{1}{t +1} + 1. $$ Here the replacement of the function $ \varphi (\cdot) $ by the function $ \tilde{\varphi} (\cdot) $ on the whole line is not correct, since the projection of the attainability set $ D_{1} $ on the $ OX $ axis is the interval $ [0, 1/2] $. Example 3. Let us consider the differential equation $$ \dot{x} (t) = x^2 $$ with the initial condition $ x (0) = 1 $. The general solution has the form $$ x (t) = - \frac{1}{t + c}, $$ and a solution, satisfying the initial condition, is $$ x (t) = - \frac{1}{t-1}. $$ The optimized functional has the form $$ {J} (x, u) = \int^t_0 (-x^2 (\tau)) d \tau \longrightarrow \inf_{t} $$ for $ t \in [0,1] $. It is easy to compute its optimal value $$ {J} (x, u) = \int^1_0 (-x^2 (\tau)) d \tau = \int^1_0 (- \dot{x} (\tau)) d \tau = x (0)-x (1) = - \infty. $$ In this case, the projection of the attainability set $ D_{1} $ on the $ OX $ axis is the set $ (- \infty, 0) \cup [1, + \infty) $. It is easy to see that the lower convex envelope of the functional $ J (\cdot) $ on $ D_{1} $, which we denote by $ \tilde{J} (\cdot) $, takes the same infimum value. It is also true for the functional $$ {J} (x, u) = \int^{\infty}_1 (-x^2 (\tau)) d \tau = \int^{\infty}_1 (- \dot{x} (\tau)) d \tau = x (1)-x (+ \infty) = - \infty. $$ Example 4.
Let us consider the following problem $$ \dot {x}(t)= x \sin(1/x) + u, \,\,\, x(0)=0. $$ The optimized functional is $$ J(u)=\int_0^{\infty} \mid u(\tau)-x(\tau) \mid d \tau \rightarrow \inf_u. $$ We will get the following systems after construction of the lower and upper envelopes $$ \dot {x}(t)= x + u, \,\,\, x(0)=0 $$ and $$ \dot {x}(t)= - x + u, \,\,\, x(0)=0. $$ The optimal solution exists among their solutions: $x(t)\equiv u(t)= 0$. Example 5. Let us consider the differential equation $$ \dot {x}(t)= x^2 - u^2 $$ with the initial condition $x(0)=0$. We are considering piecewise continuously differentiable functions $u(\cdot), |u(\cdot)| \leq 1, $ on the segment $[0,1]$ and look for one that delivers a minimum to the functional $$ J(u)=\int_0^1 x^2(\tau) d\tau . $$ The solution when $u(\cdot)$ is constant on the segment $[0,1]$ has the form $$ x(t)=\frac{u(1 - e^{2u(t+c)})}{1 + e^{2u(t+c)}}. $$ Here the constant $c$ is defined by the initial conditions. We can see from here that if $u(\cdot)$ is not constant on $[0,1]$, then $| x(\cdot) | \leq | u(\cdot) |$ for any initial conditions. It means that a curve $x(t), t \in [0,1],$ will be in a set bounded by the lines $x=\pm u$ on the plane $XOU$, where $| x(\cdot) | \leq | u(\cdot) |$. The set of attainability $D_T, T=1,$ will belong to the same set. The UCE of the function $\varphi(x,u) = x^2 - u^2$ in $D_T$ is a function the graph of which goes through the point $(0,0,0)$. Therefore, if we solve the differential equations with the right sides $\tilde{\varphi}_1(\cdot,\cdot)$ and $\tilde{\varphi}_2(\cdot,\cdot)$, then among the solutions there are those that deliver the minimum $0$ to the functional $J(\cdot)$, i.e., the formulated theorem is true.
\section{\bf An evaluation of the attainability set} Let us have a system of differential equations \begin{equation} \dot{x} (t) = \varphi (x, u), \, \, \, x \in \mathbb{R}^n, \, \, t \in [0, T], \, \, u (t) \in U \subset \mathbb{R}^r \lambdabel{eqsub13} \end{equation} with the initial condition $ x (0) = 0 $, where $ \varphi (\cdot, \cdot) $ is Lipschitz in the variables $ x, u $, $ U $ is a convex compact set in $ \mathbb{R}^r $. The problem is to estimate the attainability set. By definition, the area of attainability for the time $ T $ is the set $$ D_T = \overlineine{\begin{equation}gin{center}o} \, \cup_{t \in [0, T]} \, D (t), $$ where $$ D (t) = \{x \in \mathbb{R}^n \mid x = x (t) = \int^t_0 \varphi (x (\tau), u (\tau)) d \tau, \, \, u (\tau) \in U, u (\cdot) \in {\cal KC}^1 [0, T] \}. $$ The choice of the initial position and the initial time of zero is not a loss of generality. Take an arbitrary positively definite function $ V (x) $ (see \cite{zub1}), satisfying the condition $$ m_1 \| x \|^2 \leq V (x) \leq m_2 \| x \|^2. $$ Let $$ \varphi (x, u, t) = \varphi_1 (x, u, t) + \varphi_2 (x, u, t) $$ and $v(\cdot,\cdot): [0,T] \times U \rightarrow \mathbb{R}^n$ is a piecewise continuous vector-function. Consider the systems of differential equations \begin{equation} \dot{x} (t) = \tilde{\varphi}_1 (x, u) = \varphi_1 (x, u)+v(u,t) , \, \, t \in [0, T], \lambdabel{eqsub14} \end{equation} and \begin{equation} \dot{x} (t) = \tilde{\varphi}_2 (x, u) = \varphi_2 (x, u)-v(u,t). \, \, t \in [0, T], \lambdabel{eqsub15} \end{equation} We denote by \begin{equation} D_T^{(i)} = \overlineine{\begin{equation}gin{center}o} \, \cup_{t \in [0, T]} \, D_i (t), \, i=1,2, \lambdabel{eqsub16} \end{equation} the attainability sets for the systems (\ref{eqsub14}), (\ref{eqsub15}), where $$ D_i (t) = \{x \in \mathbb{R}^n \mid x = x_i (t) = \int^t_0 \tilde{\varphi}_i (x (\tau), u (\tau)) d \tau, \, \, u (\tau) \in U, $$ \begin{equation} u (\cdot) \in{\cal KC}^1 [0, T] \}, i = 1,2. 
\lambdabel{eqsub17} \end{equation} Let the estimates of the attainability sets for the time $ T $ be given respectively by the inequalities $$ 0 \leq V (x) \leq c_{1}, \, \, \, 0 \leq V (x) \leq c_{2}. $$ We get the estimation for the attainability set of the system (\ref{eqsub13}) for the time $ T $. We show that the attainability set $ D_T $ for this system satisfies the inclusion $$ D_T \subset D_T^{(1)} + D_T^{(2)}. $$ Indeed, by definition, the set $ D_T^{(1)} + D_T^{(2)} $ will consist of the points on the curves the tangents to which are the sum of the tangents to the curves consisting of the points of the sets $ D_{1} (t_1) $ and $ D_{2} (t_2) $ for all $ t_1, t_2 \in [0, T] $. It is clear, that for some vector-function $v(\cdot,\cdot)$ the resulting set will include $ D_T $, which consists of the points on the curves the tangents to which are the sum of the tangents to the curves consisting of the points of the sets $ D_{1} (t) $ and $ D_{2} (t) $ for all $ t \in [0, T] $. As a result, the following theorem is proved. \begin{equation}gin{thm} For the attainability set $ D_T $ of (\ref{eqsub13}) the inclusion $$ D_T \subset D_T^{(1)} + D_T^{(2)}, $$ is true for some vector-function $v(\cdot,\cdot)$, where $ D_T^{(i)} $, $ i = 1,2, $ are given by (\ref{eqsub16}), (\ref{eqsub17}) . \end{thm} The following lemma follows from here. \begin{equation}gin{lem} The function $ V (\cdot) $ satisfies the inequality $$ 0 \leq V (x) \leq c_1 + c_2 $$ in the attainability set $ D_T $ of the system (\ref{eqsub13}). \end{lem} Now consider two differential systems with the right sides $\varphi(\cdot,\cdot)$, $\varphi_1(\cdot,\cdot)$ and zero initial conditions at the time equaled to zero. Let us suppose that the vectors $\varphi(\cdot,\cdot)$ , $\varphi_1(\cdot,\cdot)$ are collinear and also the inequality \begin{equation} \| \varphi (x, u) \| \leq k_2 \| \varphi_1 (x, u) \| \lambdabel{eqsub19} \end{equation} is true for all $ x, u $. 
We assume that we know the attainability set $ D^{(1)}_T $ for the time $ T $ of the system with the right hand side $\varphi_1(\cdot,\cdot)$. The problem is to obtain some estimates of the attainability set of the system (\ref{eqsub13}). The arguments will be carried out as previously, considering the trajectories of the corresponding systems. Any vector in the set $ D_T $ for some $ t \in [0, T] $ and $ u \in {\cal KC}^1[0,T] $ is $$ x = x (t) = \int^t_0 \varphi (x (\tau), u (\tau)) d \tau. $$ Consequently, $$ \| x (t) \| = \| \int^t_0 \varphi (x (\tau), u (\tau)) d \tau \| \leq \int^t_0 \| \varphi (x (\tau), u (\tau)) \| d \tau \leq $$ $$ \leq k_2 \int^t_0 \| \varphi_1 (x (\tau), u (\tau)) \| d \tau \subset k_2 D_1 (t). $$ Since the previous inclusion holds for any $ t \in [0, T] $, it follows that $$ D_T \subset \cup_{t = 0}^T k_2 D_1 (t) = k_2 \cup_{t = 0}^T D_1 (t) \subset k_2 \overlineine{co} \cup_{t = 0}^T D_1 (t) = k_2 D_T^{(1)}. $$ The following theorem is proved. \begin{equation}gin{thm} For the systems of the differential equations with the right sides $\varphi(\cdot,\cdot)$, $\varphi_1(\cdot,\cdot)$ and with the attainability sets $ D_T $ and $ D_T^{(1)} $ respectively, for which the inequality (\ref{eqsub19}) holds, the inclusion $$ D_T \subset k_2 D_T^{(1)} $$ is true. \end{thm} From here we can easily obtain the following conclusion. \begin{equation}gin{lem} In the attainability set $ D_T $ of the system (\ref{eqsub13}) the function $ V (\cdot) $ satisfies the inequality $$ 0 \leq V (x) \leq k_2 c_1, $$ where the constant $ c_1 $ limits the top value of the function $ V (\cdot) $ in the attainability set $ D_T^{(1)} $ of the system (\ref{eqsub15}). \end{lem} Let us give a general method for evaluation of $D_T$ of the system (\ref{eqsub13}). This method does not require any additional information for the system (\ref{eqsub13}). As is known, a convex set can be given by its extreme points. 
There are no problems if there is a finite number of such points. But very often these points are unknown or their number is infinite. We can reconstruct a convex set if we know its projections on different directions. If we project any trajectory, then we project not only the points but the tangents constructed at these points. It means that we have to consider the following system for any direction $g \in \mathbb{R}^n, \| g \| =1,$ $$ (\dot{x},g)(t)=(\varphi((x,g),u),g), \,\,\, x(t) \in \mathbb{R}^n, \,\, t \in [0,T], \,\, u(t) \in U \subset \mathbb{R}^r. $$ As a result, we have \begin{equation} \dot{\theta}(t)=(\varphi(x(t),u),g), \,\,\, \theta(t) \in \mathbb{R}^1, \,\, t \in [0,T], \,\, u(t) \in U \subset \mathbb{R}^r, \lambdabel{eqsub19a} \end{equation} where $\theta$ is the scalar production $(x,g)$. We can do an orthogonal transformation that the direction of the vector $g$ was the first coordinate axis $x_1$. Then the projection of the velocity vector $(\dot{x}_1, \dot{x}_2,\dot{x}_3, \dots, \dot{x}_n)$ on the line $x_1$ will be equal to $(\dot{x}_1, 0,0, \dots, 0)$. It means that we have to substitute $x_2=c_2, x_3=c_3, \dots, x_n=c_n$ into the first equation and to solve the first order differential equation for different values of the constants $c_2, c_3, \dots, c_n$ that are corresponding coordinates of the start point. We can make the following conclusion: {\em convex hull of the attainability sets of the equation (\ref{eqsub19a}) for different $g \in S_1^{n-1}(0)$ will include the attainability set of the equation (\ref{eqsub13})}. It is possible to do, because calculation methods are developed very well for the first order differential equations. \normalsize \section{Conclusion} The obtained results allow us to pass from local to global optimization problem. To implement this it is required to construct the lower convex and upper concave envelops of the function written on the right hand side of the differential system (\ref{eqsub1}). 
We also construct the lower convex envelopes for the optimized functional. All constructions are done in the attainability set for the time $T$. A method for estimation of the attainability set with the help of the positive definite functions (Lyapunov functions) is suggested. The proposed method is based on the decomposition of the function, standing on the right-hand side of the system of the differential equations, into the components the sets of attainability of which are already known. It makes it different from the paper \cite{kostousova}, where the linear systems are considered. It is suggested to find projections of $D_T$ onto any direction $g \in \mathbb{R}^n$. For this reason we have to find projections of the trajectories of the differential system and the tangents to them to the direction $g$. We come across the problem of definition of a set of attainability for the differential equation of the first order. The proposed transformation method of the systems is especially useful when it is difficult to get a solution of differential equations in an explicit form, and one has to use approximate methods only. In addition, the sufficient conditions of optimality for an optimal control are obtained according to the proposed method. {\bf \large APPENDIX} We will prove a theorem giving a rule for construction of LCE and UCE. Let $f(\cdot):\mathbb{R}^n \rightarrow \mathbb{R}$ be a continuous function on a convex compact set $D$. It is required to construct LCE and UCE in $D$. Consider a function $\varphi_p(\cdot):\mathbb{R}^n \rightarrow \mathbb{R}$ $$ \varphi_p(x)=\frac{1}{\mu(D)} \int_D f(x+y)p(y) dy, $$ where $p(\cdot)$ is a distribution function satisfying the following equalities \begin{equation} p(y) \geq 0 \;\;\;\;\; \forall y \in D, \;\;\;\;\; \frac{1}{\mu(D)} \int_D p(y) dy = 1, \;\;\;\; \int_D y p(y) dy =0. \label{eqsub19} \end{equation} We will consider the functions $\varphi_p(\cdot)$ for different distributions $p(\cdot)$.
\begin{thm} The functions $$ \overline{\varphi}(x)=\sup_{p(\cdot)} \varphi_p(x), \;\;\;\;\; \underline{\varphi}(x)=\inf_{p(\cdot)} \varphi_p(x) $$ are UCE and LCE of $f(\cdot)$ on $D$ correspondingly. \end{thm} {\bf Proof.} Without loss of generality we will consider that $f(y) \geq 0 $ for all $ y \in D$. Divide $D$ into subsets $\Delta D_i, i \in 1:N,$ $D=\cup_i \Delta D_i$, $\mu(D)=\sum_i \mu(\Delta D_i)$. We can approximate the function $\varphi_p(\cdot)$ with any precision by the integral sums \begin{equation} \sum_{i=1}^N f(x+y_i) \alpha_i \beta_i, \label{eqsub20} \end{equation} where $$ \alpha_i=\frac{\mu(\Delta D_i)}{\mu(D)},\,\, \beta_i=p(y_i), \,\,\, y_i \in \Delta D_i. $$ It follows from (\ref{eqsub19}) that \begin{equation} \sum_{i=1}^N \alpha_i \beta_i \simeq 1, \;\;\;\; \sum_{i=1}^N y_i \alpha_i \beta_i \simeq 0. \label{eqsub21} \end{equation} The sign $\simeq$ means that the values on the left-hand side of this sign can be close to the values on the right-hand side with any precision depending on $N$. The expression (\ref{eqsub20}) means that we take a convex hull of $N$ vectors $(x+y_1), (x+y_2), \cdots , (x+y_N)$ with coefficients $(\alpha_1 \beta_1, \alpha_2 \beta_2, \cdots, \alpha_N \beta_N)$, i.e. we calculate a vector $$ \bar{x}=\sum_{i=1}^N (x+y_i) \alpha_i \beta_i \simeq x+\sum_{i=1}^N y_i\alpha_i\beta_i \simeq x $$ and define a value of the function $\varphi_p(\cdot)$ at this point equal to $$ \sum_{i=1}^N f(x+y_i) \alpha_i \beta_i . $$ Changing the points $x+y_i \in D$ and the coefficients $\{ \alpha_i \beta_i \}, i \in 1:N,$ satisfying (\ref{eqsub21}), we define in this way the functions $\varphi_p(\cdot)$ with different values at $x$.
Let us prove that the function $$ \underline{\varphi}(x)=\inf_{p(\cdot)} \varphi_p(x) $$ is LCE. Since the infimum is taken over all distributions $p(\cdot)$, the inequality $ \underline{\varphi}(x) \leq f(x)$ is true for all $x \in D$. The function $\varphi_p(\cdot)$ can be approached by the sums (\ref{eqsub20}) for any distribution $p(\cdot)$ under conditions on the coefficients (\ref{eqsub21}). It follows from here that $\varphi_p(\cdot)$ can not be smaller than LCE of $f(\cdot)$. The operation $\inf$ keeps this quality. Consequently, $\underline{\varphi}(\cdot)$ is LCE of $f(\cdot)$. We can prove in the same way that $\overline{\varphi}(\cdot)$ is UCE of $f(\cdot)$. The Theorem is proved. $\Box$ The construction of LCE can be done using Fenchel--Morrey's theorem \cite{alextihomfomin}. According to it LCE is equal to the second conjugate function $f^{**}(\cdot)$. Construction of $f^{**}(\cdot)$ is not easy. To find a value of LCE at one point we have to solve two difficult optimization problems, namely, $$ f^*(p)=\sup_{x \in D} \{ (p,x) - f(x) \} $$ and $$ f^{**}(x)=\sup_{p \in D^*} \{ (p,x) - f^*(p) \}. $$ \begin{thebibliography}{10} \bibitem{chernousko} {\em Chernousko F.L.} An evaluation of phase states of the dynamic systems. A method of ellipsoids. M.: Nauka, 1988. 320p. \bibitem{kurganckivalyi} {\em Kurzhanski A. B., Varaiya P.} A comparison principle for equations of the Hamilton--Jacobi type in set-membership filtering. // Communications in information and systems. Vol. 6, No. 3, 2006. pp. 179-192. \bibitem{kurgancki} {\em Kurzhanski A.B.} Control and Observation Under Uncertainty. M.: Nauka, 1977. \bibitem{kurgancki2} {\em Kurzhanski A.B., Valyi I.} Ellipsoidal Calculus for Estimation and Control. SCFA. Birkhauser. Boston, 1997. \bibitem{kurganckivalyi2} {\em Kurzhanski A. B., Varaiya P.} Ellipsoidal techniques for reachability analysis.
In: Lecture Notes in Computer Sciences, 1790, Springer-Verlag, 2000. pp. 202-214. \bibitem{cesary}{\em Cesary L.} Optimization $-$ Theory and Application. Springer-Verlag, New York, 1983. V. 17. 542 p. \bibitem{lowapp1} {\em Proudnikov I.M. } The lower convex approximations for Lipschitz functions // J.Comp. Math. and Mathematical Physics. 2000. T. 40. N 3. P. 378-386. \bibitem{lowapp3} {\em Proudnikov I.M. } The rules for constructions of lower convex approximations for convex functions // J.Comp. Math. and Mathematical Physics. 2003. T. 43. N 7. P. 939-950. \bibitem{kolmogoroffomin}{\em Kolmogorov A.N., Fomin S.V.} Elements of the theory of functions and functional analysis. Moscow: Nauka, 1976. \bibitem{vulich}{\em Vulich B.Z.} A short course in the theory of functions of real variable. Moscow: Nauka, 1973. \bibitem{tricomi}{\em Tricomi F.G.} Integral equations. Springer-Verlag, 1986. \bibitem{krasnov}{\em Krasnov M.L.} Integral Equations. Moscow: Nauka, 1976. 217 p. \bibitem{zub1}{\em Zubov V.I.} Lectures in control theory. M: Science, 1975. 496 p. \bibitem{kostousova}{\em Kostousova E.K.} External and internal evaluation of the attainability set with help of the parallelotopics // Computational technology. In 1998. Number 2. T. 3. S. 11 - 20. \bibitem{alextihomfomin} Alekseev V.M., Tichomirov V.M., Fomin S.V. Optimal control. M.: Nauka, 1979. 224 p. \end{thebibliography} \end{document}
\begin{document} \title[Stationary measures on homogeneous bundles over flag varieties]{Stationary measures for $\SL_2(\R)$-actions on homogeneous bundles over flag varieties} \author{Alexander Gorodnik} \address{Institut f\"{u}r Mathematik, Universit\"{a}t Z\"{u}rich, 8057 Z\"{u}rich, Switzerland} \email{[email protected], [email protected], [email protected]} \thanks{} \author{Jialun Li} \email{} \thanks{} \author{Cagri Sert} \email{} \thanks{ A.G. and J.L. were supported by the SNF grant 200021--182089; C.S. was supported by SNF Ambizione grant 193481} \begin{abstract} Let $G$ be a real semisimple Lie group with finite centre and without compact factors, $Q<G$ a parabolic subgroup and $X$ a homogeneous space of $G$ admitting an equivariant projection on the flag variety $G/Q$ with fibres given by copies of lattice quotients of a semisimple factor of $Q$. Given a probability measure $\mu$, Zariski-dense in a copy of $H=\SL_2(\R)$ in $G$, we give a description of $\mu$-stationary probability measures on $X$ and prove corresponding equidistribution results. Contrary to the results of Benoist--Quint corresponding to the case $G=Q$, the type of stationary measures that $\mu$ admits depends strongly on the position of $H$ relative to $Q$. We describe possible cases and treat all but one of them, among others using ideas from the works of Eskin--Mirzakhani and Eskin--Lindenstrauss. \end{abstract} \maketitle \section{Introduction} Let $G$ be a real Lie group and $R<G$ a closed subgroup. The actions of subgroups of $G$ on the homogeneous space $X=G/R$ constitute a natural class of dynamical systems whose (topological, statistical etc.) properties are of key relevance to various problems in mathematics. Accordingly, the study of such dynamical systems has a rich history; it has prompted the introduction of various new techniques and contains major results.
The nature of these systems varies according to the acting subgroup and the group $R$ to be factored out, ranging over classes such as partially hyperbolic, parabolic, proximal dynamics, etc. One type of ambient homogeneous space $X$ is obtained by considering quotients by discrete subgroups $R=\Lambda<G$. For the actions of connected subgroups of $G$ generated by unipotents (e.g.~ semisimple subgroups) on these quotients $X$, settling conjectures of Raghunathan and Dani, definitive results were obtained by Ratner \cite{ratner.measure.class,ratner.topological}. Her results can be considered as a vast generalization of classical results on vector flows on the tori $\mathbb{T}^d$ and have far-reaching consequences. A key step/result in Ratner's works --- corresponding to Dani's conjecture --- is the classification of measures invariant under unipotent flows. To obtain this result, Ratner introduced an important technique, the polynomial drift argument. Setting aside the actions of commuting diagonal flows, a next major step for actions on quotients $X$ by lattices $\Lambda<G$ is reached by the seminal work of Benoist--Quint \cite{BQ1,bq.non-escape,BQ2,BQ3}. Their work involved describing dynamics of actions by subgroups $\Gamma$ whose algebraic (Zariski) closure has semisimplicity but the subgroups themselves can be genuinely irregular, e.g.~ discrete. Focusing on stationary measures of random walks on quotients $X$, they developed the exponential drift argument used to obtain a description of all stationary measures. The drift argument of Benoist--Quint requires a \textit{precise control of random matrix products} (e.g.~ local limit theorem), a feature not readily available without a semisimplicity assumption. Benoist--Quint's ideas were then remarkably modified in a non-homogeneous setting by Eskin--Mirzakhani \cite{eskin-mirzakhani} who managed to set up a much more flexible argument bypassing for example the need for a local limit theorem. 
This development enabled further extensions of Benoist--Quint's results in several directions by Eskin--Lindenstrauss \cite{eskin-lindenstraus.short,eskin-lindenstrauss.long}. Some of our arguments in this article (e.g.~ the six points drift argument) draws on the ideas of the latter works \cite{eskin-mirzakhani,eskin-lindenstraus.short} and in fact can be seen as a slightly modified and simpler version of them. Continuing to expound elements of our setting, a second type of homogeneous space is obtained by considering quotients by parabolic subgroups $R=Q<G$, giving rise to flag varieties $X=G/Q$. The dynamics on these quotients are quite different from those on quotients by discrete subgroups; in particular, when the acting group has semisimple (non-compact) Zariski-closure, the action is proximal (if the group is split) and the space supports no invariant measures. Starting with the pioneering works of Furstenberg \cite{furstenberg.poisson, furstenberg.nc, furstenberg.boundary.theory} on random matrix products and boundary theory, a thorough qualitative description of dynamics is established by Guivarc'h--Raugi \cite{guivarch-raugi.isom.ext} and Benoist--Quint \cite{BQ.compositio}. The homogeneous space $X=G/R$ considered in this article is a combination of the two types of classical homogeneous spaces discussed above; it has the structure of a fibre bundle over the flag variety $G/Q$ with fibres given by a homogeneous space $S/\Lambda$, where $S$ is a semisimple group and $\Lambda$ is a discrete subgroup. To illustrate and motivate this structure, recall that a standard example for the first kind of spaces (obtained as quotients by discrete subgroups) is provided by the space $X_{d,d}$ of rank-$d$ lattices in $\R^d$ up to homothety; which can be identified with $\PGL_d(\R)/\PGL_d(\Z)$. 
Now when one considers more generally the space $X_{k,d}$ of rank-$k$ lattices in $\R^d$ up to homothety, if $k \mathfrak{n}eq d$, then $X_{k,d}$ has a natural structure of a bundle over the space of $k$-Grassmannians in $\R^d$ (which is a standard example of a space realized as a quotient by a parabolic subgroup) with fibres given by copies of $\PGL_k(\R)/\PGL_k(\Z)$. The study of dynamics on these quotients is initiated by the work of Sargent--Shapira \cite{sargent-shapira}. Generalizing arguments of Benoist--Quint \cite{BQ1,bq.non-escape}, they managed to describe the dynamics on the space $X_{2,3}$ when the acting probability measure is Zariski-dense in $\SL_3(\R)$ or in an irreducible copy of $\PGL_2(\R)$. Remarkably, they discovered\footnote{interestingly, with a computer experiment} a somewhat unexpected phenomenon (a $\Gamma_\mu$-invariant section, see \cite{sargent-shapira}) in the latter case, a precise understanding of which was an initial motivation for our work. The goal of the current article is more generally to obtain measure classification and equidistribution results in all possible situations\footnote{We manage this except in one case, Case 2.3.b, see Figure \mathfrak{r}ef{figure.cases} and the discussion below.} when the acting probability measure is Zariski-dense in a copy of $\SL_2(\R)$ or $\PGL_2(\R)$ and when the ambient space $G/R$ has minimal assumptions. Among others, the results of our work show that in contrast to the type of results obtained by Benoist--Quint, a variety of various dynamical situations are possible even for the actions of groups such as $\SL_2(\R)$ (see Figure \mathfrak{r}ef{figure.cases}). Moreover, for some of these cases the exponential drift argument of Benoist--Quint is not applicable as such and indeed we develop a different drift argument inspired from those of Eskin--Mirzakhani \cite{eskin-mirzakhani} and Eskin--Lindenstrauss \cite{eskin-lindenstraus.short}. 
Alternatively, we also demonstrate that a precise control on random matrix products (such as a uniform renewal theorem) can also be used to obtain measure classification. Even though the actions on fibres and base individually are well-understood in by-now classical works, the description of dynamics on these homogeneous spaces for a general acting group remains a challenge. \mathfrak{b}igskip \mathfrak{b}egin{center} * \mathfrak{n}opagebreak * * \end{center} We now proceed with introducing the notation needed to state our results. In the sequel, the meaning of the following groups, spaces, measures etc.~ will be fixed unless otherwise stated. Let $G$ be a semisimple real Lie group with finite centre and $Q<G$ a parabolic subgroup. Let $R_0 \unlhd Q$ be a normal algebraic subgroup and $R<Q$ be a closed subgroup containing $R_0$ such that $S:=Q/R_0$ is semisimple with finite centre and without compact factors and $\Lambda:=R/R_0$ is a discrete subgroup of $S$. We denote by $X$ the quotient space $G/R$ which will serve as the ambient space. A guiding example is provided by the homothety classes of rank-$k$ lattices in $\R^d$, see Example \mathfrak{r}ef{ex.ss} for a detailed description of these groups in that case. All probability measures considered in this article will be Borel probability measures. We will denote by $\mu$ a probability measure on $G$. A measure $\mu$ on $G$ is said to have finite first moment if for a (equivalently any) irreducible finite-dimensional faithful linear representation $V$ of $G$, we have $\int \log \|g\|d\mu(g)<\infty$, where $\|.\|$ any choice of an operator norm on $\Endo(V)$. The group generated by the support of $\mu$ will be denoted $\Gamma_\mu$ and $H$ will denote the Zariski-closure of $\Gamma_\mu$ --- we will simply say that $\mu$ is Zariski-dense in $H$. 
Recall that a measure $\nu$ on $X$ is said to be $\mu$-stationary if it satisfies $\mu \ast \nu=\int g_\ast \nu d\mu(g)=\nu$, where $g_\ast \nu$ is the pushforward of $\nu$ by $g \in G$. By a stationary measure, we will understand a stationary probability measure. A $\mu$-stationary measure is said to be ergodic if it is an extremal point in the compact convex set $P_\mu(X)$ of $\mu$-stationary measures on $X$. Finally, we will always suppose that $H$ is isomorphic to either $\SL_2(\R)$ or $\PGL_2(\R)$. The intersection of $H$ and the parabolic group $Q$ will be denoted $Q_H$. \begin{wrapfigure}{r}{4cm}\label{figure.bundle} \begin{tikzpicture} \coordinate[label = above :{${}$}] (0) at (-0.7,0); \coordinate[label = above :{$X\simeq G/R$}] (1) at (1, 1.8); \coordinate (2) at (1, 1.8); \coordinate (3) at (1, 0); \coordinate[label = below :{$G/Q$}] (4) at (1, 0); \coordinate[label = right :{$\simeq S/\Lambda$}] (5) at (1.2, 1); \draw[->] (2) -- (3); \end{tikzpicture} \end{wrapfigure} Before proceeding, in order to conceptually expose our results, we discuss the fibre bundle structure and various possible situations that arise; see the guiding Figure \ref{figure.cases}. Since the factored-out subgroup $R$ is contained in the parabolic $Q$, the space $X$ has a natural $G$-equivariant projection $\pi$ onto the flag variety $G/Q$. The fibres of $\pi$ are copies of the quotient $Q/R$, and by construction, we have $Q/R \simeq (Q/R_0)/(R/R_0)=S/\Lambda$. Since this projection is $G$-, and hence $\Gamma_\mu$-equivariant, any $\mu$-stationary measure $\nu$ on $X$ projects down to a $\mu$-stationary measure $\overline{\nu}:=\pi_\ast \nu$ on $G/Q$. It follows that a first rough classification of stationary measures is provided by the classification in the base $G/Q$.
Thanks to the results of Guivarc'h--Raugi \cite{guivarch-raugi} and Benoist--Quint \cite{BQ.compositio} (see \S \mathfrak{r}ef{subsub.stat.meas.base}), there are two types of projections giving rise to \textbf{Case 1} and \textbf{Case 2}, respectively, Dirac measures and Furstenberg measures on the base. In Case 1, the works of Benoist--Quint \cite{BQ2,BQ3} and Eskin--Lindenstrauss \cite{eskin-lindenstraus.short,eskin-lindenstrauss.long} directly apply and hence we will not comment on it further here (see \S \mathfrak{r}ef{subsub.dirac.base}). If a stationary measure $\mathfrak{n}u$ is in Case 2, up to replacing $Q$ by a conjugate, $Q_H$ is a parabolic subgroup of $H$ and the projection $\overline{\mathfrak{n}u}$ of $\mathfrak{n}u$ is the Furstenberg measure on $\calC:=H/Q_H$ in $G/Q$ (see \S \mathfrak{r}ef{subsub.furstenberg.base}). We will denote this projection as $\overline{\mathfrak{n}u}_F$. In this case, the group $H$ preserves a subbundle of $X$, namely $\mathfrak{p}i^{-1}(H/Q_H)$ which we will denote as $X_\mathcal{C}$ for brevity. 
\mathfrak{b}egin{figure}[H]\label{figure.cases} \mathfrak{b}egin{tikzpicture} \coordinate (O) at (0,0); \coordinate (1) at (-5,0); \coordinate[label = right : \small{\textbf{Case 1:} Trivial base $Q_H=H \longrightarrow$ Benoist--Quint, Eskin--Lindenstrauss}] (2) at (-4, 2.2); \coordinate (3) at (-2.5, 0); \coordinate[label = {\small$\underset{\text{\small{$Q_H<H$ parabolic}}}{\text{\textbf{Case 2:}}}$}] (10) at (-3.25, -1); \coordinate[label = right: {\small \textbf{Case 2.1:} $Q_H^o<R_0$: (Decomposable) Trivial fibre action: Prop.~ \mathfrak{r}ef{prop.trivial.fibre.measure.class}.}] (4) at (-0.5, 1.3); \coordinate[label = right: {\small \textbf{Case 2.2:} $Q_H^o \cap R_0 =R_u(Q_H^o)$: Diagonal fibre action: Theorem \mathfrak{r}ef{thm.measure.class.geod}.}] (5) at (-0.5, 0.35); \coordinate[label = right:] (6) at (-0.5, -1.7); \coordinate[label = right: {\small $\underset{\text{\small Theorem \mathfrak{r}ef{thm.irreducible.H.decompsable}.}}{\text{\textbf{Case 2.3.a:} Irreducible $H$ $\longrightarrow$ Decomposable action }}$}] (7) at (2, -0.8); \coordinate[label = right: \small {\textbf{Case 2.3.b:} Example \mathfrak{r}ef{ex.to.be.treated}.}] (8) at (2, -2.2); \coordinate[label = {\small $\underset{\text{\small{$Q_H^o \cap R_0 =\{\id\}$}}}{\text{\textbf{Case 2.3:}}}$}] (12) at (-1.5, -2.5); \draw[-{Latex[length=2.7mm, width=1.3mm]}] (1) -- (2); \draw[-{Latex[length=2.7mm, width=1.3mm]}] (1) -- (3); \draw[-{Latex[length=2.7mm, width=1.3mm]}] (3) -- (4); \draw[-{Latex[length=2.7mm, width=1.3mm]}] (3) -- (5); \draw[-{Latex[length=2.7mm, width=1.3mm]}] (3) -- (6); \draw[-{Latex[length=2.7mm, width=1.3mm]}] (6) -- (7); \draw[-{Latex[length=2.7mm, width=1.3mm]}] (6) -- (8); \mathfrak{n}ode at (1)[circle,fill,inner sep=1.2pt]{}; \mathfrak{n}ode at (2)[circle,fill,inner sep=1.2pt]{}; \mathfrak{n}ode at (3)[circle,fill,inner sep=1.2pt]{}; \mathfrak{n}ode at (4)[circle,fill,inner sep=1.2pt]{}; \mathfrak{n}ode at (5)[circle,fill,inner sep=1.2pt]{}; \mathfrak{n}ode at 
(6)[circle,fill,inner sep=1.2pt]{}; \mathfrak{n}ode at (7)[circle,fill,inner sep=1.2pt]{}; \mathfrak{n}ode at (8)[circle,fill,inner sep=1.2pt]{}; \end{tikzpicture} \caption{List of all possible cases for $H \mathfrak{a}cts X_\mathcal{C}$} \end{figure} A convenient way (which we will follow) to read the stationary measures and the action on $X_\mathcal{C}$ is to choose natural Borel trivializations of the bundle $X_\mathcal{C}$. By working with a class of trivializations (those induced by sections $G/Q \to G$) which we call standard trivializations (see \S \mathfrak{r}ef{sec.prelim}), we will consider the identifications $X \simeq G/Q \times S/\Lambda$ and $X_\mathcal{C} \simeq H/Q_H \times S/\Lambda$ where the latter identification is made equivariant by $H$-acting on the right-hand-side via a cocycle $\mathfrak{a}lpha: H \times H/Q_H \to S$. Now, according to the algebraic relations between $Q_H$ and the group $R_0 \unlhd Q$, we distinguish three (exhaustive) possibilities giving rise to different dynamics on fibres via the cocycle $\mathfrak{a}lpha$. \textbf{Case 2.1} is the case when $Q_H^o<R_0$. In this situation, we have trivial dynamics on the fibre and every stationary measure on $X$ is a copy of the Furstenberg measure (see \S \mathfrak{r}ef{subsec.case.2.1}). \textbf{Case 2.2} is when $Q^o_H \cap R_0$ is neither $Q^o_H$ nor $\{\id\}$. Since $R_0$ is a normal subgroup of $Q$, the intersection $Q^o_H\cap R_0$ must be the unipotent radical $R_u(Q_H^o)$. In this situation, up to a judicious choice of trivialization, the cocycle $\mathfrak{a}lpha$ takes values in a rank-one diagonal subgroup of $S$ and we obtain a classification of stationary measures (Theorem \mathfrak{r}ef{thm.measure.class.geod}) as product measures on $H/Q_H \times S/\Lambda$ in the second factor invariant under diagonal flow. 
This is the result for which we develop our drift argument and also give an alternative proof, under a stronger moment assumption, using a uniform quantitative renewal theorem for random matrix products. Finally, the remaining \textbf{Case 2.3} occurs when $Q_H^o \cap R_0=\{\id\}$. In this case we restrict the ambient group $G$ to be $\SL_n(\R)$ or $\PGL_n(\R)$. When the associated linear or projective $H$-action is irreducible (\textbf{Case 2.3.a}), we prove that the cocycle $\mathfrak{a}lpha$ comes from an algebraic morphism $H \to S$ (what we call a decomposable action, see \S \mathfrak{r}ef{subsub.bundle.dec}) allowing us to reduce the analysis to the work of Benoist--Quint and Eskin--Lindenstrauss again (Theorem \mathfrak{r}ef{thm.irreducible.H.decompsable}). Our result for this case allows an interpretation of the aforementioned phenomenon appearing in the work of Sargent--Shapira \cite{sargent-shapira} and generalizes it. Finally, \textbf{Case 2.3.b} occurs when $H$ is reducible. The description of dynamics in this case remains open, we provide a conjecture (Conjecture \mathfrak{r}ef{conjecture}) expressing our expectation. \mathfrak{b}igskip We now state our measure classification results in Case 2.2 and Case 2.3.a followed by the corresponding equidistribution results. \mathfrak{b}egin{theorem}[Case 2.2:~Diagonal flow invariance and product structure]\label{thm.measure.class.geod} Let the space $X$ and groups $G,Q,R_0,R,S\simeq Q/R_0,\Lambda\simeq R/R_0$ and $H$ be as defined above. Suppose that $Q_H<H$ is a parabolic subgroup and $Q_H^o \cap R_0=R_u(Q_H^o)$. Let $\mu$ be a Zariski-dense probability measure on $H$ with finite first moment. Then, there exist a standard trivialization $X_\mathcal{C} \simeq H/Q_H \times S/\Lambda$ and a one-dimensional connected diagonal subgroup $D$ of $S$ satisfying the following. Let $\mathfrak{n}u$ be a $\mu$-stationary and ergodic probability measure on $X_\calC$. 
Then, there exists a $D$-invariant and ergodic probability measure $\tilde{\mathfrak{n}u}$ on $S/\Lambda$ such that we have $\mathfrak{n}u=\overline{\mathfrak{n}u}_F\otimes \tilde{\mathfrak{n}u}$. \end{theorem} The hypotheses of this result entail a lack of expansion on the fibres whose existence is a key feature exploited in Benoist--Quint's exponential drift argument. Instead, we adapt a drift argument inspired by the works of Eskin--Mirzakhani \cite{eskin-mirzakhani} and Eskin--Lindenstrauss \cite{eskin-lindenstraus.short} that exploits the interaction between different fibres. The commutativity of the target group of the cocycle considerably simplifies the steps compared to the previous works \cite{eskin-mirzakhani,eskin-lindenstraus.short}. Moreover, if $\mu$ is supposed to have a finite exponential moment, taking advantage of the special setting, we give an alternative proof using uniform quantitative renewal theorem due to Li \cite{jialun.ens} and Li--Sahlsten \cite{jialun.advances}. We defer any further comments to the more detailed discussion below in \S \mathfrak{r}ef{subsec.intro.proofs}. \mathfrak{b}egin{remark}\label{rk.conversely.to.diagonal.thm} The converse to Theorem \mathfrak{r}ef{thm.measure.class.geod} is also true in the sense that when $Q_H<H$ is a parabolic subgroup such that $Q_H^o \cap R_0=R_u(Q_H^o)$, there exists a standard trivialization $X_\mathcal{C} \simeq H/Q_H \times S/\Lambda$ and an index-two extension $D^{\mathfrak{p}m}$ of $D$ such that for any $D^\mathfrak{p}m$-invariant probability $\tilde{\mathfrak{n}u}$, the measure $\overline{\mathfrak{n}u}_F \otimes \tilde{\mathfrak{n}u}$ is $\mu$-stationary probability measure on $X$. \end{remark} We now continue with Case 2.3.a. We first introduce the following definition to state our result. 
\mathfrak{b}egin{definition}\label{def.decomposable} An $H$-homogeneous subbundle $X_\mathcal{C}$ of $X$ is said to be \textbf{decomposable} if $X_\mathcal{C}$ is isomorphic as $H$-space to $\mathcal{C} \times S/\Lambda$, where the latter is endowed with the $H$-action $h(c,f)=(hc,\mathfrak{r}ho(h)f)$ and $\mathfrak{r}ho:H \to S$ is a morphism extending $Q_H \mathfrak{h}ookrightarrow Q \twoheadrightarrow S $. \end{definition} Our next measure classification result\footnote{After we have obtained our results, we have been informed by Uri Shapira that in a sequel work to \cite{sargent-shapira} together with Uri Bader and Oliver Sargent, for the classification of stationary measures, they independently obtain the same result (and also introduce a similar notion as in Definition \mathfrak{r}ef{def.decomposable}). Our proof ideas for this case seem to be similar. They also obtain equidistribution results in some situations of Case 2.3.a for random walks starting outside the bundle $X_\mathcal{C}$. We thank Uri Shapira for related and kind discussions.} is the following. \mathfrak{b}egin{theorem}[Decomposable action]\label{thm.irreducible.H.decompsable} Let $G=\PGL_n(\R)$, the space $X$ and groups $Q,R_0,R,S\simeq Q/R_0,\Lambda\simeq R/R_0$ and $H$ be as defined above. Suppose that the $H$-action on $\P(\R^{n})$ is irreducible. Then, there exists a unique $H$-compact orbit $\calC$ in $G/Q$ and the $H$-action on $X_\mathcal{C}$ is decomposable. In particular, given a Zariski-dense probability measure $\mu$ on $H$ with finite first moment, we have a bijection \mathfrak{b}egin{equation}\label{eq.bijection.in.thm.SL2.dec} P_\mu^{\erg}(X_{\mathcal{C}}) \simeq P_\mu^{\erg}(S/\Lambda), \end{equation} where the action of $\mu$ on $S/\Lambda$ comes from the morphism $\mathfrak{r}ho: H \to S$ in Definition \mathfrak{r}ef{def.decomposable}. 
\end{theorem} This result provides, in a more general setting, a conceptual explanation for the existence of the invariant section discovered by Sargent--Shapira (see the relevant discussion in \cite{sargent-shapira}) and allows us to deduce affirmative answers to (1),(2) and (6) \cite[Problem 1.13]{sargent-shapira}. \mathfrak{b}egin{remark} 1. Case 2.3.a is the main particular case of this theorem. In the above statement, if we suppose that $R_0 \mathfrak{n}eq Q$, then one can verify that $Q_H^\circ\cap R_0=\{\id\}$ and we are in Case 2.3.a.\\ 2. It might be possible to generalize the setting of the above theorem to the case where $G$ is a simple $\R$-split linear Lie group and $H<G$ is the image of a principal $\SL_2(\R)$ in $G$ in the sense of Kostant \cite{kostant}. \end{remark} In view of the measure classification results of Benoist--Quint and Eskin--Lindenstrauss \cite[Theorem 1.3]{eskin-lindenstrauss.long} on quotients by discrete subgroups, i.e.~ the right-hand-side of \eqref{eq.bijection.in.thm.SL2.dec}, the following is a consequence of Theorem \mathfrak{r}ef{thm.irreducible.H.decompsable} (and Proposition \mathfrak{r}ef{prop.decomposable.measure.class}). Recall that a homogeneous measure $\tilde{\mathfrak{n}u}$ on $S/\Lambda$ is a probability measure supported on a closed orbit of its stabilizer $S_0<S$. We also say that such a measure is $S_0$-homogeneous. \mathfrak{b}egin{corollary}[Homogeneous fibres]\label{corol.homogeneous.fibres} Keep the setting of Theorem \mathfrak{r}ef{thm.irreducible.H.decompsable}. Let $\mathfrak{n}u$ be a $\mu$-stationary and ergodic probability measure on $X_\mathcal{C}$. There exists a trivialization $X_\mathcal{C} \simeq H/Q_H \times S/\Lambda$ in whose coordinates $\mathfrak{n}u$ is a product measure $\overline{\mathfrak{n}u}_F \otimes \tilde{\mathfrak{n}u}$, where $\tilde{\mathfrak{n}u}$ is $S_0$-homogeneous. 
\end{corollary} \mathfrak{b}egin{remark} Consider any standard trivialization $X_\mathcal{C} \simeq H/Q_H \times S/\Lambda$. Let $\mathfrak{n}u=\int \delta_\theta \otimes \mathfrak{n}u_\theta d\overline{\mathfrak{n}u}_F(\theta)$ be the disintegration of $\mathfrak{n}u$ over the base $H/Q_H$. Then, there exists a closed subgroup $S_0<S$ such that for $\overline{\mathfrak{n}u}_F$-a.e.~$\theta \in H/Q_H$, the fibre measure $\mathfrak{n}u_\theta$ is $S_\theta$-homogeneous, where $S_\theta$ is a conjugate of $S_0$. \end{remark} \mathfrak{b}igskip The last remaining possibility for the action of $H$ on an $H$-invariant subbundle $X_\mathcal{C}$ is Case 2.3.b, it happens when $Q_H$ is a parabolic subgroup of $H$ and $Q_H^o \cap R_0=\{\id\}$ but $H$-action on $\P(\R^n)$ is not irreducible (see Example \mathfrak{r}ef{ex.to.be.treated}). In the statement below, we conjecture that the fibre measures are homogeneous, supposing only that the natural morphism $Q_H \to S$ has finite kernel (equivalently, $Q_H^o \cap R_0=\{\id\}$); in other words, the conclusion of Corollary \mathfrak{r}ef{corol.homogeneous.fibres} holds, without the irreducibility assumption. \mathfrak{b}egin{conjecture}[Homogeneous fibres]\label{conjecture} Let $G=\PGL_n(\R)$, the space $X$ and groups $Q,R_0,R,S\simeq Q/R_0,\Lambda\simeq R/R_0$ and $H$ be as defined above. Suppose we are in Case 2.3, i.e.~ $Q_H$ is a parabolic subgroup of $H$ and $Q_H^o \cap R_0=\{\id\}$. Then the conclusion of Corollary \mathfrak{r}ef{corol.homogeneous.fibres} holds. \end{conjecture} We now turn to the equidistribution aspect of random walks on $H$-subbundles $X_\mathcal{C}$ of $X$. We keep the same setting as in the measure classification part above; we suppose in addition that $\Lambda$ is a lattice in $S$ (except for Theorem \mathfrak{r}ef{thm.equidist.geod} below). Let as usual $\mu$ be a probability measure on $G$ that is Zariski-dense in a copy $H$ of $\SL_2(\R)$ or $\PGL_2(\R)$ in $G$. 
Given a point $x \in X_\mathcal{C}$, we are interested in describing the asymptotic behaviour of the averaged distribution $\frac{1}{n} \sum_{k=1}^n \mu^{\mathfrak{a}st n}\mathfrak{a}st \delta_x$ of the random walk on $X$ starting from $x$ up to the step $n$. In all cases in which we treat the measure classification problem, i.e.~ all cases except Case 2.3.b, it will be possible to address the equidistribution problem. In fact, Case 1 (trivial base) is precisely the setting of Benoist--Quint \cite{BQ3} so the corresponding equidistribution results (see \cite{prohaska-sert-shi, benard-desaxce} extending the original results with respect to moment hypotheses) directly apply; we do not comment on it further here. Case 2.1 boils down to the equidistribution to the Furstenberg measure; even quantitative statements are known for this case, see \S \mathfrak{r}ef{subsub.equidist.trivial.fibre}. Finally, thanks to the decomposability obtained in Theorem \mathfrak{r}ef{thm.irreducible.H.decompsable}, it is not hard to see that Case 2.3.a also boils down to the setting of Benoist--Quint, see Proposition \mathfrak{r}ef{prop.equidist.H.irred}. Therefore the only case that needs to be handled is Case 2.2, i.e.~diagonal fibre action. In this case, we will observe that (see Lemma \mathfrak{r}ef{lemma.its.iwasawa}) we have a standard trivialization $X_\calC\simeq \calC\times_\mathfrak{a}lpha S/\Lambda$ such that the action $\mathfrak{a}lpha$ of $H$ on the fibre $S/\Lambda$ is by a one-dimensional diagonal subgroup $D$ of $S$ through the Iwasawa cocycle $\sigma$ up to a sign. It is well-known that the $D$-orbits of different points on $S/\Lambda$ can exhibit very different statistical behaviours, i.e.~ not characterized by a single $D$-invariant measure. Given the existence of this chaotic behaviour, the most one can hope to establish is that the statistical behaviour of the $\mu$-random walk in the fibres matches that of the $D$-flow. This is the content of the following result. 
In the statement, the equidistribution of a $D$-orbit is understood with respect to a Haar/Lebesgue measure on $D$. \begin{theorem}[Diagonal fibre action: equidistribution]\label{thm.equidist.geod} Keep the hypotheses and notation of Theorem \ref{thm.measure.class.geod} and let $X_\mathcal{C} \simeq H/Q_H \times S/\Lambda$ be the trivialization given by Theorem \ref{thm.measure.class.geod}. Suppose in addition that the measure $\mu$ has finite exponential moment and $\Gamma_\mu$ is inside the connected component of $H\simeq\PGL_2(\R)$. Then, the $D$-orbit of $z \in S/\Lambda$ equidistributes to a probability measure $m$ on $S/\Lambda$ if and only if for any $x=(\theta,z)\in X_\calC$, we have the convergence \[ \frac{1}{n}\sum_{k=1}^n\mu^{*k}*\delta_x\rightarrow \bar{\nu}_F\otimes m \quad \text{as} \;\; n \to \infty. \] \end{theorem} \begin{remark} For $H\simeq \SL_2(\R)$, we have a similar equidistribution result, but the statement is more complicated; see \S\ref{subsec.equidist.diagonal} for more details. \end{remark} \begin{remark}[Alternative proof for Theorem \ref{thm.measure.class.geod}] The results we establish to prove Theorem \ref{thm.equidist.geod} allow us to obtain a different proof of Theorem \ref{thm.measure.class.geod} under the additional finite exponential moment condition: let $\nu$ be a $\mu$-stationary and ergodic measure. By the Chacon--Ornstein ergodic theorem, there exists $x$ such that $\frac{1}{n}\sum_{k=1}^n\mu^{*k}*\delta_x\rightarrow \nu$ as $n \to \infty$. From the proof of Theorem \ref{thm.equidist.geod}, we actually obtain that $\frac{1}{n}\sum_{k=1}^n\mu^{*k}*\delta_x$ and $\bar\nu_F\otimes \frac{1}{t}\int_0^t \delta_{\alpha(t)(z)}\, dt$ have the same limit as $t,n \to \infty$, where $\alpha(t)$ is the flow of $D$ and $x=(\theta,z)$.
Therefore $\mathfrak{b}ar\mathfrak{n}u_F\otimes \frac{1}{t}\int_0^t \delta_{\mathfrak{a}lpha(t)(z)}\ dt\to\mathfrak{n}u$, which implies $\frac{1}{t}\int_0^t \delta_{\mathfrak{a}lpha(t)(z)}\ dt\to m$ for some $D$-invariant probability measure $m$ and hence the conclusion of Theorem \mathfrak{r}ef{thm.measure.class.geod}. \end{remark} \subsection{Ideas of proofs}\label{subsec.intro.proofs} Before finishing the introduction, we give a brief overview of the ideas of proofs used to obtain the main results of this paper.\\[-4pt] \textbullet ${}$ \textbf{Case 2.2 (measure classification): Drift argument.} The basic idea in Case 2.2 is to use the non-triviality of the fibre bundle structure of $X_\mathcal{C}$ over $\mathcal{C}$ to obtain invariance of the measures. More concretely, for each cocycle $\mathfrak{a}lpha:H\times \calC\to S$, one can try to define a cross-ratio for quadruple $a,a',b,b'\in H^\N$ \[C_\mathfrak{a}lpha(a,a',b,b')=\lim_{n,m\to \infty}\mathfrak{a}lpha(a'^n,\xi(b))\mathfrak{a}lpha(a^{m},\xi(b'))^{-1}\mathfrak{a}lpha(a'^n,\xi(b'))^{-1}\mathfrak{a}lpha(a^m,\xi(b')) \] with suitable limits of $n,m$, where $\xi$ is some map from $H^\N$ to $\calC$. If the cocycle $\mathfrak{a}lpha$ is cohomologous to a morphism from $H$ to $S$, that is, the bundle structure is trivial, then any reasonable definition of cross-ratio will yield no information. This corresponds to \textit{decomposable} action in Theorem \mathfrak{r}ef{thm.irreducible.H.decompsable}. Otherwise, if the bundle structure is not trivial, as in Case 2.2, then the cross-ratio is non-trivial for generic four points $a,a',b,b'$ and yields certain information on the relation between asymptotic behaviour of products corresponding to those points. In this case, we adapt the drift argument of \cite{eskin-lindenstraus.short} to ``six points drift argument'' to exploit this information and obtain invariance under a limit cross-ratio. 
This six points drift argument is very different from the drift argument in \cite{BQ1} or \cite{sargent-shapira}; we do not use expansion on some tangent directions (indeed, in Case 2.2, we have no expansion). It is really the non-triviality of the cross-ratio, or equivalently of the bundle structure, that helps us to obtain the invariance of the measures. \begin{figure}[H] \begin{tikzpicture} \coordinate (O) at (0,0); \coordinate[label = left:$\nu_b$] (1) at (-0.8,-1.2); \coordinate[label = right:$\nu_{b'}$] (2) at (0.8, -1.2); \coordinate[label = above left:$\nu_{a^nb}$] (3) at (-2.2,0.8); \coordinate[label = above right:$\nu_{a^nb'}$] (4) at (-2,0.8); \coordinate[label = above left:$\nu_{(a')^m b}$] (5) at (2,1.3); \coordinate[label = above right:$\nu_{(a')^m b'}$] (6) at (2.2,1.3); \draw[blue] (1) -- (3); \draw[blue] (1) -- (5); \draw[red] (2) -- (4); \draw[red] (2) -- (6); \node at (1)[circle,fill,inner sep=1.2pt]{}; \node at (2)[circle,fill,inner sep=1.2pt]{}; \node at (3)[circle,fill,inner sep=1.2pt]{}; \node at (4)[circle,fill,inner sep=1.2pt]{}; \node at (5)[circle,fill,inner sep=1.2pt]{}; \node at (6)[circle,fill,inner sep=1.2pt]{}; \end{tikzpicture} \caption{Six points drift argument.} \label{fig.drift}\label{tikzpic} \end{figure} \textbullet ${}$ \textbf{Case 2.3 (measure classification): Decomposable action.} In some sense, the key difficulty in this case is to suspect the possibility of the existence of a decomposable action in our setting. Once one has this possibility in mind, one can use ideas about cocycles going back to Mackey \cite{mackey49, mackey58} (see Varadarajan \cite[\S 5]{varadarajan.quantum} or Zimmer \cite{zimmer.book} for precise expressions) to establish this decomposability.
The latter expresses a certain algebraic structure in, or equivalently triviality of, the fibre-bundle $X_\mathcal{C}$ and in more concrete terms it boils down to an extension of a natural morphism $Q_H \to S$ to a larger group (only possibility being $H$ in our setting). Once this is established, one reduces the situation to a trivial-bundle structure and hence one can bring in the results of Benoist--Quint and Eskin--Lindenstrauss.\\[-3pt] \textbullet ${}$ \textbf{Case 2.2 (Equidistribution): Uniform quantitative renewal theorem.} The key point that enables us to obtain the equidistribution result (Theorem \mathfrak{r}ef{thm.equidist.geod}) and an alternative proof of Theorem \mathfrak{r}ef{thm.measure.class.geod} under a stronger moment assumption, is the fact that thanks to the particular situation we have precise control of random matrix products in the form of a uniform quantitative renewal theorem and exponential large deviation estimates. More precisely, under suitable trivialization, the Ces\`{a}ro average can be expressed as \[\frac{1}{n}\sum_{k=1}^n\mu^{*k}*\delta_x=\frac{1}{n}\sum_{k=1}^n\int \delta(g\theta,\mathfrak{a}lpha(\sigma_\chi(g,\theta))z) \ d\mu^{*k}(g), \] where $x=(\theta,z)$ and $\sigma_\chi$ is some cocycle from $H\times \calC$ to $\R$. This is very similar to the renewal sum $\sum_{k=1}^\infty \int \delta(g\theta,\sigma_\chi(g,\theta)-t)\ d\mu^{*k}(g)$ which converges to the product measure $\mathfrak{b}ar{\mathfrak{n}u}_F\otimes Leb_{\R^+}$ with respect to compactly supported continuous functions. Combined with exponential large deviation estimates and good error estimates from the uniform quantitative renewal theorem, we can prove the equidistribution result. \mathfrak{b}igskip This article is organized as follows. Section \mathfrak{r}ef{sec.prelim} contains some preliminary tools about fibred dynamics, cocycles and stationary measures. 
Section \mathfrak{r}ef{sec.meas.class} is devoted to proving the measure classification results; Theorem \mathfrak{r}ef{thm.measure.class.geod} and Theorem \mathfrak{r}ef{thm.irreducible.H.decompsable} are proved therein. Finally, Section \mathfrak{r}ef{sec.equidist} contains the equidistribution results, in particular the proof of Theorem \mathfrak{r}ef{thm.equidist.geod}. \section{Preliminaries: Cocycles, decomposable actions and stationary measures}\label{sec.prelim} This section contains a collection of preliminaries for the proofs in the following parts. We adopt a general setting. In \S \mathfrak{r}ef{subsec.cocycles}, after a discussion of cocycle induced by trivializations or sections, we introduce the notion of a decomposable action and present an important criterion for decomposability. \S \mathfrak{r}ef{subsec.stat.measures} contains a discussion of stationary measures and their decompositions, due to Furstenberg. Finally, in \S \mathfrak{r}ef{subsec.meas.class.sec2}, we single out a description of stationary measures for decomposable actions. \subsection{Generalities on cocycles and decomposable actions}\label{subsec.cocycles} Let $G$ be a locally compact and second countable (lcsc) group and $X$ a standard Borel space endowed with a Borel $G$-action $G \times X \to X$. Let $Q<G$ be a closed subgroup and suppose we have a measurable $G$-equivariant surjection $\mathfrak{p}i: X \to G/Q$. We shall refer to such a $G$-space $X$ as a fibre bundle over $G/Q$. A fruitful way to describe the $G$-action on such bundles $X$ is by using the notion of cocycles. This approach -- going back to the work of Mackey \cite{mackey49, mackey58} (see Varadarajan \cite[\S 5]{varadarajan.quantum}) on induction of unitary representations -- will be instrumental to our considerations. 
\subsubsection{Cocycles defined by actions and vice versa} Given a bundle over $G/Q$ with $G$-action, let $F$ be a copy of the fibre above $Q$, i.e.~ of the Borel set $\mathfrak{p}i^{-1}(Q)$ endowed with the $Q$-action. A $(G,Q)$-bundle trivialization of $X$ is a Borel isomorphism $\mathfrak{p}hi=(\mathfrak{p}hi_1,\mathfrak{p}hi_2):X \simeq G/Q \times F$ such that $\mathfrak{p}hi_1$ is $G$-equivariant and $\mathfrak{p}hi_2$ is equivariant with respect to a $Q$-valued cocycle $\mathfrak{a}lpha: G \times G/Q \to Q$, i.e.~ $\mathfrak{p}hi(gx)=(g\mathfrak{p}hi_1(x),\mathfrak{a}lpha(g,\mathfrak{p}hi_1(x))\mathfrak{p}hi_2(x))$. Notice that $F$ has a natural $Q$-action. Recall that a cocycle $\mathfrak{a}lpha:G \times G/Q \to Q$ is a map satisfying $\mathfrak{a}lpha(g_1g_2,f)=\mathfrak{a}lpha(g_1,g_2f) \mathfrak{a}lpha(g_2,f)$ for every $g_1,g_2 \in G$ and $f \in G/Q$ (this corresponds to what is called a strict cocycle in \cite{varadarajan.quantum}). By using the cocycle relation, one sees that any cocycle $\mathfrak{a}lpha: G \times G/Q \to Q$ endows the space $G/Q \times F$ with a $G$-action. We shall denote the space $G/Q \times F$ endowed with a $G$-action induced by a $Q$-valued cocycle $\mathfrak{a}lpha$ by $G/Q \times_\mathfrak{a}lpha F$. Therefore, a $(G,Q)$-bundle trivialization is a $G$-equivariant isomorphism between $X$ and $G/Q \times_\mathfrak{a}lpha F$ for some $Q$-valued cocycle $\mathfrak{a}lpha$. In the rest of this paper, the ambient space $X$ which we will work with will be, in particular, a homogeneous space of a lcsc group $G$. For $x \in X$, we denote by $G_x$ the stability group $\{g \in G : gx=x\}$ so that we have a $G$-equivariant identification $X \simeq G/G_x$. We will suppose that the stability group $G_{x_0}=:R$ of a base point $x_0 \in X$ is contained in $Q$ so that we have a continuous $G$-equivariant surjection $\mathfrak{p}i: G/R \simeq X \to G/Q$ turning the homogeneous space $X$ into a bundle over $G/Q$ with $G$-action. 
The choice of the base point $x_0$ identifies the fibre $\mathfrak{p}i^{-1}(Q)$ with the $Q$-homogeneous space $Q/R$. In this setting, any Borel section $s:G/Q \to G$ yields a trivialization of $X$ given by the Borel isomorphism \mathfrak{b}egin{equation}\label{eq.trivialization} \mathfrak{b}egin{aligned} X &\to G/Q \times Q/R\\ x &\mapsto (\mathfrak{p}i(x), s(\mathfrak{p}i(x))^{-1}x). \end{aligned} \end{equation} Here the Borel section $s$ is a section of the principal $Q$-bundle $G\mathfrak{r}ightarrow G/Q$, i.e.~ $s(gQ)Q=gQ$. Such a section $s$ induces a Borel cocycle $\mathfrak{a}lpha:G \times G/Q \to Q$ by setting \mathfrak{b}egin{equation}\label{eq.construct.cocycle} \mathfrak{a}lpha(g,hQ):=s(ghQ)^{-1}g s(hQ), \end{equation} which makes the trivialization \eqref{eq.trivialization} into a $(G,Q)$-bundle trivialization. For a $G$-homogeneous bundle $X$, we will say that a $(G,Q)$-bundle trivialization $X \simeq G/Q \times_\mathfrak{a}lpha Q/R$ is \textit{standard} if the morphism $\mathfrak{r}ho_\mathfrak{a}lpha: Q \to Q$ given by $\mathfrak{r}ho_\mathfrak{a}lpha(q)=\mathfrak{a}lpha(q,Q)$ is conjugate to the identity morphism $Q \to Q$. A trivialization given by a choice of section as above is standard. Conversely, a standard trivialization is induced by a choice of section $s:G/Q \to G$. In the sequel, we will assume further structure on the groups making up the space $X$. Namely, we will suppose that there exists a closed normal subgroup $R_0$ of $Q$ which is also a subgroup of $R$ so that writing $S:=Q/R_0$ and $\Lambda=R/R_0$, we have an identification of the fibre $Q/R$ with $S/\Lambda$. The reason for this is that starting from Section \mathfrak{r}ef{sec.meas.class}, the group $\Lambda$ will be a discrete subgroup of $S$ which will make the action on $Q/R \simeq S/\Lambda$ more tractable. 
Composing a cocycle $\tilde{\mathfrak{a}lpha}:G \times G/Q \to Q$ with the epimorphism $\mathfrak{p}i_1:Q \to S=Q/R_0$, we obtain an $S$-valued cocycle $\mathfrak{a}lpha: G \times G/Q \to S$. Since the action of $Q$ on the fibre $Q/R$ factors through $S$, the $S$-valued cocycle $\mathfrak{a}lpha=\mathfrak{p}i_1\circ\tilde{\mathfrak{a}lpha}$ is sufficient to reconstruct the bundle. From now on, we will mainly consider $S$-valued cocycles. \subsubsection{Equivalence of cocycles}\label{subsub.equiv.cocycles} Let $G'$ be a lcsc group. Two $G'$-valued cocycles $\mathfrak{a}lpha, \mathfrak{b}eta: G \times G/Q \to G'$ are said to be equivalent, denoted $\mathfrak{a}lpha \sim \mathfrak{b}eta$, if there exists a Borel map $\mathfrak{p}hi:G/Q \to G'$ such that for every $x \in G/Q$ and $g \in G$, we have \mathfrak{b}egin{equation}\label{eq.eq.cocycles} \mathfrak{a}lpha(g,x)=\mathfrak{p}hi(gx)^{-1} \mathfrak{b}eta(g,x) \mathfrak{p}hi(x). \end{equation} It is clear that for two $S$-valued cocycles $\mathfrak{a}lpha$ and $\mathfrak{b}eta$ over $G/Q$, if $\mathfrak{a}lpha \sim \mathfrak{b}eta$, then the associated $G$-spaces $G/Q \times_\mathfrak{a}lpha Q/R$ and $G/Q \times_\mathfrak{b}eta Q/R$ are isomorphic. Accordingly, $S$-valued cocycles obtained from different sections $s:G/Q \to G$ via \eqref{eq.construct.cocycle} are equivalent. In the sequel, we will be interested in actions of subgroups $H$ of $G$ on a $G$-homogeneous bundle $X$ and the associated subbundles. We will use similar terminology for cocycles restricted to $H$. Let $H$ be a closed subgroup of $G$ and $\mathcal{C} \subseteq G/Q$ an $H$-homogeneous closed subset of $G/Q$. Up to replacing $Q$ by a conjugate, suppose $\mathcal{C}=HQ$ so that $\mathcal{C} \simeq H/Q_H$ where $Q_H:=H \cap Q$. In that case $X_\mathcal{C}:=\mathfrak{p}i^{-1}(\mathcal{C}) \subseteq X$ is an $H$-invariant closed (not necessarily $H$-homogeneous) subset of $X$ giving rise to a bundle over $\mathcal{C}$ with $H$-action. 
\mathfrak{b}egin{example}\label{ex.ss} The above situation appears in (and is motivated by) the setting of the work of Sargent--Shapira \cite{sargent-shapira}: Let $X$ be the set of homothety-equivalence classes of rank-2 lattices in $\mathbb{R}^3$. The set $X$ has a natural lcsc topology and the group $G=\SL_3(\R)$ acts continuously and transitively on $X$. The connected component $R_0$ of the stabilizer $R$ of a point $x \in X$ consists of the solvable radical of a maximal parabolic group $Q$ in $G$ and we have a surjective map $S = Q/R_0 \to Q/R\simeq S/\Lambda\simeq \SL_2(\R)/\SL_2(\Z)$, where $R/R_0 \simeq \Lambda=\PGL_2(\Z)$. One can then consider the action of the subgroup $H=\SO(2,1)<G$ on $X$ which has a unique minimal invariant subset $\mathcal{C}$ in $G/Q$. The group $Q_H=H \cap Q$ corresponds to a Borel subgroup of $H$. \end{example} \subsubsection{Induced morphisms and decomposable subbundle actions}\label{subsub.bundle.dec} Recall the notion of a decomposable bundle in Definition \mathfrak{r}ef{def.decomposable}. We provide a criterion to ensure that an $H$-homogeneous bundle $X_\mathcal{C}$ is decomposable. \mathfrak{b}egin{proposition}\label{prop.decomposable} If the morphism $Q_H \mathfrak{h}ookrightarrow Q \twoheadrightarrow S $ extends to a morphism $H \to S$, then $X_\mathcal{C}$ is decomposable. \end{proposition} The following statement is a version of \cite[Proposition 4.2.16]{zimmer.book} and provides a useful characterization of a decomposable action in our setting. Let $\Co(H,Q_H,S)$ denote the set of Borel cocycles $H \times H/Q_H \to S$. Given $\mathfrak{a}lpha \in \Co(H,Q_H,S)$, the map $\mathfrak{r}ho_\mathfrak{a}lpha$ defined by $\mathfrak{r}ho_\mathfrak{a}lpha(p):=\mathfrak{a}lpha(p,Q_H)$ for $p \in Q_H$ defines a Borel (hence continuous) morphism from $Q_H \to S$. 
The proof is based on an important observation of Mackey which characterizes equivalence classes of Borel cocycles $\Co(H,P,G')$ by conjugacy classes of induced morphisms $P \to G'$, where conjugacy is understood up to an element of $G'$ in the target. We have the following result from \cite[Theorem 5.27]{varadarajan.quantum} that we adapt to our setting here. \begin{lemma}\label{lemma.cocycle.bijection} The map \begin{equation*} \begin{aligned} \Co(H,P,G') &\to \Hom(P,G')\\ \alpha & \mapsto \rho_\alpha \end{aligned} \end{equation*} is a surjective map that descends to a bijection when we quotient $\Co(H,P,G')$ by equivalence of cocycles and $\Hom(P,G')$ by conjugation in $G'$. \end{lemma} \begin{proof} Fix a Borel section $s:H/P \to H$ with $s(P)=\id$. Given a morphism $\rho:P \to G'$, the map given by \begin{equation}\label{eq.morphism.to.cocycle} \alpha(h_1,hP)=\rho(s(h_1hP)^{-1}h_1s(hP)) \end{equation} is a cocycle whose restriction to $P \simeq P \times \{P\}$ recovers $\rho:P \to G'$. This shows that the map is surjective. Note that if two cocycles $H \times H/P \to G'$ are equivalent, then it is clear that the morphisms $P \to G'$ that they induce are conjugate by an element of $G'$. In particular, the map $\alpha \mapsto \rho_\alpha$ descends to a (surjective) map on the equivalence classes of cocycles. To show that this map is a bijection, let $\alpha$ and $\beta$ be two cocycles $H \times H/P \to G'$ and suppose that $\rho_\alpha(.)=l\rho_\beta(.)l^{-1}$ for some element $l \in G'$. By direct computation, we have \begin{equation}\label{eq.section.inout} \alpha(s(h_1hP),P)^{-1}\alpha(h_1,hP)\alpha(s(hP),P)=\rho_\alpha(s(h_1hP)^{-1}h_1s(hP))^{-1}.
\end{equation} Writing the equivalent of \eqref{eq.section.inout} for $\beta$ (substituting $\rho_\beta$ for $\rho_\alpha$) and combining it with \eqref{eq.section.inout}, we get \[ \alpha(h_1,hP)=\alpha(s(h_1hP), P)l^{-1}\beta(s(h_1hP),P)^{-1}\beta(h_1,hP)\beta(s(hP), P)l\alpha(s(hP),P)^{-1}. \] This shows that $\alpha \sim \beta$ via the fibre automorphisms given by the map \[ hP \mapsto \beta(s(hP), P)l\alpha(s(hP), P)^{-1}, \] which proves the claim. \end{proof} We say that a cocycle $\alpha: H \times H/P \to S$ is of morphism-type with morphism $\rho$ if there exists a Borel morphism $\rho:H \to S$ such that $\alpha(h_1,hP)=\rho(h_1)$ for every $h_1,h \in H$. We can now give the proof of the decomposability criterion. \begin{proof}[Proof of Proposition \ref{prop.decomposable}] Choose a Borel section $s: G/Q \to G$ with $s(Q)=\id$. Let $\tilde\alpha:G \times G/Q \to Q$ be the associated cocycle. Recall that $\pi_1$ is the quotient map $Q\to S=Q/R_0$. The associated morphism $\rho_{\tilde\alpha}:Q \to Q$ is then the identity map and hence the map $\pi_1\circ \rho_{\tilde\alpha}:Q_H\to Q\to S$ extends to a morphism $\tau:H \to S$ by assumption. We thus get a morphism-type cocycle $\beta:H \times H/Q_H \to S$ by taking $\beta(h,c)=\tau(h)$. By Lemma \ref{lemma.cocycle.bijection}, the cocycle $\alpha:=\pi_1\circ \tilde\alpha$ restricted to $H \times H/Q_H$ and $\beta$ are equivalent. Since equivalent cocycles induce isomorphic bundles, we are done. \end{proof} \subsection{Skew-product systems and stationary measures}\label{subsec.stat.measures} Let $H^\Z$ be the set of two-sided sequences of elements of $H$.
We denote an element (bi-infinite word) of $H^\Z$ by $w=(b,a)$ where, by convention, we consider $b=(\ldots, b_{-2},b_{-1}) \in H^{-\N^\mathfrak{a}st}$ and $a=(a_0,a_1,\ldots) \in H^\N$. The sequence $b$, also denoted $w^-$ will be referred to as the past of $w$ and $a$, also denoted $w^+$, as the future of $w$. We denote by $T$ the shift map on $H^\Z$ taking one step forward to future, i.e.~ $T(b,a)=(ba_0,Ta)$, where $ba_0$ is the concatenation $(\ldots, b_{-1},a_0)$ and $Ta$ is the image of $a$ under the usual shift map, also denoted $T$, on $H^\N$. Accordingly, the inverse of $T$ is given by $T^{-1}(b,a)=(T^{-1}b,b_{-1}a)$, where $T^{-1}b=(\ldots, b_{-2})$. Let $\mu$ be a probability measure on $H$ and $\mathfrak{n}u$ a $\mu$-stationary probability measure on a locally compact and second countable $H$-space $Y$. It follows from the martingale convergence theorem that the limit as $n\to \infty$ of $b_{-1}\ldots b_{-n} \mathfrak{n}u$ exists for $\mu^{\Z}$-almost every $w$; it will be denoted by $\mathfrak{n}u_{w}$, or sometimes $\mathfrak{n}u_b$. These limit measures satisfy a key equivariance property which says that for $\mu^{-\N^\mathfrak{a}st}$-a.e. $b \in H^{-\N^\mathfrak{a}st}$, we have $\mathfrak{n}u_{T^{-1}b}=b_{-1}^{-1}\mathfrak{n}u_b$ or equivalently, for $\mu^{-\N^\mathfrak{a}st}$-a.e. $b \in H^{-\N^\mathfrak{a}st}$ and $\mu$-a.e. $a_0 \in H$, we have $\mathfrak{n}u_{ba_{0}}=a_0\mathfrak{n}u_b$. Stationary measures can also be seen as part of invariant measures on skew-product systems. Let $\mathfrak{h}at{Y}$ denote the product $H^\Z \times Y$ and $\mathfrak{h}at{T}$ the skew-shift given by $\mathfrak{h}at{T}((b,a),y)=(T(b,a), a_0 y)$. 
A basic fact (see e.g.~\cite[Chapter 2]{bq.book}) is that given a probability measure $\mu$ on $H$, any $\mu$-stationary measure on $Y$ gives rise to a $\hat{T}$-invariant measure on $\hat{Y}$ that projects onto $\mu^\Z$ on the $H^\Z$ factor: indeed, given a $\mu$-stationary measure $\nu$, the measure \[ \hat{\nu}=\int \delta_{w} \otimes \nu_{w}\, d\mu^\Z(w) \] defines a $\hat{T}$-invariant measure. The measure $\hat{\nu}$ is $\hat{T}$-ergodic if and only if $\nu$ is $\mu$-ergodic \cite[Chapter 2.6]{bq.book}. \subsubsection{Stationary measures on the flag varieties}\label{subsub.stat.meas.flag} Let $H$ be a real semisimple linear Lie group and $P$ a parabolic subgroup. According to a fundamental result of Furstenberg \cite{furstenberg.boundary.theory} (generalized to the current form by Guivarc'h--Raugi \cite{guivarch-raugi} and Goldsheid--Margulis \cite{goldsheid-margulis}), for any Zariski-dense probability measure $\mu$ on $H$, there exists a unique $\mu$-stationary probability measure on $H/P$. We shall refer to this measure as the Furstenberg measure and denote it by $\overline{\nu}_F$. Recall that a stationary probability measure $\nu$ is said to be \textbf{$\mu$-proximal} if the limit measures $\nu_b$ are Dirac measures $\mu^{-\N^\ast}$-a.s. We will also say that the $H$-action on a space $Y$ is \textbf{$\mu$-proximal} if every $\mu$-stationary and ergodic probability measure $\nu$ on $Y$ is $\mu$-proximal. For a Zariski-dense probability measure $\mu$, the Furstenberg measure (and hence the $H$-action on $H/P$) is $\mu$-proximal.
If $H$ acts $\mu$-proximally on a space $Y$, then every $\mu$-stationary probability measure $\nu$ induces a boundary map $w=(b,a) \mapsto \xi(w)=\xi(b)$ defined $\mu^{\Z}$-a.s.\ satisfying $\nu_w=\delta_{\xi(w)}$ and the equivariance property $b_{-1}\xi(T^{-1}w)=\xi(w)$. Conversely, a boundary map $\xi$ with the last equivariance property induces a $\mu$-proximal stationary probability measure. We will use the shorthand $P_{\mu}^{\erg}(Y)$ to denote the set of $\mu$-stationary and ergodic probability measures on $Y$. \subsubsection{Limit measures on the fibre}\label{subsub.fibre.measures} Let $Y_0$ and $Y=Y_0 \times F$ be $H$-spaces such that the projection $Y \to Y_0$ is $H$-equivariant. Let $\nu$ be a $\mu$-stationary probability measure on $Y$ such that its projection $\overline{\nu}$ on $Y_0$ (which is also automatically $\mu$-stationary) is $\mu$-proximal. Then, for $\mu^{-\N^\ast}$-almost every $b$, the limit measure $\nu_{b}$ on $Y \simeq Y_0 \times F$ is of the form $\delta_{\xi(b)}\otimes \tilde{\nu}_{b}$, where $\xi: H^{-\N^\ast} \to Y_0$ is a measurable equivariant map (i.e.~$\mu^{\Z}$-a.s.\ $\xi(ba_0)=a_0\xi(b)$) and $\tilde{\nu}_{w}=\tilde{\nu}_{b}$ is a probability measure on $F$. \subsection{Measure classification for product systems with equivariant projections}\label{subsec.meas.class.sec2} In the following result, we record, in a general setting, a description of stationary measures for actions on product spaces with equivariant projections on both factors. It is based on the Furstenberg decomposition of a stationary measure into its limit measures, i.e.~$\nu=\int \nu_b\, d\mu^{-\N^\ast}(b)$. \begin{proposition}\label{prop.decomposable.measure.class} Let $H$ be a lcsc group, and let $Y_0$ and $F$ be lcsc $H$-spaces.
Consider the $H$-action on $Y=Y_0 \times F$ for which both projections $Y \to Y_0$ and $Y \to F$ are $H$-equivariant. Let $\mu$ be a probability measure on $H$ such that the $H$-action on $Y_0$ or $F$ is $\mu$-proximal. Then, we have \[ P_\mu^{\erg}(Y) \simeq P_\mu^{\erg}(Y_0)\times P_\mu^{\erg}(F). \] More precisely, the map \begin{equation}\label{eq.bijection.map.decomposable} \nu \mapsto (\overline{\nu},\nu^F) \end{equation} is a bijection, where the latter are, respectively, the pushforwards of $\nu$ by the projections $Y \to Y_0$ and $Y \to F$. \end{proposition} The assumption of proximality induces a certain disjointness between the two factors; it is clear that without such an assumption the conclusion fails (e.g.~if $Y_0$ and $F$ have a common non-trivial factor). \begin{proof} Note that since the projections $Y \to Y_0$ and $Y \to F$ are both $H$-equivariant, the pushforward measures $\overline{\nu}$ and $\nu^F$ are both $\mu$-stationary. Moreover, it is clear that if $\nu$ is ergodic, then so are $\overline{\nu}$ and $\nu^F$. Let us show that the map $P_\mu^{\erg}(Y) \ni \nu \to P_\mu^{\erg}(Y_0)\times P_\mu^{\erg}(F)$ given by $\nu \mapsto (\overline{\nu},\nu^F)$ yields the desired bijection. Without loss of generality, let us suppose that the $H$-action on $Y_0$ is $\mu$-proximal and show that the above map is injective. Given $\nu \in P_\mu^{\erg}(Y)$, since the projections to each factor commute with the $H$-action, we have $\mu^{-\N^\ast}$-a.s.\ $\overline{\nu_b}=(\overline{\nu})_b$ and $(\nu_b)^F=(\nu^F)_b$.
Moreover, since the $H$-action on $Y_0$ is $\mu$-proximal and $\overline{\mathfrak{n}u}$ is ergodic, there exists a boundary map $\xi:B \to Y_0$ such that $(\overline{\mathfrak{n}u})_b=\delta_{\xi(b)}$ for $\mu^{-\N^\mathfrak{a}st}$-a.s.~ $b \in H^{-\N^\mathfrak{a}st}$. Therefore, the probability measure $\mathfrak{n}u_b$ is given by $\delta_{\xi(b)} \otimes (\mathfrak{n}u^F)_b$ and hence by the Furstenberg decomposition, we can recover the measure $\mathfrak{n}u$ as $\mathfrak{n}u=\int \delta_{\xi(b)} \otimes (\mathfrak{n}u^F)_b d\mathfrak{b}eta(b)$. This shows that the map $\mathfrak{n}u \mapsto (\overline{\mathfrak{n}u},\mathfrak{n}u^F)$ is injective. Surjectivity does not use the $\mu$-proximality assumption and follows from the fact that both projections $\overline{\mathfrak{n}u}$ and $\mathfrak{n}u^F$ are $\mu$-stationary and ergodic. Indeed, one readily checks that $\int (\overline{\mathfrak{n}u})_b \otimes (\mathfrak{n}u^F)_b d\mathfrak{b}eta(b)$ is a $\mu$-stationary and ergodic probability measure on $Y$. \end{proof} \section{$\SL_2(\R)$-Zariski closure: measure classification}\label{sec.meas.class} We now begin the main part on classifying stationary measures on homogeneous bundles over flag varieties. Following the scheme exposed in the Introduction, in \S \mathfrak{r}ef{sub.base.and.cases}, we start by distinguishing Case 1 (Dirac base) and Case 2 (Furstenberg base) according to the classification in the base followed by a precise description of various possibilities that occur (see Figure \mathfrak{r}ef{figure.cases}) in Case 2. In the rest, we focus on Case 2. \S \mathfrak{r}ef{subsec.case.2.1} treats the trivial fibre case. In \S \mathfrak{r}ef{subsec.case.2.2.diag}, we treat the diagonal fibre action case and prove Theorem \mathfrak{r}ef{thm.measure.class.geod}. Finally, in \S \mathfrak{r}ef{subsec.remaining.case}, we prove Theorem \mathfrak{r}ef{thm.irreducible.H.decompsable} and provide an example for Case 2.3.b. 
\subsection{The setting, classification on the base and the cases}\label{sub.base.and.cases} Let us start by recalling the notations from the introduction. Let $G$ be a semisimple real Lie group with finite centre and $Q<G$ a parabolic subgroup. Let $R_0 \unlhd Q$ be a normal algebraic subgroup and $R<Q$ be a closed subgroup containing $R_0$ such that $S:=Q/R_0$ is semisimple with finite centre and without compact factors and $\Lambda:=R/R_0$ is a discrete subgroup of $S$. We denote by $X$ the quotient space $G/R$. As explained in the introduction, the space $X$ has a natural $G$-equivariant projection to $G/Q$ endowing it with a fibre-bundle structure over the flag variety $G/Q$ with fibres given by copies of $S/\Lambda$. A convenient way to find such subgroup $R_0 \unlhd Q$ as above is by considering the refined Langlands decomposition $Q=S_Q E_QA_QN_Q$ of $Q$ (see e.g.~ \cite[VII,7]{knapp.book}), where $S_Q$ is a semisimple subgroup of $Q$ without compact factors, $E_Q$ is a compact subgroup commuting to $S_Q$, $A_Q$ is a maximal $\R$-split diagonalizable subgroup commuting with $S_QE_Q$, and $N_Q$ is the unipotent radical of $Q$. One can then take the normal subgroup $R_0$ of $Q$ to be of the form $S'_QE_Q A_Q N_Q$ where $S'_Q$ is a simple factor of $S_Q$ or the trivial group. Let $\mu$ be a probability measure on $G$ with finite first moment, $\Gamma_\mu$ the closed semigroup generated by the support of $\mu$ and suppose that the Zariski-closure $\overline{\Gamma}_\mu^Z$ of $\Gamma_\mu$ is a copy of either $\PGL_2(\R)$ or $\SL_2(\R)$ in $G$. We will denote this Zariski closure by $H$. On our way to establishing a description of $\mu$-stationary probability measures $\mathfrak{n}u$ on $X$, we remark that a measure $\mathfrak{n}u$ on $X$ determines, and in turn is determined by, its projection on $G/Q$ via $\mathfrak{p}i: X \to G/Q$ and the fibre measures of this projection. 
Therefore, we proceed by first discussing the possible $\mu$-stationary measures on the base $G/Q$.
\subsubsection{Stationary measures on the base}\label{subsub.stat.meas.base}
Since the projection $X \to G/Q$ is $G$-equivariant, any $\mu$-stationary probability measure $\nu$ on $X$ projects down to a $\mu$-stationary probability measure $\overline{\nu}$ on the base $G/Q$. The description of stationary measures on the base is handled by Guivarc'h--Raugi \cite{guivarch-raugi} and by Benoist--Quint \cite{BQ.compositio}. We have the following.
\begin{lemma}\label{lemma.base.cases}
There exists a bijection between $\mu$-stationary and ergodic probability measures on $G/Q$ and compact $H$-orbits on $G/Q$.
\end{lemma}
\begin{proof}
It is clear that any compact $H$-orbit carries a $\mu$-stationary and ergodic probability measure. Conversely, let $\nu$ be a $\mu$-stationary and ergodic probability measure. By the Chacon--Ornstein ergodic theorem, there exists $x \in G/Q$ such that $\frac{1}{N} \sum_{k=1}^N \mu^{\ast k} \ast \delta_x \to \nu$ as $N \to \infty$. So, in particular, $\nu$ is supported in the compact set $\overline{\Gamma_\mu x}$. Since $G/Q$ is a flag variety, the orbit $Hx$ is locally closed (see e.g.~\cite[Theorem 3.1.1]{zimmer.book}). Hence the compact set $\overline{\Gamma_\mu x}$ is contained in $Hx$. Moreover, since $Hx$ is locally compact, up to conjugating $Q$, it is ($H$-equivariantly) homeomorphic to $H/Q_H$ where $Q_H=H \cap Q$. It now follows from \cite[Proposition 5.5]{BQ.compositio} that $Q_H$ is cocompact in $H$ and $\nu$ is the unique $\mu$-stationary and ergodic probability measure supported in $H/Q_H \simeq Hx$. This concludes the proof.
\end{proof}
It follows from this result that there are two types of $\mu$-stationary and ergodic probability measures on the base $G/Q$. The first type, which we will refer to as \textbf{Case 1}, consists of Dirac measures.
This happens if and only if $H$ is contained in a conjugate of $Q$. The second type (\textbf{Case 2}) is the Furstenberg measure supported in a copy of $H/P$ in $G/Q$, where $P$ is a parabolic subgroup of $H$. This happens if and only if $H$ intersects a conjugate of $Q$ in a parabolic subgroup. Note that both types of stationary measures can be simultaneously present in a $G$-homogeneous bundle $X$. Our study will primarily concern the analysis of stationary probability measures falling in Case 2. Indeed, as we now discuss, Case 1 is handled precisely by the seminal works of Benoist--Quint \cite{BQ1,BQ2} and Eskin--Lindenstrauss \cite{eskin-lindenstrauss.long}.
\subsubsection{Case 1: Dirac base}\label{subsub.dirac.base}
Let us observe that the results of Benoist--Quint \cite{BQ1,BQ2} and Eskin--Lindenstrauss \cite{eskin-lindenstrauss.long} imply the following.
\begin{proposition}
Keep the setting above and let $\nu$ be a $\mu$-stationary and ergodic probability measure on $X$ whose projection onto $G/Q$ is a Dirac measure. Then the fibre measure $\nu^F$ of $\nu$ on $(Q/R_0)/(R/R_0)\simeq S/\Lambda$ is homogeneous.
\end{proposition}
This result follows from a direct application of \cite[Theorem 1.3]{eskin-lindenstrauss.long}, which extends the main measure classification results of Benoist--Quint \cite{BQ2} with regard to the moment assumption and the fact that the group $\Lambda$ is only required to be discrete.
\begin{proof}
By assumption, the projection $\overline{\nu}$ of $\nu$ is a Dirac measure $\delta_{gQ}$ on $G/Q$. Replacing $Q$ by a conjugate if necessary, we can suppose that $H<Q$ and that $g=\id$. The fibre of the map $X \to G/Q$ above $\id Q$ identifies $Q$-equivariantly with $Q/R \simeq (Q/R_0)/(R/R_0)$. Identifying $\mu$ with its image in $HR_0/R_0<Q/R_0$, the measure $\nu^F$ is $\mu$-stationary and ergodic.
Therefore, it follows from \cite[Theorem 1.3]{eskin-lindenstrauss.long} that $\nu^F$ is a homogeneous measure.
\end{proof}
\subsubsection{Case 2: Furstenberg base}\label{subsub.furstenberg.base}
The rest of this section is devoted to the analysis of the remaining case, i.e.~the description of a $\mu$-stationary and ergodic probability measure on $X$ whose projection to $G/Q$ is non-atomic (and which is consequently the Furstenberg measure on a copy of $H/P$ in $G/Q$, where $P$ is a parabolic subgroup of $H$; see Lemma \ref{lemma.base.cases}). In this case, $H$ intersects a conjugate $gQg^{-1}$ of $Q$ in a parabolic subgroup. By conjugating $Q$ if necessary, we can and will suppose that $g=\id$, i.e.~$P=Q_H:=H \cap Q$ and $\nu$ lives in $\pi^{-1}(H/P)$. As before, the analysis of stationary measures will vary depending on the relative position of $H$ with respect to the parabolic group $Q$ within the ambient group $G$. We will distinguish three cases that will be dealt with separately in the following subsections.\\
\noindent \textbullet ${}$ \textit{Case 2.1}: Trivial fibre action. This is a trivial case that occurs when the parabolic subgroup $Q_H^o$ of $H$ is contained in $R_0$.\\[4pt]
\textbullet ${}$ \textit{Case 2.2}: Diagonal fibre action. This is the case when $Q_H^o \cap R_0$ is a proper non-trivial subgroup of $Q_H^o$. As we shall see, this positioning gives rise to a situation where the $S$-valued cocycle describing the fibre action has values in a diagonal subgroup of $S$.\\[4pt]
\textbullet ${}$ \textit{Case 2.3}: This is the remaining case, i.e.~the case where $Q_H^o \cap R_0$ is trivial. Interestingly, the analysis in this case depends on further properties of $H$ with respect to $G$ that we will explain. Accordingly, our analysis will involve two subcases (\textit{2.3.a} and \textit{2.3.b}). In this paper, we will not be able to give the full description of stationary measures in the last of these two subcases.
\subsection{Case 2.1: Trivial fibre action}\label{subsec.case.2.1} We express the description in this simple case in the following result. \mathfrak{b}egin{proposition}\label{prop.trivial.fibre.measure.class} Let the space $X$ and groups $G,Q,R_0,R,S\simeq Q/R_0,\Lambda\simeq R/R_0$ and $H$ be as defined before. Suppose that $Q_H^o$ is contained in $R_0$. Then, there exists a standard trivialization $X \simeq G/Q \times S/\Lambda$ such that any $\mu$-stationary and ergodic probability measure $\mathfrak{n}u$ on $X_\mathcal{C}$ can be written as $\overline{\mathfrak{n}u}_F \otimes \delta_{q\Lambda}$ a product of the Furstenberg measure $\overline{\mathfrak{n}u}_F$ with a Dirac measure $\delta_{q\Lambda}$ for some $q \in Q$. \end{proposition} \mathfrak{b}egin{proof} Start by noting that since $Q_H$ is a (Zariski) connected algebraic group, $Q_H^o<R_0$ implies that $Q_H<R_0$. Now fix any standard trivialization $X_\mathcal{C} \simeq H/Q_H \times S/\Lambda$, let $\mathfrak{b}eta$ be the associated cocycle. The hypothesis $Q_H<R_0$ then entails that the associated morphism $\mathfrak{r}ho_\mathfrak{b}eta:Q_H \to S$ has trivial image. In particular $\mathfrak{r}ho_\mathfrak{b}eta$ extends trivially to a morphism $H \to S$ and hence by Proposition \mathfrak{r}ef{prop.decomposable}, the $H$-action on $X_\mathcal{C}$ is decomposable. Therefore there exists a standard trivialization $X_\mathcal{C} \simeq H/Q_H \times S/\Lambda$ for which the associated cocycle is morphism-type with trivial morphism. The result follows. \end{proof} Here are two examples where the trivial fibre action situation arises. \mathfrak{b}egin{example}\label{ex.case.2.1} 1. (Trivial example) Let $G=\SL_4(\R)$, $Q$ be the minimal parabolic subgroup preserving the standard full flag in $\R^4$. Let $H$ be the copy of $\SL_2(\R)$ on the top-left corner, i.e.~ acting on the plane generated by the standard basis elements $e_1$ and $e_2$. In this case, $R$ is necessarily equal to $R_0$ which is $Q$ itself. 
2. Let $G=\SL_4(\R)$, $Q$ be the parabolic subgroup stabilizing the plane generated by the first two vectors $e_1,e_2$ of the standard basis of $\R^4$, $H$ be the reducible representation given by the sum of the standard representation of $\SL_2(\R)$ on the planes generated by the basis vectors $e_1,e_4$ and $e_2,e_3$; $$ Q= \left \{ \mathfrak{b}egin{pmatrix} \mathfrak{a}st & \mathfrak{a}st & \mathfrak{a}st & \mathfrak{a}st \\ \mathfrak{a}st & \mathfrak{a}st & \mathfrak{a}st & \mathfrak{a}st \\ 0 & 0 & \mathfrak{a}st & \mathfrak{a}st \\ 0 & 0 & \mathfrak{a}st & \mathfrak{a}st \\ \end{pmatrix} \mathfrak{r}ight \}, \mathfrak{q}uad \mathfrak{q}uad H = \left \{ \mathfrak{b}egin{pmatrix} a & 0 & 0 & b \\ 0 & a & b & 0 \\ 0 & c & d & 0 \\ c & 0 & 0 & d \\ \end{pmatrix} | \mathfrak{b}egin{pmatrix} a & b \\ c & d \end{pmatrix} \in \SL_2(\R) \mathfrak{r}ight \}. $$ We can take $R$ to be the group generated by $R_0$ which is the solvable radical of $Q$ and $\SL_2(\Z) \times \SL_2(\Z)$ acting in the standard way on the planes generated by $e_1,e_2$ and $e_3,e_4$. \end{example} \mathfrak{b}egin{remark} Case 1 and Case 2.1 work more generally and we only need the assumption that $H$ is a real semisimple linear Lie group. \end{remark} \subsection{Case 2.2: Diagonal fibre action}\label{subsec.case.2.2.diag} The main goal of this part is to prove Theorem \mathfrak{r}ef{thm.measure.class.geod}. We start by discussing an example to which this result applies. \mathfrak{b}egin{example}\label{ex.reducible} We start by recalling Example \mathfrak{r}ef{ex.ss}. Let $X$ be the space of $2$-lattices in $\R^3=V$ up to homotheties of $V$. The space $X$ with its natural topology admits a continuous transitive action of $G=\SL_3(\R)$. Let $y_0<V$ be a copy of $\R^2$ generated by $e_1,e_2$, where $e_i$'s denote the standard base elements of $V$ and $x_0$ be the class of $\Z^2$ in $y_0$, $R$ its stabilizer in $G$ and $Q$ the parabolic subgroup of $G$ stabilizing $y_0$. 
Note that the connected component $R_0$ of $R$ is the solvable radical of $Q$ and we have $R<Q$ so that the space $X$ is a $G$-homogeneous bundle over $G/Q$ with fibres $Q/R \simeq \PGL_2(\R)/\PGL_2(\Z)$. Let $\mathfrak{p}i:X \to G/Q$ denote the natural projection associating to a class of $2$-lattice the $2$-plane that it generates. Let $H$ be a copy of $\SL_2(\R)$ given by the classes of matrices of the form $ \mathfrak{b}egin{pmatrix} 1 & 0 & 0 \\ 0 & a & b \\ 0 & c & d \end{pmatrix}$, where $ \mathfrak{b}egin{pmatrix} a & b \\ c & d \end{pmatrix} \in \SL_2(\R)$. This configuration falls into Case 2.2: indeed, $Q_H=H \cap Q$ is a parabolic subgroup of $H$ and $Q_H^\circ \cap R_0$ is the unipotent radical of $Q_H^\circ$. One can also see in explicitly how the one-dimensional split subgroup of $S$, for which Theorem \mathfrak{r}ef{thm.measure.class.geod} proves invariance, appears: let $\mathcal{C}$ be the $H$-invariant circle of 2-planes given by $\langle e_1, t e_2+se_3 \mathfrak{r}angle$ for $t,s \in \R$ and $X_{\mathcal{C}}$ be the bundle over $\calC$ given by the closed subset of $X$ given by $2$-lattices contained in subspaces belonging to $\mathcal{C}$. We can choose an explicit Borel section $s:H/Q_H \to H$ as follows to obtain a standard trivialization of $X_\mathcal{C}$: given a vector space $y=\langle e_1, t e_2+se_3 \mathfrak{r}angle \in \mathcal{C} \simeq H/Q_H$, we can associate the class of the matrix $R_{\theta(y)}:=\mathfrak{b}egin{pmatrix} \cos \theta(y) & -\sin \theta(y) \\ \sin \theta(y) & \cos \theta(y) \end{pmatrix} $ with $\theta(y) \in [0,\mathfrak{p}i)$ chosen so that $R_{\theta(y)}$ seen in $H$ sends $y_0$ on $y$. The resulting trivialization writes as \mathfrak{b}egin{equation*} \mathfrak{b}egin{aligned} X_\mathcal{C} &\simeq H/Q_H \times F\\ x &\mapsto (\mathfrak{p}i(x), R_{\theta(\mathfrak{p}i(x))}^{-1}(x)). 
\end{aligned} \end{equation*} A straightforward calculation shows that the $S \simeq \PGL_2(\R)$-valued cocycle $H \times H/Q_H \to S$ given by this trivialization takes values in the full diagonal subgroup $D^{\mathfrak{p}m}<\PGL_2(\R)$ and coincides with the Iwasawa cocycle of $H$ up to a sign, which will be defined in the following section. It follows then from Theorem \mathfrak{r}ef{thm.measure.class.geod}, Remark \mathfrak{r}ef{rk.conversely.to.diagonal.thm} and uniqueness of $\mu$-stationary measure on $H/Q_H$ that there is a bijection between diagonal-flow (or an index-two extension of it) invariant probability measures on $\PGL_2(\R)/\PGL_2(\Z)$ and $\mu$-stationary probability measures on $X_\mathcal{C}$. The difference between diagonal invariance and the index-two extension is a minor one related to the sign group. This is discussed further below in \S \mathfrak{r}ef{subsub.iwasawa.sign}. Note finally that Case 1 also appears within this same example, namely the singleton corresponding to the two-plane generated by $\{e_2,e_3\}$ is $H$-invariant. \end{example} \mathfrak{b}igskip The rest of this Subsection \mathfrak{r}ef{subsec.case.2.2.diag} is devoted to the proof of Theorem \mathfrak{r}ef{thm.measure.class.geod}. \subsubsection{Iwasawa cocycle and representation theory}\label{subsub.iwasawa} Let $H$ denote either the group $\SL_2(\R)$ or $\PGL_2(\R)$. Let $K$ be a maximal compact subgroup of $H$ and $P$ be a minimal parabolic subgroup so that we have the decomposition $H=K^oP $. Let $D$ be the maximal connected diagonal subgroup of $P$, $N$ the unipotent radical of $P$ and $M=K \cap P$. Let $H/P$ be the flag variety of $H$. Given $h \in H$ and $\xi=kP \in H/P$, we denote by $\sigma(h,\xi)$ the unique element of $D$ such that \mathfrak{b}egin{equation}\label{eq.iwasawa.characterizing} hk \in K \sigma(h,\xi) N. 
\end{equation} This map $\sigma: H \times H/P \to D$ defines a continuous cocycle (see \cite[Lemma 8.2]{bq.book}), called the \textit{Iwasawa cocycle}. The morphism $\mathfrak{r}ho_\sigma$ associated with the Iwasawa cocycle is simply the projection $P \to D \simeq P/MN$. An alternative way, more in the spirit of Section \mathfrak{r}ef{sec.prelim}, to construct the Iwasawa cocycle is as follows. Consider $H$ as a fibre bundle over $H/P$ and let $s$ be a section $s:H/P \to H$ given by the Iwasawa decomposition, namely $s(kP)\in kM$ for $k \in K^\circ$. We then get a trivialization $H \simeq H/P \times P$ (see \eqref{eq.trivialization}) and an associated cocycle $\tilde{\sigma}:H \times H/P \to P$ (see \eqref{eq.construct.cocycle}). It is not hard to verify that the cocycle obtained by composing $\tilde{\sigma}$ with the projection $P \to P/MN \simeq D$ satisfies the characterizing property \eqref{eq.iwasawa.characterizing} of the Iwasawa cocycle. Regarding the section $s$, for $\PGL_2(\R)$ case, we can define it canonically to have values in $K^\circ$. For $\SL_2(\R)$ case, we need to make a choice in $kM$ so that $s(kP)$ is a Borel section. Even though the cocycle $\tilde{\sigma}$ depends on $s$, by \eqref{eq.construct.cocycle}, since the ambiguity $M$ is in the centre, we know that Iwasawa cocycle does not depend on the choice of the value of $s(kP)$ in $kM$. In the course of our proofs, sometimes it will be more convenient to switch to the additive notation for cocycles. Let $\mathfrak{d}$ be the Lie algebra of $D$. For a $D$-valued cocycle $\mathfrak{a}lpha$, we will denote by $\overline{\mathfrak{a}lpha}$, the $\mathfrak{d}$-valued cocycle obtained by composing $\mathfrak{a}lpha$ with the logarithm map $D \to \mathfrak{d}$. 
Given an algebraic irreducible representation $\mathfrak{r}ho: H \to \GL(V)$ of $H$ in a finite dimensional real vector space $V$, for every character $\chi$ of $D$, the associated weight space is $V^\chi=\{v \in V : \mathfrak{r}ho(a) v= \chi(a)v \; \; \text{for every} \; a \in D \}$. The set of characters $\chi$ for which $V^\chi \mathfrak{n}eq \{0\}$ is called the set of (restricted) weights of $(V,\mathfrak{r}ho)$ and denoted $\Sigma(\mathfrak{r}ho)$. For a character $\chi$ of $D$, we denote by $\overline{\chi}$ the corresponding additive character on $\mathfrak{d}$. The set $\Sigma(\mathfrak{r}ho)$ is endowed with an order: $\overline{\chi}_1 \mathfrak{g}eqslant \overline{\chi}_2$ if and only if $\overline{\chi}_1 - \overline{\chi}_2$ is a sum of positive roots of $H$ in $\mathfrak{d}$. $\Sigma(\mathfrak{r}ho)$ has a largest element $\chi$, called the highest weight of $\mathfrak{r}ho$. The corresponding eigenspace is the subspace $V^N$ of $N$-fixed vectors. Since $H$ is $\R$-split, this is a line in $V$. For an element $\eta=gP$ in the flag variety $H/P$, we denote by $V_\eta$ the line $gV^N$ in $V$ constructing a map $H/P \to \mathbb{P}(V)$. The following lemma will be important for our considerations; it will allow us to control the Iwasawa cocycle. \mathfrak{b}egin{lemma}\cite[Lemma 6.33]{bq.book}\label{lemma.iwasawa.norm} Let $(V,\mathfrak{r}ho)$ be an algebraic irreducible representation of $H$ with the highest weight $\chi$. Then, there exists a $K$-invariant Euclidean norm $\|.\|$ on $V$ such that for every element $a \in D$, $\mathfrak{r}ho(a)$ is a symmetric endomorphism of $V$. Moreover, for every $\eta \in H/P$, non-zero $v \in V_\eta$ and $h \in H$, we have $$ \overline{\chi}(\overline{\sigma}(h,\eta))=\log \frac{\|\mathfrak{r}ho(h)v\|}{\|v\|}. 
$$ \end{lemma} \subsubsection{Iwasawa cocycle, sign group and standard trivialization}\label{subsub.iwasawa.sign} The goal of this part is to obtain a lemma (Lemma \mathfrak{r}ef{lemma.its.iwasawa} below) which, for a standard trivialization, expresses the action of $H$ on the fibres of the subbundle $X_{H/Q_H}$ of $X \to G/Q$ with the Iwasawa cocycle of the group $H$, up to a sign. Recall that in Case 2.2, $R_0 \cap Q_H^\circ$ is a non-trivial proper subgroup of $Q_H^\circ$. Since $R_0$ is normal in $Q$, $R_0 \cap Q_H^\circ$ is also normal in $Q_H^\circ$. It follows that this intersection is the unipotent radical of $Q_H^\circ$. Therefore the projection of $Q_H^\circ$ to $S$ given by $Q_H^\circ/(Q_H^\circ\cap R_0)$ is a connected split torus. We will denote by $D$ the image of $Q_H^\circ$ in $S$ obtained by projection. Let $D^{\mathfrak{p}m}$ be the algebraic $\R$-split torus containing $D$. Then $Q_H/(Q_H\cap R_0)$, the image of $Q_H$ in $S$, is contained in $D^{\mathfrak{p}m}$. The group $D^{\mathfrak{p}m}\simeq \R^*$ is isomorphic to $D\times(\Z/2\Z)\simeq \R_{>0}\times \{\mathfrak{p}m 1\}$ in $S$. In order to treat the sign problem of the cocycle in $D^\mathfrak{p}m$, we need to go to the two-fold cover space $K$ of $H/Q_H$ to recover the information of the sign. Here we need to distinguish two cases in a similar way for both $H \simeq \SL_2(\R)$ or $\PGL_2(\R)$. For $H \simeq \SL_2(\R)$ case: Let $V=\R^2$. A convex cone in $V$ is called proper if it does not contain a line. From Iwasawa cocycle, or just from the action of $\SL_2(\R)$ on $\mathbb{S}^1\subset V$, we have a group action of $\SL_2(\R)$ on $K\simeq \mathbb{S}^1$. Guivarc'h and Le Page \cite[Proposition 2.14]{GLP} proved that if $\Gamma_\mu$ preserves a closed proper convex cone in $V$ then there exist two $\mu$-stationary and ergodic measures $\mathfrak{n}u_1$ and $\mathfrak{n}u_2$ on the circle $K$. 
The supports of these two measures are just the inverses of each other, and we denote them by $\Lambda_1$ and $-\Lambda_1$, respectively. Otherwise, there exists a unique $\mu$-stationary measure on $K$. We now distinguish two cases depending on the action of $\Gamma_\mu$ on $K$. \mathfrak{b}egin{itemize} \item Case 2.2.a: $\Gamma_\mu$ preserves a closed proper convex cone in $V$. In this case we take a section $s:K/M\to K$ such that $s$ takes values in a half circle containing $\Lambda_1$. \item Case 2.2.b: Otherwise. We just take a section $s:K/M\to K$. There is no better choice in this case. \end{itemize} For $H\simeq \PGL_2(\R)$ case: The maximal compact subgroup $K$ has two connected components and each component is isomorphic to $H/Q_H$. In this case, we take the section $s:K/M\to K$ in the connected component of $K$. \mathfrak{b}egin{itemize} \item Case 2.2.a: If $\Gamma_\mu$ is inside the connected component $\PGL_2(\R)^\circ$. \item Case 2.2.b: Otherwise. In this case, we have a unique $\mu$-stationary measure on $K$, which has weight $1/2$ on each connected component. \end{itemize} We mention that unlike other main cases (those appearing in Figure \mathfrak{r}ef{figure.cases}), Cases 2.2.a or b depend on $\Gamma_\mu$ rather than the Zariski closure $H$ itself. For $H$ equal to either $\SL_2(\R)$ or $\PGL_2(\R)$, from now on we distinguish Case 2.2.a and Case 2.2.b, and choose a Borel section $s:H/Q_H\simeq K/M\to K<H$ as specified above. We define a sign function on $K$ by \[\mathrm{sg}(k):=k^{-1}s(kM)\in M\simeq \Z/2\Z. \] We define a sign cocycle with respect to the section $s$ for $g\in H$ and $\eta\in K/M$ by \[\mathrm{sg}(g,\eta):=\mathrm{sg}(k)\mathrm{sg}(k_g)=k^{-1}s(kM)k_g^{-1}s(k_gM), \] where $k$ is a preimage of $\eta$ in $K$ and $k_g$ is the $K$-part of $gk\in k_g\sigma(g,k)N$ in the Iwasawa decomposition. The value of $\mathrm{sg}$ does not depend on the choice of preimage $k$. 
In Case 2.2.b, with this sign function, we can recover the sign in $D^\mathfrak{p}m$ of the cocycle $\mathfrak{a}lpha$. Recall the quotient map $Q_H$ to $S$, whose image is $Q_H/(Q_H\cap R_0)<D^{\mathfrak{p}m}$. If $Q_H/(Q_H\cap R_0)=D$, then there is no ambiguity about the sign. In the following, in order to simplify the notation, we suppose that we are in the case where $Q_H/(Q_H\cap R_0)=D^\mathfrak{p}m$. The proof of the case $Q_H/(Q_H\cap R_0)=D$ is simpler; the sign cocycle disappears, or equivalently, it is constant with value identity. \mathfrak{b}egin{lemma}\label{lem:sign}\label{lemma.its.iwasawa} Under the above choice of the section $s$, for $g\in H$ and $\eta\in H/Q_H$, as an element in $D^\mathfrak{p}m$, we have \[\mathfrak{a}lpha(g,\eta)=(\sigma(g,\eta),\mathrm{sg}(g,\eta)). \] In particular, for Case 2.2.a, when $\eta$ is in the support of the Furstenberg measure and $g\in\Gamma_\mu$, the cocycle $\mathfrak{a}lpha$ coincides with the Iwasawa cocycle. \end{lemma} \mathfrak{b}egin{proof} By definition of the Borel section $s$ and cocycle $\mathfrak{a}lpha$, \[\mathfrak{a}lpha(g,\eta)=s(g\eta)^{-1}gs(\eta)R_0\in S=Q/R_0. \] The Iwasawa cocycle is defined by \[\sigma(g,\eta)=k_g^{-1}gkN. \] Recall that $k$ is a preimage of $\eta$ in $K$ and $k_g$ is the $K$-part of $gk\in k_g\sigma(g,k)N$ in the Iwasawa decomposition. The difference of the sign comes from the product of the differences of the signs of $k,s(\eta)$ and $k_g,s(g\eta)$. By definition of the sign cocycle, we obtain the formula for $\mathfrak{a}lpha(g,\eta)$. Regarding the second statement, in the case of $H \simeq \PGL_2(\R)$, it is a consequence of positive determinant. For $H \simeq \SL_2(\R)$, since the action of $\Gamma_\mu$ preserves $\Lambda_1$ inside $K$, if we take $k$ in $\Lambda_1$, then $k_g$ is still in $\Lambda_1$ for $g\in \Gamma_\mu$. In this case we obtain that the sign cocycle $\mathrm{sg}$ is identically equal to $\id$ for the $\Gamma_\mu$-action, whence the claim. 
\end{proof} \mathfrak{b}egin{comment} \mathfrak{b}egin{lemma} Let $X \simeq G/Q \times Q/R$ be a standard trivialization and $\mathfrak{a}lpha:H \times H/Q_H \to S$ the restriction of the associated $S$-valued cocycle. Then, the cocycle $\mathfrak{a}lpha$ is equivalent to a cocycle $\mathfrak{a}lpha':H\times H/Q_H\to D^{\mathfrak{p}m}$, whose projection to $D$ is the Iwasawa cocycle of $H$ with respect to $Q_H$. \end{lemma} The claim in Remark \mathfrak{r}ef{} \label{rk.when.pgl2.start.of.proof} In the sequel, in order to avoid burdening the proof of Theorem \mathfrak{r}ef{thm.measure.class.geod} with new notations, we will suppose that $\Gamma_\mu<H^\circ$. This is pertinent only when $H=\PGL_2(\R)$. See Remark \mathfrak{r}ef{rk.when.pgl2.end.of.proof} for an indication of how one can restrict to this case. \mathfrak{b}egin{proof} Recall that by definition of a standard trivialization, the morphism $Q \to Q$ associated to the cocycle $\tilde{\mathfrak{a}lpha}: G \times G/Q \to Q$ inducing $\mathfrak{a}lpha: G \times G/Q \to S$ (by composing with the projection $Q \to Q/R_0 \simeq S$) is conjugate to the identity $Q \to Q$. So the morphism $\mathfrak{r}ho_\mathfrak{a}lpha:Q_H \to S$ is conjugate to the projection, where the projection is given by $Q_H\to D^{\mathfrak{p}m}=Q_H/(Q_H\cap R_0)<S$. The morphism $\mathfrak{r}ho_\sigma$ induced by the Iwasawa cocycle $\sigma$ is the projection $Q_H \to D$. 
\mathfrak{r}ed{Due to $D^{\mathfrak{p}m}=D\times (\Z/2\Z)$ and the sign part commutes with $D$, by the same proof as Lemma \mathfrak{r}ef{lemma.cocycle.bijection}, we can find another cocycle $\mathfrak{a}lpha'$ equivalent to $\mathfrak{a}lpha$, with values in $D^{\mathfrak{p}m}$ and the projection of $\mathfrak{a}lpha'$ to $D$ is exactly the Iwasawa cocycle $\sigma$.} \end{proof} \end{comment} It follows from this lemma that our choice of the section $s:G/Q \to G$ implies that the cocycle $\mathfrak{a}lpha$ for the associated standard trivialization, projected on $D$, is equal to the Iwasawa cocycle $H \times H/Q_H \to D$. In the rest of this part (Case 2.2), we will work with this choice of coordinates on $X$ (i.e.~ trivialization induced by the section $s$). We will identify the space $Q/R$ with the quotient $S/\Lambda$ where $S\simeq Q/R_0$ and $\Lambda$ is the lattice $R/R_0$. To alleviate the notation, sometimes we will write $F=S/\Lambda$. \subsubsection{Limit measures on the fibre} Let $\mu$ be a Zariski-dense probability measure on $H$ and $\mathfrak{n}u$ be a $\mu$-stationary probability measure on $H/Q_H \times_\mathfrak{a}lpha F$. Recall from \S \mathfrak{r}ef{subsub.stat.meas.flag} that $\mu$ admits a unique stationary probability measure $\overline{\mathfrak{n}u}_F$ on $H/Q_H$ which is also $\mu$-proximal (the Furstenberg measure). It follows (see \S \mathfrak{r}ef{subsub.fibre.measures}) that $\mu^{-\N^\mathfrak{a}st}$-almost every $w^-$, the measure $\mathfrak{n}u_{w^-}$ on $H/Q_H \times_\mathfrak{a}lpha F$ is of the form $\delta_{\xi(w^-)}\otimes \tilde{\mathfrak{n}u}_{w^-}$, where $\xi: H^{-\N^\mathfrak{a}st} \to H/Q_H$ is a measurable equivariant map (i.e.~ $\xi(ba_0)=a_0\xi(b)$ for $\mu^\Z$-a.e.~ $w$) and $\tilde{\mathfrak{n}u}_{w^-}=\tilde{\mathfrak{n}u}_{b}$ is a probability measure on $F$. 
In view of the equivariance property of $\mathfrak{n}u_{w^-}$ and the fact that the action on the $F$-coordinate is given by the cocycle $\mathfrak{a}lpha$ over $H/Q_H$, the measure $\tilde{\mathfrak{n}u}_{b}$ satisfies the following equivariance formula for $\mu^{-\N^\mathfrak{a}st}$-a.e.~ $b \in H^{-\N^\mathfrak{a}st}$ and $\mu$-a.e.~ $a_0 \in H$, \mathfrak{b}egin{equation}\label{eq.equiv.cocycle} \tilde{\mathfrak{n}u}_{ba_0}=\mathfrak{a}lpha(a_0,\xi(b))\tilde{\mathfrak{n}u}_b. \end{equation} In the sequel, to simplify the notation, we also use the notation $\mathfrak{n}u_{w^-}$ (or $\mathfrak{n}u_w$) to denote the fibre measure $\tilde{\mathfrak{n}u}_{w^-}$. This should not cause confusion. We start with a first claim which will allow us to focus attention on a single generic fibre measure $\mathfrak{n}u_{w^-}$. \mathfrak{b}igskip \mathfrak{n}oindent \textbf{Claim 0:} To prove Theorem \mathfrak{r}ef{thm.measure.class.geod}, it suffices to show that for $\mu^{-\N^\mathfrak{a}st}$-a.e. $w^-$, the measure $\mathfrak{n}u_{w^-}$ on $S/\Lambda$ is $D$-invariant. \mathfrak{b}igskip \textit{Proof of Claim 0:} If we are in Case 2.2.a, by Lemma \mathfrak{r}ef{lemma.its.iwasawa}, the cocycle $\mathfrak{a}lpha$ actually takes values in $D$. From the equivariance formula \eqref{eq.equiv.cocycle} for $\mathfrak{n}u_{w^-}$, it follows that the map $w^- \mapsto \mathfrak{n}u_{w^-}$ is invariant under the inverse of the shift $T$ and hence is almost surely constant, by ergodicity of the map $T^{-1}$. For Case 2.2.b, we need to consider an extension by $\Z/2\Z=\{\mathfrak{p}m 1 \} \simeq M$. We define $T^\mathrm{sg}$ on $H^{\Z}\times \Z/2\Z$ by \[T^\mathrm{sg}(w,j)=(Tw, \mathrm{sg}(w_0,\xi(w^-))j), \] where $w\in H^\Z$ and $j\in \Z/2\Z$. For $\mu^\Z$-a.e. $w\in H^\Z$, we define \[\mathfrak{n}u_{w,1}=\mathfrak{n}u_{w^-},\ \mathfrak{n}u_{w,-1}=(-1)_*\mathfrak{n}u_{w^-}, \] where $(-1)_*$ is understood as the action of $-id\in D^\mathfrak{p}m$. 
Then the formula \eqref{eq.equiv.cocycle} and Lemma \ref{lem:sign} imply
\[
{\nu}_{T^\mathrm{sg}(w,j)}=\sigma(w_0,\xi(w^-)){\nu}_{w,j}.
\]
Now, since the Iwasawa cocycle $\sigma$ takes values in $D$ and the $\nu_{w,j}$'s are $D$-invariant by running the same argument as in Case 2.2.a, we see that it is sufficient to prove that the measure $\beta^\mathrm{sg}:=\mu^\Z\otimes ((\delta_1+\delta_{-1})/2)$ is $T^\mathrm{sg}$-ergodic. We now proceed to prove this. We consider the $\mu^\Z$-a.e.~defined map $p$ from $H^\Z\times \Z/2\Z$ to $H^\Z\times K$, by letting
\[
p(w,j)=(w,k_{w^-}), \text{ where } \mathrm{sg}(k_{w^-})=j,\ k_{w^-}Q_H=\xi(w^-).
\]
Let $\tilde T^\mathrm{sg}(w,k)=(Tw, w_0k)$. The pushforward of the measure $\beta^\mathrm{sg}$ yields the measure
\[
\tilde\beta^\mathrm{sg}:=\int_{H^\Z} \delta_w\otimes ((\delta_{k_{w^-}}+\delta_{-k_{w^-}})/2)\ d\mu^\Z(w)
\]
on $H^\Z\times K$. Then $p$ is a semiconjugacy from $(H^\Z\times\Z/2\Z,T^\mathrm{sg},\beta^\mathrm{sg})$ to $(H^\Z\times K,\tilde T^\mathrm{sg},\tilde \beta^\mathrm{sg})$. The fibre measure $(\delta_{k_{w^-}}+\delta_{-k_{w^-}})/2$ is actually the measure $(\nu_K)_w$ for the unique $\mu$-stationary measure $\nu_K$ on $K$, and $(\nu_K)_w$ is the limit of $b_{-1}\cdots b_{-n}\nu_K$ for $\mu^\Z$-a.e.~$w$. (This measure $(\nu_K)_w$ is a lift of the Dirac mass $\delta_{\xi(w^-)}$ on $H/Q_H$. Since $\nu_K$ is unique, we can verify that the limiting measure has equal mass on the two preimages.) By \cite[Section 2.6]{bq.book}, since $\nu_K$ is $\mu$-ergodic, we know that $\tilde\beta^\mathrm{sg}$ is $\tilde T^\mathrm{sg}$-ergodic. Then from the semiconjugacy $p$, we obtain that $\beta^\mathrm{sg}$ is $T^\mathrm{sg}$-ergodic. The proof is complete.
\qed \begin{remark}[$D^{\pm}$-invariance in Case 2.2.b] In Case 2.2.b, the argument above implies that $\nu_{w,1}=\nu_{w,-1}$ for $\mu^\Z$-a.e.~ $w$. So the fiber measure $\nu^F$ is indeed $D^\pm$-invariant. We will also see later in the equidistribution part that the limiting measure will be $D^\pm$-invariant. \end{remark} \subsubsection{Dynamically defined norms} To obtain the required $D$-invariance for a typical limit measure $\nu_{w^-}$ on the fibre, using the equivariance formula \eqref{eq.equiv.cocycle}, we will be passing to a limit of cocycle differences of type $\alpha(a_m'\ldots a_0', \xi(b))\alpha(a_n \ldots a_0, \xi(b))^{-1}$ for various sequences $b$ and $a$ as well as carefully chosen times $m,n \in \N$. The choice of times and sequences will be made so that the sequences land in some nice compact subset of the shift space and, simultaneously, the cocycle differences are controlled. An important tool for this purpose will be the dynamically defined norms given by the next result. We fix an irreducible algebraic representation $V$ of $H$, where $V$ is a finite-dimensional real vector space. Endow it with a $K$-invariant Euclidean structure and let $\|\cdot\|$ be the standard Euclidean norm on $V$. Here and below, we will also use the shorthand $a^n$ to denote the finite product $a_{n-1} \ldots a_0$ of the corresponding sequence $(a_0,\ldots, a_{n-1}) \in H^n$. We have \begin{proposition}\label{prop.dynnorm} \cite[Proposition 2.3]{eskin-lindenstraus.short} There exists a measurable map $w \mapsto \|.\|_w$ from $H^\Z$ into the space of Euclidean norms on $V$ and a $T$-invariant full measure subset $\Psi$ of $H^{\Z}$ such that for every $w=(b,a) \in \Psi$ and $n\in\N$, letting \[\lambda_1(w,n):=\log\frac{\|a^nv_b \|_{T^n w}}{\|v_b \|_w}, \] there exists $\kappa>1$ such that \[\lambda_1(w,n)\in [1/\kappa,\kappa]n.
\] In particular, due to the cocycle property, for $w\in\Psi$ and $m>n$ in $\N$, \begin{equation}\label{eq.lip.dynnorm} \lambda_1(w,m)-\lambda_1(w,n)\in[1/\kappa,\kappa](m-n) . \end{equation} \end{proposition} We note at this point that the finite first moment assumption in Theorem \ref{thm.measure.class.geod} is required in the proof of the previous proposition in \cite{eskin-lindenstraus.short}. This norm $\|.\|_w$ is called the \textit{dynamically defined norm}. It is chosen with respect to the dynamics such that Proposition \ref{prop.dynnorm} holds. Due to the measurability of $w \mapsto \|.\|_w$, we can always compare the dynamically defined norms and the original norm on a large measure subset of $H^\Z$. \begin{lemma}\cite[Lemma 2.7]{eskin-lindenstraus.short} \label{lemma.comparison.dyn.norm} For every $\delta>0$, there exists a compact subset $K(\delta)$ of $\Psi$ with $\mu^{\Z}(K(\delta))>1-\delta/10$ and a constant $C(\delta)>0$ such that for $v\in V$ and $w\in K(\delta)$ \[ 1/C(\delta)\leqslant \frac{\|v\|_w}{\|v\|}\leqslant C(\delta). \] \end{lemma} We denote by $\chi$ the highest weight of the representation from Lemma \ref{lemma.iwasawa.norm}. Combining Lemmas \ref{lemma.iwasawa.norm} and \ref{lemma.comparison.dyn.norm}, and Proposition \ref{prop.dynnorm}, we deduce the following \begin{corollary}\label{corol.alpha.close.to.dynamical.cocycle} For every $\delta>0$, there exists a compact subset $K(\delta)$ of $H^\Z$ with $\mu^{\Z}(K(\delta))>1-\delta/10$ and a constant $C(\delta)>0$ such that for every $w \in \Psi$ and $n \in \N$ such that $w$ and $T^n w$ are both in $ K(\delta)$, we have \begin{equation}\label{eq.comparison.dyn.norm} |\overline{\chi}(\overline{\alpha}(a_{n-1}\ldots a_0,\xi(b)))-\lambda_1(w,n)|\leqslant C(\delta).
\end{equation} \end{corollary} \begin{proof} Recall from \S \ref{subsub.stat.meas.flag} that given a Zariski-dense probability measure $\mu$ on $H$, we have a map $\xi: H^\Z \to H/P$ defined for $\mu^\Z$-a.e.~ $w=(b,a)$ satisfying $(\overline{\nu}_F)_w=\delta_{\xi(w)}$ and the equivariance property $b_{-1}\xi(T^{-1}w)=\xi(w)$. Recall also (see \S \ref{subsub.iwasawa}) that there exists an $H$-equivariant map $H/P \to \P(V)$ given by $hP \mapsto hV^N$ where $N$ is the unipotent radical of $P$. The image of $\overline{\nu}_F$ under this map is the unique $\mu$-stationary and proximal measure on $\P(V)$. It follows that the line $\R v_w$ is the image of $\xi(w)$ under the map $hP \mapsto hV^N$. Therefore, Lemma \ref{lemma.iwasawa.norm} implies that we have $\overline{\chi}(\overline{\alpha}(h,\xi(w)))=\log \frac{\|h v_w\|}{\|v_w\|}$ for $\mu^\Z$-a.e.~ $w \in H^\Z$. Given $\delta>0$, let $K(\delta)$ and $C(\delta)>1$ be as given by Lemma \ref{lemma.comparison.dyn.norm}, with $C(\delta)$ increased if necessary to satisfy $2 \log C(\delta) \leqslant C(\delta)$. Then, if $w$ and $T^n w $ belong to $K(\delta)$, since $a^n v_w=v_{T^nw}$, by Lemma \ref{lemma.comparison.dyn.norm}, both $\frac{\|a^n v_w\|_{T^nw}}{\|a^nv_w\|}$ and $\frac{\|v_w\|_w}{\|v_w\|}$ belong to $[1/C(\delta), C(\delta)]$. The corollary follows. \end{proof} \subsubsection{Divergence estimates} We also need the following lemma which essentially follows from Oseledets' theorem and Lemma \ref{lemma.comparison.dyn.norm}.
\begin{lemma}\label{lemma.div.est} \cite[Lemma 3.5]{eskin-lindenstraus.short} For every $\delta>0$ and $t_0 \in \N$, there exists a compact subset $K'(\delta,t_0)=K$ of $H^\Z$ with $\mu^\Z(K)>1-\delta/10$ and a constant $C=C(\delta,t_0)>0$ with the following property: for every $w \in K$, $w' \in W^-_1(w) \cap K$ and $t>0$ such that $T^t w \in K$ and $T^tw' \in T^{[-t_0,t_0]}K$, we have $$ |\lambda_1(w,t)-\lambda_1(w',t)| \leqslant C.$$ \end{lemma} Here $W^-_1(w)$ is the local stable leaf of $w$ in the shift space $H^\Z$, i.e.~ $W^-_1(w)=\{w' \in H^\Z : w'_k=w_k, \; \forall k \geqslant 0\}$. \subsubsection{Non-degeneracy of the stationary measure on projective space} \begin{theorem}\cite[Theorem 3.1]{bougerol.lacroix}\label{thm.random.matrix.product} Let $\mu$ be a Zariski-dense probability measure on a linear semisimple $\R$-split group $H$ and let $V$ be an irreducible algebraic representation of $H$. Then, for $\mu^{-\N^\ast}$-a.e.~ $b=(b_{-1},\ldots)$, any limit point $\hat{\pi}_b$ of the sequence $\frac{b_{-1}\ldots b_{-n}}{\| b_{-1}\ldots b_{-n}\|}$ in $\Endo(V)$ has rank one and the same image. Moreover, for any hyperplane $W<V$, the set of $b \in H^{-\N^\ast}$ such that $\mathrm{Im}(\hat{\pi}_b) \subset W$ has zero measure. \end{theorem} The image of any such limit point will be denoted $\R v_b$, i.e.~ $v_b \in V$ denotes a choice of a non-zero unit vector (for the norm $\|.\|$) in the image line. We record the following statement which follows from Theorem \ref{thm.random.matrix.product}. \begin{lemma}\label{lemma.random.lin.form} For $\mu^\N$-a.e.~ $a \in H^\N$, there exists a linear form $\varphi_a$ of unit norm on $V$.
For every such $a$ and for every $\delta>0$, there exist $\epsilon>0$ and a compact subset $K_a(\delta)$ of $H^{-\N^\ast}$ with $\mu^{-\N^\ast}(K_a(\delta))>1-\delta/10$ with the property that if $b, b' \in K_a(\delta)$, we have $|\varphi_a(v_b)|>\epsilon$ and $$ \lim_{n \to \infty} \frac{\|a_{n}\ldots a_0 v_b\|}{\|a_{n}\ldots a_0 v_{b'}\|} =\frac{|\varphi_a(v_b)|}{|\varphi_a(v_{b'})|}. $$ Moreover, for any linear form $\varphi$ on $V$, the set of $a\in H^\N$ such that $\varphi_a\in \R \varphi$ has zero measure. \end{lemma} \begin{proof} Applying Theorem \ref{thm.random.matrix.product} to the sequence of transposes of the $a_i$'s, we get that for $\mu^\N$-a.e.~ $a=(a_0,a_1,\ldots)$, any limit point of the sequence $\frac{a_n \ldots a_0}{\|a_n \ldots a_0\|}=(\frac{a_0^t \ldots a_n^t}{\|a_0^t \ldots a_n^t\|})^t$ of linear transformations is a rank-one linear map; denote it by $\pi_a$. Note that the kernel of $\pi_a$ does not depend on the choice of the limit rank-one transformation. We then define $\varphi_a$ to be the linear form given by orthogonal projection onto the line orthogonal to $\ker\pi_a$. By the transpose relation, we indeed have \[\ker \varphi_a=\ker\pi_a=(\im{\hat{\pi}_a})^\perp, \] where $\hat{\pi}_a$ is a limit point of the sequence $\frac{a_0^t \ldots a_n^t}{\|a_0^t \ldots a_n^t\|}$ and $\pi_a=\hat{\pi}_a^t$. Due to the last claim of Theorem \ref{thm.random.matrix.product}, we obtain the last claim of this lemma. Applying Theorem \ref{thm.random.matrix.product} to $b$, the last claim of Theorem \ref{thm.random.matrix.product} implies that the $\mu^{-\N^\ast}$-measure of the set of $b$'s such that $\ker \pi_a$ contains $v_b$ is zero.
Therefore, given a typical $a$ (i.e.~ in a set of full measure) and $\delta$, there exists a compact set $K_a(\delta)$ in $H^{-\N^\ast}$ such that $\mu^{-\N^\ast}(K_a(\delta))>1-\delta/10$ and for every $b \in K_a(\delta)$, we have $d(\ker \pi_a, v_b)>\epsilon$, where $d$ denotes the projective distance induced by $\|.\|$. That is, $d(\ker \pi_a, v_b)=\frac{|\varphi_a(v_b)|}{\|\varphi_a\|\|v_b\|}$. \end{proof} \subsubsection{Relative density of typical points} We also need one more lemma that will be used to spread the initial invariance obtained via the drift argument for a word $\hat{\omega}$ to a set of words with positive measure. \begin{lemma}\label{lemma.relative.density.typical.orbit} Let $X$ be a separable metric space, $m$ a Borel probability measure on $X$ and $T$ a measurable measure-preserving and ergodic transformation $X \to X$. Then, for any measurable subset $K$ of $X$ with positive $m$-measure, there exists a conull subset $\dot{K}$ such that \[K_1:=\{x\in K : \overline{K\cap T^{\N}x}\supset \dot{K} \}\] is a conull subset of $K$. \end{lemma} \begin{proof} Consider the induced system $(K_0,m|_{K_0}, T^K)$ where $T^K$ is the first return map to the set $K$ and $K_0$ is the conull subset of $K$ on which the points are infinitely recurrent (Poincar\'e recurrence). By ergodicity of $T$, we know that $T^K$ is also ergodic with respect to the measure $m|_{K_0}$. By Birkhoff's theorem, we know that for $m|_{K_0}$-a.e.~ $x \in K_0$ the orbit $\{(T^K)^nx\}_{n\in\N}$ equidistributes to the measure $m|_{K_0}$. So for $m|_{K_0}$-a.e.~ $x$, we have \[ \overline{K\cap T^{\N}x}= \overline{ (T^K)^{\N}x}\supset \Supp{m|_{K_0}}=:\dot{K}. \] The proof is complete. \end{proof} We are now ready to start \begin{proof}[Proof of Theorem \ref{thm.measure.class.geod}] \textbf{Choosing parameters and sets}: Let $\delta \in (0,1/10)$ be a small enough positive constant.
Let $\Psi$ be a $T$-invariant full measure set contained in the intersection of the full-measure subset of $H^\Z$ given by Proposition \ref{prop.dynnorm} with the full-measure subset on which the map $\omega \mapsto \nu_\omega$ is defined. Denote by $C(\delta)$ the constant given by Corollary \ref{corol.alpha.close.to.dynamical.cocycle} and $K(\delta) \subseteq \Psi$ a compact set chosen using the same corollary satisfying $\mu^\Z(K(\delta))>1-\delta/20$. Fix a compact subset $K_{cont}$ of $H^\Z$ of $\mu^\Z$-measure $>1-\delta/20$ on which the map from $w\in H^\Z$ to $\nu_w$ in the space of probability measures on $F$ is continuous. Let $$K_0(\delta)=K(\delta) \cap K_{cont} \cap K'(\delta,1),$$ where $K'(\delta,1)$ is the compact subset of $\Psi$ obtained from Lemma \ref{lemma.div.est} and let $C$ be the positive constant given by the same lemma. \bigskip Now applying Lemma \ref{lemma.relative.density.typical.orbit} to the shift system $X=H^\Z$ and $m=\mu^\Z$ with $K=K_{0}(\delta)$, by regularity of $\mu^\Z$, we can find a compact subset $K_0'(\delta)$ of $K_{0}(\delta)$ with $$ \mu^\Z(K_{0}(\delta))-\mu^\Z(K_0'(\delta))<\delta/20 $$ such that for every $w \in K_0'(\delta)$, the closure of the intersection of the $T$-orbit of $w$ with $K_0(\delta)$ contains a $\mu^\Z$-conull subset of $K_0(\delta)$. Finally, fix a compact subset $\underline{K}$ of $H$ with sufficiently large $\mu$-measure, so that $$ \hat{\underline{K}}:=\{w \in H^\Z : (w_{-C_2},\ldots,w_0,\ldots,w_{C_2}) \in \underline{K}^{2C_2+1}\} $$ has $\mu^\Z$-measure $>1-\delta/20$, where $C_2=[\kappa(2\kappa +C)]+1$, with $\kappa$ as given by Proposition \ref{prop.dynnorm}.
We now let $$K_{0}''(\delta)=K_{0}(\delta) \cap K_0'(\delta) \cap \hat{\underline{K}}.$$ \bigskip Let $N(\delta) \in \N$ be a constant so that there exists a compact subset \begin{equation}\label{equ:Kgen} K^{gen}(\delta) \subset \{w \in H^\Z : \frac{1}{n}\#\{k=1,\ldots,n : T^k w \in K_0''(\delta)\}>1-\delta/2, \; \forall n \geqslant N(\delta)\} \end{equation} with $\mu^\Z$-measure $\geqslant 1-\delta/10$. The existence of this set is ensured thanks to Birkhoff's ergodic theorem. Indeed, due to Birkhoff's ergodic theorem, we obtain for $\mu^{\Z}$-a.e.~ $w\in H^{\Z}$, \[\lim_{n\rightarrow\infty}\frac{1}{n}\#\{k=1,\cdots,n : T^kw\in K_0''(\delta) \}= \mu^{\Z}(K_0''(\delta))>1-4\delta/10. \] We can therefore find a large constant $N(\delta)$ such that \eqref{equ:Kgen} holds. Let $$K_{00}(\delta)=K_0''(\delta) \cap K^{gen}(\delta).$$ \bigskip For an element $a \in H^\N$ and a subset $K$ of $H^\Z$, let $K_a^-$ denote the set $\{b \in H^{-\N^\ast} : (b,a) \in K\}$. By Markov's inequality, the set $$K^+=\{a \in H^{\N} \, | \, \mu^{-\N^{\ast}}(K_a^-) \geqslant 1-\sqrt{\delta}\}$$ satisfies $\mu^{\N}(K^{+}) \geqslant 1- \sqrt{\delta}$, if $\mu^\Z (K)>1-\delta$. Specializing to $K=K_{00}(\delta)$, we fix two elements $a,a' \in K^+$, so that the set $$K_{a,a'}^-:=K_a^- \cap K_{a'}^-$$ has $\mu^{-\N^\ast}$-measure larger than $1-2\sqrt{\delta}$. \bigskip For each $t \in \N$ and $w \in \Psi$, let $n_t(w)=\min\{n : \lambda_1(w,n) \geqslant t\}$. We then have \begin{equation}\label{equ:stopping} |\lambda_1(w, n_t(w))-t| \leqslant \kappa, \end{equation} which is due to Proposition \ref{prop.dynnorm}.
\bigskip \textbf{Drift argument}: The output of this part is \begin{proposition}\label{prop:drift} For two futures $a,a'\in K_{00}^+(\delta)$ and two pasts $b,b' \in K_{a,a'}^-$, there exist sequences of natural numbers $m_\ell, m_\ell'\to\infty$ as $\ell\to\infty$, a point $\hat{\omega}\in K_0''(\delta)$ and $s(b,a,a'),\ s(b',a,a')\in D^{\pm}$ such that $\nu_{T^{m_\ell}(b,a)}\to \nu_{\hat\omega}$ and $$\nu_{\hat{\omega}}=s(b,a,a')^{-1}s(b',a,a')\nu_{\hat{\omega}},$$ where the element $s(b,a,a')$ is given by \[ s(b,a,a')=\lim_{\ell\to\infty}\alpha(a_{m_{\ell}'}' \ldots a_0', \xi(b))\alpha(a_{m_{\ell}} \ldots a_0, \xi(b))^{-1}, \] and similarly for $s(b',a,a')$. \end{proposition} We start the drift argument here. Set $w=(b,a)$, $w'=(b,a')$, $w''=(b',a)$, and $w'''=(b',a')$. \begin{claim} There are constants $p(\delta)$ with $p(\delta)=O(\delta)$ as $\delta \to 0$ and $N_1(\delta) \in \N$ such that for any $\omega, \omega' \in K^{gen}(\delta) \cap \Psi$ and $T \geqslant N_1(\delta)$, we have \begin{equation}\label{eq.often.common.rec} \#\{t=1,\ldots, T : T^{n_t(\omega)}\omega' \notin K_0''(\delta)\}<p(\delta)T. \end{equation} \end{claim} \begin{proof} By \eqref{equ:Kgen}, we have for $n\geqslant N(\delta)$ \[\#\{k=1,\cdots, n : T^k\omega'\notin K_0''(\delta) \}<\delta n/2. \] By the Lipschitz property \eqref{eq.lip.dynnorm}, for $\omega\in\Psi$, we have $n_T(\omega)\in [1/\kappa,\kappa] T$. Hence for $T>N_1(\delta)=\kappa N(\delta)$, using the Lipschitz property \eqref{eq.lip.dynnorm}, we have \[\#\{t=1,\ldots, T : T^{n_t(\omega)}\omega' \notin K_0''(\delta)\}\leqslant \kappa \#\{k=1,\ldots, n_T(\omega) : T^{k}\omega' \notin K_0''(\delta)\}<\delta\kappa^2T/2, \] proving the claim \eqref{eq.often.common.rec} with $p(\delta)=\delta \kappa^2/2$.
\end{proof} Therefore, choosing $\delta>0$ small enough so as to have $p(\delta)<1/16$ and applying \eqref{eq.often.common.rec} with all possible choices of $\omega,\omega' \in \{w, w', w'',w'''\}$, we find a sequence of positive integers $t_\ell$ tending to infinity as $\ell \to \infty$ and such that for every $\ell \in \N$, and $\omega,\omega' \in \{w, w', w'',w'''\}$, we have \begin{equation}\label{eq.common.return.times} T^{n_{t_\ell}(\omega)}\omega' \in K_0''(\delta). \end{equation} \begin{claim} For every $\ell \in \N$, \begin{equation}\label{eq.control.nt.difference} |n_{t_\ell}(w)-n_{t_\ell}(w'')| \, \, \, \text{and} \, \, \, |n_{t_\ell}(w')-n_{t_\ell}(w''')| \, \, \, \text{are bounded above by} \, \, \kappa(2\kappa+C), \end{equation} where $C=C(\delta,1)$ is the constant given by Lemma \ref{lemma.div.est}. \end{claim} \begin{proof} Indeed, by construction, we have $w'' \in W^-_1(w)$ and $w''' \in W^-_1(w')$. Moreover, thanks to \eqref{eq.common.return.times} and the fact that $K_0''(\delta)$ is contained in $K'(\delta,1)$, we can apply Lemma \ref{lemma.div.est} and deduce that $$|\lambda_1(w,n_{t_\ell}(w))-\lambda_1(w'',n_{t_\ell}(w))| \leqslant C.$$ On the other hand, by \eqref{equ:stopping}, we have $$|\lambda_1(w,n_{t_\ell}(w))-t_\ell| \leqslant \kappa,\ |\lambda_1(w'',n_{t_\ell}(w''))-t_\ell| \leqslant \kappa,$$ so that $$|\lambda_1(w,n_{t_\ell}(w))-\lambda_1(w'',n_{t_\ell}(w''))| \leqslant 2\kappa.$$ This implies that $$|\lambda_1(w'',n_{t_\ell}(w''))-\lambda_1(w'',n_{t_\ell}(w))| \leqslant 2 \kappa+C.$$ Referring once more to the Lipschitz property \eqref{eq.lip.dynnorm}, we deduce that $|n_{t_\ell}(w'')-n_{t_\ell}(w)| \leqslant \kappa(2\kappa+C)$ as claimed. Clearly, the same argument applies to $n_{t_\ell}(w')$ and $n_{t_\ell}(w''')$, proving \eqref{eq.control.nt.difference}.
\end{proof} \bigskip It then follows by \eqref{eq.comparison.dyn.norm}, the construction of $K_0''(\delta)$, and \eqref{equ:stopping} that for every $\ell \in \N$, we have \begin{equation}\label{eq.bdd.drift.ww'} \begin{split} &|\overline{\chi}(\overline{\alpha}(a_{n_{t_\ell}(w')}' \ldots a_0', \xi(b)))- \overline{\chi}(\overline{\alpha}(a_{n_{t_\ell}(w)} \ldots a_0, \xi(b)))| \\ &\leqslant |\lambda_1(w',n_{t_\ell}(w'))-\lambda_1(w,n_{t_\ell}(w))|+2C(\delta) \leqslant 2 \kappa + 2 C(\delta), \end{split} \end{equation} and similarly, \begin{equation}\label{eq.bdd.drift.w''w'''} |\overline{\chi}(\overline{\alpha}(a_{n_{t_\ell}(w''')}' \ldots a_0', \xi(b')))- \overline{\chi}(\overline{\alpha}(a_{n_{t_\ell}(w'')} \ldots a_0, \xi(b')))| \leqslant 2 \kappa + 2 C(\delta). \end{equation} \bigskip Now thanks to the fact that $\nu_w=\nu_{w'}$ (since $w$ and $w'$ have the same past), using the equivariance relation \eqref{eq.equiv.cocycle} at times $n_{t_\ell}(w)$ and $n_{t_{\ell}}(w')$, we get \begin{equation}\label{eq.use.equiv.1} \nu_{T^{n_{t_{\ell}}(w')}w'}=\underbrace{\left(\alpha(a_{n_{t_{\ell}}(w')}' \ldots a_0', \xi(b))\alpha(a_{n_{t_\ell}(w)} \ldots a_0, \xi(b))^{-1}\right)}_{=:D_\ell}\underbrace{\alpha(a_{n_{t_\ell}(w)} \ldots a_0, \xi(b)) \nu_w}_{\nu_{T^{n_{t_{\ell}}(w)}w}}, \end{equation} and similarly, \begin{equation}\label{eq.use.equiv.1'} \nu_{T^{n_{t_{\ell}}(w''')}w'''}=\underbrace{\left(\alpha(a_{n_{t_{\ell}}(w''')}' \ldots a_0', \xi(b'))\alpha(a_{n_{t_\ell}(w'')}\ldots a_0, \xi(b'))^{-1}\right)}_{=:D'_\ell}\underbrace{\alpha(a_{n_{t_\ell}(w'')} \ldots a_0, \xi(b')) \nu_{w''}}_{\nu_{T^{n_{t_{\ell}}(w'')}w''}}.
\end{equation} Here, thanks to, respectively, \eqref{eq.bdd.drift.ww'} and \eqref{eq.bdd.drift.w''w'''}, the sequences $D_\ell$ and $D_\ell'$ are bounded. Moreover, by construction (see \eqref{eq.common.return.times}), $T^{n_{t_{\ell}}(\omega')}\omega$ belongs to the compact continuity set $K_{cont}$ for every $\omega,\omega' \in \{w,w',w'',w'''\}$. In particular, there exists a subsequence of $t_\ell$ such that for any such $\omega,\omega'$ and for some $\hat{\omega} \in H^\Z$, we have \begin{equation}\label{eq.use.continuity1} T^{n_{t_\ell}(\omega')}\omega \to_{\ell \to \infty} \hat{\omega} \implies \nu_{T^{n_{t_{\ell}}(\omega')}\omega} \to_{\ell \to \infty} \nu_{\hat{\omega}}. \end{equation} \bigskip Now we redefine the time. Let $m_{t_\ell}(w'')=m_{t_\ell}(w)=\max\{n_{t_\ell}(w),n_{t_\ell}(w'') \}$ and similarly $m_{t_\ell}(w''')=m_{t_\ell}(w')=\max\{n_{t_\ell}(w'),n_{t_\ell}(w''') \}$. Indeed, by the construction of the $n_{t_\ell}$'s and $m_{t_\ell}$'s, the convergence property is unaffected: see \eqref{eq.use.continuity1} and the choice of $\omega,\omega'\in\{w,w',w'',w''' \}$. Moreover, thanks to the equivariance property, we still have the relations \eqref{eq.use.equiv.1} and \eqref{eq.use.equiv.1'} for these modified times $m_{t_\ell}$'s. Finally, the differences between $n_{t_\ell}(w)$ and $n_{t_\ell}(w'')$, and similarly, between $n_{t_\ell}(w')$ and $n_{t_\ell}(w''')$ are bounded (see \eqref{eq.control.nt.difference}). Since we have chosen $K_0''(\delta)$ so that it is contained in the set $ \hat{\underline{K}}$, the modified differences --- as appearing in \eqref{eq.use.equiv.1} and \eqref{eq.use.equiv.1'} after modifying the times --- $D_\ell$ and $D_\ell'$ are still bounded.
\bigskip Notice that since $w$ and $w''$, and similarly $w'$ and $w'''$, have the same futures, for any sequence of $\ell$'s such that $T^{m_{t_\ell}(w)}w \to \hat{w}$ for some $\hat{w}$, we also have $T^{m_{t_\ell}(w'')}w''=T^{m_{t_\ell}(w)}w''\to \hat{w}$ (and similarly for the pair $w'$ and $w'''$). In conclusion, passing to a subsequence of $t_\ell$'s (that we still denote by $t_\ell$) so that we have\\[2pt] \indent \textbullet ${}$ $D_\ell \to s(b,a,a')$ for some $s(b,a,a') \in D^{\pm}$ and similarly, $D'_\ell \to s(b',a,a')$ for some $s(b',a,a') \in D^{\pm}$, and\\[2pt] \indent \textbullet ${}$ $T^{m_{t_\ell}(w)}w \to \hat{w} \in K_0''(\delta)$ and $T^{m_{t_\ell}(w')}w' \to \hat{w}' \in K_0''(\delta)$,\\[2pt] we deduce from \eqref{eq.use.equiv.1} and \eqref{eq.use.equiv.1'} that $$ \nu_{\hat{w}'}=s(b,a,a')\nu_{\hat{w}} \quad \text{and} \quad \nu_{\hat{w}'}=s(b',a,a')\nu_{\hat{w}}, $$ and hence we get \begin{equation}\label{eq.take.square.if.pgl} \nu_{\hat{w}}=s(b,a,a')^{-1}s(b',a,a') \nu_{\hat{w}}. \end{equation} By letting $m_\ell=m_{t_\ell}(w)$ and $m_\ell'=m_{t_\ell}(w')$, we obtain Proposition \ref{prop:drift} stated in the beginning of this part. \bigskip \textbf{From invariance of one typical point to the full set}: By the equivariance property and commutativity, for every $t \in \N$, the measure $\nu_{T^t \hat{w}}$ is also invariant by $s(b,a,a')^{-1}s(b',a,a')$. On the other hand, we have $\hat{w} \in K_0''(\delta)$ and recall that the latter set is contained in $K_0'(\delta)$.
So letting $K_{acc}$ be the set of elements $\omega$ in $K_0(\delta)$ such that there exists a sequence $n_m \to \infty$ such that $T^{n_m}\hat{w} \in K_0(\delta)$ and $T^{n_m}\hat{w} \to \omega$, by the definition of $K_0'(\delta)$, we get that the $\mu^\Z$-measure of $K_{acc}$ is positive. Since $K_0(\delta)$ is contained in the continuity set, this implies that for every $\omega \in K_{acc}$, the measure $\nu_{\omega}$ is invariant by $s(b,a,a')^{-1}s(b',a,a')$. By ergodicity and commutativity (since the set of $\omega$ such that $\nu_{\omega}$ is invariant by an element of $D$ is shift-invariant), this entails that $$ \nu_{\omega}=s(b,a,a')^{-1}s(b',a,a')\nu_{\omega} \quad \text{for $\mu^\Z$-a.e. $\omega \in H^\Z$}. $$ \bigskip \textbf{Constructing arbitrarily small drift}: Since for $\mu^\Z$-a.e.~ $\omega$ the stability group of $\nu_{\omega}$ is closed, to prove the hypothesis of Claim 0 (and hence Theorem \ref{thm.measure.class.geod}), it suffices to find sequences $\delta_n>0$, couples of futures $a_n,a'_n \in K^+:= K^+_{00}(\delta_n)$ and couples of pasts $b_n,b'_n \in K_{a_n,a'_n}^{-}$ such that \begin{equation}\label{eq.completes.drift} \id \neq (s(b_n,a_n,a'_n)^{-1}s(b'_n,a_n,a'_n))^2 \to \id \end{equation} as $n \to \infty$. Here we take the square to make sure the invariance is in $D$ instead of $D^{\pm}$. \begin{comment} \bigskip To prove \eqref{eq.completes.drift}, we now turn to the construction of the limiting elements $s(b,a,a')$.
For every $\delta>0$ small enough, for any $a,a'\in K_{00}^+(\delta)$ and any $b\in K_{a,a'}^-$, we can find sequences of natural numbers, $t_\ell$, $m_{t_\ell}(a)$, and $m_{t_\ell}(a')$ such that for any $b \in B_{fp}$ \begin{equation}\label{eq.with.m.pasts} s(b,a,a')=\lim_{\ell \to \infty}\alpha(a_{m_{t_{\ell}}(a')}' \ldots a_0', \xi(b))\alpha(a_{m_{t_\ell}(a)} \ldots a_0, \xi(b))^{-1}. \end{equation} Therefore, for any $b,b' \in B_{fp}$, we can express the difference $s(b,a,a')^{-1}s(b',a,a')$ as \begin{equation}\label{eq.cross.ratio.limit} s(b,a,a')^{-1}s(b',a,a')=\lim_{\ell \to \infty} \log \left(\frac{\|a'_{m_{t_\ell}(a')} \ldots a'_0 v_{b'}\|\|a_{m_{t_\ell}(a)} \ldots a_0 v_{b}\|}{\|a'_{m_{t_\ell}(a')} \ldots a'_0 v_{b}\|\|a_{m_{t_\ell}(a)} \ldots a_0 v_{b'}\|} \right). \end{equation} \end{comment} Recall that $a,a'$ are two different points in $K_{00}^+(\delta)$. Due to Lemma \ref{lemma.random.lin.form} and the set $K_{00}^+(\delta)$ having positive measure, we can suppose that the corresponding linear forms $\varphi_a$ and $\varphi_{a'}$ are not colinear. The set $K_{a,a'}^-$ has measure greater than $1-2\sqrt{\delta}$. Now given $\delta'>0$, consider the compact set $K_{a,a'}(\delta'):=K_a(\delta')\cap K_{a'}(\delta')$ given by Lemma \ref{lemma.random.lin.form}. Clearly, if $\delta$ and $\delta'$ are small enough, the set $K_{a,a'}(\delta') \cap K_{a,a'}^-$ has positive measure, bounded below by $1-2\delta'-2\sqrt{\delta}$.
On the other hand, by the \textit{drift argument} (Proposition \ref{prop:drift}), for every $b,b' \in K_{a,a'}(\delta') \cap K_{a,a'}^-$, there exist sequences of natural numbers $m_\ell, m_\ell'$ tending to infinity as $\ell \to \infty$ such that \begin{equation}\label{eq.cross.ratio.limit} \overline{\chi}\left(\log(s(b,a,a')^{-1}s(b',a,a'))\right)=\lim_{\ell \to \infty} \log \left(\frac{\|a'_{m_{\ell}'} \ldots a'_0 v_{b'}\|\|a_{m_{\ell}} \ldots a_0 v_{b}\|}{\|a'_{m_{\ell}'} \ldots a'_0 v_{b}\|\|a_{m_{\ell}} \ldots a_0 v_{b'}\|} \right). \end{equation} By Lemma \ref{lemma.random.lin.form}, we have for the two linear forms $\varphi=\varphi_a$ and $\varphi'=\varphi_{a'}$ of unit norm on $V$ that \begin{equation}\label{eq.loglin.form.ratio} \overline{\chi}\left(\log(s(b,a,a')^{-1}s(b',a,a'))\right)= \log \frac{|\varphi'(v_{b'})\varphi(v_b)|}{|\varphi'(v_b)\varphi(v_{b'})|} \end{equation} and $|\varphi(v_b)|,|\varphi'(v_b)|>\epsilon'>0$, where $\epsilon'=\epsilon'(\delta')$ is given by Lemma \ref{lemma.random.lin.form}. Let $\epsilon>0$ be given. Since $\mu^{-\N^\ast}(K_{a,a'}(\delta') \cap K_{a,a'}^-)>0$ and the Furstenberg measure is atomless, we can find two different points $b,b'$ in $K_{a,a'}(\delta') \cap K_{a,a'}^-$ with $v_b\wedge v_{b'}\neq 0$ and $d(v_b,v_{b'})<\epsilon$. \begin{claim} If $2\epsilon<(\epsilon')^2$, the drift element associated to $a,a',b,b'$ (as in \eqref{eq.loglin.form.ratio}) is non-trivial and has size $O_{\epsilon'}(\epsilon)$. \end{claim} \begin{proof} This is because \[\frac{\varphi(v_b)\varphi'(v_{b'})}{\varphi(v_{b'})\varphi'(v_b)}-1=\frac{(\varphi,\varphi')(v_{b'}\wedge v_b)}{\varphi(v_{b'})\varphi'(v_b)}, \] where $(\varphi,\varphi')$ is a linear form on $\wedge^2 V$ given by \[(\varphi,\varphi')(v\wedge v')=\varphi(v)\varphi'(v')-\varphi(v')\varphi'(v).
\] Non-triviality comes from the choice of $a,a'$ and $b,b'$, namely that $\varphi$ and $\varphi'$ are not colinear and $v_b\wedge v_{b'}\neq 0$. By taking $\epsilon<(\epsilon')^2/2$, we have \[ \frac{|(\varphi,\varphi')(v_{b'}\wedge v_b)|}{|\varphi(v_{b'})\varphi'(v_b)|}\leqslant \frac{\|v_{b'}\wedge v_b\|}{| \varphi(v_{b'})\varphi'(v_b)|}\leqslant \epsilon/(\epsilon')^2<1/2. \] Applying the inequality $|\log(1+t)|\leqslant 2|t|$ for $|t|<1/2$, we obtain \[\left|\log \frac{\varphi'(v_{b'})\varphi(v_b)}{\varphi'(v_b)\varphi(v_{b'})}\right|\leqslant 2 \frac{|(\varphi,\varphi')(v_{b'}\wedge v_b)|}{|\varphi(v_{b'})\varphi'(v_b)|}\leqslant 2\epsilon/(\epsilon')^2. \] The proof of the claim is complete. \end{proof} \bigskip Fixing $\epsilon'>0$ and choosing $\epsilon$ arbitrarily small --- i.e.~ taking a sequence $\epsilon_n \to 0$ and associated couples $b_n,b'_n \in K_{a,a'}(\delta') \cap K_{a,a'}^-$ --- we obtain \eqref{eq.completes.drift} and conclude the proof. \end{proof} \begin{comment}\label{rk.when.pgl2.end.of.proof} When $\Gamma_\mu \not\subset H^\circ$ (cf.~ Remark \ref{rk.when.pgl2.start.of.proof}), with an adaptation of Corollary \ref{corol.alpha.close.to.dynamical.cocycle} to include the signs of the cocycle $\alpha$, the only new point will be that the elements $s(b,a,a'), s(b',a,a')$, etc.~ will belong to the product group $D\times (\Z/2\Z)$. This problem disappears by simply taking the square in \eqref{eq.take.square.if.pgl}; the rest (i.e.~ \eqref{eq.completes.drift}) then goes through. \end{comment} \subsection{Case 2.3: The remaining case}\label{subsec.remaining.case} In this part, we will restrict ourselves to a slightly more specific situation; we will assume that the ambient group $G$ is $\PGL_n(\R)$; the subgroups $H, Q,R,R_0$ have the same meaning as before. The group $S = Q/R_0$ is a quotient of a product of $\PGL_{k_i}(\R)$'s.
We are in Case 2.3, so we suppose $H$ is positioned so that $Q_H:=Q \cap H$ is a parabolic subgroup of $H$ and that $Q_H^\circ \cap R_0$ is trivial. In light of Proposition \ref{prop.decomposable} and Definition \ref{def.decomposable} of a decomposable action, it might be tempting at first sight to think that in Case 2.3, the morphism extends and we are in the decomposable situation. However, it turns out this is not the case and whether the morphism can extend depends, for example, on the irreducibility of the action of $H$ on $\P(\R^n)$. We signal at this point that in this paper we are not able to get a characterization of when we are in the decomposable case; as we shall see (Case 2.3.a), if $H$ acts projectively irreducibly on $\R^n$, we will be able to ensure this. Without this irreducibility assumption (Case 2.3.b), the description of what may happen is widely open; we content ourselves with some examples. \subsubsection{Case 2.3.a: Irreducible $H$} In this case, the irreducibility of $H$ implies that there is a unique $H$-compact orbit $\calC$ on the flag variety $G/Q$. \begin{comment} If $Q$ is a minimal parabolic subgroup, then we are in Case 2.1 and the fibre $Q/R$ is trivial. So by Proposition \ref{prop.trivial.fibre.measure.class}, the same result is also true and the space $P_\mu^{\erg}(X_\calC)$ is just composed of one point, the Furstenberg measure $\bar\nu_F$. \begin{remark} The last assertion \eqref{eq.bijection.in.thm.SL2.dec} above is a consequence of the first assertion (decomposability), combined with Proposition \ref{prop.decomposable.measure.class} and the fact that $H/P$ has a unique $\mu$-stationary probability measure. Moreover, the bijection \eqref{eq.bijection.in.thm.SL2.dec} is explicit in a certain standard trivialization and is given by the map \eqref{eq.bijection.map.decomposable}.
This is useful since, under a moment assumption on $\mu$, $\mu$-stationary and ergodic measures on $S/\Lambda$ (i.e.~ elements of $P_\mu^{\erg}(S/\Lambda)$) are completely described by the works of Benoist--Quint \cite{BQ1,BQ2} and Eskin--Lindenstrauss \cite{eskin-lindenstrauss.long}. \end{remark} \end{comment} \begin{proof}[Proof of Theorem \ref{thm.irreducible.H.decompsable}] If $R_0=Q$, then the fiber is trivial and we are in Case 2.1. Otherwise, $S\simeq Q/R_0$ is a nontrivial semisimple group. In this situation, one can verify directly that $Q_H\cap R_0$ is trivial, for example by the explicit computation given below. So we are in Case 2.3. Let $D<Q_H$ be a rank-one $\R$-split torus in $Q_H<H$ and $U$ be the unipotent radical of $Q_H$. We denote by $\mathfrak{u}$, $\mathfrak{d}$, and $\mathfrak{h}$ the Lie algebras of $U$, $D$, and $H$, respectively. Fix a Weyl chamber $\mathfrak{d}^+$ in $\mathfrak{d}$ and two elements $x \in \mathfrak{d}^+$ and $e \in \mathfrak{u}$ such that $[x,e]=2e$, where $[.,.]$ denotes the Lie bracket in $\mathfrak{h}$. We consider the Lie algebra representation of $\mathfrak{h}$ induced by the irreducible representation of $H$ coming from the embedding $H < \PGL_n(\R)$. By the representation theory of $\mathfrak{sl}_2(\R)$, the space $\R^n$ decomposes as a sum of a string of one-dimensional weight spaces of $\mathfrak{d}$, which we denote by $V_1,V_2,\ldots,V_n$. They are ordered in increasing order with respect to the order on weights of $D$ coming from the choice of $\mathfrak{d}^+$. The elements of $\mathfrak{u}$ act as raising operators, i.e.~ for any non-zero $e' \in \mathfrak{u}$, we have $e' V_i =V_{i+1}$ if $i\neq n$ and $e'V_n=0$. Let $W_1<W_2<\ldots<W_k=\R^n$ be the maximal flag preserved by $Q$. Since the diagonal subgroup $D$ is contained in $Q_H$, each space $W_i$ is also preserved by $\mathfrak{d}$ and hence each $W_i$ is a sum of the weight spaces $V_i$'s. 
Moreover, since $U$ is also contained in $Q$ (and hence preserves the $W_i$'s) and $\mathfrak{u}$ acts as a raising operator for $\mathfrak{d}^+$ in the Lie algebra representation, it follows that $W_i=V_n \oplus \ldots \oplus V_{n-k_i+1}$, where $k_1<k_2<\ldots<k_j=n$ are the dimensions of $W_1,W_2,\ldots$ respectively. We also set $k_0=0$ and $m_i=k_i-k_{i-1}$ for $i=1,\ldots,j$. The group $S$ is then a quotient of the product $\prod_{i=1}^j S_i$ where $S_i \simeq \PGL_{m_i}(\R)$. The product $\Pi_i S_i \simeq \Pi_i\PGL_{m_i}(\R)=Q/(R_0')$, where $R_0'$ is the solvable radical of $Q$. We have a natural projection from $\prod_i S_i=Q/(R_0')\to S=Q/R_0$. The projection of $Q_H$ to $S=Q/R_0$ factors through $\prod_i S_i=Q/(R_0')$. Therefore, to extend the morphism to $S$, we only need to extend the morphism from $Q_H$ to $\prod_i S_i$. Let $\mathfrak{s}_i$ be the Lie algebra of $S_i$. The Lie algebra morphism from the Lie algebra of $Q_H$ to $\mathfrak{s}_i$ coming from the morphism $Q_H \to \prod_i S_i$ is the morphism obtained by extending $$ x \mapsto \begin{pmatrix} m_i-1 & 0 & \cdots & & & 0 \\ 0 & m_i-3 & & & & \vdots \\ \vdots & & \ddots & & \\ & & & & -m_i+3 & 0 \\ 0 & & & & 0 & -m_i+1 \end{pmatrix}_{m_i \times m_i}$$ $$ e \mapsto \begin{pmatrix} 0 & 1 & 0 & \cdots & 0 \\ & 0 & 1 & & \\ & & \ddots & \ddots & \\ & & & 0 & 1\\ 0 & & & \cdots & 0 \\ \end{pmatrix}_{m_i \times m_i}. $$ To extend this morphism to $\mathfrak{h} \to \mathfrak{s}_i$, let $f$ be an element of $\mathfrak{h}$ so that $(e,x,f)$ is an $\mathfrak{sl}_2$-triple, i.e.~ $[x,f]=-2f$ and $[e,f]=x$. 
Mapping the element $f$ to the element $$ \mathfrak{b}egin{pmatrix} 0 & & & & \\ (m_i-1) & 0 & & & \\ & 2(m_i-2) & 0 & & \\ & & \ddots & \ddots & & \\ & & & (m_i-2)2& 0 & & \\ & & & & (m_i-1) & 0 \\ \end{pmatrix}_{m_i \times m_i} $$ of $\mathfrak{s}_i=\mathfrak{pgl}_{m_i}(\R)$, a direct calculation (see e.g.~ \cite[\S 3.7]{neil-ginzburg.book}) shows that we obtain a Lie algebra morphism $\mathfrak{h} \to \mathfrak{s}_i$ for each $i=1,\ldots,j$. We hence get a morphism $\mathfrak{h} \to \mathfrak{b}igoplus_i\mathfrak{s}_i$ which gives rise to an algebraic morphism $H \to \mathfrak{p}rod_i S_i$ extending the initial morphism $Q_H \to \mathfrak{p}rod_i S_i$. (For the $\PGL_2(\R)$ case, notice that an irreducible algebraic representation from $\SL_2(\R)$ to $\PGL_{m_i}(\R)$ always induces a representation of $\PGL_2(\R)$) Therefore Proposition \mathfrak{r}ef{prop.decomposable} yields that the $H$-action on $X_\mathcal{C}$ is decomposable. The last assertion then follows from Proposition \mathfrak{r}ef{prop.decomposable.measure.class} and uniqueness of the $\mu$-stationary probability measure (the Furstenberg measure) on $H/P$. \end{proof} We single out the following consequence which gives a generalization (and an explanation) of the phenomenon of embedding of the Furstenberg boundary in the fibre bundle $X$. This phenomenon is discovered in the work of Sargent--Shapira \cite{sargent-shapira} when $X$ is the space of $2$-lattices inside $\R^3$. \mathfrak{b}egin{example}[Rank-$k$ lattices in $n$-space] Let $G=\PGL_n(\R)$ and $Q$ the stabilizer of a $k$-space $W$ in $\R^{n}$. Let $R$ be the stabilizer in $Q$ of the homothety class of a lattice in $W$, and $R_0$ the connected component of $R$. In this case, the bundle $G/R$ over $G/Q$ will be denoted as $X_{n,k}$. It is actually the space of homothety-equivalence classes of rank-$k$ lattices in $\R^n$. 
Recall that $H$ is a copy of $\SL_2(\R)$ or $\PGL_2(\R)$ acting irreducibly on $\P(\R^{n})$ and $\mathcal{C} \subset G/Q$ is the unique compact $H$-orbit in $G/Q$. That is $\calC=HQ\subset G/Q$. It is then easy to check that we are in the setting of Theorem \mathfrak{r}ef{thm.irreducible.H.decompsable} and therefore we get a trivialization $(X_{n,k})_\mathcal{C} \overset{\mathfrak{p}hi}{\simeq} H/Q_H \times S/\Lambda$, where $S=\PGL_k(\R)$, and $\Lambda=\PGL_k(\Z)$, such that the associated cocycle $H \times H/Q_H \to S$ is morphism-type, i.e.~ it does not depend on the $H/Q_H$ coordinate (in particular it is a morphism $\mathfrak{r}ho: H \to S$). In the statement below, the $\mu$-action on $S/\Lambda$ is defined via $\mathfrak{r}ho$. \end{example} \mathfrak{b}egin{corollary} Keep the above setting. In particular, let $H$ be an algebraic subgroup of $G$ isomorphic to $\SL_2(\R)$ or $\PGL_2(\R)$ and acting irreducibly on $\P(\R^n)$. Let $(X_{n,k})_\calC$ be the sub-bundle of $X_{n,k}$ over the base $\calC\subset G/Q$. Then, we have \[P_\mu^{erg}((X_{n,k})_\calC)\simeq P_\mu^{erg}(S/\Lambda). \] \end{corollary} \mathfrak{b}egin{comment} Let $x_1$ and $x_2$ be the equivalences classes of a lattice in $W$ and a lattice in $V/W$, $x_0$ be the tuple $(x_1,x_2)$ so that the stabilizer of $x_0$ is the group $R$ and the stabilizer of $x_1$ is the group $R_1$. Via the choices of $x_0$ and $x_1$, we can identify $X$ and $X_{n,k}$ with $G/R$ and $G/R_1$, respectively. We obtain a projection $X \to X_{n,k}$ from the natural projection $G/R \to G/R_1$. Recall that $\mathfrak{p}hi=(\mathfrak{p}i,\mathfrak{p}hi_2)$ is the standard trivialization $X_\mathcal{C} \simeq H/Q_H \times S/\Lambda$ given by the application of Theorem \mathfrak{r}ef{thm.irreducible.H.decompsable}. The map $\mathfrak{p}hi_2: X_\mathcal{C} \to S / \Lambda \simeq Q/R$ is given by $\mathfrak{p}hi_2(gR)=s(gQ)^{-1}gR$, where $s$ is a Borel section of the map $G\mathfrak{r}ightarrow G/Q$. 
In particular, for any $f \in R_1$, we have $\mathfrak{p}hi_2(gfR)R_1=\mathfrak{p}hi_2(gR)R_1$ so that the following diagram commutes and is $H$-equivariant. \mathfrak{b}egin{center} \mathfrak{b}egin{tikzcd} X_\calC \mathfrak{a}rrow[d, ""' ] \mathfrak{a}rrow[r, "" ] & \calC\times S/\Lambda \mathfrak{a}rrow[d,""] \\ (X_{n,k})_\calC \mathfrak{a}rrow[r, " "'] & \calC\times S_1/\Lambda_1 \end{tikzcd} \end{center} It follows that the the bundle $(X_{n,k})_\mathcal{C}$ is $H$-equivariantly mapped to $\mathcal{C} \times S_1 / \Lambda_1$ where the latter is endowed with the diagonal action induced by the morphism $\mathfrak{r}ho$ precomposed with the projection $S \to S_1$. The statement now follows from Proposition \mathfrak{r}ef{prop.decomposable.measure.class}. \end{comment} \mathfrak{b}egin{comment} Since $H$ acts irreducibly on $\R^n$, up to conjugate, we can always suppose that $Q$ is the parabolic group fixing the $k$ plane in $\R^n$ generated by $k$ vectors $e_1,\cdots, e_k$ and these vectors are the highest $k$ vectors of $H$ up to a choice of roots with respect to the diagonal subgroup of $Q_H$. Let $x_0$ be base point given by the rank-$k$ lattice generated by vectors $e_1,\cdots, e_k$. Then the stabilizer of $G$ at $x_0$ is $R_1:=\Lambda_1S_{2,Q}A_QN_Q$, where $\Lambda_1\simeq \SL_k^{\mathfrak{p}m}(\Z)$ and $S_{2,Q}\simeq \SL_{n-k}(\R)$. Now with the notation in the paper, let $X=G/R_0$, where $R_0=A_QN_Q$. We have $X=G/R_0\to X_{n,k}=G/R_1\to G/Q$. Since $H$-acts irreducibly, we can apply Theorem \mathfrak{r}ef{thm.irreducible.H.decompsable} to obtain that the action of $H$ on $X_\calC$ is decomposable. Let $\mathfrak{p}hi:X_\calC\to \calC\times S/\Lambda$ be a trivilization $\mathfrak{p}hi(x)=(\mathfrak{p}i(x),\mathfrak{p}hi_2(x))$ such that \mathfrak{b}egin{equation}\label{eq:r1} \mathfrak{p}hi_2(gR)R_1=\mathfrak{p}hi_2(gfR)R_1 \end{equation} for $gR\in X_\calC$ and $f\in R_1$, which is always possible by taking a standard trivialization. 
For an element in $(X_{n,k})_\calC$, we can take a lift in $X_\calC$, then map to $\calC\times S/\Lambda$ and project down to $\calC\times S_1/\Lambda_1$. Due to relation \eqref{eq:r1}, this map doesn't depend on the choice of the lift, hence it is well-defined. \mathfrak{b}egin{center} \mathfrak{b}egin{tikzcd} X_\calC \mathfrak{a}rrow[d, ""' ] \mathfrak{a}rrow[r, "" ] & \calC\times S/\Lambda \mathfrak{a}rrow[d,""] \\ (X_{n,k})_\calC \mathfrak{a}rrow[r, " "'] & \calC\times S_1/\Lambda_1 \end{tikzcd} \end{center} By Theorem \mathfrak{r}ef{thm.irreducible.H.decompsable} and Proposition \mathfrak{r}ef{prop.decomposable}, we can modify the trivialization such $H$ acts diagonally on the product space. The modification is in fact given by conjugate the associated cocycle by a Borel map $s$ from $\calC$ to $S$. The resulting map $\mathfrak{p}hi'(x)=(\mathfrak{p}i(x), s(\mathfrak{p}i(x))\mathfrak{p}hi_2(x))$. The second coordinate still satisfies \eqref{eq:r1}, due to $\mathfrak{p}i(gR)=\mathfrak{p}i(gfR)$ for $gR\in X_\calC$ and $f\in R_1$. So the trivilization $\mathfrak{p}hi'$ passes to $(X_{n,k})_\calC$ and we obtain that $(X_{n,k})_\calC$ is isomorphic as $H$-space $\calC\times S_1/\Lambda_1$ with some $H$-action $h(c,f)=(hc,\mathfrak{r}ho(h)f)$. Then we can use Proposition \mathfrak{r}ef{prop.decomposable.measure.class} to conclude. \end{comment} \subsubsection{Case 2.3.b: Reducible $H$} Below, we give an example for Case 2.3.b and justify that for this example it is not possible to extend the morphism $Q_H \to S$. \mathfrak{b}egin{example}\label{ex.to.be.treated} Let $G=\PGL_4(\R)$ and $Q$ be the parabolic subgroup given by the stabilizer of the 3-plane generated by the standard basis vectors $\{e_1,e_2,e_3\}$. We take $R_0$ to be the solvable radical of $Q$ and $R$ to be the stabilizer of the $3$-lattice generated by $\{e_1,e_2,e_3\}$. 
Finally, we take $H$ to be the copy of $\PGL_2(\R)$ in $G$ given by \mathfrak{b}egin{equation}\label{eq.2.3.b.embedding} \left \{ \mathfrak{b}egin{pmatrix} a^2 & ab & 0 & b^2 \\ 2ac & ad+bc & 0 & 2bd \\ 0 & 0 & 1 & 0 \\ c^2 & cd & 0 & d^2 \\ \end{pmatrix} | \mathfrak{b}egin{pmatrix} a & b \\ c & d \end{pmatrix} \in \SL_2^{\mathfrak{p}m}(\R) \mathfrak{r}ight \}. \end{equation} We claim that this configuration falls into Case 2.3.b. Indeed, the intersection $Q_H=Q \cap H$ is given by the image of the upper-triangular subgroup of $\PGL_2(\R)$ in the embedding \eqref{eq.2.3.b.embedding} described above; in other words $$ Q_H= \left \{ \mathfrak{b}egin{pmatrix} a^2 & ab & 0 & b^2 \\ 0 & \mathfrak{p}m 1 & 0 & \mathfrak{p}m 2ba^{-1} \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & a^{-2} \\ \end{pmatrix} | \; a \mathfrak{n}eq 0 \mathfrak{r}ight \}. $$ So $Q_H$ is a parabolic subgroup of $H$ and therefore we are in Case 2. It is easy to see that intersection of $Q_H \cap R_0$ is trivial, hence we are in Case 2.3. Finally, clearly the $H$-representation described in \eqref{eq.2.3.b.embedding} is not irreducible justifying the claim. Now note that $S=Q/R_0$ is the group $\PGL_3(\R)$ and the projection $Q \to S$ is given by the projectivization of the top-left 3-by-3 block in $Q$. It follows that the morphism $Q_H \to S$ is given by \mathfrak{b}egin{equation}\label{eq.not.extend} \PGL_2(\R) \mathfrak{n}i \mathfrak{b}egin{pmatrix} a & b \\ 0 & \mathfrak{p}m a^{-1} \end{pmatrix} \mapsto \mathfrak{b}egin{pmatrix} a & b & 0 \\ 0 & \mathfrak{p}m a^{-1} & 0 \\ 0 & 0 & a^{-1} \end{pmatrix} \in \PGL_3(\R) \end{equation} However, it is not hard to see that the morphism \eqref{eq.not.extend} from the upper-triangular subgroup of $\PGL_2(\R)$ to $\PGL_3(\R)$ is not the restriction of a morphism $\PGL_2(\R) \to \PGL_3(\R)$. 
One can either use the classification of $\SL_2(\R)$-representations to see this or otherwise verify this claim by direct computation: denote by $x$ and $e$ a pair of Lie algebra elements of $\mathfrak{sl}_2(\R)$ in the Lie algebra of the upper-triangular group satisfying $[x,e]=2e$. Let $\overline{x}$ and $\overline{e}$ be their images in $\mathfrak{pgl}_3(\R)$ under the Lie algebra representation induced by \eqref{eq.not.extend}. Now one checks by direct computation that it is not possible to find an element $\overline{f}$ in $\mathfrak{pgl}_3(\R)$ satisfying $[\overline{x},\overline{f}]=-2\overline{f}$ and $[\overline{e},\overline{f}]=\overline{x}$. \end{example} \section{$\SL_2(\R)$-Zariski closure: equidistribution}\label{sec.equidist} In this part, we study the equidistribution of the averaged measure $\frac{1}{n}\sum_{1\leqslant k\leqslant n}\mu^{*k}*\delta_x$ for $x$ inside the bundle $X_\calC$. In fact, as we start by briefly explaining in \S \ref{subsec.equidist.trivial} below, all cases except that of the diagonal fibre action (Case 2.2) boil down to the corresponding results of Benoist--Quint \cite{BQ1,BQ3}. The part \S \ref{subsec.equidist.diagonal} is devoted to the diagonal fibre action case. \subsection{Equidistribution from Benoist--Quint}\label{subsec.equidist.trivial} In each case below, we keep the corresponding assumptions from \S \ref{sub.base.and.cases}. \subsubsection{Case 1 (Dirac Base)} Recall that Case 1 corresponds to the situation when the acting group $H$ is contained in the parabolic $Q$ of $G$. As explained in \S \ref{subsub.dirac.base}, it follows that $H$ fixes a point in $G/Q$ and hence stabilizes the fibre above the fixed point. Therefore, up to conjugating $Q$, we are left with studying the associated $\mu$-random walk on the fibre $S/\Lambda$, where the probability measure $\mu$ is seen as a Zariski-dense measure in a copy of $\SL_2(\R)$ inside $S$. 
This is then a particular situation of the setting treated in Benoist--Quint's work \cite{BQ1,BQ3}. Consequently, the corresponding equidistribution results apply. We do not state the result here as it would be a repetition. We refer the reader to the more recent \cite[Theorem 1.5]{prohaska-sert-shi}, where the compact support assumption of \cite{BQ3} is relaxed to finite exponential moment. \subsubsection{Case 2.1 (Trivial fiber action)}\label{subsub.equidist.trivial.fibre} Recall from Proposition \mathfrak{r}ef{prop.trivial.fibre.measure.class} that in this case the $H$-action on $X_\mathcal{C}$ is decomposable with trivial morphism, i.e.~ there exists a standard trivialization $X \simeq G/Q \times Q/R$ for which the associated cocycle restricted to $\mathcal{C}$ is the trivial morphism. Therefore in this case we have $\mu^{\mathfrak{a}st k} \mathfrak{a}st \delta_{(\theta,f)}=\int \delta_{g \theta} d\mu^{\mathfrak{a}st k}(g) \otimes \delta_f$, in other words, the equidistribution problem is only the one in $\mathcal{C} \subseteq G/Q$. It is well-known that by spectral gap property we have the convergence $\int \delta_{g \theta} d\mu^{\mathfrak{a}st k}(g) \to \overline{\mathfrak{n}u}_F$ moreover with exponential speed estimates with respect to a class of H\"{o}lder functions. We omit the statement to avoid repetition; see \cite[Ch.~ V, Theorem 4.3]{bougerol.lacroix}. \subsubsection{Case 2.3.a (Irreducible $H$-action)} \mathfrak{b}egin{proposition}\label{prop.equidist.H.irred} Keep the setting of Theorem \mathfrak{r}ef{thm.irreducible.H.decompsable} and suppose moreover that the measure $\mu$ on $H$ has finite exponential moment. 
Then, there exists a standard trivialization $X \simeq G/Q \times S/\Lambda$ such that for every $x \in X_\mathcal{C}$, the limit as $n$ tends to infinity of $\frac{1}{n}\sum_{k=1}^n \mu^{\ast k}\ast \delta_x$ exists and equals a product $\overline{\nu}_F \otimes \nu^F$, where $\overline{\nu}_F$ is the Furstenberg measure on $H/Q_H$ and $\nu^F$ is a homogeneous probability measure on $S/\Lambda$. \end{proposition} As we shall see, the statement follows as a consequence of the decomposability of the $H$-action (Theorem \ref{thm.irreducible.H.decompsable}) and the equidistribution results of Benoist--Quint \cite{BQ1,BQ3}. We note however that we do not treat the question of equidistribution of trajectories of points $x \in X \setminus X_\mathcal{C}$. For such points, already at the level of the base space $G/Q$, the corresponding equidistribution question does not seem to be well-understood in all cases (cf.~ \cite{BQ.compositio}). \begin{remark} The conclusion of Proposition \ref{prop.equidist.H.irred} also holds if we replace the Ces\`{a}ro average $\frac{1}{n}\sum_{k=1}^n \mu^{\ast k}\ast \delta_x $ by the sequence of empirical measures. More precisely, for every $x \in X_\mathcal{C}$, for $\mu^{\N}$-a.e.~ $a \in H^{\N}$, the sequence $\frac{1}{n}\sum_{k=0}^{n-1} \delta_{a_k\ldots a_0 x}$ converges to a product measure of the same form as in Theorem \ref{thm.irreducible.H.decompsable}. This follows in the same way, using in addition Breiman's law of large numbers (see e.g.~ \cite[Corollary 3.3]{BQ3}) and the corresponding empirical measure equidistribution results of Benoist--Quint. \end{remark} \begin{proof} It is clear that any limit point $\nu$ of $\frac{1}{n}\sum_{k=1}^n \mu^{\ast k}\ast \delta_x$ is a $\mu$-stationary probability measure. 
By Theorem \mathfrak{r}ef{thm.irreducible.H.decompsable}, there exists a standard trivialization yielding $H$-equivariant projections on $\mathfrak{p}i_1:X \to G/Q$ and $\mathfrak{p}i_2:X \to S/\Lambda$, where equivariance in the latter is with respect to a morphism $H \to S$. As a result, a limit point $\mathfrak{n}u$ projects via $\mathfrak{p}i_1$ and $\mathfrak{p}i_2$ to the limit points of $\frac{1}{n}\sum_{k=1}^n \mu^{\mathfrak{a}st k}\mathfrak{a}st \delta_{\mathfrak{p}i_1x}$ and $\frac{1}{n}\sum_{k=1}^n \mu^{\mathfrak{a}st k}\mathfrak{a}st \delta_{\mathfrak{p}i_2x}$, respectively. However, by the uniqueness of Furstenberg measure, the first sequence admits the Furstenberg measure $\overline{\mathfrak{n}u}_F$ as a limit. Moreover, by \cite[Theorem 1.5]{prohaska-sert-shi}, the second sequence also admits a limit $\mathfrak{n}u^{F}$ which is a homogeneous probability measure on $S/\Lambda$. Since the factor $H/Q_H$ is $\mu$-proximal, it follows by the bijection in Proposition \mathfrak{r}ef{prop.decomposable.measure.class} that $\mathfrak{n}u$ is the unique coupling of $\overline{\mathfrak{n}u}_F$ and $\mathfrak{n}u^{F}$, i.e.~ the product $\overline{\mathfrak{n}u}_F \otimes \mathfrak{n}u^{F}$.\end{proof} \subsection{Equidistribution for diagonal fiber actions (Case 2.2)}\label{subsec.equidist.diagonal} As mentioned above, unlike the previous cases, the equidistribution problem for the diagonal fiber actions case does not boil down to the corresponding work of Benoist--Quint and we now proceed with our result in this case. Recall from Case 2.2 and Lemma \mathfrak{r}ef{lemma.its.iwasawa} that we have a standard trivialization $X_\calC\simeq \calC\times_\mathfrak{a}lpha S/\Lambda$ such that the action of $H$ on the fibre $S/\Lambda$ is by a one-dimensional split subgroup $D^\mathfrak{p}m$ of $S$ through the Iwasawa cocycle $\mathfrak{a}lpha$ up to a sign. The main statement for $\PGL_2(\R)$-case is given in the introduction. 
Here is the statement for the $\SL_2(\R)$ case. \begin{theorem}\label{thm.equidist.geod.sl} Keep the hypotheses and notation of Theorem \ref{thm.measure.class.geod} and let $X_\mathcal{C} \simeq H/Q_H \times S/\Lambda$ be the trivialization given by Theorem \ref{thm.measure.class.geod}. Suppose in addition that $H\simeq \SL_2(\R)$ and the measure $\mu$ has finite exponential moment. Suppose $\Gamma_\mu$ preserves a proper closed cone in $\R^2$. Then, the $D$-orbit of $z \in S/\Lambda$ equidistributes to a probability measure $m$ on $S/\Lambda$ if and only if for any $x=(\theta,z)\in X_\calC$ with $\theta$ inside the support of the Furstenberg measure, we have the convergence \[ \frac{1}{n}\sum_{k=1}^n\mu^{*k}*\delta_x\rightarrow \bar{\nu}_F\otimes m \quad \text{as} \; \; n \to \infty. \] If $\Gamma_\mu$ does not preserve a proper closed cone in $\R^2$, then the $D^\pm$-orbit of $z \in S/\Lambda$ equidistributes to a probability measure $m$ on $S/\Lambda$ if and only if for any $x=(\theta,z)\in X_\calC$, we have the convergence \[ \frac{1}{n}\sum_{k=1}^n\mu^{*k}*\delta_x\rightarrow \bar{\nu}_F\otimes m \quad \text{as} \; \; n \to \infty. \] \end{theorem} \subsubsection{Equidistribution result on $K\times \R$} For $H\simeq \PGL_2(\R)$, if $\mu$ is supported on $\PSL_2(\R)$, then there is no sign issue thanks to the choice of the section $s$ (as taking values in $K^o$). We only need to prove an equidistribution result on $K^o\times \R$. The proof is the same as in the $\SL_2(\R)$ case. We will comment at the end on the changes needed to handle the $\PSL_2(\R)$ case (i.e.~ Theorem \ref{thm.equidist.geod}). In order to treat the sign part in the cocycle $\alpha$, we start with an equidistribution result on $K\times \R$ instead of $H/Q_H\times \R$. 
Recall that for $H \simeq \SL_2(\R)$ and $\Gamma_\mu$ preserves a closed proper cone (Case 2.2.a), we have two $\mu$-stationary and ergodic measures $\mathfrak{n}u_1,\mathfrak{n}u_2$ on $\mathbb{S}^1$, both are the lifts of the Furstenberg measure on the projective space $\P(V)$. In this case, there exist two continuous non-negative functions $p_1$ and $p_2$ (for the characterization of $p_1$ and $p_2$, see \cite[Theorem 2.16]{GLP}) on $\mathbb{S}^1$ such that $p_1+p_2=1$, $p_i|{\Supp \mathfrak{n}u_j}=\delta_{i,j}$, where $\delta_{i,j}$ is the Kronecker symbol, and for $j=1,2$, and $x\in \mathbb{S}^1$, we have \[ p_j(x)=\int p_j(gx)\,d\mu(g). \] Otherwise (Case 2.2.b), there exists a unique $\mu$-stationary measure $\mathfrak{n}u_K$ on $ \mathbb{S}^1$. Let us define the following measures $\mathfrak{n}u_x$: \mathfrak{b}egin{definition}\label{defi:nux} For $x\in \mathbb{S}^1$, we define \[\mathfrak{n}u_x :=p_1(x)\mathfrak{n}u_1+p_2(x)\mathfrak{n}u_2 \, \text{ in Case 2.2.a, otherwise } \mathfrak{n}u_x=\mathfrak{n}u_K.\] \end{definition} According to \cite[Theorem 2.16]{GLP}, these measures $\mathfrak{n}u_x$ are the limit distributions for the random walk on $\mathbb{S}^1$ starting from $x$, following the law of $\mu$. For the probability measure $\mu$, let $\lambda_\mu$ be its Lyapunov exponent, defined as the almost sure limit of $\frac{1}{n}\log\|g_1\cdots g_n\|$ where $g_1,\cdots,g_n$ are i.i.d.~ random variables with distribution $\mu$. Let $\sigma_\chi(g,x)=\mathfrak{b}ar\chi(\mathfrak{b}ar\sigma(g,\eta))$ for $g\in H$, $x\in \mathbb{S}^1$ and $\eta=\R x\in H/Q_H$, where $\mathfrak{b}ar\chi(\mathfrak{b}ar\sigma(g,\eta))$ is defined in Lemma \mathfrak{r}ef{lemma.iwasawa.norm}. Clearly, $\sigma_\chi$ does not depend on the lift $x$ of $\eta$ to $\mathbb{S}^1$, so we sometimes use equivalently $\eta$ in the second coordinate to ease the notation. 
\mathfrak{b}egin{proposition}\label{prop.equidistribution} Under the same assumptions as in Theorem \mathfrak{r}ef{thm.equidist.geod.sl}, there exist $\mathfrak{g}amma >0$ and $\eta>0$ such that the following holds. For $n\in\N$, $t=\lambda_\mu n$, $\lambda_\mu/2>\eps_1>2/n$, for any $\varphi\in C^3(\mathbb{S}^1\times \R)$ and for $w \in \mathbb{S}^1$ \mathfrak{b}egin{equation} \label{eq.renewal+LDP} \mathfrak{b}egin{aligned} \frac{1}{n}\sum_{k=1}^n\int \varphi(gw ,\sigma_\chi(g,w ))\ d\mu^{*k}(g)&=\frac{1}{t}\int_{\mathbb{S}^1}\int_0^t \varphi(y,s)\ d s\ d \mathfrak{n}u_w (y)\\ &+O(e^{-\eta\eps_1 n}|\varphi|_{C^3}+|\varphi|_\infty\eps_1+\frac{C|\varphi|_\infty}{n(1-e^{-c})}), \end{aligned} \end{equation} where the constants $C,c>0$ come from the large deviation estimates with rate $\varepsilon_1$ (see Theorem \mathfrak{r}ef{thm:LDP}). \end{proposition} The proof of Proposition \mathfrak{r}ef{prop.equidistribution} mainly uses the renewal theorem to get the equidistribution and large deviation bounds to get some control of the error. \mathfrak{b}egin{remark}[Error term] In order to get a rate in the convergence, we need to know the dependence of the constants $C$ and $c>0$ on $\eps_1$. When $\mu$ has bounded support, both constants can be estimated with $C$ bounded and $c$ quadratic in $\varepsilon_1$, see \cite[Proposition 1.13]{aoun-sert.concentration} which provides subgaussian concentration estimates. In this case, we can get an explicit error term $O(n^{-1/3}|\varphi|_{C^3})$ in Proposition \mathfrak{r}ef{prop.equidistribution}. With exponential moment, $c$ can still be shown to be quadratic in $\varepsilon_1$ (locally). On the other hand, it might also be possible to use large deviations bounds in a more clever way to get a better error term. \end{remark} We now proceed to prove Proposition \mathfrak{r}ef{prop.equidistribution}. 
For a function $\varphi$ on $\mathbb{S}^1\times \R$, we define its $L^1C^\mathfrak{g}amma$ norm by \[|\varphi|_{L^1C^\mathfrak{g}amma}=\int_{\R}\|\varphi(\cdot ,s)\|_{C^\mathfrak{g}amma(\mathbb{S}^1)}ds,\ \] and its $W^{1,2}C^\mathfrak{g}amma$ norm by \[|\varphi|_{W^{1,2}C^\mathfrak{g}amma}=|\mathfrak{p}artial_{ss}\varphi|_{L^1C^\mathfrak{g}amma}+|\varphi|_{L^1C^\mathfrak{g}amma}, \] where $C^\mathfrak{g}amma$ is the $\mathfrak{g}amma$-H\"older norm. The first ingredient of the proof of Proposition \mathfrak{r}ef{prop.equidistribution} is the following uniform quantitative renewal theorem which was first proven in \cite{jialun.ens}. We borrow the current version from \cite{jialun.advances}. \mathfrak{b}egin{theorem}\cite[Proposition 5.4]{jialun.advances}\label{thm.renewal} Under the same assumptions as in Theorem \mathfrak{r}ef{thm.equidist.geod.sl}, we have the following. For a compactly supported $C^3$ function $f$ on $\mathbb{S}^1\times\R$, define the renewal sum for $w \in \mathbb{S}^1$ and $t\in\R^+$ by \[Rf(w ,t)=\sum_{k=1}^\infty \int f(gw ,\sigma_\chi(g,w )-t)d\mu^{*k}(g). \] Then, there exists $\eta>0$ such that \mathfrak{b}egin{equation}\label{equ_renewal} Rf(w ,t)=\frac{1}{\lambda_\mu}\int_{\mathbb{S}^1} \int_{-t}^\infty f(y,u)\ d Leb(u)\ d\mathfrak{n}u_w (y)+O(e^{-\eta (t-|\Supp f|)}|f|_{W^{1,2}C^\mathfrak{g}amma}), \end{equation} where $$|\Supp f|=\sup\{|s| : (w ,s)\in\Supp f \text{ for some }w \in \mathbb{S}^1\}.$$ \end{theorem} A crucial point in this theorem is that the error term is of the form $e^{-\eta(t-|\Supp{f}|)}$, which enables us to take $f$ with support of size $(1-\eps)t$. The second ingredient of the proof of Proposition \mathfrak{r}ef{prop.equidistribution} is the following large deviation estimate; we borrow the precise statement from \cite[Thm. 13.11 (iii)]{bq.book}. 
\begin{theorem}[Le Page]\label{thm:LDP} For every $\eps_1>0$, there exist constants $C>0$ and $c>0$ such that \[\mu^{*n}\{g\in G : |\sigma_\chi(g,w )-\lambda_\mu n|\geqslant \eps_1 n \}\leqslant Ce^{-cn}. \] \end{theorem} We can now give \begin{proof}[Proof of Proposition \ref{prop.equidistribution}] We fix $n \in \N$ large enough so that $\lambda_\mu \geqslant 5/n$ (recall that the Lyapunov exponent $\lambda_\mu$ is positive, a well-known result of Furstenberg), and fix $\varepsilon_1$, $\varphi$ and $w$ as in the statement. We will estimate the left-hand side of \eqref{eq.renewal+LDP} separately for $\sigma_\chi(g,w )$ inside three different intervals $[(\lambda_\mu-\eps_1)n,\infty)$, $[\eps_1n,(\lambda_\mu-\eps_1)n]$ and $(-\infty,\eps_1n]$. The second interval will give us the main term; the other intervals will yield the error term. Take a smooth cutoff $\chi$ which equals $1$ on $[\eps_1n,(\lambda_\mu -\eps_1)n]$ and equals $0$ outside of $[\eps_1n-1,(\lambda_\mu -\eps_1)n+1]$ so that we have $\mathds{1}-\chi\leqslant\mathds{1}_{s<\eps_1n}+\mathds{1}_{s>(\lambda_\mu-\eps_1)n} $. Then, we can write \begin{equation}\label{eq:split} \begin{split} &\left|\frac{1}{n}\sum_{k=1}^n\int \varphi(gw ,\sigma_\chi(g,w ))\ d\mu^{*k}(g)-\frac{1}{n}\sum_{k=1}^n\int \varphi(gw ,\sigma_\chi(g,w ))\chi(\sigma_\chi(g,w ))\ d\mu^{*k}(g)\right|\\ \leqslant & \frac{1}{n}\left|\sum_{k=1}^n\int \varphi(gw ,\sigma_\chi(g,w ))\mathds{1}_{\sigma_\chi(g,w )<\eps_1n}\ d\mu^{*k}(g)\right|\\ &+\frac{1}{n}\left|\sum_{k=1}^n\int \varphi(gw ,\sigma_\chi(g,w ))\mathds{1}_{\sigma_\chi(g,w )>(\lambda_\mu-\eps_1)n}\ d\mu^{*k}(g)\right|. \end{split} \end{equation} \textbf{Main term: } Let $t=n\lambda_\mu$ and $f(w ,s)=\varphi(w ,s+t)\chi(s+t)$. 
Then by \eqref{equ_renewal} \mathfrak{b}egin{equation}\label{eq:main-renewal} \mathfrak{b}egin{split} &\frac{1}{n}\sum_{k=1}^\infty\int \varphi(gw ,\sigma_\chi(g,w ))\chi(\sigma_\chi(g,w ))\ d\mu^{*k}(g)\\ =&\frac{1}{n}\sum_{k=1}^\infty\int f(gw ,\sigma_\chi(g,w )-t)\mu^{*k}(g)\\ =&\frac{1}{t}\int_{\mathbb{S}^1}\int_{-t}^\infty f\ dLeb\ d\mathfrak{n}u_w +\frac{1}{n}O\mathfrak{b}ig(e^{-\eta(t-|\Supp f|)}|f|_{W^{1,2}C^\mathfrak{g}amma}\mathfrak{b}ig), \end{split} \end{equation} where in the error term, we have $t-|\Supp f|=t-(t-\eps_1 n)=\eps_1n$. For the main term, using the formula of $f$, we have \mathfrak{b}egin{align*} &\frac{1}{n\lambda_\mu}\int_{\mathbb{S}^1}\int_0^\infty \varphi(y,s)\chi(s)\ dLeb(s)d\mathfrak{n}u_w(y)\\ =&\frac{1}{n\lambda_\mu}\int_{\mathbb{S}^1}\int_{\eps_1n}^{(\lambda_\mu-\eps_1)n} \varphi(y,s)\chi(s)\ dLeb(s)d\mathfrak{n}u_w(y) +|\varphi|_\infty\frac{2}{n\lambda_\mu}\\ =&\frac{1}{t}\int_{\mathbb{S}^1}\int_0^{t}\varphi(y,s)\ dLeb(s)\ d\mathfrak{n}u_w (y)+|\varphi|_\infty O(\eps_1+\frac{1}{n}). \end{align*} For the error term in \eqref{eq:main-renewal}, we have \[ \frac{1}{n}|f|_{W^{1,2}C^\mathfrak{g}amma}\leqslant \sup_s\{|\varphi|_{C^\mathfrak{g}amma}\}+\sup_s\{ |\mathfrak{p}artial_{ss}\varphi|_{C^\mathfrak{g}amma}\}\leqslant |\varphi|_{C^3}.\] Now, we give an upper bound of the sum over ${k> n}$: \mathfrak{b}egin{align*} &\frac{1}{n}\sum_{k>n}^\infty\int \varphi(gw ,\sigma_\chi(g,w ))\chi(\sigma_\chi(g,w ))d\mu^{*k}(g) \\ \leqslant & |\varphi|_\infty \frac{1}{n}\sum_{k>n}^\infty\mu^{*k}(\{g,\ \sigma_\chi(g,w )<(\lambda_\mu-\eps_1)n+1\}). \end{align*} Due to the assumption $\eps_1 n\mathfrak{g}eqslant 2$, we obtain \[\sigma_\chi(g,w )-\lambda_\mu n\leqslant -\eps_1n+1\leqslant -\eps_1n/2. 
\] We use the large deviation estimate (Theorem \mathfrak{r}ef{thm:LDP}) to obtain \mathfrak{b}egin{equation*} \frac{1}{n}\sum_{k>n}^\infty\mu^{*k}(\{g : \sigma_\chi(g,w )<(\lambda_\mu-\eps_1)n+1\})\leqslant \frac{1}{n}\sum_{k>n}Ce^{-ck}=\frac{Ce^{-cn}}{n(1-e^{-c})}, \end{equation*} where the constants $C,c$ depend on $\eps_1$. Collecting above estimates, we obtain \mathfrak{b}egin{equation}\label{eq:second} \mathfrak{b}egin{split} \frac{1}{n}\sum_{k=1}^n\int \varphi(gw ,\sigma_\chi(g,w ))\chi(\sigma_\chi(g,w ))d\mu^{*k}(g)=&\frac{1}{t}\int_{\mathbb{S}^1}\int_0^t \varphi(y,s)\ dLeb(s)\ d\mathfrak{n}u_w (y)\\ &\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\!\! +O\left(e^{-\eta\eps_1 n}|\varphi|_{C^3}+|\varphi|_\infty \left(\eps_1+\frac{1}{n}+\frac{Ce^{-cn}}{n(1-e^{-c})}\mathfrak{r}ight)\mathfrak{r}ight). \end{split} \end{equation} \textbf{Error term I: } For $k<n_0:=\frac{\lambda_\mu-\eps_1}{\lambda_\mu+\eps_1}n$, we have $k(\lambda_\mu+\eps_1)<(\lambda_\mu-\eps_1)n$. By the large deviation estimates (Theorem \mathfrak{r}ef{thm:LDP}), we have \mathfrak{b}egin{align*} &\frac{1}{n}\sum_{k=1}^{n_0}\int \varphi(gw ,\sigma_\chi(g,w ))\mathds{1}_{\sigma_\chi(g,w )\mathfrak{g}eqslant (\lambda_\mu-\eps_1)n}d\mu^{*k}(g)\\ \leqslant& |\varphi|_\infty \frac{1}{n}\sum_{k=1}^{n_0}\mu^{*k}(\{g\in G : \sigma_\chi(g,w )>(\lambda_\mu+\eps_1)k \})\\ \leqslant & |\varphi|_\infty C\frac{1}{n}\sum_{k=1}^{n_0} e^{-ck}\leqslant \frac{|\varphi|_\infty C}{n(1-e^{-c})}. \end{align*} For the part $n_0\leqslant k\leqslant n$, we use the absolute value to bound \[\frac{1}{n}\sum_{k=n_0}^{n}\int \varphi(gw ,\sigma_\chi(g,w ))\mathds{1}_{\sigma_\chi(g,w )\mathfrak{g}eqslant (\lambda_\mu-\eps_1)n}d\mu^{*k}(g)\leqslant |\varphi|_\infty\frac{n-n_0}{n}=|\varphi|_\infty\frac{2\eps_1}{\lambda_\mu+\eps_1}. 
\] Thus, we have \mathfrak{b}egin{equation}\label{eq:first} \frac{1}{n}\sum_{k=1}^{n}\int \varphi(gw ,\sigma_\chi(g,w ))\mathds{1}_{\sigma_\chi(g,w )\mathfrak{g}eqslant (\lambda_\mu-\eps_1)n}d\mu^{*k}(g)\leqslant \frac{|\varphi|_\infty C}{n(1-e^{-c})}+|\varphi|_\infty\frac{2\eps_1}{\lambda_\mu+\eps_1}. \end{equation} \textbf{Error term II: }If $k>n_1:=\eps_1n/(\lambda_\mu-\eps_1)$, then we have $\eps_1n<k(\lambda_\mu-\eps_1)$ and hence we can apply the large deviation estimate to obtain \mathfrak{b}egin{align*} &\frac{1}{n}\sum_{k=n_1}^n\int \varphi(gw ,\sigma_\chi(g,w ))\mathds{1}_{\sigma_\chi(g,w )\leqslant \eps_1 n}d\mu^{*k}(g)\\ \leqslant & |\varphi|_\infty \frac{1}{n}\sum_{k=n_1}^n\mu^{*k}(\{g\in G : \sigma_\chi(g,w )<(\lambda_\mu-\eps_1)k \})\leqslant \frac{C|\varphi|_\infty e^{-cn_1}}{n(1-e^{-c})}. \end{align*} For the part $k\leqslant n_1$, \mathfrak{b}egin{align*} \frac{1}{n}\sum_{k=1}^{n_1}\int \varphi(gw ,\sigma_\chi(g,w ))\mathds{1}_{\sigma_\chi(g,w )\leqslant \eps_1 n}d\mu^{*k}(g)\leqslant|\varphi|_\infty\frac{n_1}{n}=|\varphi|_\infty\frac{\eps_1}{\lambda_\mu-\eps_1}. \end{align*} Thus, we have \mathfrak{b}egin{equation}\label{eq:third} \frac{1}{n}\sum_{k=1}^{n}\int \varphi(gw ,\sigma_\chi(g,w ))\mathds{1}_{\sigma_\chi(g,w )\leqslant \eps_1 n}d\mu^{*k}(g)\leqslant|\varphi|_\infty\frac{\eps_1}{\lambda_\mu-\eps_1}+\frac{C|\varphi|_\infty e^{-cn_1}}{n(1-e^{-c})}. \end{equation} Finally, combining \eqref{eq:split}, \eqref{eq:second}, \eqref{eq:first} and \eqref{eq:third}, we obtain \mathfrak{b}egin{align*} \frac{1}{n}\sum_{k=1}^\infty\int \varphi(gw ,\sigma_\chi(g,w ))\ d\mu^{*k}(g)=\frac{1}{t}\int_{\mathbb{S}^1}\int_0^t \varphi(y,s)\ dLeb(s)\ d\mathfrak{n}u_w (y)\\ +O\left(e^{-\eta\eps_1 n}|\varphi|_{C^3}+|\varphi|_\infty\frac{\eps_1}{\lambda_\mu-\eps_1}+\frac{C|\varphi|_\infty}{n(1-e^{-c})}\mathfrak{r}ight). 
\end{align*} \end{proof} \subsubsection{Equidistribution on $X_\calC$} We now use the equidistribution on $K\times \R$ (Proposition \mathfrak{r}ef{prop.equidistribution}) to deduce the equidistribution on $X_\calC$, that is, to give \mathfrak{b}egin{proof}[Proof of Theorem \mathfrak{r}ef{thm.equidist.geod.sl}] Let $K\times_\sigma D$ be the fiber bundle with $H$ action, the action of $H$ is given by $h(k,d)=(hk,\sigma(h,k)d)$, where we identify $K \simeq \mathbb{S}^1 \simeq H/AN$. We define a map $p$ from $K\times_\sigma D$ to $H/Q_H\times_\mathfrak{a}lpha D^\mathfrak{p}m$ by \[ p(k,d)=(kM,\mathrm{sg}(k)d), \] where $\mathrm{sg}(k)$ is the sign element in $M$. By Lemma \mathfrak{r}ef{lem:sign}, we have \mathfrak{b}egin{lemma}\label{lem.hequivariant} The map $p$ is an $H$-equivariant map from $K\times_\sigma D$ to $H/Q_H\times_\mathfrak{a}lpha D^\mathfrak{p}m$. \end{lemma} We denote by $\calG(r,\mathrm{sg}(w))$ the element $(e^r,\mathrm{sg}(w))\in D^\mathfrak{p}m$ and we use additive parameter $r\in \R$. Under this parametrization, the equidistribution of $D$ or $D^\mathfrak{p}m$-orbits of $z \in S/\Lambda$ to some measure $m$ means, respectively, that the measure $\frac{1}{t}\int_0^t\delta_{\calG(r,1)z}\ dr$ or the measure $\frac{1}{2t}\int_0^t\delta_{\calG(r,1)z}+\delta_{\calG(r,-1)z}\ dr$ converges to $m$ as $t \to \infty$. Let $\mathfrak{p}si$ be $C^3$ function on $X_\mathcal{C} \simeq H/Q_H \times S /\Lambda$ and $z \in S/\Lambda$. Set $\varphi(w,r):=\mathfrak{p}si(\eta,\calG(r,\mathrm{sg}(w))z)$ where $\eta$ is the projection of $w$ on $H/Q_H \simeq K/M$. Thanks to Lemma \mathfrak{r}ef{lem.hequivariant}, we have the relation \mathfrak{b}egin{equation} \varphi(g s(\eta),\sigma_\chi(g,\eta))=\mathfrak{p}si(g\eta,\mathfrak{a}lpha(g,\eta)z)=\mathfrak{p}si(gx), \end{equation} for $x=(\eta,z) \in X_\mathcal{C}$ and where, we recall, $s:K/M \to K$ is the section. 
Therefore, we have \mathfrak{b}egin{equation}\label{eq.corresp.phi.psi} \frac{1}{n}\sum_{1\leqslant k\leqslant n}\mu^{*k}*\delta_x(\mathfrak{p}si)=\frac{1}{n}\sum_{k=1}^n\int \varphi(g s(\eta) ,\sigma_\chi(g,\eta))\ d\mu^{*k}(g). \end{equation} We know that for any $\mathfrak{p}si\in C^3(X_\calC)$ with bounded $C^3$ norm, with suitable choice of $\eps_1$ depending on $n$, thanks to Proposition \mathfrak{r}ef{prop.equidistribution} and the relation \eqref{eq.corresp.phi.psi}, for $t=\lambda_\mu n$, we have \[ \frac{1}{n}\sum_{1\leqslant k\leqslant n}\mu^{*k}*\delta_x(\mathfrak{p}si)- \frac{1}{t}\int_{\mathbb{S}^1}\int_0^t\varphi(k',r)\ dr\ d\mathfrak{n}u_{s(\eta)}(k')\mathfrak{r}ightarrow 0 \] as $n$, equivalently $t$, tends to $\infty$. On the other hand, by construction of the function $\varphi(\cdot,\cdot)$ and the measures $\mathfrak{n}u_w$ for $w \in \mathbb{S}^1$, we have \mathfrak{b}egin{align*} \int_{\mathbb{S}^1} \varphi(k',r)\ d\mathfrak{n}u_w(k')=& p_1(w)\int_{\mathbb{S}^1} \mathfrak{p}si(k'M,\calG(r,\mathrm{sg}(k'))z)\ d\mathfrak{n}u_1(k')\\ &+p_2(w)\int_{\mathbb{S}^1} \mathfrak{p}si(k'M,\calG(r,\mathrm{sg}(k'))z) d\mathfrak{n}u_2(k')\\ =&p_1(w)\int \mathfrak{p}si(\eta,\calG(r,1)z)\ d\overline{\mathfrak{n}u}_F(\eta)+p_2(w)\int \mathfrak{p}si(\eta,\calG(r,-1)z)\ d\overline{\mathfrak{n}u}_F(\eta). \end{align*} We get that \mathfrak{b}egin{equation*} \frac{1}{n}\sum_{1\leqslant k\leqslant n}\mu^{*k}*\delta_x(\mathfrak{p}si)- \frac{1}{t}\int \int_0^t p_1(s(\eta))\mathfrak{p}si(\eta,\calG(r,1)z)+p_2(s(\eta))\mathfrak{p}si(\eta,\calG(r,-1)z)\ dr\ d \overline{\mathfrak{n}u}_F(\eta)\mathfrak{r}ightarrow 0. \end{equation*} Recall that for Case 2.2.a, the section $s$ is chosen so that its image contains the support of $\mathfrak{n}u_1$. In particular, $p_1(s(\eta))=1$ and $p_2(s(\eta))=0$ for every $\eta$ in the support of the Furstenberg measure $\overline{\mathfrak{n}u}_F$. 
Therefore, in Case 2.2.a, if the $\eta$ coordinate of $x=(\eta,z)$ belongs to the support of $\overline{\mathfrak{n}u}_F$, then we have \mathfrak{b}egin{equation}\label{eq.conv1} \frac{1}{n}\sum_{1\leqslant k\leqslant n}\mu^{*k}*\delta_x(\mathfrak{p}si)- \frac{1}{t}\int \int_0^t \mathfrak{p}si(\eta,\calG(r,1)z)\ dr\ d\mathfrak{b}ar\mathfrak{n}u_F(\eta)\mathfrak{r}ightarrow 0. \end{equation} In Case 2.2.b, by a similar computation and using the fact that the unique measure $\mathfrak{n}u_K$ on $\mathbb{S}^1$ writes as $\mathfrak{n}u_K=\frac{1}{2} \int (\delta_w + \delta_{-w}) d\overline{\mathfrak{n}u}_F(\R w)$, we have \mathfrak{b}egin{equation}\label{eq.conv2} \frac{1}{n}\sum_{1\leqslant k\leqslant n}\mu^{*k}*\delta_x(\mathfrak{p}si)- \frac{1}{2t}\int \int_0^t (\mathfrak{p}si(\eta,\calG(r,1)z)+\mathfrak{p}si(\eta,\calG(r,-1)z)) \ dr\ d\overline{\mathfrak{n}u}_F(\eta)\mathfrak{r}ightarrow 0. \end{equation} By density of $C^3(X_\calC)$ in $C(X_\calC)$, we deduce from \eqref{eq.conv1} and \eqref{eq.conv2} that $ \frac{1}{n}\sum_{1\leqslant k\leqslant n}\mu^{*k}*\delta_x$ converges weakly to a measure $\mathfrak{b}ar\mathfrak{n}u_F\otimes m$ if and only if the $D$-orbit or the $D^{\mathfrak{p}m}$- orbit (respectively in Case 2.2.a or Case 2.2.b) starting at $z \in S/\Lambda$ equidistributes to the measure $m$. \end{proof} \mathfrak{b}egin{proof}[Proof of Theorem \mathfrak{r}ef{thm.equidist.geod}] For $\Gamma_\mu< \PSL_2(\R)\simeq \PGL_2(\R)^o$, we take the lift $\mu_1$ on $\SL_2(\R)$ of $\mu$ on $\PSL_2(\R)$ with equal probability on two preimages of each element. Then we can apply Theorem \mathfrak{r}ef{thm.renewal} to this new measure $\mu_1$. Here the element $\diag(-1,-1)$ (which is the non-trivial element of $M$ in the case of $H \simeq \SL_2(\R)$) maps to identity in $G$, which acts trivially. Then the same argument as in the proof of Theorem \mathfrak{r}ef{thm.renewal} readily yields Theorem \mathfrak{r}ef{thm.equidist.geod}. 
\end{proof} \mathfrak{b}egin{thebibliography}{99} \mathfrak{b}ibitem{aoun-sert.concentration} Aoun, R. and Sert, C. Random walks on hyperbolic spaces: Concentration inequalities and probabilistic Tits alternative. Probab. Theory Relat. Fields vol: 184, pages 323--365 (2022). \mathfrak{b}ibitem{benard-desaxce} B\'{e}nard T. and de Saxc\'{e}, N. Random walks with bounded first moment on finite-volume spaces. Geom. Func. Anal. 32, no. 4 (2022): 687--724. \mathfrak{b}ibitem{BQ1} Benoist, Y. and Quint, J.-F. Mesures stationnaires et fermés invariants des espaces homogènes. Ann. Math. 174 (2011), no. 2, 1111--1162. \mathfrak{b}ibitem{BQ2} Benoist, Y. and Quint, J.-F. Stationary measures and invariant subsets of homogeneous spaces (II). J. Amer. Math. Soc. 26 (2013), no. 3, 659–734. \mathfrak{b}ibitem{bq.non-escape} Benoist, Y. and Quint J.-F. Random walks on finite volume homogeneous spaces. Invent. Math. 187 (2012), no. 1, 37--59. \mathfrak{b}ibitem{BQ3} Benoist, Y. and Quint, J.-F. Stationary measures and invariant subsets of homogeneous spaces (III). Ann. Math. 178 (2013), no. 3, 1017--1059. \mathfrak{b}ibitem{BQ.compositio} Benoist, Y. and Quint, J.-F. Random walks on projective spaces. Compos. Math. 150 (2014), no. 9, 1579--1606. \mathfrak{b}ibitem{bq.book} Benoist, Y. and Quint, J-F. Random walks on reductive groups. Ergebnisse der Mathematik und ihrer Grenzgebiete. 3. Folge., Vol 62. Springer, Cham, 2016. \mathfrak{b}ibitem{bougerol.lacroix} Bougerol, Ph. B. and Lacroix, J. Products of random matrices with applications to Schr\"{o}dinger operators. Vol. 8. Springer Science \& Business Media, 2012. \mathfrak{b}ibitem{eskin-lindenstraus.short} Eskin, A. and Lindenstrauss E. Zariski dense random walks on homogeneous spaces. Preprint on webpage at \url{https://www.math.uchicago.edu/~eskin/RandomWalks/short_paper.pdf}. \mathfrak{b}ibitem{eskin-lindenstrauss.long} Eskin, A. and Lindenstrauss, E. Random walks on locally homogeneous spaces. 
Preprint on webpage at \url{http://www.math.uchicago.edu/~eskin/RandomWalks/paper.pdf}. \mathfrak{b}ibitem{eskin-mirzakhani} Eskin, A. and Mirzakhani, M. Invariant and stationary measures for the action on moduli space. Publ. Math. Inst. Hautes \'Etudes Sci. 127 (2018), 95--324. \mathfrak{b}ibitem{furstenberg.poisson} Furstenberg, H. A Poisson formula for semi-simple Lie groups. Ann. Math. 77 (1963), 335--386. \mathfrak{b}ibitem{furstenberg.nc} Furstenberg, H. Noncommuting random products. Trans. Amer. Math. Soc. 108 (1963), 377--428. \mathfrak{b}ibitem{furstenberg.boundary.theory} Furstenberg, H. Boundary theory and stochastic processes on homogeneous spaces. Harmonic analysis on homogeneous spaces (Proc. Sympos. Pure Math., Vol. XXVI, Williams Coll., Williamstown, Mass., 1972), pp. 193--229. Amer. Math. Soc., Providence, R.I., 1973. \mathfrak{b}ibitem{guivarch-raugi.isom.ext} Guivarc’h, Y. and Raugi, A. Actions of large semigroups and random walks on isometric extensions of boundaries. Ann. Sci. \'Ecole Norm. Sup. 40 (2007), no. 2, 209--249. \mathfrak{b}ibitem{goldsheid-margulis} Goldsheid, I. and Margulis, G. Lyapunov Indices of a Product of Random Matrices. Russian Math. Surveys 44 (1989), no. 5, 11--71 \mathfrak{b}ibitem{GLP} Guivarc’h, Y. and Le Page, \'E. Spectral gap properties for linear random walks and Pareto’s asymptotics for affine stochastic recursions. Ann. Inst. Henri Poincaré Probab. Stat. 52 (2016), no. 2, 503--574. \mathfrak{b}ibitem{guivarch-raugi} Guivarc’h, Y. and Raugi, A. Fronti\`{e}re de Furstenberg, propri\'{e}et\'{e}s de contraction et th\'{e}eor\`{e}mes de convergence. Z. Wahrsch. Verw. Gebiete 69 (1985), no. 2, 187--242. \mathfrak{b}ibitem{jialun.ens} Li, J. Fourier decay, Renewal theorem and Spectral gaps for random walks on split semisimple Lie groups. To appear in Ann. Sci. \'Ecole Norm. Sup. \mathfrak{b}ibitem{jialun.advances} Li, J. and Sahlsten, T. Fourier transform of self-affine measures. Adv. Math. 374 (2020), 107349, 35 pp. 
\mathfrak{b}ibitem{knapp.book} Knapp, A. W. Lie groups beyond an introduction. Progress in Mathematics, 140. Birkhäuser Boston, Inc., Boston, MA, 2002. \mathfrak{b}ibitem{kostant} Kostant, B. The principal three-dimensional subgroup and the Betti numbers of a complex simple Lie group. Amer. J. Math., 81 (1959): 973--1032. \mathfrak{b}ibitem{mackey49} Mackey, G. W. Imprimitivity for representations of locally compact groups I. Proc. Nat. Acad. Sci. U.S.A. 35 (1949), 537--545. \mathfrak{b}ibitem{mackey58} Mackey, G. W. Unitary representations of group extensions. I. Acta Math. 99 (1958), 265--311. \mathfrak{b}ibitem{neil-ginzburg.book} Neil, C. and Ginzburg, V. Representation theory and complex geometry. Vol. 42. Boston: Birkh\"auser, 1997. \mathfrak{b}ibitem{prohaska-sert-shi} Prohaska, R., Sert, C. and Shi, R. Expanding measures: Random walks and rigidity on homogeneous spaces. ArXiv:2104.09546. \mathfrak{b}ibitem{ratner.measure.class} Ratner, M. On measure rigidity of unipotent subgroups of semisimple groups. Acta Math. 165 (1990), no. 3--4, 229--309. \mathfrak{b}ibitem{ratner.topological} Ratner, M. Raghunathan’s topological conjecture and distributions of unipotent flows. Duke Math. J. 63 (1991), no. 1, 235--280. \mathfrak{b}ibitem{sargent-shapira} Sargent, O. and Shapira, U. Dynamics on the space of 2-lattices in 3-space. Geom. Funct. Anal. 29 (2019), no. 3, 890--948. \mathfrak{b}ibitem{varadarajan.quantum} Varadarajan, V. S. Geometry of quantum theory. The University Series in Higher Mathematics. D. Van Nostrand Co., Inc., Princeton, N.J.-Toronto, Ont.-London, 1968. \mathfrak{b}ibitem{zimmer.book} Zimmer, R. J. Ergodic theory and semisimple groups. Vol. 81. Springer Science $\&$ Business Media, 2013. \end{thebibliography} \end{document}
\begin{document} \title[Generalized near-group fusion categories]{On generalized near-group fusion categories} \author{Jingcheng Dong} \email{[email protected]} \address{College of Mathematics and Statistics, Nanjing University of Information Science and Technology, Nanjing 210044, China} \keywords{Generalized near-group fusion category; exact factorization; slightly degenerate; Yang-Lee category; Ising category} \subjclass[2010]{18D10} \date{\today} \begin{abstract} In this paper, we study the structure of a generalized near-group fusion category and classify it when it is slightly degenerate. \end{abstract} \maketitle \section{Introduction}\label{sec1} Let ${\mathcal C}$ be a fusion category, and let $G$ be the group generated by invertible simple objects of ${\mathcal C}$. Then there is an action of $G$ on the set of non-isomorphic non-invertible simple objects by left tensor product. If this action is transitive then ${\mathcal C}$ is called a generalized near-group fusion category in \cite{Thornton2012Generalized}. In his thesis \cite{Thornton2012Generalized}, Thornton proved that ${\mathcal C}$ is $\varphi$-pseudounitary and classified ${\mathcal C}$ when it is symmetric or modular. \medbreak Let ${\mathcal C}$ be a generalized near-group fusion category. Then for every non-invertible simple object $X$, $X\otimes X^*$ admits the same decomposition (see Section \ref{sec3}): \begin{equation} \begin{split} X\otimes X^*=\bigoplus_{h\in {\mathcal G}amma}h\oplus k_1X_{1}\oplus \mathrm{cd}ots\oplus k_nX_{n}, \end{split}\nonumber \end{equation} where $\{X_1,\mathrm{cd}ots,X_n\}$ is a full list of non-isomorphic non-invertible simple objects of ${\mathcal C}$, ${\mathcal G}amma$ is the stabilizer of $X$ under the action of $G$. In this paper, we shall say that ${\mathcal C}$ is a generalized near-group fusion category of type $(G,{\mathcal G}amma,k_1,\mathrm{cd}ots,k_n)$. 
If $(k_1,\mathrm{cd}ots,k_n)=(0,\mathrm{cd}ots,0)$ then ${\mathcal C}$ is a generalized Tambara-Yamagami fusion category introduced in \cite{liptrap2010generalized}. If ${\mathcal C}$ exactly has one non-invertible simple object, then $G={\mathcal G}amma$ and ${\mathcal C}$ is a near-group fusion category introduced in \cite{siehler2003near}. The main goal of this paper is to study the structure of ${\mathcal C}$ and classify it when it is slightly degenerate. \medbreak The paper is organized as follows. In Section \ref{sec2}, we recall some basic results and prove some basic lemmas which will be used throughout. In Section \ref{sec3}, we study the fusion rules, non-pointed fusion subcategories of a generalized near-group fusion category ${\mathcal C}$. In particular, we obtain that every component ${\mathcal C}_g$ of the universal grading exactly contains the simple objects $\alpha_g, \alpha_g\otimes Y_1, \mathrm{cd}ots, \alpha_g\otimes Y_s $, where $\alpha_g$ is an invertible simple object in ${\mathcal C}_g$ and $\textbf{1},Y_{1},\mathrm{cd}ots,Y_{s}$ is a list of all nonisomorphic simple object in the adjoint subcategory ${\mathcal C}_{ad}$. In Section \ref{sec4}, we study the slightly degenerate generalized near-group fusion categories. Our result shows that slightly degenerate generalized near-group fusion categories fit into four classes. \section{Preliminaries}\label{sec2} A fusion category ${\mathcal C}$ is a $\mathbb{C}$-linear semisimple rigid tensor category with finitely many isomorphism classes of simple objects, finite-dimensional vector space of morphisms and the unit object $\textbf{1}$ is simple. \subsection{Invertible simple objects}\label{sec2.1} Let ${\mathcal C}$ be a fusion category. The tensor product in ${\mathcal C}$ induces a ring structure on the Grothendieck ring $K({\mathcal C})$. 
By \cite[Section 8]{etingof2005fusion}, there is a unique ring homomorphism ${\mathcal F}Pdim:K({\mathcal C})\to\mathbb{R}$ such that ${\mathcal F}Pdim(X)\geq 1$ for all nonzero $X\in {\mathcal C}$. We call ${\mathcal F}Pdim(X)$ the Frobenius-Perron dimension of $X$. The Frobenius-Perron dimension of ${\mathcal C}$ is defined by ${\mathcal F}Pdim({\mathcal C})=\sum_{X\in\mathcal{I}rr({\mathcal C})}{\mathcal F}Pdim(X)^2$, where $\mathcal{I}rr({\mathcal C})$ is the set of isomorphism classes of simple objects in ${\mathcal C}$. A simple object $X\in {\mathcal C}$ is called invertible if $X\otimes X^*\cong \textbf{1}$, where $X^*$ is the dual of $X$. This implies that $X$ is invertible if and only if ${\mathcal F}Pdim(X)=1$. A fusion category ${\mathcal C}$ is called pointed if every element in $\mathcal{I}rr({\mathcal C})$ is invertible. Let ${\mathcal C}_{pt}$ be the fusion subcategory generated by all invertible simple objects in ${\mathcal C}$. Then ${\mathcal C}_{pt}$ is the largest pointed fusion subcategory of ${\mathcal C}$. Let $G({\mathcal C})$ be the group generated by $\mathcal{I}rr({\mathcal C}_{pt})$. Then $G({\mathcal C})$ admits an action on the set $\mathcal{I}rr({\mathcal C})$ by left tensor product. Let $G[X]$ be the stabilizer of any $X\in \mathcal{I}rr({\mathcal C})$ under this action. Hence for any simple object $X$, we have a decomposition \begin{equation}\label{decom1} \begin{split} X\otimes X^*=\bigoplus_{g\in G[X]}g\oplus\sum_{Y\in \mathcal{I}rr({\mathcal C})/G[X]} \dim\Hom(Y,X\otimes X^*)Y. \end{split} \end{equation} \subsection{Group extensions of fusion categories}\label{sec2.2} Let $G$ be a finite group. A fusion category ${\mathcal C}$ is graded by $G$ if ${\mathcal C}$ decomposes into a direct sum of full abelian subcategories ${\mathcal C}=\oplus_{g\in G}{\mathcal C}_g$ such that $({\mathcal C}_g)^*={\mathcal C}_{g^{-1}}$ and ${\mathcal C}_g\otimes{\mathcal C}_h\subseteq{\mathcal C}_{gh}$ for all $g,h\in G$. 
If ${\mathcal C}_g\neq 0$ for any $g\in G$ then this grading is called faithful. If this is the case we say that ${\mathcal C}$ is a $G$-extension of the trivial component ${\mathcal C}_e$. If ${\mathcal C}=\oplus_{g\in G}{\mathcal C}_g$ is faithful then \cite[Proposition 8.20]{etingof2005fusion} shows that \begin{equation}\label{FPdimgrading} \begin{split} {\mathcal F}Pdim({\mathcal C}_g)={\mathcal F}Pdim({\mathcal C}_h),\,\, {\mathcal F}Pdim({\mathcal C})=|G| {\mathcal F}Pdim({\mathcal C}_e), \forall g,h\in G. \end{split} \end{equation} It follows from \cite{gelaki2008nilpotent} that every fusion category ${\mathcal C}$ has a canonical faithful grading ${\mathcal C}=\oplus_{g\in \mathcal{U}({\mathcal C})}{\mathcal C}_g$ with trivial component ${\mathcal C}_e={\mathcal C}_{ad}$, where ${\mathcal C}_{ad}$ is the adjoint subcategory of ${\mathcal C}$ generated by simple objects in $X\otimes X^*$ for all $X\in \mathcal{I}rr({\mathcal C})$. This grading is called the universal grading of ${\mathcal C}$, and $\mathcal{U}({\mathcal C})$ is called the universal grading group of ${\mathcal C}$. \subsection{M\"{u}ger centralizer}\label{sec2.3} A braided fusion category ${\mathcal C}$ is a fusion category admitting a braiding $c$, where the braiding is a family of natural isomorphisms: $c_{X,Y}$:$X\otimes Y\rightarrow Y\otimes X$ satisfying the hexagon axioms for all $X,Y\in{\mathcal C}$. Let ${\mathcal D}$ be a fusion subcategory of a braided fusion category ${\mathcal C}$. Then the M\"{u}ger centralizer ${\mathcal D}'$ of ${\mathcal D}$ in ${\mathcal C}$ is the fusion subcategory generated by $${\mathcal D}'=\{Y\in{\mathcal C}|c_{Y,X}c_{X,Y}=\id_{X\otimes Y}\, \mbox{for all}\, X\in{\mathcal D}\}.$$ The M\"{u}ger center $\mathcal{Z}_2({\mathcal C})$ of ${\mathcal C}$ is the M\"{u}ger centralizer ${\mathcal C}'$ of ${\mathcal C}$. 
\begin{definition} A braided fusion category ${\mathcal C}$ is called non-degenerate if its M\"{u}ger center $\mathcal{Z}_2({\mathcal C})=\vect$ is trivial. \end{definition} The following theorem implies that a braided fusion category containing a non-degenerate subcategory admits a decomposition in terms of Deligne tensor product. In the case when ${\mathcal C}$ is modular, it is due to M\"{u}ger \cite[Theorem 4.2]{muger2003structure} \begin{theorem}{\cite[Theorem 3.13]{drinfeld2010braided}}\label{MugerThm} Let ${\mathcal C}$ be a braided fusion category and ${\mathcal D}$ be a non-degenerate subcategory of ${\mathcal C}$. Then ${\mathcal C}$ is braided equivalent to ${\mathcal D}\boxtimes {\mathcal D}'$, where $\boxtimes$ stands for the Deligne tensor product. \end{theorem} A braided fusion category ${\mathcal C}$ is called symmetric if $\mathcal{Z}_2({\mathcal C})={\mathcal C}$. A symmetric fusion category ${\mathcal C}$ is called Tannakian if there exists a finite group $G$ such that ${\mathcal C}$ is equivalent to $\operatorname{Rep}(G)$ as braided fusion categories. By \cite[Corollary 2.50]{drinfeld2010braided}, a symmetric fusion category ${\mathcal C}$ is a $\mathbb{Z}_2$-extension of its maximal Tannakian subcategory. In particular, if ${\mathcal F}Pdim({\mathcal C})$ is odd then ${\mathcal C}$ is automatically Tannakian. Symmetric categories are completely degenerate categories, while non-degenerate fusion categories are completely non-degenerate. Between these two extremes, we also consider the following case. \begin{definition} A braided fusion category ${\mathcal C}$ is called slightly degenerate if its M\"{u}ger center $\mathcal{Z}_2({\mathcal C})$ is equivalent, as a symmetric category, to the category $\svect$ of super vector spaces. \end{definition} \begin{lemma}{\cite[Proposition 2.5]{Dong2018extensions}}\label{Cpt_of_sligdegen} Let ${\mathcal C}$ be a slightly degenerate braided fusion category. Then one of the following holds true. 
(1)\, ${\mathcal F}Pdim({\mathcal C}_{pt})=|\mathcal{U}({\mathcal C})|$ and $\mathcal{Z}_2({\mathcal C}) \nsubseteq {\mathcal C}_{ad}$. (2)\, ${\mathcal F}Pdim({\mathcal C}_{pt})=2|\mathcal{U}({\mathcal C})|$ and $\mathcal{Z}_2({\mathcal C})\subseteq\mathcal{Z}_2({\mathcal C}_{ad})=\mathcal{Z}_2({\mathcal C}_{ad}^{'})$. \end{lemma} Let $\mathcal{I}rr_{\alpha}({\mathcal C})$ be the set of non-isomorphic simple objects of Frobenius-Perron dimension $\alpha$. \begin{lemma}\label{slight_degenerate} Let ${\mathcal C}$ be a braided fusion category. Suppose that the M\"{u}ger center $\mathcal{Z}_2({\mathcal C})$ contains the category $\svect$ of super vector spaces. Then the cardinal number of $\mathcal{I}rr_{\alpha}({\mathcal C})$ is even for every $\alpha$. \end{lemma} \begin{proof} Let $\delta$ be the invertible object generating $\svect$, and let $X$ be an element in $\mathcal{I}rr_{\alpha}({\mathcal C})$. Then $\delta\otimes X$ is also an element in $\mathcal{I}rr_{\alpha}({\mathcal C})$. By \cite[Lemma 5.4]{muger2000galois}, $\delta\otimes X$ is not isomorphic to $X$. This implies that $\mathcal{I}rr_{\alpha}({\mathcal C})$ admits a partition $\{X_1,\mathrm{cd}ots,X_n\}\cup \{\delta\otimes X_1,\mathrm{cd}ots,\delta\otimes X_n\}$. Hence the cardinal number of $\mathcal{I}rr_{\alpha}({\mathcal C})$ is even. \end{proof} \subsection{Exact factorizations of fusion categories} Let ${\mathcal C}$ be a fusion category, and let ${\mathcal A}, {\mathcal B}$ be fusion subcategories of ${\mathcal C}$. Let ${\mathcal A}{\mathcal B}$ be the full abelian (not necessarily tensor) subcategory of ${\mathcal C}$ spanned by direct summands in $X\otimes Y$, where $X\in {\mathcal A}$ and $Y\in {\mathcal B}$. We say that ${\mathcal C}$ factorizes into a product of ${\mathcal A}$ and ${\mathcal B}$ if ${\mathcal C}={\mathcal A}{\mathcal B}$. 
A factorization ${\mathcal C}={\mathcal A}{\mathcal B}$ of ${\mathcal C}$ is called exact if ${\mathcal A}\cap {\mathcal B}=\vect$, and is denoted by ${\mathcal C}={\mathcal A}\bullet{\mathcal B}$, see \cite{gelaki2017exact}. By \cite[Theorem 3.8]{gelaki2017exact}, ${\mathcal C}={\mathcal A}\bullet{\mathcal B}$ is an exact factorization if and only if every simple object of ${\mathcal C}$ can be uniquely expressed in the form $X\otimes Y$, where $X\in \mathcal{I}rr({\mathcal A})$ and $Y\in \mathcal{I}rr({\mathcal B})$. \section{Structure of a generalized near-group fusion category}\label{sec3} In the rest of this paper, we assume that the fusion categories involved are not pointed, since pointed fusion categories have been classified, see e.g., \cite{Ostrik2003}. \medbreak Let ${\mathcal C}$ be a fusion category. Recall from Section \ref{sec2.1} that $G:=G({\mathcal C})$ acts on $\mathcal{I}rr({\mathcal C})$ by left tensor product. \begin{definition} A generalized near-group fusion category is a fusion category ${\mathcal C}$ such that $G$ transitively acts on the set $\mathcal{I}rr({\mathcal C})/G$. \end{definition} Let ${\mathcal C}$ be a generalized near-group fusion category and let $\mathcal{I}rr({\mathcal C})/G=\{X_1,\mathrm{cd}ots,X_n\}$ be a full list of non-isomorphic non-invertible simple objects of ${\mathcal C}$. By equation~\eqref{decom1}, we may assume \begin{equation}\label{decom2} \begin{split} X_1\otimes X_1^*=\bigoplus_{h\in {\mathcal G}amma}h\oplus k_1X_{1}\oplus \mathrm{cd}ots\oplus k_nX_{n}, \end{split} \end{equation} where ${\mathcal G}amma=G[X_1]$ is the stabilizer of $X_1$ under the action of $G$, $k_1,\mathrm{cd}ots, k_n$ are non-negative integers. \begin{lemma}\label{fusionrules} Let ${\mathcal C}$ be a generalized near-group fusion category. Then the fusion rules of ${\mathcal C}$ are determined by: (1)\, For any $1\leq i\leq n$, we have \begin{equation} \begin{split} X_i\otimes X_i^*=X_1\otimes X_1^*. 
\end{split}\nonumber \end{equation} (2)\, For any $1\leq i,j\leq n$, there exists $g\in G$ such that \begin{equation} \begin{split} X_i\otimes X_j=\bigoplus_{h\in {\mathcal G}amma}gh\oplus k_1g\otimes X_{1}\oplus \mathrm{cd}ots\oplus k_n g\otimes X_{n}. \end{split}\nonumber \end{equation} \end{lemma} \begin{proof} (1)\, Since $G$ transitively acts on $\mathcal{I}rr({\mathcal C})/G({\mathcal C})$, there exists $g_i\in G$ such that $X_i^*=g_i\otimes X_1^*$ for any $i$. Then \begin{equation} \begin{split} X_i\otimes X_i^*&\cong X_i^{**}\otimes X_i^*\cong(g_i\otimes X_1^*)^*\otimes (g_i\otimes X_1^*)\\ &\cong X_1\otimes g_i^*\otimes g_i\otimes X_1^*\cong X_1\otimes X_1^*. \end{split}\nonumber \end{equation} (2)\, For any $i,j$, there exists $g\in G$ such that $X_i\cong g\otimes X_j^*$. Then \begin{equation} \begin{split} X_i\otimes X_j&\cong g\otimes X_j^*\otimes X_j\cong g\otimes(\bigoplus_{h\in {\mathcal G}amma}h\oplus k_1X_{1}\oplus \mathrm{cd}ots\oplus k_nX_{n})\\ &\cong\bigoplus_{h\in {\mathcal G}amma}gh\oplus k_1g\otimes X_{1}\oplus \mathrm{cd}ots\oplus k_n g\otimes X_{n}. \end{split}\nonumber \end{equation} \end{proof} Let $G,{\mathcal G}amma$ and $k_1,\mathrm{cd}ots,k_n$ be the data associated to ${\mathcal C}$ as in Lemma \ref{fusionrules}. We shall say ${\mathcal C}$ is a generalized near-group fusion category of type $(G,{\mathcal G}amma,k_1,\mathrm{cd}ots,k_n)$. \begin{proposition}\label{normalsubgroup} Let ${\mathcal C}$ be a generalized near-group fusion category of type $(G,{\mathcal G}amma,k_1,\mathrm{cd}ots,k_n)$. Then (1)\, ${\mathcal G}amma$ is a normal subgroup of $G$. (2)\, $\mathcal{I}rr({\mathcal C})=G\cup \{X_{\overline{g}}\,|\,\overline{g}\in G/{\mathcal G}amma\}$, where $X_{\overline{g}}=g\otimes X_1$, $g\in G$. (3)\, The rank of ${\mathcal C}$ is $[G:{\mathcal G}amma](1+|{\mathcal G}amma|)$ and ${\mathcal F}Pdim({\mathcal C})=[G:{\mathcal G}amma]({\mathcal F}Pdim(X_1)^2+|{\mathcal G}amma|)$. 
\end{proposition} \begin{proof} (1)\, By Lemma \ref{fusionrules}, $G[g\otimes X_1]=G[X_1]={\mathcal G}amma$ for any $g\in G$. On the other hand, $G[g\otimes X_1]=gG[X_1]g^{-1}=g{\mathcal G}amma g^{-1}$. Hence ${\mathcal G}amma$ is normal in $G$. (2)\, Let $X_{\overline{g}}=g\otimes X_1$ for every $\overline{g}\in G/{\mathcal G}amma$. Since ${\mathcal G}amma=G[X_1]$, we have $g\otimes X_1\cong h\otimes X_1$ if and only if $h^{-1}g\otimes X_1\cong X_1$ if and only if $h^{-1}g\in {\mathcal G}amma$ if and only if $\overline{g}=\overline{h}$ in $G/{\mathcal G}amma$. Hence the isomorphic class of $X_{\overline{g}}$ is well defined. (3)\, Part (3) follows from Part (2). \end{proof} \begin{remark}\label{T_Y} Let ${\mathcal C}$ be a generalized near-group fusion category of type $(G,{\mathcal G}amma,k_1,\mathrm{cd}ots,k_n)$. (1)\, If $(k_1,\mathrm{cd}ots,k_n)=(0,\mathrm{cd}ots,0)$ then $X_i\otimes X_j$ is a direct sum of invertible simple objects by Lemma \ref{fusionrules}. Then ${\mathcal C}$ is a generalized Tambara-Yamagami fusion category introduced in \cite{liptrap2010generalized}. In fact, it is easily observed that ${\mathcal C}$ is a generalized Tambara-Yamagami fusion category if and only if $(k_1,\mathrm{cd}ots,k_n)=(0,\mathrm{cd}ots,0)$. (2)\, If ${\mathcal C}$ exactly has one non-invertible simple object, then $G={\mathcal G}amma$ and ${\mathcal C}$ is a near-group fusion category introduced in \cite{siehler2003near}. \end{remark} \begin{proposition}\label{subcategory} Let ${\mathcal C}$ be a generalized near-group fusion category of type $(G,{\mathcal G}amma,k_1,\mathrm{cd}ots,k_n)$. Assume that ${\mathcal D}$ is a non-pointed fusion subcategory of ${\mathcal C}$. Then ${\mathcal D}$ is also a generalized near-group fusion category. \end{proposition} \begin{proof} We shall prove that $G({\mathcal D})$ transitively acts on $\mathcal{I}rr({\mathcal D})/G({\mathcal D})$. Let $X_i$ and $X_j$ be non-invertible simple objects in ${\mathcal D}$. 
Then there exists $g\in G$ such that $X_j=g\otimes X_i$. From $\dim\Hom(X_j,g\otimes X_i)=\dim\Hom(g,X_j\otimes X_i^*)=1$, we know that $g$ is a summand of $X_j\otimes X_i^*$. On the other hand, $X_j\otimes X_i^*$ lies in ${\mathcal D}$ since ${\mathcal D}$ is a fusion subcategory of ${\mathcal C}$. Hence $g$ is an element of $G({\mathcal D})$. This proves that $G({\mathcal D})$ transitively acts on $\mathcal{I}rr({\mathcal D})/G({\mathcal D})$ \end{proof} \begin{theorem}\label{categorytype} Let ${\mathcal C}$ be a generalized near-group fusion category of type $(G,{\mathcal G}amma,k_1,\mathrm{cd}ots,k_n)$. Assume that $(k_1,\mathrm{cd}ots,k_n)\neq (0,\mathrm{cd}ots,0)$. Then (1)\, The adjoint subcategory ${\mathcal C}_{ad}$ is non-pointed. There is a 1-1 correspondence between the non-pointed fusion subcategories of ${\mathcal C}$ and the subgroups of the universal grading group $\mathcal{U}({\mathcal C})$. (2)\, For any $g\in \mathcal{U}({\mathcal C})$, the component ${\mathcal C}_g$ contains at least one invertible simple object. In particular, $\mathcal{I}rr({\mathcal C}_g)=\{\alpha_g, \alpha_g\otimes Y_1, \mathrm{cd}ots, \alpha_g\otimes Y_s \}$, where $\alpha_g$ is an invertible simple object in ${\mathcal C}_g$ and $\mathcal{I}rr({\mathcal C}_{ad})=\{\textbf{1},Y_{1},\mathrm{cd}ots,Y_{s}\}$. \end{theorem} \begin{proof} (1)\, Let ${\mathcal D}$ be a non-pointed fusion subcategory of ${\mathcal C}$. For every non-invertible simple object $X\in {\mathcal D}$, Lemma \ref{fusionrules} shows that \begin{equation} \begin{split} X\otimes X^*=\bigoplus_{h\in {\mathcal G}amma}h\oplus k_1X_{1}\oplus \mathrm{cd}ots\oplus k_nX_{n}. \end{split}\nonumber \end{equation} Hence the adjoint subcategory ${\mathcal C}_{ad}$ is generated by ${\mathcal G}amma$ and $X_i$'s with $k_i\neq 0$. Since $(k_1,\mathrm{cd}ots,k_n)\neq (0,\mathrm{cd}ots,0)$, ${\mathcal C}_{ad}$ is not pointed. In particular, ${\mathcal C}_{ad}$ is a fusion subcategory of ${\mathcal D}$. 
This shows that every non-pointed fusion subcategory of ${\mathcal C}$ contains ${\mathcal C}_{ad}$. Therefore, part (1) follows from \cite[Corollary 2.5]{drinfeld2010braided}. (2)\, We shall first show that every component ${\mathcal C}_g$ of the universal grading at least contains an invertible simple object. By part (1), ${\mathcal C}_{ad}$ contains a non-invertible simple object $Y$. Let $X$ be a simple object in ${\mathcal C}_g$. We may assume that $X$ is not invertible. Then $X\otimes Y\in {\mathcal C}_g\otimes {\mathcal C}_{ad}\subseteq {\mathcal C}_g$. By Lemma \ref{fusionrules}(2), $X\otimes Y$ contains $|{\mathcal G}amma|$ invertible simple objects. Hence ${\mathcal C}_g$ contains at least one invertible simple object. Let $\alpha_g\in{\mathcal C}_g$ be an invertible simple object, and $\textbf{1},Y_{1},\mathrm{cd}ots,Y_{s}$ be all non-isomorphic simple objects in ${\mathcal C}_{ad}$. Then $\alpha_g,\alpha_g\otimes Y_{1},\mathrm{cd}ots,\alpha_g\otimes Y_{s}$ are non-isomorphic simple objects in ${\mathcal C}_g$. Since $${\mathcal F}Pdim(\alpha_g\otimes Y_{i})={\mathcal F}Pdim(Y_{i})\quad \mbox{and}\quad {\mathcal F}Pdim({\mathcal C}_g)={\mathcal F}Pdim({\mathcal C}_{ad}),$$ we obtain that $\alpha_g, \alpha_g\otimes Y_1, \mathrm{cd}ots, \alpha_g\otimes Y_s$ are all non-isomorphic simple objects in ${\mathcal C}_g$. This completes the proof. \end{proof} \begin{remark}\label{remark1} Let ${\mathcal C}$ be a generalized near-group fusion category of type $(G,{\mathcal G}amma,k_1,\mathrm{cd}ots,k_n)$. Then Proposition \ref{categorytype} implies the following two facts: (1)\, If $(k_1,\mathrm{cd}ots,k_n)\neq (0,\mathrm{cd}ots,0)$ then the adjoint subcategory ${\mathcal C}_{ad}$ is the smallest non-pointed fusion subcategory of ${\mathcal C}$. This is because that ${\mathcal C}_{ad}$ corresponds to the trivial subgroup of $\mathcal{U}({\mathcal C})$. (2)\, Assume that $(k_1,\mathrm{cd}ots,k_n)\neq (0,\mathrm{cd}ots,0)$. 
Then ${\mathcal C}_{ad}$ is not pointed by Theorem \ref{categorytype}.
Assume that ${\mathcal F}Pdim({\mathcal C}_{pt})=|\mathcal{U}({\mathcal C})|$ and $(k_1,\mathrm{cd}ots,k_n)\neq(0,\mathrm{cd}ots,0)$. Then ${\mathcal C}_{ad}$ is a Yang-Lee category. \end{lemma} \begin{proof} By Proposition \ref{categorytype}, every component ${\mathcal C}_g$ of the universal grading of ${\mathcal C}$ at least has one invertible simple object. Hence, our assumption implies that every component ${\mathcal C}_g$ exactly contains one invertible simple object. By Proposition \ref{normalsubgroup}, the number of non-isomorphic non-invertible simple objects is not more than the order of $G$. In addition, Theorem \ref{categorytype} shows that every component ${\mathcal C}_g$ admits the same type. Hence every component ${\mathcal C}_g$ only contains two simple objects: one is invertible and the other is not. In particular, ${\mathcal C}_{ad}$ is a Yang-Lee category by the classification of rank $2$ fusion categories \cite{ostrik2003fusion}. \end{proof} An Ising category $\mathcal{I}$ is a fusion category which is not pointed and has Frobenius-Perron dimension $4$. Recall from \cite{drinfeld2010braided} that any Ising category $\mathcal{I}$ is a non-degenerate braided fusion category and the adjoint subcategory $\mathcal{I}_{ad}=\mathcal{I}_{pt}$ is braided equivalent to $\svect$. \begin{lemma}\label{slightly-deg1} Let ${\mathcal C}$ be a braided generalized near-group fusion category of type $(G,{\mathcal G}amma,k_1,\mathrm{cd}ots,k_n)$. Assume that $(k_1,\mathrm{cd}ots,k_n)=(0,\mathrm{cd}ots,0)$ and ${\mathcal C}$ is slightly degenerate. Then ${\mathcal C}$ is exactly one of the following: (1)\, ${\mathcal C}\cong \mathcal{I}\boxtimes {\mathcal B}$, where $\mathcal{I}$ is an Ising category, ${\mathcal B}$ is a slightly degenerate pointed fusion category. (2)\, ${\mathcal C}$ is generated by a $\sqrt{2}$-dimensional simple object. In this case, ${\mathcal C}$ is prime. 
\end{lemma} \begin{proof} Since we assume that $(k_1,\mathrm{cd}ots,k_n)=(0,\mathrm{cd}ots,0)$, the adjoint subcategory ${\mathcal C}_{ad}$ is generated by ${\mathcal G}amma$ and ${\mathcal F}Pdim(X)=\sqrt{|{\mathcal G}amma|}$ for all non-invertible simple object $X$ of ${\mathcal C}$. In particular, ${\mathcal C}$ is a generalized Tambara-Yamagami fusion category. By \cite[Proposition 5.2(ii)]{natale2013faithful}, we have \begin{equation}\label{order} \begin{split} |\mathcal{U}({\mathcal C})|=2[G:{\mathcal G}amma]. \end{split} \end{equation} By Proposition \ref{Cpt_of_sligdegen}, $|G|=2|\mathcal{U}({\mathcal C})|$ or $|G|=|\mathcal{U}({\mathcal C})|$. \medbreak Case $|G|=2|\mathcal{U}({\mathcal C})|$. In this case, equality (\ref{order}) implies that $|{\mathcal G}amma|=4$. Proposition \ref{Cpt_of_sligdegen} shows that in our case ${\mathcal C}_{ad}$ contains the M\"{u}ger center $\svect$ of ${\mathcal C}$. Let $\delta$ be the invertible simple object generating $\svect$. Then we may write ${\mathcal G}amma=\{\textbf{1},\delta,g,h\}$. Hence $X\otimes X^*=\textbf{1}\oplus \delta\oplus g\oplus h$ for any non-invertible simple object $X$. In particular, $\dim\Hom(\delta\otimes X,X)=\dim\Hom(\delta,X\otimes X^*)=1$ shows that $\delta\otimes X\cong X$, which contradicts \cite[Proposition 2.6(i)]{etingof2011weakly}. So we can discard this case. \medbreak Case $|G|=|\mathcal{U}({\mathcal C})|$. In this case, equality (\ref{order}) implies that $|{\mathcal G}amma|=2$. Hence ${\mathcal C}$ is an extension of a rank $2$ pointed fusion category. The result then follows from \cite[Theorem 5.11]{Dong2018extensions}. \end{proof} In fact, Remark \ref{T_Y}(1) implies that Lemma \ref{slightly-deg1} classifies slightly degenerate generalized Tambara-Yamagami fusion categories. \begin{lemma}\label{slightly-deg2} Let ${\mathcal C}$ be a braided generalized near-group fusion category of type $(G,{\mathcal G}amma,k_1,\mathrm{cd}ots,k_n)$. 
Assume that $(k_1,\mathrm{cd}ots,k_n)\neq(0,\mathrm{cd}ots,0)$ and ${\mathcal C}$ is slightly degenerate. Then ${\mathcal C}$ is exactly one of the following. (1)\, ${\mathcal C}\cong {\mathcal C}_{ad}\boxtimes {\mathcal C}_{pt}$, where ${\mathcal C}_{ad}$ is a Yang-Lee category. (2)\, ${\mathcal C}\cong{\mathcal C}_{ad}\boxtimes {\mathcal B}$, where ${\mathcal C}_{ad}$ is a slightly degenerate fusion category of the form ${\mathcal C}(\mathfrak{psl}_2,q^t,8)$ with $q=e^{\frac{\pi i}{8}}$ and $(t,2)=1$, ${\mathcal B}$ is a non-degenerate pointed fusion category. \end{lemma} \begin{proof} By Proposition \ref{Cpt_of_sligdegen}, ${\mathcal F}Pdim({\mathcal C}_{pt})=|\mathcal{U}({\mathcal C})|$ or ${\mathcal F}Pdim({\mathcal C}_{pt})=2|\mathcal{U}({\mathcal C})|$. Case ${\mathcal F}Pdim({\mathcal C}_{pt})=|\mathcal{U}({\mathcal C})|$. In this case, ${\mathcal C}_{ad}$ is a Yang-Lee category by Lemma \ref{dimuc}. Hence ${\mathcal C}\cong {\mathcal C}_{ad}\boxtimes {\mathcal C}_{ad}'$ by Theorem \ref{MugerThm}, where ${\mathcal C}_{ad}'={\mathcal C}_{pt}$ by \cite[Corollary 3.29]{drinfeld2010braided}. Hence ${\mathcal C}\cong {\mathcal C}_{ad}\boxtimes {\mathcal C}_{pt}$. This proves Part (1). \medbreak Case ${\mathcal F}Pdim({\mathcal C}_{pt})=2|\mathcal{U}({\mathcal C})|$. By Theorem \ref{categorytype}, every component ${\mathcal C}_g$ of the universal grading of ${\mathcal C}$ at least has one invertible simple object. Moreover, every component ${\mathcal C}_g$ admits the same type. Hence every component ${\mathcal C}_g$ exactly contains two invertible simple objects. By Proposition \ref{normalsubgroup}, the number of non-isomorphic non-invertible simple objects is not more than the order of $G$. Hence the number of non-isomorphic non-invertible simple objects in ${\mathcal C}_g$ is $1$ or $2$. \medbreak If the first case holds true then ${\mathcal C}_{ad}$ is a fusion category of rank $3$. 
By Proposition \ref{Cpt_of_sligdegen}, the M\"{u}ger center of ${\mathcal C}_{ad}$ contains the category $\svect$.
Hence ${\mathcal C}_{pt}$ is slightly degenerate and admits a decomposition ${\mathcal C}_{pt}\cong\svect\boxtimes{\mathcal B}$ by \cite[Proposition 2.6(ii)]{etingof2011weakly}, where ${\mathcal B}$ is a non-degenerate pointed fusion category. So ${\mathcal C}$ admits a decomposition ${\mathcal C}\cong{\mathcal B}\boxtimes{\mathcal B}'$ by Theorem \ref{MugerThm}. Counting rank and Frobenius-Perron dimensions of simple objects on both sides, we obtain that ${\mathcal B}'$ is a rank $4$ non-pointed fusion category. By Remark \ref{remark1}, ${\mathcal C}_{ad}$ is the smallest non-pointed fusion subcategory of ${\mathcal C}$. Hence ${\mathcal C}_{ad}={\mathcal B}'$. This proves Part (2). \end{proof} Combing Lemma \ref{slightly-deg1} and \ref{slightly-deg2}, we obtain the classification of slightly degenerate generalized near-group fusion categories. \begin{theorem} Let ${\mathcal C}$ be a slightly degenerate generalized near-group fusion category. Then ${\mathcal C}$ is exactly one of the following:: (1)\, ${\mathcal C}\cong \mathcal{I}\boxtimes {\mathcal B}$, where $\mathcal{I}$ is an Ising category, ${\mathcal B}$ is a slightly degenerate pointed fusion category. (2)\, ${\mathcal C}\cong {\mathcal C}_{ad}\boxtimes {\mathcal C}_{pt}$, where ${\mathcal C}_{ad}$ is a Yang-Lee category. (3)\, ${\mathcal C}\cong{\mathcal C}_{ad}\boxtimes {\mathcal B}$, where ${\mathcal C}_{ad}$ is a slightly degenerate fusion category of the form ${\mathcal C}(\mathfrak{psl}_2,q^t,8)$ with $q=e^{\frac{\pi i}{8}}$ and $(t,8)=1$, ${\mathcal B}$ is a non-degenerate pointed fusion category. (4)\, ${\mathcal C}$ is generated by a $\sqrt{2}$-dimensional simple object. In this case, ${\mathcal C}$ is prime. \end{theorem} \end{document}
\begin{document} \markboth{F. Aroca, G. Ilardi and L. López de Medrano} {Puiseux power series solutions for systems of equations} \title{PUISEUX POWER SERIES SOLUTIONS FOR SYSTEMS OF EQUATIONS} \author{Fuensanta Aroca, Giovanna Ilardi and Luc\'ia L\'opez de Medrano} \address{Instituto de Matem\'aticas, Unidad Cuernavaca Universidad Nacional Aut\'onoma de M\'exico, A.P. 273-3 Admon. 3, Cuernavaca, Morelos, 62251 M\'exico} \address{Dipartimento Matematica Ed Applicazioni ``R. Caccioppoli'' Universit\`{a} Degli Studi Di Napoli ``Federico II'' Via Cintia - Complesso Universitario Di Monte S. Angelo 80126 - Napoli - Italia} \subjclass[2000]{Primary 14J17, 52B20; Secondary 14B05, 14Q15, 13P99} \keywords{Puiseux series, Newton polygon, singularity, tropical variety.} \maketitle \begin{abstract} We give an algorithm to compute term by term multivariate Puiseux series expansions of series arising as local parametrizations of zeroes of systems of algebraic equations at singular points. The algorithm is an extension of Newton's method for plane algebraic curves replacing the Newton polygon by the tropical variety of the ideal generated by the system. As a corollary we deduce a property of tropical varieties of quasi-ordinary singularities. \end{abstract} \section*{Introduction} Isaac Newton described an algorithm to compute term by term the series arising as $y$-roots of algebraic equations $f(x,y)=0$ \cite["Methodus fluxionum et serierum infinitorum" ]{Newton:1670}. The main tool used in the algorithm is a geometrical object called the Newton polygon. The roots found belong to a field of power series called Puiseux series \cite{Puiseux:1850}. The extension of Newton-Puiseux's algorithm for equations of the form $f(x_1,\ldots ,x_N,y)=0$ is due to J. McDonald \cite{JMcDonald:1995}. As can be expected, the Newton polygon is extended by the Newton polyhedron. 
An extension for systems of equations of the form $\{ f_1(x,y_1,\ldots ,y_M)= {\cdots} = {f_r(x,y_1,\ldots ,y_M)=0} \}$ is described in \cite{Maurer:1980} using tropism and in \cite{JensenMarkwig:2008} using tropical geometry. J. McDonald gives an extension to systems of equations $$\{ f_1(x_1,\ldots ,x_N,y_1,\ldots ,y_M)= {\cdots} = {f_r(x_1,\ldots ,x_N,y_1,\ldots ,y_M)=0} \}$$ using the Minkowski sum of the Newton Polyhedra. However, this algorithm works only for ``general" polynomials \cite{JMcDonald:2002}. In this note we extend Newton's method to any dimension and codimension. The Newton polyhedron of a polynomial is replaced by its normal fan. The tropical variety comes in naturally as the intersection of normal fans. We prove that, in an algebraically closed field of characteristic zero, the algorithm given always works. The natural field into which to embed the algebraic closure of polynomials in one variable is the field of Puiseux series. When it comes to several variables there is a family of fields to choose from. Each field is determined by the choice of a vector $\omega\in{\mathbb R}^N$ of rationally independent coordinates. The need to choose $\omega$ had already appeared when working with a hypersurface \cite{JMcDonald:1995,GonzalezPerez:2000}. The introduction of the family of fields is done in \cite{ArocaIlardi:2009}.\\ We start the article recalling the main statements on which the Newton-Puiseux method for algebraic plane curves relies (Section \ref{Newton-Puiseux's Method}) and extending these statements to the general case (Section \ref{The general statement}). In the complex case, a series of positive order, obtained by the Newton-Puiseux method for algebraic plane curves, represents a local parametrization of the curve around the origin. 
In Section \ref{The local parametrizations defined by the series} we explain how an $M$-tuple of series arising as a solution to the general Newton-Puiseux statement also represents a local parametrization recalling results from \cite{FAroca:2004}.
In the last section we show the theoretical implications of the extension of Newton-Puiseux algorithm by giving a property of the tropical variety associated to a quasi-ordinary singularity.\\ \section{Newton-Puiseux's Method.}\label{Newton-Puiseux's Method} Given an algebraic plane curve ${\mathcal C}:=\{f(x,y)=0\}$, the Newton-Puiseux method constructs all the fractional power series $y(x)$ such that $f(x,y(x))=0$. These series turn out to be Puiseux series. Newton-Puiseux's method is based on two points: Given a polynomial $f(x,y)\in{\mathbb K} [x,y]$ \begin{enumerate} \item\label{dos} $cx^\mu$ is the first term of a Puiseux series $y(x)=cx^\mu+...$ with the property $f(x,y(x))=0$ if and only if \begin{itemize} \item $\frac{-1}{\mu}$ is the slope of some edge $L$ of the Newton polygon of $f$. \item $cx^\mu$ is a solution of the characteristic equation associated to $L$. \end{itemize} \item\label{tres} If we iterate the method: Take $c_i x^{\mu_i}$ to be a solution of the characteristic equation associated to the edge of slope $\frac{-1}{\mu_i}$ of $f_i := f_{i-1} (x, y+ c_{i-1} x^{\mu_{i-1}})$ with $\mu_i>\mu_{i-1}$. We do get a Puiseux series $\sum_{i=0}^{\infty} c_i x^{\mu_i}$ with the property $f(x,y(x))=0$. \end{enumerate} In this paper we prove the extension of these points: Point \ref{dos} is extended in Section \ref{Tropicalization} Theorem \ref{Extension del punto uno} and, then, Point \ref{tres} in Section \ref{The solutions} Theorem \ref{ultimo teorema}. Point \ref{dos} is necessary to assure that the sequences in Point \ref{tres} always exist. But Point \ref{dos} does not imply that any sequence constructed in such a way leads to a solution. Both results have led to a deep understanding of algebraic plane curves. \section{The general statement.}\label{The general statement} Take an $N$-dimensional algebraic variety $V\subset {\mathbb K}^{N+M}$. 
There is no hope to find $k\in{\mathbb N}$ and an $M$-tuple of series $y_1,\ldots ,y_M$ in ${\mathbb K} [[x_1^{1/k},\ldots ,x_N^{1/k}]]$ such that the substitution $y_j\mapsto y_j(x_1,\ldots ,x_N)$ makes $f$ identically zero for all $f$ vanishing on $V$.
We say that a Puiseux series $\varphi$ has {\bf exponents in a translate of $\sigma$} when there exists $\gamma\in{\mathbb Q}^N$ such that ${\mathcal E} (x^\gamma\varphi )\subset \sigma$. It is easy to see that the set of Puiseux series with exponents in translates of a strongly convex cone $\sigma$ is a ring. (But, when $N>1$, it is not a field). Given a non-zero vector $\omega\in {\mathbb R}^N$, we say that a cone $\sigma$ is {\bf $\omega$-positive} when for all $v\in\sigma$ we have $v\cdot\omega\geq 0$. If $\omega$ has rationally independent coordinates, an $\omega$-positive rational cone is always strongly convex. Denote by ${\large \textsf{V}} ({\mathfrak I}P)$ the set of common zeroes of the ideal ${\mathfrak I}P$. Extending Newton-Puiseux's statement for an algebraic variety of any dimension and codimension is equivalent to answering the following question: \begin{problem}\label{problema}Given an ideal ${{\mathfrak I}P}\subset {\mathbb K} [x_1,\ldots ,x_{N+M}]$ such that the projection \begin{equation}\label{la proyeccion} \begin{array}{cccc} \oplusi: & {\large \textsf{V}}({{\mathfrak I}P }) &\longrightarrow & {\mathbb K}^N\\ & (x_1,\ldots ,x_{N+M}) & \mapsto & (x_1,\ldots ,x_N) \end{array} \end{equation} is dominant and of generic finite fiber. Given $\omega\in{\mathbb R}^N$ of rationally independent coordinates. Can one always find an $\omega$-positive rational cone $\sigma$ and an $M$-tuple $\oplushi_1,\ldots, \oplushi_M$ of Puiseux series with exponents in some translate of $\sigma$ such that \[ f(x_1,\ldots ,x_N,\oplushi_1(x_1,\ldots ,x_N),\ldots, \oplushi_M(x_1,\ldots ,x_N))=0. \] for any $f\in{\mathfrak I}P$? \end{problem} If the projection is not {dominant} the problem has no solution. If the generic fiber is not {finite} an output will not be a parametrization. To emphasize the roll of the projection, the indeterminates will be denoted by $x_1,\ldots ,x_N,y_1,\ldots y_M$. 
We will work with an ideal ${\mathfrak I}P\subset {\mathbb K} [x,y]:= {\mathbb K} [x_1,\ldots ,x_N,y_1,\ldots ,y_M]$. With this notation, the set of common zeroes of ${\mathfrak I}P$ is given by \[ {\large \textsf{V}}({\mathfrak I}P )= \{ (x,y)\in {\mathbb K}^{N+M}\mid f(x,y)=0,\forall f\in{\mathfrak I}P\}. \] \begin{defin} We will say that an ideal ${\mathfrak I}P\subset{\mathbb K} [x,y]$ is {\bf N-admisible} when the Projection (\ref{la proyeccion}) is dominant and of finite generic fiber. We will say that an algebraic variety $V\subset {\mathbb K}^{N+M}$ is N-admissible when its defining ideal is N-admissible. Given an N-admissible ideal ${\mathfrak I}P\subset{\mathbb K} [x,y]$, and a vector $\omega\in{\mathbb R}^N$ of rationally independent coordinates; an $M$-tuple $\oplushi_1,\ldots ,\oplushi_M$ solving Question \ref{problema} will be called an {\bf $\omega$-solution for ${\mathfrak I}P$}. \end{defin} \section{The local parametrizations defined by the series.}\label{The local parametrizations defined by the series} Let $({\mathcal C},(0,0))$ be a complex plane algebraic curve singularity \[ (0,0)\in{\mathcal C}:=\{(x,y)\in {\mathbb C}^2\mid f(x,y)=0\} \] where $f$ is a polynomial with complex coefficients. Each output of the Newton-Puiseux method $y(x)=c_0x^{\mu_0}+..$ with $\mu_0>0$ is a convergent series in a neighborhood of $0$. This series corresponds to a multi-valued mapping defined in a neighborhood of the origin $0\in U\subset{\mathbb C}$ \[ \begin{array}{cccc} \varphi: & U & \longrightarrow & {\mathcal C}\\ & x &\mapsto & (x,y(x)) \end{array} \] that is compatible with the projection \[ \begin{array}{cccc} \oplusi: &{\mathcal C} &\longrightarrow &{\mathbb C}\\ &(x,y) &\mapsto &x, \end{array} \] that is, $\oplusi\circ\varphi$ is the identity on $U$. When ${\mathcal C}$ is analytically irreducible at $(0,0)$, the image $\varphi (U)$ is a neighborhood of the curve at $(0,0)$. 
The series $\varphi$ contains all the topological and analytical information of $({\mathcal C}, (0,0))$ and there are different ways to recover it (see for example \cite{Walker:1978,BrieskornKnorrer:1986}). If $\omega\in{{\mathbb R}_{>0}}^N$ has rationally independent positive coordinates, then the first orthant is $\omega$-positive and we may suppose that the series of an output of the extended Newton-Puiseux method has exponents in a cone $\sigma$ that contains the first orthant. Let $\sigma$ be a strongly convex cone that contains the first orthant. In \cite{FAroca:2004} it is shown that (when it is not empty) the domain of convergence of a series with exponents in a strongly convex cone $\sigma$ contains an open set $W$ that has the origin as accumulation point. Moreover, by the results of \cite[Prop 3.4]{FAroca:2004}, the intersection of a finite number of such domains is non-empty. Let $V$ be an N-admissible complex algebraic variety embedded in ${\mathbb C}^{N+M}$ and let $\omega\in{{\mathbb R}_{>0}}^N$ be of rationally independent coordinates. Each $M$-tuple of series $(y_1(\underline{x}),\ldots ,y_M(\underline{x}))$ found solving Question \ref{problema} corresponds to a multi-valued function defined on an open set $W\subset{\mathbb C}^N$ that has the origin as accumulation point \[ \begin{array}{cccc} \varphi: &W &\longrightarrow &V\\ &\underline{x} &\mapsto (\underline{x},y_1(\underline{x}),\ldots ,y_M(\underline{x}). \end{array} \] The image $\varphi (W)$ contains an open set (a wedge) of $V$. When \begin{equation}\label{el valor es positivo} \omega\cdot\alpha > 0\quad\text{for all}\quad \alpha\in\bigcup_{j=1,\ldots ,M}{\EuScript E} (y_j) \end{equation} (when for each $j$, $y_j$ does not have constant term and its set of exponents is contained in an $\omega-$positive cone with apex at the origin) the open set has the origin as accumulation point. 
Since analytic continuation is unique, when the origin is an analytically irreducible singularity, this parametrization contains all the topological and analytic information of the singularity. \section{The field of $\omega$-positive Puiseux series.}\label{The fields} In all that follows $\omega$ will be a vector in ${\mathbb R}^N$ of rationally independent coordinates. We will work with an algebraically closed field ${\mathbb K}$ of characteristic zero. Given a N-admissible ideal we are looking for solutions in the ring of Puiseux series with exponents in some translate of an $\omega$-positive cone $\sigma$. The cone $\sigma$ may be different for different ideals. It is only natural to work with the infinite union of all these rings. We say that a Puiseux series $\varphi$ is {\bf $\omega$-positive} when there exists $\gamma\in{\mathbb Q}^N$ and an $\omega$-positive cone $\sigma$ such that ${\mathcal E} (x^\gamma\varphi )\subset \sigma$. The set of $\omega$-positive Puiseux series was introduced in \cite{ArocaIlardi:2009} where it was proved that it is an algebraically closed field. This field is called the \textbf{field of $\omega$-positive Puiseux series} and will be denoted by ${\sl S}_\omega$. The vector $\omega$ induces a total order on ${\mathbb Q}^N$ \[ \alpha\leq \alpha '\Longleftrightarrow\omega\cdot\alpha\leq\omega\cdot\alpha '. \] This gives a natural way to choose the first term of a series in ${\sl S}_\omega$. This is the order we will use to compute the $\omega$-solutions ``term by term''. More precisely, the {\bf order} of an element $\oplushi=\sum_{\alpha}c_\alpha x^\alpha$ in ${\sl S}_\omega$ is \[ \ordser{\omega} (\oplushi) :=\min_{\alpha \in {\mathcal E}(f)} \omega \cdot \alpha \] and its {\bf first term} is \[ \inser{\omega} (\oplushi ):= c_\alpha x^\alpha\qquad\text{where}\qquad \omega \cdot \alpha=\ordser{\omega} (\oplushi). \] Set $\ordser{\omega}(0) \colon = \infty$ and $\inser{\omega} (0)=0$. 
\begin{rem}\label{propiedades de valser e inser} For $\oplushi ,\oplushi'\in {\sl S}_\omega$ \begin{enumerate} \item $\ordser{\omega} (\oplushi+\oplushi')\geq\min \{\ordser{\omega} (\oplushi), \ordser{\omega} (\oplushi')\}$.\label{Propiedad valuacion 1} \item $\ordser{\omega} (\oplushi+\oplushi')\neq\min \{\ordser{\omega} (\oplushi), \ordser{\omega}(\oplushi')\}$ if and only if $\ordser{\omega} (\oplushi)=\ordser{\omega} (\oplushi')$ and $\inser{\omega}(\oplushi) +\inser{\omega}(\oplushi')=0$.\label{segunda propiedad valser} \item $\ordser{\omega} (\oplushi\cdot\oplushi')= \ordser{\omega}(\oplushi) +\ordser{\omega}(\oplushi')$. Moreover $\inser{\omega} (\oplushi\cdot\oplushi')= \inser{\omega}(\oplushi)\cdot\inser{\omega}(\oplushi')$. \label{multiplication} \item $\inser{\omega} (\inser{\omega} (\oplushi))=\inser{\omega} (\oplushi) .$ \item\label{orden y ramificacion} $\ordser{\omega}(\oplushi ({x_1}^r,\ldots ,{x_N}^r))=r\ordser{\omega}(\oplushi (x_1,\ldots ,x_N))$ for any $r\in{\mathbb Q}$. \end{enumerate} \end{rem} A map from a ring into the reals with Properties \ref{Propiedad valuacion 1} and \ref{multiplication} is called a {\bf valuation}. The {\bf first $M$-tuple} of an element $\varphi=(\varphi_1,\ldots ,\varphi_M)\in{\sl S}_\omega^M$ is the $M$-tuple of monomials \[ \inser{\omega}(\varphi)=(\inser{\omega}(\varphi_1),\ldots ,\inser{\omega}(\varphi_M)) \] and the {\bf order} of $\varphi$ is the $M$-tuple of orders \[ \ordser{\omega}(\varphi)=(\ordser{\omega}(\varphi_1),\ldots ,\ordser{\omega}(\varphi_M)). \] \begin{rem} With the language introduced, Equation (\ref{el valor es positivo}) is equivalent to $\ordser{omega} (y)\in {{\mathbb R}_{>0}}^M$. \end{rem} \section{The extended ideal.}\label{The extended ideal} Given an ideal ${\mathfrak I}P\subset {\mathbb K} [x,y]$, let ${\mathfrak I}P^*\subset {\mathbb K} [x^*,y]$ be the extension of ${\mathfrak I}P$ to ${\mathbb K} [x^*,y]$ via the natural inclusion. 
We have \[ {\bf V} ( {\mathfrak I}P^*\cap {\mathbb K} [x,y])= \overline{ {\bf V}({\mathfrak I}P) \setminus \{ x_1\cdots x_N=0\}}. \] In regard to our question, it is then equivalent to work with ideals in ${\mathbb K} [x,y]$ or in ${\mathbb K}[x^*,y]$. For technical reasons we will start with ideals in ${\mathbb K}[x^*,y]$. \begin{defin} An ideal ${\mathfrak I}P\subset{\mathbb K} [x^*,y]$ is said to be {\bf N-admissible} if the ideal ${\mathfrak I}P\cap{\mathbb K} [x,y]\subset{\mathbb K} [x,y]$ is N-admissible. \end{defin} Given an ideal ${\mathfrak I}P\subset {\mathbb K}[x^*,y]$, let ${{\mathfrak I}P}^{\rm e}\subset{\sl S}_\omega [y]$ be the extension of ${\mathfrak I}P$ via the natural inclusion \[ {\mathbb K} [x^*, y]={\mathbb K}[x^*][y]\hookrightarrow {\sl S}_{\omega} [y]. \] When ${\mathfrak I}P$ is an N-admissible ideal, ${\large \textsc{V}} ({{\mathfrak I}P}^{\rm e})$ is a discrete subset of ${{\sl S}_\omega}^M$. By definition, $\oplushi\in{\large \textsc{V}} ({{\mathfrak I}P}^{\rm e})$ if and only if $\oplushi$ is an $\omega$-solution for ${\mathfrak I}P$. Question \ref{problema} may be reformulated as follows:\\ \begin{problem}{\bf Reformulation of Question \ref{problema}} Given an N-admissible ideal ${\mathfrak I}P\subset{\mathbb K} [x^*,y]$, and a vector $\omega\in{\mathbb R}^N$ of rationally independent coordinates. Find the (discrete) set of zeroes in ${{\sl S}_\omega}^M$ of the extended ideal ${{\mathfrak I}P}^{\rm e}\subset {\sl S}_\omega [y]$. \end{problem} A polynomial $f\in{\mathbb K} [x^*,y]$ may be considered a polynomial in $N+M$ variables with coefficients in ${\mathbb K}$, or a polynomial in $M$ variables with coefficients in ${\mathbb K} [x^*]\subset{\sl S}_\omega$. To cope with this fact we will use a slightly different notation: \begin{itemize} \item[*] $\ordser\omega$ and $\inser\omega$ refer to the field ${\sl S}_\omega$. (Section \ref{The fields}.) 
\item[*] $\ordpol\omega\eta$, $\inpol\omega\eta$ and $\idinpol\omega\eta$ refer to the ring ${\sl S}_\omega [y]$. (Sections \ref{Weighted orders and initial parts} and \ref{Initial Ideals}.) \item[*] $\ordPol\omega\eta$, $\inPol\omega\eta$ and $\idinPol\omega\eta$ refer to the ring ${\mathbb K} [x^*, y]$. (Section \ref{Polynomial initial ideals}.) \end{itemize} Given an ideal ${\mathfrak I}P\subset{\mathbb K} [x,y]$ the notation ${\large \textsf{V}} ({\mathfrak I}P)$ will stand for the set of common zeroes of ${\mathfrak I}P$ in ${\mathbb K}^{N+M}$. Given an ideal ${\mathfrak I}S\subset{\sl S}_\omega [y]$ the set of common zeroes of ${\mathfrak I}S$ in ${{\sl S}_\omega}^{M}$ will be denoted by ${\large \textsc{V}} ({\mathfrak I}S)$. \section{Weighted orders and initial parts in ${\sl S}_\omega [y]$.}\label{Weighted orders and initial parts} The classical definition of weighted order and initial part considers as weights only vectors in ${\mathbb R}^M$. For technical reasons we need to extend the classical definition to weights in $\left({\mathbb R}\cup\{\infty\}\right)^M$. A polynomial in $M$ variables with coefficients in ${\sl S}_\omega$ is written in the form \[ f=\sum_{\beta\in E\subset ({{\mathbb Z}_{\geq 0}})^M}\oplushi_\beta y^\beta,\qquad \oplushi_\beta\in{\sl S}_\omega,\qquad y^\beta := {y_1}^{\beta_1}\cdots {y_M}^{\beta_M} \] where $E$ is a finite set. Set $\infty\cdot a=\infty$ for $a\in{\mathbb R}^*$ and $\infty\cdot 0=0$. A vector $\eta\in {({\mathbb R}\cup\{\infty\})}^M$ induces a (not necessarily total) order on the terms of $f$. 
The {\bf $\eta$-order of $f$ as an element of ${\sl S}_\omega [y]$} is \[ \ordpol{\omega}{\eta}(f):=\min_{\oplushi_\beta\neq 0}\left(\ordser{\omega}\oplushi_\beta +\eta\cdot\beta\right) \] and, if $\ordpol{\omega}{\eta}f<\infty$, the {\bf $\eta$-initial part of $f$ as an element of ${\sl S}_\omega [y]$} is \[ \inpol{\omega}{\eta}(f):=\sum_{\ordser{\omega}\oplushi_\beta+\eta\cdot\beta=\ordpol{\omega}{\eta}(f)}(\inser{\omega}\oplushi_\beta) y^\beta . \] \begin{examp}\label{inpol de un binomio} Consider a binomial of the form $y^\beta -\oplushi$ we have \[ \ordpol{\omega}{\eta} (y^\beta -\oplushi )= \left\{ \begin{array}{ll} \eta\cdot\beta &\text{if}\quad \eta\cdot\beta\leq\ordser{\omega}(\oplushi)\\ \ordser{\omega}(\oplushi) &\text{if}\quad \ordser{\omega}\oplushi\leq\eta\cdot\beta \end{array} \right. \] and \[ \inpol{\omega}{\eta}(y^\beta -\oplushi )= \left\{ \begin{array}{ll} y^\beta &\text{if}\quad \eta\cdot\beta <\ordser{\omega}(\oplushi)\\ y^\beta -\inser{\omega}(\oplushi) &\text{if}\quad \eta\cdot\beta =\ordser{\omega}(\oplushi)\\ \inser{\omega}(\oplushi) &\text{if}\quad \ordser{\omega}(\oplushi)<\eta\cdot\beta . \end{array} \right. \] \end{examp} \begin{lem} \label{key} If $\varphi\in{\sl S}_\omega^M$ is a zero of $f\in{\sl S}_\omega [y]$, then $\inser{\omega} (\varphi)$ is a zero of $\inpol{\omega}{\ordser{\omega}\varphi}(f)$. \end{lem} \begin{proof} Set $\eta :=\ordser{\omega}(\varphi)$. For $\oplushi\in{\sl S}_\omega$ and $\beta\in {{\mathbb Z}_{\geq 0}}^M$ the following equality holds: \begin{equation}\label{relacion entre ordser y ordpol} \ordser{\omega} \left(\oplushi\varphi^\beta\right) \stackrel{\ref{propiedades de valser e inser},\, \ref{multiplication}}{=} \ordser{\omega}(\oplushi) +\eta\cdot\beta = \ordpol{\omega}{\eta}\oplushi y^\beta. 
\end{equation} Suppose that $\varphi\in{\sl S}_\omega^M$ is a zero of $f=\sum_{\beta}\oplushi_\beta y^\beta$; we have \[ \begin{array}{lcl} \sum_{\beta}\oplushi_\beta \varphi^\beta=0 & \stackrel{(\ref{relacion entre ordser y ordpol})+ \ref{propiedades de valser e inser},\,\ref{segunda propiedad valser} }{\Longrightarrow} &\sum_{\ordser{\omega}\left(\oplushi_\beta\varphi^\beta\right) =\ordpol{\omega}{\eta} (f)} \inser{\omega}\left(\oplushi_\beta\varphi^\beta\right)=0\\ &\stackrel{\ref{propiedades de valser e inser},\, \ref{multiplication}}{\Longrightarrow} & \sum_{\ordser{\omega}\left(\oplushi_\beta\right)+\eta\cdot\beta =\ordpol{\omega}{\eta} (f)} \inser{\omega}\oplushi_\beta {\left(\inser{\omega}\varphi\right)}^\beta=0\\ &\stackrel{\text{By definition}}{\Longrightarrow} & \inpol{\omega}{\eta}(f)\left(\inser{\omega}\varphi\right)=0. \end{array} \] \end{proof} For any $f\in{\sl S}_\omega [y]$ and $\eta\in {({\mathbb R}\cup\{\infty\})}^M$ we have $\inpol{\omega}{\eta}(f)\in {\mathbb K} ({x^{\frac{1}{K}}})[y]$. An element of the form $cx^\alpha$ with $c\in{\mathbb K}$ will be called a monomial. \begin{lem}\label{Sistema de coeficiente} Given $f\in{\sl S}_\omega [y]$, let ${\mathfrak m}(x)\in {{\mathbb K} ({x^{\frac{1}{K}}})}^M$ be an $M$-tuple of monomials. Set $\eta:= \ordser{\omega} {\mathfrak m}$. We have $\inpol{\omega}{\eta}(f)\in {\mathbb K} ({x^{\frac{1}{K}}})[y]$ and \[ \inpol{\omega}{\eta}(f(x,{\mathfrak m}(x)))=0\Longleftrightarrow \inpol{\omega}{\eta}(f(\underline{1},{\mathfrak m}(\underline{1})))=0. \] An $M$-tuple of monomials ${\mathfrak m}\in {{\mathbb K} ({x^{\frac{1}{K}}})}^M$ with $\ordser{\omega} {\mathfrak m}=\eta$ is a zero of $\inpol{\omega}{\eta}(f)$ as an element of ${\mathbb K} ({x^{\frac{1}{K}}})[y]$ if and only if ${\mathfrak m}(\underline{1})$ is a zero of $\inpol{\omega}{\eta}(f(\underline{1},y))$. \end{lem} \begin{proof} If $\ordser{\omega} ({\mathfrak m})=\eta$ then $\ordser{\omega}(x^\alpha {\mathfrak m}^\beta)=\omega\cdot\alpha+\eta\cdot\beta$. 
Since $\omega$ has rationally independent coordinates, $x^\alpha {\mathfrak m}^\beta = a x^\gamma$ where $a={{\mathfrak m}(\underline{1})}^\beta\in{\mathbb K}$ and $\gamma$ is the unique vector in ${\mathbb Q}^N$ such that $\omega\cdot\gamma=\omega\cdot\alpha+\eta\cdot\beta$. Now write \[ \inpol{\omega}{\eta}(f)=\sum_{\omega\cdot\alpha+\eta\cdot\beta= \ordpol{\omega}{\eta}(f)} a_{\alpha ,\beta} x^\alpha y^\beta \] we have $\sum_{\omega\cdot\alpha+\eta\cdot\beta=\ordpol{\omega}{\eta}(f)} a_{\alpha ,\beta} x^\alpha {\mathfrak m}^\beta=0$ if and only if \[ \sum_{\omega\cdot\alpha+\eta\cdot\beta=\ordpol{\omega}{\eta}(f)} a_{\alpha ,\beta} \frac{x^\alpha {\mathfrak m}^\beta}{x^\gamma}=0\Leftrightarrow \sum_{\omega\cdot\alpha+\eta\cdot\beta=\ordpol{\omega}{\eta}(f)} a_{\alpha ,\beta} {{\mathfrak m}(\underline{1})}^\beta=0. \] \end{proof} \section{Initial Ideals in ${\sl S}_\omega [y]$.}\label{Initial Ideals} For an $M$-tuple $\eta\in {({\mathbb R}\cup\{\infty\})}^M$ we will denote by $\Lambda (\eta )$ the set of subindexes \[ \Lambda (\eta ):=\{ i\in\{1,\ldots ,M\}\mid \eta_i\neq\infty\}. \] \begin{rem} $\ordpol{\omega}{\eta}(f)=\infty$ if and only if $f$ is in the ideal generated by $\{ y_i\mid i\in {\Lambda (\eta )}^{\rm C}\}$. \end{rem} Let ${\mathfrak I}S$ be an ideal of ${\sl S}_\omega [y]$ and $\eta\in {({\mathbb R}\cup\{\infty\})}^M$. The {\bf $\eta$-initial part of ${\mathfrak I}S$} is the ideal of ${\sl S}_\omega [y]$ generated by the $\eta$-initial parts of its elements: \[ \idinpol{\omega}{\eta} {\mathfrak I}S=\left< \{ \inpol{\omega}{\eta}f\mid f\in {\mathfrak I}S\}\cup \{y_i\}_{i\in \Lambda(\eta)^{\rm C}}\right> . \] Let ${\mathcal A}$ and ${\mathcal B}$ be ideals. 
We have \begin{equation}\label{inicial de la interseccion menor que interseccion de iniciales} \idinpol{\omega}{\eta}\left( {\mathcal A}\cap {\mathcal B}\right)\subset \idinpol{\omega}{\eta} {\mathcal A}\cap \idinpol{\omega}{\eta}{\mathcal B} \end{equation} and \begin{equation}\label{Parte inicial respeta la inclusion} {\mathcal A}\subset {\mathcal B}\Longrightarrow \idinpol{\omega}{\eta} {\mathcal A}\subset\idinpol{\omega}{\eta} {\mathcal B}. \end{equation} Since ${\mathcal A}\cdot{\mathcal B}\subset {\mathcal A}\cap {\mathcal B}$ then \begin{equation}\label{inicial del producto menor que inicial de interseccion} \idinpol{\omega}{\eta}\left( {\mathcal A}\cdot{\mathcal B}\right) \subset \idinpol{\omega}{\eta}\left( {\mathcal A}\cap {\mathcal B}\right) \end{equation} and, since $\inpol{\omega}{\eta} (a\cdot b)=\inpol{\omega}{\eta} a\cdot \inpol{\omega}{\eta} b$ then \begin{equation}\label{producto de iniciales menor que inicial del producto} \idinpol{\omega}{\eta}{\mathcal A}\cdot \idinpol{\omega}{\eta}{\mathcal B}\subset \idinpol{\omega}{\eta}\left( {\mathcal A}\cdot{\mathcal B}\right). \end{equation} Let $A$ be an arbitrary set. For an M-tuple $y\in A^M$ and a subset $\Lambda \subset \{ 1,\ldots ,M\}$ we will use the following notation: \begin{equation}\label{Quedarse solo con unas coordenadas} y_\Lambda := (y_i)_{i\in\Lambda}. \end{equation} Given two subsets $B\subset A$ and $C\subset A$ the set $B^{\Lambda}\times C^{\Lambda^{\rm C}}$ is defined to be: \[ B^{\Lambda}\times C^{\Lambda^{\rm C}}:=\{ y\in A^M\mid y_\Lambda\in B^{\#\Lambda}\,\text{and}\, y_{\Lambda^{\rm C}}\in C^{\#\Lambda^{\rm C}}\}. \] We will use the notation $\toro{\eta}$ for the $\# \Lambda (\eta )$-dimensional torus \[ \toro{\eta} := {\left({\sl S}_\omega^*\right)}^{\Lambda (\eta )}\times {\{ 0\}}^{{\Lambda (\eta )}^{\rm C}}. \] \begin{rem}\label{Contenidos en cierre de toro} ${\large \textsc{V}} \left( \idinpol{\omega}{\eta}{\mathfrak I}S\right)\subset \overline{\toro{\eta}}$. 
\end{rem} \begin{examp} For a point $\varphi=(\varphi_1,\ldots ,\varphi_M)\in {{\sl S}_\omega}^M$ denote by $\idmax{\varphi}$ be the maximal ideal \[ \idmax{\varphi}=\left< y_1-\varphi_1,\ldots ,y_M-\varphi_M\right>\subset {\sl S}_\omega [y]. \] Given $\eta\in {({\mathbb R}\cup\{\infty\})}^M$ we have \[ \left\{ \begin{array}{ll} \idinpol{\omega}{\eta}\idmax{\varphi}={\sl S}_\omega [y] &\text{if}\quad \ordser{\omega}(\varphi_i)<\eta_i\quad\text{for some}\quad i\in\{1,\ldots ,M\}\\ y_i\in\idinpol{\omega}{\eta}\idmax{\varphi} &\text{if}\quad \ordser{\omega}(\varphi_i)>\eta_i\\ \idinpol{\omega}{\eta}\idmax{\varphi}=\idmax{\inser{\omega}(\varphi)} &\text{if}\quad \ordser{\omega}(\varphi)=\eta . \end{array} \right. \] The first two points and the inclusion $\idinpol{\omega}{\eta}\idmax{\varphi}\supset\idmax{\inser{\omega}(\varphi)}$ in the third are direct consequence of Example \ref{inpol de un binomio}. The inclusion $\idinpol{\omega}{\eta}\idmax{\varphi}\subset\idmax{\inser{\omega}(\varphi)}$ in the third point is equivalent to $\inser{\omega}(\varphi)\in{\large \textsc{V}}\left( \idinpol{\omega}{\eta}\idmax{\varphi}\right)$ which follows from Lemma \ref{key}. And then \begin{equation}\label{ceros de la parte inicial de J} \toro{\eta}\cap {\large \textsc{V}}\left( \idinpol{\omega}{\eta}\idmax{\varphi}\right)=\left\{ \begin{array}{ccc} \emptyset & \text{if} & \ordser{\omega}(\varphi)\neq\eta\\ \inser{\omega}(\varphi) & \text{if} & \ordser{\omega}(\varphi)= \eta.\\ \end{array} \right. \end{equation} \end{examp} \section{Zeroes of the initial ideal in ${\sl S}_\omega [y]$.} \label{A special case of Kapranow's Theorem} Now we are ready to characterize the first terms of the zeroes of the ideal ${\mathfrak I}S\subset {\sl S}_\omega [y]$. The following is the key proposition to extend Point \ref{dos} of Newton-Puiseux's method. 
\begin{prop}\label{Kapranow finito} Let ${\mathfrak I}S\subset {\sl S}_\omega [y]$ be an ideal with a finite number of zeroes and let $\eta$ be an $M$-tuple in ${({\mathbb R}\cup\{\infty\})}^M$. An element $\oplushi\in \toro{\eta}$ is a zero of the ideal $\idinpol{\omega}{\eta}{\mathfrak I}S$ if and only if $\ordser{\omega}(\oplushi) = \eta$ and there exists $\varphi\in{\large \textsc{V}} ({\mathfrak I}S )$ such that $\inser{\omega}(\varphi) =\oplushi$. \end{prop} \begin{proof} Given $\varphi=(\varphi_1,\ldots ,\varphi_M)\in {{\sl S}_\omega}^M $ consider the ideal \[ \idmax{\varphi}=\left< y_1-\varphi_1,\ldots ,y_M-\varphi_M\right>\subset {\sl S}_\omega [y]. \] Set $H:={\large \textsc{V}} ({\mathfrak I}S )$. By hypothesis $H$ is a finite subset of ${{\sl S}_\omega}^M$. By the Nullstellensatz there exists $k\in{\mathbb N}$ such that \[ {\left( \bigcap_{\varphi\in H} {\mathcal J}_{\varphi}\right)}^k\subset {\mathfrak I}S\subset \bigcap_{\varphi\in H} {\mathcal J}_{\varphi}. \] By (\ref{Parte inicial respeta la inclusion}) and (\ref{producto de iniciales menor que inicial del producto}) we have \begin{equation}\label{Meto la parte inicial del ideal entre dos} {\left( \idinpol{\omega}{\eta}\bigcap_{\varphi\in H} {\mathcal J}_{\varphi}\right)}^k\subset \idinpol{\omega}{\eta}{\mathfrak I}S\subset \idinpol{\omega}{\eta}\bigcap_{\varphi\in H} {\mathcal J}_{\varphi}. \end{equation} On the other hand \begin{equation}\label{Entre la interseccion y el producto} \oplusrod_{\varphi\in H} \idinpol{\omega}{\eta}\idmax{\varphi} \stackrel{(\ref{producto de iniciales menor que inicial del producto})+(\ref{inicial del producto menor que inicial de interseccion})}{\subset} \idinpol{\omega}{\eta}\bigcap_{\varphi\in H}\idmax{\varphi} \stackrel{(\ref{inicial de la interseccion menor que interseccion de iniciales})}{\subset} \bigcap_{\varphi\in H}\idinpol{\omega}{\eta}\idmax{\varphi}. 
\end{equation} The zeroes of the right-hand and left-hand side of Equation (\ref{Entre la interseccion y el producto}) coincide. Therefore \begin{equation} {\large \textsc{V}}\left( \idinpol{\omega}{\eta}\bigcap_{\varphi\in H}\idmax{\varphi}\right) \stackrel{(\ref{Entre la interseccion y el producto})}{=} {\large \textsc{V}}\left( \bigcap_{\varphi\in H}\idinpol{\omega}{\eta}\idmax{\varphi}\right) = \bigcup_{\varphi\in H}{\large \textsc{V}}\left( \idinpol{\omega}{\eta}\idmax{\varphi}\right) \end{equation} and then, by (\ref{ceros de la parte inicial de J}), \begin{equation}\label{ceros de No se que poner} \toro{\eta}\cap {\large \textsc{V}}\left(\idinpol{\omega}{\eta}\bigcap_{\varphi\in H} \idmax{\varphi}\right)=\{\inser{\omega}\varphi\mid\varphi\in H,\ordser{\omega}(\varphi) = \eta\}. \end{equation} The conclusion follows directly from (\ref{Meto la parte inicial del ideal entre dos}) and (\ref{ceros de No se que poner}). \end{proof} \begin{cor}\label{Los ceros del inicial son monomios de orden eta} Let ${\mathfrak I}S\subset {\sl S}_\omega [y]$ be an ideal with a finite number of zeroes and let $\eta$ be an $M$-tuple in ${({\mathbb R}\cup\{\infty\})}^M$. The zeroes of the ideal $\idinpol{\omega}{\eta}{\mathfrak I}S$ in $\toro{\eta}$ are $M$-tuples of monomials of order $\eta$. \end{cor} \section{Initial ideals in ${\mathbb K} [x^*,y]$.}\label{Polynomial initial ideals} A polynomial $f\in{\mathbb K} [x^*, y]={\mathbb K}[x^*][y]$ is an expression of the form: \[ \sum_{(\alpha ,\beta )\in ({\mathbb Z}^N\times {{\mathbb Z}_{\geq 0}})^M} a_{(\alpha ,\beta )} x^\alpha y^\beta\qquad a_{(\alpha ,\beta )}\in{\mathbb K}. \] The $\eta$-order of $f\in{\mathbb K} [x^*][y]$ as an element of ${\sl S}_\omega [y]$ is called the {\bf $(\omega ,\eta)$-order} of $f$. That is \[ \ordPol{\omega}{\eta} (f) :=\min_{a_{(\alpha ,\beta ) }\neq 0} \omega\cdot\alpha +\eta\cdot\beta. 
\] And the $\eta$-initial part of $f$ as an element of ${\sl S}_\omega [y]$ is called the {\bf $(\omega ,\eta)$-initial part} of $f$. That is: if $\ordPol{\omega}{\eta}f<\infty$, then \[ \inPol{\omega}{\eta} (f) := \sum_{\omega\cdot\alpha +\eta\cdot\beta=\ordPol{\omega}{\eta} (f)} a_{(\alpha,\beta )} x^\alpha y^\beta \] and, if $\ordPol{\omega}{\eta}(f)=\infty$, $\inPol{\omega}{\eta} (f)=0$. Given an ideal ${{\mathfrak I}P}\subset{\mathbb K} [x^*][y]$ the {\bf $(\omega ,\eta )$-initial ideal of ${{\mathfrak I}P}$} is the ideal \[ \idinPol{\omega}{\eta}{{\mathfrak I}P}:= \left< \{\inPol{\omega}{\eta}(f)\mid f\in {{\mathfrak I}P}\}\cup \{ y_i\}_{i\in {\Lambda (\eta )}^{\rm C}}\right>\subset {\mathbb K} [x^*][y]. \] Given an ideal ${{\mathfrak I}P}\subset{\mathbb K} [x^*][y]$ let ${{\mathfrak I}P}^{\rm e}$ denote the extension of ${{\mathfrak I}P}$ to ${\sl S}_\omega [y]$. \begin{prop}\label{las extensiones y sin extender} Given $\eta\in {({\mathbb R}\cup\{\infty\})}^M$ and an ideal ${{\mathfrak I}P}\subset{\mathbb K} [x^*,y]$ we have that \[ {\left(\idinPol{\omega}{\eta} {{\mathfrak I}P}\right)}^{\rm e}= \idinpol{\omega}{\eta} {{\mathfrak I}P}^{\rm e}. \] \end{prop} \begin{proof} The inclusion ${\left(\idinPol{\omega}{\eta} {{\mathfrak I}P}\right)}^{\rm e}\subset \idinpol{\omega}{\eta} {{\mathfrak I}P}^{\rm e}$ is straightforward. Now, $h\in \{\inpol{\omega}{\eta}f\mid f\in{{\mathfrak I}P}^{\rm e}\}$ if and only if $h= \inpol{\omega}{\eta}(\sum_{i=1}^r g_i P_i)$ where $g_i\in{\sl S}_\omega [y]$ and $P_i\in {{\mathfrak I}P}$. Let $\Lambda =\{ i\mid \ordpol{\omega}{\eta}\left( g_iP_i\right)=\min_{j=1,\ldots r}\ordpol{\omega}{\eta}\left( g_jP_j\right)\}$. If $\sum_{i\in\Lambda} \inpol{\omega}{\eta} \left( g_i P_i\right) = 0$ then $h= \inpol{\omega}{\eta}(\sum_{i=1}^r g_i' P_i) $ where $g_i'= g_i-\inpol{\omega}{\eta}(g_i)$ for $i\in\Lambda$ and $g_i'=g_i$ otherwise. Then we can suppose that $\sum_{i\in\Lambda} \inpol{\omega}{\eta} \left( g_i P_i\right)\neq 0$. 
Then $h= \sum_{i\in\Lambda} \inpol{\omega}{\eta} \left(g_i P_i\right) =\sum_{i\in\Lambda} \inpol{\omega}{\eta} (g_i )\inpol{\omega}{\eta} (P_i)$ is an element of ${\left(\idinPol{\omega}{\eta} {{\mathfrak I}P}\right)}^{\rm e}$. \end{proof} We will be using the following technical result: \begin{lem}\label{inicial del inicial} Given $\eta\in {({\mathbb R}\cup\{\infty\})}^M$ and an ideal ${\mathcal I}\subset{\mathbb K} [x^*][y]$ we have that \[ \idinPol{\omega}{\eta}( {\idinPol{\omega}{\eta} {{\mathfrak I}P}}) =\idinPol{\omega}{\eta} {{\mathfrak I}P}. \] \end{lem} \begin{proof}It is enough to see that for any $g\in{\idinPol{\omega}{\eta} {{\mathfrak I}P}}$ there exists $f\in {\mathfrak I}P$ such that ${\idinPol{\omega}{\eta} {g}}={\idinPol{\omega}{\eta} (f)}$: Given $p=\sum_{i=1}^d a_i x^{\alpha_i}y^{\beta_i}\in{\mathbb K} [x^*][y]$ and $h\in {{\mathfrak I}P}$, we have \[ p\idinPol{\omega}{\eta}(h)= \sum_{i=1}^d a_i x^{\alpha_i}y^{\beta_i} \idinPol{\omega}{\eta}(h)=\sum_{i=1}^d\idinPol{\omega}{\eta} (a_i x^{\alpha_i}y^{\beta_i}h). \] Then the product $p\idinPol{\omega}{\eta}(h)$ is a sum of $({\omega},{\eta})$-initial parts of elements of ${{\mathfrak I}P}$. Therefore, $g\in\idinPol{\omega}{\eta}{{\mathfrak I}P}$ if and only if there exists $f_1,\ldots ,f_r\in {{\mathfrak I}P}$, such that $g=\sum_{i\in\{ 1,\ldots , r\}} \idinPol{\omega}{\eta}(f_i)$. The $f_i$'s may be chosen such that $\sum_{i\in\Lambda}\idinPol{\omega}{\eta}(f_i)\neq 0$ for all non-empty $\Lambda\subset\{ 1,\ldots , r\}$. Let $m=\min_{i\in\{ 1,\ldots , r\}} \ordPol{\omega}{\eta}(f_i)$. Since $\sum_{\ordPol{\omega}{\eta} (f_i)=m}\idinPol{\omega}{\eta} (f_i)\neq 0$ then $\idinPol{\omega}{\eta} (g)=\sum_{\ordPol{\omega}{\eta} (f_i)=m} \idinPol{\omega}{\eta} (f_i)$, and then $f:=\sum_{\ordPol{\omega}{\eta} (f_i)=m}f_i$ has the property we were looking for. 
\end{proof} \begin{prop}\label{primera traduccion del teorema} Let $\omega\in{\mathbb R}^N$ be of rationally independent coordinates, let $\eta$ be an $M$-tuple in ${({\mathbb R}\cup\{\infty\})}^M$ and let ${\mathfrak I}P\subset {\mathbb K} [x^*,y]$ be an N-admissible ideal. An element $\oplushi\in \toro{\eta}$ is an $\omega$-solution for the ideal $\idinPol{\omega}{\eta}{\mathfrak I}P$ if and only if $\ordser{\omega}(\oplushi) = \eta$ and there exists $\varphi\in {{\sl S}_\omega}^M$, an $\omega$-solution for ${\mathfrak I}P$, such that $\inser{\omega}(\varphi) =\oplushi$. \end{prop} \begin{proof} This is a direct consequence of Proposition \ref{las extensiones y sin extender} and Proposition \ref{Kapranow finito}. \end{proof} \section{The tropical variety.}\label{Tropicalization} The tropical variety of a polynomial $f\in {\mathbb K} [x^*, y]$ is the $(N+M-1)$-skeleton of the normal fan of its Newton polyhedron. The tropical variety of an ideal ${{\mathfrak I}P}\subset {\mathbb K} [x^*, y]$ is the intersection of the tropical varieties of the elements of ${{\mathfrak I}P}$. More precisely, the {\bf tropical variety} of ${{\mathfrak I}P}$ is the set \[ \tau ({{\mathfrak I}P}):= \{(\omega ,\eta )\in{\mathbb R}^N\times {({\mathbb R}\cup\{\infty\})}^M\mid \idinPol{\omega}{\eta}{{\mathfrak I}P}\cap {\mathbb K} [x^*,y_{\Lambda (\eta )}]\text{ does not have a monomial}\}. \] Tropical varieties have become an important tool for solving problems in algebraic geometry. See for example \cite{ItenbergMikhalkin:2007,Gathmann:2006,RichterSturmfels:2005}. In \cite{Bogart:2007,HeptTheobald:2007} algorithms to compute tropical varieties are described. \begin{prop}\label{proposicion Tropicalizacion} Let ${{\mathfrak I}P}$ be an ideal of ${\mathbb K} [x^*,y]$. Given $\eta \in {({\mathbb R}\cup\{\infty\})}^{M}$ the ideal $\idinPol{\omega}{\eta}{{\mathfrak I}P}$ has an $\omega$-solution in $\toro{\eta}$ if and only if $(\omega ,\eta)$ is in the tropical variety of ${{\mathfrak I}P}$. 
\end{prop} \begin{proof} Suppose that $\varphi\in \toro{\eta}$ is an $\omega$-solution of $\idinPol{\omega}{\eta}{{\mathfrak I}P}$ and that $c x^\alpha y^\beta\in \idinPol{\omega}{\eta}{{\mathfrak I}P}\cap {\mathbb K} [x^*,y_{\Lambda (\eta )}]$. We have $x^\alpha\varphi^\beta=0$ and then, $\varphi_i=0$ for some $i\in\Lambda (\eta )$ which gives a contradiction. Let ${\mathbb K} (x)$ denote the field of fractions of ${\mathbb K} [x]$ and let $\widetilde{{{\mathfrak I}P}}$ be the extension of $\idinPol{\omega}{\eta}{{\mathfrak I}P}$ to ${\mathbb K} (x)[y]$ via the natural inclusion $ {\mathbb K} [x,y]={\mathbb K} [x][y]\subset {\mathbb K} (x)[y]$. Since ${\sl S}_\omega$ contains the algebraic closure of ${\mathbb K} (x)$, the zeroes of $\idinPol{\omega}{\eta}{{\mathfrak I}P}$ are the algebraic zeroes of $\tilde{{{\mathfrak I}P}}$. Suppose that $\idinPol{\omega}{\eta}{{\mathfrak I}P}$ does not have zeroes in $\toro{\eta}$ then, by Remark \ref{Contenidos en cierre de toro} \[ {\large \textsc{V}}\left(\idinPol{\omega}{\eta}{{\mathfrak I}P}\right)\subset \overline{\toro{\eta}}\setminus\toro{\eta}. \] Let $v$ be the only element of ${\{ 1\} }^{\Lambda (\eta )}\times {\{ 0\} }^{{\Lambda (\eta )}^{\rm C}}$. The monomial $y^v$ vanishes in all the algebraic zeroes of $\tilde{{{\mathfrak I}P}}$. By the Nullstellensatz, there exists $k\in{\mathbb N}$ such that $y^{kv}$ belongs to $\tilde{{{\mathfrak I}P}}$. Now $y^{kv}$ belongs to $\tilde{{{\mathfrak I}P}}$ if and only if there exists $h_1,\dots ,h_r\in {\mathbb K} [x]\setminus\{ 0\}$ and $f_1,\dots ,f_r\in \idinPol{\omega}{\eta}{{\mathfrak I}P}$ such that \[ y^{kv}=\sum_{i=1}^r \frac{1}{h_i}f_i \Rightarrow \left(\oplusrod_{i=1}^r h_i(x)\right) y^{kv} = \sum_{i=1}^r \left(\oplusrod_{\begin{array}{c}j=1\\i\neq j\end{array}}^r h_j\right) f_i\in \idinPol{\omega}{\eta}{{\mathfrak I}P}. 
\] Then, by Lemma \ref{inicial del inicial}, $\inPol{\omega}{\eta} \left(\left(\oplusrod_{i=1}^r h_i(x)\right) y^{kv}\right)\in\idinPol{\omega}{\eta}{{\mathfrak I}P}$ and $\inPol{\omega}{\eta} \left(\left(\oplusrod_{i=1}^r h_i(x)\right) y^{kv}\right)= \inser{\omega}\left(\oplusrod_{i=1}^r h_i(x)\right)y^{kv}$ is a monomial. And the result is proved. \end{proof} As a direct consequence of Propositions \ref{primera traduccion del teorema} and \ref{proposicion Tropicalizacion} we have the extension of Point 1 of Newton-Puiseux's method. \begin{thm}\label{Extension del punto uno} Let ${\mathfrak I}P\subset {\mathbb K} [x^*,y]$ be an N-admissible ideal and let $\omega\in{\mathbb R}^N$ be of rationally independent coordinates. $\oplushi= (c_1x^{\alpha^{(1)}},\ldots ,c_M x^{\alpha^{(M)}})$ is the first term of an $\omega$-solution of ${\mathfrak I}P$ if and only if \begin{itemize} \item $(\omega ,\ordser\omega\oplushi)$ is in the tropical variety of ${\mathfrak I}P$. \item $\oplushi$ is an $\omega$-solution of the ideal $\idinPol{\omega}{\ordser\omega\oplushi} {\mathfrak I}P$. \end{itemize} \end{thm} These statements recall Kapranov's theorem. Kapranov's theorem was proved for hypersurfaces in \cite{EinsiedlerKapranov:2006} and the first published proof for an arbitrary ideal may be found in \cite{Draisma:2008}. There are several constructive proofs \cite{Payne:2009,Katz:2009} in the literature. Another proof of Proposition \ref{Kapranow finito} could probably be done by using Proposition \ref{las extensiones y sin extender}, showing that $(\omega ,\eta)\in {\mathcal T} ({{\mathfrak I}P})$ if and only if $\eta\in {\mathcal T} ({{\mathfrak I}P}^e)$, and checking each step of one of the constructive proofs. 
\section{$\omega$-set.}\label{omega-data} At this stage we need to introduce some more notation: Given a $M\times N$ matrix \[ \Gamma=\left(\!\!\begin{array}{ccc} \tiny\Gamma_{1,1} & \ldots & \tiny\Gamma_{1,N}\\ \vdots & & \vdots\\ \tiny\Gamma_{M,1} & \ldots & \tiny\Gamma_{M,N} \end{array}\!\!\right). \] The $i$-th row will be denoted by $\Gamma_{i,*}:= (\Gamma_{i,1},\ldots ,\Gamma_{i,N})$ and \[ x^\Gamma :=\left(\!\!\begin{array}{c} x^{\tiny\Gamma_{1,*}}\\ \vdots \\ x^{\tiny\Gamma_{M,*}} \end{array}\!\!\right). \] In particular, if $I\in {\mathcal M}_{N\times N}$ is the identity, then $x^{\frac{1}{k}I}=({x_1}^{\frac{1}{k}},\ldots ,{x_N}^{\frac{1}{k}})$. An $M$-tuple of monomials ${\mathfrak m}\in {{\mathbb K} ({x^{\frac{1}{K}I}})}^M$ can be written as an entrywise product \[ {\mathfrak m}=x^{\Gamma}c=\left(\begin{array}{c} c_1 x^{\Gamma_{1,*}}\\ \vdots\\ c_M x^{\Gamma_{M,*}} \end{array}\right). \] Given an $M$-tuple of monomials ${\mathfrak m}\in {{\mathbb K} ({x^{\frac{1}{K}I}})}^M$ the {\bf defining data of ${\mathfrak m}$} is the $3$-tuple \[ D({\mathfrak m})=\{\ordser{\omega}{\mathfrak m} ,\Gamma ,{\mathfrak m}(\underline{1})\} \] where $\Gamma\in {\mathcal M}_{M\times N}({\mathbb Q}\cup\{\infty\})$ is the unique matrix such that $\omega\cdot\Gamma^T=\ordser{\omega}{\bf m}$ and $\Gamma_{i,*}=\underline{\infty}$ for all $i\in {\Lambda (\ordser{\omega}{\bf m})}^{\rm C}$. \begin{examp} If $\omega = (1,\sqrt{2})$ and \[ {\mathfrak m} = \left(\begin{array}{c} 3{x_1}^{3}\\ 7{x_1}^{2}{x_2}\\ 0 \end{array}\right) \] then \[ D({\mathfrak m}) =\{ (3,2+\sqrt{2},\infty ),\left(\begin{array}{cc} 3 & 0\\ 2 & 1\\ \infty & \infty \end{array}\right), (3,7,0)\}. 
\] \end{examp} \begin{defin} An {\bf $\omega$-set} is a $3$-tuple $\{ \eta,\Gamma,c\}$ where \begin{equation}\label{Donde estan los elementos de un starting data} \eta\in (\mathbb{R}\cup\{\infty\})^M,\,\,\Gamma\in {\mathcal M}_{M\times N}(\mathbb{Q}\cup \{\infty\}),\, c\in {\mathbb K}^M \end{equation} and \begin{itemize} \item $\omega\cdot\Gamma^T=\eta$ \item $\Gamma_{i,*}=\underline{\infty}$ for all $i\in {\Lambda (\eta )}^{\rm C}$ \item $c\in {{\mathbb K}^*}^{\Lambda (\eta)}\times {\{ 0\}}^{{\Lambda (\eta)}^{\rm C}}$. \end{itemize} \end{defin} Given an $\omega$-set $D=\{\eta,\Gamma, c\}$ the {\bf M-tuple defined by $D$} is the M-tuple of monomials \[ {\mathfrak M}_D: = x^{\Gamma}c. \] We have \[ {\mathfrak M}_{\{\eta ,\Gamma ,c\}}(x^{rI})={\mathfrak M}_{\{r\eta ,r\Gamma ,c\}}(x). \] \begin{rem} ${\mathfrak m}={\mathfrak M}_{D({\mathfrak m})}$ and $D({\mathfrak M}_D)=D$. \end{rem} \section{Starting $\omega$-set for $\mathcal{I}$.}\label{omega-starting data} Given an N-admissible ideal ${{\mathfrak I}P}\subset {\mathbb K}[x^*,y]$. {\bf A starting $\omega$-set for ${{\mathfrak I}P}$} is an $\omega$-set $D=\{ \eta,\Gamma,c\}$ such that \begin{itemize} \item The vector $(\omega,\eta)$ is in the tropical variety of ${{\mathfrak I}P}$. \item $c$ is a zero of the system $\{f(\underline{1},y)=0\mid f\in \text{In}_{\omega,\eta}{\mathfrak I}P\}$. \end{itemize} \begin{examp} Let ${{\mathfrak I}P}=\left< x_1+y_1-y_2+y_1y_2+y_3,x_2-y_1+y_2+2y_1 y_2, y_3\right>$. For $\omega =(1,\sqrt{2})$ there are two possible starting $\omega$-sets \[ D1=\{ (1,1,\infty ),\left(\begin{array}{cc} 1 & 0\\ 1 & 0\\ \infty & \infty \end{array}\right), (1,1,0)\} \] and \[ D2=\{ (0,0,\infty ),\left(\begin{array}{cc} 0 & 0\\ 0 & 0\\ \infty & \infty \end{array}\right), (\frac{1}{3},\frac{1}{5},0)\}. 
\] and \[ {\mathfrak M}_{D1}(x)=\left(\begin{array}{c} x_1\\ x_1\\ 0 \end{array}\right),\, {\mathfrak M}_{D1} (x^{\frac{1}{3}I})=\left(\begin{array}{c} {x_1}^{\frac{1}{3}}\\ {x_1}^{\frac{1}{3}}\\ 0 \end{array}\right)\,\text{and} \,{\mathfrak M}_{D2}(x)=\left(\begin{array}{c} \frac{1}{3}\\ \frac{1}{5}\\ 0 \end{array}\right). \] \end{examp} \begin{prop}\label{Mupla asociada a data} The $\omega$-set $D=\{\eta ,\Gamma ,c\}$ is a starting $\omega$-set for ${{\mathfrak I}P}$ if and only if ${\mathfrak M}_D$ is an $\omega$-solution of $\idinPol{\omega}{\eta} {{\mathfrak I}P}$. Moreover all the $\omega$-solutions of $\idinPol{\omega}{\eta} {{\mathfrak I}P}$ in $\toro{\eta}$ are of the form ${\mathfrak M}_D$ where $D=\{\eta ,\Gamma ,c\}$ is a starting $\omega$-set for ${{\mathfrak I}P}$. \end{prop} \begin{proof} That ${\mathfrak M}_D$ is an $\omega$-solution of $\idinPol{\omega}{\eta} {{\mathfrak I}P}$ when $D$ is a starting $\omega$-set is a direct consequence of Lemma \ref{Sistema de coeficiente}. The other implication is a consequence of Proposition \ref{proposicion Tropicalizacion} and Lemma \ref{Sistema de coeficiente}. The last sentence follows from Corollary \ref{Los ceros del inicial son monomios de orden eta}. \end{proof} \section{The ideal ${{\mathfrak I}P}_D$.}\label{The ideal ID} Given a matrix $\Gamma\in {\mathcal M}_{M\times N}({\mathbb Q}\cup\{\infty\})$ the least common multiple of the denominators of its entries will be denoted by ${\bf d}\Gamma$. That is \[ {\bf d}\Gamma := \min\{ k\in{\mathbb N}\mid k\Gamma\in {\mathcal M}_{M\times N}(\mathbb{Z}\cup\{\infty\})\}. \] Given an $\omega$-set $D=\{\eta,\Gamma, c\}$, we will denote by ${{\mathfrak I}P}_D$ the ideal in $\mathbb{K}[x^*,y]$ given by \[ {{\mathfrak I}P}_D:= \left<\{f(x^{{\bf d}\Gamma I},y+{\mathfrak M}_D(x^{{\bf d}\Gamma I}))\mid f\in {{\mathfrak I}P}\}\right>\subset{\mathbb K} [x^*,y]. 
\] \begin{rem}\label{ceros de I y de ID} A series $\oplushi\in{\sl S}_\omega^M$ is an $\omega$-solution of ${{\mathfrak I}P}$ if and only if the series $\tilde{\oplushi}:= \oplushi(x^{{\bf d}\Gamma I})- {\mathfrak M}_D(x^{{\bf d}\Gamma I})$ is an $\omega$-solution of ${{\mathfrak I}P}_D$. \end{rem} \begin{prop}\label{En la tropicalizacion hay uno de pendiente mayor} Let $D=\{\eta,\Gamma, c\}$ be a starting $\omega$-set for an ideal ${\mathcal I}$. There exists $\tilde{\eta}\in(\mathbb{R}\cup\{\infty\})^M$ such that $(\omega,\tilde{\eta})\in\tau ({\mathcal I}_D)$ and $\tilde{\eta}_{\Lambda (\tilde{\eta})}>{\bf d}\Gamma\eta_{\Lambda (\tilde{\eta})}$ coordinate-wise. \end{prop} \begin{proof} By Proposition \ref{Kapranow finito} and Proposition \ref{Mupla asociada a data}, ${\mathfrak M}_D$ is the first term of at least one $\omega$-solution of $\mathcal{I}$. Say \[ \oplushi={\mathfrak M}_D+\tilde{\oplushi}\in{\large \textsc{V}}({\mathcal I}),\quad \tilde{\oplushi}= \left(\begin{array}{c} \tilde{\oplushi}_1\\ \vdots\\ \tilde{\oplushi}_M \end{array}\right)\in {\sl S}_\omega^M, \] with $\ordser{\omega}(\tilde{\oplushi}_i)> \omega\cdot\Gamma_{i,*}=\eta_i $ when $\tilde{\oplushi}_i\neq 0$. Set \[ \tilde{\eta}:={\bf d}\Gamma\ordser{\omega}(\tilde{\oplushi})\stackrel{\text{Remark} \ref{propiedades de valser e inser},\ref{orden y ramificacion}}{=}\ordser{\omega}\left(\tilde{\oplushi}(x^{{\bf d}\Gamma I})\right) \] then $\tilde{\eta}_i>{\bf d}\Gamma\eta_i$ for all $i\in\Lambda (\tilde{\eta})$. By Remark \ref{ceros de I y de ID} $\tilde{\oplushi}(x^{{\bf d}\Gamma I})$ is an $\omega$-solution of ${\mathcal I}_D$. Then, by Proposition \ref{Kapranow finito}, $\inser{\omega}\tilde{\oplushi}$ is an $\omega$-solution of $\idinPol{\omega}{\tilde{\eta}}{\mathcal I}_D$. Finally, by Proposition \ref{proposicion Tropicalizacion}, $(\omega ,\tilde{\eta})$ is in the tropical variety of ${\mathcal I}_D$. 
\end{proof} \begin{prop}\label{Hay uno de pendiente mayor} Let $D=\{\eta,\Gamma, c\}$ be a starting $\omega$-set for an ideal ${\mathcal I}$. There exists a starting $\omega$-set $D'=\{\eta',\Gamma', c'\}$ for ${\mathcal I}_D$ such that ${\eta'}_{\Lambda (\eta')}>{\bf d}\Gamma\eta_{\Lambda (\eta')}$ coordinate-wise. \end{prop} \begin{proof} By Proposition \ref{En la tropicalizacion hay uno de pendiente mayor}, there exists $\eta'\in {({\mathbb R}\cup\{\infty\})}^M$ such that ${\eta'}_{\Lambda (\eta')}>{\bf d}\Gamma\eta_{\Lambda (\eta')}$ coordinate-wise and $(\omega ,\eta')\in\tau ({\mathcal I}_D)$. By Proposition \ref{proposicion Tropicalizacion}, the ideal $\idinPol{\omega}{\eta'}{\mathcal I}_D$ has an $\omega$-solution $\phi$ in $\toro{\eta'}$. By Proposition \ref{Mupla asociada a data}, $\phi={\mathfrak M}_{D'}$ where $D'=\{\eta',\Gamma',c'\}$ is a starting $\omega$-set for ${\mathcal I}_D$. \end{proof} \section{$\omega$-sequences.}\label{omega-sequences} Given an $M$-tuple $\phi\in{\sl S}_\omega^M$, define inductively $\{\phi^{(i)}\}_{i=0}^\infty$ and $\{ D^{(i)}\}_{i=0}^\infty$ by: \begin{itemize} \item{For $i=0$:} \begin{itemize} \item $\phi^{(0)}:=\phi$ \item $D^{(0)}$ is the defining data of $\inser{\omega}\phi$. \end{itemize} \item{For $i>0$:} \begin{itemize} \item $\phi^{(i)}:=\phi^{(i-1)}(x^{{\bf d}\Gamma^{(i-1)} I})-{\mathfrak M}_{D^{(i-1)}}(x^{{\bf d}\Gamma^{(i-1)}I})$ \item $D^{(i)}$ is the defining data of the $M$-tuple of monomials $\inser{\omega}\phi^{(i)}$. ($D^{(i)}:=D(\inser{\omega}\phi^{(i)})$.) \end{itemize} \end{itemize} The sequence above \[ {\bf seq} (\phi):= \{D^{(i)}\}_{i=0}^\infty \] will be called {\bf the defining data sequence for $\phi$}. \begin{rem}\label{Los eta crecen en el data asociado a serie} For any $\phi\in{\sl S}_\omega^M$.
If ${\bf seq} (\phi):= \{\eta^{(i)},\Gamma^{(i)},c^{(i)}\}_{i=0}^\infty$ then ${\eta^{(i)}}_{\Lambda (\eta^{(i)})}>{\bf d}\Gamma^{(i-1)}{\eta^{(i-1)}}_{\Lambda (\eta^{(i)})}$ coordinate-wise. \end{rem} Given a sequence $S=\{D^{(i)}\}_{i=0\ldots K}=\{\eta^{(i)},\Gamma^{(i)}, c^{(i)}\}_{i=0\ldots K}$, with $K\in{\mathbb Z}_{\geq 0}\cup\{\infty\}$, set \begin{itemize} \item ${\mathcal I}^{(0)}={\mathcal I}$ \item ${\mathcal I}^{(i)}={\mathcal I}^{(i-1)}_{D^{(i-1)}}$ for $i\in\{1,\ldots ,K\}$ \end{itemize} $S$ is called an {\bf $\omega$-sequence for ${\mathcal I}$} if and only if for $i\in\{0,\ldots ,K\}$ \begin{itemize} \item $D^{(i)}=\{\eta^{(i)},\Gamma^{(i)}, c^{(i)}\}$ is a starting $\omega$-set for ${\mathcal I}^{(i)}$ \item ${\eta^{(i)}}_{\Lambda (\eta^{(i)})}>{\bf d}\Gamma^{(i-1)}{\eta^{(i-1)}}_{\Lambda (\eta^{(i)})}$ coordinate-wise. \end{itemize} As a corollary to Proposition \ref{Hay uno de pendiente mayor} we have: \begin{cor} Let $\{D^{(i)}\}_{i=0\ldots K}$ be an $\omega$-sequence for ${\mathcal I}$. For any $K'\in\{ K+1,\ldots ,\infty\}$ there exists a sequence $\{D^{(i)}\}_{i=K+1\ldots K'}$ such that $\{D^{(i)}\}_{i=0\ldots K'}$ is an $\omega$-sequence for ${\mathcal I}$. \end{cor} \begin{prop}\label{ceros dan w-secuencia} If $\phi$ is an $\omega$-solution of ${\mathcal I}$ then ${\bf seq} (\phi )$ is an $\omega$-sequence for ${\mathcal I}$. \end{prop} \begin{proof} This is a direct consequence of Remarks \ref{Los eta crecen en el data asociado a serie} and \ref{ceros de I y de ID}. \end{proof} \section{The solutions.}\label{The solutions} Given an $\omega$-sequence $S=\{ D^{(i)}\}_{i=0\ldots K}$ for ${\mathcal I}$, with $D^{(i)}=\{\eta^{(i)},\Gamma^{(i)},c^{(i)}\}$, set \begin{equation}\label{sucesion de ramificaciones} r^{(0)}:=1\quad\text{and}\quad r^{(i)}:=\frac{1}{\prod_{j=0}^{i-1}{\bf d}\Gamma^{(j)}}\,\text{for}\, i>0.
\end{equation} The {\bf series defined by $S$} is the series \[ {\bf ser} (S):= \sum_{i=0}^K {\mathfrak M}_{D^{(i)}} (x^{r^{(i)}I}). \] The following theorem is the extension of Point \ref{tres} of Newton--Puiseux's method: \begin{thm}\label{ultimo teorema} \label{data da ceros} If $S=\{D^{(i)}\}_{i=0}^\infty$ is an $\omega$-sequence for ${\mathcal I}$ then ${\bf ser} (S)$ is an $\omega$-solution of ${\mathcal I}$. \end{thm} \begin{proof} Let $S=\{ D^{(i)}\}_{i=0}^\infty$ be an $\omega$-sequence for ${\mathcal I}$, where $D^{(i)}=\{\eta^{(i)},\Gamma^{(i)},c^{(i)}\}$. Let $\{ {\mathcal I}^{(i)}\}_{i=0}^\infty$ be defined by ${\mathcal I}^{(0)}:={\mathcal I}$ and ${\mathcal I}^{(i)}:={\mathcal I}^{(i-1)}_{D^{(i-1)}}$. We have that $D^{(i)}$ is a starting $\omega$-set for ${\mathcal I}^{(i)}$. By Proposition \ref{proposicion Tropicalizacion}, for each $i\in{\mathbb N}$ there exists $\phi^{(i)}\in{\sl S}_\omega^M$ such that $\ordser{\omega}\phi^{(i)}=\eta^{(i)}$ and $\phi^{(i)}\in{\large \textsc{V}} \left( {\mathcal I}^{(i)}\right)$. Set $\{r^{(i)}\}_{i=0}^\infty$ as in (\ref{sucesion de ramificaciones}) and $\tilde{\phi^{(i)}}:= {\bf ser} (\{D^{(j)}\}_{j=0}^{i-1})+\phi^{(i)}(x^{r^{(i)} I})$. For each $i\in{\mathbb N}$, by Remark \ref{ceros de I y de ID}, $\tilde{\phi^{(i)}}\in{\large \textsc{V}} \left( {\mathcal I}\right)\subset{\sl S}_\omega^M$. Since there are only a finite number of zeroes, there exists $K\in{\mathbb N}$ such that $\tilde{\phi^{(i)}}=\tilde{\phi^{(K)}}$ for all $i>K$. Then \[ {\bf ser} (S)=\tilde{\phi^{(K)}}\in{\large \textsc{V}} \left({\mathcal I}\right)\subset{\sl S}_\omega^M. \] \end{proof} Theorem \ref{ultimo teorema} together with Proposition \ref{ceros dan w-secuencia} gives: \begin{cor} {\bf Answer to Question \ref{problema}.}\\ Let ${\mathcal I}\subset{\mathbb K}[x^*,y]$ be an N-admissible ideal and let $\omega\in{\mathbb R}^N$ be of rationally independent coordinates.
The $M$-tuple of series defined by an $\omega$-sequence for ${\mathcal I}$ is an element of ${\sl S}_\omega^M$. An $M$-tuple of series $\phi\in{\sl S}_\omega^M$ is an $\omega$-solution of ${\mathcal I}$ if and only if $\phi$ is an $M$-tuple of series defined by an $\omega$-sequence for ${\mathcal I}$. \end{cor} \section{The tropical variety of a quasi-ordinary singularity.} Let $(V,\underline{0})$ be a singular $N$-dimensional germ of an algebraic variety. $(V,\underline{0})$ is said to be {\bf quasi-ordinary} when it admits a projection $\pi\colon (V,\underline{0})\longrightarrow ({\mathbb K}^N,\underline{0})$ whose discriminant is contained in the coordinate hyperplanes. Such a projection is called a {\bf quasi-ordinary projection}. Quasi-ordinary singularities admit analytic local parametrizations. This was shown for hypersurfaces by S.~Abhyankar \cite{Abhyankar:1955} and extended to arbitrary codimension in \cite{FAroca:2004}. Quasi-ordinary singularities have been the object of study of many research papers (see e.g.\ \cite{ArocaSnoussi:2005,Gau:1988,Tornero:2001,Popescu-Pampu:2003}). \begin{cor} Let $V$ be an $N$-dimensional algebraic variety embedded in ${\mathbb C}^{N+M}$ with a quasi-ordinary analytically irreducible singularity at the origin. Let \[ \begin{array}{cccc} \pi: & V & \longrightarrow & {\mathbb C}^N\\ & (x_1,\ldots ,x_{N+M}) & \mapsto & (x_1,\ldots ,x_N) \end{array} \] be a quasi-ordinary projection. Let ${\mathcal I}\subset{\mathbb K} [x_1,\ldots ,x_{N+M}]$ be the defining ideal of $V$. Then for any $\omega\in {{\mathbb R}_{>0}}^N$ of rationally independent coordinates there exists a {\bf unique} $e\in {{\mathbb R}_{>0}}^M$ such that $(\omega ,e)$ is in the tropical variety ${\mathcal T}({\mathcal I})$.
\end{cor} \begin{proof} Since $\pi$ is quasi-ordinary there exist $k$ and an $M$-tuple of analytic series in $N$ variables $\varphi_1,\ldots ,\varphi_M$ such that $(t_1^k,\ldots ,t_N^k,\varphi_1(\underline{t}),\ldots ,\varphi_M(\underline{t}))$ are parametric equations of ${\large \textsf{V}} ({\mathcal I})$ about the origin \cite{FAroca:2004}. For any $\omega\in {{\mathbb R}_{>0}}^N$ of rationally independent coordinates, the first orthant is $\omega$-positive and then $(\varphi_1,\ldots ,\varphi_M)$ is an element of ${{\sl S}_\omega}^M$. Set $\eta_\omega:=\ordser\omega (\varphi_1,\ldots ,\varphi_M)$. The $M$-tuple of Puiseux series (with positive exponents) $y:=(\varphi_1({x_1}^{\frac{1}{k}},\ldots ,{x_N}^{\frac{1}{k}}),\ldots ,\varphi_M({x_1}^{\frac{1}{k}},\ldots ,{x_N}^{\frac{1}{k}}))$ is an $\omega$-solution of ${\mathfrak I}P$ with $\ordser\omega (y) =\frac{1}{k}\eta_\omega$. By Theorem \ref{Extension del punto uno}, $(\omega ,\frac{1}{k}\eta_\omega)$ is in the tropical variety ${\mathcal T} ({\mathfrak I}P)$. Now we will show that $(\omega ,e)\in {\mathcal T} ({\mathfrak I}P)$ implies $e=\frac{1}{k}\eta_\omega$. Set $\Phi$ to be the map: \[ \begin{array}{cccc} \Phi: & U & \longrightarrow & {\mathbb C}^{N+M}\\ & (t_1,\ldots ,t_N) & \mapsto & (t_1^k,\ldots ,t_N^k,\varphi_1(\underline{t}),\ldots ,\varphi_M(\underline{t})) \end{array} \] Since the singularity is analytically irreducible this parametrization covers a neighborhood of the singularity at the origin. Let $U\subset{\mathbb C}^N$ be the common domain of convergence of $\varphi_1,\ldots ,\varphi_M$. $U$ contains a neighborhood of the origin. We have $\Phi (U)= \pi^{-1} (U)\cap V$. Let $y_1,\ldots ,y_M$ be Puiseux series with exponents in some $\omega$-positive rational cone $\sigma$ (with $\ordser{\omega} \underline{y}=e\in{{\mathbb R}_{> 0}}^M$) such that $f(x_1,\ldots ,x_N, y_1(\underline{x}),\ldots ,y_M(\underline{x}))=0$ for all $f\in {\mathcal I}$.
Take $k'$ such that $\psi_j(\underline{t}):= y_j({t_1}^{k'},\ldots ,{t_N}^{k'})$ is a series with integer exponents for all $j=1\ldots M$. (This may be done since they are Puiseux series.) There exists an open set $W\subset{\mathbb C}^N$ where all the series $\psi_j$ converge, that has the origin as accumulation point. Set $\Psi$ to be the map: \[ \begin{array}{cccc} \Psi : & W & \longrightarrow & {\mathbb C}^{N+M}\\ & (t_1,\ldots ,t_N) & \mapsto & ({t_1}^{k'},\ldots ,{t_N}^{k'}, \psi_1(\underline{t}),\ldots ,\psi_M(\underline{t})). \end{array} \] Since $U$ is a neighborhood of the origin, $W':= U\cap W\neq \emptyset$. Ramifying again, if necessary, we may suppose that $k=k'$. Take $\underline{t}\in W'$. There exists $\underline{t}'\in U$ such that $\Psi (\underline{t}) = \Phi (\underline{t}')$. Then $\pi\circ\Psi (\underline{t}) = \pi\circ\Phi (\underline{t}')$, and then $({t_1}^k,\ldots ,{t_N}^k)=({t'_1}^k,\ldots ,{t'_N}^k).$ There exists an $N$-tuple of $k$-th roots of unity $\xi_1,\ldots ,\xi_N$ such that $(t_1,\ldots ,t_N)=(\xi_1 t'_1,\ldots ,\xi_N t'_N)$. By continuity this $N$-tuple is the same for all $\underline{t}\in W'$. We have $\Psi (t_1,\ldots ,t_N)=\Phi (\xi_1 t_1,\ldots ,\xi_N t_N)$ on $W'$. Since $W'$ contains an open set, we have the equality of series $\psi_j (t_1,\ldots ,t_N)=\varphi_j (\xi_1 t_1,\ldots ,\xi_N t_N)$. Then $(y_1,\ldots ,y_M)= (\varphi_1 (\xi_1 {x_1}^{\frac{1}{k}},\ldots ,\xi_N {x_N}^{\frac{1}{k}}),\ldots ,\varphi_M (\xi_1 {x_1}^{\frac{1}{k}},\ldots ,\xi_N {x_N}^{\frac{1}{k}}))$ for some $N$-tuple of roots of unity $(\xi_1,\ldots ,\xi_N)$ and then \[ \ordser{\omega} (y_1,\ldots ,y_M) =\frac{1}{k} \ordser{\omega} (\varphi_1,\ldots ,\varphi_M). \] The conclusion follows from Theorem \ref{Extension del punto uno}.
\end{proof} \section*{Closing remarks.} \label{Closing remarks section} In the literature there are many results relating the Newton polyhedron of a hypersurface and invariants of its singularities. See for example \cite{Kouchirenko:1976}. To extend this type of theorem to arbitrary codimension the usual approach has been to work with the Newton polyhedra of a system of generators (see for example \cite{Oka:1997}). The results presented here suggest that better results may be obtained by using the notion of tropical variety. Both Newton--Puiseux's and McDonald's algorithms have been extended to an ordinary differential equation \cite{Fine:1889} and a partial differential equation \cite{FArocaJCano:2001,FArocaJCanoFJung:2003} respectively. The algorithm presented here can definitely be extended to systems of partial differential equations; a first step in this direction can be found in \cite{FAroca:2009}. \section*{Acknowledgments} The first and the third authors were partially supported by CONACyT 55084 and UNAM: PAPIIT IN 105806 and IN 102307.\\ During the preparation of this work the third author benefited from the post-doctoral fellowship CONACyT 37035. \def$'${$'$} \end{document}
\begin{document} \begin{abstract} We study the existence of stationary classical solutions of the incompressible Euler equation in the plane that approximate singular stationary solutions of this equation. The construction is performed by studying the asymptotics of equation $-\varepsilon^2 \Delta u^\varepsilon=(u^\varepsilon-q-\frac{\kappa}{2\pi} \log \frac{1}{\varepsilon})_+^p$ with Dirichlet boundary conditions and $q$ a given function. We also study the desingularization of pairs of vortices by minimal energy nodal solutions and the desingularization of rotating vortices. \end{abstract} \title{Desingularization of vortices for the Euler equation} \section{Introduction} \subsection{Singular solutions to the Euler equation} The incompressible Euler equations \[ \left\{ \begin{aligned} \nabla \cdot \mathbf{v} &= 0, \\ \mathbf{v}_t + \mathbf{v}\cdot \nabla \mathbf{v}&=-\nabla p, \end{aligned} \right. \] describe the evolution of the velocity $\mathbf{v}$ and the pressure $p$ in an incompressible flow. In $\mathbf{R}^2$, the vorticity $\omega = \nabla \times \mathbf{v}=\partial_1 \mathbf{v}_2-\partial_2 \mathbf{v}_1$ of a solution of the Euler equations obey the transport equation \[ \omega_t + \mathbf{v} \cdot \nabla \omega = 0 \] and the velocity field $\mathbf{v}$ can be recovered from the vorticity function $\omega$ through the Biot--Savart law \[ \mathbf{v} = \omega * \frac{1}{2\pi} \frac{-x^\perp}{\abs{x}^2}, \] where $x^\perp=(x_2, -x_1)$. Special singular solutions of the Euler equations are given by \footnote{One needs to give a meaning to the equation in this case, since the velocity field generated by a vortex point is singular precisely on that vortex point. It consists in considering that each vortex point is transported only by the velocity field created by the other vortex points (see e.g. S.\thinspace Schochet \protect{\cite{Schochet_CPDE_95}} for details and further discussion). 
} \[ \omega = \sum_{i=1}^k \kappa_i \delta_{x_i(t)}, \] corresponding to \[ \mathbf{v}(x)=-\sum_{i=1}^k \frac{\kappa_i}{2\pi} \frac{(x-x_i(t))^\perp}{\abs{x-x_i(t)}^2}, \] and the positions of the vortices $x_i : \mathbf{R} \to \mathbf{R}^2$ satisfy \[ \dot{x}_i(t)=-\sum_{\substack{j=1 \\ j \ne i}}^k \frac{\kappa_j}{2\pi} \frac{(x_i(t)-x_j(t))^\perp}{\abs{x_i(t)-x_j(t)}^2}. \] In terms of the Kirchhoff--Routh function \[ \mathcal{W}(x_1, \dotsc, x_k)=\frac{1}{2} \sum_{i \ne j} \frac{\kappa_i\kappa_j}{2\pi} \log \frac{1}{\abs{x_i-x_j}}, \] the positions obey Kirchhoff's law \begin{equation} \label{equationKirchhoff} \kappa_i \dot{x}_i=(\nabla_{x_i} \mathcal{W})^\perp, \end{equation} which is a Hamiltonian formulation of the dynamics of the vortices. In simply-connected bounded domains $\Omega \subset \mathbf{R}^2$, similar singular solutions exist. If one requires for example that the normal component of $\mathbf{v}$ vanishes on the boundary, the associated Kirchhoff--Routh function is then given by \begin{equation} \label{eqKRDomainsHomog} \mathcal{W}(x_1, \dotsc, x_k)=\frac{1}{2} \sum_{i \ne j} \kappa_i\kappa_j G(x_i, x_j)+\sum_{i=1}^k \frac{\kappa_i^2}{2}H(x_i, x_i), \end{equation} where $G$ is the Green function of $-\Delta$ on $\Omega$ with Dirichlet boundary conditions and $H$ is its regular part.\footnote{The function $x \mapsto H(x, x)$ is called the \emph{Robin function} of $\Omega$.} One can also prescribe a condition $v_n$ on the outward component of the velocity on the boundary. Since we are dealing with an incompressible flow, the boundary data should satisfy $\int_{\partial \Omega} v_n=0$. Let $\mathbf{v}_0$ be the unique harmonic field whose normal component on the boundary is $v_n$; i.e., $\mathbf{v}_0$ satisfies \[ \left\{ \begin{aligned} \nabla \cdot \mathbf{v}_0&=0, & & \text{in $\Omega$}, \\ \nabla \times \mathbf{v}_0&=0, & & \text{in $\Omega$}, \\ n \cdot \mathbf{v}_0&=v_n& & \text{on $\partial \Omega$}, \end{aligned} \right.
\] where $\nabla \times (u, v)=\partial_1 v-\partial_2 u$ and $n$ is the outward normal, then the positions of the vortices are obtained by the modified law \[ \dot{x}_i=(\nabla_{x_i} \mathcal{W})^\perp +\mathbf{v}_0. \] Since $\Omega$ is simply-connected $\mathbf{v}_0$ can be written $\mathbf{v}_0=(\nabla \psi_0)^\perp$ where the stream function $\psi_0$ is characterized up to a constant by \begin{equation} \label{eqpsi0} \left\{ \begin{aligned} -\Delta \psi_0&=0& &\text{in $\Omega$}, \\ -\frac{\partial \psi_0}{\partial \tau}&=v_n & & \text{on $\partial \Omega$}, \end{aligned} \right. \end{equation} where $\frac{\partial \psi_0}{\partial \tau}$ denotes the tangential derivative on $\partial \Omega$. The Kirchhoff--Routh function associated to the vortex dynamics becomes then \begin{equation} \label{KRDomains} \mathcal{W}(x_1, \dotsc, x_k)=\frac{1}{2} \sum_{i \ne j} \kappa_i\kappa_j G(x_i, x_j)+\sum_{i=1}^k \frac{\kappa_i^2}{2}H(x_i, x_i)+\sum_{i=1}^k \kappa_i \psi_0(x_i), \end{equation} see C.\thinspace C.\thinspace Lin \cite{Lin1941} (who uses opposite sign conventions). \subsection{Desingularization of vortices} One way to justify the weak formulation for point vortex solutions of the Euler equations is to approximate these solutions by classical solutions. This can actually be done, on finite time intervals, by considering regularized initial data for the vorticity (see e.g.\ C.\thinspace Marchioro and M. Pulvirenti \cite{MarchioroPulvirenti1983}). Critical points of the Kirchhoff--Routh function $\mathcal{W}$ give rise to stationary vortex points solutions of the Euler equations. As noted above, these weak stationary solutions can be approximated by classical solutions of the Euler equations. These do not need be stationary solutions though, and one can wish to approximate the stationary vortex-point solutions by stationary classical solutions. 
In the simplest case, corresponding to a single point vortex in a simply-connected domain, we obtain the following \begin{theorem}\label{thm:resu} Let $\Omega \subset \mathbf{R}^2$ be a bounded simply-connected smooth\footnote{Here and in the sequel, smooth means Lipschitz and is sufficient for our goals.} domain and $v_n:\partial \Omega \to \mathbf{R}\in L^s(\partial \Omega)$ for some $s>1$ be such that $\int_{\partial \Omega} v_n = 0$. Let $\kappa >0$ be given. For $\varepsilon>0$ there exist smooth stationary solutions $\mathbf{v}_\varepsilon$ of the Euler equation in $\Omega$ with outward boundary flux given by $v_n$, corresponding to vorticities $\omega_\varepsilon$, such that ${\rm supp}(\omega_\varepsilon) \subset B(x_\varepsilon, C\varepsilon)$ for some $x_\varepsilon \in \Omega$ and $C>0$ not depending on $\varepsilon$. Moreover, as $\varepsilon \to 0$, \[ \int_\Omega \omega_\varepsilon \to \kappa, \] and \[ \mathcal{W}(x^\varepsilon) \to \sup_{x \in \Omega} \mathcal{W}(x). \] \end{theorem} Other situations, corresponding to pairs of vortices of opposite signs, multiply-connected bounded domains or unbounded domains are discussed in Section~\ref{sect:resu}. We are aware essentially of two methods to construct stationary solutions of the Euler equations that we call the vorticity method and the stream-function method. The vorticity method was introduced by V.\thinspace Arnold (see \cite{ArnoldKhesin}*{Chapter II \S 2}), and was implemented successfully by G.\thinspace R.\thinspace Burton \cite{Burton1988} and B.\thinspace Turkington \cite{Turkington1983}. It roughly consists in maximizing the kinetic energy \[ \frac{1}{2}\int_{\Omega} \int_{\Omega} \omega(x)G(x, y)\omega(y)\, dx\, dy+\int_{\Omega} \psi_0(x)\omega(x)\, dx+\frac{1}{2} \int_{\Omega} \abs{\nabla \psi_0}^2, \] under some constraints on the sublevel sets of $\omega$. 
The function $\omega$ is the vorticity of the flow and a stream function $\psi$ is the solution to \[ \left\{ \begin{aligned} -\Delta \psi &=\omega & & \text{in $\Omega$},\\ \psi&=\psi_0 & & \text{on $\partial \Omega$}. \end{aligned} \right. \] Considering suitable families of constraints on the sublevel sets of $\omega$, one can obtain families of solutions converging to stationary vortex-point solutions. The differentiability of those solutions is not guaranteed (the solutions correspond to vortex patches of constant density). The stream-function method starts from the observation that if $\psi$ satisfies \[ -\Delta \psi=f(\psi), \] for some arbitrary function $f \in C^1(\mathbf{R})$, then $\mathbf{v}=(\nabla \psi)^\perp$ and $p=F(\psi)-\frac{1}{2}\abs{\nabla \psi}^2$, with $F(s)=\int_0^s f$ form a stationary solution to the Euler equations. Moreover, the velocity $\mathbf{v}$ is irrotational on the set where $f(\psi)=0$. We now set $q=-\psi_0$ and $u=\psi-\psi_0$, so that $u=0$ on $\partial \Omega$ and $-\Delta u = f(u-q)$ in $\Omega$. If we assume that $\inf_{\Omega} q > 0$ and $f(t)=0$ when $t \le 0$, the vorticity set $\{ x \: :\: f(\psi(x))> 0 \}$ is bounded away from the boundary. When $f$ also satisfies some monotonicity and growth conditions, $\Omega=\mathbf{R}^2_+$ and $q(x)=W x_1+d$ with $W > 0$ and $d>0$, J.\thinspace Norbury \cite{Norbury1975} has shown the existence of solutions to $-\Delta u = \nu f(u-q)$, where $\nu > 0$ is a Lagrange multiplier that is a priori unknown, by minimizing $\int_{\Omega} \abs{\nabla u}^2$ under the constraint \[ \int_{\Omega} F(u-q)=\mu \] in $H^1_0(\Omega)$ when $\Omega$ is the half-plane $\mathbf{R}^2_+$. M.\thinspace S.\thinspace Berger and L.\thinspace E.\thinspace Fraenkel \cite{BergerFraenkel1980} have obtained corresponding results for a bounded domain $\Omega \subset \mathbf{R}^2$, and they began studying the asymptotics for variable $\mu$ and $q$, but the lack of information on $\nu$ remained an obstacle.
The unknown $\nu$ can be avoided by minimizing $\int_{\Omega} \frac{1}{2}\abs{\nabla u}^2-\frac{1}{\varepsilon^2}F(u-q)$ under the natural constraint $\int_{\Omega} \frac{1}{2}\abs{\nabla u}^2-\frac{1}{\varepsilon^2}uf(u-q)=0$. Yang Jianfu \cite{Yang1991} has used this approach in $\mathbf{R}^2_+$ with $q(x)=Wx_1+d$ and has studied the asymptotic behavior of the solution $u^\varepsilon$ when $\varepsilon \to 0$: If \begin{align*} A_\varepsilon&=\{ x \in \mathbf{R}^2_+ \: :\: f(u^\varepsilon-q) > 0\}, & \kappa_\varepsilon&=\frac{1}{\varepsilon^2} \int_{\Omega} f(u^\varepsilon-q), \end{align*} and $x^\varepsilon \in A^\varepsilon$, then $\diam A^\varepsilon \to 0$, $\dist(x^\varepsilon, \partial \mathbf{R}^2_+)\to 0$, and \[ \frac{u^\varepsilon}{\kappa^\varepsilon}-G(\cdot, x^\varepsilon) \to 0 \] in $W^{1, r}_{\mathrm{loc}}(\mathbf{R}^2_+)$, for $r \in [1, 2)$. Li Gongbao, Yan Shusen and Yang Jianfu \cite{LiYanYang2005} obtained a similar result on bounded domains, with the additional information that $q(a^\varepsilon) \to \min_{\Omega} q$. These results are in striking contrast with the observation made at the beginning that the dynamics of the vortices is governed by the Kirchhoff--Routh function $\mathcal{W}$ defined by \eqref{KRDomains}, which implies that stationary vortices should be localized around a critical point of $x \mapsto \frac{\kappa^2}{2} H(x, x)-\kappa q(x)$. In fact, the results in \cites{Yang1991, LiYanYang2005} do not answer the question about the desingularization of stationary vortex point solutions to the Euler equation. 
Indeed, in the case of bounded domains for example, their solutions satisfy $\lVert \nabla u\rVert_{\mathrm{L}^2}^2=O\bigl(\logeps^{-1}\bigr)$, so that testing the equation against the function $\min(u^\varepsilon, q)$ and using the fact that $q$ is harmonic and nonnegative, we have \[ \kappa^\varepsilon \min_{\partial \Omega} q \le \frac{1}{\varepsilon^2}\int_{\Omega} f(u^\varepsilon-q) =\int_{\Omega \setminus A^\varepsilon} \abs{\nabla u^\varepsilon}^2=O\bigl(\logeps^{-1}\bigr), \] i.e.\ $\kappa^\varepsilon \to 0$. In some sense, the family of solutions $u^\varepsilon$ provides a desingularization of point-vortex solutions with vanishing vorticity. The asymptotic position is consistent with the fact that when the vorticities tend to zero, the term $\sum_{i=1}^k \kappa_i \psi_0(x_i)$ becomes dominant in the Kirchhoff--Routh function \eqref{KRDomains}. In order to desingularize point-vortex solutions with non-vanishing vorticity, M.\thinspace S.\thinspace Berger and L.\thinspace E.\thinspace Fraenkel \cite{BergerFraenkel1980}*{Remark 2} suggest that $q$ should grow like $\log \frac{1}{\varepsilon}$. This brings us to the study of the problem \begin{equation} \label{problemPeps} \left\{ \begin{aligned} -\Delta u^\varepsilon &=\frac{1}{\varepsilon^2} f(u^\varepsilon - q^\varepsilon ) & &\text{in $\Omega$, }\\ u^\varepsilon &= 0 & &\text{on $\partial \Omega$}, \end{aligned} \right. \tag{\protect{$\mathcal{P}^\varepsilon$}} \end{equation} where $q^\varepsilon=q+\frac{\kappa}{2\pi} \log \frac{1}{\varepsilon}$. In Section \ref{sectionSingleVortex}, we study $(\mathcal{P}^\varepsilon)$ in a bounded domain: we first construct solutions and then analyze their asymptotic behavior. Theorem~\ref{thm:resu} is an easy consequence of the results in Section~\ref{sectionSingleVortex}.
In Section~\ref{sectionmultiply} we present an extension to multiply-connected domains, while in Section~\ref{sectUnbounded}, we present an extension to unbounded domains which are a perturbation of a half-plane. In Section~\ref{sectionVortexPair} we slightly modify $(\mathcal{P}^\varepsilon)$ in order to construct desingularized solutions for two point vortices of opposite signs. As a final remark, our results seem connected with the work of M.\thinspace del Pino, M.\thinspace Kowalczyk, and M.\thinspace Musso \cite{delPinoKowalczykMusso2005} on the equation \[ -\Delta u=\varepsilon^2 K(x)e^u \] for which the energy concentrates in small balls around points $x_1^\varepsilon, \dotsc, x_k^\varepsilon$. These points tend to a critical point of the function $-\sum_{i=1}^k 2\log K(x_i)-8\pi H(x_i, x_i)-\sum_{i \ne j} 8\pi G(x_i, x_j)$. The connection is clear when one rewrites their equation as $-\Delta u=\frac{1}{\varepsilon^2}\exp(u+\log K-\frac{8\pi}{2\pi}\log \frac{1}{\varepsilon})$. Other related work includes the study of the equation $-\Delta u = u^p$ as $p \to \infty$ by P.\thinspace Esposito, M.\thinspace Musso and A.\thinspace Pistoia \cites{EspositoMussoPistoia2006,EspositoMussoPistoia2007}, and the recent work of T.\thinspace Bartsch, A.\thinspace Pistoia and T.\thinspace Weth \cite{BartschPistoiaWeth} in which systems of three and four vortices are desingularized by studying the equation $-\Delta u= \varepsilon^2 \sinh u$. In all the references, whereas the vorticity concentrates at points, its support does not shrink as $\varepsilon \to 0$. We also bring to the attention of the reader that there is a similar situation with similar results for three-dimensional axisymmetric incompressible inviscid flows by vorticity methods \cites{Burton1987, FridemannTurkington1981} and stream-function methods \cites{BergerFraenkel1974, AmbrosettiStruwe1989, Yang1995}.
However, we are not aware of a counterpart of the present work for three-dimensional axisymmetric incompressible inviscid flows. \noindent{\bf Acknowledgements.} This work was initiated during a visit of the second author at Laboratoire Jacques-Louis Lions of Universit\'e Pierre \& Marie Curie. The authors wish to thank Franck Sueur for fruitful remarks following a first version of the manuscript. \section{Single vortices in bounded domains} \label{sectionSingleVortex} In this section, $\Omega\subset \mathbf{R}^2$ is a bounded simply-connected smooth domain, $f : \mathbf{R} \to \mathbf{R}$ is the real function defined by $f(s)=s_+^p$ for some $1<p<+\infty$ and where $s_+=\max(s, 0)$, $\kappa > 0$ is given as well as $q\in \mathrm{W}^{1, r}(\Omega)$ for some $r > 2$.\footnote{Notice that for the proof of Theorem~\ref{thm:resu} we only require a harmonic function $q$ but the proofs of Theorems~\ref{thmLocalMinimum} and \ref{thmRotating} require more general $q$.} We will consider solutions of the boundary value problem \eqref{problemPeps} where $\varepsilon>0$ is a real parameter. The solutions we consider are the least energy solutions obtained by minimizing the energy functional \begin{equation} \label{energyFunctional} \mathcal{E}^\varepsilon(u)= \int_{\Omega} \Bigl(\frac{|\nabla u|^2}{2} - \frac{1}{\varepsilon^2}F(u-q^\varepsilon)\Bigr) \end{equation} over the natural constraint given by the Nehari manifold \[ \mathcal{N}^\varepsilon = \left\{ u\in H^1_0(\Omega)\setminus \{0\} \ : \ \langle d\mathcal{E}^\varepsilon(u), u\rangle = 0\right\}, \] where $F(s)=\frac{1}{p+1}s_+^{p+1}$ is a primitive of $f$. It is standard to prove the following (see e.g.~\cite{Willem1996}*{Theorem 2.18}) \begin{proposition} \label{prop:2.1} Assume that $q^\varepsilon\geq 0$ on $\Omega$, so that $\mathcal{N}^\varepsilon \neq \emptyset$, and define \[ c^\varepsilon = \inf_{u\in \mathcal{N}^\varepsilon} \mathcal{E}^\varepsilon(u).
\] Then, there exists $u^\varepsilon \in \mathcal{N}^\varepsilon$ such that $\mathcal{E}^\varepsilon(u^\varepsilon)=c^\varepsilon$, and $u^\varepsilon$ is a positive solution of $(\mathcal{P}^\eps)$. \end{proposition} Note that $q$ is bounded since $r>2$, and therefore $q^\varepsilon\geq 0$ provided $\varepsilon$ is sufficiently small. Our focus is the asymptotics of $u^\varepsilon$ when $\varepsilon \to 0$. In order to describe the asymptotic behavior of $u^\varepsilon$, we introduce the limiting profile $U_\kappa : \mathbf{R}^2 \to \mathbf{R}$ defined as the unique radially symmetric solution of the problem \[ \tag{\protect{$\mathcal{U}_\kappa$}} \label{Ukappa} \left\{ \begin{aligned} &-\Delta U_\kappa = f(U_\kappa), \\ &\int_{\mathbf{R}^2} f(U_\kappa) =\kappa. \end{aligned} \right. \] For every $\kappa>0$, there exists $\rho_\kappa>0$ such that \[ U_\kappa(y)= \left\{ \begin{aligned} &V_{\rho_\kappa}(y)& &\text{if $y \in B(0, \rho_\kappa)$}, \\ &\frac{\kappa}{2\pi} \log \frac{\rho_\kappa}{\abs{y}} & &\text{if $y \in \mathbf{R}^2 \setminus B(0, \rho_\kappa)$}, \end{aligned}\right. \] where $V_\rho : B(0,\rho) \to \mathbf{R}$ satisfies \[ \left\{ \begin{aligned} \displaystyle -\Delta V_\rho &= V_\rho^p & & \text{in $B(0, \rho)$}, \\ V_\rho &= 0 && \text{on $\partial B(0, \rho)$}. \end{aligned} \right. \] One can show that $\kappa=\gamma \rho^{-\frac{2}{p-1}}$, for some constant $\gamma > 0$ depending on the value of $p$. The Kirchhoff-Routh function $\mathcal{W}$ for one vortex of vorticity $\kappa$ is defined by \[ \mathcal{W}(x)=\frac{\kappa^2}{2} H(x, x)-\kappa q(x). \] Let us also define the quantity \[ \mathcal{C} = \frac{\kappa^2}{4\pi} \log \rho_\kappa + \int_{B(0, \rho_\kappa)}\Bigl(\frac{|\nabla U_{\rho_\kappa}|^2}{2} - \frac{U_{\rho_\kappa}^{p+1}}{p+1}\Bigr). \] While the function $\mathcal{W}$ depends on $x \in \Omega$ and on $\kappa$, the quantity $\mathcal{C}$ only depends on $\kappa$ and on $p$. 
We set \begin{equation} \begin{aligned}\label{defiq} A^\varepsilon&=\Big\{ x \in \Omega \: :\: u^\varepsilon(x)> q^\varepsilon(x)\Big\}, \\ \omega^\varepsilon&=\frac{1}{\varepsilon^2} f(u^\varepsilon-q^\varepsilon), \\ \kappa^\varepsilon&=\int_{\Omega} \omega^\varepsilon, \\ x^\varepsilon&=\frac{1}{\kappa^\varepsilon}\int_{\Omega} x \, \omega^\varepsilon(x)\, dx, \\ \rho^\varepsilon&=\rho_{\kappa^\varepsilon}, \end{aligned} \end{equation} and respectively refer to these as the vorticity set, the vorticity, the total vorticity, the center of vorticity, and the vorticity radius. We will prove \begin{theorem}\label{thm:K1} As $\varepsilon \to 0$, we have \[ u^\varepsilon=U_{\kappa^\varepsilon} \Big(\frac{\cdot-x^\varepsilon}{\varepsilon}\Big)+\kappa^\varepsilon\Bigl(\frac{1}{2\pi} \log \frac{1}{\varepsilon \rho^\varepsilon}+ H(x^\varepsilon, \cdot)\Bigr)+o(1), \] \text{in $\mathrm{W}^{2, 1}_\mathrm{loc}(\Omega)$, in $\mathrm{W}^{1, 2}_0(\Omega)$, and in $\mathrm{L}^\infty(\Omega)$}, where \[ \kappa^\varepsilon=\kappa+\frac{2\pi}{\log \frac{1}{\varepsilon}}\Bigl(q(x^\varepsilon)-\kappa H(x^\varepsilon, x^\varepsilon) -\frac{\kappa}{2\pi} \log \frac{1}{\rho_\kappa} \Bigr)+o(\logeps^{-1}), \] and \[ \mathcal{W}(x^\varepsilon) \to \sup_{x \in \Omega} \mathcal{W}(x). \] One also has \[ B(x^\varepsilon, \Bar{r}^\varepsilon) \subset A^\varepsilon \subset B(x^\varepsilon, \mathring{r}^\varepsilon), \] with $\Bar{r}^\varepsilon=\varepsilon \rho_\kappa+o(\varepsilon)$ and $\mathring{r}^\varepsilon=\varepsilon \rho_\kappa +o(\varepsilon)$. Finally, \[ \mathcal{E}^\varepsilon (u^\varepsilon)= \frac{\kappa^2}{4\pi}\log \frac{1}{\varepsilon}-\mathcal{W}(x^\varepsilon)+\mathcal{C}+o(1). \] \end{theorem} Since $\mathcal{W}(x) \to -\infty$ as $x \to \partial \Omega$, by Theorem~\ref{thm:K1}, up to a subsequence, $x^\varepsilon \to x^*\in \Omega$. 
Combined with standard elliptic estimates this yields the convergence $u^\varepsilon \to \kappa G(x^*, \cdot\, )$ in $\mathrm{W}^{1, p}_0(\Omega)$ for any $p<2$ and in $\mathcal{C}^k_{\mathrm{loc}}(\Omega\setminus \{x^*\})$ for any $k\in \mathbf{N}$. If $\partial \Omega$ is smooth enough, then one also has convergence in $\mathcal{C}^k_{\mathrm{loc}}(\Bar{\Omega}\setminus \{x^*\})$. The proof of Theorem~\ref{thm:K1} is twofold. First, in Corollary~\ref{cor:upper}, we prove a sharp upper bound for the critical level $c^\varepsilon$. Then, in Proposition~\ref{prop:1mai} we show that any solution satisfying this upper bound needs to satisfy the asymptotic expansion. \subsection{Upper bounds on the energy} \label{upperBounds} We will derive upper bounds for $c^\varepsilon$ by constructing elements of $\mathcal{N}^\varepsilon$ similar to the asymptotic expression of Theorem~\ref{thm:K1}. \begin{lemma} \label{lemmaHatuNehari} For every $\Hat{x} \in \Omega$, if $\varepsilon>0$ is small enough, there exists \[ \Hat{\kappa}^\varepsilon=\kappa+\frac{2\pi}{\log \tfrac{1}{\varepsilon}}\Bigl( q(\Hat{x})-\kappa H(\Hat{x}, \Hat{x})+\dfrac{\kappa}{2\pi} \log \rho_\kappa \Bigr)+O\bigl(\logeps^{-2}\bigr), \] such that, if \[ \Hat{u}^\varepsilon(x)=U_{\Hat{\kappa}^\varepsilon}\Bigl(\frac{x-\Hat{x}}{\varepsilon}\Bigr)+\Hat{\kappa}^\varepsilon \Bigl( \frac{1}{2\pi} \log \frac{1}{\varepsilon\rho_{\Hat{\kappa}^\varepsilon}}+H(\Hat{x}, x) \Bigr), \] then \[ \Hat{u}^\varepsilon\in \mathcal{N}^\varepsilon. \] Moreover, we have \[ \Hat{A}^\varepsilon:=\Bigl\{ x \: :\: \Hat{u}^\varepsilon(x) > q(x)+\frac{\kappa}{2\pi} \log \frac{1}{\varepsilon} \Bigr\} \subset B(\Hat{x}, \Hat{r}^\varepsilon), \] with $\Hat{r}^\varepsilon=O(\varepsilon)$.
\end{lemma} \begin{proof} For $\sigma \in \mathbf{R}$, define \begin{align*} \Hat{\kappa}^{\varepsilon, \sigma}&=\frac{q^\varepsilon(\Hat{x})+\sigma}{\tfrac{1}{2\pi} \log \tfrac{1}{\varepsilon \rho_\kappa}+H(\Hat{x}, \Hat{x})}, \\ \Hat{\rho}^{\varepsilon, \sigma}&=\rho_{\Hat{\kappa}^{\varepsilon, \sigma}}, \\ \Hat{u}^{\varepsilon, \sigma}(x)&=U_{\Hat{\kappa}^{\varepsilon, \sigma}}\Bigl(\frac{x-\Hat{x}}{\varepsilon}\Bigr)+\Hat{\kappa}^{\varepsilon, \sigma} \Bigl( \frac{1}{2\pi} \log \frac{1}{\varepsilon\rho_{\Hat{\kappa}^{\varepsilon, \sigma}}}+H(\Hat{x}, x) \Bigr). \end{align*} First note that when $\varepsilon>0$ is sufficiently small, $\Hat{u}^{\varepsilon, \sigma}(x)=\Hat{\kappa}^{\varepsilon, \sigma} G(\Hat{x}, x)$ in a neighborhood of $\partial \Omega$, so that $\Hat{u}^{\varepsilon, \sigma} \in \mathrm{W}^{1, 2}_0(\Omega)$ and we can define \[ g^\varepsilon(\sigma)=\langle d \mathcal{E}^\varepsilon (\Hat{u}^{\varepsilon, \sigma}), \Hat{u}^{\varepsilon, \sigma} \rangle. \] Among the terms involved in $g^\varepsilon(\sigma)$, we may already compute \[ \begin{split} \int_{\Omega} \abs{\nabla \Hat{u}^{\varepsilon, \sigma}}^2 &=\int_{B(\Hat{x}, \varepsilon \rho_{\Hat{\kappa}^{\varepsilon, \sigma}})}\!\!\!\!\!\!\!\!\!\!\!\! \abs{\nabla (U_{\Hat{\kappa}^{\varepsilon, \sigma}}(\tfrac{\cdot-\Hat{x}}{\varepsilon}) +\Hat{\kappa}^{\varepsilon, \sigma} H(\Hat{x}, \cdot))}^2 \\ &\qquad\qquad+(\Hat{\kappa}^{\varepsilon, \sigma})^2\int_{\Omega \setminus B(\Hat{x}, \rho_{\Hat{\kappa}^{\varepsilon, \sigma}} \varepsilon)}\!\!\!\!\!\!\!\!\!\!\!\! \abs{\nabla G(\Hat{x}, \cdot)}^2 \\ &=\int_{B(0, \rho_{\Hat{\kappa}^{\varepsilon, \sigma}})}\!\!\!\!\!\!\!\!\!\!\!\!
\abs{\nabla U_{\Hat{\kappa}^{\varepsilon, \sigma}}}^2+O(\varepsilon) \\ &\qquad\qquad+(\Hat{\kappa}^{\varepsilon, \sigma})^2\Bigl(\frac{1}{2\pi} \log \frac{1}{\varepsilon \rho_{\Hat{\kappa}^{\varepsilon, \sigma}}}+H(\Hat{x}, \Hat{x})+O(\varepsilon) \Bigr) \\ &=\int_{B(0, \rho_{\Hat{\kappa}^{\varepsilon, \sigma}})}\!\!\!\!\!\!\!\!\!\!\!\! \abs{\nabla U_{\Hat{\kappa}^{\varepsilon, \sigma}}}^2 +\Hat{\kappa}^{\varepsilon, \sigma} \bigl( q^\varepsilon(\Hat{x})+\sigma \bigr)+O(\varepsilon). \end{split} \] In order to estimate the second term involved in $g^\varepsilon(\sigma)$, namely $\frac{1}{\varepsilon^2}\int_{\Omega} f(\Hat{u}^{\varepsilon, \sigma}-q^\varepsilon)\Hat{u}^{\varepsilon, \sigma}$, we first claim that \begin{equation} \label{HatAepssigma} \Hat{A}^{\varepsilon, \sigma}:=\bigl\{x \in \Omega \: :\: \Hat{u}^{\varepsilon, \sigma}(x) > q^\varepsilon(x)\bigr\} \subset B(\Hat{x}, r^\varepsilon), \end{equation} with $r^\varepsilon=O(\varepsilon)$. Indeed, let $x \in \Hat{A}^{\varepsilon, \sigma} \setminus B(\Hat{x}, \Hat{\rho}^{\varepsilon, \sigma}\varepsilon)$. One has, by definition of $\Hat{u}^{\varepsilon, \sigma}(x)$ and of $\Hat{\kappa}^{\varepsilon, \sigma}$, \[ \Hat{\kappa}^{\varepsilon, \sigma}\Bigl(\frac{1}{2\pi}\log \frac{1}{\varepsilon}+\frac{1}{2\pi} \log \frac{\varepsilon}{\abs{x-\Hat{x}}}+H(\Hat{x}, x)\Bigr) > q(x)+\frac{\kappa}{2\pi} \log \frac{1}{\varepsilon}, \] so that \begin{equation} \label{ineqVorticitySetUpperFrac} \frac{\dfrac{1}{2\pi} \log \dfrac{1}{\varepsilon}+\dfrac{1}{2\pi} \log \dfrac{\varepsilon}{\abs{x-\Hat{x}}}+ H(\Hat{x}, x)}{\dfrac{\kappa}{2\pi} \log \dfrac{1}{\varepsilon}+q(x)} \ge \dfrac{\log \dfrac{1}{\varepsilon}+H(\Hat{x}, \Hat{x})}{\dfrac{\kappa}{2\pi} \log \dfrac{1}{\varepsilon}+q(\Hat{x})+\sigma}. 
\end{equation} Since $q$ and $H(\Hat{x}, \cdot)$ are bounded functions, one obtains that \[ \frac{1}{\kappa}+\frac{\log \frac{\varepsilon}{\abs{x-\Hat{x}}}}{\kappa \log \dfrac{1}{\varepsilon}}\ge \frac{1}{\kappa}+O\bigl(\logeps^{-1}\bigr), \] and the claim is proved. We deduce from~\eqref{HatAepssigma}, that for every $x \in \Hat{A}^{\varepsilon, \sigma}$ \[ \Hat{u}^{\varepsilon, \sigma}(x)-q^\varepsilon(x)=U_{\Hat{\kappa}^{\varepsilon, \sigma}}\Bigl(\frac{x-\Hat{x}}{\varepsilon}\Bigr)+\sigma+O(\varepsilon). \] We may now estimate \[ \begin{split} \frac{1}{\varepsilon^2}\int_{\Omega}& f(\Hat{u}^{\varepsilon, \sigma}-q^\varepsilon)\Hat{u}^{\varepsilon, \sigma} =\frac{1}{\varepsilon^2}\int_{\Hat{A}^{\varepsilon, \sigma}} f(\Hat{u}^{\varepsilon, \sigma}-q^\varepsilon)\Hat{u}^{\varepsilon, \sigma}\\ &=\frac{1}{\varepsilon^2}\int_{\Hat{A}^{\varepsilon, \sigma}} f(\Hat{u}^{\varepsilon, \sigma}-q^\varepsilon)U_{\Hat{\kappa}^{\varepsilon, \sigma}}(\tfrac{\cdot-\Hat{x}}{\varepsilon}) \\ &\qquad\qquad+\frac{\Hat{\kappa}^{\varepsilon, \sigma}}{\varepsilon^2}\int_{\Hat{A}^{\varepsilon, \sigma}} f(\Hat{u}^{\varepsilon, \sigma}-q^\varepsilon)\bigl(\tfrac{1}{2\pi}\log \tfrac{1}{\varepsilon \Hat{\rho}^{\varepsilon, \sigma}}+ H(\Hat{x}, \cdot)\bigr) \\ &=\int_{\mathbf{R}^2} f(U_{\Hat{\kappa}^{\varepsilon, \sigma}}+\sigma)U_{\Hat{\kappa}^{\varepsilon, \sigma}}+O(\varepsilon) \\ &\qquad\qquad+ \Hat{\kappa}^{\varepsilon, \sigma}\bigl(\tfrac{1}{2\pi} \log \tfrac{1}{\varepsilon \rho_\kappa}+ H(\Hat{x}, \Hat{x})+O(\varepsilon)\bigr)\Bigl(\int_{\mathbf{R}^2} f(U_\kappa+\sigma)+O(\varepsilon)\Bigr)\\ &=\int_{\mathbf{R}^2} f(U_{\Hat{\kappa}^{\varepsilon, \sigma}}+\sigma)U_{\Hat{\kappa}^{\varepsilon, \sigma}} \\ &\qquad\qquad+ \big(\tfrac{\kappa}{2\pi} \log \tfrac{1}{\varepsilon}+q(\Hat{x})+\sigma\big)\int_{\mathbf{R}^2}f(U_{\Hat{\kappa}^{\varepsilon, \sigma}}+\sigma)+O(\varepsilon \logeps). 
\end{split} \] Summarizing, we have \[ \begin{split} g^\varepsilon(\sigma)&=\frac{\kappa}{2\pi}\log \frac{1}{\varepsilon} \Bigl(\Hat{\kappa}^{\varepsilon, \sigma} - \int_{\mathbf{R}^2} f(U_{\Hat{\kappa}^{\varepsilon, \sigma}}+\sigma)\Bigr)+O(1)\\ &=\frac{\kappa}{2\pi} \log \frac{1}{\varepsilon} \Bigl(\int_{\mathbf{R}^2} f(U_{\kappa})-f(U_{\kappa}+\sigma)\Bigr)+O(1). \end{split} \] Since $g^\varepsilon$ is continuous and $\sigma \cdot \Bigl(\int_{\mathbf{R}^2} f(U_\kappa)-f(U_\kappa+\sigma)\Bigr)<0$ when $\sigma \ne 0$, there exists $\sigma^\varepsilon$ such that $g^\varepsilon(\sigma^\varepsilon)=0$ and $\sigma^\varepsilon \to 0$ as $\varepsilon\to 0$. One then sets $\Hat{\kappa}^\varepsilon=\Hat{\kappa}^{\varepsilon, \sigma^\varepsilon}$. \end{proof} \begin{lemma} \label{lemmaEnergyHatu} For every $\Hat{x} \in \Omega$, we have \[ c^\varepsilon \le \frac{\kappa^2}{4\pi}\log \frac{1}{\varepsilon} -\mathcal{W}(\Hat{x})+\mathcal{C}+o(1)\qquad\text{as }\varepsilon\to 0. \] \end{lemma} \begin{proof} By Lemma~\ref{lemmaHatuNehari}, $\Hat{u}^\varepsilon \in \mathcal{N}^\varepsilon$, so that $c^\varepsilon \leq \mathcal{E}^\varepsilon(\Hat{u}^\varepsilon)$. We compute the energy of $\Hat{u}^\varepsilon$ as follows. First, \[ \begin{split} \int_{\Omega} \abs{\nabla \Hat{u}^\varepsilon}^2 &=-\int_{\Omega} \Hat{u}^\varepsilon \Delta \Hat{u}^\varepsilon\\ &= -\int_{\mathbf{R}^2} U_\kappa \Delta U_\kappa+(\Hat{\kappa}^\varepsilon)^2\Bigl(\frac{1}{2\pi} \log \frac{1}{\varepsilon}+H(\Hat{x}, \Hat{x})\Bigr)+o(1)\\ &=\int_{\mathbf{R}^2} \abs{\nabla (U_\kappa)_+}^2+\frac{\kappa^2}{2\pi} \log \frac{1}{\varepsilon} +2\kappa q(\Hat x)-\kappa^2 H(\Hat x, \Hat x) +\frac{\kappa^2}{2\pi} \log{\rho_\kappa} +o(1).
\end{split} \] Next, \[ \begin{split} \frac{1}{\varepsilon^2}\int_{\Omega} F(\Hat{u}^\varepsilon-q^\varepsilon) &=\frac{1}{\varepsilon^2}\int_{\Hat{A}^\varepsilon} F(\Hat{u}^\varepsilon-q^\varepsilon)\\ &=\frac{1}{\varepsilon^2}\int_{\Hat{A}^\varepsilon} F(\Hat{u}^\varepsilon-q^\varepsilon(\Hat{x}))+o(1)\\ &=\int_{\mathbf{R}^2} F(U_\kappa)+o(1), \end{split} \] and the conclusion follows from the definitions of $\mathcal{W}$ and $\mathcal{C}$. \end{proof} \begin{corollary}\label{cor:upper} We have \[ c^\varepsilon \leq \frac{\kappa^2}{4\pi} \log \frac{1}{\varepsilon} -\sup_{x\in \Omega} \mathcal{W}(x) +\mathcal{C} + o(1). \] \end{corollary} \subsection{Asymptotic behavior of solutions} The main goal of this section is to prove \begin{proposition}\label{prop:1mai} Let $(v^\varepsilon)$ be a family of solutions to \eqref{problemPeps} such that $v^\varepsilon \ne 0$ and \begin{equation} \label{assumptEnergyUpperbound} \mathcal{E}^\varepsilon(v^\varepsilon) \le \frac{\kappa^2}{4\pi} \log \frac{1}{\varepsilon}+O(1), \end{equation} as $\varepsilon \to 0$. Define the quantities $A^\varepsilon$, $\omega^\varepsilon$, $\kappa^\varepsilon$, $x^\varepsilon$ and $\rho^\varepsilon$ for $v^\varepsilon$ as in \eqref{defiq} for $u^\varepsilon$.
Then \[ v^\varepsilon=U_{\kappa^\varepsilon} (\tfrac{\cdot-x^\varepsilon}{\varepsilon})+\kappa^\varepsilon\Bigl(\frac{1}{2\pi} \log \frac{1}{\varepsilon \rho^\varepsilon}+ H(x^\varepsilon, \cdot)\Bigr)+o(1), \] in $\mathrm{W}^{2, 1}_\mathrm{loc}(\Omega)$, in $\mathrm{W}^{1, 2}_0(\Omega)$, and in $\mathrm{L}^\infty(\Omega)$, where \[ \kappa^\varepsilon=\kappa+\frac{2\pi}{\log \frac{1}{\varepsilon}}\Bigl(q(x^\varepsilon)-\kappa H(x^\varepsilon, x^\varepsilon) -\frac{\kappa}{2\pi} \log \frac{1}{\rho_\kappa} \Bigr)+o(\logeps^{-1}), \] In particular, we have \[ \mathcal{E}^\varepsilon (v^\varepsilon)= \frac{\kappa^2}{4\pi}\log \frac{1}{\varepsilon}-\mathcal{W}(x^\varepsilon)+\mathcal{C}+o(1) \] and \[ B(x^\varepsilon, \Bar{r}^\varepsilon) \subset A^\varepsilon \subset B(x^\varepsilon, \mathring{r}^\varepsilon), \] with $\Bar{r}^\varepsilon=\varepsilon \rho_\kappa+o(\varepsilon)$ and $\mathring{r}^\varepsilon=\varepsilon \rho_\kappa +o(\varepsilon)$. \end{proposition} In other words, $v^\varepsilon$ satisfies the same asymptotics as the one stated in Theorem~\ref{thm:K1} for $u^\varepsilon$ except for the convergence of $x^\varepsilon$. In the sequel, $v^\varepsilon$ denotes a family of nontrivial solutions to \eqref{problemPeps} verifying \eqref{assumptEnergyUpperbound}. We divide the proof of Proposition \ref{prop:1mai} into several steps. \subsubsection{Step 1: First quantitative properties of the solutions} In this section, we derive various types of estimates for $v^\varepsilon$. 
\begin{proposition}\label{propositionEstimatesueps} We have, as $\varepsilon \to 0$, \begin{gather} \label{ineqMuAeps}\muleb{2}(A^\varepsilon) = O\bigl(\logeps^{-1}\bigr), \\ \label{ineqVortexEnergy} \int_{A^\varepsilon} \abs{\nabla (v^\varepsilon-q^\varepsilon)}^2 =O(1), \\ \label{ineqVortexPotential}\frac{1}{\varepsilon^2}\int_{A^\varepsilon} F(v^\varepsilon-q^\varepsilon) =O(1), \\ \label{eq:2etoiles}\int_{\Omega\setminus A^\varepsilon} |\nabla v^\varepsilon|^2 \leq \frac{\kappa^2}{2\pi} \log\frac{1}{\varepsilon} + O(1), \\ \label{ineqTotalVorticity}\int_{\Omega} \omega^\varepsilon \leq \kappa + O\bigl(\logeps^{-1}\bigr). \end{gather} \end{proposition} \begin{proof} First note that for $\varepsilon>0$ sufficiently small, \begin{equation} \label{ineqEnergy} \Bigl(\frac{1}{2}-\frac{1}{p+1}\Bigr)\int_\Omega |\nabla v^\varepsilon|^2 \leq \mathcal{E}^\varepsilon(v^\varepsilon). \end{equation} Indeed, \[ \mathcal{E}^\varepsilon(v^\varepsilon) = \frac{1}{2}\int_\Omega |\nabla v^\varepsilon|^2 - \frac{1}{p+1}\int_\Omega \frac{1}{\varepsilon^2}f(v^\varepsilon-q^\varepsilon)(v^\varepsilon-q^\varepsilon)_+, \] and, by testing $(\mathcal{P}^\varepsilon)$ against $v^\varepsilon$, \[ 0 = \frac{1}{p+1}\int_\Omega |\nabla v^\varepsilon|^2 - \frac{1}{p+1}\int_\Omega \frac{1}{\varepsilon^2}f(v^\varepsilon-q^\varepsilon)v^\varepsilon. \] Since $(v^\varepsilon-q^\varepsilon)_+\leq v^\varepsilon$ when $q^\varepsilon\geq 0$, and hence when $\varepsilon$ is sufficiently small, \eqref{ineqEnergy} follows by subtraction. In order to obtain \eqref{ineqMuAeps}, first note that since $q$ is bounded from below, for $\varepsilon$ sufficiently small, $\inf_{\Omega} q^\varepsilon > \frac{\kappa}{4\pi} \log \frac{1}{\varepsilon}$.
By the Chebyshev and Poincar\'e inequalities, it follows that \[ \muleb{2}(A^\varepsilon) \le \Bigl(\frac{1}{\inf_{\Omega} q^\varepsilon}\Bigr)^2 \int_{\Omega} \abs{v^\varepsilon}^2 \le \frac{C}{\logeps^2} \int_{\Omega} \abs{\nabla v^\varepsilon}^2\le \frac{C'}{\logeps}, \] where the last inequality is a consequence of \eqref{ineqEnergy} and \eqref{assumptEnergyUpperbound}. We claim that \begin{equation} \label{ineqomegaepsL1} \int_{\Omega} \omega^\varepsilon \leq C. \end{equation} By testing $(\mathcal{P}^\eps)$ against $\min(v^\varepsilon, q^\varepsilon)$ we obtain \begin{equation} \label{ineqVorticityEnergy} \begin{split} \int_{\Omega} \omega^\varepsilon= \int_{A^\varepsilon} \frac{1}{\varepsilon^2}f(v^\varepsilon-q^\varepsilon)& \leq \frac{1}{\inf_\Omega q^\varepsilon}\int_{A^\varepsilon} \frac{q^\varepsilon}{\varepsilon^2}f(v^\varepsilon-q^\varepsilon)\\ &=\frac{1}{\inf_\Omega q^\varepsilon}\int_{\Omega\setminus A^\varepsilon} \abs{\nabla v^\varepsilon}^2 + \frac{1}{\inf_\Omega q^\varepsilon}\int_{A^\varepsilon} \nabla v^\varepsilon\nabla q. \end{split} \end{equation} In view of \eqref{ineqEnergy}, this yields \[ \kappa^\varepsilon \leq C \frac{\mathcal{E}^\varepsilon(v^\varepsilon) +o(1)}{\log \frac{1}{\varepsilon}}, \] and the estimate \eqref{ineqomegaepsL1} follows from assumption \eqref{assumptEnergyUpperbound}. Testing now $(\mathcal{P}^\eps)$ against $(v^\varepsilon-q^\varepsilon)_+$, we obtain \begin{equation} \label{eqNehariVortex} \int_{A^\varepsilon} |\nabla (v^\varepsilon-q^\varepsilon)|^2 = \int_{A^\varepsilon} \frac{1}{\varepsilon^2} (v^\varepsilon-q^\varepsilon)_+^{p+1} -\int_{A^\varepsilon}\nabla (v^\varepsilon-q^\varepsilon) \nabla q.
\end{equation} The Gagliardo--Nirenberg inequality \cite{Nirenberg1959}*{p.\thinspace 125} yields \begin{equation} \label{ineqGN} \int_{A^\varepsilon} \frac{1}{\varepsilon^2} (v^\varepsilon-q^\varepsilon)_+^{p+1} \leq C \int_{A^\varepsilon} \frac{1}{\varepsilon^2} (v^\varepsilon-q^\varepsilon)_+^{p} \left(\int_{A^\varepsilon} |\nabla (v^\varepsilon-q^\varepsilon)|^2\right)^{\frac{1}{2}}, \end{equation} so that \[ \begin{split} \int_{A^\varepsilon} |\nabla (v^\varepsilon-q^\varepsilon)|^2 &\leq C \bigl(\Norm{\omega^\varepsilon}_{\mathrm{L}^1}+\Norm{\nabla q^\varepsilon}_{\mathrm{L}^2(A^\varepsilon)} \bigr) \Bigl(\int_{A^\varepsilon} \abs{\nabla (v^\varepsilon-q^\varepsilon)}^2\Bigr)^{\frac{1}{2}} \\ &\leq C'\Bigl(\int_{A^\varepsilon} \abs{\nabla (v^\varepsilon-q^\varepsilon)}^2\Bigr)^{\frac{1}{2}}. \end{split} \] Inequality \eqref{ineqVortexEnergy} can therefore be deduced from \eqref{ineqomegaepsL1}, and \eqref{ineqVortexPotential} follows from \eqref{eqNehariVortex}. Finally, \[ \begin{split} \frac{1}{2}\int_{\Omega \setminus A^\varepsilon} \abs{\nabla v^\varepsilon}^2&=\mathcal{E}^\varepsilon(v^\varepsilon)+\frac{1}{\varepsilon^2}\int_{A^\varepsilon} F(v^\varepsilon-q^\varepsilon)-\frac{1}{2} \int_{A^\varepsilon} \abs{\nabla v^\varepsilon}^2 \\ &\le \frac{\kappa^2}{4\pi} \log \frac{1}{\varepsilon}+O(1), \end{split} \] so that \eqref{eq:2etoiles} holds, and inequality \eqref{ineqTotalVorticity} then follows from \eqref{ineqVorticityEnergy}. \end{proof} \begin{remark} The use of the Gagliardo--Nirenberg inequality to obtain \eqref{ineqGN} is the only step in our proof that requires $f$ to be a power-like nonlinearity. \end{remark} \subsubsection{Step 2: Structure of the vorticity set} We now examine the vorticity set $A^\varepsilon$ further. Since $A^\varepsilon$ is open, it contains at most countably many connected components that we label $A^\varepsilon_i$, $i \in I^\varepsilon$. If $q$ were a harmonic function (e.g.
if the only goal was to prove Theorem \ref{thm:resu}), one would deduce from the fact that $u^\varepsilon$ is a minimal energy solution that $A^\varepsilon$ is connected whenever $q^\varepsilon \ge 0$ \cite{BergerFraenkel1974}*{Theorem 3F}, \cite{Norbury1975}*{Theorem 3.4}, \cite{AmbrosettiMancini1981}*{Theorem 4}, \cite{Yang1991}*{Theorem 1}, \cite{LiYanYang2005}*{Proposition 3.1}; this would simplify considerably the analysis that we perform below. First we have a control on the total area and on the diameter of each connected component. \begin{lemma} \label{lemmaAreaDiameter} If $\varepsilon > 0$ is sufficiently small, we have \begin{equation} \label{ineqVorticityAreaStrong} \muleb{2}(A^\varepsilon) \le C \varepsilon^2 \end{equation} and, for every $i \in I^\varepsilon$, \begin{equation} \label{ineqVorticityDiameter} \diam(A^\varepsilon_i) \le C \varepsilon. \end{equation} \end{lemma} \begin{proof} Set \[ w^\varepsilon=\frac{v^\varepsilon}{\min_{\partial A^\varepsilon}q^\varepsilon}. \] Since $v^\varepsilon =q^\varepsilon $ on $\partial A^\varepsilon$, we have, by \eqref{eq:2etoiles}, \begin{equation} \label{ineqCapacity} \frac{2\pi}{\capa(A^\varepsilon, \Omega)} \ge \frac{2\pi}{\displaystyle \int_{\Omega\setminus A^\varepsilon} \abs{\nabla w^\varepsilon}^2} \ge 2 \pi \frac{\frac{\kappa^2}{4\pi} \bigl(\log \frac{1}{\varepsilon}\bigr)^2+O(\logeps)}{\displaystyle \int_{\Omega\setminus A^\varepsilon} \abs{\nabla v^\varepsilon}^2}=\log \frac{1}{\varepsilon}+O(1). \end{equation} By Proposition~\ref{propositionCapacityArea}, it follows that \[ \log \frac{\muleb{2}(\Omega)}{\muleb{2}(A^\varepsilon)} \ge 2\log \frac{1}{\varepsilon}+O(1), \] from which \eqref{ineqVorticityAreaStrong} follows. Similarly, we have \[ \frac{2\pi}{\capa(A^\varepsilon_i, \Omega)} \geq \frac{2\pi}{\capa(A^\varepsilon, \Omega)} \geq \log \frac{1}{\varepsilon}+O(1). 
\] It hence follows from Proposition~\ref{propositionBoundDiameter} and the boundedness of $\Omega$ that \[ \log C\Bigl(1+\frac{1}{\diam (A_i^\varepsilon)}\Bigr) \ge \log \frac{1}{\varepsilon}+O(1), \] which implies \eqref{ineqVorticityDiameter}. \end{proof} \begin{lemma} \label{lemmaVortexSplit} There exist positive constants $\gamma$ and $c$ such that when $\varepsilon$ is small enough, for every $i \in I^\varepsilon$, if \begin{equation} \label{eqSplitVortices} \int_{A^\varepsilon_i} \abs{\nabla (v^\varepsilon-q^\varepsilon)}^2 > \gamma^2, \end{equation} then \begin{gather} \label{ineqLowerBoundArea} \muleb{2}(A_i^\varepsilon)\ge c\varepsilon^2, \\ \label{ineqLowerBoundDiam} \diam(A^\varepsilon_i)\ge c\varepsilon, \\ \label{ineqLowerBoundDistance} \dist(A^\varepsilon_i, \partial \Omega)\ge c, \\ \label{ineqLowerBoundVortex} \int_{A^\varepsilon_i} \omega^\varepsilon \ge c, \end{gather} while if \eqref{eqSplitVortices} does not hold, then for every $s \ge 1$, \begin{equation} \label{ineqfsVanishing} \int_{A^\varepsilon_i} f(v^\varepsilon-q^\varepsilon)^s \le C \Norm{\nabla q}_{\mathrm{L}^r(A^\varepsilon_i)}^{sp} \muleb{2}(A^\varepsilon_i)^{1+\frac{sp}{2}(1-\frac{2}{r})}, \end{equation} where $C>0$ only depends on $s \ge 1$. \end{lemma} \begin{proof} Starting from \eqref{eqNehariVortex}, and applying the Sobolev and Cauchy--Schwarz inequalities we obtain \begin{multline} \label{ineqGradientVortices} \int_{A^\varepsilon_i} \abs{\nabla (v^\varepsilon-q^\varepsilon)_+}^2 = \int_{A^\varepsilon_i} \frac{f(v^\varepsilon-q^\varepsilon)}{\varepsilon^2}(v^\varepsilon-q^\varepsilon)_+-\int_{A^\varepsilon_i} \nabla q \cdot \nabla (v^\varepsilon-q^\varepsilon)\\ \le C\frac{\muleb{2}(A^\varepsilon_i)}{\varepsilon^2} \Bigl(\int_{A^\varepsilon_i} \abs{\nabla (v^\varepsilon-q^\varepsilon)_+}^2\Bigr)^{\frac{p+1}{2}}\\ + \Norm{\nabla q}_{\mathrm{L}^2(A^\varepsilon_i)}\Norm{\nabla (v^\varepsilon-q^\varepsilon)_+}_{\mathrm{L}^2(A^\varepsilon_i)}.
\end{multline} By Lemma~\ref{lemmaAreaDiameter}, we may choose $\gamma$ sufficiently small so that \[ \gamma^{p-1}\le \frac{\varepsilon^2}{2C\muleb{2}(A^\varepsilon_i)}, \] independently of $\varepsilon$, and therefore if \eqref{eqSplitVortices} does not hold we obtain \begin{equation} \label{ineqVanVorticesuq} \frac{1}{2}\int_{A^\varepsilon_i} \abs{\nabla (v^\varepsilon-q^\varepsilon)_+}^2\le \int_{A^\varepsilon_i} \abs{\nabla q}^2. \end{equation} Applying successively the Sobolev inequality, \eqref{ineqVanVorticesuq} and Lemma~\ref{lemmaAreaDiameter}, we conclude \[ \begin{split} \int_{A^\varepsilon_i} f(v^\varepsilon-q^\varepsilon)^s &\le C \Bigl( \int_{A^\varepsilon_i} \abs{\nabla (v^\varepsilon-q^\varepsilon)_+}^2 \Bigr)^\frac{sp}{2} \muleb{2}(A^\varepsilon_i)\\ &\le C'\Bigl( \int_{A^\varepsilon_i} \abs{\nabla q}^2 \Bigr)^\frac{sp}{2} \muleb{2}(A^\varepsilon_i)\\ &\le C'' \Norm{\nabla q}_{\mathrm{L}^r(A^\varepsilon_i)}^{sp} \muleb{2}(A^\varepsilon_i)^{1+\frac{sp}{2}(1-\frac{2}{r})}. \end{split} \] Assume now that \eqref{eqSplitVortices} holds. Combined with \eqref{ineqGradientVortices} and \eqref{ineqVortexEnergy}, this yields \[ \gamma^2 \le C \frac{\muleb{2}(A^\varepsilon_i)}{\varepsilon^2}+C \Norm{\nabla q}_{\mathrm{L}^2(A^\varepsilon_i)}. \] Since $\Norm{\nabla q}_{\mathrm{L}^2(A^\varepsilon_i)} \to 0$ as $\varepsilon \to 0$, one must have $\muleb{2}(A^\varepsilon_i) \ge c \varepsilon^2$. The isodiametric inequality then yields \eqref{ineqLowerBoundDiam}. Turning back to \eqref{ineqCapacity}, and using Proposition~\ref{propositionBoundDiameter}, we obtain \[ \log C\Bigl(1+\frac{\dist(A_i^\varepsilon, \partial \Omega)}{\varepsilon}\Bigr) \ge \log \frac{1}{\varepsilon}+O(1), \] from which \eqref{ineqLowerBoundDistance} follows.
Testing $(\mathcal{P}^\eps)$ against $(v^\varepsilon-q^\varepsilon)_+ \chi_{A^\varepsilon_i}$, applying the Gagliardo--Nirenberg inequality and using then \eqref{ineqVortexEnergy}, we have \[ \int_{A^\varepsilon_i} \abs{\nabla (v^\varepsilon-q^\varepsilon)}^2 \leq C\Bigl(\int_{A^\varepsilon_i} \omega^\varepsilon+\Norm{\nabla q^\varepsilon}_{\mathrm{L}^2(A^\varepsilon_i)}\Bigr)\Bigl(\int_{A^\varepsilon_i} \abs{\nabla (v^\varepsilon-q^\varepsilon)}^2\Bigr)^{\frac{1}{2}} \le C'\int_{A^\varepsilon_i} \omega^\varepsilon, \] (cf.\ the proof of Proposition~\ref{propositionEstimatesueps}) and the inequality \eqref{ineqLowerBoundVortex} follows. \end{proof} In view of Lemma~\ref{lemmaVortexSplit}, we can split the vortices into two classes: the vanishing vortices \begin{align} \label{eqDefVeps} V^\varepsilon&=\bigcup \Bigl\{A_i^\varepsilon \: :\: \int_{A_i^\varepsilon} \abs{\nabla (v^\varepsilon-q^\varepsilon)}^2 \le \gamma^2\Bigr\}, \\ \intertext{and the essential vortices} \label{eqDefEeps} E^\varepsilon&=\bigcup \Bigl\{A_i^\varepsilon \: :\: \int_{A_i^\varepsilon} \abs{\nabla (v^\varepsilon-q^\varepsilon)}^2 > \gamma^2\Bigr\}. \end{align} In view of \eqref{ineqVortexEnergy}, $E^\varepsilon$ contains finitely many connected components. We can thus split $E^\varepsilon=\bigcup_{j=1}^{k^\varepsilon} E^\varepsilon_j$, where $E^\varepsilon_j$ are nonempty open sets which are not necessarily connected such that, up to a subsequence, \begin{equation} \label{eqDistEepsi} \frac{\dist(E^\varepsilon_i, E^\varepsilon_j)}{\varepsilon} \to \infty \end{equation} as $\varepsilon \to 0$, and \begin{equation} \label{ineqDiamEepsi} \Tilde{\rho}= \limsup_{\varepsilon \to 0} \frac{\diam (E^\varepsilon_i)}{\varepsilon} < \infty. \end{equation} By definition of $E^\varepsilon$ and by \eqref{ineqVortexEnergy}, $k^\varepsilon$ is bounded as $\varepsilon \to 0$. Finally, \begin{equation} \label{eqDistbord} \liminf_{\varepsilon \to 0} \dist(E^\varepsilon_i, \partial \Omega)>0.
\end{equation} We set \begin{align*} \omega^\varepsilon_v&=\omega^\varepsilon \charfun{V^\varepsilon}, & \omega^\varepsilon_i&=\omega^\varepsilon \charfun{E^\varepsilon_i}, & \kappa^\varepsilon_i&=\int_{\Omega} \omega^\varepsilon_i. \end{align*} By \eqref{ineqTotalVorticity}, we have \begin{equation} \label{ineqSumVortices} \sum_{i=1}^{k^\varepsilon} \kappa^\varepsilon_i \le \kappa+O\bigl(\logeps^{-1}\bigr). \end{equation} \begin{lemma} \label{lemmaVanishingVorticity} For every $s \ge 1$, we have \[ \Norm{\omega^\varepsilon_v}_{\mathrm{L}^s} = o\bigl(\varepsilon^{p(1-\frac{2}{r})-2(1-\frac{1}{s})}\bigr). \] In particular, if $\frac{1}{s} \ge 1-p(\frac{1}{2}-\frac{1}{r})$, then $\omega^\varepsilon_v \to 0$ in $\mathrm{L}^s(\Omega)$. \end{lemma} \begin{proof} Set \[ I_v^\varepsilon=\Bigl\{ i \in I^\varepsilon\: :\: \int_{A_i^\varepsilon} \abs{\nabla (v^\varepsilon-q^\varepsilon)}^2 \le \gamma^2\Bigr\}. \] We have, by Lemma~\ref{lemmaVortexSplit} and by \eqref{ineqVorticityAreaStrong}, \[ \begin{split} \int_{\Omega} \abs{\omega^\varepsilon_v}^s &=\sum_{i \in I^\varepsilon_v}\int_{A^\varepsilon_i} \abs{\omega^\varepsilon_v}^s \\ &\le C\frac{1}{\varepsilon^{2s}} \sum_{i \in I^\varepsilon_v} \Norm{\nabla q}_{\mathrm{L}^r(A^\varepsilon_i)}^{sp} \muleb{2}(A^\varepsilon_i)^{1+\frac{sp}{2}(1-\frac{2}{r})}\\ &\le C\muleb{2}(V^\varepsilon) \max_{i \in I^\varepsilon_v} \Norm{\nabla q}_{\mathrm{L}^r(A^\varepsilon_i)}^{sp} \frac{\muleb{2}(A^\varepsilon_i)^{1+sp(\frac{1}{2}-\frac{1}{r})}}{\varepsilon^{2s}}\\ &\le C' \Norm{\nabla q}_{\mathrm{L}^r(V^\varepsilon)}^{sp} \varepsilon^{sp(1-\frac{2}{r})-2(s-1)}. \qedhere \end{split} \] \end{proof} \begin{lemma} \label{lemmaNonVanisingVortex} For $\varepsilon > 0$ sufficiently small, $k^\varepsilon \ge 1$. \end{lemma} \begin{proof} Assume by contradiction that there is a sequence $(\varepsilon_n)$ such that $\varepsilon_n \to 0$ and $k^{\varepsilon_n} =0$.
Take $s > 1$ such that $\frac{1}{s} \ge 1-p(\frac{1}{2}-\frac{1}{r})$. Since $\omega^{\varepsilon_n}=\omega^{\varepsilon_n}_v \to 0$ in $\mathrm{L}^s(\Omega)$ by Lemma~\ref{lemmaVanishingVorticity}, by classical estimates \cite{GilbargTrudinger2001}*{Theorem 8.15}, $v^{\varepsilon_n} \to 0$ in $\mathrm{L}^\infty(\Omega)$. Therefore, when $n$ is large enough, one would have $\omega^{\varepsilon_n}=0$ and thus $v^{\varepsilon_n} = 0$, a contradiction. \end{proof} \subsubsection{Step 3: Small scale asymptotics} We define \[ x^\varepsilon_i=\frac{1}{\kappa^\varepsilon_i}\int_{\Omega}\omega^\varepsilon_i(x)x\, dx. \] By \eqref{eqDistEepsi} and \eqref{eqDistbord}, $x^\varepsilon_i \in \Omega$ and $x^\varepsilon_i\ne x^\varepsilon_j$ when $i \ne j$ and $\varepsilon$ is small. We also define \[ v^\varepsilon_i(y)=v^\varepsilon(x^\varepsilon_i+\varepsilon y)-q^\varepsilon(x^\varepsilon_i), \] and \[ q^\varepsilon_i(y)=q(x^\varepsilon_i+\varepsilon y)-q(x^\varepsilon_i). \] By \eqref{eqDistbord}, for every $R>0$, $v^\varepsilon_i$ is well-defined in $B(0, R)$ when $\varepsilon$ is sufficiently small, and it satisfies there the equation \begin{equation} \label{eqLimit} -\Delta v^\varepsilon_i=f(v^\varepsilon_i-q^\varepsilon_i). \end{equation} \begin{lemma} \label{lemmaSmallScaleLocalEstimates} For every $R > 0$ and $s\ge 1$, there exist $\varepsilon(R)>0$ and $C>0$ such that for $0<\varepsilon\leq \varepsilon(R)$ we have \begin{equation} \label{ineqRenormEstimate} \Norm{f(v^\varepsilon_i-q^\varepsilon_i)}_{\mathrm{L}^s(B(0, R))}\le C.
\end{equation} Moreover, for $2\Tilde{\rho} < \abs{y} < R$, we have \begin{equation} \label{ineqvepsiDecay} \Bigl\lvert v^\varepsilon_i(y)-\frac{\kappa^\varepsilon_i}{2\pi}\log \frac{1}{\varepsilon\abs{y}}+q^\varepsilon(x^\varepsilon_i)-\kappa^\varepsilon_i H(x^\varepsilon_i, x^\varepsilon_i) -\sum_{j \ne i} \kappa_j^\varepsilon G(x^\varepsilon_i, x^\varepsilon_j)\Bigr\rvert \le \frac{\kappa}{2\pi} \log \frac{\abs{y}}{\abs{y}-\Tilde{\rho}}+o(1), \end{equation} and \begin{equation} \label{ineqNablavepsiDecay} \Bigl\lvert \nabla v^\varepsilon_i(y)-\frac{\kappa}{2\pi} \frac{y}{\abs{y}^2}\Bigr\rvert \le \frac{\Bar{C}}{\abs{y}^3}+o(1) \end{equation} as $\varepsilon \to 0$, where $\Bar{C}$ does not depend on $R$. \end{lemma} \begin{proof} Consider $D^{\varepsilon, R}_i=\bigcup \bigl\{A^\varepsilon_j \: :\: A^\varepsilon_j \cap B(x^\varepsilon_i, \varepsilon R)\ne \emptyset \bigr\}$. By \eqref{ineqVorticityDiameter}, $\muleb{2}(D_i^{\varepsilon, R})=O(\varepsilon^2)$ as $\varepsilon \to 0$, so that one obtains, by Sobolev's inequality, \begin{multline*} \int_{B(0, R)} f(v^\varepsilon_i-q^\varepsilon_i)^s \le \frac{1}{\varepsilon^2}\int_{D_i^{\varepsilon, R}} f(v^\varepsilon-q^\varepsilon)^s \\ \le C\frac{1}{\varepsilon^2} \Norm{\nabla(v^\varepsilon-q^\varepsilon)_+}_{\mathrm{L}^2(A^\varepsilon)}^{sp} \muleb{2}(D_i^{\varepsilon, R}) = O(1), \end{multline*} which proves \eqref{ineqRenormEstimate}. We have \begin{equation} \label{eqvepsiGomega} v^\varepsilon_i(y)=\int_{\Omega} G(x^\varepsilon_i+\varepsilon y, z) \omega^\varepsilon(z)\, dz-q^\varepsilon(x^\varepsilon_i). \end{equation} We first prove \eqref{ineqvepsiDecay}. By a classical estimate \cite{GilbargTrudinger2001}*{Theorem 8.15}, \begin{equation} \label{ineqGomegav} \Bigl\lvert\int_{\Omega} G(x, z) \omega^\varepsilon_v(z)\, dz\Bigr\rvert \le C \Norm{\omega^\varepsilon_v}_{\mathrm{L}^s}.
\end{equation} Since by Lemma~\ref{lemmaVanishingVorticity}, $\omega^\varepsilon_v \to 0$ in $\mathrm{L}^s(\Omega)$ for some $s > 1$, we have \[ \int_{\Omega} G(x^\varepsilon_i+\varepsilon y, z) \omega^\varepsilon_v(z)\, dz \to 0 \] uniformly in $y$. We also have, since $\diam E^\varepsilon_j=O(\varepsilon)$, $\abs{x^\varepsilon_i-x^\varepsilon_j}/\varepsilon \to \infty$, for $j \ne i$, and $\abs{y} \le R$, \[ \int_{\Omega} G(x^\varepsilon_i+\varepsilon y, z) \omega^\varepsilon_j(z)\, dz =\kappa_j^\varepsilon G(x^\varepsilon_i, x^\varepsilon_j)+o(1), \] and \[ \int_{\Omega} H(x^\varepsilon_i+\varepsilon y, z) \omega^\varepsilon_i(z)\, dz =\kappa^\varepsilon_i H(x^\varepsilon_i, x^\varepsilon_i)+O(\varepsilon). \] Finally, we have \[ \begin{split} \int_{\Omega} \frac{1}{2\pi}\log \frac{1}{\abs{x^\varepsilon_i+\varepsilon y-z}} \omega^\varepsilon_i(z)\, dz &=\int_{E^\varepsilon_i} \frac{1}{2\pi} \log \frac{1}{\abs{x^\varepsilon_i+\varepsilon y-z}} \omega^\varepsilon_i(z)\, dz\\ &=\frac{\kappa^\varepsilon_i}{2\pi}\log \frac{1}{\varepsilon \abs{y}}+\frac{1}{2\pi} \int_{E^\varepsilon_i} \log \frac{\varepsilon \abs{y}}{\abs{x^\varepsilon_i+\varepsilon y-z}} \omega^\varepsilon_i(z)\, dz. \end{split} \] In view of \eqref{ineqDiamEepsi}, $\abs{x^\varepsilon_i-z} \le (1+o(1))\Tilde{\rho}\varepsilon$ when $z\in {\rm supp}(\omega^\varepsilon_i)$ so that for sufficiently small $\varepsilon$ \[ \left\lvert\int_{E^\varepsilon_i} \log \frac{\abs{\varepsilon y}}{\abs{\varepsilon y+x^\varepsilon_i-z}} \omega^\varepsilon_i(z)\, dz\right\rvert\le \kappa^\varepsilon_i \log\frac{\abs{y}}{\abs{y}-\Tilde{\rho}} + o(1). \] We now prove \eqref{ineqNablavepsiDecay}. By Lemma~\ref{lemmaVanishingVorticity}, $\varepsilon \omega^\varepsilon_v \to 0$ in $\mathrm{L}^s(\Omega)$ for $\frac{1}{s} \ge \frac{1}{2}-p(\frac{1}{2}-\frac{1}{r})$. 
Choosing $s > 2$, by \eqref{eqvepsiGomega} and classical elliptic estimates, one obtains that \[ \int_{\Omega} \varepsilon G(x, z) \omega^\varepsilon_v(z)\, dz \to 0 \] as a function of $x$ in $\mathrm{W}^{2, s}_{\mathrm{loc}}(\Omega)$ and thus in $C^1_{\mathrm{loc}}(\Omega)$. Therefore, \[ \int_{\Omega} \varepsilon \nabla G(x_i^\varepsilon+\varepsilon y, z) \omega^\varepsilon_v(z)\, dz \to 0 \] uniformly in $y$ on compact subsets. One also has \[ \int_{\Omega} \varepsilon \nabla G(x^\varepsilon_i+\varepsilon y, z) \omega^\varepsilon_j(z)\, dz =\varepsilon \kappa_j^\varepsilon \nabla G(x^\varepsilon_i, x^\varepsilon_j)+o(1) \] and \[ \int_{\Omega} \varepsilon \nabla H(x^\varepsilon_i+\varepsilon y, z) \omega^\varepsilon_j(z)\, dz =\varepsilon \kappa^\varepsilon_i \nabla H(x_i, x_j)+O(\varepsilon^2). \] Finally, recall that $\int_{\Omega} \omega^\varepsilon_i=\kappa^\varepsilon_i$ and $\int_{\Omega} (x^\varepsilon_i-z)\omega^\varepsilon_i(z)\, dz=0$, so that \begin{multline*} \int_{\Omega} \varepsilon \frac{x^\varepsilon_i+\varepsilon y -z}{\abs{x^\varepsilon_i+\varepsilon y-z}^2} \omega^\varepsilon_i(z)\, dz-\kappa^\varepsilon_i\frac{y}{\abs{y}^2}= \\ \varepsilon \int_{E^\varepsilon_i} \Bigl(\frac{x^\varepsilon_i+\varepsilon y -z}{\abs{x^\varepsilon_i+\varepsilon y-z}^2}- \frac{\varepsilon y}{\abs{\varepsilon y}^2}-L(\varepsilon y) (x^\varepsilon_i-z) \Bigr)\omega^\varepsilon_i(z)\, dz, \end{multline*} where \[ L(a)h=\frac{\abs{a}^2h-2(a \cdot h)a}{\abs{a}^4}. 
\] On the other hand, for $2 \abs{h} \le \abs{a}$, \[ \Bigl \lvert \frac{a+h}{\abs{a+h}^2}-\frac{a}{\abs{a}^2} - L(a)h \Bigr \rvert \le C \frac{\abs{h}^2}{\abs{a}^3}, \] so that, by \eqref{ineqDiamEepsi}, \begin{multline*} \Bigl\lvert \int_{\Omega} \varepsilon \frac{x^\varepsilon_i+\varepsilon y -z}{\abs{x^\varepsilon_i+\varepsilon y-z}^2} \omega^\varepsilon_i(z) \, dz-\kappa^\varepsilon_i\frac{y}{\abs{y}^2}\Bigr\rvert \\ \le \int_\Omega \varepsilon \frac{\abs{x^\varepsilon_i-z}^2}{\abs{\varepsilon y}^3}\omega_i^\varepsilon (z)\, dz \le C \varepsilon \frac{(\diam E^\varepsilon_i)^2}{{\abs{\varepsilon y}^3}}\le \frac{\Bar{C}}{\abs{y}^3}, \end{multline*} and the lemma is proved. \end{proof} \begin{lemma} \label{lemmaLocalAsymptotics} When $\varepsilon$ is small, we have $k^\varepsilon=1$. Moreover, \[ \kappa^\varepsilon_1=\kappa+\frac{2\pi}{\log \frac{1}{\varepsilon}}\Bigl(q(x^\varepsilon_1)-\kappa H(x^\varepsilon_1, x^\varepsilon_1)-\frac{\kappa}{2\pi} \log \frac{1}{\rho_\kappa} \Bigr)+o(\logeps^{-1}) \] and $v^\varepsilon_1 \to U_\kappa$ in $\mathrm{W}^{3, r}_{\mathrm{loc}}(\mathbf{R}^2)$ as $\varepsilon\to 0$. \end{lemma} \begin{proof} Set \[w^\varepsilon_i(y)= v^\varepsilon_i(y)-\frac{\kappa^\varepsilon_i}{2\pi}\log \frac{1}{\varepsilon}+q^\varepsilon(x^\varepsilon_i)-\kappa^\varepsilon_i H(x^\varepsilon_i, x^\varepsilon_i) -\sum_{j \ne i} \kappa_j^\varepsilon G(x^\varepsilon_i, x^\varepsilon_j), \] so that in particular \[ -\Delta w^\varepsilon_i=f(v^\varepsilon_i-q^\varepsilon_i). \] By \eqref{ineqRenormEstimate}, \eqref{ineqvepsiDecay} and classical elliptic estimates \cite{GilbargTrudinger2001}*{Theorem 9.11}, the sequence $(w^\varepsilon_i)$ is bounded in $\mathrm{W}^{2, s}_{\mathrm{loc}}(\mathbf{R}^2)$ for every $s \ge 1$. By Rellich's compactness theorem, it is compact in $\mathrm{W}^{1, t}_{\mathrm{loc}}(\mathbf{R}^2)$ for every $1 \le t < \infty$, and therefore bounded on compact subsets. 
On the other hand, by construction, all the $v^\varepsilon_i+q^\varepsilon_i(x^\varepsilon_i)-q^\varepsilon_i$ take positive and negative values at a uniformly bounded distance from the origin, so that there exists a bounded sequence $\check{x}_i^\varepsilon$ such that $v^\varepsilon_i(\check{x}_i^\varepsilon)=q^\varepsilon_i(\check{x}_i^\varepsilon)-q^\varepsilon_i(x_i^\varepsilon)$. Therefore, $v^\varepsilon_i(\check{x}_i^\varepsilon)$ and $w^\varepsilon_i(\check{x}_i^\varepsilon)$ remain bounded and we obtain that for each $i \in \{1, \dotsc, k^\varepsilon\}$ \[ q^\varepsilon(x^\varepsilon_i)-\frac{\kappa^\varepsilon_i}{2\pi}\log \frac{1}{\varepsilon}-\kappa^\varepsilon_i H(x^\varepsilon_i, x^\varepsilon_i) -\sum_{j \ne i} \kappa_j^\varepsilon G(x^\varepsilon_i, x^\varepsilon_j)=O(1). \] This implies that \begin{equation} \label{eqVorticitiesGreen} \frac{\kappa^\varepsilon_i}{2\pi}\log \frac{1}{\varepsilon} + \sum_{\substack{ j \ne i}} \kappa_j^\varepsilon \log \frac{1}{\abs{x^\varepsilon_i-x^\varepsilon_j}} = \frac{\kappa}{2\pi}\log \frac{1}{\varepsilon} +O(1), \end{equation} and, in view of \eqref{ineqSumVortices}, that \[ k^\varepsilon \frac{\kappa}{2\pi} \log \frac{1}{\varepsilon}\ge \sum_{1 \le i, j \le k^\varepsilon } \frac{\kappa^\varepsilon_i }{2\pi}\log \frac{1}{\varepsilon} +O(1)=k^\varepsilon \frac{\kappa}{2\pi}\log \frac{1}{\varepsilon}+\sum_{\substack{1 \le i, j \le k^\varepsilon \\ j \ne i}} \kappa^\varepsilon_j\log \frac{\abs{x^\varepsilon_i-x^\varepsilon_j}}{\varepsilon}+O(1). \] Therefore, \[ \sum_{\substack{1 \le i, j \le k^\varepsilon \\ j \ne i}} \kappa^\varepsilon_j \log \frac{\abs{x^\varepsilon_i-x^\varepsilon_j}}{\varepsilon} \le O(1), \] and since $\abs{x^\varepsilon_i-x^\varepsilon_j}/\varepsilon \to \infty $ as $\varepsilon \to 0$, we deduce by \eqref{ineqLowerBoundVortex} that $k^\varepsilon\le 1$ for $\varepsilon$ sufficiently small. By Lemma~\ref{lemmaNonVanisingVortex}, $k^\varepsilon=1$.
Going back to \eqref{eqVorticitiesGreen}, we get \[ \kappa^\varepsilon_1=\kappa+O\bigl(\logeps^{-1}\bigr). \] Since $v_1^\varepsilon-q^\varepsilon_1$ is compact in $\mathrm{W}^{1, r}_{\mathrm{loc}}(\mathbf{R}^2)$ and $f \in C^1(\mathbf{R})$, the sequence $f(v_1^\varepsilon-q^\varepsilon_1)$ is compact in $\mathrm{W}^{1, r}_{\mathrm{loc}}(\mathbf{R}^2)$. In view of \eqref{eqLimit}, $v^\varepsilon_1$ is compact in $\mathrm{W}^{3, r}_{\mathrm{loc}}$. Let $v$ be one of its accumulation points. It satisfies \[ -\Delta v=f(v) \] and \[ \int_{\mathbf{R}^2} f(v)=\kappa. \] Moreover, letting $\varepsilon$ go to zero, by \eqref{ineqvepsiDecay} we obtain \[ v(y)=\frac{\kappa}{2\pi} \log \frac{\Tilde{\rho}}{\abs{y}}+O\Bigl(\log\bigl(1+\frac{1}{\abs{y}} \bigr)\Bigr) \] for some $\Tilde{\rho} \in \mathbf{R}$, and \[ \nabla v(y)=\frac{\kappa}{2\pi}\frac{y}{\abs{y}^2}+O\Bigl(\frac{1}{\abs{y}^3}\Bigr). \] By a symmetry result of L.\thinspace A.\thinspace Caffarelli and A.\thinspace Friedman \cite[Theorem 1]{CaffarelliFriedman1980} (see also \cite[Theorem 4.2]{Fraenkel2000}), $v$ is radial, and therefore \[ v(y)=\frac{\kappa}{2\pi}\log \frac{\rho_\kappa}{\abs{y}} \] when $\abs{y} \ge \rho_\kappa$. Hence, $v=U_\kappa$. In view of \eqref{ineqvepsiDecay}, this yields \[ \Bigl\lvert \frac{\kappa}{2\pi}\log \frac{\rho_\kappa}{\abs{y}}+q^\varepsilon(x^\varepsilon_1)-\frac{\kappa^\varepsilon_1}{2\pi}\log \frac{1}{\varepsilon\abs{y}}-\kappa^\varepsilon_1 H(x^\varepsilon_1, x^\varepsilon_1) \Bigr\rvert \le \kappa \log \frac{\abs{y}}{\abs{y}-R}+o(1). 
\] First fixing $y$, this implies that \[ \frac{\kappa-\kappa^\varepsilon_1}{2\pi} \log \frac{1}{\varepsilon}=O(1), \] and next we deduce that for every $2\Tilde{\rho}<\abs{y}<R$, \[ \Bigl\lvert \frac{\kappa}{2\pi}\log \frac{\rho_\kappa}{\varepsilon}+q(x^\varepsilon_1)-\frac{\kappa^\varepsilon_1}{2\pi}\log \frac{1}{\varepsilon}-\kappa^\varepsilon_1 H(x^\varepsilon_1, x^\varepsilon_1) \Bigr\rvert \le \kappa \log \frac{\abs{y}}{\abs{y}-\Tilde{\rho}}+o(1), \] as $\varepsilon \to 0$. We obtain the required asymptotic development of $\kappa^\varepsilon_1$ by letting $R\to +\infty$ and choosing sufficiently large $\abs{y}$. \end{proof} \subsubsection{Step 4: Global asymptotics} We are now going to prove that $v^\varepsilon$ is well approximated by \[ \Tilde{v}^\varepsilon=U_{\kappa^\varepsilon_1}\Bigl(\frac{\cdot-x^\varepsilon_1}{\varepsilon}\Bigr)+\kappa^\varepsilon_1\Bigl(\frac{1}{2\pi} \log \frac{1}{\varepsilon \rho_{\kappa_1^\varepsilon}}+H(x^\varepsilon_1, \cdot)\Bigr). \] \begin{proposition} \label{propositionAsymptoticsW21} We have \[ v^\varepsilon=\Tilde{v}^\varepsilon+o(1) \] in $\mathrm{W}^{2, 1}_{\mathrm{loc}}(\Omega)$, in $\mathrm{W}^{1, 2}_0(\Omega)$, and in $\mathrm{L}^\infty(\Omega)$. \end{proposition} \begin{proof} Choose $r>\Tilde{\rho}$ so that $E^\varepsilon_1 \subset B(x^\varepsilon_1, \varepsilon r)$ when $\varepsilon$ is small. By Lemma~\ref{lemmaLocalAsymptotics}, and the invariance of the $\dot{\mathrm{W}}^{2, 1}$ semi-norm by scaling, we have \[ \int_{B(x^\varepsilon_1, 2\varepsilon r)} \abs{D^2 v^\varepsilon-D^2 \Tilde{v}^\varepsilon} \to 0 \] as $\varepsilon \to 0$. 
Define \begin{align*} \Tilde{\omega}^\varepsilon_1(x)&=\frac{1}{\varepsilon^2}f(\Tilde{v}^\varepsilon-q^\varepsilon), \\ w_v^\varepsilon(x)&=\int_{\Omega} G(x, y) \omega^\varepsilon_v(y)\, dy, \\ w_r^\varepsilon(x)&=\int_{\Omega} H(x, y) \bigl(\omega^\varepsilon_1(y)-\Tilde{\omega}^\varepsilon_1(y)\bigr)\, dy, \\ w_s^\varepsilon(x)&=\int_{\Omega} \Gamma(x-y) \bigl(\omega^\varepsilon_1(y)-\Tilde{\omega}^\varepsilon_1(y)\bigr)\, dy, \end{align*} where $\Gamma (x)=\frac{1}{2\pi} \log \frac{1}{\abs{x}}$, so that $v^\varepsilon - \Tilde{v}^\varepsilon=w_v^\varepsilon+w_r^\varepsilon+w_s^\varepsilon$. Since by Lemma~\ref{lemmaVanishingVorticity}, $\omega^\varepsilon_v \to 0$ in $\mathrm{L}^s(\Omega)$ for some $s > 1$, we have, by elliptic estimates, $w^\varepsilon_v \to 0$ in $\mathrm{W}^{2, s}_{\mathrm{loc}}(\Omega)$. Next, since by \eqref{ineqDiamEepsi} $x^\varepsilon_1$ stays away from $\partial \Omega$ and $\omega^\varepsilon_1-\Tilde{\omega}^\varepsilon_1 \to 0$ in $\mathrm{L}^1(\Omega)$ by Lemma~\ref{lemmaLocalAsymptotics}, we have $w_r^\varepsilon \to 0$ in $C^\infty_{\mathrm{loc}}(\Omega)$. Finally, we have \[ D^2 w_s^\varepsilon (x)=\int_{\Omega} D^2\Gamma(x-y) \bigl(\omega^\varepsilon_1(y)-\Tilde{\omega}^\varepsilon_1(y)\bigr)\, dy. \] Since $\int_{\Omega} \omega^\varepsilon_1=\int_{\Omega} \Tilde{\omega}^\varepsilon_1=\kappa^\varepsilon_1$, one also has \[ D^2 w_s^\varepsilon (x)=\int_{B(x^\varepsilon_1, \varepsilon r)} \bigl(D^2\Gamma(x-y)-D^2\Gamma(x-x^\varepsilon_1)\bigr) \bigl(\omega^\varepsilon_1(y)-\Tilde{\omega}^\varepsilon_1(y)\bigr)\, dy. \] For every $y \in B(x^\varepsilon_1, \varepsilon r)$ and $x \in \Omega \setminus B(x^\varepsilon_1, \varepsilon 2r)$ \[ \abs{D^2\Gamma(x-y)-D^2\Gamma(x-x^\varepsilon_1)} \le C \frac{\abs{y-x^\varepsilon_1}}{\abs{x-x^\varepsilon_1}^3}, \] so that \[ \abs{D^2 w_s^\varepsilon (x)} \le \frac{C \varepsilon}{\abs{x^\varepsilon_1-x}^3}\Norm{\omega^\varepsilon_1-\Tilde{\omega}^\varepsilon_1}_{\mathrm{L}^1}.
\] Integrating the previous inequality we conclude \begin{multline*} \int_{\Omega \setminus B(x^\varepsilon_1, \varepsilon 2r)} \abs{D^2 w_s^\varepsilon (x)} \le C\varepsilon \Norm{\omega^\varepsilon_1-\Tilde{\omega}^\varepsilon_1}_{\mathrm{L}^1}\int_{\mathbf{R}^2 \setminus B(x^\varepsilon_1, \varepsilon 2r)}\frac{1}{\abs{x^\varepsilon_1-x}^3}\, dx \\ =C\Norm{\omega^\varepsilon_1-\Tilde{\omega}^\varepsilon_1}_{\mathrm{L}^1}\frac{2\pi \varepsilon}{2\varepsilon r}=o(1). \end{multline*} The $\mathrm{W}^{2, 1}_\mathrm{loc}(\Omega)$ convergence implies the $\mathrm{W}^{1, 2}_\mathrm{loc}(\Omega)$ and the $\mathrm{L}^\infty_{\mathrm{loc}}(\Omega)$ convergences. One then needs to prove the convergence in a neighbourhood of the boundary. Consider $U \subset V \subset \Omega$ open bounded sets such that $\partial \Omega \subset \Bar{U}$, $\Bar{U} \subset V$ and $\supp \omega^\varepsilon \cap V = \emptyset$. One has \[ \left\{ \begin{aligned} -\Delta (v^\varepsilon-\Tilde{v}^\varepsilon)&=\omega^\varepsilon_v && \text{in $U$},\\ v^\varepsilon-\Tilde{v}^\varepsilon&=0 && \text{on $\partial \Omega$}. \end{aligned} \right. \] Since $v^\varepsilon-\Tilde{v}^\varepsilon \to 0$ in $\mathrm{W}^{1,2}(V \setminus U)$ and in $\mathrm{L}^\infty (V \setminus U)$ and $\omega^\varepsilon_v \to 0$ in $\mathrm{L}^s(\Omega)$ for some $s > 1$, one obtains by classical regularity estimates that $v^\varepsilon-\Tilde{v}^\varepsilon \to 0$ in $\mathrm{W}^{1,2}(U)$ and in $\mathrm{L}^\infty(U)$. \end{proof} \begin{corollary} \label{corollaryAsymptotic} When $\varepsilon$ is small enough, $A^\varepsilon$ is connected, $x^\varepsilon_1=x^\varepsilon$, $\kappa^\varepsilon_1=\kappa^\varepsilon$, $\partial \bigl((A^\varepsilon_1-x^\varepsilon_1)/\varepsilon\bigr)$ tends to $\partial B(0, \rho_\kappa)$ as a $C^2$ manifold.
In particular, $-\Delta v^\varepsilon=0$ in $\Omega \setminus B(x^\varepsilon_1, 2\varepsilon \rho_\kappa)$ and \[ \omega^\varepsilon=\Tilde{\omega}^\varepsilon+o(1) \] in $\mathrm{L}^1(\Omega)$. \end{corollary} \begin{proof} Assume that $y\in A^\varepsilon\setminus B(x_1^\varepsilon, \varepsilon \rho_{\kappa^\varepsilon})$. We have \begin{equation} \label{ineqAepsBall} q(y)+ \frac{\kappa}{2\pi} \log \frac{1}{\varepsilon} < v^\varepsilon(y) \le \frac{\kappa_1^\varepsilon}{2\pi} \log \frac{1}{\abs{y-x^\varepsilon_1}}+o(1), \end{equation} uniformly in $y$, so that $\abs{y-x^\varepsilon_1}=O(\varepsilon)$. In view of Proposition~\ref{propositionAsymptoticsW21}, one then obtains that $(A^\varepsilon_1-x^\varepsilon_1)/\varepsilon$ is connected when $\varepsilon$ is small, as well as the required convergence of the boundary. \end{proof} \begin{corollary} \label{corEnergy} We have \[ \begin{split} \mathcal{E}^\varepsilon(v^\varepsilon) =\frac{\kappa^2}{4\pi} \log \frac{1}{\varepsilon}-\mathcal{W}(x^\varepsilon)+\mathcal{C}+o(1). \end{split} \] \end{corollary} \begin{proof} First we have in view of Proposition~\ref{propositionAsymptoticsW21} and Corollary~\ref{corollaryAsymptotic}, \[ \begin{split} \int_{\Omega} \abs{\nabla v^\varepsilon}^2 &=\int_{\Omega} v^\varepsilon \omega^\varepsilon\\ &=\int_{\Omega} \Tilde{v}^\varepsilon \omega^\varepsilon+o(1). \end{split} \] Since $\Norm{\Tilde{v}^\varepsilon-q^\varepsilon}_{\mathrm{L}^\infty}$ remains bounded as $\varepsilon \to 0$, we obtain, by Proposition~\ref{propositionAsymptoticsW21}, \[ \int_{\Omega} \abs{\nabla v^\varepsilon}^2=\frac{1}{\varepsilon^2}\int_{\Omega} \Tilde{v}^\varepsilon f\bigl(\Tilde{v}^\varepsilon-q^\varepsilon(x^\varepsilon)\bigr)+o(1). \] Similarly, by Proposition~\ref{propositionAsymptoticsW21}, \[ \frac{1}{\varepsilon^2}\int_{\Omega} F(v^\varepsilon-q^\varepsilon)=\frac{1}{\varepsilon^2}\int_{\Omega} F\bigl(\Tilde{v}^\varepsilon-q^\varepsilon(x^\varepsilon)\bigr)+o(1).
\] It suffices then to compute $\mathcal{E}^\varepsilon(\Tilde{v}^\varepsilon)$ as in the proof of Lemma~\ref{lemmaEnergyHatu}. \end{proof} \subsubsection{Conclusion}We are now in a position to present the \begin{proof}[Proof of Proposition~\ref{prop:1mai} completed] It is a direct consequence of Lemma~\ref{lemmaLocalAsymptotics}, Proposition~\ref{propositionAsymptoticsW21}, Corollary~\ref{corollaryAsymptotic} and Corollary~\ref{corEnergy}. \end{proof} \noindent and the \begin{proof}[Proof of Theorem~\ref{thm:K1}] It is a direct consequence of the upper estimate of Corollary~\ref{cor:upper} and the asymptotic properties obtained in Proposition~\ref{prop:1mai}. \end{proof} \section{Single vortices in multiply connected domains} \label{sectionmultiply} In this section we assume that $\Omega \subset \mathbf{R}^2$ is a bounded smooth multiply-connected domain; it can be written as \[ \Omega = \Omega_0 \setminus \bigcup_{h=1}^m \Omega_h, \] where $\Omega_0, \dotsc, \Omega_m$ are bounded simply-connected domains with $\Bar{\Omega}_h \subset \Omega_0$ for every $h \in \{1, \dotsc, m\}$. In place of problem \eqref{problemPeps}, we consider the problem of finding $u^\varepsilon$ and $\lambda^\varepsilon_1, \dotsc, \lambda^\varepsilon_m$ such that \begin{equation} \label{problemPepsstar} \left\{ \begin{aligned} -\Delta u^\varepsilon &=\frac{1}{\varepsilon^2} f(u^\varepsilon - q^\varepsilon ) & &\text{in $\Omega$, }\\ u^\varepsilon &= 0 & &\text{on $\partial \Omega_0$},\\ u^\varepsilon &= \lambda^\varepsilon_h & &\text{on $\partial \Omega_h$},\\ \int_{\partial \Omega_h} \frac{\partial u^\varepsilon}{\partial n}&=0 & & \text{for $h \in \{1, \dotsc, m\}$}. \end{aligned} \right. \tag{\protect{$\mathcal{P}^\varepsilon_*$}} \end{equation} The natural space to deal with this problem is the space of functions that are constant on the complement of $\Omega$: \[ H^1_*(\Omega)=\Bigl\{u \in H^1(\Omega) \: :\: \nabla u=0 \text{ in $\bigcup_{h=1}^m \Omega_h$}\Bigr\}.
\] It is standard to show that solutions of \eqref{problemPepsstar} are critical points of the functional $\mathcal{E}^\varepsilon$ defined on $H^1_*(\Omega)$ by \eqref{energyFunctional}. We consider least energy solutions obtained by minimization of the functional on the Nehari manifold. In order to state our result we also need the corresponding (appropriate) Green functions. Following C.\thinspace C.\thinspace Lin \cites{Lin1941, Lin1943}, we define $G_*$ as the solution of \[ \left\{ \begin{aligned} -\Delta G_*(\cdot, y)&=\delta_y & & \text{in $\Omega$,}\\ G_*(\cdot, y)&=0 & & \text{on $\partial \Omega_0$},\\ G_*&=\lambda_h & & \text{on $\partial \Omega_h$},\\ \int_{\partial \Omega_h} \frac{\partial G_*}{\partial n}&=0 & & \text{for $h \in \{1, \dotsc, m\}$}.\\ \end{aligned}\right. \] Its regular part $H_*$ is defined by \[ H_*(x,y)=G_*(x,y)-\frac{1}{2\pi} \log \frac{1}{\abs{x-y}}. \] P.\thinspace Koebe \cite{Koebe1918}*{\S 6} (see also \cite{Lin1943}*{\S 9}), defined $G_*$ in terms of the Green function for the Dirichlet problem $G$ and the unique solutions $Z_k$ of \[ \left\{ \begin{aligned} -\Delta Z_k&=0 & & \text{in $\Omega$,}\\ Z_k&=0 & & \text{on $\partial \Omega_0$},\\ Z_k&=\delta_{kh} & & \text{on $\partial \Omega_h$ with $h \in \{1, \dotsc, m\}$.}\\ \end{aligned}\right. \] Since the $Z_k$ are linearly independent, the matrix $(\omega_{kh})_{1 \le k, h \le m}$ defined by \[ \omega_{kh}=\int_{\Omega} \nabla Z_k \cdot \nabla Z_h \] is invertible; let $(\omega^{kh})_{1 \le k, h \le m}$ denote its inverse. We have \begin{equation} \label{eqGreenRelationship} G_*(x,y)=G(x,y)+\sum_{k, h=1}^m Z_k(x)\omega^{kh}Z_h(y). \end{equation} The Kirchhoff--Routh function in this context is defined by \[ \mathcal{W}_*(x)=\frac{\kappa^2}{2}H_*(x, x)-\kappa q(x), \] and the various quantities $A^\varepsilon, \omega^\varepsilon, \kappa^\varepsilon, x^\varepsilon, \rho^\varepsilon$ are still defined by \eqref{defiq}.
\] Theorem~\ref{thm:K1} generalizes then to \begin{theorem}\label{thm:K1m} As $\varepsilon \to 0$, we have \[ u^\varepsilon=U_{\kappa^\varepsilon} \Big(\frac{\cdot-x^\varepsilon}{\varepsilon}\Big)+\kappa^\varepsilon\Bigl(\frac{1}{2\pi} \log \frac{1}{\varepsilon \rho^\varepsilon}+ H_*(x^\varepsilon, \cdot)\Bigr)+o(1), \] \text{in $\mathrm{W}^{2, 1}_\mathrm{loc}(\Omega)$, in $\mathrm{W}^{1, 2}_0(\Omega)$, and in $\mathrm{L}^\infty(\Omega)$}, where \[ \kappa^\varepsilon=\kappa+\frac{2\pi}{\log \frac{1}{\varepsilon}}\Bigl(q(x^\varepsilon)-\kappa H_*(x^\varepsilon, x^\varepsilon) -\frac{\kappa}{2\pi} \log \frac{1}{\rho_\kappa} \Bigr)+o(\logeps^{-1}), \] and \[ \mathcal{W}_*(x^\varepsilon) \to \sup_{x \in \Omega} \mathcal{W}_*(x). \] One also has \[ B(x^\varepsilon, \Bar{r}^\varepsilon) \subset A^\varepsilon \subset B(x^\varepsilon, \mathring{r}^\varepsilon), \] with $\Bar{r}^\varepsilon=\varepsilon \rho_\kappa+o(\varepsilon)$ and $\mathring{r}^\varepsilon=\varepsilon \rho_\kappa +o(\varepsilon)$. Finally, \[ \mathcal{E}^\varepsilon (u^\varepsilon)= \frac{\kappa^2}{4\pi}\log \frac{1}{\varepsilon}-\mathcal{W}_*(x^\varepsilon)+\mathcal{C}+o(1). \] \end{theorem} \begin{proof} The proof of Theorem~\ref{thm:K1m} follows almost the same lines as that of Theorem~\ref{thm:K1}, so that we only mention the few adaptations. First, the functions $G$ and $H$ should be replaced by $G_*$ and $H_*$. In view of the regularity of $\Theta_h$ and of \eqref{eqGreenRelationship} this does not cause any trouble in the upper estimate or in the small scale and global asymptotics. Next, the proof of Theorem~\ref{thm:K1} relies on the Dirichlet boundary condition to estimate $\capa(A^\varepsilon, \Omega)$ in \eqref{ineqCapacity}. Here, we define instead \[ w^\varepsilon=\frac{v^\varepsilon-\max_{\partial \Omega} v^\varepsilon}{\min_{\partial A^\varepsilon} q^\varepsilon - \max_{\partial \Omega}v^\varepsilon}.
\] For every $h \in \{1, \dotsc, m\}$, let $\Theta_h \in \mathrm{H}^1_*(\Omega)$ be the unique solution of \[ \left\{ \begin{aligned} -\Delta \Theta_h&=0 & & \text{in $\Omega$,}\\ \Theta_h&=0 & & \text{on $\partial \Omega_0$},\\ \Theta_h&=\mu_{kh} & & \text{on $\partial \Omega_k$ for $k \in \{1, \dotsc, m\}$},\\ \int_{\partial \Omega_k} \frac{\partial \Theta_h}{\partial n}&=\delta_{hk} & & \text{for $k \in \{1, \dotsc, m\}$}, \end{aligned}\right. \] where $\mu_{kh}$ are unknown constants that are part of the problem\footnote{ This solution can be found by minimizing the functional $u \mapsto \frac{1}{2}\int_{\Omega} \abs{\nabla u}^2+u\vert_{\partial \Omega_k}$ over $\mathrm{H}^1_*(\Omega)$. (A similar problem appears in \cite[Chapter I, (3)]{BBH})}. By construction of $\Theta_h$, one has \[ v^\varepsilon \vert_{\Omega_h}=\int_{\partial \Omega_h} v^\varepsilon\frac{\partial \Theta_h}{\partial n} =\int_{\Omega} \nabla v^\varepsilon \cdot \nabla \Theta_h=\int_{\Omega} \omega^\varepsilon \Theta_h, \] and hence, in view of \eqref{ineqTotalVorticity}, \[ \Norm{v^\varepsilon}_{\mathrm{L}^\infty(\partial \Omega)} \le \max_{h \in \{1, \dotsc, m\}} \Norm{\Theta_h}_{\mathrm{L}^\infty(\Omega)}\bigl(\kappa+O(\logeps^{-1})\bigr). \] Therefore, \[ \frac{2\pi}{\capa (A^\varepsilon, \Omega)} \ge \frac{2\pi}{\int_\Omega \abs{\nabla w^\varepsilon}^2} \ge \log \frac{1}{\varepsilon}+O(1), \] and one can continue as in the proof of Lemma~\ref{lemmaAreaDiameter}. \end{proof} \section{Single vortices in unbounded domains} \label{sectUnbounded} In this section, we assume that $\Omega \subset \mathbf{R}^2$ is an unbounded simply-connected domain whose boundary is bounded in one direction; to fix the ideas, \[ ]a_0, +\infty[\times \mathbf{R} \subset \Omega \subset ]a_1, +\infty[ \times \mathbf{R}. \] Our goal is to carry out an analysis similar to that of the previous section.
We assume that $q \in \mathrm{W}^{1, 1}_\textrm{loc}(\Omega)$, \[ \sup_{x \in \Omega} \int_{B(x, 1)} \abs{\nabla q}^r < \infty \] for some $r > 2$, and that \[ q(x) \ge W(x_1-a_0)+d, \] for some $W > 0$ and $d > 0$, where $x=(x_1, x_2)$. Since $\partial \Omega$ is bounded in the $x_1$ direction, this is equivalent to requiring that \[ q(x) \ge W \dist(x, \partial \Omega)+d'. \] The natural space for solutions is \[ \mathrm{D}^{1, 2}_0(\Omega)= \{ u \in \mathrm{W}^{1, 1}_{\mathrm{loc}}(\Omega) \: :\: \int_{\Omega} \abs{\nabla u}^2 < \infty\}. \] The Nehari manifold $\mathcal{N}^\varepsilon$ and the infimum value $c^\varepsilon$ are defined as in Proposition~\ref{prop:2.1}. The existence of a minimizer $u^\varepsilon \in \mathcal{N}^\varepsilon$ as in Proposition~\ref{prop:2.1} such that $\mathcal{E}^\varepsilon(u^\varepsilon)=c^\varepsilon$ is no longer direct, nor even always true, because of compactness issues. As a first step, we derive upper bounds on $c^\varepsilon$. Next, we perform the a priori asymptotic analysis of solutions of $(\mathcal{P}^\eps)$ satisfying similar upper bounds. Finally, we prove existence results in appropriate cases of $\Omega$ and $q$. \subsection{Upper bound on the energy} \begin{proposition} \label{propUnboundedUpper} We have \[ c^\varepsilon \leq \frac{\kappa^2}{4\pi} \log \frac{1}{\varepsilon} - \sup_{x\in \Omega} \mathcal{W}(x) +\mathcal{C} + o(1). \] \end{proposition} \begin{proof} The proof goes as that of Corollary~\ref{cor:upper}. The main difference is that $q$ and $H(\Hat{x}, \cdot)$ are not bounded as in the proof of Lemma~\ref{lemmaHatuNehari}.
However, since $\lim_{x \to \infty} \frac{1}{2\pi} \log \frac{1}{\abs{x-\Hat{x}}} +H(x, \Hat{x})=0$, one still has, for every $x \in \Omega$, \[ H(\Hat{x}, x) \le \frac{q(x)}{\kappa}+C, \] whence, starting from \eqref{ineqVorticitySetUpperFrac}, one obtains \[ \frac{\dfrac{1}{2\pi} \log \dfrac{1}{\varepsilon}+\dfrac{1}{2\pi} \log \dfrac{\varepsilon}{\abs{x-\Hat{x}}}+ q(x)+C}{\dfrac{\kappa}{2\pi} \log \dfrac{1}{\varepsilon}+\frac{q(x)}{\kappa}} \ge \dfrac{\log \dfrac{1}{\varepsilon}+H(\Hat{x}, \Hat{x})}{\dfrac{\kappa}{2\pi} \log \dfrac{1}{\varepsilon}+q(\Hat{x})+\sigma}. \] Since $q \ge 0$, it follows that \[ \frac{1}{\kappa}+\frac{\dfrac{1}{2\pi} \log \dfrac{\varepsilon}{\abs{x-\Hat{x}}}}{\kappa \log \dfrac{1}{\varepsilon}}\ge \frac{1}{\kappa}+O\bigl(\logeps^{-1}\bigr), \] and it suffices to continue as in Lemmas~\ref{lemmaHatuNehari}~and~\ref{lemmaEnergyHatu}. \end{proof} \subsection{Functional inequalities on the half-plane} In order to perform the asymptotic analysis of the solutions and to study their existence, we first provide some useful functional type inequalities and convergence results on the half-plane $\mathbf{R}^2_+$ that will be used in the next section. \begin{proposition} \label{ineqUnboundedIneqW} We have for $u\in \mathrm{D}^{1, 2}_0(\mathbf{R}^2_+)$, \[ \muleb{2}\bigl(\{ x \in \mathbf{R}^2_+ \: :\: u(x) \ge Wx_1\}\bigr)\le C \int_{\mathbf{R}^2_+} \abs{\nabla u}^2, \] and, for every $p>0$, \[ \int_{\mathbf{R}^2_+} \bigl(u(x)-Wx_1\bigr)^p_+\, dx \le \frac{C}{W^2} \Bigl( \int_{\mathbf{R}^2_+} \abs{\nabla u}^2 \Bigr)^{1+\frac{p}{2}}. \] \end{proposition} A similar statement is proved by Yang Jianfu \cite[Lemma 4]{Yang1991} with a different proof relying on an isometry between $\mathcal{D}^{1,2}_0(\mathbf{R}^2_+)$ and the space of cylindrically symmetric elements of $\mathcal{D}^{1,2}_0(\mathbf{R}^4)$ \cite[Lemma 1]{Yang1991}. \begin{proof} Define $A_u=\{ x \in \mathbf{R}^2_+ \: :\: u(x) \ge Wx_1\}$. 
First we have, by the Chebyshev and Hardy inequalities, \[ \muleb{2}(A_u ) \le \frac{1}{W^2} \int_{\mathbf{R}^2_+} \frac{\abs{u(x)}^2}{\abs{x_1}^2}\, dx \le \frac{4}{W^2} \int_{\mathbf{R}^2_+} \abs{\nabla u}^2. \] By Sobolev's inequality, it follows that \[ \begin{split} \int_{\mathbf{R}^2_+} (u(x)-Wx_1)_+^p\, dx &=\int_{A_u} (u-Wx_1)^p\, dx \\ &\le C \Norm{\nabla(u-Wx_1)}^p_{2} \muleb{2}(A_u) \\ &\le \frac{C'}{W^2} \Norm{\nabla u}_{2}^2(\Norm{\nabla u}_2+W \muleb{2}(A_u)^\frac{1}{2})^p \\ &\le \frac{C''}{W^2} \Bigl( \int_{\mathbf{R}^2_+} \abs{\nabla u}^2\Bigr)^{1+\frac{p}{2}}. \qedhere \end{split} \] \end{proof} As a consequence: \begin{lemma} \label{lemmaUnboundedInequalityq} We have for $u\in \mathrm{D}^{1, 2}_0(\mathbf{R}^2_+)$, \[ \muleb{2}\bigl(\{ x \in \mathbf{R}^2_+ \: :\: u(x) \ge q(x)\}\bigr)\le C \int_{\mathbf{R}^2_+} \abs{\nabla u}^2, \] and for every $p>0$ \[ \int_{\mathbf{R}^2_+} (u-q)^p_+ \le C \Bigl( \int_{\mathbf{R}^2_+} \abs{\nabla u}^2 \Bigr)^{1+\frac{p}{2}}. \] \end{lemma} We also have a compactness theorem: \begin{lemma} \label{lemmaUnboundedCompactness} For every $p < \infty$ and $L>0$, the map $\Phi : \mathrm{D}^{1, 2}_0(\mathbf{R}^2_+) \to \mathrm{L}^p(\mathbf{R}_+\times ]-L, L[) : u \mapsto (u-Wx_1)_+$ is completely continuous. \end{lemma} \begin{proof} By Rellich's Theorem, $u \mapsto \Phi(u)\chi_{]0, \lambda[\times ]-L, L[}$ is completely continuous for every $\lambda > 0$. On the other hand, \[ \int_{]\lambda, +\infty[\times ]-L, L[ } \hspace{-2em}(u(x)-Wx_1)_+^p\, dx \le \frac{C}{\lambda}\int_{]\lambda, +\infty[\times ]-L, L[} \hspace{-2em}(u(x)-\tfrac{W}{2}x_1)_+^{p+1}\, dx \le \frac{C}{\lambda} \Norm{\nabla u}_2^{p+3}, \] therefore, on every bounded subset of $\mathrm{D}^{1, 2}_0(\mathbf{R}^2_+)$, $\Phi$ is a uniform limit of completely continuous maps. The conclusion follows. \end{proof} \begin{lemma}\label{campeones} Let $(u_n) \subset \mathrm{D}^{1, 2}_0(\mathbf{R}^2_+)$.
If $(u_n)$ is bounded in $\mathrm{D}^{1, 2}_0(\mathbf{R}^2_+)$ and \[ \sup_{y\in \mathbf{R}} \int_{\mathbf{R}_+ \times ]y-1, y+1[} (u_n-Wx_1)_+^p \to 0, \] then \[ \int_{\mathbf{R}^2_+} (u_n-Wx_1)_+^s \to 0, \] for every $s>0$. \end{lemma} This kind of result was first obtained by P.-L.\thinspace Lions \cite[Lemma I.1]{Lions1984}. The idea of our proof comes from V.\thinspace Coti Zelati and P.\thinspace Rabinowitz \cite{CotiZelatiRabinowitz1992}. \begin{proof} By the Gagliardo--Nirenberg inequality \cite[p.\thinspace 125]{Nirenberg1959}, \[ \begin{split} \int_{\mathbf{R}_+\times ]y-1, y+1[}(u_n-Wx_1)_+^{p+2} \le C\int_{\mathbf{R}_+\times ]y-1, y+1[}\hspace{-4em}& (u_n-Wx_1)_+^p \\ &\times \int_{\mathbf{R}_+ \times ]y-1, y+1[}\hspace{-4em} (\abs{\nabla (u_n-Wx_1)_+}^2+\abs{(u_n-Wx_1)_+}^2). \end{split} \] Integrating with respect to $y\in \mathbf{R}$, one obtains \[ \begin{split} \int_{\mathbf{R}^2_+}(u_n-Wx_1)_+^{p+2} \le C\biggl(\sup_{y \in \mathbf{R}} \int_{\mathbf{R}_+\times ]y-1, y+1[}\hspace{-4em}& (u_n-Wx_1)_+^p\biggr)\\ &\times\int_{\mathbf{R}_+^2} \bigl(\abs{\nabla (u_n-Wx_1)_+}^2+\abs{(u_n-Wx_1)_+}^2\bigr). \end{split} \] Since by Proposition~\ref{ineqUnboundedIneqW} \[ \int_{\mathbf{R}_+^2} \abs{\nabla (u_n-Wx_1)_+}^2+\abs{(u_n-Wx_1)_+}^2 \le C \bigl(\Norm{\nabla u_n}_{2}^2+\Norm{\nabla u_n}_{2}^4\bigr), \] $(u_n-Wx_1)_+ \to 0$ in $\mathrm{L}^{p+2}(\mathbf{R}^2_+)$. By Proposition~\ref{ineqUnboundedIneqW}, the general case $s \ne p+2$ follows by interpolation. \end{proof} \subsection{Asymptotic behavior of solutions} \label{sectionUnboundedAsymptotics} In this section, we assume that $(v^\varepsilon)$ is a sequence of solutions to $(\mathcal{P}^\eps)$ satisfying \eqref{assumptEnergyUpperbound}. We shall prove \begin{proposition} \label{prop:1maiUnbounded} Proposition~\ref{prop:1mai} holds under the assumptions on $\Omega$ and $q$ of this section.
\end{proposition} \subsubsection{Step 1: First quantitative properties of the solutions} We first have the counterpart of Proposition~\ref{propositionEstimatesueps}: \begin{proposition} \label{propositionUnboundedEstimatesueps} The estimates \eqref{ineqMuAeps}, \eqref{ineqVortexEnergy}, \eqref{ineqVortexPotential}, \eqref{eq:2etoiles} and \eqref{ineqTotalVorticity} hold for some constant $C$ independent of $\varepsilon$. \end{proposition} \begin{proof} The proof of Proposition~\ref{propositionEstimatesueps} provides the estimates \eqref{ineqVortexEnergy}, \eqref{ineqVortexPotential}, \eqref{eq:2etoiles} and \eqref{ineqTotalVorticity} without any modification. The inequality \eqref{ineqMuAeps} needs a little more work, since its proof in Proposition~\ref{propositionEstimatesueps} relies on the Poincar\'e inequality. In the present setting, we replace it by the Chebyshev inequality and Lemma~\ref{lemmaUnboundedInequalityq}: \[ \begin{split} \muleb{2}(\{ x \in \Omega \: :\: v^\varepsilon(x) \ge q(x)+\tfrac{\kappa}{2\pi} \log \tfrac{1}{\varepsilon}\}) &\le \frac{1}{(\frac{\kappa}{2\pi} \log \frac{1}{\varepsilon})^4} \int_{\Omega} (v^\varepsilon-q)_+^4 \\ &\le \frac{C}{\logeps^4} \logeps^3=C \logeps^{-1}. \qedhere \end{split} \] \end{proof} \subsubsection{Step 2: Structure of the vorticity set} As previously, we consider the connected components $(A^\varepsilon_i)_{i \in I^\varepsilon}$ of $A^\varepsilon$. \begin{lemma} \label{lemmaUnboundedAreaDiameter} If $\varepsilon > 0$ is sufficiently small, we have for every $i \in I^\varepsilon$, \begin{equation} \label{ineqUnboundedVorticityDiameter} \diam(A^\varepsilon_i) \le C \varepsilon \frac{\dist(A^\varepsilon_i, \partial \Omega)}{e^{2W \dist(A^\varepsilon_i, \partial\Omega)}}.
\end{equation} Moreover, if for every $x \in \Omega$, one defines \[ A^\varepsilon_x=\bigcup \Bigl\{ A^\varepsilon_i \: :\: B(x, \tfrac{1}{2}\dist(x, \partial \Omega) +1) \cap A^\varepsilon_i \ne \emptyset \Bigr\}, \] then \[ \muleb{2}(A^\varepsilon_x) \le C \varepsilon^2e^{-\mu \dist(x, \partial \Omega)}. \] \end{lemma} \begin{proof} Let \[ w=\frac{v^\varepsilon}{\min_{\partial A^\varepsilon_i}q^\varepsilon}. \] Proceeding as in \eqref{ineqCapacity}, we obtain, using once more Proposition~\ref{propositionCapacityArea}, \[ \frac{2\pi (\tfrac{\kappa}{2\pi}\log \tfrac{1}{\varepsilon}+W\dist(A^\varepsilon_i, \partial \Omega)+d')^2}{\frac{\kappa^2}{2\pi} \log \frac{1}{\varepsilon}}\le \log \biggl(C\bigl(1+\dist(A^\varepsilon_i, \partial \Omega)\bigr)\Bigl(1+\frac{\dist(A^\varepsilon_i, \partial \Omega)}{\diam A^\varepsilon_i}\Bigr)\biggr). \] Therefore, \[ \frac{1}{\varepsilon}\le C\frac{1+\dist(A^\varepsilon_i, \partial \Omega)}{e^{2W(\dist(A^\varepsilon_i, \partial \Omega)-1)}} \Bigl(1+\frac{\dist(A^\varepsilon_i, \partial \Omega)}{\diam A^\varepsilon_i}\Bigr), \] from which \eqref{ineqUnboundedVorticityDiameter} follows. Consider now $A^\varepsilon_x$. By \eqref{ineqUnboundedVorticityDiameter}, $A^\varepsilon_x \subset B(x, \frac{2}{3} \dist(x, \partial \Omega)+1)$ when $\varepsilon$ is small enough, so that \[ \frac{2\pi}{\capa_\Omega (A^\varepsilon_x)} \ge \frac{\bigl(\tfrac{\kappa}{2\pi}\log \tfrac{1}{\varepsilon} + \frac{W}{3}\dist(x, \partial \Omega)+d'\bigr)^2}{\frac{\kappa^2}{4\pi} \log \frac{1}{\varepsilon}}. \] By Proposition~\ref{propositionCapacityLocalArea}, we obtain \[ \muleb{2}(A^\varepsilon_x) \le C\bigl(\dist(x, \partial \Omega)+1\bigr)^2 \varepsilon^2 e^{-\frac{4W}{3}\dist(x, \partial \Omega)} \le C \varepsilon^2 e^{-\mu\dist(x, \partial \Omega)}. \qedhere \] \end{proof} \begin{remark} A slightly more careful proof shows that one can take any $\mu < W/2$, provided $C$ is large enough.
\end{remark} The next lemma, counterpart of Lemma~\ref{lemmaVortexSplit}, ensures that essential vortices are not too far from the boundary. \begin{lemma} \label{lemmaUnboundedVortexSplit} There exist constants $\gamma, C, c>0$, such that, when $\varepsilon$ is small enough: If \eqref{eqSplitVortices} holds, we have \eqref{ineqLowerBoundArea}, \eqref{ineqLowerBoundDiam}, \eqref{ineqLowerBoundDistance}, \eqref{ineqLowerBoundVortex} and \[ \dist(A^\varepsilon_i, \partial \Omega)\le C, \] while if \eqref{eqSplitVortices} does not hold, then \eqref{ineqfsVanishing} holds. \end{lemma} \begin{proof} The proof follows essentially that of Lemma~\ref{lemmaVortexSplit}. The inequality \eqref{ineqLowerBoundDiam} follows immediately from \eqref{ineqLowerBoundArea} and \eqref{ineqUnboundedVorticityDiameter}. \end{proof} As in the case of a bounded domain, the vorticity set can be split into a vanishing vorticity set $V^\varepsilon$ and an essential one $E^\varepsilon$, defined by \eqref{eqDefVeps} and \eqref{eqDefEeps}. Since the gradient of $q$ is only locally integrable, Lemma~\ref{lemmaVanishingVorticity} only gives local information. \begin{lemma} \label{lemmaUnboundedVanishing} For every $s \ge 1$, we have \[ \sup_{x \in \Omega} \Norm{\omega^\varepsilon_v}_{\mathrm{L}^s(B(x, 1))} = o(\varepsilon^{p(1-\frac{2}{r})-2(1-\frac{1}{s})}). \] In particular, if $\frac{1}{s} \ge 1-p(\frac{1}{2}-\frac{1}{r})$, then $\omega^\varepsilon_v \to 0$ in $\mathrm{L}^s_{\mathrm{loc}}(\Omega)$. \end{lemma} \subsubsection{Step 3: Small scale asymptotics} For the small scale asymptotics, one first notes that Lemma~\ref{lemmaSmallScaleLocalEstimates} still holds. Indeed, the only step that relied on the boundedness of $\Omega$ was \eqref{ineqGomegav}.
For every $\rho>0$, regularity estimates still yield, for $x \in B(x^\varepsilon_i, \frac{1}{2})$, \[ \Bigl\lvert\int_{B(x^\varepsilon_i, \rho)} G(x, y)\omega^\varepsilon_v(y)\, dy \Bigr\rvert\le C \mathbf{N}orm{\omega^\varepsilon_v}_{\mathrm{L}^s(B(x^\varepsilon_i, 2\rho))}, \] and the conclusion follows from Lemma~\ref{lemmaUnboundedVanishing}. On the other hand, since $\Omega$ is contained in a half-plane, by comparing its Green function with the Green function of a half-plane, we have \[ G(x, y) \le \frac{1}{2\pi} \log \Bigl(1+\frac{C\bigl(1+ \dist(x, \partial \Omega)\bigr)}{\abs{x-y}}\Bigr). \] Since $\dist(x^\varepsilon_i, \partial \Omega)$ is bounded, we have, for every $x \in B(x^\varepsilon_i, 1)$, \[ \int_{\Omega \setminus B(x^\varepsilon_i, \rho)} G(x, y)\omega^\varepsilon_v(y)\, dy \le \frac{\kappa^\varepsilon}{2\pi} \log \Bigl(1+\frac{C}{\rho}\Bigr) \to 0, \] as $\rho \to \infty$, uniformly in $\varepsilon > 0$. Lemma~\ref{lemmaSmallScaleLocalEstimates} being established, the proof of Lemma~\ref{lemmaLocalAsymptotics} also adapts straightforwardly. \subsubsection{Step 4: Global asymptotics} For Proposition~\ref{propositionAsymptoticsW21}, one obtains a little more than the $\mathrm{W}^{2, 1}_{\mathrm{loc}}(\Omega)$ convergence. Setting $\Omega_\delta=\{ x \in \Omega \: :\: \dist(x, \partial \Omega)> \delta\}$, one has \begin{proposition} We have \[ v^\varepsilon=\Tilde{v}^\varepsilon+o(1) \] in $\mathrm{W}^{2, 1}_{\mathrm{loc}}(\Omega_\delta)$ for every $\delta > 0$, in $\mathrm{W}^{1, 2}_0(\Omega)$, and in $\mathrm{L}^\infty(\Omega)$. \end{proposition} \begin{proof} One defines $\Tilde{\omega}^\varepsilon_1$, $w^\varepsilon_v$ and $w^\varepsilon_s$ as in the proof of Proposition~\ref{propositionAsymptoticsW21}.
One defines \begin{align*} w^\varepsilon_r(x)&=\int_{\Omega} \Bigl(H(x,y)-\frac{1}{4\pi}\log (\abs{x-y}^2+4x_1y_1)\Bigr) \bigl(\omega^\varepsilon_1(y)-\Tilde{\omega}^\varepsilon_1(y)\bigr)\, dy,\\ w^\varepsilon_h(x)&=\int_{\Omega} \frac{1}{4\pi}\log (\abs{x-y}^2+4x_1y_1) \bigl(\omega^\varepsilon_1(y)-\Tilde{\omega}^\varepsilon_1(y)\bigr)\, dy. \end{align*} Recalling that $0 < c \le \dist(x^\varepsilon_1, \partial \Omega) \le C$, one treats the terms $w^\varepsilon_v$, $w^\varepsilon_s$ and $w^\varepsilon_r$ as in the proof of Proposition~\ref{propositionAsymptoticsW21}; the term $w^\varepsilon_h$ is treated similarly to the term $w^\varepsilon_s$. The proof of the convergences up to the boundary follows then as in the proof of Proposition~\ref{propositionAsymptoticsW21}. \end{proof} For Corollary~\ref{corollaryAsymptotic}, we have, instead of \eqref{ineqAepsBall}, \[ q(y)+ \frac{\kappa}{2\pi} \log \frac{1}{\varepsilon} < v^\varepsilon(y^\varepsilon) \le \frac{\kappa_1^\varepsilon}{2\pi} \log \Bigl(1+\frac{C\dist(x^\varepsilon_1, \partial \Omega)}{\abs{y-x^\varepsilon_1}}\Bigr)+O(1). \] The remaining part of the proof carries over identically since $\dist(x^\varepsilon_1, \partial \Omega)$ remains bounded as $\varepsilon \to 0$. Corollary~\ref{corEnergy} also follows without any modification. \subsection{Existence of solutions} In this section we present sufficient conditions for the existence of a minimizer for $c^\varepsilon$. Assume that $\Omega \subset ]a_0, +\infty[ \times \mathbf{R}$ is a Lipschitz domain, and that \begin{equation} \label{condPerturbation} \lim_{t \to +\infty} \inf \{ x_1 \in \mathbf{R} \: :\: \exists x_2 \in \mathbf{R}, (x_1, x_2) \in \Omega \text{ and } \abs{x_2} \ge t\}=0. \end{equation} Assume also that there exist $\Hat{W}, \Hat{d}>0$ such that \[ \lim_{t \to +\infty} \inf_{\abs{x_2} > t} \frac{q(x)-\Hat{W}x_1-\Hat{d}}{1+\abs{x_1}} \ge 0.
\] We define \[ \Hat{\mathcal{E}}^\varepsilon (u)= \frac{1}{2} \int_{\mathbf{R}^2_+} \abs{\nabla u}^2-\frac{1}{\varepsilon^2}\int_{\mathbf{R}^2_+} F(u-\Hat{W}x_1-\Hat{d}) \] and the minimax level \[ \Hat{c}^\varepsilon=\inf_{u \in \mathrm{D}^{1, 2}_0(\mathbf{R}^2_+)}\max_{t>0} \Hat{\mathcal{E}}^\varepsilon(tu). \] We first recall and investigate the case where $q$ is affine and $\Omega$ is the half-plane. In this case, by definition, $c^\varepsilon=\Hat{c}^\varepsilon$. \begin{theorem}[Yang \cite{Yang1991}] \label{thmYang} If $\Omega=\mathbf{R}^2_+$ and $q(x)=Wx_1+d$, then problem \eqref{problemPeps} admits a solution $u \in \mathrm{D}^{1, 2}_0(\Omega)$. \end{theorem} The proof in \cite{Yang1991} allows one to state that \begin{proposition}\label{prop:al} The critical level $c^\varepsilon=\Hat{c}^\varepsilon$ depends continuously on $W$ and $d$. \end{proposition} \begin{proof}[Sketch of the proof] We can assume without loss of generality that $\varepsilon=1$ and skip any reference to it. Given converging sequences $W_n \to W$ and $d_n \to d$, we set \[ \mathcal{E}_n(u)=\frac{1}{2} \int_{\mathbf{R}^2_+} \abs{\nabla u}^2-\int_{\mathbf{R}^2_+} F(u-W_nx_1-d_n). \] By Theorem~\ref{thmYang}, $\mathcal{E}$ and $\mathcal{E}_n$ possess (some) ground-states $u$ and $u_n$, for which we set $c_n=\mathcal{E}_n(u_n)$. There exist $\tau_n \to 1$ such that $\dualprod{d\mathcal{E}_n(\tau_n u)}{\tau_n u}=0$. Therefore, \[ c_n \le \mathcal{E}_n(\tau_n u) \to \mathcal{E} (u)=c. \] This implies that $c$ is upper semi-continuous. In particular, since \[ \Bigl(\frac{1}{2}-\frac{1}{p+1}\Bigr)\mathbf{N}orm{\nabla u_n}^2\le \mathcal{E}_n(u_n) \] the sequence $(u_n)$ is bounded in $\mathrm{D}^{1, 2}_0(\mathbf{R}^2_+)$.
Choosing $\check{W}=\inf W_n>0$, we obtain by Proposition~\ref{ineqUnboundedIneqW} \begin{multline*} \Bigl(\int_{\mathbf{R}^2_+} (u_n-\tfrac{1}{2}\check{W}x_1)^{p+1}_+\Bigr)^\frac{2}{p+3} \le \int_{\mathbf{R}^2_+} \abs{\nabla u_n}^2 \le \int_{\mathbf{R}^2_+} u_n f(u_n-W_nx_1-d_n) \\ \le \int_{\mathbf{R}^2_+} u_n f(u_n-\check{W}x_1) \le C \int_{\mathbf{R}^2_+} (u_n-\tfrac{1}{2}\check{W}x_1)^{p+1}_+, \end{multline*} so that $(u_n-\frac{1}{2} \check{W} x_1)_+ \not \to 0$ in $L^{p+1}(\mathbf{R}^2_+)$. By Lemma~\ref{campeones}, up to translation in the $x_2$ direction, we have $(u_n-\frac{1}{2} \check{W} x_1)_+ \not \to 0$ in $L^{p+1}(\mathbf{R}_+\times ]-1, 1[)$. Hence, there exists $0\neq v\in \mathrm{D}^{1, 2}_0(\mathbf{R}^2_+)$ such that $u_n \rightharpoonup v$ in $\mathrm{D}^{1, 2}_0(\mathbf{R}^2_+)$ and $u_n \to v$ almost everywhere and in $L^r_{\mathrm{loc}}(\mathbf{R}^2_+)$ for $r \ge 1$. In particular, $d\mathcal{E}(v)=0$ and by Fatou's Lemma, we have \[ \begin{split} c &\le \mathcal{E}(v)=\int_{\mathbf{R}^2_+} (W x_1+d)^p (v-Wx_1-d)+(\tfrac{1}{2}-\tfrac{1}{p+1})(v-Wx_1-d)_+^{p+1}\\ &\le \liminf_{n \to \infty}\int_{\mathbf{R}^2_+} (W_n x_1+d)^p (u_n-Wx_1-d)+(\tfrac{1}{2}-\tfrac{1}{p+1})(u_n-Wx_1-d)_+^{p+1}\\ &= \liminf_{n \to \infty} \mathcal{E}_n(u_n)=\liminf_{n \to \infty} c_n. \qedhere \end{split} \] \end{proof} \begin{proposition} \label{propositionPS} If \[ c^\varepsilon < \Hat{c}^\varepsilon \] then there exists $u_\varepsilon \in \mathrm{D}^{1, 2}_0(\Omega)$ such that $d\mathcal{E}^\varepsilon(u_\varepsilon)=0$ and $\mathcal{E}^\varepsilon(u_\varepsilon)=c^\varepsilon$. \end{proposition} \begin{proof} We use the same strategy as P.\thinspace Rabinowitz \cite{Rabinowitz1992} for the nonlinear Schr\"odinger equation on $\mathbf{R}^N$. The minimization problem can be reformulated as a mountain-pass problem (see, e.g.\ \cite[Chapter 4]{Willem1996}). 
By Ekeland's variational principle, there exists a sequence $(u_n) \subset \mathrm{D}^{1, 2}_0(\Omega)$ such that $d\mathcal{E}^\varepsilon(u_n) \to 0$ and $\mathcal{E}^\varepsilon(u_n) \to c^\varepsilon$, see \cite[Theorem 4.3]{MawhinWillem} or \cite[Theorem 1.15]{Willem1996}. We have \[ \Bigl(\frac{1}{2}-\frac{1}{p+1}\Bigr)\mathbf{N}orm{\nabla u_n}_{\mathrm{L}^2}^2\le \mathcal{E}^\varepsilon(u_n)-\dualprod{d\mathcal{E}^\varepsilon(u_n)}{u_n} \to c^\varepsilon, \] so that $(u_n)$ is bounded in $\mathrm{D}^{1, 2}_0(\Omega)$. There exists $u \in \mathrm{D}^{1, 2}_0(\Omega)$ such that, up to a subsequence, $u_n \rightharpoonup u$ in $\mathrm{D}^{1, 2}_0(\Omega)$. By Rellich's Theorem, for every $\varphi \in \mathrm{D}^{1, 2}_0(\Omega)$, $\dualprod{d\mathcal{E}^\varepsilon(u_n)}{\varphi} \to \dualprod{d\mathcal{E}^\varepsilon(u)}{\varphi}$, so that $d\mathcal{E}^\varepsilon(u)=0$. If $u \ne 0$, then $u \in \mathcal{N}^\varepsilon$ and by Fatou's Lemma \[ \begin{split} \mathcal{E}^\varepsilon(u)&=\frac{1}{\varepsilon^2}\int_{\Omega} \frac{f(u-q^\varepsilon)u}{2}-F(u-q^\varepsilon)\\ &=\frac{1}{2\varepsilon^2}\int_{\Omega} q^\varepsilon(u-q^\varepsilon)_+^{p}+(1-\tfrac{2}{p+1}) (u-q^\varepsilon)_+^{p+1} \\ &\le \liminf_{n \to \infty} \frac{1}{2\varepsilon^2}\int_{\Omega} q^\varepsilon(u_n-q^\varepsilon)_+^{p}+(1-\tfrac{2}{p+1})(u_n-q^\varepsilon)_+^{p+1}\\ &= \liminf_{n \to \infty} \mathcal{E}^\varepsilon(u_n)-\tfrac{1}{2}\dualprod{d\mathcal{E}^\varepsilon(u_n)}{u_n}=c^\varepsilon, \end{split} \] so that $u$ fits the claim. Otherwise, for any $\delta < \min( \Hat{W}, \Hat{d})$, let $R > 0$ be such that \[ -\delta \le \inf \bigl\{ s \in \mathbf{R} \: :\: \exists r \in \mathbf{R}, (s, r) \in \Omega \text{ and } \abs{r}\ge R\bigr\}, \] and, \begin{equation} \label{ineqqdelta} q(x)\ge \Hat{q}_\delta(x):=(\Hat{W}-\delta)x_1+\Hat{d}-\delta \qquad \text{if $\abs{x_2} \ge R$}.
\end{equation} We have, for $\Omega_R = \{ x \in \Omega \: :\: |x_2|\geq R\}$, and in view of Lemma~\ref{lemmaUnboundedCompactness}, \begin{equation} \label{ineqOmegaR} \begin{split} c^\varepsilon&=\lim_{n \to \infty}\mathcal{E}^\varepsilon(u_n)-\dualprod{d\mathcal{E}^\varepsilon(u_n)}{u_n}\\ &\le \liminf_{n \to \infty} \frac{1}{\varepsilon^2}\int_{\Omega} u_n (u_n-q)_+^p = \liminf_{n \to \infty} \frac{1}{\varepsilon^2}\int_{\Omega \setminus \Omega_R} u_n (u_n-q)_+^p \\ &\le C \liminf_{n \to \infty} \frac{1}{\varepsilon^2}\int_{\Omega_R} \Bigl(u_n-\frac{q}{1+\delta}\Bigr)_+^{p+1} \le C \liminf_{n \to \infty} \frac{1}{\varepsilon^2}\int_{\Omega_R} (u_n-\Hat{q}_\delta)_+^{p+1}. \end{split} \end{equation} Let $\psi \in C^\infty(\mathbf{R})$ such that $\supp \psi \subset [-2\delta, -\delta]$, $\psi (t)=0$ for $t \le -2 \delta$ and $\psi(t)=1$ for $t \ge -\delta$. We set $\varphi(x_1, x_2)=\psi(x_1)$. Note that $\supp \nabla \varphi \cap \Bar{\Omega}$ is compact, so that by Rellich's Theorem, \[ \int_{\Omega} \abs{\nabla \varphi}^2\abs{u_n}^2 \to 0, \] and therefore, defining $v_n=\varphi u_n$, \[ \int_{\Omega} \abs{\nabla v_n}^2=\int_{\Omega} \abs{\nabla u_n}^2+o(1). \] For every $\tau > 0$ \begin{multline*} \max_{\theta > 0} \mathcal{E}(\theta u_n) \ge \mathcal{E}(\tau u_n) =\Hat{\mathcal{E}}_{\delta}(\tau v_n)+\frac{\tau^2}{2}\int_{\Omega} \abs{\nabla u_n}^2-\abs{\nabla v_n}^2 \\ +\frac{1}{\varepsilon^2}\int_{\Omega} F(\tau v_n-\Hat{q}_\delta)-F(\tau u_n-q). \end{multline*} Choose now $\tau_n$ such that $\Hat{\mathcal{E}}_\delta(\tau_n v_n)=\sup_{\tau > 0}\Hat{\mathcal{E}}_\delta(\tau v_n)$. 
If $\tau_n \ge 1$, we have, \begin{multline*} \tau_n^2 \int_{\Omega} \abs{\nabla v_n}^2 = \frac{1}{\varepsilon^2}\int_{\Omega} \tau_n v_n f(\tau_n v_n-\Hat{q}_\delta) \ge \tau_n^{p+1}\frac{1}{\varepsilon^2}\int_{\Omega} (v_n-\Hat{q}_\delta)_+^{p+1} \\ \ge \tau_n^{p+1}\frac{1}{\varepsilon^2}\int_{\Omega_R} (v_n-\Hat{q}_\delta)^{p+1}_+ = \tau_n^{p+1} \frac{1}{\varepsilon^2}\int_{\Omega_R} (u_n-\Hat{q}_\delta)^{p+1}_+, \end{multline*} so that by \eqref{ineqOmegaR} we obtain \[ \tau_n \le \max\Biggl(1, \biggl( \frac{\int_{\Omega} \abs{\nabla v_n}^2}{\int_{\Omega_R} (u_n-\Hat{q}_\delta)^{p+1}_+} \biggr)^\frac{1}{p-1}\Biggr), \] and the quantity on the right-hand side is bounded in view of \eqref{ineqOmegaR}. This implies that $\tau_n v_n \rightharpoonup 0$ and $\tau_n u_n \rightharpoonup 0$ in $D^{1, 2}(\Omega)$, and by Lemma~\ref{lemmaUnboundedCompactness}, that \[ \int_{\Omega \setminus \Omega_R} F(\tau_n v_n-\Hat{q}_\delta)-F(\tau_n u_n-q) \to 0, \qquad\text{as }n\to +\infty. \] On the other hand, by \eqref{ineqqdelta}, $\Hat{q}_\delta \le q$ in $\Omega \setminus \Omega_R$, and \[ \int_{\Omega_R} F(\tau_n v_n-\Hat{q}_\delta)-F(\tau_n u_n-q) = \int_{\Omega_R} F(\tau_n u_n-\Hat{q}_\delta)-F(\tau_n u_n-q)\ge 0. \] Hence, \[ \liminf_{n \to \infty} \mathcal{E}(u_n) \ge \liminf_{n \to \infty} \Hat{\mathcal{E}}_\delta(\tau_n v_n) \ge \Hat{c}_\delta := \inf_{v \in \mathrm{D}^{1, 2}_0(]-2\delta, +\infty[\times \mathbf{R})} \Hat{\mathcal{E}}_\delta(v), \] and the conclusion follows from Proposition~\ref{prop:al}, sending $\delta$ to zero. 
\end{proof} From Proposition~\ref{propositionPS}, we derive \begin{theorem} \label{theoremExistenceLevels} If \[ \sup_{x \in \Omega}\frac{\kappa^2}{2} H(x, x)-\kappa q(x) > \frac{\kappa^2}{4\pi} \Bigl(\log \frac{\kappa}{2\pi \Hat{W}}-1\Bigr)-\kappa \Hat{d}, \] then, if $\varepsilon$ is sufficiently small, there exists $u^\varepsilon \in \mathrm{D}^{1, 2}_0(\Omega)$ such that $d\mathcal{E}^\varepsilon(u^\varepsilon)=0$ and $\mathcal{E}^\varepsilon(u^\varepsilon)=c^\varepsilon$. \end{theorem} \begin{proof} By Proposition~\ref{propUnboundedUpper}, we have \[ c^\varepsilon \le \frac{\kappa^2}{4\pi} \log \frac{1}{\varepsilon}-\sup_{x\in \Omega}\Bigl(\frac{\kappa^2}{2} H(x, x)-\kappa q(x) \Bigr) +\mathcal{C} + o(1). \] On the other hand, in view of Theorem~\ref{thmYang}, $\Hat{\mathcal{E}}^\varepsilon$ possesses a ground-state whose energy is bounded by $\frac{\kappa^2}{4\pi}\log \frac{1}{\varepsilon}+O(1)$. It follows from Proposition~\ref{prop:1maiUnbounded} applied to these ground-states that \[ \begin{split} \Hat{c}^\varepsilon&=\frac{\kappa^2}{4\pi} \log \frac{1}{\varepsilon}-\sup_{x\in \mathbf{R}^2_+} \Bigl(\frac{\kappa^2}{4\pi} \log 2x_1-\kappa (\Hat{W}x_1+\Hat{d})\Bigr) +\mathcal{C} + o(1) \\ &=\frac{\kappa^2}{4\pi} \log \frac{1}{\varepsilon}- \Bigl( \frac{\kappa^2}{4\pi} \Bigl(\log \frac{\kappa}{2\pi \Hat{W}}-1\Bigr)-\kappa \Hat{d} \Bigr) +\mathcal{C} + o(1). \end{split} \] Therefore, when $\varepsilon$ is small enough, $c^\varepsilon < \Hat{c}^\varepsilon$, and the conclusion follows from Proposition~\ref{propositionPS}. \end{proof} \section{Pair of vortices in bounded domains} \label{sectionVortexPair} In this section, $\Omega\subset \mathbf{R}^2$, $f : \mathbf{R} \to \mathbf{R}$ and $q: \Omega \to \mathbf{R}$ are as in Section~\ref{sectionSingleVortex}. 
For ${\boldsymbol{\varepsilon}}=(\varepsilon_+, \varepsilon_-) >0$, $\kappa_+>0$ and $\kappa_-<0$ given, we consider solutions of the boundary value problems \[ \left\{ \begin{aligned} -\Delta u^{\boldsymbol{\varepsilon}} &= \frac{1}{\varepsilon_+{}^2} f(u^{\boldsymbol{\varepsilon}} - q^\varepsilon_+) - \frac{1}{\varepsilon_-{}^2} f(q^\varepsilon_- -u^{\boldsymbol{\varepsilon}}) & & \text{in $\Omega$}, \\ u^{\boldsymbol{\varepsilon}} &= 0 & &\text{on $\partial \Omega$}, \end{aligned} \right. \tag{\protect{$\mathcal{Q}^{\boldsymbol{\varepsilon}}$}} \label{Qeps} \] where $q^\varepsilon_\pm = q+ \frac{\kappa_\pm}{2\pi} \log \frac{1}{\varepsilon_\pm}$. We consider the least energy nodal solutions of \eqref{Qeps} obtained by minimizing the energy functional \[ \mathcal{E}^{\boldsymbol{\varepsilon}}(u)= \int_{\Omega} \Bigl(\frac{|\nabla u|^2}{2} - \frac{1}{\varepsilon_+{}^2}F(u-q^\varepsilon_+) -\frac{1}{\varepsilon_-{}^2}F(q^\varepsilon_- -u) \Bigr) \] over the natural constraint given by the nodal Nehari set \[ \mathcal{M}^{\boldsymbol{\varepsilon}} = \left\{ u\in H^1_0(\Omega) \ : \ u_+\neq 0, u_- \ne 0, \ \langle d\mathcal{E}^{\boldsymbol{\varepsilon}}(u), u_+\rangle = \langle d\mathcal{E}^{\boldsymbol{\varepsilon}}(u), u_-\rangle =0\right\}. \] It is standard \cites{CastroCossioNeuberger,BartschWethWillem,BartschWeth2003,BartschWeth2005} to prove the \begin{proposition} Assume that $q^\varepsilon_+$ is positive on $\Omega$ and $q^\varepsilon_-$ is negative on $\Omega$, so that $\mathcal{M}^\eps \neq \emptyset$, and define \[ d^{\boldsymbol{\varepsilon}} = \inf_{u\in \mathcal{M}^{\boldsymbol{\varepsilon}}} \mathcal{E}^{\boldsymbol{\varepsilon}}(u). \] There exists $u^{\boldsymbol{\varepsilon}} \in \mathcal{M}^{\boldsymbol{\varepsilon}}$ such that $\mathcal{E}^{\boldsymbol{\varepsilon}}(u^{\boldsymbol{\varepsilon}})=d^{\boldsymbol{\varepsilon}}$, and $u^{\boldsymbol{\varepsilon}}$ is a sign-changing solution of $(\mathcal{Q}^\eps)$.
\end{proposition} Our focus is the asymptotics of $u^{\boldsymbol{\varepsilon}}$ for a sequence ${\boldsymbol{\varepsilon}} \to (0, 0)$. We assume that $0 < c < \frac{\log \varepsilon_+}{\log \varepsilon_-} < C < \infty$, and we will write $\logEps$ instead of $\log \varepsilon_+$ or $\log \varepsilon_-$ in asymptotic expansions. We extend the definition of $U_\kappa$ given by \eqref{Ukappa} for $\kappa < 0$ by $U_\kappa=-U_{-\kappa}$ and $\rho_{\kappa}=\rho_{-\kappa}$. One still has, when $\abs{x}$ is large enough, $U_\kappa(x)=\frac{\kappa}{2\pi}\log \frac{\rho_\kappa}{\abs{x}}$. We also set \[ \mathcal{C}_\pm= \frac{\kappa_\pm^2}{4\pi} \log \rho_{\kappa_\pm} + \int_{B(0, \rho_{\kappa_\pm})}\Bigl(\frac{|\nabla U_{\kappa_\pm}|^2}{2} - \frac{U_{\kappa_\pm}^{p+1}}{p+1}\Bigr). \] The Kirchhoff--Routh function $\mathcal{W}$ is defined for $(x_+, x_-)\in \Omega^2_*=\{ (y_+, y_-) \in \Omega^2 \: :\: y_+ \ne y_-\}$ by \[ \begin{split} \mathcal{W}(x_+,x_-)= \, &\frac{\kappa_+^2}{2}H(x_+, x_+) + \frac{\kappa_-^2}{2}H(x_-, x_-) + \kappa_+\kappa_- G(x_+, x_-)\\ & -\frac{\kappa_+}{2\pi} q(x_+) -\frac{\kappa_-}{2\pi} q(x_-). \end{split} \] We set \begin{equation} \begin{aligned}\label{defiqNodal} A^{\boldsymbol{\varepsilon}}_\pm &=\Big\{ x \in \Omega \: :\: \pm u^{\boldsymbol{\varepsilon}}(x)> \pm q^{\boldsymbol{\varepsilon}}(x) + \frac{\kappa_\pm}{2\pi} \log \frac{1}{\varepsilon_\pm}\Big\}, \\ \omega_\pm^{\boldsymbol{\varepsilon}}&=\pm \frac{1}{\varepsilon_\pm{}^2} f(\pm(u^{\boldsymbol{\varepsilon}}-q^{\boldsymbol{\varepsilon}})), \\ \kappa^{\boldsymbol{\varepsilon}}_\pm&=\int_{\Omega} \omega_\pm^{\boldsymbol{\varepsilon}}, \\ x^{\boldsymbol{\varepsilon}}_\pm&=\frac{1}{\kappa^{\boldsymbol{\varepsilon}}_\pm}\int_{\Omega} x \, \omega^{\boldsymbol{\varepsilon}}_\pm(x)\, dx, \\ \rho^{\boldsymbol{\varepsilon}}_\pm&=\rho_{\kappa^{\boldsymbol{\varepsilon}}_\pm}.
\end{aligned} \end{equation} We will prove \begin{theorem}\label{thm:K3} As ${\boldsymbol{\varepsilon}} \to 0$, we have \[ \begin{split} u^{\boldsymbol{\varepsilon}}= \, & U_{\kappa_+^{\boldsymbol{\varepsilon}}} \Big(\frac{\cdot-x^{\boldsymbol{\varepsilon}}}{\varepsilon_+}\Big)+\kappa_+^{\boldsymbol{\varepsilon}}\Bigl(\frac{1}{2\pi} \log \frac{1}{\varepsilon_+ \rho_{+}^{\boldsymbol{\varepsilon}}}+ H(x^{\boldsymbol{\varepsilon}}_+, \cdot)\Bigr)\\ &+U_{\kappa_-^{\boldsymbol{\varepsilon}}} \Big(\frac{\cdot-x^{\boldsymbol{\varepsilon}}}{\varepsilon_-}\Big)+\kappa_-^{\boldsymbol{\varepsilon}}\Bigl(\frac{1}{2\pi} \log \frac{1}{\varepsilon_- \rho_-^{\boldsymbol{\varepsilon}}}+ H(x^{\boldsymbol{\varepsilon}}_-, \cdot)\Bigr)+o(1), \end{split} \] in $\mathrm{W}^{2, 1}_\mathrm{loc}(\Omega)$, in $\mathrm{W}^{1, 2}_0(\Omega)$, and in $\mathrm{L}^\infty(\Omega)$, where \[ \kappa^{\boldsymbol{\varepsilon}}=\kappa_\pm+\frac{2\pi}{\log \frac{1}{\varepsilon_\pm}}\Bigl(q(x^{\boldsymbol{\varepsilon}})-\kappa_\pm H(x^{\boldsymbol{\varepsilon}}, x^{\boldsymbol{\varepsilon}})-\kappa_\mp G(x_\pm, x_\mp) -\frac{\kappa}{2\pi} \log \frac{1}{\rho_{\kappa_\pm}} \Bigr)+o(\logEps^{-1}), \] and \[ \mathcal{W}(x^{\boldsymbol{\varepsilon}}_+,x^{\boldsymbol{\varepsilon}}_-) \to \sup_{(x_+,x_-) \in \Omega^2_*} \mathcal{W}(x_+,x_-). \] One also has \[ B(x^{\boldsymbol{\varepsilon}}_\pm, \Bar{r}_\pm^{\boldsymbol{\varepsilon}}) \subset A_\pm^{\boldsymbol{\varepsilon}} \subset B(x_\pm^{\boldsymbol{\varepsilon}}, \mathring{r}_\pm^{\boldsymbol{\varepsilon}}), \] with $\Bar{r}^{\boldsymbol{\varepsilon}}_\pm=\varepsilon_\pm \rho_{\kappa_\pm}+o(\varepsilon_\pm)$ and $\mathring{r}_\pm^{\boldsymbol{\varepsilon}}=\varepsilon_\pm \rho_{\kappa_\pm} +o(\varepsilon_\pm)$. 
Finally, \[ \mathcal{E}^{\boldsymbol{\varepsilon}} (u^{\boldsymbol{\varepsilon}})= \frac{\kappa^2_+}{4\pi} \log \frac{1}{\varepsilon_+}+\frac{\kappa^2_-}{4\pi} \log \frac{1}{\varepsilon_-}-\mathcal{W}(x^{\boldsymbol{\varepsilon}}_+,x^{\boldsymbol{\varepsilon}}_-)+\mathcal{C}_++\mathcal{C}_-+o(1). \] \end{theorem} \subsection{Upper bounds on the energy} We compute upper bounds on $d^\varepsilon$ by constructing suitable elements in $\mathcal{M}^\eps$. \begin{lemma} \label{lemNodalUpperBound} For every $\Hat{x}_+, \Hat{x}_- \in \Omega$ such that $\Hat{x}_+\ne \Hat{x}_-$, there exists \[ \Hat{\kappa}^{\pm}_{{\boldsymbol{\varepsilon}}}=\kappa_\pm+\frac{2\pi}{\log \dfrac{1}{\varepsilon_\pm}}\Bigl( q(\Hat{x}_\pm)-\kappa_\pm H(\Hat{x}_\pm, \Hat{x}_\pm)-\kappa_{\mp} G(\Hat{x}_\pm, \Hat{x}_\mp)+\dfrac{\kappa_\pm}{2\pi} \log \rho_{\kappa_\pm} \Bigr)+O\bigl(\logEps^{-2}\bigr), \] such that, if \[ \begin{split} \Hat{u}^{\boldsymbol{\varepsilon}}(x) =&U_{\Hat{\kappa}_+^{\boldsymbol{\varepsilon}}}\Bigl(\frac{x-\Hat{x}_+}{\varepsilon_+}\Bigr)+\Hat{\kappa}^{\boldsymbol{\varepsilon}}_+ \Bigl( \frac{1}{2\pi} \log \frac{1}{\varepsilon_+\Hat{\rho}_+^{\boldsymbol{\varepsilon}}}+H(\Hat{x}_+, x) \Bigr)\\ &+U_{\Hat{\kappa}_-^{\boldsymbol{\varepsilon}}}\Bigl(\frac{x-\Hat{x}_-}{\varepsilon_-}\Bigr)+\Hat{\kappa}^{\boldsymbol{\varepsilon}}_- \Bigl( \frac{1}{2\pi} \log \frac{1}{\varepsilon_-\Hat{\rho}_-^{\boldsymbol{\varepsilon}}}+H(\Hat{x}_-, x) \Bigr), \end{split} \] then \[ \Hat{u}^{\boldsymbol{\varepsilon}}\in \mathcal{M}^{\boldsymbol{\varepsilon}}. \] Moreover, \[ \Hat{A}_\pm^{\boldsymbol{\varepsilon}}:=\bigl\{ x \: :\: \pm \Hat{u}^{\boldsymbol{\varepsilon}}(x) > \pm q^{\boldsymbol{\varepsilon}}_\pm(x) \bigr\} \subset B(\Hat{x}_\pm, \Hat{r}_\pm^{\boldsymbol{\varepsilon}}), \\ \] with $\Hat{r}_\pm^{\boldsymbol{\varepsilon}}=\varepsilon_\pm \rho_{\kappa_\pm}+o({\boldsymbol{\varepsilon}})$. 
\end{lemma} \begin{proof} For every $\boldsymbol{\sigma}=(\sigma_+, \sigma_-) \in \mathbf{R}^2$, we define \begin{gather*} \Hat{\kappa}^\pm_{{\boldsymbol{\varepsilon}}, \boldsymbol{\sigma}}=\frac{q^{\boldsymbol{\varepsilon}}_\pm(x_\pm)-\kappa_\mp G(x_\pm, x_\mp)+\sigma_\pm}{\frac{1}{2\pi} \log \frac{1}{\varepsilon_\pm \rho_{\kappa_\pm}}+H(x_\pm, x_\pm)},\\ \begin{split} \Hat{u}_{{\boldsymbol{\varepsilon}}, \boldsymbol{\sigma}} =&U_{\Hat{\kappa}_+^{\boldsymbol{\varepsilon}}}\Bigl(\frac{x-\Hat{x}_+}{\varepsilon_+}\Bigr)+\Hat{\kappa}^\varepsilon_+ \Bigl( \frac{1}{2\pi} \log \frac{1}{\varepsilon_+\Hat{\rho}_+^{\boldsymbol{\varepsilon}}}+H(\Hat{x}_+, x) \Bigr)\\ &+U_{\Hat{\kappa}_-^{\boldsymbol{\varepsilon}}}\Bigl(\frac{x-\Hat{x}_-}{\varepsilon_-}\Bigr)+\Hat{\kappa}^\varepsilon_- \Bigl( \frac{1}{2\pi} \log \frac{1}{\varepsilon_-\Hat{\rho}_-^{\boldsymbol{\varepsilon}}}+H(\Hat{x}_-, x) \Bigr), \end{split} \end{gather*} and we set \[ g^{{\boldsymbol{\varepsilon}}}_\pm(\boldsymbol{\sigma})=\langle d \mathcal{E}_{{\boldsymbol{\varepsilon}}} (\Hat{u}^{{\boldsymbol{\varepsilon}}, \boldsymbol{\sigma}}), \Hat{u}^{{\boldsymbol{\varepsilon}}, \sigma_\pm} \rangle. \] We compute as in the proof of Lemma~\ref{lemmaHatuNehari}, \begin{equation}\label{eq:badaboum1} \int_{\Omega} \abs{\nabla u^{{\boldsymbol{\varepsilon}}, \sigma_\pm}}^2 =\int_{B(0, \rho_{\Hat{\kappa}_{{\boldsymbol{\varepsilon}}, \boldsymbol{\sigma}}})} \abs{\nabla U_{\Hat{\kappa}_{{\boldsymbol{\varepsilon}}, \boldsymbol{\sigma}}}}^2+\Hat{\kappa}^{{\boldsymbol{\varepsilon}}, \boldsymbol{\sigma}}_{\pm} \Bigl(\frac{\kappa_\pm}{2\pi} \log \frac{1}{\varepsilon_\pm}+q(\Hat{x}_\pm)+\sigma_\pm\Bigr)+O(\abs{{\boldsymbol{\varepsilon}}}). 
\end{equation} We also set \[ \Hat{\omega}^{{\boldsymbol{\varepsilon}}, \boldsymbol{\sigma}}=\frac{1}{\varepsilon_+^2} f(\Hat{u}^{{\boldsymbol{\varepsilon}}, \boldsymbol{\sigma}} - q^{\boldsymbol{\varepsilon}}_+) - \frac{1}{\varepsilon_-^2} f(q^{\boldsymbol{\varepsilon}}_- -\Hat{u}^{{\boldsymbol{\varepsilon}}, \boldsymbol{\sigma}}), \] and we compute as in the proof of Lemma~\ref{lemmaHatuNehari} \begin{multline}\label{eq:badaboum2} \frac{1}{\varepsilon_\pm^2}\int_{\Omega} \Hat{\omega}^{{\boldsymbol{\varepsilon}}, \boldsymbol{\sigma}} \Hat{u}^{{\boldsymbol{\varepsilon}}, \boldsymbol{\sigma}}_\pm =\int_{\mathbf{R}^2} F(U_{\kappa_\pm}+\sigma_\pm) \\+ (\tfrac{\kappa}{2\pi} \log \tfrac{1}{\varepsilon_\pm}+q(\Hat{x}_\pm)+\sigma_\pm)\int_{\mathbf{R}^2}f(U_{\kappa_\pm}+\sigma_\pm)+o(1). \end{multline} Combining \eqref{eq:badaboum1} and \eqref{eq:badaboum2} we obtain \[ g^{\boldsymbol{\varepsilon}}_\pm(\boldsymbol{\sigma})=\frac{\kappa_\pm}{2\pi} \log \frac{1}{\varepsilon_\pm} \Bigl( \int_{\mathbf{R}^2} f(U_{\kappa_\pm})-f(U_{\kappa_\pm}+\sigma_\pm)\Bigr)+O(1). \] By the Poincar\'e--Miranda Theorem (see e.g.\ \cite{Kulpa1997}), when $\abs{{\boldsymbol{\varepsilon}}}$ is small, there exists $\boldsymbol{\sigma}_{\boldsymbol{\varepsilon}}$ such that $g^{\boldsymbol{\varepsilon}}(\boldsymbol{\sigma}_{\boldsymbol{\varepsilon}})=0$ and $\boldsymbol{\sigma}_{\boldsymbol{\varepsilon}}=o(1)$ as ${\boldsymbol{\varepsilon}} \to 0$. \end{proof} Evaluating $\mathcal{E}_{\boldsymbol{\varepsilon}}(\Hat{u}_{\boldsymbol{\varepsilon}})$ yields \begin{corollary}\label{cor:Nodalupper} As $\abs{{\boldsymbol{\varepsilon}}} \to 0$, we have \begin{equation*}\begin{split} d^{\boldsymbol{\varepsilon}} \leq\ &\frac{\kappa^2_+}{4\pi} \log \frac{1}{\varepsilon_+}+\frac{\kappa^2_-}{4\pi} \log \frac{1}{\varepsilon_-}-\mathcal{W}(x_+,x_-)+\mathcal{C}_++\mathcal{C}_-+o(1). 
\end{split}\end{equation*} \end{corollary} \subsection{Asymptotic behavior of solutions} We shall prove the counterpart of Proposition~\ref{prop:1mai} \begin{proposition}\label{prop:1maiNodal} Let $(v^{\boldsymbol{\varepsilon}})$ be a family of solutions to \eqref{problemPeps} such that $v^{\boldsymbol{\varepsilon}}_\pm \ne 0$ \begin{equation} \label{assumptEnergyUpperboundNodal} \mathcal{E}^{\boldsymbol{\varepsilon}}(v^{\boldsymbol{\varepsilon}}) \le \frac{\kappa^2_+}{4\pi} \log \frac{1}{\varepsilon_+}+\frac{\kappa^2_-}{4\pi} \log \frac{1}{\varepsilon_-}+O(1), \end{equation} as ${\boldsymbol{\varepsilon}} \to 0$. Define the quantities $A_\pm^{\boldsymbol{\varepsilon}}$, $\omega_\pm^{\boldsymbol{\varepsilon}}$, $\kappa_\pm^{\boldsymbol{\varepsilon}}$, $x_\pm^{\boldsymbol{\varepsilon}}$ and $\rho_\pm^{\boldsymbol{\varepsilon}}$ for $v^{\boldsymbol{\varepsilon}}$ as in \eqref{defiqNodal} for $u^{\boldsymbol{\varepsilon}}$. Then \[ \begin{split} v^{\boldsymbol{\varepsilon}}= \, & U_{\kappa_+^{\boldsymbol{\varepsilon}}} \Big(\frac{\cdot-x^{\boldsymbol{\varepsilon}}}{\varepsilon_+}\Big)+\kappa_+^{\boldsymbol{\varepsilon}}\Bigl(\frac{1}{2\pi} \log \frac{1}{\varepsilon_+ \rho_{+}^{\boldsymbol{\varepsilon}}}+ H(x^{\boldsymbol{\varepsilon}}_+, \cdot)\Bigr)\\ &+U_{\kappa_-^{\boldsymbol{\varepsilon}}} \Big(\frac{\cdot-x^{\boldsymbol{\varepsilon}}}{\varepsilon_-}\Big)+\kappa_-^{\boldsymbol{\varepsilon}}\Bigl(\frac{1}{2\pi} \log \frac{1}{\varepsilon_- \rho_-^{\boldsymbol{\varepsilon}}}+ H(x^{\boldsymbol{\varepsilon}}_-, \cdot)\Bigr)+o(1), \end{split} \] \text{in $\mathrm{W}^{2, 1}_\mathrm{loc}(\Omega)$, in $\mathrm{W}^{1, 2}_0(\Omega)$, and in $\mathrm{L}^\infty(\Omega)$}, where \[ \kappa^{\boldsymbol{\varepsilon}}=\kappa_\pm+\frac{2\pi}{\log \frac{1}{{\boldsymbol{\varepsilon}}_\pm}}\Bigl(q(x^{\boldsymbol{\varepsilon}})-\kappa_\pm H(x^{\boldsymbol{\varepsilon}}, x^{\boldsymbol{\varepsilon}})-\kappa_\mp G(x_\pm, x_\mp) -\frac{\kappa}{2\pi} \log \frac{1}{\rho_{\kappa_\pm}} 
\Bigr)+o(\logEps^{-1}). \] In particular, we have \[ \mathcal{E}^{\boldsymbol{\varepsilon}} (v^{\boldsymbol{\varepsilon}})= \frac{\kappa^2_+}{4\pi} \log \frac{1}{\varepsilon_+}+\frac{\kappa^2_-}{4\pi} \log \frac{1}{\varepsilon_-}-\mathcal{W}(x_+,x_-)+\mathcal{C}_++\mathcal{C}_-+o(1). \] and \[ B(x^{\boldsymbol{\varepsilon}}_\pm, \Bar{r}_\pm^{\boldsymbol{\varepsilon}}) \subset A_\pm^{\boldsymbol{\varepsilon}} \subset B(x_\pm^{\boldsymbol{\varepsilon}}, \mathring{r}_\pm^{\boldsymbol{\varepsilon}}), \] with $\Bar{r}^{\boldsymbol{\varepsilon}}_\pm=\varepsilon_\pm \rho_{\kappa_\pm}+o(\varepsilon_\pm)$ and $\mathring{r}_\pm^{\boldsymbol{\varepsilon}}=\varepsilon_\pm \rho_{\kappa_\pm} +o(\varepsilon_\pm)$. \end{proposition} In other words, $v^{\boldsymbol{\varepsilon}}$ satisfies the same asymptotics as the one stated in Theorem~\ref{thm:K1} for $v^{\boldsymbol{\varepsilon}}$ except for the convergence of $x^{\boldsymbol{\varepsilon}}$. \subsubsection{Step 1: First quantitative properties of the solutions} \begin{proposition} \label{propositionNodalEstimatesueps} We have, as $\abs{{\boldsymbol{\varepsilon}}} \to 0$, \begin{gather*} \muleb{2}(A^{\boldsymbol{\varepsilon}}_\pm) =O\bigl(\logEps^{-1}\bigr), \\ \int_{A^{\boldsymbol{\varepsilon}}_+} \abs{\nabla (v^{\boldsymbol{\varepsilon}}-q^{\boldsymbol{\varepsilon}}_\pm)}^2 =O(1), \\ \frac{1}{\varepsilon_{\pm}^2}\int_{A^{\boldsymbol{\varepsilon}}_\pm} F(\pm(v^{\boldsymbol{\varepsilon}}-q^{\boldsymbol{\varepsilon}}_\pm)) =O(1),\\ \int_{\Omega\setminus A^{\boldsymbol{\varepsilon}}_\pm} \abs{\nabla v^{\boldsymbol{\varepsilon}}_\pm}^2 \leq \frac{\kappa^2_\pm}{2\pi} \log\frac{1}{{\boldsymbol{\varepsilon}}_\pm} + O(1), \\ \pm \int_{\Omega} \omega^{\boldsymbol{\varepsilon}}_\pm \leq \pm \kappa_\pm + O\bigl(\logEps^{-1}\bigr). 
\end{gather*} \end{proposition} \begin{proof} First note that by Theorem~\ref{thm:K1}, \[ \mathcal{E}_{\boldsymbol{\varepsilon}}(v^{\boldsymbol{\varepsilon}}_\pm) \ge \frac{\kappa_\pm^2}{4\pi} \log \frac{1}{\varepsilon_\pm}+O(1). \] By \eqref{assumptEnergyUpperboundNodal}, this implies that \[ \mathcal{E}_{\boldsymbol{\varepsilon}}(v^{\boldsymbol{\varepsilon}}_\pm) =\frac{\kappa_\pm^2}{4\pi} \log \frac{1}{\varepsilon_\pm}+O(1). \] We are now in position to proceed as in the proof of Proposition~\ref{propositionEstimatesueps}, testing $(\mathcal{Q}^{\boldsymbol{\varepsilon}})$ against $v^{\boldsymbol{\varepsilon}}_+$ and $v^{\boldsymbol{\varepsilon}}_-$ instead of $v^{\boldsymbol{\varepsilon}}$, then against $\min(v^{\boldsymbol{\varepsilon}}, q^{\boldsymbol{\varepsilon}}_+)$ and $\max(v^{\boldsymbol{\varepsilon}}, q^{\boldsymbol{\varepsilon}}_-)$ instead of $\min(v^{\boldsymbol{\varepsilon}}, q^{\boldsymbol{\varepsilon}})$, and finally against $(v^{\boldsymbol{\varepsilon}}-q^{\boldsymbol{\varepsilon}}_+)_+$ and $(q^{\boldsymbol{\varepsilon}}_--v^{\boldsymbol{\varepsilon}})_+$ instead of $(v^{\boldsymbol{\varepsilon}}-q^{\boldsymbol{\varepsilon}})_+$. We skip the details. \end{proof} \subsubsection{Step 2: Structure of the vorticity set} In this subsection we further describe the vorticity set $A^{\boldsymbol{\varepsilon}}=A^{\boldsymbol{\varepsilon}}_+\cup A^{\boldsymbol{\varepsilon}}_-$. Since it is an open set, it contains at most countably many connected components that we label $A^{\boldsymbol{\varepsilon}}_{\pm, i}$, $i \in I^{\boldsymbol{\varepsilon}}_\pm$. First we have a control on the total area and on the diameter of each connected component.
\begin{lemma} \label{lemmaNodalAreaDiameter} If $\abs{{\boldsymbol{\varepsilon}}}$ is sufficiently small, we have \[ \muleb{2}(A^{\boldsymbol{\varepsilon}}_\pm) \le C \varepsilon_\pm^2 \] and, for every $i \in I^{\boldsymbol{\varepsilon}}_\pm$, \begin{equation} \label{ineqNodalVorticityDiameter} \diam(A^{\boldsymbol{\varepsilon}}_{\pm, i}) \le C \varepsilon_\pm. \end{equation} \end{lemma} \begin{proof} It suffices to repeat the arguments in the proof of the corresponding lemma for a single vortex. \end{proof} \begin{lemma} \label{lemmaNodalVortexSplit} There exist constants $\gamma, C, c>0$ such that, when $\abs{{\boldsymbol{\varepsilon}}}$ is small enough, if \begin{equation} \label{eqNodalSplitVortices} \int_{A^{\boldsymbol{\varepsilon}}_{\pm, i}} \abs{\nabla (v^{\boldsymbol{\varepsilon}} - q^{\boldsymbol{\varepsilon}}_\pm)}^2 > \gamma^2, \end{equation} then for every $j \in I^{\boldsymbol{\varepsilon}}_\mp$, \begin{gather} \label{ineqNodalLowerBoundMeas} \muleb{2}(A_{\pm, i}^{\boldsymbol{\varepsilon}})\ge c\varepsilon^2_\pm, \\ \label{ineqNodalLowerBoundDiam} \diam(A^{\boldsymbol{\varepsilon}}_{\pm, i})\ge c\varepsilon_\pm, \\ \label{ineqNodalLowerBoundBoundary} \dist(A^{\boldsymbol{\varepsilon}}_{\pm, i}, \partial \Omega)\ge c, \\ \label{ineqNodalSignDistance} \dist(A^{\boldsymbol{\varepsilon}}_{\pm, i}, A^{\boldsymbol{\varepsilon}}_{\mp, j})\ge c, \end{gather} while if \eqref{eqNodalSplitVortices} does not hold, then \[ \int_{A^{\boldsymbol{\varepsilon}}_{\pm, i}} \abs{\omega^{\boldsymbol{\varepsilon}}}^s \le C \mathbf{N}orm{\nabla q}_{\mathrm{L}^r(A^{\boldsymbol{\varepsilon}}_{\pm, i})}^{sp} \muleb{2}(A^{\boldsymbol{\varepsilon}}_{\pm,i})^{1+sp(\frac{1}{2}-\frac{1}{r})}, \] where $C$ only depends on $s \ge 1$. \end{lemma} \begin{proof} The proof is very similar to the one of Lemma~\ref{lemmaVortexSplit} except for \eqref{ineqNodalSignDistance} which remains to be proved.
To that purpose, we consider the function \[ \eta_{\boldsymbol{\varepsilon}}=\frac{\frac{v^{\boldsymbol{\varepsilon}}_+}{\kappa_+}+\frac{v^{\boldsymbol{\varepsilon}}_-}{\kappa_+}}{\log \frac{1}{\varepsilon_+\varepsilon_-}}. \] We have \[ \eta_{\boldsymbol{\varepsilon}} \restrictedto{A^{\boldsymbol{\varepsilon}}_{\pm, i}}=\frac{\log \frac{1}{\varepsilon_+}}{\log \frac{1}{\varepsilon_+\varepsilon_-}}+O\bigl(\logEps^{-1}\bigr), \] and \[ \eta_{\boldsymbol{\varepsilon}} \restrictedto{A^{\boldsymbol{\varepsilon}}_{\mp, j}}=\frac{- \log \frac{1}{\varepsilon_-}}{\log \frac{1}{\varepsilon_+\varepsilon_-}}+O\bigl(\logEps^{-1}\bigr). \] Therefore, \[ \frac{2\pi}{\capa(A^{\boldsymbol{\varepsilon}}_+, \mathbf{R}^2\setminus A^{\boldsymbol{\varepsilon}}_-)}\ge \log \frac{1}{\varepsilon_+ \varepsilon_-}+O(1). \] Using Proposition~\ref{propositionCapacityBoundDistance} with $\Omega=\mathbf{R}^2 \setminus \overline{A^{\boldsymbol{\varepsilon}}_{\pm, i}}$ and $K=\overline{A^{\boldsymbol{\varepsilon}}_{\mp, j}}$, and applying \eqref{ineqNodalVorticityDiameter} to $A^{\boldsymbol{\varepsilon}}_{\mp, j}$ and \eqref{ineqNodalLowerBoundMeas} to $A^{\boldsymbol{\varepsilon}}_{\pm, i}$, we are led to \[ \log \frac{1}{\varepsilon_+\varepsilon_-} \le \log C \Bigl( 1+\frac{\dist(A^{\boldsymbol{\varepsilon}}_{\pm, i}, A^{\boldsymbol{\varepsilon}}_{\mp, j})}{\varepsilon_\mp}\Bigr)\Bigl( 1+\frac{\dist(A^{\boldsymbol{\varepsilon}}_{\pm, i}, A^{\boldsymbol{\varepsilon}}_{\mp, j})}{\varepsilon_\pm}\Bigr)+O(1), \] which can not hold if $\dist(A^{\boldsymbol{\varepsilon}}_{\pm, i}, A^{\boldsymbol{\varepsilon}}_{\mp, j}) \to 0$. 
\end{proof} The vorticity set is split into four subsets: \begin{align*} V^{\boldsymbol{\varepsilon}}_\pm&=\bigcup \Bigl\{A_{\pm, i}^{\boldsymbol{\varepsilon}} \: :\: \int_{A_{\pm, i}^{\boldsymbol{\varepsilon}}} \abs{\nabla (v^{\boldsymbol{\varepsilon}} - q^{\boldsymbol{\varepsilon}}_\pm)}^2 \le \gamma^2\Bigr\}, \\ E^{\boldsymbol{\varepsilon}}_\pm&=\bigcup \Bigl\{A_{\pm, i}^{\boldsymbol{\varepsilon}} \: :\: \int_{A_{\pm, i}^{\boldsymbol{\varepsilon}}} \abs{\nabla (v^{\boldsymbol{\varepsilon}} - q^{\boldsymbol{\varepsilon}}_\pm)}^2 > \gamma^2\Bigr\}. \end{align*} By Proposition~\ref{propositionNodalEstimatesueps}, the sets $E^{\boldsymbol{\varepsilon}}_+$ and $E^{\boldsymbol{\varepsilon}}_-$ contain finitely many connected components, and by \eqref{ineqNodalLowerBoundMeas}, \eqref{ineqNodalLowerBoundDiam}, \eqref{ineqNodalLowerBoundBoundary} and \eqref{ineqNodalSignDistance}, they can thus be split as $E^{\boldsymbol{\varepsilon}}_\pm=\bigcup_{j=1}^{k^{\boldsymbol{\varepsilon}}_\pm} E^{\boldsymbol{\varepsilon}}_{\pm, j}$, where $E^{\boldsymbol{\varepsilon}}_{\pm, j}$ are nonempty open sets such that \begin{gather*} \frac{\dist(E^{\boldsymbol{\varepsilon}}_{\pm, i}, E^{\boldsymbol{\varepsilon}}_{\pm, j})}{\varepsilon_\pm} \to \infty,\\ \liminf_{{\boldsymbol{\varepsilon}} \to 0} \dist(E^{\boldsymbol{\varepsilon}}_{\pm, i}, E^{\boldsymbol{\varepsilon}}_{\mp, j}) >0,\\ \liminf_{{\boldsymbol{\varepsilon}} \to 0} \dist(E^{\boldsymbol{\varepsilon}}_{\pm, i}, \partial \Omega) >0, \\ \limsup_{{\boldsymbol{\varepsilon}} \to 0} \frac{\diam (E^{\boldsymbol{\varepsilon}}_{\pm, i})}{\varepsilon_\pm} < \infty, \end{gather*} as ${\boldsymbol{\varepsilon}} \to 0$. By definition of $E^{\boldsymbol{\varepsilon}}_\pm$ and by \eqref{ineqVortexEnergy}, $k^{\boldsymbol{\varepsilon}}_+$ and $k^{\boldsymbol{\varepsilon}}_-$ remain bounded as ${\boldsymbol{\varepsilon}} \to 0$.
\subsubsection{Step 3: Small scale asymptotics} We set \begin{align*} \omega^{\boldsymbol{\varepsilon}}_{\pm, v}&=\omega^{\boldsymbol{\varepsilon}} \charfun{V_{\pm}^{\boldsymbol{\varepsilon}}}, & \omega^{\boldsymbol{\varepsilon}}_{\pm, i}&=\omega^{\boldsymbol{\varepsilon}} \charfun{E^{\boldsymbol{\varepsilon}}_{\pm, i}}, \\ \kappa^{\boldsymbol{\varepsilon}}_{\pm, i}&=\int_{\Omega} \omega^{\boldsymbol{\varepsilon}}_{\pm, i}, & x^{\boldsymbol{\varepsilon}}_{\pm, i}&=\frac{1}{\kappa^{\boldsymbol{\varepsilon}}_{\pm, i}}\displaystyle\int_{\Omega} x\omega^{\boldsymbol{\varepsilon}}_{\pm, i}(x)\, dx. \end{align*} Using the analogues of Lemma~\ref{lemmaVanishingVorticity} and Lemma~\ref{lemmaSmallScaleLocalEstimates}, one obtains the analogue of Lemma~\ref{lemmaLocalAsymptotics}. \begin{lemma} \label{lemmaNodalLocalAsymptotics} When ${\boldsymbol{\varepsilon}}$ is small, we have $k^{\boldsymbol{\varepsilon}}_+=k^{\boldsymbol{\varepsilon}}_-=1$, and \begin{multline*} \kappa^{\boldsymbol{\varepsilon}}_{\pm, 1}=\kappa_\pm +\frac{2\pi}{\log \frac{1}{\varepsilon_\pm}}\Bigl(q(x^{\boldsymbol{\varepsilon}}_\pm)-\kappa_\pm H(x^{\boldsymbol{\varepsilon}}_\pm, x^{\boldsymbol{\varepsilon}}_\pm)-\kappa_{\mp} G(x^{\boldsymbol{\varepsilon}}_{\pm}, x^{\boldsymbol{\varepsilon}}_{\mp})-\frac{\kappa_\pm}{2\pi} \log \frac{1}{\rho_{\kappa_\pm}} \Bigr) \\ +o({\logEps}^{-1}) \end{multline*} and $v^{\boldsymbol{\varepsilon}}_{\pm} \to U_{\kappa_\pm}$ in $\mathrm{W}^{1, r}_{\mathrm{loc}}(\mathbf{R}^2)$. 
\end{lemma} \subsubsection{Step 4: Global asymptotics} The counterpart of Proposition~\ref{propositionAsymptoticsW21} is now \begin{proposition} \label{propositionAsymptoticsW21Nodal} We have \[ \begin{split} v^{\boldsymbol{\varepsilon}}= & \ U_{\kappa_{+,1}^{\boldsymbol{\varepsilon}}}\Bigl(\frac{\cdot-x^{\boldsymbol{\varepsilon}}_{+, 1}}{\varepsilon_+}\Bigr)+\kappa^{\boldsymbol{\varepsilon}}_{+, 1}\Bigl(\frac{1}{2\pi} \log \frac{1}{\varepsilon_+ \rho_{\kappa_{+, 1}^{\boldsymbol{\varepsilon}}}}+H(x^{\boldsymbol{\varepsilon}}_{+, 1}, \cdot)\Bigr) \\ &+U_{\kappa_{-,1}^{\boldsymbol{\varepsilon}}}\Bigl(\frac{\cdot-x^{\boldsymbol{\varepsilon}}_{-, 1}}{\varepsilon_-}\Bigr)+\kappa^{\boldsymbol{\varepsilon}}_{-, 1}\Bigl(\frac{1}{2\pi} \log \frac{1}{\varepsilon_- \rho_{\kappa_{-, 1}^{\boldsymbol{\varepsilon}}}}+H(x^{\boldsymbol{\varepsilon}}_{-, 1}, \cdot)\Bigr)+o(1) \end{split} \] in $\mathrm{W}^{2, 1}_{\mathrm{loc}}(\Omega)$, in $\mathrm{W}^{1, 2}_0(\Omega)$, and in $\mathrm{L}^\infty(\Omega)$. \end{proposition} We have now all the ingredients to complete the \begin{proof}[Proof of Proposition~\ref{prop:1maiNodal}] It follows from the combination of Lemma~\ref{lemmaNodalLocalAsymptotics}, Proposition~\ref{propositionAsymptoticsW21Nodal} and the counterparts of Corollaries~\ref{corollaryAsymptotic} and \ref{corEnergy}. \end{proof} \begin{proof}[Proof of Theorem~\ref{thm:K3}] Since the solutions have the upper bound Corollary~\ref{cor:Nodalupper}, one can conclude from Proposition~\ref{prop:1maiNodal}. \end{proof} \section{Desingularized solutions of the Euler equation} \label{sect:resu} \subsection{Bounded domains} In bounded domains we shall successively consider stationary vortices, rotating vortices and stationary pairs of vortices. \subsubsection{Stationary vortices in simply-connected bounded domains} Let us first deduce Theorem~\ref{thm:resu} from Theorem~\ref{thm:K1}. 
\begin{proof}[Proof of Theorem~\ref{thm:resu}] Take $q=-\psi_0$, where $\psi_0$ satisfies \eqref{eqpsi0}. One checks that $\psi_0 \in W^{1+\frac{1}{s}, s}(\Omega)$ so that $u \in W^{1, r}(\Omega)$ for every $r < \infty$. Define $\mathbf{v}_\varepsilon=(\nabla u_\varepsilon)^\perp$ where $u_\varepsilon$ is given by Proposition~\ref{prop:2.1}. The conclusion then follows from Theorem~\ref{thm:K1}. \end{proof} We have constructed in Theorem~\ref{thm:resu} a family of solutions that concentrates around a global maximum of the Kirchhoff--Routh function $\mathcal{W}$; it is also possible to construct family of solutions that concentrate around a \emph{local} maximum of $\mathcal{W}$: \begin{theorem}\label{thmLocalMinimum} Let $\Omega \subset \mathbf{R}^2$ be a bounded simply-connected smooth domain and $v_n:\partial \Omega \to \mathbf{R}\in L^s(\partial \Omega)$ for some $s>1$ be such that $\int_{\partial \Omega} v_n = 0.$ Let $\kappa >0$ be given and let $\Hat{x} \in \Omega$ be a strict local minimizer of $\mathcal{W}$. For $\varepsilon>0$ there exist smooth stationary solutions $\mathbf{v}_\varepsilon$ of the Euler equation in $\Omega$ with outward boundary flux given by $v_n$, corresponding to vorticities $\omega_\varepsilon$, such that ${\rm supp}(\omega_\varepsilon) \subset B(x_\varepsilon, C\varepsilon)$ for some $x_\varepsilon \in \Omega$ and $C>0$ not depending on $\varepsilon$. Moreover, as $\varepsilon \to 0$, \[ \int_\Omega \omega_\varepsilon \to \kappa \] and $x_\varepsilon \to \Hat{x}$. \end{theorem} \begin{proof} Assume that $\Hat{x}$ is the unique minimizer of $\mathcal{W}$ in $B(\Hat{x}, \rho)$. Define $q \in C^\infty(\Bar{\Omega})$ so that $q=-\psi_0$ in $B(\Hat{x}, \rho/2)$, where $\psi_0$ satisfies \eqref{eqpsi0} and for every $x \in \Omega$, \[ \kappa q(x)-\frac{\kappa^2}{2} H(x, x) > \kappa q(x_*)-\frac{\kappa^2}{2} H(x_*, x_*). \] We now apply Theorem~\ref{thm:K1} with $q$. By construction of $q$, we have $x_\varepsilon \to \Hat{x}$. 
But then, one has, still by Theorem~\ref{thm:K1} \[ u_\varepsilon(x) \ge \frac{\kappa}{2\pi} \log \frac{1}{\abs{x_\varepsilon-x}}+O(1). \] Therefore, when $\varepsilon$ is small enough, $u_\varepsilon \le -\psi_0+\frac{\kappa}{2\pi}\log \frac{1}{\varepsilon}$ and $u_\varepsilon \le q_\varepsilon$ in $\Omega \setminus B(x_\varepsilon, \rho/2)$. Therefore, for such $\varepsilon$, $u_\varepsilon$ solves $-\varepsilon^2\Delta u_\varepsilon=f(u_\varepsilon+\psi_0- \frac{\kappa}{2\pi} \log \frac{1}{\varepsilon})$ in $\Omega$. One can now take $\mathbf{v}_\varepsilon= (\nabla (u_\varepsilon+\psi_0))^\perp$ and show that this is a stationary solution to the Euler equation. \end{proof} \subsubsection{Stationary vortices in multiply-connected bounded domains} If $\Omega$ is not simply connected then $\Omega = \Omega_0 \setminus \bigcup_{h=1}^m \Omega_h$, where $\Omega_0, \dotsc, \Omega_m$ are bounded simply connected domains, one can prescribe for $h \in \{1, \dotsc, m\}$, the circulations $\int_{\partial \Omega_h} \mathbf{v}\cdot \tau=\gamma_h$. In that case $\mathbf{v}_0$ is the unique harmonic field whose normal component on the boundary is $v_n$; i.e., $\mathbf{v}_0$ satisfies \[ \left\{ \begin{aligned} \nabla \cdot \mathbf{v}_0&=0, & & \text{in $\Omega$}, \\ \nabla \times \mathbf{v}_0&=0, & & \text{in $\Omega$}, \\ n \cdot \mathbf{v}_0&=v_n& & \text{on $\partial \Omega$},\\ \int_{\partial \Omega_h} \mathbf{v}_0 \cdot \tau &=\gamma_h & &\text{for $h \in \{1, \dotsc, m\}$}. \end{aligned} \right. \] If $\int_{\partial \Omega_h} v_n=0$ for every $h \in \{1, \dotsc, m\}$, $\mathbf{v}_0=(\nabla \psi_0)^\perp$ where \begin{equation} \label{eqpsi0NotConnected} \left\{ \begin{aligned} -\Delta \psi_0&=0& &\text{in $\Omega$}, \\ -\frac{\partial \psi_0}{\partial \tau}&=v_n & & \text{on $\partial \Omega$},\\ \int_{\partial \Omega_h} \frac{\partial \psi_0}{\partial n} & = \gamma_h & &\text{for $h \in \{1, \dotsc, m\}$}.\\ \end{aligned} \right. 
\end{equation} The Kirchhoff--Routh function associated to the vortex dynamics is then given by \[ \mathcal{W}_*(x)=\frac{\kappa^2}{2}H_*(x, x)+\kappa \psi_0(x), \] where one should recall that $\psi_0$ depends on $v_n$ and $\gamma_h$ for $h \in \{1, \dotsc, m\}$. We have \begin{theorem}\label{thm:MultiplyConnected} Let $\Omega \subset \mathbf{R}^2$ be a bounded smooth domain and $v_n:\partial \Omega \to \mathbf{R}\in L^s(\partial \Omega)$ for some $s>1$ be such that $\int_{\partial \Omega_h} v_n = 0$ for every $h \in \{0, \dotsc, m\}$. Let $\gamma_h \in \mathbf{R}$ for $h \in \{1, \dotsc, m\}$ and let $\kappa >0$ be given. For $\varepsilon>0$ there exist smooth stationary solutions $\mathbf{v}_\varepsilon$ of the Euler equation in $\Omega$ with outward boundary flux given by $v_n$ and circulations given by $\gamma_h$, corresponding to vorticities $\omega_\varepsilon$, such that ${\rm supp}(\omega_\varepsilon) \subset B(x_\varepsilon, C\varepsilon)$ for some $x_\varepsilon \in \Omega$ and $C>0$ not depending on $\varepsilon$. Moreover, as $\varepsilon \to 0$, \[ \int_\Omega \omega_\varepsilon \to \kappa, \] and \[ \mathcal{W}_*(x_\varepsilon) \to \sup_{x \in \Omega} \mathcal{W}_*(x). \] \end{theorem} \begin{proof} The proof is almost identical to the one of Theorem~\ref{thm:resu}; it relies on Theorem~\ref{thm:K1m} instead of Theorem~\ref{thm:K1}. \end{proof} \begin{remark} One could similarly prove a counterpart of Theorem~\ref{thmLocalMinimum} for multiply connected domains. \end{remark} \subsubsection{Rotating vortices in a disc} If $\Omega$ is invariant under rotation, one can consider the Euler equation in a reference frame rotating with angular velocity $\alpha$: \[ \left\{ \begin{aligned} \nabla \cdot \mathbf{v} &= 0, \\ \mathbf{v}_t + \mathbf{v}\cdot \nabla \mathbf{v}&=-\nabla p+2\alpha \mathbf{v}^\perp-\alpha^2x. \end{aligned} \right. \] The vorticity of $\mathbf{v}$ with respect to an inertial frame is $\nabla \times \mathbf{v}+2\alpha$.
The movement of singular vortices is governed by Kirchhoff's law \eqref{equationKirchhoff}, where $\mathcal{W}$ is replaced by $\mathcal{W}_\alpha(x)=\mathcal{W}(x)+\sum_i \alpha \frac{\abs{x_i}^2}{2}$. The stream-function method to construct stationary solutions in a rotating reference frame can be adapted to this situation. If $-\Delta \psi=f(\psi)-2\alpha$, setting $\mathbf{v}=(\nabla \psi)^\perp$ and $p= F(\psi)-\frac{\alpha^2}{2}\abs{x}^2-\frac{1}{2}\abs{\nabla \psi}^2$ yields a solution\footnote{With the same velocity field, choosing as pressure $p=F(\psi)-2\alpha \psi-\frac{1}{2}\abs{\nabla \psi}^2$ would of course give a solution to the Euler equation in a Galilean frame. }. In particular, the solution is irrotational outside the set where $\psi > 0$. \begin{theorem} \label{thmRotating} Let $\rho > 0$, $\kappa >0$ and $\alpha > 0$. If $\kappa < 2\pi \alpha \rho^2$, then for $\varepsilon>0$ there exist smooth rotating solutions $\mathbf{v}_\varepsilon$ of the Euler equation in $B(0, \rho)$ with angular velocity $\alpha$, corresponding to vorticities $\omega_\varepsilon$, such that ${\rm supp}(\omega_\varepsilon)$ is contained in a disc of radius $O(\varepsilon)$ around a point rotating on the circle of radius $\sqrt{\rho^2-\frac{\kappa}{2\pi\alpha}}$. Moreover, as $\varepsilon \to 0$, \[ \int_\Omega \omega_\varepsilon \to \kappa. \] \end{theorem} \begin{proof} Take \[ q(x)=-\alpha \frac{\abs{x}^2}{2} \] and apply Theorem~\ref{thm:K1}. One checks that \[ \mathbf{v}_\varepsilon(x, t)=(\nabla u_\varepsilon)^\perp (R(\alpha t) x), \] where $R(\alpha t)$ denotes the rotation of angle $\alpha t$, satisfies the Euler equation. Since the function \[ \mathcal{W}_\alpha(x)=\frac{\kappa^2}{4\pi}\log \frac{\rho^2-\abs{x}^2}{\rho}+\frac{\kappa \alpha}{2} \abs{x}^2 \] attains its maximum on the circle of radius $\sqrt{\rho^2-\frac{\kappa}{2\pi\alpha}}$, one has the desired concentration result.
\end{proof} \begin{remark} When $\kappa > 2\pi \alpha \rho^2$, the vorticity concentrates around $0$; one thus recovers stationary solutions as in Theorem~\ref{thm:resu}. \end{remark} \subsubsection{Stationary pairs of vortices in bounded domains} \begin{theorem} Let $\Omega \subset \mathbf{R}^2$ be a bounded simply-connected smooth domain and $v_n:\partial \Omega \to \mathbf{R}\in L^s(\partial \Omega)$ for some $s>2$ be such that $\int_C v_n = 0$ over each connected component $C$ of $\partial \Omega$. Let $\kappa_+ >0$ and $\kappa_- < 0$ be given. For $\varepsilon>0$ there exist smooth stationary solutions $\mathbf{v}_\varepsilon$ of the Euler equation in $\Omega$ with outward boundary flux given by $v_n$, corresponding to vorticities $\omega_\varepsilon$, such that ${\rm supp}(\omega_\varepsilon^\pm) \subset B(x_\varepsilon^\pm, C\varepsilon)$ for some $x^{\pm}_\varepsilon \in \Omega$ and $C>0$ not depending on $\varepsilon$. Moreover, as $\varepsilon \to 0$, \[ \int_\Omega \omega_\varepsilon^\pm \to \kappa_\pm \] and \[ \mathcal{W}(x_\varepsilon^+, x_\varepsilon^-) \to \sup_{x^+, x^- \in \Omega} \mathcal{W}(x^+, x^-). \] \end{theorem} \begin{proof} This follows from Theorem~\ref{thm:K3} along the same lines as Theorem~\ref{thm:resu}. \end{proof} \begin{remark} There is also a counterpart of Theorem~\ref{thmLocalMinimum} for vortex pairs, concerning the existence of solutions near local maxima of the Kirchhoff--Routh function, and a counterpart of Theorem~\ref{thm:MultiplyConnected} for domains which are not simply connected. \end{remark} \begin{remark} One can also address the question of rotating vortex pairs.
Combining the ingredients of the proof of Theorem~\ref{thmRotating}, one can prove the existence of rotating vortex pairs of strength $\kappa_+ > 0$ and $\kappa_- < 0$ that concentrate around two antipodal rotating points at distance $\rho _+$ and $\rho _-$ which maximize the function \[ \frac{\alpha \kappa_+}{2} \rho _+^2 +\frac{\alpha \kappa_-}{2}\rho _-^2+\frac{\kappa_+^2}{4\pi}\log (1-\rho _+^{2})+\frac{\kappa_-^2}{4\pi}\log (1-\rho _-^{2})+\frac{\kappa_+\kappa_-}{2\pi} \log \frac{1+\rho _+\rho _-}{\rho _++\rho _-}. \] In contrast with Theorem~\ref{thmRotating}, the pair of vortices obtained is always a nontrivial pair of rotating vortices for any $\alpha \ne 0$, $\kappa_+ > 0$ and $\kappa_- < 0$. \end{remark} \subsection{Unbounded domains} We now consider the application of the results of Section~\ref{sectUnbounded} to the desingularization of vortices in unbounded domains. \subsubsection{Translating vortex pair in the plane} We first consider the construction of a pair of vortices in $\mathbf{R}^2$. First recall that a pair of vortices translating at velocity $\mathbf{W}$ in a flow with vanishing velocity at infinity is, up to a Galilean change of variables, a pair of stationary vortices in a flow with velocity at infinity $-\mathbf{W}$. The stream-function of the corresponding irrotational flow is $\psi_0(x)=\mathbf{W}^\perp \cdot x$. Therefore, the positions of two vortices of opposite intensities $\kappa$ and $-\kappa$ in the moving reference frame form a critical point of the Kirchhoff--Routh function $\mathcal{W}$ defined by \[ \frac{-\kappa^2}{2\pi} \log \frac{1}{\abs{x-y}} + \mathbf{W}^\perp \cdot x.
\] \begin{theorem} Let $W > 0$ and $\kappa > 0$. For every $\varepsilon > 0$ there exist smooth stationary solutions $\mathbf{v}_\varepsilon$ of the Euler equation in $\mathbf{R}^2$ symmetric with respect to the $x_2$ axis and such that $\lim_{x_1 \to \infty} \mathbf{v}_\varepsilon(x)=(0, W)$, corresponding to vorticities $\omega_\varepsilon$, such that ${\rm supp}(\omega_\varepsilon) \cap \mathbf{R}^2_+ \subset B(\Bar{x}, C\varepsilon)$, where $\Bar{x}= (\frac{\kappa}{4\pi W}, 0)$. \end{theorem} \begin{proof} The problem can be reduced to finding a solution in $\mathbf{R}^2_+$ with vanishing flux on the boundary. The corresponding Kirchhoff--Routh function is \[ \mathcal{W}(x)=\frac{\kappa^2}{4\pi} (\log 2x_1)-\kappa Wx_1. \] This follows from the existence result of Theorem~\ref{thmYang}, the asymptotics of Proposition~\ref{propUnboundedUpper} and Proposition~\ref{prop:1maiUnbounded}. \end{proof} \subsubsection{Stationary vortex in the half-plane with non-vanishing flux} The method just used extends to non-vanishing flux boundary conditions: \begin{theorem} Let $v_n \in L^1 (\mathbf{R})\cap L^s_{\mathrm{loc}}(\mathbf{R})$ for $s > 1$. Assume that $\int_{-\infty}^0 v_n=-\int_0^\infty v_n>0$. For every $W > 0$ and $\kappa > 0$, if $\kappa/W$ is small enough and if $\varepsilon > 0$ is sufficiently small, there exist smooth stationary solutions $\mathbf{v}_\varepsilon$ of the Euler equation in $\mathbf{R}^2_+$ with outward boundary flux given by $v_n$ and $\lim_{x_1 \to \infty} \mathbf{v}_\varepsilon(x)=(0, W)$, corresponding to vorticities $\omega_\varepsilon$, such that ${\rm supp}(\omega_\varepsilon) \subset B(x_\varepsilon, C\varepsilon)$ for some $x_\varepsilon \in \mathbf{R}^2_+$ and $C>0$ not depending on $\varepsilon$, and $\int_{\mathbf{R}^2_+} \omega_\varepsilon \to \kappa$.
\end{theorem} \begin{proof} Define $\psi_0$ by \[ \left\{ \begin{aligned} -\Delta \psi_0 & = 0 & & \text{in $\mathbf{R}^2_+$}, \\ \partial_2 \psi_0 &=v_n& & \text{on $\partial \mathbf{R}^2_+$}, \\ \psi_0(0, x_2) &\to 0 & &\text{as $\abs{x_2} \to \infty$},\\ \frac{\psi_0(x)}{x_1} &\to -W & & \text{as $x_1 \to \infty$}. \end{aligned} \right. \] One checks that by our assumptions, \[ \psi_0(0) > 0. \] In order to apply Theorem~\ref{theoremExistenceLevels}, we need to find $\Hat{x} \in \Omega$ such that \begin{equation} \label{eqTrenchStrict} \kappa \psi_0 (\Hat{x})+\frac{\kappa^2}{4\pi} \log 2 \Hat{x}_1 > \frac{\kappa^2}{4\pi} \Bigl(\log \frac{\kappa}{2\pi W}-1\Bigr). \end{equation} One takes $\Hat{x}=(\frac{\kappa}{4\pi W}, 0)$. If $\kappa/W$ is small enough, one has \[ \kappa \psi_0 (\Hat{x}) > 0, \] and one checks that \[ \kappa \psi_0 (\Hat{x})+\frac{\kappa^2}{4 \pi} \log 2 \Hat{x}_1 > \frac{\kappa^2}{4\pi} \log \frac{\kappa}{2\pi W} > \frac{\kappa^2}{4\pi} \Bigl(\log \frac{\kappa}{2\pi W}-1\Bigr). \] The conclusion follows then from Theorem~\ref{theoremExistenceLevels}. \end{proof} \subsubsection{Stationary vortex in a perturbed half-plane} Instead of perturbing the boundary condition on the half-plane, one can instead perturb the geometry. The first situation is the situation in which one has for example enlarged a little bit the half-plane around $0$: \begin{theorem} Assume that $\Omega$ is a simply-connected perturbation of $\mathbf{R}^2_+$ in the sense of \eqref{condPerturbation}. 
Let $\Bar{x} \in \partial \Omega$ be such that $x_1 > \Bar{x}_1$ for every $x \in \Omega$ and $\partial \Omega$ is of class $C^2$ in a neighborhood of $\Bar{x}$. Then for every $W > 0$, if $\kappa > 0$ is sufficiently small and if $\varepsilon > 0$ is sufficiently small, there exist smooth stationary solutions $\mathbf{v}_\varepsilon$ of the Euler equation in $\Omega$ with vanishing boundary flux and $\lim_{x_1 \to \infty} \mathbf{v}_\varepsilon(x)=(0, W)$, corresponding to vorticities $\omega_\varepsilon$, such that ${\rm supp}(\omega_\varepsilon) \subset B(x_\varepsilon, C\varepsilon)$ for some $x_\varepsilon \in \Omega$ and $C>0$ not depending on $\varepsilon$ and $\int_{\Omega} \omega_\varepsilon \to \kappa$. \end{theorem} \begin{proof} We are going to obtain the solutions by applying Theorem~\ref{theoremExistenceLevels} with $q=-\psi_0$. Let $\mathbf{v}_0$ be the irrotational stationary solution to the Euler equation with vanishing flux on $\partial \Omega$ and $\lim_{x_1 \to \infty} \mathbf{v}_0(x)=(0, W)$, i.e.\ $\mathbf{v}_0=(\nabla \psi_0)^\perp$ with \begin{equation} \label{eqPsi0} \left\{ \begin{aligned} -\Delta \psi_0 &= 0, & &\text{in $\Omega$}, \\ \psi_0 &= 0 & &\text{on $\partial \Omega$}, \\ \tfrac{\psi_0(x)}{x_1} &\to -W &&\text{as $x \to \infty$}. \end{aligned} \right. \end{equation} In order to apply Theorem~\ref{theoremExistenceLevels}, we need to find $\Hat{x} \in \Omega$ such that the condition \eqref{eqTrenchStrict} holds. First, by the strong maximum principle, one has $\partial_1 \psi_0(\Bar{x})>-W$, so that there exists $\gamma \in (0, W)$ such that in a neighborhood of $\Bar{x}$, \[ \psi_0(x) > -\gamma (x_1-\Bar{x}_1). \] If we consider the point $\Hat{x}=(\Bar{x}_1+\frac{\kappa}{4\pi W}, \Bar{x}_2)$, one has \[ \kappa \psi_0(\Hat{x}) > -\gamma \frac{\kappa^2}{4\pi W}.
\] On the other hand, if $K$ denotes the curvature of $\partial \Omega$ at $\Bar{x}$, one has by Proposition~\ref{propositionAsymptotH}, \[ \frac{\kappa^2}{2} H (\Hat{x}, \Hat{x})=-\frac{\kappa^2}{4\pi} \log \frac{\kappa}{2\pi W}+O(\kappa^3). \] Therefore, if $\kappa$ is small enough, one has \eqref{eqTrenchStrict}, and one can then apply Theorem~\ref{theoremExistenceLevels} to obtain the conclusion. \end{proof} \subsubsection{Translating vortex pair near a translating axisymmetric obstacle} We can also treat a situation in some sense opposite to the situation of the previous section. We obtain the desingularization of vortices on a set which is obtained by removing some part of the half-plane. By a Galilean change of variables and by extension by symmetry of the flow, this corresponds also physically to a rigid body in translation together with a pair of vortices. A similar problem was studied through the vorticity method by B.\thinspace Turkington \cite{Turkington1983} \begin{theorem} Let $D \subset \mathbf{R}^2$ be a compact simply-connected set with non-empty interior and symmetric with respect to the $x_1$ variable. Then for every $\kappa> 0$ and $W > 0$, if $\varepsilon > 0$ is sufficiently small there exist smooth stationary solutions $\mathbf{v}_\varepsilon$ of the Euler equation in $\mathbf{R}^2 \setminus D$ symmetric with respect to the $x_2$ axis, with vanishing boundary flux and such that $\lim_{x_1 \to \infty} \mathbf{v}_\varepsilon(x)=(0, W)$, corresponding to vorticities $\omega_\varepsilon$, such that ${\rm supp}(\omega_\varepsilon) \cap \mathbf{R}^2_+ \subset B(x_\varepsilon, C\varepsilon)$ for some $x_\varepsilon \in \Omega \cap \mathbf{R}^2_+$ and $C>0$ not depending on $\varepsilon$ and $\int_{\mathbf{R}^2_+ \setminus D} \omega_\varepsilon \to \kappa$. \end{theorem} \begin{proof} Set $\Omega=\mathbf{R}^2_+ \setminus D$. We shall consider the case $W > 0$ and $\kappa > 0$, and we shall assume that $B(0, \rho) \subset D \subset B(0, R)$. 
We use again Theorem~\ref{theoremExistenceLevels} and therefore we shall prove that \eqref{eqTrenchStrict} holds for some $\Hat{x} \in \mathbf{R}^2$ where $\psi_0$ solves \eqref{eqPsi0}. We shall take $\Hat{x}^\lambda=(\frac{\kappa}{4\pi W}, \lambda \frac{\kappa}{4\pi W})$ where $\lambda \in \mathbf{R}$. By the maximum principle on $\Omega$, one has for $x \in \Omega$, \[ \psi_0(x)>-Wx_1+W \frac{x_1 \rho^2}{\abs{x}^2}. \] Hence, we have \[ \kappa \psi_0(\Hat{x}^\lambda) \ge - \frac{\kappa^2}{4\pi} + \frac{c'}{1+\lambda^2}, \] with $c' = 4\pi \rho^2 W^2 > 0$. We also use the formula of the Green function $\Tilde{G}$ of $\mathbf{R}^2_+ \setminus B(0, R)$ used by B.\thinspace Turkington \cite[p.\thinspace 1047]{Turkington1983}: \[ \Tilde{G}(x, y)=\frac{1}{4\pi} \log \frac{1+\dfrac{4x_1y_1}{\abs{x-y}^2}}{1+\dfrac{4R^2 x_1y_1 } {(x_1y_1+x_2 y_2-R^2)^2+(x_2y_1-x_1y_2)^2}}. \] Since $\Tilde{G}(x, y) \le G(x, y)$, one has therefore \[ H(x,x)\ge \frac{1}{2\pi} \log 2 x_1 - \frac{1}{2\pi} \log \Bigl(1+ \frac{4R^2x_1^2}{(\abs{x}^2-R^2)^2}\Bigr), \] whence \[ \frac{\kappa^2}{2} H(\Hat{x}^\lambda, \Hat{x}^\lambda) \ge \frac{\kappa^2}{4\pi} \log \frac{\kappa}{2\pi W}+O(\lambda^{-4}). \] One checks thus that for $\lambda$ sufficiently large, \[ \kappa \psi_0 (\Hat{x}^\lambda)+\frac{\kappa^2}{4\pi} \log 2 \Hat{x}^\lambda_1 > \frac{\kappa^2}{4\pi} \Bigl(\log \frac{\kappa}{2\pi W}-1\Bigr), \] and the conclusion thus follows from Theorem~\ref{theoremExistenceLevels}. \end{proof} \appendix \section{Capacity estimates} Let $\Omega \subset \mathbf{R}^2$ be open. The electrostatic capacity of a compact set $K \subset \Omega$ is \[ \capa(K, \Omega)=\inf \Bigl\{ \int_{\Omega} \abs{\nabla \varphi}^2 \: :\: \varphi \in C^\infty_c(\Omega)\text{ and } \varphi = 1 \text{ on $K$} \Bigr\}.
\] Let us first recall the following standard capacity estimate which was discovered by H.\thinspace Poincar\'e \cite[p.\thinspace 17--22]{Poincare} and whose first complete proof was given by G.\thinspace Szeg\H o~\cite{Szego1930}. \begin{proposition} \label{propositionCapacityArea} Let $\Omega \subset \mathbf{R}^2$ have finite measure. For every $K \subset \Omega$, \[ \frac{4\pi}{\capa(K, \Omega)} \le \log \frac{\muleb{2}(\Omega)}{\muleb{2}(K)}. \] \end{proposition} \begin{proof} One shows by the P\'olya--Szeg\H o inequality (for a modern treatment, see e.g. \cite{Kawohl1985}, \cite{LiebLoss2001} or \cite{BrockSolynin2000}) that \[ \capa(K, \Omega) \ge \capa (\overline{B(0, \rho)}, B(0,R)) \] if $\rho$ and $R$ are chosen so that $\muleb{2}(B(0,\rho))=\muleb{2}(K)$ and $\muleb{2}(B(0,R))=\muleb{2}(\Omega)$. One can then compute explicitly the right-hand side to reach the conclusion. \end{proof} When $\mathcal{L}^2(\Omega)=+\infty$, Proposition~\ref{propositionCapacityArea} loses its interest. However, one still has: \begin{proposition} \label{propositionCapacityLocalArea} Let $K \subset \mathbf{R}^2_+$ be compact. We have \[ \frac{4\pi}{\capa(K, \mathbf{R}^2_+)} \le \log \frac{8\pi \sup_{x \in K} \abs{x}^2}{\muleb{2}(K)}. \] \end{proposition} \begin{proof} Set $a=\sup_{x \in K} \abs{x}$ and define the conformal transformation \[ \psi(z)= \frac{z-a}{z+a}. \] We have $\psi(\mathbf{R}^2_+)=B(0, 1)$. By Proposition~\ref{propositionCapacityArea}, we have \[ \frac{4\pi}{\capa(\psi(K), B(0, 1))} \le \log \frac{2\pi}{\muleb{2}(\psi(K))}. \] The conclusion comes from \[ \muleb{2}(\psi(K))=\int_K \abs{\psi'}^2\ge \frac{\muleb{2}(K)}{4a^2}. \qedhere \] \end{proof} Another question about estimates of the capacity is whether one can estimate the diameter of $K$, instead of its area, by its capacity. This is possible if one assumes moreover that $K$ is connected.
L.\thinspace E.\thinspace Fraenkel \cite{Fraenkel1981} has obtained in this direction the inequality \[ \frac{2\pi}{\capa(K, \Omega)}\le \log \frac{C \sqrt{\mathcal{L}^2(\Omega)}}{\diam K}. \] We improve this estimate so that it holds on unbounded sets and takes into account the distance from the boundary. \begin{proposition} \label{propositionBoundDiameter} Let $\Omega$ be such that $\mathbf{R}^2\setminus \Omega$ is connected and contains a ball of radius $\rho$ and $K \subset \Omega$ be compact. Then, \[ \frac{2\pi}{\capa(K, \Omega)}\le \log 16\Bigl(1+ \frac{\dist(K, \partial \Omega)}{2\rho}\Bigr)\Bigl(1+ \frac{2\dist(K, \partial \Omega)}{\diam(K)}\Bigr). \] \end{proposition} \begin{proof} Since $K$ is compact, up to translations and rotations we can assume that $0 \in K$ and $\dist(K, \partial \Omega)=\dist(0, \partial \Omega)$. Let $K^*$ and $\Omega^*$ be the sets obtained by circular symmetrization around $0$ introduced by V.\thinspace Wolontis \cite[III.1]{Wolontis1952} (see also J.\thinspace Sarvas \cite{Sarvas1972}). We have \begin{gather*} \capa(K^*, \Omega^*) \le \capa(K, \Omega),\\ [-\diam(K)/2, 0] \subset K^*, \end{gather*} and, since $\mathbf{R}^2 \setminus \Omega^*$ contains a ball of radius $\rho$, \[ [\dist(K, \partial \Omega), \dist(K, \partial \Omega)+2\rho] \subset \mathbf{R}^2\setminus \Omega^*. \] We have thus \[ \capa(K, \Omega) \ge \capa([-\diam(K)/2, 0], \mathbf{R}^2 \setminus [\dist(K, \partial \Omega), \dist(K, \partial \Omega)+2\rho]). \] Now, identifying $\mathbf{R}^2$ with $\mathbf{C}$, there exists a M\"obius transformation that brings the points $-\diam(K)/2$, $0$, $\dist(K, \partial \Omega)$ and $\dist(K, \partial \Omega)+2\rho$ to $-1$, $0$, $s$ and $\infty$ with \[ s=\frac{(2\rho+\dist(K, \partial \Omega)+\frac{1}{2}\diam(K))\dist(K, \partial \Omega)}{\rho \diam(K)}, \] from which we deduce that \[ \capa(K, \Omega) \ge \capa ([-1, 0], \mathbf{C} \setminus [s, +\infty[). \] The conclusion comes from the next lemma.
\end{proof} As in L.\thinspace E.\thinspace Fraenkel's proof \cite{Fraenkel1981}, we use \begin{lemma} Let $s>0$. We have \[ \frac{2\pi}{\capa([-1, 0], \mathbf{R}^2 \setminus [s, \infty))}\le \log 16(1+s). \] \end{lemma} \begin{proof} We have the formula \cite[5.60 (1)]{Vuorinen1988} \[ \capa([-1, 0], \mathbf{R}^2 \setminus [s, \infty))=2 \frac {\mathcal{K}(\sqrt{1/(1+s)})}{\mathcal{K}(\sqrt{s/(1+s)})}, \] where $\mathcal{K}$ is the complete elliptic integral of the first kind \[ \mathcal{K}(\gamma)=\int_0^{\frac{\pi}{2}} \frac{1}{\sqrt{1-\gamma^2 (\sin \theta)^2}}\,d\theta. \] Since (see \cite{AndersonVamanamurthyVuorinen1997}) \[ \frac{\mathcal{K}(\gamma)}{\mathcal{K}(\sqrt{1-\gamma^2})} > \frac{\pi}{2\log\Bigl(2 \dfrac{1+\sqrt{1-\gamma^2}}{\gamma}\Bigr) }, \] we have \[ \capa([-1, 0], \mathbf{R}^2 \setminus [s, \infty))> \frac{\pi}{\log 2(\sqrt{s}+\sqrt{1+s})}> \frac{\pi}{\log 4\sqrt{1+s}}=\frac{2\pi}{\log 16(1+s)}.\qedhere \] \end{proof} We also have an estimate in the case where the inner radius $\rho$ of $\mathbf{R}^2 \setminus \Omega$ is replaced by the connectedness and the measure of $\mathbf{R}^2 \setminus \Omega$. \begin{proposition} \label{propositionCapacityBoundDistance} Let $\Omega$ be such that $\mathbf{R}^2\setminus \Omega$ is connected and has finite measure and $K \subset \Omega$ be compact. We have \[ \frac{2\pi}{\capa(K, \Omega)}\le \log 16\Bigl(1+ \frac{\pi \dist(K, \partial \Omega)\diam(\mathbf{R}^2 \setminus \Omega)}{2\muleb{2}(\mathbf{R}^2 \setminus \Omega)}\Bigr)\Bigl(1+ \frac{2\dist(K, \partial \Omega)}{\diam(K)}\Bigr). \] \end{proposition} \begin{proof} One begins as in the proof of the previous proposition. We have then that \[ [\dist(K, \partial \Omega), \dist(K, \partial \Omega)+\frac{2\muleb{2}(\mathbf{R}^2 \setminus \Omega)}{\pi \diam (\mathbf{R}^2 \setminus \Omega)}] \subset \mathbf{R}^2\setminus \Omega^*. \] One then continues as previously.
\end{proof} \section{Green function asymptotics} This appendix is devoted to the study of the asymptotic expansion of Green's function near a point of the boundary: \begin{proposition} \label{propositionAsymptotH} Let $\Omega \subset \mathbf{R}^2$ and assume that $\Omega$ is of class $C^2$ around $0$ and that the tangent to $\partial \Omega$ is perpendicular to $x_1$. One has then, as $\varepsilon \to 0$, \[ G (\varepsilon x, \varepsilon y)=\frac{1}{4\pi} \log \frac{\abs{x-y}^2+4x_1y_1}{\abs{x-y}^2}- \varepsilon \frac{K}{2\pi} \frac{x_1 \abs{y}^2+y_1 \abs{x}^2}{\abs{x-y}^2+4x_1y_1}+o(\varepsilon), \] uniformly on compact subsets of $\mathbf{R}^2_+ \times \mathbf{R}^2_+$, where $K$ is the curvature of $\partial \Omega$ at $0$. In particular, \[ H (\varepsilon x, \varepsilon x)=\frac{1}{2\pi} \log 2\varepsilon x_1-\varepsilon \frac{K\abs{x}^2}{4\pi x_1} +o(\varepsilon). \] \end{proposition} \begin{proof} Define \[ w_{\varepsilon, y}(x) = \frac{1}{\varepsilon} \Bigl(\frac{1}{4\pi} \log \frac{\abs{x-y}^2+4x_1y_1}{\abs{x-y}^2}-G(\varepsilon x, \varepsilon y)\Bigr). \] This function is defined for every $x, y \in \Omega_\varepsilon=\{ z \in \mathbf{R}^2 \: :\: \varepsilon z \in \Omega \}$. Moreover, $w_{\varepsilon, y}$ satisfies \[ \left\{\begin{aligned} -\Delta w_{\varepsilon, y} &= 0 &&\text{in $\Omega_\varepsilon$},\\ w_{\varepsilon, y} &= \frac{1}{4\pi\varepsilon} \log \frac{\abs{x-y}^2+4x_1y_1}{\abs{x-y}^2} && \text{on $\partial \Omega_\varepsilon$}. \end{aligned}\right. \] By construction, $w_{\varepsilon, y}$ is a bounded function. We first claim that $w_{\varepsilon, y}$ is bounded uniformly in $L^\infty(\Omega_\varepsilon)$ as $\varepsilon \to 0$ and $y$ stays in a compact subset of $\mathbf{R}^2$. Indeed, since $\Omega$ is $C^2$ around $0$, there exists $r > 0$ such that if $z \in \partial \Omega \cap B(0, r)$, $\abs{z_1} \le C \abs{z_2}^2$.
One has thus, for $x \in \partial \Omega_\varepsilon \cap B(0, \frac{r}{\varepsilon})$, $\abs{x_1} \le C \varepsilon \abs{x_2}^2$, and therefore, when $\varepsilon$ is small enough, \[ \abs{w_{\varepsilon, y}(x)} \le \frac{C'}{\varepsilon} \frac{\varepsilon y_1 \abs{x_2}^2}{\abs{x-y}^2}. \] On the other hand, if $x \in \partial \Omega_\varepsilon \setminus B(0, \frac{r}{\varepsilon})$ and $\varepsilon$ is small enough, then $y \in B(0, \frac{r}{2\varepsilon})$, so that $x_1 \le 2 \abs{x-y}$ and $\abs{x-y} \ge \frac{r}{2\varepsilon}$, and \[ \abs{w_{\varepsilon, y}(x)} \le C \varepsilon. \] Since $\Omega$ is of class $C^2$, there exists a function $f : I \subset \mathbf{R} \to \mathbf{R}$ such that $\partial \Omega \cap B(0, r')= \{(f(t), t) \: :\: t \in I \}$. One has thus, using the Taylor expansion of $f$ and recalling that $f(0)=0$ and $f'(0)=0$, \[ w_{\varepsilon, y}(x)=\frac{1}{4\pi \varepsilon} \log \Bigl( 1+ \frac{4 y_1 \varepsilon^{-1}f(\varepsilon x_2)}{( \varepsilon^{-1}f(\varepsilon x_2)-y_1)^2+(x_2-y_2)^2}\Bigr). \] Therefore, by classical regularity estimates, $w_{\varepsilon, y}$ converges uniformly on compact subsets of $\mathbf{R}^2_+ \times \mathbf{R}^2_+$ to the unique bounded solution of \[ \left\{ \begin{aligned} -\Delta w_y &= 0 &&\text{in $\mathbf{R}^2_+$}, \\ w_y&=\frac{f''(0)}{2\pi}\frac{y_1 x_2^2}{y_1^2+(x_2-y_2)^2}&&\text{on $\partial \mathbf{R}^2_+$}. \end{aligned} \right. \] One can check that \[ w_y(x)=\frac{f''(0)}{2\pi}\frac{y_1 (x_1^2+x_2^2)+x_1(y_1^2+y_2^2)}{(x_1+y_1)^2+(x_2-y_2)^2}. \] The announced expressions for $G(\varepsilon x, \varepsilon y)$ and $H(\varepsilon x, \varepsilon x)$ follow. \end{proof} \begin{bibdiv} \begin{biblist} \bib{AmbrosettiStruwe1989}{article}{ author={Ambrosetti, A.}, author={Struwe, M.}, title={Existence of steady vortex rings in an ideal fluid}, date={1989}, ISSN={0003-9527}, journal={Arch. Rational Mech.
Anal.}, volume={108}, number={2}, pages={97\ndash 109}, } \bib{AmbrosettiMancini1981}{incollection}{ author={Ambrosetti, Antonio}, author={Mancini, Giovanni}, title={On some free boundary problems}, date={1981}, booktitle={Recent contributions to nonlinear partial differential equations}, series={Res. Notes in Math.}, volume={50}, publisher={Pitman}, address={Boston, Mass.}, pages={24\ndash 36}, } \bib{AndersonVamanamurthyVuorinen1997}{book}{ author={Anderson, Glen~D.}, author={Vamanamurthy, Mavina~K.}, author={Vuorinen, Matti~K.}, title={Conformal invariants, inequalities, and quasiconformal maps}, series={Canadian Mathematical Society Series of Monographs and Advanced Texts}, publisher={John Wiley \& Sons Inc.}, address={New York}, date={1997}, ISBN={0-471-59486-5}, } \bib{ArnoldKhesin}{book}{ author={Arnold, Vladimir~I.}, author={Khesin, Boris~A.}, title={Topological methods in hydrodynamics}, series={Applied Mathematical Sciences}, publisher={Springer-Verlag}, address={New York}, date={1998}, volume={125}, ISBN={0-387-94947-X}, } \bib{BartschPistoiaWeth}{unpublished}{ author={Bartsch, Thomas}, author={Pistoia, Angela}, author={Weth, Tobias}, title={$n$-vortex equilibria for ideal fluids in bounded planar domains and new nodal solutions of the $\sinh$-poisson and the Lane-Emden-Fowler equations}, note={preprint}, } \bib{BartschWeth2003}{article}{ author={Bartsch, Thomas}, author={Weth, Tobias}, title={A note on additional properties of sign changing solutions to superlinear elliptic equations}, date={2003}, ISSN={1230-3429}, journal={Topol. Methods Nonlinear Anal.}, volume={22}, number={1}, pages={1\ndash 14}, } \bib{BartschWeth2005}{article}{ author={Bartsch, Thomas}, author={Weth, Tobias}, title={Three nodal solutions of singularly perturbed elliptic equations on domains without topology}, date={2005}, ISSN={0294-1449}, journal={Ann. Inst. H. Poincar\'e Anal. 
Non Lin\'eaire}, volume={22}, number={3}, pages={259\ndash 281}, } \bib{BartschWethWillem}{article}{ author={Bartsch, Thomas}, author={Weth, Tobias}, author={Willem, Michel}, title={Partial symmetry of least energy nodal solutions to some variational problems}, date={2005}, ISSN={0021-7670}, journal={J. Anal. Math.}, volume={96}, pages={1\ndash 18}, } \bib{BergerFraenkel1974}{article}{ author={Berger, M.~S.}, author={Fraenkel, L.~E.}, title={A global theory of steady vortex rings in an ideal fluid}, date={1974}, ISSN={0001-5962}, journal={Acta Math.}, volume={132}, pages={13\ndash 51}, } \bib{BergerFraenkel1980}{article}{ author={Berger, M.~S.}, author={Fraenkel, L.~E.}, title={Nonlinear desingularization in certain free-boundary problems}, date={1980}, ISSN={0010-3616}, journal={Comm. Math. Phys.}, volume={77}, number={2}, pages={149\ndash 172}, } \bib{BBH}{book}{ author={Bethuel, Fabrice}, author={Brezis, Ha{\"{\i}}m}, author={H{\'e}lein, Fr{\'e}d{\'e}ric}, title={Ginzburg-Landau vortices}, series={Progress in Nonlinear Differential Equations and their Applications, 13}, publisher={Birkh\"auser Boston Inc.}, place={Boston, MA}, date={1994}, pages={xxviii+159}, isbn={0-8176-3723-0}, } \bib{BrockSolynin2000}{article}{ author={Brock, Friedemann}, author={Solynin, Alexander~Yu.}, title={An approach to symmetrization via polarization}, date={2000}, ISSN={0002-9947}, journal={Trans. Amer. Math. Soc.}, volume={352}, number={4}, pages={1759\ndash 1796}, } \bib{Burton1987}{article}{ author={Burton, G.~R.}, title={Vortex rings in a cylinder and rearrangements}, date={1987}, ISSN={0022-0396}, journal={J. Differential Equations}, volume={70}, number={3}, pages={333\ndash 348}, } \bib{Burton1988}{article}{ author={Burton, G.~R.}, title={Steady symmetric vortex pairs and rearrangements}, date={1988}, ISSN={0308-2105}, journal={Proc. Roy. Soc. Edinburgh Sect. 
A}, volume={108}, number={3\ndash 4}, pages={269\ndash 290}, } \bib{CaffarelliFriedman1980}{article}{ author={Caffarelli, Luis~A.}, author={Friedman, Avner}, title={Asymptotic estimates for the plasma problem}, date={1980}, ISSN={0012-7094}, journal={Duke Math. J.}, volume={47}, number={3}, pages={705\ndash 742}, } \bib{CastroCossioNeuberger}{article}{ author={Castro, Alfonso}, author={Cossio, Jorge}, author={Neuberger, John~M.}, title={A sign-changing solution for a superlinear {D}irichlet problem}, date={1997}, ISSN={0035-7596}, journal={Rocky Mountain J. Math.}, volume={27}, number={4}, pages={1041\ndash 1053}, } \bib{CotiZelatiRabinowitz1992}{article}{ author={Coti~Zelati, Vittorio}, author={Rabinowitz, Paul~H.}, title={Homoclinic type solutions for a semilinear elliptic {PDE} on {${\bf R}\sp n$}}, date={1992}, ISSN={0010-3640}, journal={Comm. Pure Appl. Math.}, volume={45}, number={10}, pages={1217\ndash 1269}, } \bib{delPinoKowalczykMusso2005}{article}{ author={del Pino, Manuel}, author={Kowalczyk, Michal}, author={Musso, Monica}, title={Singular limits in {L}iouville-type equations}, date={2005}, ISSN={0944-2669}, journal={Calc. Var. Partial Differential Equations}, volume={24}, number={1}, pages={47\ndash 81}, } \bib{EspositoMussoPistoia2006}{article}{ author={Esposito, Pierpaolo}, author={Musso, Monica}, author={Pistoia, Angela}, title={Concentrating solutions for a planar elliptic problem involving nonlinearities with large exponent}, date={2006}, ISSN={0022-0396}, journal={J. Differential Equations}, volume={227}, number={1}, pages={29\ndash 68}, } \bib{EspositoMussoPistoia2007}{article}{ author={Esposito, Pierpaolo}, author={Musso, Monica}, author={Pistoia, Angela}, title={On the existence and profile of nodal solutions for a two-dimensional elliptic problem with large exponent in nonlinearity}, date={2007}, ISSN={0024-6115}, journal={Proc. Lond. Math. Soc. 
(3)}, volume={94}, number={2}, pages={497\ndash 519}, } \bib{Fraenkel1981}{article}{ author={Fraenkel, L.~E.}, title={A lower bound for electrostatic capacity in the plane}, date={1981}, ISSN={0308-2105}, journal={Proc. Roy. Soc. Edinburgh Sect. A}, volume={88}, number={3\ndash 4}, pages={267\ndash 273}, } \bib{Fraenkel2000}{book}{ author={Fraenkel, L.~E.}, title={An introduction to maximum principles and symmetry in elliptic problems}, series={Cambridge Tracts in Mathematics}, publisher={Cambridge University Press}, address={Cambridge}, date={2000}, volume={128}, ISBN={0-521-46195-2}, } \bib{FridemannTurkington1981}{article}{ author={Friedman, Avner}, author={Turkington, Bruce}, title={Vortex rings: existence and asymptotic estimates}, date={1981}, ISSN={0002-9947}, journal={Trans. Amer. Math. Soc.}, volume={268}, number={1}, pages={1\ndash 37}, } \bib{GilbargTrudinger2001}{book}{ author={Gilbarg, David}, author={Trudinger, Neil~S.}, title={Elliptic partial differential equations of second order}, series={Classics in Mathematics}, publisher={Springer-Verlag}, address={Berlin}, date={2001}, ISBN={3-540-41160-7}, } \bib{Kawohl1985}{book}{ author={Kawohl, Bernhard}, title={Rearrangements and convexity of level sets in {PDE}}, series={Lecture Notes in Mathematics}, publisher={Springer-Verlag}, address={Berlin}, date={1985}, volume={1150}, ISBN={3-540-15693-3}, } \bib{Koebe1918}{article}{ author={Koebe, P.}, title={Abhandlungen zur Theorie der konformen Abbildung. IV. Abbildung mehrfach zusammenh\"angender schlichter Bereiche auf Schlitzbereiche.}, date={1918}, journal={Acta Math.}, volume={41}, pages={305\ndash 344}, } \bib{Kulpa1997}{article}{ author={Kulpa, Wladyslaw}, title={The {P}oincar\'e-{M}iranda theorem}, date={1997}, ISSN={0002-9890}, journal={Amer. Math. 
Monthly}, volume={104}, number={6}, pages={545\ndash 550}, } \bib{LiYanYang2005}{article}{ author={Li, Gongbao}, author={Yan, Shusen}, author={Yang, Jianfu}, title={An elliptic problem related to planar vortex pairs}, date={2005}, ISSN={0036-1410}, journal={SIAM J. Math. Anal.}, volume={36}, number={5}, pages={1444\ndash 1460}, } \bib{LiebLoss2001}{book}{ author={Lieb, Elliott~H.}, author={Loss, Michael}, title={Analysis}, edition={Second}, series={Graduate Studies in Mathematics}, publisher={American Mathematical Society}, address={Providence, RI}, date={2001}, volume={14}, ISBN={0-8218-2783-9}, } \bib{Lin1941}{article}{ author={Lin, C.~C.}, title={On the motion of vortices in two dimensions. {I}. {E}xistence of the {K}irchhoff--{R}outh function}, date={1941}, journal={Proc.\ Nat.\ Acad.\ Sci.\ U. S. A.}, volume={27}, pages={570\ndash 575}, } \bib{Lin1943}{book}{ author={Lin, C.~C.}, title={On the {M}otion of {V}ortices in {T}wo {D}imensions}, series={University of Toronto Studies, Applied Mathematics Series, no. 5}, publisher={University of Toronto Press}, address={Toronto, Ont.}, date={1943}, } \bib{Lions1984}{article}{ author={Lions, P.-L.}, title={The concentration-compactness principle in the calculus of variations. {T}he locally compact case. {II}}, date={1984}, ISSN={0294-1449}, journal={Ann. Inst. H. Poincar\'e Anal. Non Lin\'eaire}, volume={1}, number={4}, pages={223\ndash 283}, } \bib{MarchioroPulvirenti1983}{article}{ author={Marchioro, C.}, author={Pulvirenti, M.}, title={Euler evolution for singular initial data and vortex theory}, date={1983}, ISSN={0010-3616}, journal={Comm. Math. 
Phys.}, volume={91}, number={4}, pages={563\ndash 572}, } \bib{MawhinWillem}{book}{ author={Mawhin, Jean}, author={Willem, Michel}, title={Critical point theory and {H}amiltonian systems}, series={Applied Mathematical Sciences}, publisher={Springer-Verlag}, address={New York}, date={1989}, volume={74}, ISBN={0-387-96908-X}, } \bib{Nirenberg1959}{article}{ author={Nirenberg, L.}, title={On elliptic partial differential equations}, date={1959}, journal={Ann. Scuola Norm. Sup. Pisa (3)}, volume={13}, pages={115\ndash 162}, } \bib{Norbury1975}{article}{ author={Norbury, J.}, title={Steady planar vortex pairs in an ideal fluid}, date={1975}, ISSN={0010-3640}, journal={Comm. Pure Appl. Math.}, volume={28}, number={6}, pages={679\ndash 700}, } \bib{Poincare}{book}{ author={Poincar\'e, Henri}, title={Figures d'\'equilibre d'une masse fluide}, address={Paris}, date={1903}, } \bib{Rabinowitz1992}{article}{ author={Rabinowitz, Paul~H.}, title={On a class of nonlinear {S}chr\"odinger equations}, date={1992}, ISSN={0044-2275}, journal={Z. Angew. Math. Phys.}, volume={43}, number={2}, pages={270\ndash 291}, } \bib{Sarvas1972}{article}{ author={Sarvas, Jukka}, title={Symmetrization of condensers in {$n$}-space}, date={1972}, journal={Ann. Acad. Sci. Fenn. Ser. A I}, number={522}, pages={44}, } \bib{Schochet_CPDE_95}{article}{ author={Schochet, Steven}, title={The weak vorticity formulation of the {$2$}-{D} {E}uler equations and concentration-cancellation}, date={1995}, ISSN={0360-5302}, journal={Comm. Partial Differential Equations}, volume={20}, number={5-6}, pages={1077\ndash 1104}, } \bib{Szego1930}{article}{ author={Szeg{\H o}, G.}, title={{\" U}ber einige extremalaufgaben der potentialtheorie}, date={1930}, journal={Math. Z.}, volume={31}, pages={583\ndash 593}, } \bib{Turkington1983}{article}{ author={Turkington, Bruce}, title={On steady vortex flow in two dimensions. {I}, {II}}, date={1983}, ISSN={0360-5302}, journal={Comm. 
Partial Differential Equations}, volume={8}, number={9}, pages={999\ndash 1030, 1031\ndash 1071}, } \bib{Vuorinen1988}{book}{ author={Vuorinen, Matti}, title={Conformal geometry and quasiregular mappings}, series={Lecture Notes in Mathematics}, publisher={Springer-Verlag}, address={Berlin}, date={1988}, volume={1319}, ISBN={3-540-19342-1}, } \bib{Willem1996}{book}{ author={Willem, Michel}, title={Minimax theorems}, series={Progress in Nonlinear Differential Equations and their Applications, 24}, publisher={Birkh\"auser Boston Inc.}, address={Boston, MA}, date={1996}, ISBN={0-8176-3913-6}, } \bib{Wolontis1952}{article}{ author={Wolontis, Vidar}, title={Properties of conformal invariants}, date={1952}, ISSN={0002-9327}, journal={Amer. J. Math.}, volume={74}, pages={587\ndash 606}, } \bib{Yang1991}{article}{ author={Yang, Jianfu}, title={Existence and asymptotic behavior in planar vortex theory}, date={1991}, journal={Math.\ Models Methods Appl.\ Sc.}, volume={1}, number={4}, pages={461\ndash 475}, } \bib{Yang1995}{article}{ author={Yang, Jianfu}, title={Global vortex rings and asymptotic behaviour}, date={1995}, ISSN={0362-546X}, journal={Nonlinear Anal.}, volume={25}, number={5}, pages={531\ndash 546}, } \end{biblist} \end{bibdiv} \end{document}
\begin{document} \title[Avoiding infinite arithmetic progressions]{Large subsets of Euclidean space avoiding infinite arithmetic progressions} \author{Laurestine Bradford} \address{Department of Linguistics, McGill University, Montreal, Quebec, H3A 1A7, Canada and Centre for Research on Brain, Language and Music, Montreal, Quebec, H3G 2A8, Canada} \email{[email protected]} \thanks{The first author is supported by a CRBLM Graduate Student Stipend. The CRBLM is funded by the Government of Quebec via the Fonds de Recherche Nature et Technologies and Société et Culture} \author{Hannah Kohut} \address{Department of Mathematics, University of British Columbia, Vancouver, British Columbia, V6T 1Z2, Canada} \email{[email protected]} \thanks{The second author is supported by NSERC Discovery Grants 22R81123 and 22R00756} \author{Yuveshen Mooroogen} \address{Department of Mathematics, University of British Columbia, Vancouver, British Columbia, V6T 1Z2, Canada} \email{[email protected]} \thanks{The third author is supported by NSERC Discovery Grant GR010263} \subjclass[2020]{Primary 28A75; Secondary 11B25} \date{April 19, 2023} \begin{abstract} It is known that if a subset of $\mathbb{R}$ has positive Lebesgue measure, then it contains arbitrarily long finite arithmetic progressions. We prove that this result does not extend to infinite arithmetic progressions in the following sense: for each $\lambda$ in $[0,1)$, we construct a subset of $\mathbb{R}$ that intersects every interval of unit length in a set of measure at least $\lambda$, but that does not contain any infinite arithmetic progression. \end{abstract} \maketitle \section{Introduction} A \textit{finite arithmetic progression of length} $k$ is a set of the form $\{x, x + \Delta, x + 2\Delta, ..., x+(k-1) \Delta\}$ for some $x$ in $\mathbb{R}^n$ and $\Delta \neq 0$ in $\mathbb{R}^n$. 
It follows from the Lebesgue density theorem that every Lebesgue measurable subset $S$ of $\mathbb{R}^n$ with positive Lebesgue measure must contain finite arithmetic progressions of length $k$ for every $k$ in $\mathbb{N}$. However, the same is not true of \textit{infinite} arithmetic progressions. We say that a subset $S$ of $\mathbb{R}^n$ contains an \textit{infinite arithmetic progression} or simply an \textit{arithmetic progression} if there exists a point $x$ in $S$ and a vector $\Delta \neq 0$ in $\mathbb{R}^n$ such that the set $x + \Delta \mathbb{N} = \{x + n\Delta : n \in \mathbb{N}\}$ is contained in $S$. We call $\Delta$ the \textit{gap length} or \textit{gap} of the progression. In this paper, we focus on arithmetic progressions with positive gap length, although symmetrical arguments apply to progressions with negative gap length. It is plain that a set of positive Lebesgue measure need not contain an infinite arithmetic progression; indeed, no bounded set contains any infinite arithmetic progression. Sets of infinite measure may also fail to contain arithmetic progressions. For example, the set $S = \bigcup_{n \in \mathbb{N}} [n^2,n^2 + 1]$ has infinite measure but contains no infinite arithmetic progression. To see this, observe that while the set is made up of infinitely many intervals, the spaces between consecutive intervals grow without bound. On the other hand, the gap length in any given arithmetic progression is fixed, so no infinite arithmetic progression can lie completely within $S$. It is natural to ask whether a disjoint union of intervals must contain arithmetic progressions if it is not allowed to have arbitrarily large spaces between intervals. 
In this paper, we investigate the following question, proposed by Joe Repka to the third author \cite{Joe}: for each $\lambda \in [0,1)$, does there exist a subset $S$ of $\mathbb{R}$ such that $S$ intersects every interval of unit length in a set of measure at least $\lambda$, but $S$ does not contain any arithmetic progression? We prove the following constructive result, which leads to an affirmative answer to Repka's question. \begin{theorem}\label{th:mainresult} For every $N \geq 1$ in $\mathbb{N}$, there exists a subset $S = S(N)$ of $\mathbb{R}$ such that \begin{align*} \vert S \cap [m,m+1] \vert = 1 - \frac{1}{N} \end{align*} for every $m$ in $\mathbb{Z}$, but that does not contain any arithmetic progression $x + \Delta\mathbb{N}$ for any $x$ in $S$ and $\Delta > 0$ in $\mathbb{R}$. \end{theorem} We have recently become aware of two preprints, \cite{KandP} and \cite{newkeleti}, that were inspired by our above result. In \cite{KandP}, Kolountzakis and Papageorgiou prove the following theorem, which extends Theorem \ref{th:mainresult} to a larger class of sequences. \begin{externaltheorem}[Kolountzakis--Papageorgiou, Theorem 1.1 from \cite{KandP}]\label{th:kandp} Let $\mathbb{A} = \{a_n : n \in \mathbb{N}\}$ be a sequence of real numbers such that (i) $a_0 = 0$, (ii) $a_{n+1} - a_n \geq 1$ for all $n$ in $\mathbb{N}$, and (iii) $\log{a_n} = o(n)$. For each $\lambda \in [0,1)$, there exists a subset $S$ of $\mathbb{R}$ such that $\vert S \cap [m,m+1] \vert \geq \lambda$ for all $m$ in $\mathbb{Z}$, but that does not contain any affine copy of $\mathbb{A}$. \end{externaltheorem} In \cite{newkeleti}, Burgin, Goldberg, Keleti, MacMahon, and Wang show that the condition on the size of the sets $S$ in both Theorem \ref{th:mainresult} and Theorem \ref{th:kandp} can be slightly improved. 
\begin{externaltheorem}[Burgin--Goldberg--Keleti--MacMahon--Wang, Theorem 2.4 from \cite{newkeleti}]\label{th:burgin} Let $\mathbb{A} = \{a_n : n \in \mathbb{N}\}$ be a sequence of positive real numbers such that (i) $a_{n+1} - a_n \geq 1$ for all $n$ in $\mathbb{N}$, and (ii) $\log{a_n} = o(n)$. There exists a subset $S$ of $\mathbb{R}$ such that $\lim_{m \to \infty} \vert S \cap [m,m+1] \vert = 1$, but that does not contain any affine copy of $\mathbb{A}$. \end{externaltheorem} The main step leading to this improved result is Lemma 2.3 in \cite{newkeleti}. The techniques employed in the proof of Theorem \ref{th:kandp} differ from the ones we use to prove Theorem \ref{th:mainresult}. In particular, Kolountzakis and Papageorgiou give a probabilistic proof of their result, whereas we give an explicit elementary construction broadly based on equidistribution of sequences on $[0,1)$. Our approach has applications beyond the context of the present problem, which we intend to develop in subsequent work. Theorem \ref{th:mainresult}, Theorem \ref{th:kandp}, and Theorem \ref{th:burgin} are examples of \textit{avoidance} results, where one shows that a set may be large (in some quantifiable sense) without necessarily containing any affine copy of a prescribed set. Avoidance problems have been extensively studied both in the discrete setting and in the continuum. We identify a few salient results below. In the discrete setting, Behrend shows in \cite{Behrend} that for any $\epsilon > 0$ and all large enough positive integers $M$, there exists a subset $S$ of $\{0, 1, \ldots, M - 1\}$ with $\#(S) > M^{1 - \epsilon}$ that does not contain any three-term arithmetic progression. 
Wagstaff, in his paper \cite{Wag}, shows that for every two real numbers $a$ and $b$ with $0 \leq a \leq b \leq 1$, there exists an increasing sequence $S$ of the natural numbers with lower density \begin{align*} \text{\underbar{$\delta$}}(S) = \liminf_{n \to \infty} \frac{\#(S \cap [1,n])}{n} = a \end{align*} and upper density \begin{align*} \bar{\delta}(S) = \limsup_{n \to \infty} \frac{\#(S \cap [1,n])}{n} = b, \end{align*} but that does not contain any infinite arithmetic progression. In particular, when $a = b = 1$, Wagstaff's result gives a set of density $1$ that does not contain infinite arithmetic progressions. Behrend and Wagstaff's results both provide a counterpoint to Szemerédi's theorem, which says that every subset $S \subseteq \mathbb{N}$ with positive upper density must contain finite arithmetic progressions of length $k$ for every $k$ in $\mathbb{N}$ \cite{Szemeredi}. In the continuum, the Erd\H{o}s similarity problem asks: given a sequence $\{x_n\} \to 0$, does there exist a subset $S$ of the real line that has positive Lebesgue measure and that does not contain any affine copy of this sequence? This question has been answered in the affirmative for many slowly-decaying sequences, but remains open for many simple examples, such as $\{2^{-n}\}_{n = 0}^\infty$. See \cite{Svetic} for a detailed survey of the progress on this problem up to 2000. Still in the continuum, but now in the fractal regime, there is a large body of work concerned with constructing sets of large Hausdorff or Fourier dimension that avoid affine copies of prescribed sets. For example, in \cite{Keleti}, Keleti constructs a compact subset of \(\mathbb{R}\) that has full Hausdorff dimension but that does not contain any 3-term arithmetic progression. See also the work of Denson, Pramanik, and Zahl \cite{DensonPramanikZahl}, Fraser and Pramanik \cite{FraserPramanik}, Maga \cite{Maga}, Máthé \cite{Mathe}, Shmerkin \cite{Pablo}, and Yavicoli \cite{Alexia}. 
Throughout this paper, we write $\mathbb{N}$ for the set of natural numbers including zero. For any real number $x$, we denote by $\lceil x \rceil$ the smallest integer that is no less than $x$, and by $\lfloor x \rfloor$ the largest integer that is no greater than $x$. We also define the \textit{fractional part} of $x$ to be the real number $\langle x \rangle$ such that $\langle x \rangle = x - \lfloor x \rfloor$. For any subset $A$ of $\mathbb{R}$, we denote by $\#A$ the cardinality of $A$. If $A$ is measurable, we write $\vert A \vert$ for its Lebesgue measure. \section{Proof of Theorem \ref{th:mainresult}.}\label{sec:proof} \subsection{Construction of the set $S$.} In what follows, we define $S$ as the disjoint union of a family of sets $\{S_m\}$ such that $S_m \subset [m,m+1)$ for every $m$ in $\mathbb{Z}$. Fix an $N \geq 1$ in $\mathbb{N}$, and divide the interval $[0,1)$ into $N$ disjoint half-open subintervals $Q_0, \ldots, Q_{N-1}$, each of measure $1/N$. More precisely, each $Q_i$ is the interval $[{i}/{N},({i+1})/{N})$. Then, for each $i$ in $\{0, \ldots, N -1\}$, let $R_i = [0,1) \setminus Q_i$. Each $R_i$ has measure $1 - 1/N$. See Figure \ref{fig1} for the special case $N = 3$. \begin{figure} \caption{The sets $R_i$ when $N=3$. The deleted intervals $Q_i$ are shaded in red, and the remaining intervals are shaded in blue.} \label{fig1} \end{figure} We construct $S$ out of integer translates of the sets $R_i$. To do so, we introduce the sequence of integers $\{\beta_k\}_{k = 0}^\infty$ where $\beta_k$ is given by the geometric sum \begin{align*} \beta_k = \sum_{j = 0}^{k - 1} (N + 1)^j. \end{align*} We also require the translation maps $\tau_m : \mathbb{R} \to \mathbb{R}$, defined by $\tau_m(x) = x + m$. For each $m \in \mathbb{N}$, define $S_m$ as follows. Let $S_0 = R_0$. For $m > 0$, fix $k$ such that $\beta_{k} \leq m < \beta_{k+1}$ and let $S_m = \tau_m(R_i)$, where $i \equiv k\ (\operatorname{mod} N)$.
For each $m \in \mathbb{Z}$ with $m < 0$, set $S_m = \tau_{2m}(S_{|m|})$. Finally, let \begin{align*} S = \bigcup_{m \in \mathbb{Z}} S_m. \end{align*} Observe that $S \cap [m,m+1) = S_m$ for every $m$ in $\mathbb{Z}$. Since translations preserve Lebesgue measure, we therefore have that \begin{align*} \vert S \cap [m,m+1]\vert = 1 - \frac{1}{N}, \end{align*} for every $m$ in $\mathbb{Z}$, as required by Theorem \ref{th:mainresult}. \begin{figure} \caption{Part of the set $S$ when $N = 3$.} \label{fig:setS} \end{figure} In Figure \ref{fig:setS}, we illustrate part of the set $S$ when $N=3$. We see that the first $(N+1)^0$ intervals $S_m$ are translates of $R_0$, the next $(N+1)^1$ are translates of $R_1$, the next $(N+1)^2$ are translates of $R_2$, and so on. After we reach $(N+1)^{N-1}$ translates of $R_{N-1}$, the next $(N+1)^{N}$ intervals are translates of $R_0$, and we continue cycling through the sets $R_i$ with each successive block of $(N+1)^j$ intervals. \subsection{Avoiding arithmetic progressions with rational gap length.} We show that $S$ does not contain any arithmetic progression whose gap length is a rational number. To do so, it suffices to check that $S$ avoids arithmetic progressions with integer gap length. Indeed, suppose that $S$ contains the arithmetic progression $x + \Delta\mathbb{N}$, where $\Delta = p / q$ for some integers $p$ and $q$. Then $S$ must contain the subset $x + \Delta(q\mathbb{N})$ of $x + \Delta\mathbb{N}$. This subset can be written as \begin{align*} x + \Delta(q\mathbb{N}) = x + (\Delta q)\mathbb{N} = x + p\mathbb{N}, \end{align*} which we recognise as an arithmetic progression with gap length $p$ in $\mathbb{Z}$. \begin{lemma} The set $S$ does not contain any arithmetic progression with integer gap length. \end{lemma} \begin{proof} By way of contradiction, suppose that there is an $x$ in $S$ and a $\Delta > 0$ in $\mathbb{N}$ such that the progression $x + \Delta\mathbb{N}$ is contained in $S$. 
For the purpose of this argument, we write this progression as an increasing sequence $\{x_n\}_{n = 0}^\infty$. Observe that every term in $x + \Delta\mathbb{N}$ has its fractional part in the same subinterval $Q_j$, where $j$ is the index for which $\langle x \rangle \in Q_j$. Indeed, every term of this progression has fractional part $\langle x \rangle$. Let $k$ be an index large enough so that \begin{enumerate}[(i)] \item $\beta_k > x$, \item $\beta_{k+1} - \beta_k > \Delta$, and \item $k \equiv j \ (\operatorname{mod} N)$. \end{enumerate} Such a choice of $k$ is always possible since the sequences $\{\beta_k\}_{k = 0}^\infty$ and $\{\beta_{k+1} - \beta_k\}_{k = 0}^\infty$ are both increasing and unbounded. Choose an $n$ so that $x_n$ is the largest term of $x + \Delta\mathbb{N}$ with $x_n < \beta_k$. Condition (i) guarantees that such a term exists. From condition (ii), we know that the next term in the progression, $x_{n + 1} = x_n + \Delta$, must belong to the interval $[\beta_k, \beta_{k+1})$. However, condition (iii) says that $S \cap [\beta_k, \beta_{k+1})$ is a union of translates of $R_j$. Therefore, no element of $S$ belonging to $[\beta_k, \beta_{k+1})$ has a fractional part in $Q_j$, and no term of the arithmetic progression $x+\Delta\mathbb{N}$ can lie in this interval. This is a contradiction. \end{proof} \subsection{Avoiding arithmetic progressions with irrational gap length.} It remains to show that $S$ does not contain any arithmetic progression where the gap length is an irrational number. \begin{lemma} The set $S$ does not contain any arithmetic progression with irrational gap length. \end{lemma} \begin{proof} Suppose for contradiction that there exists an $x$ in $S$ and a $\Delta > 0$ in $\mathbb{R} \setminus \mathbb{Q}$ such that $S$ contains the arithmetic progression $x + \Delta\mathbb{N}$. As above, we write this progression as an increasing sequence $\{x_n\}_{n = 0}^\infty$. We also assume that $x_0 > ((2N+1)/N)\Delta$.
(If this condition is not true, apply the argument below to the subsequence $\{x_n\}_{n=3}^\infty$. If $\{x_n\}_{n=3}^\infty$ is not contained in $S$, then the arithmetic progression $\{x_n\}_{n=0}^\infty$ cannot be in $S$ either.) This last condition will help simplify a later estimate; see \eqref{eq:assumption}. By Weyl's polynomial equidistribution theorem \cite{Weyl}, the sequence of {fractional parts} $\{\langle x_n \rangle\}_{n = 0}^\infty$ is equidistributed in $[0,1)$. This means that for every subinterval $[a,b)$ of $[0,1)$, \begin{align*} \lim_{M \to \infty} \frac{\#(\{\langle x_n \rangle\}_{n=0}^M \cap [a,b))}{M+1} = b - a. \end{align*} In particular, this must be true for the subinterval $Q_0$ of $[0,1)$. Fix an arbitrary $\epsilon > 0$. Then there exists an $L$ in $\mathbb{N}$ with the property that whenever $M > L$, \begin{align}\label{eq:equidistribution} \left\vert \frac{\#(\{\langle x_n \rangle\}_{n=0}^M \cap Q_0)}{M+1} - \frac{1}{N}\right\vert < \epsilon. \end{align} To achieve a contradiction, fix $\epsilon < 1 / (N^2 + N)$. We exhibit an $M > L$ for which the above inequality fails. Let $k$ be an index large enough so that \begin{enumerate}[(i)] \item $\beta_k > x_{L + 1}$, \item $k \equiv 1 \ (\operatorname{mod} N)$, and \item $(N/(N+1))\beta_k > \Delta$. \end{enumerate} Then, let \(M\) be the {largest} natural number such that \(x_M < \beta_k\). Notice that we have \(M \geq L+1 > L\) by condition (i), so \eqref{eq:equidistribution} holds with this choice of $M$. We make the following observations, to be proved below. \textit{Claim 1:} The fraction of the terms of $\{x_0, \ldots, x_M\}$ which lie in the interval \begin{align*} I = \left[\frac{1}{N + 1}\beta_k, \beta_k\right) \end{align*} is at least $N/(N + 1)$. \textit{Claim 2:} If $x$ is a point in $S\cap I$, its fractional part is not in $Q_0$. Together, these claims imply that $\#(\{\langle x_n \rangle\}_{n=0}^M \cap Q_0)$ is at most $(M+1)/(N+1)$. 
This contradicts \eqref{eq:equidistribution} whenever $\epsilon < 1/(N^2 + N)$. To prove Claim 1, observe that for any positive real number $a$, the number of elements of the arithmetic progression $\{x_n\}_{n=0}^\infty$ that are contained in the interval \([0,a)\) is given by \(\lceil(a-x_0)/{\Delta}\rceil\). Therefore, the number of terms in the interval $[1/(N+1)\beta_k, \beta_k)$ is $$\left\lceil\frac{\beta_k-x_0}{\Delta}\right\rceil-\left\lceil\frac{(1/(N+1))\beta_k-x_0}{\Delta}\right\rceil.$$ Notice also that by choice of $M$ there are $M+1$ terms of $\{x_n\}_{n=0}^\infty$ in $[0, \beta_k)$. It will be convenient to write this as $\left\lceil{(\beta_k-x_0)}/{\Delta}\right\rceil$. Hence, we can express the fraction of the terms of \(\{x_0, \ldots, x_M\}\) that lie in the interval $I$ as \begin{align}\label{eq:proportion} \frac{\left\lceil\frac{\beta_k-x_0}{\Delta}\right\rceil-\left\lceil\frac{(1/(N+1))\beta_k-x_0}{\Delta}\right\rceil}{\left\lceil\frac{\beta_k-x_0}{\Delta}\right\rceil}. \end{align} We show that, by our choice of $k$, the above quantity is at least $N/(N+1)$. Using the identity \(a \leq \lceil a\rceil < a+1\), we see that \begin{align*} \left\lceil\frac{\beta_k-x_0}{\Delta}\right\rceil-\left\lceil\frac{\frac{1}{N+1}\beta_k-x_0}{\Delta}\right\rceil &> \frac{\frac{N}{N+1}\beta_k - \Delta}{\Delta} \end{align*} is a lower bound for the numerator of \eqref{eq:proportion} and that \begin{align*} \left\lceil\frac{\beta_k-x_0}{\Delta}\right\rceil < \frac{\beta_k-x_0 + \Delta}{\Delta} \end{align*} is an upper bound for the denominator of \eqref{eq:proportion}. (Note that both of these bounds are positive due to conditions (iii) and (i) on $k$.) 
Together, these estimates imply that \begin{align}\label{eq:assumption} \frac{\left\lceil\frac{\beta_k-x_0}{\Delta}\right\rceil-\left\lceil\frac{(1/(N+1))\beta_k-x_0}{\Delta}\right\rceil}{\left\lceil\frac{\beta_k-x_0}{\Delta}\right\rceil} &> \frac{N}{N+1}\left(\frac{\beta_k-\frac{N+1}{N}\Delta}{\beta_k - x_0 +\Delta}\right). \end{align} Using our assumption that $x_0 > ((2N+1)/N)\Delta$, we see that the fraction in parentheses above is greater than $1$. We conclude that \begin{align*} \frac{\left\lceil\frac{\beta_k-x_0}{\Delta}\right\rceil-\left\lceil\frac{(\beta_k/(N+1))-x_0}{\Delta}\right\rceil}{\left\lceil\frac{\beta_k-x_0}{\Delta}\right\rceil} >\frac{N}{N+1}. \end{align*} Let us now prove Claim 2. Since the index $k$ satisfies $k \equiv 1 \ (\operatorname{mod} N)$, we must also have $k - 1 \equiv 0 \ (\operatorname{mod} N)$. This means that $S \cap [\beta_{k-1},\beta_k)$ is a union of integer translates of $R_0$. Therefore, no element of $S\cap [\beta_{k-1},\beta_k)$ has fractional part in $Q_0$. To prove the claim, it suffices to show that $\beta_k/(N+1) > \beta_{k - 1}$. This will imply that the interval $I$ is a subset of $[\beta_{k-1},\beta_k)$. This is a computation: \begin{equation*} \frac{\beta_k}{N+1} = \sum_{j=0}^{k-1} (N+1)^{j-1} = \frac{1}{N+1} + \sum_{j=0}^{k-2} (N+1)^j = \frac{1}{N+1} + \beta_{k-1} >\beta_{k-1}. \qedhere \end{equation*} \end{proof} \section{Solution to Repka's problem and higher dimensions} Let us now explain how Theorem \ref{th:mainresult} provides a positive answer to Repka's problem in the real line. We also show that it implies an analogous result in higher dimensions. \begin{corollary}\label{cor:reduction} For each $\lambda$ in $[0,1)$, there exists a subset $S = S(\lambda)$ of $\mathbb{R}$ that intersects every interval of unit length in a set of measure at least $\lambda$, but that does not contain any arithmetic progression. \end{corollary} \begin{proof} Fix $\lambda$ in $[0,1)$. 
Choose an integer $N$ large enough that $2/N \leq 1 - \lambda$, and apply Theorem \ref{th:mainresult} to obtain a set $S$ (depending on $N$, and hence on $\lambda$) which does not contain any arithmetic progressions, and with the property that \begin{align}\label{eq:msrpropertyofthm1} \vert S \cap [m,m+1] \vert = 1 - \frac{1}{N} \end{align} for every $m$ in $\mathbb{Z}$. We claim that this $S$ intersects every interval of unit length in a set of measure at least $\lambda$. Let $I$ be an arbitrary interval of unit length in $\mathbb{R}$, and choose an integer $m$ so that $I$ is contained in the union $[m, m+1] \cup [m + 1, m+2]$. Then \begin{align*} \vert I \setminus S \vert &\leq \vert [m,m+2]\setminus S \vert \\ &= \vert [m,m+1] \setminus S \vert + \vert [m+1, m+2] \setminus S \vert \\ &= \frac{1}{N} + \frac{1}{N} \\ &\leq 1 - \lambda, \end{align*} where we have used \eqref{eq:msrpropertyofthm1} in the third line. This implies that $S \cap I$ has measure at least $\lambda$. \end{proof} After submitting this article, we were informed by an anonymous referee that it suffices to construct a set that does not contain arithmetic progressions with irrational gap length. With the referee's permission, we include their argument below. While this argument is shorter than the construction in Section \ref{sec:proof}, the latter has the advantage of producing a more explicit set, in the sense that intersections with any unit interval are easily illustrated. \begin{proposition} Suppose that for each $\mu$ in $[0,1)$, there exists a subset $S$ of $\mathbb{R}$ that intersects every interval of unit length in a set of measure at least $\mu$, but that does not contain any arithmetic progression with irrational gap length. Then, for each $\lambda$ in $[0,1)$, there exists a subset $T$ of $\mathbb{R}$ that intersects every interval of unit length in a set of measure at least $\lambda$, but that does not contain any arithmetic progression with any gap length. 
\end{proposition} \begin{proof} Given $\lambda$ in $[0,1)$, choose any irrational number $r > 1$ and choose $\mu$ with $(\lambda + r)/(1 + r) \leq \mu < 1$. By assumption, we know that there exists a set $S$ that intersects every interval of unit length in a set of measure at least $\mu$, but that does not contain any arithmetic progression with irrational gap length. Let $T = S \cap rS$. Fix an interval of unit length $I$, and choose an interval of unit length $J$ such that $r^{-1}I \subset J$. Then \begin{align*} \vert I \setminus T \vert &\leq \vert I \setminus S \vert + \vert I \setminus rS \vert \\ &= \vert I \setminus S \vert + r \vert (r^{-1} I) \setminus S \vert \\ &\leq \vert I \setminus S \vert + r \vert J \setminus S \vert \\ &\leq 1 - \mu + r (1 - \mu) \\ &\leq 1 - \lambda. \end{align*} This implies that $T \cap I$ has measure at least $\lambda$. Since $T \subseteq S$, $T$ does not contain any arithmetic progression with irrational gap length. Suppose for contradiction that there exist $x \in \mathbb{R}$ and $\Delta \in \mathbb{Q}$ such that the arithmetic progression $x + \Delta \mathbb{N}$ is contained in $T$. Then $x + \Delta \mathbb{N}$ must also belong to $rS$. This implies that the arithmetic progression $(x/r) + (\Delta/r)\mathbb{N}$ with irrational gap length $\Delta/r$ belongs to $S$, which contradicts our choice of $S$. Thus, $T$ contains no arithmetic progression. \end{proof} The next corollary to Theorem \ref{th:mainresult} asserts that by taking $n$-fold Cartesian products of the sets $S$ constructed in the proof of Corollary \ref{cor:reduction}, we obtain large subsets of $n$-dimensional Euclidean space that avoid arithmetic progressions. \begin{corollary} For each $\lambda$ in $(0,1)$, and each $n \geq 1$, there exists a subset $S^n = S^n(\lambda, n)$ of $\mathbb{R}^n$ that intersects every cube (with sides parallel to the axes) of unit volume in a set of measure at least $\lambda$, but that does not contain any arithmetic progression. 
\end{corollary} \begin{proof} Observe that if $A \subset \mathbb{R}^m$ and $B \subset \mathbb{R}^n$ do not contain arithmetic progressions, then their Cartesian product $A \times B \subset \mathbb{R}^{m+n}$ also does not contain arithmetic progressions. We prove the contrapositive. Suppose that there is an $x = (x_1, \ldots, x_m, x_{m+1}, \ldots, x_{m+n})$ in $A \times B$ and a nonzero $\Delta = (\delta_1, \ldots, \delta_{m+n})$ in $\mathbb{R}^{m+n}$ such that \begin{align*} x + \Delta\mathbb{N} = \{(x_1 + k\delta_1, \ldots, x_m + k\delta_m, x_{m+1} + k\delta_{m+1}, \ldots, x_{m+n} + k\delta_{m+n}) : k \in \mathbb{N}\} \end{align*} is contained in $A \times B$. Consider the projection maps $\pi_A : A \times B \to A$ and $\pi_B : A \times B \to B$. Since $\Delta \neq 0$, we must have that either $\pi_A(\Delta) = (\delta_1, \ldots, \delta_m) \neq 0$ or $\pi_B(\Delta) = (\delta_{m+1}, \ldots, \delta_{m+n}) \neq 0$. Without loss of generality, suppose that $\pi_A(\Delta) \neq 0$. Then the image $\pi_A(x + \Delta\mathbb{N})$ in $A$ is the set \begin{align*} \pi_A(x + \Delta\mathbb{N}) = \{(x_1 + k\delta_1, \ldots, x_m + k\delta_m) : k \in \mathbb{N}\}. \end{align*} We recognize this as the arithmetic progression $\pi_A(x) + \pi_A(\Delta) \mathbb{N}$ with nonzero gap $\pi_A(\Delta)$. This progression is contained in $A$. Now fix $\lambda$ in $(0,1)$, and consider a cube $Q \subset \mathbb{R}^n$ with unit volume. For each $i$, let $\pi_i : \mathbb{R}^n \to \mathbb{R}$ be the projection map onto the $i^\text{th}$ coordinate axis. Then the sets $\pi_1(Q), \ldots, \pi_n(Q)$ are intervals of unit length in $\mathbb{R}$. Apply Corollary \ref{cor:reduction} with $\lambda^{1/n}$, to obtain a subset $S$ of $\mathbb{R}$ that does not contain any arithmetic progression, and with $\vert S \cap \pi_i(Q) \vert \geq \lambda^{1/n}$ for each $i$. Let $S^n$ be the $n$-fold Cartesian product $S^n = S \times \ldots \times S$. 
From our above observations, we know that $S^n$ does not contain any arithmetic progression. Also, by definition of product measures, we have that \begin{align*} \vert S^n \cap Q\vert = \vert (S \cap \pi_1(Q)) \times \ldots \times (S \cap \pi_n(Q)) \vert = \prod_{i=1}^n \vert S \cap \pi_i(Q) \vert \geq \lambda, \end{align*} as required. (Note that we use $\vert \cdot \vert$ to denote both the $1$-dimensional and $n$-dimensional Lebesgue measure.) \end{proof} \section{The range of $\lambda$ is optimal} If a subset $S$ of $\mathbb{R}$ intersects every interval of unit length in a set of \textit{full} measure, then the complement of $S$ in $\mathbb{R}$ must have finite measure. It is known, however, that any subset of $\mathbb{R}$ whose complement has finite measure must contain an arithmetic progression. Thus, Corollary \ref{cor:reduction} fails when $\lambda = 1$. The fact that sets whose complements have finite measure must contain arithmetic progressions follows from Theorem \ref{th:chlebik}, below. This result is due to Chleb\'ik, as reported by Svetic in 2000 \cite{Svetic}. A proof appears in a 2015 preprint by Chleb\'ik \cite{Chlebik}. Chleb\'ik terms a subset $X \subseteq \mathbb{R}$ \textit{uniformly locally finite} if \begin{align*} \sup\{\#(X \cap [u,u+1]): u \in \mathbb{R}\} < \infty. \end{align*} \begin{externaltheorem}[Chleb\'ik, Theorem 14(a) from \cite{Chlebik}]\label{th:chlebik} Let $X \subseteq \mathbb{R}$. If $X$ is uniformly locally finite then there is a constant $\epsilon > 0$ such that whenever $G \subseteq \mathbb{R}$ is a Lebesgue measurable set with $|G| < \epsilon$, then $\mathbb{R} \setminus G$ contains plenty of translations of $X$; namely $\{a \in \mathbb{R} : (a + X) \subseteq (\mathbb{R} \setminus G)\}$ has infinite Lebesgue measure. \end{externaltheorem} This result implies Proposition \ref{prop:finitecomplement} when $X = \mathbb{N}$ and $G = [\epsilon/(2\vert \mathbb{R}\setminus S \vert)](\mathbb{R} \setminus S)$. 
For convenience, we include below an elementary proof of the relevant case of Chleb\'ik's result. \begin{proposition}\label{prop:finitecomplement} If the complement of a subset $S$ of $\mathbb{R}$ has finite measure, then $S$ contains a two-sided arithmetic progression $x + \Delta \mathbb{Z}$ for some $x$ in $S$ and nonzero $\Delta$ in $\mathbb{R}$. \end{proposition} \begin{proof} Let $S$ be a subset of $\mathbb{R}$ whose complement has finite measure. Then $\vert \mathbb{R} \setminus S \vert < \xi < \infty$ for some positive real number $\xi$. For each integer $k$, let $I_k$ denote the open interval $(2k\xi, 2(k+1)\xi)$. Let $\tau_{y}:\mathbb{R} \rightarrow \mathbb{R}$ be the translation map defined by $\tau_y(x) = x+y$, and consider the set \begin{align}\label{eq:setI} I = \bigcap_{k \in \mathbb{Z}} \tau_{-2k\xi}(S \cap I_k). \end{align} To show that $S$ contains an infinite arithmetic progression, it suffices to show that $I$ is nonempty. Indeed, if $x \in I$, then we have $x + 2k\xi$ contained in $S \cap I_k$ for each $k \in \mathbb{Z}$, and hence the arithmetic progression $x + 2\xi\mathbb{Z}$ is contained in $S$. We prove that $I$ is nonempty by showing that it has positive measure. We require the fact that $I \subset I_0$, as well as the subadditivity and translation invariance of Lebesgue measure. By the definition of $I$ in \eqref{eq:setI}, \begin{align*} |I| &= \left|\bigcap_{k \in \mathbb{Z}} \tau_{-2k\xi}(S \cap I_k)\right|. \end{align*} Since $I \subset I_0$, we may write \begin{align*} |I| &=\left|I_0 \setminus \bigcup_{k \in \mathbb{Z}} \left(I_0 \setminus \tau_{-2k\xi}(S \cap I_k)\right)\right|. \end{align*} Now, since $I_0 \setminus \tau_{-2k\xi}(S \cap I_k)$ is contained in $I_0$, and since $I_0$ has finite measure, we see that \begin{align*} |I| &=|I_0| - \left|\bigcup_{k \in \mathbb{Z}} \left(I_0 \setminus \tau_{-2k\xi}(S \cap I_k)\right)\right|. 
\end{align*} From the subadditivity of measures, we obtain \begin{align*} |I| &\geq |I_0| - \sum_{k \in \mathbb{Z}} \left|I_0 \setminus \tau_{-2k\xi}(S \cap I_k)\right|. \end{align*} Translating the intervals $I_0 \setminus \tau_{-2k\xi}(S \cap I_k)$ forward by $2k\xi$, and appealing to the translation invariance of the Lebesgue measure, we see that \begin{align*} |I| &\geq |I_0| - \sum_{k \in \mathbb{Z}} \left|\tau_{2k\xi}(I_0) \setminus (S \cap I_k)\right|. \end{align*} Recognising $\tau_{2k\xi}(I_0)$ as the interval $I_k$, we may write \begin{align*} |I| &\geq|I_0| - \sum_{k \in \mathbb{Z}} \left|I_k \setminus (S \cap I_k)\right|. \end{align*} The identity $A\setminus (B\cap C) = (A\setminus B)\cup (A\setminus C)$ now gives \begin{align*} |I| &\geq |I_0| - \sum_{k \in \mathbb{Z}} \left|I_k \setminus S\right|, \end{align*} and by the countable additivity of measures, we have \begin{align*} |I| &\geq |I_0| - |\mathbb{R} \setminus S|. \end{align*} By definition, $I_0$ has Lebesgue measure $2\xi$. Moreover, the complement of $S$ in $\mathbb{R}$ has measure less than $\xi$, so \begin{align*} |I| &> \xi > 0 \end{align*} as needed. \end{proof} Although Corollary \ref{cor:reduction} fails when $\lambda = 1$, it is reasonable to ask the following weaker question: does there exist an increasing sequence of positive real numbers $\{\lambda_m\} \to 1^{-}$ in $[0,1)$ and a subset $S$ of $\mathbb{R}$ satisfying $\vert S \cap [m,m+1] \vert \geq \lambda_m$ for each $m$, such that $S$ does not contain any arithmetic progression? The following theorem answers this question affirmatively. \begin{externaltheorem}[Burgin--Goldberg--Keleti--MacMahon--Wang, Theorem 1.3 from \cite{newkeleti}] There exists a closed set $S \subset [0,\infty)$ satisfying $\lim_{m \to \infty} \vert S \cap [m,m+1] \vert = 1$ such that $S$ does not contain any infinite arithmetic progression. 
\end{externaltheorem} \begin{bibdiv} \begin{biblist} \bib{Behrend}{article}{ title={On sets of integers which contain no three terms in arithmetical progression}, author={F.A. Behrend}, journal={Proc. Nat. Acad. Sci.}, volume={32}, number={12}, date={1946}, pages={331--332}, doi={10.1073/pnas.32.12.331} } \bib{newkeleti}{misc}{ title = {Large sets avoiding infinite arithmetic/geometric progressions}, author = {Alex Burgin}, author = {Samuel Goldberg}, author = {Tamás Keleti}, author = {Connor MacMahon}, author = {Xianzhi Wang}, year = {2022}, note = {https://arxiv.org/abs/2210.09284} } \bib{Chlebik}{misc}{ title = {On the Erd\H{o}s similarity problem}, author = {Chleb\'ik, Miroslav}, year = {2015}, note = {https://arxiv.org/abs/1512.05607} } \bib{DensonPramanikZahl}{article}{ title={Large sets avoiding rough patterns}, author={Denson, Jacob}, author={Pramanik, Malabika}, author={Zahl, Joshua}, note={In \textit{Harmonic analysis and applications}, volume 168 of \textit{Springer Optim. Appl.}, 59--75. Springer, Cham, [2021], DOI 10.1007/978-3-030-61887-2\_4} } \bib{FraserPramanik}{article}{ title={Large sets avoiding patterns}, author={Fraser, Robert}, author={Pramanik, Malabika}, journal={Anal. PDE}, volume={11}, number={5}, date={2018}, pages={1083--1111}, doi={10.2140/apde.2018.11.1083} } \bib{Keleti}{article}{ title={Construction of one-dimensional subsets of the reals not containing similar copies of given patterns}, author={Keleti, Tamás}, journal={Anal. PDE}, volume={1}, number={1}, date={2008}, pages={29--33}, doi={10.2140/apde.2008.1.29} } \bib{KandP}{misc}{ title = {Large sets containing no copies of a given infinite sequence}, author = {Mihail N. Kolountzakis}, author = {Effie Papageorgiou}, year = {2022}, note = {https://arxiv.org/abs/2208.02637} } \bib{Maga}{article}{ title={Full dimensional sets without given patterns}, author={Maga, Péter}, journal={Real Anal. 
Exchange}, volume={36}, number={1}, date={2010/2011}, pages={79--90}, doi={10.14321/realanalexch.36.1.0079} } \bib{Mathe}{article}{ title={Sets of large dimension not containing polynomial configurations}, author={Máthé, András}, journal={Adv. Math.}, volume={316}, date={2017}, pages={691--709}, doi={10.1016/j.aim.2017.01.002} } \bib{Joe}{article}{ title={Personal Communication}, author={Repka, Joe}, date={2022} } \bib{Pablo}{article}{ title={Salem sets with no arithmetic progressions}, author={Shmerkin, Pablo}, journal={Int. Math. Res. Not. IMRN}, volume={7}, date={2017}, pages={1929--1941}, doi={10.1093/imrn/rnw097} } \bib{Svetic}{article}{ title={The Erd\H{o}s similarity problem: A survey}, author={R.E. Svetic}, journal={Real Anal. Exchange}, volume={25}, number={1}, date={1999/2000}, pages={181--184}, doi={10.2307/44153069} } \bib{Szemeredi}{article}{ title={On sets of integers containing $k$ elements in arithmetic progression}, author={E. Szemerédi}, journal={Acta Arith.}, volume={27}, date={1975}, pages={199--245}, doi={10.4064/aa-27-1-199-245} } \bib{Weyl}{article}{ title={Über die Gleichverteilung von Zahlen mod. Eins}, author={Weyl, Hermann}, journal={Math. Ann.}, volume={77}, date={1916}, pages={313--352}, doi={10.1007/BF01475864} } \bib{Alexia}{article}{ title={Large sets avoiding linear patterns}, author={Yavicoli, Alexia}, journal={Proc. Amer. Math. Soc.}, volume={149}, date={2021}, pages={4057--4066 }, doi={10.1090/proc/13959} } \bib{Wag}{article}{ title={Sequences not containing an infinite arithmetic progression}, author={Samuel S. Wagstaff Jr.}, journal={Proc. Amer. Math. Soc.}, volume={36}, number={2}, date={1972}, pages={395--397}, doi={10.2307/2039167} } \end{biblist} \end{bibdiv} \end{document}
\begin{document} \dedicatory{To Nigel Hitchin on the occasion of his 70th birthday} \title[Involutions of rank 2 Higgs bundle moduli spaces] {Involutions of rank 2 Higgs bundle moduli spaces} \author[Oscar Garc{\'\i}a-Prada]{Oscar Garc{\'\i}a-Prada} \address{Instituto de Ciencias Matem\'aticas \\ CSIC \\ Nicol\'as Cabrera, 13--15 \\ 28049 Madrid \\ Spain} \email{[email protected]} \author[S. Ramanan]{S. Ramanan} \address{Chennai Mathematical Institute\\ H1, SIPCOT IT Park, Siruseri\\ Kelambakkam 603103\\ India} \email{[email protected]} \thanks{ Partially supported by the European Commission Marie Curie IRSES MODULI Programme PIRSES-GA-2013-61-25-34. } \subjclass[2000]{Primary 14H60; Secondary 57R57, 58D29} \begin{abstract} We consider the moduli space $\mathcal{H}(2,\delta)$ of rank 2 Higgs bundles with fixed determinant $\delta$ over a smooth projective curve $X$ of genus 2 over $\mathbb{C}$, and study involutions defined by tensoring the vector bundle with an element $\alpha$ of order 2 in the Jacobian of the curve, combined with multiplication of the Higgs field by $\pm 1$. We describe the fixed points of these involutions in terms of the Prym variety of the covering of $X$ defined by $\alpha$, and give an interpretation in terms of the moduli space of representations of the fundamental group. \end{abstract} \maketitle \section{Introduction} Let $X$ be a smooth projective curve of genus $g\geqslant 2$ over $\mathbb{C} $. A {\it Higgs bundle} $(E, \varphi )$ on $X$ consists of a vector bundle $E$ and a twisted endomorphism $\varphi :E \to E\otimes K$, where $K$ is the canonical bundle of $X$. The {\it slope} of $E$ is the rational number defined as $$\mu (E) = {\deg E}/{\mathrm{rank}\, E}. $$ A Higgs bundle is said to be {\it stable} (resp. {\it semistable}) if $$\mu (F) < ({\rm resp. } \leqslant )~ \mu (E)$$ for every proper subbundle $F$ of $E$ invariant under $\varphi $ in the sense that $\varphi (F) \subset F\otimes K$. 
Also, a Higgs bundle $(E, \varphi )$ is {\it polystable} if $(E, \varphi ) = \oplus_i (E_i, \varphi _i)$ where all the $(E_i, \varphi _i)$ are stable and all $E_i$ have the same slope as that of $E$. Let $\delta $ be a line bundle on $X$. We are interested in the moduli space $\mathcal{H}(n,\delta )$ of isomorphism classes of polystable Higgs bundles $(E, \varphi )$ of rank $n$ with determinant $\delta $ and traceless $\varphi $. This moduli space was constructed analytically by Hitchin \cite{hitchin} and later algebraically via geometric invariant theory by Nitsure \cite{nitsure}. This space is a normal quasi-projective variety of dimension $2(n^2-1)(g-1)$. If the degree of ${\delta }$ and $n$ are coprime, $\mathcal{H}(n,\delta )$ is smooth. Let $M(n,\delta )$ be the moduli space of polystable vector bundles of rank $n$ and determinant $\delta $. The set of points corresponding to stable bundles form a smooth open set and the cotangent bundle of it is a smooth, open, dense subvariety of $\mathcal{H}(n,\delta)$. In this paper, we focus on vector bundles and Higgs bundles of rank $2$, leaving the study of those of higher rank (and indeed of $G$-principal bundles with $G$ reductive) for \cite{garcia-prada-ramanan}. There are two kinds of involutions that we consider. Firstly the subgroup $J_2$ of elements of the Jacobian $J$ consisting of elements of order $2$ acts on $\mathcal{H}(2,\delta )$ by tensor product. We also consider the involutions where in addition, the sign of the Higgs field is changed. More explicitly, for $\alpha\in J_2$ we consider the involutions \begin{equation}\label{involutions} \begin{aligned} \iota(\alpha)^\pm: \mathcal{H}(2,\delta) & \to \mathcal{H}(2,\delta) \\ (E,\varphi) & \mapsto (E\otimes\alpha,\pm \varphi). 
\end{aligned} \end{equation} We determine the fixed point varieties in all these cases, and their corresponding subvarieties of the moduli space of representations of the fundamental group of $X$ (and its universal central extension) under the correspondence between this moduli space and the moduli space of Higgs bundles, established by Hitchin \cite{hitchin} and Donaldson \cite{donaldson}. The case of the involution $(E,\varphi)\mapsto (E,-\varphi)$ is already covered in the beautiful paper of Hitchin \cite{hitchin}. \section{Line bundles} To start with, we consider involutions in the case of line bundles. The moduli space of line bundles of degree $d$ is the {\it Jacobian variety} $J^d$. There is a universal line bundle (called a Poincar{\'e} bundle) on $J^d\times X$ which is unique up to tensoring by a line bundle pulled back from $J^d$. We will denote $J^0$ simply by $J$. The involution $\iota :L \to L^{-1}$ of $J$ has obviously the finite set $J_2$ of elements of order $2$, as its fixed point variety. The Higgs moduli space of line bundles consists of pairs $(L, \varphi )$ where $L$ is a line bundle of fixed degree and $\varphi $ is a section of $K$. The moduli space of rank 1 Higgs bundles of degree $d$ is thus isomorphic to $J^d\times H^0(X,K)$. There are a few involutions to consider even in this case. Firstly on the Higgs moduli space of line bundles of degree $d$, one may consider the involution $(L, \varphi ) \to (L, -\varphi )$. The fixed point variety is just $J^d$ imbedded in the Higgs moduli space by the map $L \mapsto (L, 0)$ since any automorphism of $L$ induces identity on the set of Higgs fields on $L$. When $d = 0$, one may also consider the involution $(L, \varphi ) \mapsto (L^{-1}, \varphi )$. This has as fixed points the set $\{ (L, \varphi ): L \in J_2 \;\;\mbox{and}\;\; \varphi\in H^0(X,K)\} $. Also, we may consider the composite of the two actions, namely $(L, \varphi ) \mapsto (L^{-1}, -\varphi )$. 
Again it is obvious that the fixed points are just points of $J_2$ with Higgs fields $0$. Finally, translations by elements of $J_2\smallsetminus \{ 0 \}$ are involutions without fixed points. \section{Fixed Points of $\iota(\alpha)^{-}$}\label{triples} We wish now to look at involutions of $M = M(2, \delta )$ and $\mathcal{H} = \mathcal{H}(2, \delta )$. We will often assume that $\delta $ is either ${\mathcal{O}}$ or a line bundle of degree $1$. There is no loss of generality, since the varieties $M$ and $\mathcal{H}$ for any $\delta $ are isomorphic (on tensoring with a suitable line bundle) to ones with $\delta $ as above. In general, we denote by $d$ the degree of $\delta $. If $d$ is odd, the spaces $M$ and $\mathcal{H}$ are smooth and the points correspond to stable bundles and stable Higgs bundles, respectively. If $d$ is even (and $\delta $ trivial), there is a natural morphism $J \to M$ which takes $L$ to $L\oplus L^{-1}$ and imbeds the quotient of $J$ by the involution $\iota$ on $J$, namely the Kummer variety, in $M$. This is the non-stable locus (which is also the singular locus if $g > 2$) of $M$ and has $J_2$ as its own singular locus. \begin{remark} If $(E, \varphi )\in \mathcal{H}$, but $E$ is not semi-stable, then there is a line sub-bundle $L$ of $E$ which is of degree $>d/2$. Moreover, it is the unique sub-bundle with degree $\geqslant d/2$. Clearly, since $(E, \varphi )$ is semi-stable, $\varphi $ does not leave $L$ invariant. Hence $(E, \varphi )$ is actually a stable Higgs bundle. In particular, it is a smooth point of $\mathcal{H}$. \end{remark} Before we take up the study of the involutions (\ref{involutions}) in general, we note that even when $\alpha$ is trivial, the involution $\iota^-:=\iota(\mathcal{O})^{-}$ is non-trivial and is of interest. In this case, the fixed point varieties were determined by Hitchin \cite{hitchin} and we recall the results with some additions and clarifications. 
\begin{proposition} Polystable Higgs bundles $(E, \varphi )$ fixed by the involution $\iota^{-}:(E, \varphi ) \mapsto (E, -\varphi )$ fall under the following types: \begin{itemize} \item[(i)] $E\in M = M(2, \delta )$ and $\varphi = 0$. \item[(ii)] For every integer $a$ satisfying $ 0 < 2a - d \leqslant 2g - 2$, consider the set $T_a$ of triples $(L, \beta,\gamma )$ consisting of a line bundle $L$ of degree $a$ and homomorphisms $\beta : L^{-1}\otimes \delta \to L\otimes K$, with $\gamma \neq 0$ and $\gamma : L\to L^{-1}\otimes \delta \otimes K$. \item[(iii)] Same as in ii), but with $2a = d$ if $d$ is even. To every triple as in ii) or iii), associate the Higgs bundle $(E, \varphi )$ where \begin{equation}\label{higgs-bundle} E = L\oplus (L^{-1}\otimes \delta ) \;\;\;\; \;\; \mbox{and}\;\;\;\;\;\; \varphi = \begin{pmatrix} 0 & \beta \\ \gamma & 0 \end{pmatrix}. \end{equation} \end{itemize} Any type {\em (ii)} Higgs bundle $(E, \varphi)$ is stable whereas $E$ is not even semi-stable. In type {\em (iii)} if $L^2$ is not isomorphic to $\delta $, and $\beta $ and $\gamma $ are both non-zero, then $(E, \varphi )$ is stable. If $L^2\cong \delta$ and $\beta $ and $\gamma $ (both of which are then sections of $K$) are linearly independent, then $(E, \varphi )$ is stable. \end{proposition} \begin{proof} Firstly, if $E\in M$ and $\varphi = 0$, it is obvious that it is fixed under the above involution. On the other hand, it is clear that if $(E, \varphi )$ is of type (ii) or (iii), then the automorphism of $E$ \begin{equation}\label{i-matrix} \begin{pmatrix} i & 0 \\ 0 & -i \end{pmatrix}, \end{equation} \noindent takes $\varphi $ to $-\varphi $. In type (ii), since $2a - d > 0$, it follows that $L$ is the only line sub-bundle of $E$ of degree $\geqslant d/2$. Since $(E, \varphi )$ is semi-stable, $L$ is not invariant under $\varphi $, (which is the case if and only if $\gamma $ is non-zero). Therefore, $(E, \varphi )$ is stable. 
Type (iii) is relevant only when $d$ is even and so we will assume that $\delta $ is trivial. If $L^2$ is not trivial, then every line subbundle of $E$ of degree $0$ is either $L$ or $L^{-1}$. Since we have assumed that $(E, \varphi )$ is poly-stable, either $\varphi $ leaves both $L$ and $L^{-1}$ invariant or neither, i.e. $\beta $ and $\gamma $ are both zero or both non-zero. The former case is covered under type i) and in the latter case, $(E, \varphi )$ is stable. Finally, if $L^2$ is trivial, then every line sub-bundle of degree $0$ is isomorphic to $L$, and all imbeddings of $L$ in $E = L \oplus L$ are given by $v \mapsto (\lambda v , \mu v)$, with $(\lambda , \mu ) \neq 0$. The restriction of $\varphi $ to $L$ composed with the projection of $E\otimes K$ to $(E/L) \otimes K = (L\otimes K)$, is given by $\lambda \gamma + \mu \beta $. Hence this imbedding of $L$ is invariant under $\varphi $ if and only if $\lambda \gamma + \mu \beta = 0$, proving that if $\beta $ and $\gamma $ are linearly independent, then $(E, \varphi )$ is stable. Otherwise, $(L,0)$ is a (Higgs) subbundle of $(E, \varphi )$ and hence it is covered again in i). Conversely, let $(E, \varphi )$ be a {\it stable} Higgs bundle fixed by the involution. Then there exists an automorphism $f$ of $E$ (of determinant 1) which takes $\varphi $ to $-\varphi $. If $E$ is a stable vector bundle, all its automorphisms are scalar multiplications which take $\varphi $ into itself. Hence $\varphi = 0$ in this case. Let $E$ be nonstable. Obviously, then $\varphi $ is non-zero. Since $f^2$ is an automorphism of the stable Higgs bundle $(E, \varphi )$, we have $f^2 = \pm \Id_E$. This implies that $f_x$ is semi-simple for all $x \in X$. If $f^2 = \Id_E$, the eigenvalues of $f_x$ are $\pm 1$ and since $\det(f_x) = 1 $ we have $f = \pm \Id_E$ which would actually leave $\varphi $ invariant. So $f_x$ has $\pm i$ as eigenvalues at all points. 
We conclude that $E$ is a direct sum of line bundles corresponding to the eigenvalues $\pm i$. Thus $f^2 =-\Id_E$ and $E = L \oplus (L^{-1}\otimes \delta )$ with $f|L = i.\Id_E$, and $f|(L^{-1}\otimes \delta ) = -i.\Id_E$. We may assume that $\deg L = a \geqslant d/2$, replacing $L$ by $L^{-1}\otimes \delta $ (and $f$ by $-f$) if necessary. If $ a > d/2$, it also follows that the composite of $\varphi |L $ and the projection $E\otimes K \to L^{-1}\otimes \delta \otimes K$ is nonzero (since $(E, \varphi )$ is semi-stable) which implies that $a \leqslant -a + d +2g -2$, i.e.\ $2a - d \leqslant 2g - 2$. Moreover, from the fact that $f$ takes $\varphi $ to $-\varphi $, one deduces that $\varphi $ is of the form claimed. If $(E, \varphi )$ is not stable, in which case we may assume $\delta $ is trivial, $(E, \varphi )$ is a direct sum of $(L, \psi )$ and $(L^{-1}, -\psi )$ with $\deg L = 0$. If $\psi $ is nonzero, then $(E, \varphi )$ is isomorphic to $(E, -\varphi )$ if and only if $L\cong L^{-1}$. If then $L\cong L^{-1}$ we may take $g =1/\sqrt 2\begin{pmatrix}1&1\\-1&1\end{pmatrix}$ and change the decomposition of $E$ to $g(L) \oplus g(L)$ and see that $(E, \varphi )$ falls under type (iii). \end{proof} \subsection{The set of triples} The above proposition leads us to consider the set of triples as in type (ii) and type (iii) above with $d \leqslant 2 a \leqslant d + 2g - 2$. Set $m = 2a - d$. To such a triple, we have associated the Higgs bundle $(E,\varphi )$ given by $E = L \oplus (L^{-1}\otimes \delta )$ and $\varphi $ by the matrix in (\ref{higgs-bundle}). Notice however that this triple and the triple $(L, \lambda ^{-1}\beta,\lambda \gamma)$ give rise to isomorphic Higgs bundles. So we consider the set of triples $(L, \beta , \gamma )$ as above, make $\mathbb{C} ^*$ act on it, in which $\lambda \in \mathbb{C} ^*$ takes $(L, \beta , \gamma )$ to $(L, \lambda ^{-1}\beta, \lambda \gamma)$ and pass to the quotient. 
We have thus given an injective map of this quotient into the $\iota ^{-}$-fixed subvariety of Higgs bundles. We will equip this quotient with the structure of a variety. \subsection{Construction of the space of triples.} Take any line bundle ${{\mathscr{L}}}$ on $T \times X$, where $T$ is any parameter variety. For any $t\in T$, denote by ${{\mathscr{L}}}_t$ the line bundle ${\mathscr{L}}|{{t}\times X}$. Assume that $\deg ({{\mathscr{L}}}_t) = r$ for all $t\in T$. Then we get a (classifying) morphism $c_{{\mathscr{L}}}:T\to J^r$ mapping $t$ to the isomorphism class of ${{\mathscr{L}}}_t$. There is a natural morphism of $S = S^r(X) \to J^r$ since $S\times X$ has a universal divisor giving rise to a family of line bundles on $X$ of degree $r$, parametrised by $S$. The pull back of any Poincar{\'e} bundle on $J^r\times X$ to $S\times X$ is the tensor product of the line bundle given by the universal divisor on $S \times X$ and a line bundle pulled back from $S$. The composite of the projection of this line bundle $U$ to $S$ and the morphism $S\to J$ blows down the zero section of the line bundle to $Z$ and yields actually an affine morphism and the fibre over any $L\in J^r$ can be identified with $H^0(X,L)$, coming up with a section $Z$ of this affine morphism. Notice that if $r > 2g -2$, this is actually a vector bundle over $J^r$ of rank $r + 1 - g$ and $Z$ is its zero section. If ${\mathscr{L}}$ is a family of line bundles of degree $r$ on $X$, parametrised by $T$ as above the pull back of the morphism $U \to J^r$ by $c_E:T \to J^r$ will be denoted $A({{\mathscr{L}}})$. If $m >0$, let $V$ be the pull back by the map $J^a \to J^{2g - 2 + m}$ given by $L\to K\otimes L^2 \otimes \delta ^{-1}$ of the above vector bundle. On the other hand the map $L \to K \otimes L^{-2} \otimes \delta $ of $J^a \to J^{2g - 2 - m}$ pulls back the symmetric product $S^{2g - 2 -m}$ and gives a $2^{2g}$-sheeted \'etale covering. 
The inverse image of $V$ tensored with a line bundle on $S^{2g -2 -m}$ thus gives the required structure on the quotient of $T_a$ by $\mathbb{C} ^*$. \begin{proposition} For each $m$ with $0 < m < 2g -2$, consider the pull-back of the map $S^{2g - 2 - m} \to J^{2g - 2 - m}$ by the map $L \to K \otimes L^{-2}\otimes \delta $. A vector bundle over this of rank $g-1 + m$ is isomorphic to a subvariety of Higgs bundles which are all fixed by $\iota^-$. \end{proposition} We have seen that $M$ imbedded in $H$ by $E$ to $(E, 0)$ is a fixed point variety. It is of course closed and in fact, compact as well. The set of type (ii) fixed points is the disjoint union of $T_a$ with $d/2 < a < g - 1 + d$ and (disjoint from $M$ as well). Each of these gives an injective morphism of a vector bundle on a $2^{2g}$-sheeted \'etale covering of $S^a$ into the fixed point subvariety. Since the subvariety of $H$ corresponding to nonstable vector bundles is smooth and closed, this morphism is an isomorphism onto the image. We need to describe the image of the subvariety $T_a$ when $a = d/2$. We will assume $d =0$ and $\delta $ is trivial. Consider the natural map of $S^{2g -2}$ onto $J^{2g -2}$. Pull it back to $J$ by the two maps $L \to K\otimes L^2$ and $L\to K\otimes L^{-2}$. Take their fibre product and the quotient by the involution which changes the two factors. There is a natural map of this quotient into ${\mbox{$I\!\!\! P$}}H^0(K^2)$. Pull back the line bundle $\mathcal{O}(1)$ on ${\mbox{$I\!\!\! P$}}H^0 (K^2)$ to this . It is easy to check that this is irreducible and closed. There are other irreducible components of type (iii) in the case of $g = 2$. Take any line bundle $L$ of order 2 and consider \begin{equation} \begin{pmatrix} 0 & \beta \\ \gamma & 0 \end{pmatrix} \end{equation} as a Higgs field on $L \oplus L$. Consider the tensor product map $\beta \otimes \gamma$ into $H^0 (K^2)$. 
This is surjective and can be identified with the quotient by $\mathbb{C} ^*$ and $\mathbb{Z}/2$ of the fixed point set given by $(L, \beta , \gamma)$ with $L\in J_2$. \subsection{An Alternative point of view} Note that both in Type (ii) and Type (iii) we have a natural morphism of these components into $H^0(X, K^2)$ given by $(\beta , \gamma )\mapsto -\beta\gamma $. Clearly this is the restriction of the Hitchin map. Given a (non-zero) section of $H^0 (K^2)$ we can partition its divisor into two sets of cardinality $2g -2 -m $ and $2g -2 + m$. They yield elements of $J^{2g -2 -m}$ and $J^{2g -2 +m}$ together with non-zero sections $\beta $ and $\gamma $ which are defined up to the action of $\mathbb{C}^*$ as we have defined above. Passing to a $2^{2g}$-sheeted \'etale covering we get the required set. In particular it follows that except in case (i) when the Hitchin map is $0$, in all other cases, the Hitchin map is finite and surjective. \section{Prym varieties and rank 2 bundles}\label{prym} Let now $\alpha \in J_2\smallsetminus \{0\}$. To start with, we will determine the fixed points of the involution on $M$ defined by tensoring by $\alpha$. \begin{proposition} Let $E$ be a vector bundle of rank $2$ on $X$, and let $\alpha $ be a non-trivial line bundle of order $2$ such that $(E\otimes \alpha )\cong E$. Then $E$ is polystable. Moreover if $E$ is not stable it is of the form $L \oplus (L\otimes \alpha )$ with $L^2 \cong\alpha $. \end{proposition} \begin{proof} Assume that $(E\otimes \alpha )\cong E$. If $E$ is not poly-stable, then it has a unique line subbundle $L$ of maximal degree. This implies that $(L\otimes \alpha )\cong L$ which is absurd. If $E$ is of the form $L \oplus M$, then under our assumption, it follows that $M \cong L\otimes \alpha $. \end{proof} We recall \cite{mumford,narasimhan-ramanan} the relation between the Prym variety of a two-sheeted {\'e}tale cover of $X$ and vector bundles of rank 2 on $X$. 
If $\alpha $ is a non-trivial element of $J_2(X)$, there is associated to it a canonical $2$-sheeted {\' e}tale cover $\pi :X_{\alpha } \to X$, namely $\mathrm{Sp}ec({\mathcal{O}} \oplus \alpha )$ with the obvious algebra structure on this locally free sheaf. Let $\iota$ be the Galois involution. For every line bundle $L$ of degree $d$ on $X_\alpha$, the line bundle $L\otimes \iota^*L$ of degree $2d$ with the natural lift of $\iota$ clearly descends to a line bundle $\Nm(L)$ of degree $d$ on $X$. This gives the {\it norm homomorphism} $\Nm:\operatorname{Pic}(X_\alpha)\to \operatorname{Pic}(X)$. Its kernel consists of two components and the one which contains the trivial line bundle is the {\it Prym variety} $P_{\alpha }$ associated to $\alpha $. If $L$ is a line bundle on $X_\alpha$, its direct image $\pi _*(L)$ on $X$ is a vector bundle of rank $2$. Note that $\det(\pi _*({\mathcal{O}})) = \det({\mathcal{O}}\oplus \alpha) = \alpha$, and more generally that $\det(\pi _*(L)) = \Nm(L) \otimes \alpha $ for all $L$. The fibres of $\Nm$ consist of two cosets $F_\alpha$ of $P_{\alpha }$ and the Galois involution interchanges the two if the degree is odd and leaves each component invariant if the degree is even. In particular, it acts on $P_{\alpha }$, and indeed as $L\mapsto L^{-1}$ on it. \begin{proposition} For any line bundle $L$ on $X_\alpha$, the direct image $E = \pi _*L$ is a polystable vector bundle of rank $2$ on $X$ such that $E\otimes \alpha \cong E $. If $E$ is not stable, it is of the form $\xi \oplus (\xi \otimes \alpha)$. \end{proposition} \begin{proof} Indeed, if $\xi $ is any line subbundle of $E$, its inclusion in $E$ gives rise to a nonzero homomorphism $\pi ^*\xi \to L$, and hence $2\deg \xi = \deg (\pi ^*\xi ) \leqslantqslant \deg(L) = \deg(E)$, proving $E$ is semi-stable. If $\deg \xi = \deg E/2$, the homomorphism $\pi ^*\xi \to L$ is an isomorphism. 
But then $\pi _*L = \pi _*(\pi ^*\xi ) = \xi \otimes \pi _*{\mathcal{O}} = \xi \otimes ({\mathcal{O}} \oplus \alpha )$ proving our assertion. \end{proof} We have thus a morphism of $\Nm^{-1}(\delta \otimes \alpha )$ into $M(2, \delta )$ which maps $L$ to $\pi _*L$. Let $E$ be stable such that $E\otimes \alpha \cong E$. we may then choose an isomorphism $f:E \to E\otimes {\alpha }$ such that its iterate $(f\otimes \Id_{\alpha})\circ f: E\to E$ is the identity. Indeed this composite is an automorphism of $E$ and hence a non-zero scalar. We can then replace the isomorphism by a scalar multiple so that this composite is $\Id_E$. Now the locally free sheaf ${\mathcal{E}}$ can be provided a module structure over ${\mathcal{O}}\oplus \alpha $ by using the above isomorphism. This means that it is the direct image of an invertible sheaf on $X_\alpha$. On the other hand, if $E$ is poly-stable but not stable, it is isomorphic to $L\oplus M$. If $E\otimes \alpha $ is isomorphic to $E$, it follows that $L\cong M \otimes \alpha $. Hence we deduce that the above morphism $\Nm^{-1}(\delta\otimes \alpha )\to M(2, \delta )$ is onto the fixed point variety under the action of tensoring by $\alpha$ on $M(2, \delta )$. If $\pi _*L \cong \pi _*L'$, then by applying $\pi ^*$ to it, we see that $L'$ is isomorphic either to $L$ or $\iota^*L$. In other words, the above map descends to an isomorphism of the quotient of $\Nm^{-1}(\delta\otimes\alpha )$ by the Galois involution onto the $\alpha $-fixed subvariety of $M(2, \delta)$. Since the fibres of $\Nm$ are interchanged by the Galois involution when $\delta$ is of odd degree, this fixed point variety is isomorphic to a coset of the Prym variety. When $\delta $ is of even degree, the $\alpha $-fixed variety has two connected components, each isomorphic to the quotient of the Prym variety by the involution $L\to L^{-1}$, that is to say to the Kummer variety of Prym. We collect these facts in the following. 
\begin{theorem}\langlebel{fixed-points-M} Let $\alpha $ be a non-trivial element of $J_2(X)$. It acts on $M(2, \delta )$ by tensor product: $\iota(\alpha)(E):=E\otimes \alpha$. The fixed point variety $F_\alpha(\delta)$ is isomorphic to the Prym variety of the covering $\pi:X_{\alpha }\to X$ given by $\alpha $ if $d = \deg \delta$ is odd, and is isomorphic to the union of two irreducible components, each isomorphic to the Kummer variety of the Prym variety, if $d$ is even. \end{theorem} \begin{remarks} (1) If $L$ is a line bundle on $X_{\alpha }$ and $E = \pi _*L$, then since $E\otimes \alpha \cong E$, $\alpha $ is a line sub-bundle of $\ad(E)$. Indeed, since $E$ is poly-stable, $\alpha $ is actually a direct summand. To see this, interpret $\ad(E)$ as $S^2(E)\otimes \det(E)^{-1}$ and notice that there is a natural surjecion of $S^2(\pi_* L)$ onto $\pi_*(L^2)$. It follows that $\pi_*(L^2) \det(E)^{-1}$ is contained in $\ad(E)$. Thus we see that $$\ad (\pi _*L) \cong \alpha \oplus ((\pi _*L^2)\otimes \alpha \otimes \Nm(L^{-1})).$$ (2) As we have seen above, in the case $\delta $ is trivial, the fixed point variety intersects the non-stable locus, namely the Kummer variety of the Jacobian at bundles of the form $\xi \oplus (\xi \otimes \alpha )$, where $\xi $ is a line bundle with $\xi ^2 \cong \alpha $. Clearly, $\xi $ and $\xi \otimes \alpha $ give the same bundle. Thus the intersection of the two copies of the Prym Kummer variety (corresponding to any non-trivial $\alpha \in J_2$) with the Jacobian--Kummer variety is an orbit of smooth points, under the action of $J_2$. This geometric fact can be stated in the context of principally polarised abelian varieties and is conjectured to be characteristic of Jacobians. Analytically expressed, this is the Schottky equation. 
\end{remarks} \section{Fixed Points of $\iota(\alpha )^{\pm }$ when $d$ is odd.}\langlebel{fix-odd} If $(E, \varphi )$ is a polystable Higgs bundle fixed under either of the involutions $\iota(\alpha )^{\pm }$, we observe that $E$ is isomorphic to $E\otimes \alpha $. This implies that $E$ is itself polystable. Hence if $d$ is odd, we have only to consider the action of $\alpha$ on $M = M(2, \delta )$, given by $E\mapsto E\otimes \alpha$, and look at its action on the cotangent bundle. Let $F_\alpha$ be the fixed point variety in $M$ under the action of $\alpha$ (see Theorem \ref{fixed-points-M}), we have the exact sequence $$0\to N(F_\alpha, M)^* \to T^*(M)|_{F_\alpha} \to T^*(F_\alpha) \to 0,$$ where $N(F_\alpha,M)$ is the normal bundle of $F_\alpha$ in $M$. This sequence splits canonically since $\alpha $ acts on the restriction of the tangent bundle of $M$ to $F_\alpha$ and splits it into eigen-bundles corresponding to the eigen-values $\pm 1$. Clearly the subbundle corresponding to the eigen-value $+1$ (resp. -1) is $T(F_\alpha)$ (resp. $N(F_\alpha, M)$). Since $E\otimes \alpha \cong E$ and $d$ is odd, $E$ is stable, and we have the following. \begin{theorem} If $\deg \delta$ is odd the fixed point subvariety $\mathcal{F}_\alpha^+$ (resp. $\mathcal{F}_\alpha^-$) of the action of $\iota(\alpha )^+$ (resp. $\iota(\alpha )^{-}$) on $\mathcal{H}(2,\delta)$ is the cotangent bundle $T^*(F_\alpha)$ of $F_\alpha$ (resp. the conormal bundle $N(F_\alpha,M)^*$ of $F_\alpha$). \end{theorem} \section{Fixed points of $\iota(\alpha )^\pm$ when $d$ is even.}\langlebel{fix-even} We may assume that the determinant is trivial in this case. If $(E, \varphi )$ is fixed by either of the involutions $\iota(\alpha )^{\pm }$, with $E$ stable, the above discussion is still valid so that we have \begin{itemize} \item[(i)] The sub-variety of fixed points of $\iota(\alpha )^+$ is $T^*(F_\alpha^{stable})$. 
\item[(ii)] The sub-variety of fixed points of $\iota(\alpha )^{-}$ is $N^*(F_\alpha^{stable}, M)$. \end{itemize} Assume then that $(E,\varphi)$ is a fixed point of $\iota(\alpha )^{\pm }$, where $E$ is polystable of the form $L \oplus L^{-1}$. We have $L^{-1}\cong L \otimes \alpha $ and $\varphi $ is of the form \begin{equation}\label{higgs-field} \varphi= \begin{pmatrix} \omega & \beta \\ \gamma & -\omega \end{pmatrix}, \end{equation} \noindent with $\beta,\gamma \in H^0(K\otimes \alpha )$ and $\omega \in H^0(K)$. Since the summands of $E$ are distinct, any isomorphism $f\colon E \otimes \alpha \to E$ has to be of the form \[ \begin{pmatrix} 0 & \lambda \\ -\lambda^{-1} & 0 \end{pmatrix}, \] \noindent with $\lambda \in \mathbb{C}^*$. Also, $f$ takes $\varphi $ to $\pm \varphi $ if and only if \[ \begin{pmatrix} 0 & \lambda \\ -\lambda^{-1} & 0 \end{pmatrix} \begin{pmatrix} \omega & \beta \\ \gamma & -\omega \end{pmatrix} \begin{pmatrix} 0 & -\lambda \\ \lambda^{-1} & 0 \end{pmatrix} =\pm \begin{pmatrix} \omega & \beta \\ \gamma & -\omega \end{pmatrix}. \] In other words, \begin{equation}\label{condition} \begin{pmatrix} -\omega & \lambda^{-2}\gamma \\ \lambda^{2}\beta & \omega \end{pmatrix} =\pm \begin{pmatrix} \omega & \beta \\ \gamma & -\omega \end{pmatrix}. \end{equation} We analyse the cases $\iota(\alpha)^+$ and $\iota(\alpha)^-$ separately. \subsection{Fixed points of $\iota(\alpha)^+$} In the case of $\iota(\alpha )^{+}$, (\ref{condition}) implies that $\omega = 0$ and $\lambda ^2 \beta = \gamma $. If $\beta $ or $\gamma $ is $0$, the Higgs bundle is $S$-equivalent to $(L \oplus (L\otimes \alpha ), 0)$. Hence this fixed point variety is isomorphic to the product of $J/\alpha $ and the space of decomposable tensors in $H^0 (K) \otimes H^0(K)$. 
\begin{remark} Since $E$ is of the form $\pi _*(L)$, we conclude that the tangent space at $E$ to $M$ (assuming that $E$ is stable) is $$H^1(\ad (E)) = H^1(\alpha ) \oplus H^1(\pi_* (L^2)\otimes \alpha ).$$ It is clear that the first summand here is the tangent space to the Prym variety while the second is the space normal to Prym in $M$. \end{remark} \subsection{Fixed points of $\iota(\alpha)^-$} It is clear that \begin{equation} \begin{pmatrix} \omega & \beta \\ \gamma & -\omega \end{pmatrix}. \end{equation} is taken to its negative under the action of \begin{equation} \begin{pmatrix} 0 & \langlembda \\ - \langlembda^{-1} & 0 \end{pmatrix} \end{equation} if and only if $\beta $ and $\gamma $ are multiples of each other, in which case we may as well assume that $\beta = \gamma $. In other words, $E$ belongs to the Prym variety and $\varphi $ belongs to $H^0(K) \oplus H^0(K \otimes \alpha )$. \section{Higgs bundles and representations of the fundamental group} Let $G$ be a reductive Lie group, and let $\pi_1(X)$ be the fundamental group of $X$. A representation $\rho:\pi_1(X)\longrightarrow G$ is said to be {\em reductive} if the composition of $\rho$ with the adjoint representation of $G$ in its Lie algebra is completely reducible. When $G$ is algebraic, this is equivalent to the Zariski closure of the image of $\rho$ being a reductive group. If $G$ is compact or abelian every representation is reductive. We thus define the \emph{moduli space of representations} of $\pi_1(X)$ in $G$ to be the orbit space $$ \mathcal{R}(G) = \Hom^{red}(\pi_1(X),G) / G $$ of reductive representations. With the quotient topology, $\mathcal{R}(G)$ has the structure of an algebraic variety. 
In this section we briefly review the relation between rank 1 and rank 2 Higgs bundles, and representations of the fundamental group of the surface and its universal central extension in $\mathbb{C}^\ast$, $\mathrm{U}(1)$, $\mathbb{R}^*$, $\mathrm{SL}(2,\mathbb{C})$, $\mathrm{SU}(2)$ and $\mathrm{SL}(2,\mathbb{R})$. For more details, see \cite{hitchin,donaldson,corlette,narasimhan-seshadri}. \subsection{Rank 1 Higgs bundles and representations} As is well-known $\mathcal{R}(U(1))$ is in bijective correspondence with the space $J$ of isomorphism classes of line bundles of degree $0$. Also, if we identify $\mathbb{Z} /2$ with the subgroup $\pm 1$ in $U(1)$ we get a bijection of $\mathcal{R}(\mathbb{Z} /2)$ with the set $J_2$ of line bundles of order $2$. By Hodge theory one shows that $\mathcal{R}(\mathbb{C}^*)$ is in bijection with $T^*J\cong J\times H^0(X,K)$, the moduli space of Higgs bundles or rank 1 and degree $0$. The subvariety of fixed points of the involution $(L,\varphi)\to (L^{-1},\varphi)$ in this moduli space is $J_2\times H^0(X,K)$ and corresponds to the subvariety $\mathcal{R}(\mathbb{R}^*)\subset \mathcal{R}(\mathbb{C}^\ast)$. \subsection{Rank 2 Higgs bundles and representations} The notion of stability of a Higgs bundle $(E,\varphi)$ emerges as a condition for the existence of a Hermitian metric on $E$ satisfying the Hitchin equations. More precisely, Hitchin \cite{hitchin} proved the following. \begin{theorem} \langlebel{hk} An $\mathrm{SL}(2,\mathbb{C})$-Higgs bundle $(E,\varphi)$ is polystable if and only if $E$ admits a hermitian metric $h$ satisfying $$ F_h +[\varphi,\varphi^{*_h}]= 0, $$ where $F_h$ is the curvature of the Chern connection defined by $h$. 
\end{theorem} Combining Theorem \ref{hk} with a theorem of Donaldson \cite{donaldson} about the existence of a harmonic metric on a flat $\mathrm{SL}(2,\mathbb{C})$-bundle with reductive holonomy representation, one has the following non-abelian generalisation of the Hodge correspondence explained above for the rank 1 case \cite{hitchin}. \begin{theorem}\langlebel{correspondence} The varieties $\mathcal{H}(2,\mathcal{O})$ and $\mathcal{R}(\mathrm{SL}(2,\mathbb{C}))$ are homeomorphic. \end{theorem} The representation $\rho$ corresponding to a polystable Higgs bundle is the holonomy representation of the flat $\mathrm{SL}(2,C)$-connection given by \begin{equation}\langlebel{higgs-connection} D=\bar{\partial}_E+\partial_h+\varphi+\varphi^{*_h}, \end{equation} where $h$ is the solution to Hitchin equations and $\bar{\partial}_E+\partial_h$ is the $\mathrm{SU}(2)$-connection defined by $\bar{\partial}_E$, the Dolbeault operator of $E$ and $h$. \begin{remark} Notice that the complex structures of $\mathcal{H}(2,\mathcal{O})$ and $\mathcal{R}(\mathrm{SL}(2,\mathbb{C}))$ are different. The complex structure of $\mathcal{H}(2,\mathcal{O})$ is induced by the complex structure of $X$, while that of $\mathcal{R}(\mathrm{SL}(2,\mathbb{C}))$ is induced by the complex structure of $\mathrm{SL}(2,\mathbb{C})$. \end{remark} Higgs bundles with fixed determinant $\delta$ of odd degree can also be interpreted in terms of representations. For this we need to consider the universal central extension of $\pi_1(X)$ (see \cite{atiyah-bott,hitchin}). Recall that the fundamental group, $\pi_1(X)$, of $X$ is a finitely generated group generated by $2g$ generators, say $A_{1},B_{1}, \ldots, A_{g},B_{g}$, subject to the single relation $\prod_{i=1}^{g}[A_{i},B_{i}] = 1$. 
It has a universal central extension \begin{equation}\langlebel{eq:gamma} 0\longrightarrow\mathbb{Z}\longrightarrow\Gamma\longrightarrow\pi_1(X)\longrightarrow 1 \ \end{equation} \noindentndent generated by the same generators as $\pi_1(X)$, together with a central element $J$ subject to the relation $\prod_{i=1}^{g}[A_{i},B_{i}] = J$. Representations of $\Gamma$ into $\mathrm{SL}(2,\mathbb{C})$ are of two types depending on whether the central element $1\in \mathbb{Z}\subset \Gamma$ goes to $+I$ or $-I$ in $\mathrm{SL}(2,\mathbb{C})$. In the first case the representation is simply obtained from a homomorphism from $\Gamma/\mathbb{Z}=\pi_1(X)$ into $\mathrm{SL}(2,\mathbb{C})$. The $+I$ case corresponds to Higgs bundles with trivial determinant as we have seen. The $-I$ case corresponds to Higgs bundles with odd degree determinant. Namely, let \begin{equation}\langlebel{rgamma} \mathcal{R}^\pm(\Gamma,\mathrm{SL}(2,\mathbb{C})) = \{\rho\in \Hom^{red}(\Gamma,\mathrm{SL}(2,\mathbb{C})) / \mathrm{SL}(2,\mathbb{C}) \;\;: \;\;\rho(J)=\pm I\}. \end{equation} Here a reductive representation of $\Gamma$ is defined as at the beginning of the section, replacing $\pi_1(X)$ by $\Gamma$. Note that $\mathcal{R}^+(\Gamma,\mathrm{SL}(2,\mathbb{C}))=\mathcal{R}(\mathrm{SL}(2,\mathbb{C}))$. We then have the following \cite{hitchin}. \begin{theorem}\langlebel{correspondence} Let $\delta$ be a line bundle over $X$. Then there are homeomorphisms (i) $\mathcal{H}(2,\delta)\cong \mathcal{R}^+(\Gamma,\mathrm{SL}(2,\mathbb{C}))$ if $\deg \delta$ is even, (ii) $\mathcal{H}(2,\delta)\cong \mathcal{R}^-(\Gamma,\mathrm{SL}(2,\mathbb{C}))$ if $\deg \delta$ is odd. \end{theorem} \subsection{Fixed points of $\iota(\mathcal{O})^{-}$ and representations of $\Gamma$} For any reductive subgroup $G\subset \mathrm{SL}(2,\mathbb{C})$ containing $-I$ we consider \begin{equation}\langlebel{rgamma} \mathcal{R}^\pm(\Gamma,G) = \{\rho\in \Hom^{red}(\Gamma,G) / G \;\;: \;\;\rho(J)=\pm I\}. 
\end{equation} In particular we have $\mathcal{R}^\pm(\Gamma,\mathrm{SU}(2))$ and $\mathcal{R}^\pm(\Gamma,\mathrm{SL}(2,\mathbb{R}))$. Note that, since $\mathrm{SU}(2)$ is compact, every representation of $\Gamma$ in $\mathrm{SU}(2)$ is reductive. We can define the subvarieties $\mathcal{R}_k^\pm(\Gamma,\mathrm{SL}(2,\mathbb{R}))$ of $\mathcal{R}^\pm(\Gamma,\mathrm{SL}(2,\mathbb{R}))$ given by the representations of $\Gamma$ in $\mathrm{SL}(2,\mathbb{R})$ with Euler class $k$. By this, we mean that the corresponding flat $\mathrm{PSL}(2,\mathbb{R})$ bundle has Euler class $k$. If the $\mathrm{PSL}(2,\mathbb{R})$ bundle can be lift to an $\mathrm{SL}(2,\mathbb{R})$ bundle then $k=2d$, otherwise $k=2d-1$. The Milnor inequality \cite{milnor} says that the Euler class $k$ of any flat $\mathrm{PSL}(2,\mathbb{R})$ bundle satisfies $$ |k|\leqslantqslant 2g-2, $$ where $g$ is the genus of $X$. Hitchin proves the following \cite{hitchin}. \begin{theorem} Consider the involution $\iota(\mathcal{O})^-$ of $\mathcal{H}(2,\delta)$. We have the following. (i) The fixed point subvariety of $\iota(\mathcal{O})^-$ of points $(E,\varphi)$ with $\varphi=0$ is homeomorphic to the image of $\mathcal{R}^\pm(\Gamma,\mathrm{SU}(2))$ in $\mathcal{R}^\pm(\Gamma,\mathrm{SL}(2,\mathbb{C}))$, where we have $\mathcal{R}^+$ if the degree of $\delta$ is even and $\mathcal{R}^-$ if the degree of $\delta$ is odd. (ii) The fixed point subvariety of $\iota(\mathcal{O})^-$ of points $(E,\varphi)$ with $\varphi\neq 0$ is homeomorphic to the image of $\mathcal{R}^\pm(\Gamma,\mathrm{SL}(2,\mathbb{R}))$ in $\mathcal{R}^\pm(\Gamma,\mathrm{SL}(2,\mathbb{C}))$, where we have $\mathcal{R}^+$ if the degree of $\xi$ is even and $\mathcal{R}^-$ if the degree of $\xi$ is odd. 
(iii) More precisely, the subvariety of triples $\mathcal{H}_a\subset \mathcal{H}(2,\delta)$ defined in Section \ref{triples} is homeomorphic to the image of $\mathcal{R}_{2a}^+(\Gamma,\mathrm{SL}(2,\mathbb{R}))$ in $\mathcal{R}^+(\Gamma,\mathrm{SL}(2,\mathbb{C}))$ if the degree of $\delta$ is even or to the image of $\mathcal{R}_{2a-1}^-(\Gamma,\mathrm{SL}(2,\mathbb{R}))$ in $\mathcal{R}^-(\Gamma,\mathrm{SL}(2,\mathbb{C}))$ if the degree of $\delta$ is odd. \end{theorem} \begin{proof} The conjugations with respect to both real forms, $\mathrm{SU}(2)$ and $\mathrm{SL}(2,\mathbb{R})$, of $\mathrm{SL}(2,\mathbb{C})$ are inner equivalent and hence they induce the same antiholomorphic involution of the moduli space $\mathcal{R}^\pm(\Gamma,\mathrm{SL}(2,\mathbb{C}))$, where we recall that the complex structure of this variety is the one naturally induced by the complex structure of $\mathrm{SL}(2,\mathbb{C})$. To be more precise, at the level of Lie algebras, the conjugation with respect to the real form $\mathfrak{su}(2)$ is given by the $\mathbb{C}$-antilinear involution \begin{equation}\nonumber \begin{aligned} \tau\colon\mathfrak{sl}(2,\mathbb{C}) & \to \mathfrak{sl}(2,\mathbb{C}) \\ A &\mapsto -\overline{A}^t, \end{aligned} \end{equation} while the conjugation with respect to the real form $\mathfrak{sl}(2,\mathbb{R})$ is given by the $\mathbb{C}$-antilinear involution \begin{equation}\nonumber \begin{aligned} \sigma\colon\mathfrak{sl}(2,\mathbb{C}) & \to \mathfrak{sl}(2,\mathbb{C}) \\ A &\mapsto \overline{A}. \end{aligned} \end{equation} Now, \[ \sigma(A)=J\tau(A)J^{-1} \] for $J\in \mathfrak{sl}(2,\mathbb{R})$ given by \[ J = \begin{pmatrix} 0 & 1 \\ -1 & 0 \end{pmatrix}. \] This is simply because for every $A\in\mathfrak{sl}(2,\mathbb{R})$, one has that \begin{equation}\label{n=2} JA=-A^tJ. 
\end{equation} Under the correspondence $\mathcal{H}(2,\delta)\cong\mathcal{R}^\pm(\Gamma,\mathrm{SL}(2,\mathbb{C}))$, the antiholomorphic involution of $\mathcal{R}^\pm(\Gamma,\mathrm{SL}(2,\mathbb{C}))$ defined by $\tau$ and $\sigma$ becomes the holomorphic involution $\iota(\mathcal{O})^-$ of $\mathcal{H}(2,\delta)$ \begin{equation}\label{involution} (E,\varphi)\mapsto (E,-\varphi), \end{equation} where we recall that the complex structure of $\mathcal{H}(2,\delta)$ is that induced by the complex structure of $X$. This follows basically from the fact that the $\mathrm{SL}(2,\mathbb{C})$-connection $D$ corresponding to $(\bar{\partial}_E,\varphi)$ under Theorem \ref{correspondence} is given by (\ref{higgs-connection}) and hence \[ \tau(D)=\ast_{h}(\bar{\partial}_E)+\bar{\partial}_E +(\varphi)^{\ast_h}-\varphi, \] from which we deduce that $\tau(D)$ is in correspondence with $(E,-\varphi)$. Notice also that $\tau(D)\cong \sigma(D)$. The proof of (i) follows now from the fact that if $\varphi=0$ in (\ref{higgs-connection}) the connection $D$ is an $\mathrm{SU}(2)$-connection. Note that this reduces to the Theorem of Narasimhan and Seshadri for $\mathrm{SU}(2)$ \cite{narasimhan-seshadri}. To prove (ii) and (iii), one easily checks that the connection $D$ defined by a Higgs bundle in $\mathcal{H}_a(\delta)$ is $\sigma$-invariant and hence defines an $\mathrm{SL}(2,\mathbb{R})$-connection. Now, the Euler class $k$ of the $\mathrm{PSL}(2,\mathbb{R})$ bundle is $k=2d$ if $E=L\oplus L^{-1}$, or $k=2d-1$ if $E=L\oplus L^{-1}\delta$, where $d=\deg L$. \end{proof} \section{Fixed points of $\iota(\alpha)^\pm$ with $\alpha\neq \mathcal{O}$ and representations of $\Gamma$} Consider the normalizer $N\mathrm{SO}(2)$ of $\mathrm{SO}(2)$ in $\mathrm{SU}(2)$. This is generated by $\mathrm{SO}(2)$ and $J=\begin{pmatrix} 0 & i \\ i & 0 \end{pmatrix}$. 
The group generated by $J$ is isomorphic to $\mathbb{Z}/4$ and fits in the exact sequence \begin{equation}\langlebel{z4} 0\longrightarrow \mathbb{Z}/2\longrightarrow \mathbb{Z}/4\longrightarrow \mathbb{Z}/2\longrightarrow 1, \end{equation} where the subgroup $\mathbb{Z}/2\subset \mathbb{Z}/4$ is $\{\pm I\}$. We thus have an exact sequence \begin{equation}\langlebel{normalizer} 1\longrightarrow \mathrm{SO}(2)\longrightarrow N\mathrm{SO}(2)\longrightarrow \mathbb{Z}/2\longrightarrow 1. \end{equation} The normalizer $N\mathrm{SO}(2,\mathbb{C})$ of $\mathrm{SO}(2,\mathbb{C})$ in $\mathrm{SL}(2,\mathbb{C})$ fits also in an extension \begin{equation}\langlebel{c-normalizer} 1\longrightarrow \mathrm{SO}(2,\mathbb{C})\longrightarrow N\mathrm{SO}(2,\mathbb{C})\longrightarrow \mathbb{Z}/2\longrightarrow 1, \end{equation} which is, of course, the complexification of (\ref{normalizer}). Similarly, we also have that $N\mathrm{SL}(2,\mathbb{R})$, the normalizer of $\mathrm{SL}(2,\mathbb{R})$ in $\mathrm{SL}(2,\mathbb{C})$, is given by \begin{equation}\langlebel{normalizer=sl2r} 1\longrightarrow \mathrm{SL}(2,\mathbb{R})\longrightarrow N\mathrm{SL}(2,\mathbb{R})\longrightarrow \mathbb{Z}/2\longrightarrow 1. \end{equation} Note that $N\mathrm{SO}(2)$ is a maximal compact subgroup of $N\mathrm{SL}(2,\mathbb{R})$. Given a representation $\rho:\Gamma \longrightarrow N\mathrm{SO}(2)$ there is a topological invariant $\alpha\in H^1(X,\mathbb{Z}/2)$, which is given by the map $$ H^1(X,N\mathrm{SO}(2))\longrightarrow H^1(X,\mathbb{Z}/2) $$ induced by (\ref{normalizer}). Let $$ \mathcal{R}_{\alpha}^\pm(\Gamma,N\mathrm{SO}(2)):=\{\rho\in \mathcal{R}^\pm(\Gamma,N\mathrm{SO}(2))\;\;:\;\; \mbox{with invariant} \;\;\alpha\in H^1(X,\mathbb{Z}/2)\}. 
$$ Similarly, we have this $\alpha$-invariant for representations of $\Gamma$ in $N\mathrm{SO}(2,\mathbb{C})$ and in $N\mathrm{SL}(2,\mathbb{R})$, and we can define $\mathcal{R}_\alpha(\Gamma,N\mathrm{SO}(2,\mathbb{C}))$ and $\mathcal{R}_{\alpha}^\pm(\Gamma,N\mathrm{SL}(2,\mathbb{R}))$. \begin{theorem} Let $\alpha\in J_2(X)=H^1(X,\mathbb{Z}/2)$. Then we have the following. (i) The subvariety $F_\alpha$ of fixed points of the involution $\iota(\alpha)$ in $M(\delta)$ defined by $E\mapsto E\otimes \alpha$ is homeomorphic to the image of $\mathcal{R}^\pm_\alpha(\Gamma,N\mathrm{SO}(2))$ in $\mathcal{R}^\pm(\Gamma,\mathrm{SU}(2))$, where we have $\mathcal{R}^+$ if the degree of $\delta$ is even and $\mathcal{R}^-$ if the degree of $\delta$ is odd. (ii) The subvariety $\mathcal{F}_\alpha^+$ of fixed points of the involution $\iota(\alpha)^+$ of $\mathcal{H}(\delta)$ is homeomorphic to the image of $\mathcal{R}^\pm_\alpha(\Gamma,N\mathrm{SO}(2,\mathbb{C}))$ in $\mathcal{R}^\pm(\Gamma,\mathrm{SL}(2,\mathbb{C}))$, where we have $\mathcal{R}^+$ if the degree of $\delta$ is even and $\mathcal{R}^-$ if the degree of $\delta$ is odd. (iii) The subvariety $\mathcal{F}_\alpha^-$ of fixed points of the involution $\iota(\alpha)^-$ of $\mathcal{H}(\xi)$ is homeomorphic to the image of $\mathcal{R}^\pm_\alpha(\Gamma,N\mathrm{SL}(2,\mathbb{R}))$ in $\mathcal{R}^\pm(\Gamma,\mathrm{SL}(2,\mathbb{C}))$, where we have $\mathcal{R}^+$ if the degree of $\delta$ is even and $\mathcal{R}^-$ if the degree of $\delta$ is odd. \end{theorem} \begin{proof} The element $\alpha\in J_2(X)=H^1(X,\mathbb{Z}/2)$ defines a $\mathbb{Z}/2$ \'etale covering $\pi: X_\alpha\longrightarrow X$. The strategy of the proof is to lift to $X_\alpha$ and apply a $\mathbb{Z}/2$-invariant version of the correspondence between Higgs bundles on $X_\alpha$ and representations of $\Gamma_\alpha$ --- the universal central extension of $\pi_1(X_\alpha)$. 
We have a sequence \begin{equation}\label{gamma-alpha} 1\longrightarrow \Gamma_\alpha\longrightarrow \Gamma \longrightarrow \mathbb{Z}/2\longrightarrow 1, \end{equation} since $\Gamma_\alpha$ is the kernel of the homomorphism $\alpha\colon\Gamma\to \mathbb{Z}/2$ defined by $\alpha$. For convenience, let $G$ be any of the subgroups $\mathrm{SO}(2)\subset \mathrm{SU}(2)$, $\mathrm{SO}(2,\mathbb{C})\subset \mathrm{SL}(2,\mathbb{C})$ and $\mathrm{SL}(2,\mathbb{R})\subset \mathrm{SL}(2,\mathbb{C})$, and let $NG$ be its normalizer in the corresponding group. We then have an extension \begin{equation}\label{ng} 1\longrightarrow G\longrightarrow NG\longrightarrow \mathbb{Z}/2\longrightarrow 1. \end{equation} Let $\Hom_\alpha(\Gamma,NG)$ be the subset of $\Hom(\Gamma,NG)$ consisting of representations $\rho\colon \Gamma\to NG$ such that the following diagram is commutative \begin{equation}\label{commu} \begin{matrix} 1 & \longrightarrow & \Gamma_\alpha&\longrightarrow &\Gamma & \stackrel{\alpha}{\longrightarrow} & \mathbb{Z}/2 &\longrightarrow & 1\\ && \Big\downarrow && ~\Big\downarrow\rho && \Big\Vert\\ 1 & \longrightarrow & G &\longrightarrow & NG & \longrightarrow & \mathbb{Z}/2 &\longrightarrow & 1 \end{matrix} \end{equation} The group $NG$ is a disconnected group with $\mathbb{Z}/2$ as the group of connected components and $G$ as the connected component containing the identity. If $G$ is abelian ($G=\mathrm{SO}(2),\mathrm{SO}(2,\mathbb{C})$), $\mathbb{Z}/2$ acts on $G$ and, since $\mathbb{Z}/2$ acts on $X_\alpha$ (as the Galois group) and hence on $\Gamma_\alpha$, there is thus an action of $\mathbb{Z}/2$ on $\Hom(\Gamma_\alpha,G)$. A straightforward computation shows that \begin{equation}\label{reps-inv-reps} \Hom_\alpha(\Gamma, NG)\cong \Hom(\Gamma_\alpha,G)^{\mathbb{Z}/2}. 
\end{equation} If $G$ is not abelian (which is the case for $G=\mathrm{SL}(2,\mathbb{R})$), the extension (\ref{ng}) still defines a homomorphism $\mathbb{Z}/2 \to \Out(G)=\Aut(G)/\Int(G)$. We can then take a splitting of the sequence \begin{equation}\label{aut-g} 1\longrightarrow \Int(G)\longrightarrow \Aut(G) \longrightarrow \Out(G)\longrightarrow 1, \end{equation} which always exists \cite{de-siebenthal}. This defines an action on $\Hom(\Gamma_\alpha,G)$. However, only the action on $\Hom(\Gamma_\alpha,G)/G$ is independent of the splitting. In particular, as a consequence of (\ref{reps-inv-reps}), we have homeomorphisms $$ \mathcal{R}_\alpha^\pm(\Gamma,NG)\cong \mathcal{R}^\pm(\Gamma_\alpha,G)^{\mathbb{Z}/2}. $$ The result follows now from the usual correspondences between representations of $\Gamma_\alpha$ and vector bundles or Higgs bundles on $X_\alpha$, combined with the fact that the fixed point subvarieties $F_\alpha$, $\mathcal{F}^\pm_\alpha$ described in Sections \ref{prym}, \ref{fix-odd} and \ref{fix-even} are push-forwards to $X$ of objects on $X_\alpha$ that satisfy the $\mathbb{Z}/2$-invariance condition (see \cite{garcia-prada-ramanan} for more details). \end{proof} \providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace} \end{document}
\begin{document} \begin{center} {\bf On Relatively Prime Subsets and Supersets} \vskip 20pt {\bf Mohamed El Bachraoui\footnote{Supported by RA at UAEU, grant: 02-01-2-11/09}} \\ \emph{Dept. Math. Sci., United Arab Emirates University, P.O.Box 17551, Al-Ain, UAE} \\ {\tt [email protected]}\\ \vskip 10pt \end{center} \vskip 30pt \vskip 30pt \centerline{\bf Abstract} \noindent A nonempty finite set of positive integers $A$ is relatively prime if $\gcd(A)=1$ and it is relatively prime to $n$ if $\gcd(A\cup \{n\})=1$. The number of nonempty subsets of $A$ which are relatively prime to $n$ is $\Phi(A,n)$ and the number of such subsets of cardinality $k$ is $\Phi_k(A,n)$. Given positive integers $m_1$, $l_2$, $m_2$, and $n$ such that $m_1\leq l_2\leq m_2$ we give $\Phi( [1,m_1]\cup [l_2, m_2],n)$ along with $\Phi_k( [1,m_1]\cup [l_2, m_2],n)$. Given positive integers $l, m$, and $n$ such that $l\leq m$ we count for any subset $A$ of $\{l,l+1,\ldots,m\}$ the number of its supersets in $[l,m]$ which are relatively prime and we count the number of such supersets which are relatively prime to $n$. Formulas are also obtained for corresponding supersets having fixed cardinalities. Intermediate consequences include a formula for the number of relatively prime sets with a nonempty intersection with some fixed set of positive integers. \pagestyle{myheadings} \thispagestyle{empty} \baselineskip=15pt \vskip 30pt \noindent \textbf{Keywords:}\quad Relatively prime sets, Phi function, M\"obius inversion. \\ \\ \noindent \textbf{Subject Class:}\quad 11A25, 11B05, 11B75. \section*{\normalsize 1. Introduction} Throughout let $k, l, m , n$ be positive integers such that $l \leq m$, let $[l,m] = \{l,l+1,\ldots,m\}$, let $\mu$ be the M\"obius function, and let $\lfloor x \rfloor$ be the floor of $x$. If $A$ is a set of integers and $d\not= 0$, then $\frac{A}{d}= \{ a/d:\ a \in A\}$.
A nonempty set of positive integers $A$ is called \emph{relatively prime} if $\gcd(A)=1$ and it is called \emph{relatively prime to $n$} if $\gcd(A\cup \{n\}) = \gcd(A,n) = 1$. Unless otherwise specified $A$ and $B$ will denote nonempty sets of positive integers. We will need the following basic identity on binomial coefficients stating that for nonnegative integers $L\leq M \leq N$ \begin{equation}\label{binomial} \sum_{j=M}^{N}\binom{j}{L} = \binom{N+1}{L+1}-\binom{M}{L+1}. \end{equation} \noindent {\bf Definition 1.} Let \[ \begin{split} \Phi(A,n) &= \# \{X\subseteq A:\ X\not= \emptyset\ \text{and\ } \gcd(X,n) = 1 \}, \\ \Phi_k (A,n) &= \# \{X\subseteq A:\ \# X= k \ \text{and\ } \gcd(X,n) = 1 \}, \\ f(A) &= \# \{X\subseteq A:\ X\not= \emptyset\ \text{and\ } \gcd(X) = 1 \}, \\ f_k (A) &= \# \{X\subseteq A:\ \# X= k \ \text{and\ } \gcd(X) = 1 \}. \end{split} \] \noindent Nathanson in \cite{Nathanson} introduced $f(n)$, $f_k(n)$, $\Phi(n)$, and $\Phi_k(n)$ (in our terminology $f([1,n])$, $f_k([1,n])$, $\Phi([1,n],n)$, and $\Phi_k([1,n],n)$ respectively) and gave their formulas along with asymptotic estimates. Formulas for $f([m,n])$, $f_k([m,n])$, $\Phi([m,n],n)$, and $\Phi_k([m,n],n)$ are found in \cite{ElBachraoui1, Nathanson-Orosz} and formulas for $\Phi([1,m],n)$ and $\Phi_k([1,m],n)$ for $m\leq n$ are obtained in \cite{ElBachraoui2}. Recently Ayad and Kihel in \cite{Ayad-Kihel2} considered phi functions for sets which are in arithmetic progression and obtained the following more general formulas for $\Phi([l,m],n)$ and $\Phi_k ([l,m],n)$. \noindent {\bf Theorem 1.}\ We have \[ \begin{split} \text{(a)\quad } &\ \Phi([l,m],n) = \sum_{d|n}\mu(d) 2^{\lfloor m/d \rfloor- \lfloor (l-1)/d \rfloor}, \\ \text{(b)\quad } &\ \Phi_k ([l,m],n) = \sum_{d|n} \mu(d) \binom{\lfloor m/d \rfloor- \lfloor (l-1)/d \rfloor}{k}. \end{split} \] \section*{\normalsize 2. 
Relatively prime subsets for $[1,m_1]\cup [l_2,m_2]$} If $[1,m_1]\cap[l_2,m_2]\not= \emptyset$, then phi functions for $[1,m_1]\cup[l_2,m_2]= [1,m_2]$ are obtained by Theorem 1. So we may assume that $1 \leq m_1 < l_2 \leq m_2$. \noindent {\bf Lemma 1.}\label{lem:psi} Let \[ \Psi(m_1,l_2,m_2, n)= \# \{X \subseteq [1,m_1]\cup [l_2,m_2]:\ l_2\in X\ \text{and\ } \gcd(X,n)=1 \}, \] \[ \Psi_k(m_1,l_2,m_2,n)=\# \{X \subseteq [1,m_1]\cup [l_2,m_2]:\ l_2\in X,\ |X| = k,\ \text{and\ } \gcd(X,n)=1 \}. \] Then \[ \text{(a)\quad } \Psi(m_1,l_2,m_2, n) = \sum_{d|(l_2,n)}\mu(d) 2^{\lfloor m_1/d \rfloor + \lfloor m_2/d \rfloor- l_2/d}, \] \[ \text{(b)\quad } \Psi_k (m_1,l_2,m_2, n) = \sum_{d|(l_2,n)}\mu(d) \binom{\lfloor m_1/d \rfloor + \lfloor m_2/d \rfloor - l_2/d }{k-1}. \] \begin{proof} (a) Assume first that $m_2\leq n$. Let $\mathcal{P}(m_1,l_2,m_2)$ denote the set of subsets of $[1,m_1]\cup[l_2,m_2]$ containing $l_2$ and let $\mathcal{P}(m_1,l_2,m_2,d)$ be the set of subsets $X$ of $[1,m_1]\cup[l_2,m_2]$ such that $l_2\in X$ and $\gcd(X,n) = d$. It is clear that the set $\mathcal{P}(m_1,l_2,m_2)$ of cardinality $2^{m_1+m_2-l_2}$ can be partitioned using the equivalence relation of having the same $\gcd$ (dividing $l_2$ and $n$). Moreover, the mapping $X \mapsto \frac{1}{d} X$ is a one-to-one correspondence between $\mathcal{P}(m_1,l_2,m_2,d)$ and the set of subsets $Y$ of $[1, \lfloor m_1/d \rfloor ]\cup [l_2/d,\lfloor m_2/d \rfloor]$ such that $l_2/d \in Y$ and $\gcd(Y,n/d)= 1$. Then \[ \# \mathcal{P}(m_1,l_2,m_2,d) = \Psi(\lfloor m_1/d \rfloor,l_2 /d,\lfloor m_2/d \rfloor,n/d). \] Thus \[ 2^{m_1+m_2-l_2} = \sum_{d|(l_2,n)} \# \mathcal{P}(m_1,l_2,m_2,d)= \sum_{d|(l_2,n)} \Psi (\lfloor m_1/d \rfloor,l_2 /d,\lfloor m_2/d \rfloor,n/d), \] which by the M\"obius inversion formula extended to multivariable functions \cite[Theorem 2]{ElBachraoui1} is equivalent to \[ \Psi(m_1,l_2,m_2,n) = \sum_{d|(l_2,n)}\mu(d) 2^{\lfloor m_1/d \rfloor + \lfloor m_2/d \rfloor - l_2/d}.
\] Assume now that $m_2 >n$ and let $a$ be a positive integer such that $m_2 \leq n^a$. As $\gcd(X,n)=1$ if and only if $\gcd(X,n^a)=1$ and $\mu(d) =0$ whenever $d$ has a nontrivial square factor, we have \[ \begin{split} \Psi(m_1,l_2,m_2,n) &= \Psi(m_1,l_2,m_2,n^a) \\ &= \sum_{d|(l_2,n^a)}\mu(d) 2^{\lfloor m_1/d \rfloor + \lfloor m_2/d \rfloor - l_2/d} \\ &= \sum_{d|(l_2,n)}\mu(d) 2^{\lfloor m_1/d \rfloor + \lfloor m_2/d \rfloor - l_2/d}. \end{split} \] (b) For the same reason as before, we may assume that $m_2 \leq n$. Noting that the correspondence $X\mapsto \frac{1}{d} X$ defined above preserves the cardinality and using an argument similar to the one in part (a), we obtain the following identity \[ \binom{m_1+m_2-l_2}{k-1}= \sum_{d|(l_2,n)}\Psi_k (\lfloor m_1/d \rfloor,l_2 /d,\lfloor m_2/d \rfloor, n/d ) \] which by the M\"obius inversion formula \cite[Theorem 2]{ElBachraoui1} is equivalent to \[ \Psi_k (m_1,l_2,m_2,n) = \sum_{d|(l_2,n)}\mu(d)\binom{\lfloor m_1/d \rfloor + \lfloor m_2/d \rfloor - l_2/d }{k-1}, \] as desired. \end{proof} \noindent {\bf Theorem 2.}\label{thm:main2} We have \[ \begin{split} \text{(a)\quad } \Phi([1,m_1]\cup [l_2,m_2],n) &= \sum_{d|n}\mu(d) 2^{\lfloor \frac{m_1}{d} \rfloor +\lfloor \frac{m_2}{d} \rfloor - \lfloor\frac{l_2 -1}{d} \rfloor}, \\ \text{(b)\quad } \Phi_k ([1,m_1]\cup [l_2,m_2],n) &= \sum_{d|n} \mu(d) \ \binom{\lfloor \frac{m_1}{d} \rfloor +\lfloor \frac{m_2}{d} \rfloor - \lfloor\frac{l_2 -1}{d} \rfloor}{k}. 
\end{split} \] \begin{proof} (a) Clearly \begin{equation}\label{help1} \begin{split} \Phi([1,m_1]\cup [l_2,m_2],n) & = \Phi([1,m_1]\cup [l_2 -1,m_2],n) - \Psi(m_1,l_2 -1,m_2,n) \\ &= \Phi([1,m_1]\cup [m_1+1,m_2],n) - \sum_{i=m_1 +1}^{l_2 -1}\Psi(m_1,i,m_2,n) \\ &= \Phi([1,m_2],n) - \sum_{i=m_1 +1}^{l_2 -1}\Psi(m_1,i,m_2,n) \\ &= \sum_{d|n} \mu(d) 2^{\lfloor m_2/d \rfloor} - \sum_{i=m_1 +1}^{l_2 -1}\sum_{d|(n, i)} \mu(d) 2^{\lfloor \frac{m_1}{d} \rfloor +\lfloor \frac{m_2}{d} \rfloor - \frac{i}{d}}, \end{split} \end{equation} where the last identity follows by Theorem 1 for $l=1$ and Lemma 1. Rearranging the last summation in (\ref{help1}) gives \begin{equation}\label{help2} \begin{split} \sum_{i=m_1 +1}^{l_2- 1}\sum_{d|(n, i)} \mu(d) 2^{\lfloor \frac{m_1}{d} \rfloor +\lfloor \frac{m_2}{d} \rfloor - \frac{i}{d}} &= \sum_{d|n}\sum_{\substack{i=m_1+1\\ d|i}}^{l_2-1} \mu(d) 2^{\lfloor \frac{m_1}{d} \rfloor +\lfloor \frac{m_2}{d} \rfloor - \frac{i}{d}} \\ &= \sum_{d|n}\mu(d) 2^{\lfloor \frac{m_1}{d} \rfloor +\lfloor \frac{m_2}{d} \rfloor} \sum_{j=\lfloor \frac{m_1}{d} \rfloor +1}^{\lfloor \frac{l_2-1}{d} \rfloor} 2^{-j} \\ &= \sum_{d|n}\mu(d) 2^{\lfloor \frac{m_2}{d} \rfloor} \left(1- 2^{-\lfloor \frac{l_2-1}{d}\rfloor +\lfloor \frac{m_1}{d}\rfloor} \right). \end{split} \end{equation} Now combining identities (\ref{help1}, \ref{help2}) yields the result. \noindent (b) Proceeding as in part (a) we find \begin{equation}\label{help3} \begin{split} \Phi_k ([1,m_1]\cup [l_2,m_2],n) &= \sum_{d|n} \mu(d) \binom{\lfloor \frac{m_2}{d}\rfloor}{k} - \sum_{i=m_1 +1}^{l_2-1}\sum_{d|(n, i)} \mu(d) \binom{\lfloor \frac{m_1}{d} \rfloor +\lfloor \frac{m_2}{d} \rfloor - \frac{i}{d}}{k-1}.
\end{split} \end{equation} Rearranging the last summation on the right of (\ref{help3}) gives \begin{equation}\label{help4} \begin{split} \sum_{i=m_1 +1}^{l_2 -1}\sum_{d|(n, i)} \mu(d) \binom{\lfloor \frac{m_1}{d} \rfloor +\lfloor \frac{m_2}{d} \rfloor - \frac{i}{d}}{k-1} &= \sum_{d|n}\mu(d) \sum_{j=\lfloor \frac{m_1}{d} \rfloor +1}^{\lfloor \frac{l_2-1}{d} \rfloor} \binom{\lfloor \frac{m_1}{d} \rfloor +\lfloor \frac{m_2}{d} \rfloor- j}{k-1}\\ &= \sum_{d|n}\mu(d) \sum_{i=\lfloor \frac{m_1}{d} \rfloor +\lfloor \frac{m_2}{d} \rfloor-\lfloor \frac{l_2-1}{d} \rfloor}^{ \lfloor \frac{m_2}{d} \rfloor -1} \binom{i}{k-1} \\ &= \sum_{d|n}\mu(d) \left( \binom{\lfloor \frac{m_2}{d} \rfloor}{k}- \binom{\lfloor \frac{m_1}{d} \rfloor +\lfloor \frac{m_2}{d} \rfloor-\lfloor \frac{l_2-1}{d} \rfloor}{k} \right), \end{split} \end{equation} where the last identity follows by formula (\ref{binomial}). Then identities (\ref{help3}, \ref{help4}) yield the desired result. \end{proof} \noindent {\bf Definition 2.} Let \[ \begin{split} \varepsilon(A,B,n) &= \# \{ X \subseteq B:\ X \not=\emptyset,\ X \cap A= \emptyset,\ \text{and\ } \gcd(X,n)=1 \}, \\ \varepsilon_k(A,B,n) &= \# \{ X \subseteq B:\ \# X = k,\ X \cap A= \emptyset,\ \text{and\ } \gcd(X,n)=1 \}. \end{split} \] If $B= [1,n]$ we will simply write $\varepsilon(A,n)$ and $\varepsilon_k(A,n)$ rather than $\varepsilon(A,[1,n],n)$ and $\varepsilon_k(A,[1,n],n)$ respectively. \noindent {\bf Theorem 3.} If $l \leq m < n$, then \[ \text{(a)\ } \varepsilon([l,m],n) = \sum_{d|n} \mu(d) 2^{\lfloor (l-1)/d \rfloor + n/d - \lfloor m/d \rfloor}, \] \[ \text{(b)\ } \varepsilon_k([l,m],n) = \sum_{d|n} \mu(d) \binom{\lfloor (l-1)/d \rfloor + n/d - \lfloor m/d \rfloor}{k} . \] \begin{proof} Immediate from Theorem 2 since \[ \varepsilon([l,m],n) = \Phi([1,l-1]\cup [m+1,n],n)\ \text{and\ } \varepsilon_k([l,m],n) = \Phi_k([1,l-1]\cup [m+1,n],n). \] \end{proof} \section*{\normalsize 3.
Relatively prime supersets} In this section the sets $A$ and $B$ are not necessarily nonempty. \noindent {\bf Definition 3.} If $A\subseteq B$ let \[ \begin{split} \overline{\Phi}(A,B,n) &= \# \{X\subseteq B:\ X \not= \emptyset,\ A\subseteq X,\ \text{and\ } \gcd(X,n)=1 \}, \\ \overline{\Phi}_k(A,B,n) &= \# \{X\subseteq B:\ A\subseteq X,\ \# X=k,\ \text{and\ } \gcd(X,n)=1 \}, \\ \overline{f}(A,B) &= \# \{X\subseteq B:\ X \not= \emptyset,\ A\subseteq X,\ \text{and\ } \gcd(X)=1 \}, \\ \overline{f}_k (A,B) &= \# \{X\subseteq B:\ \# X = k,\ A\subseteq X,\ \text{and\ } \gcd(X)=1 \}. \end{split} \] \noindent The purpose of this section is to give formulas for $\overline{f}(A,[l,m])$, $\overline{f}_k(A,[l,m])$, $\overline{\Phi}(A,[l,m],n)$, and $\overline{\Phi}_k(A,[l,m],n)$ for any subset $A$ of $[l,m]$. We need a lemma. \noindent {\bf Lemma 2.} If $A \subseteq [1,m]$, then \[ \text{(a)\quad } \overline{\Phi}(A,[1,m],n) = \sum_{d| (A,n)}\mu(d) 2^{\lfloor m/d \rfloor - \# A}, \] \[ \text{(b)\ } \overline{\Phi}_k(A,[1,m],n) = \sum_{d| (A,n)}\mu(d) \binom{ \lfloor m/d \rfloor - \# A}{k- \# A}\ \text{whenever\ } \# A \leq k \leq m. \] \begin{proof} If $A = \emptyset$, then clearly \[ \overline{\Phi}(A,[1,m],n) = \Phi([1,m],n) \ \text{and\ } \overline{\Phi}_k (A,[1,m],n) = \Phi_k ([1,m],n) \] and the identities in (a) and (b) follow by Theorem 1 for $l=1$. Assume now that $A \not= \emptyset$. If $m\leq n$, then \[ 2^{m- \# A} = \sum_{d|(A,n)} \overline{\Phi}(\frac{A}{d},[1,\lfloor m/d \rfloor],n/d) \] and \[ \binom{m- \# A}{k- \# A} = \sum_{d| (A,n)} \overline{\Phi}_k(\frac{A}{d},[1,\lfloor m/d \rfloor],n/d) \] which by M\"obius inversion \cite[Theorem 2]{ElBachraoui1} are equivalent to the identities in (a) and in (b) respectively. If $m >n$, let $a$ be a positive integer such that $m \leq n^a$.
As $\gcd(X,n)=1$ if and only if $\gcd(X,n^a)=1$ and $\mu(d) =0$ whenever $d$ has a nontrivial square factor we have \[ \begin{split} \overline{\Phi}(A,[1,m],n) &= \overline{\Phi}(A,[1,m],n^a) \\ &= \sum_{d| (A,n^a)}\mu(d) 2^{\lfloor m/d \rfloor - \# A} \\ &= \sum_{d| (A,n)}\mu(d) 2^{\lfloor m/d \rfloor - \# A}. \end{split} \] The same argument gives the formula for $\overline{\Phi}_k(A,[1,m],n)$. \end{proof} \noindent {\bf Theorem 4.}\label{thm:main3} If $A\subseteq [l,m]$, then \[ \text{(a)\quad } \overline{\Phi}(A,[l,m],n)= \sum_{d| (A,n)}\mu(d) 2^{\lfloor m/d \rfloor - \lfloor (l-1)/d \rfloor -\# A}, \] \[ \text{(b)\quad } \overline{\Phi}_k (A,[l,m],n)= \sum_{d| (A,n)}\mu(d) \binom{ \lfloor m/d \rfloor - \lfloor (l-1)/d \rfloor -\# A}{k- \# A}\ \text{whenever\ } \# A \leq k \leq m-l+1. \] \begin{proof} If $A= \emptyset$, then clearly \[ \overline{\Phi}(A,[l,m],n)= \Phi ([l,m],n) \] and \[ \overline{\Phi}_k (A,[l,m],n)= \Phi_k ([l,m],n) \] and the identities in (a) and (b) follow by Theorem 1. \\ Assume now that $A\not= \emptyset$. Let \[ \Psi (A,l,m,n) = \# \{ X\subseteq [l,m]:\ A\cup\{l\} \subseteq X, \text{and\ } \gcd(X,n)=1 \}. \] Then \[ 2^{m-l- \# A}= \sum_{d|(A,l,n)} \Psi (\frac{A}{d}, l/d, \lfloor m/d \rfloor, n/d), \] which by M\"obius inversion \cite[Theorem 2]{ElBachraoui1} means that \begin{equation} \label{eq:one} \Psi (A,l,m,n) = \sum_{d|(A,l,n)} \mu(d) 2^{\lfloor m/d \rfloor -l/d - \# A}. 
\end{equation} Then combining identity (\ref{eq:one}) with Lemma 2 gives \begin{equation} \begin{split} \overline{\Phi}(A,[l,m],n) &= \overline{\Phi}(A,[1,m],n) - \sum_{i=1}^{l-1} \Psi(A,i,m,n) \\ &= \sum_{d|(A,n)} \mu(d) 2^{ \lfloor m/d \rfloor - \# A} - \sum_{i=1}^{l-1} \sum_{d|(A,i,n)} \mu(d) 2^{\lfloor m/d \rfloor -i/d - \# A} \\ &= \sum_{d|(A,n)} \mu(d) 2^{ \lfloor m/d \rfloor - \# A} - \sum_{d|(A,n)} \mu(d) 2^{ \lfloor m/d \rfloor - \# A} \sum_{j=1}^{\lfloor (l-1)/d \rfloor} 2^{-j} \\ &= \sum_{d|(A,n)} \mu(d) 2^{ \lfloor m/d \rfloor - \# A} - \sum_{d|(A,n)} \mu(d) 2^{ \lfloor m/d \rfloor - \# A}(1 - 2^{- \lfloor (l-1)/d \rfloor}) \\ &= \sum_{d| (A,n)}\mu(d) 2^{\lfloor m/d \rfloor - \lfloor (l-1)/d \rfloor -\# A}. \end{split} \end{equation} This completes the proof of (a). Part (b) follows similarly. \end{proof} \noindent As to $\overline{f}(A,[l,m])$ and $\overline{f}_k (A,[l,m])$ we similarly have: \noindent {\bf Theorem 5.}\label{thm:main4} If $A \subseteq [l,m]$, then \[ \text{(a)\quad } \overline{f}(A,[l,m])= \sum_{d| \gcd(A)}\mu(d) 2^{\lfloor \frac{m}{d} \rfloor - \lfloor \frac{l-1}{d} \rfloor -\# A}, \] \[ \text{(b)\quad } \overline{f}_k (A,[l,m])= \sum_{d| \gcd(A)}\mu(d) \binom{ \lfloor \frac{m}{d}\rfloor - \lfloor \frac{l-1}{d} \rfloor -\# A}{k- \# A},\ \text{whenever\ } \# A \leq k\leq m-l+1. \] We close this section by formulas for relatively prime sets which have a nonempty intersection with $A$. \noindent {\bf Definition 4.} Let \[ \begin{split} \overline{\varepsilon}(A,B,n) &= \# \{ X \subseteq B:\ X \cap A\not= \emptyset\ \text{and\ } \gcd(X,n)=1 \}, \\ \overline{\varepsilon}_k(A,B,n) &= \# \{ X \subseteq B:\ \# X =k,\ X \cap A\not= \emptyset,\ \text{and\ } \gcd(X,n)=1 \}, \\ \overline{\varepsilon}(A,B) &= \# \{ X \subseteq B:\ X \cap A\not= \emptyset\ \text{and\ } \gcd(X)=1 \}, \\ \overline{\varepsilon}_k(A,B) &= \# \{ X \subseteq B:\ \# X =k,\ X \cap A\not= \emptyset,\ \text{and\ } \gcd(X)=1 \}.
\end{split} \] \noindent {\bf Theorem 6.} We have \[ \text{(a)\quad } \overline{\varepsilon}(A,[l,m],n)= \sum_{\emptyset\not= X \subseteq A} \sum_{d|(X,n)} \mu(d) 2^{\lfloor \frac{m}{d} \rfloor - \lfloor \frac{l-1}{d} \rfloor -\# X}, \] \[ \text{(b)\quad } \overline{\varepsilon}_k(A,[l,m],n)= \sum_{\substack{\emptyset \not= X\subseteq A \\ \# X \leq k}} \sum_{d|(X,n)} \mu(d) \binom{ \lfloor \frac{m}{d}\rfloor - \lfloor \frac{l-1}{d} \rfloor -\# X}{k- \# X}, \] \[ \text{(c)\ } \overline{\varepsilon}(A,[l,m]) = \sum_{\emptyset\not= X \subseteq A} \sum_{d|\gcd(X)} \mu(d) 2^{\lfloor \frac{m}{d} \rfloor - \lfloor \frac{l-1}{d} \rfloor -\# X}, \] \[ \text{(d)\ } \overline{\varepsilon}_k(A,[l,m]) = \sum_{\substack{\emptyset \not= X\subseteq A \\ \# X \leq k}} \sum_{d|\gcd(X)} \mu(d) \binom{ \lfloor \frac{m}{d}\rfloor - \lfloor \frac{l-1}{d} \rfloor -\# X}{k- \# X}. \] \begin{proof} These formulas follow by Theorems 4, 5 and the facts that \[ \overline{\varepsilon}(A,[l,m],n)= \sum_{\emptyset\not= X \subseteq A} \overline{\Phi}(X,[l,m],n), \] \[ \overline{\varepsilon}_k(A,[l,m],n)= \sum_{\substack{\emptyset \not= X\subseteq A \\ \# X \leq k}} \overline{\Phi}_k (X,[l,m],n), \] \[ \overline{\varepsilon}(A,[l,m]) = \sum_{\emptyset\not= X \subseteq A}\overline{f}(X,[l,m]), \] \[ \overline{\varepsilon}_k(A,[l,m]) = \sum_{\substack{\emptyset\not= X \subseteq A \\ \# X \leq k}} \overline{f}_k(X,[l,m]). \] \end{proof} \end{document}
\begin{document} \begin{abstract} Assuming the existence of a strong cardinal, we find a model of ZFC in which for each uncountable regular cardinal $\lambda,$ there is no universal graph of size $\lambda$. \end{abstract} \maketitle \section{Introduction} The existence of universal objects in an infinite cardinal is of great interest in both set theory and model theory. In this paper we concentrate on universal graphs. It is well-known that there is a countable universal graph and that if $\lambda$ is an uncountable cardinal with $2^{<\lambda}=\lambda$, then there exists a universal graph of size $\lambda$. On the other hand, by a result of Shelah (see \cite{kojman}), if $\lambda$ is a regular cardinal with $2^\lambda=\lambda^+$, then after forcing with $\mathrm{Add}(\lambda, \lambda^{++})$ for adding $\lambda^{++}$-many Cohen subsets to $\lambda,$ there are no universal graphs of size $\lambda^+.$ Friedman and Thompson \cite{friedman-thompson} obtained further results in this direction, in particular, they showed it is consistent, modulo the existence of strong cardinals, that there are no universal graphs at the successor of a singular strong limit cardinal of countable cofinality. In this paper we continue this line of research further and prove the following global result: \begin{theorem} \label{maintheorem} Assume GCH holds and $\kappa$ is a $(\kappa+3)$-strong cardinal.
Then there exists a generic extension $W$ of the universe in which $\kappa$ remains inaccessible and for each uncountable regular cardinal $\lambda < \kappa,$ there is no universal graph of size $\lambda,$ in particular the rank initial segment of $W$ at $\kappa$ is a model of ZFC in which for each uncountable regular cardinal $\lambda,$ there is no universal graph of size $\lambda.$ \end{theorem} The theorem in particular answers Question 6.1 from \cite{friedman-thompson}. The rest of the paper is devoted to the proof of the above theorem. In Section \ref{s0} we present some preliminaries. In Section \ref{s1} we construct the model $W$, and then in Section \ref{s2} we show that $W$ is as required. \section{Some preliminaries} \label{s0} We will start by stating some general non-existence results for universal graphs. \begin{lemma} (Shelah, see \cite{kojman}) \label{shtm} Suppose $\lambda$ is a regular cardinal and $2^\lambda=\lambda^+.$ Then in the generic extension by $\mathrm{Add}(\lambda, \mu)$, for $\cf(\mu) \geq \lambda^{++},$ there are no universal graphs of size $\lambda^+.$ \end{lemma} Friedman and Thompson \cite{friedman-thompson} isolated the main properties used in the proof of the above lemma and they have given an abstract lemma extending the above lemma; we will need the following special case. \begin{lemma} (see \cite{friedman-thompson}) \label{sdftm} Suppose $\lambda$ is a regular cardinal and $2^\lambda=\lambda^+.$ Then in the generic extension by $\mathrm{Sacks}(\lambda, \mu)$, for $\cf(\mu) \geq \lambda^{++},$ there are no universal graphs of size $\lambda^+.$ \end{lemma} \section{Building the model W} \label{s1} In this section we construct the generic extension $W$ which will be the desired model as requested in Theorem \ref{maintheorem}. We construct the model $W$ in essentially two steps.
The first step is a reverse Easton iteration of suitable forcing notions of length $\kappa+1$. The second step is a variant of Radin forcing using the existence of suitable guiding generics. Assume GCH holds, $\kappa$ is a $(\kappa+3)$-strong cardinal and let $j:V \to M$ witness $\kappa$ is $(\kappa+3)$-strong. Let also $i: V \to N$ be the ultrapower embedding by $U=\{X \subseteq \kappa: \kappa \in j(X) \}$. Also factor $j$ through $i$ to get $k: N \to M$ defined by $k([f]_U)=j(f)(\kappa)$. Note that $\mathrm{crit}(k)=\kappa^{++}_N < \kappa^{++}_M=\kappa^{++}$. Force over $V$ with the reverse Easton iteration \[ \langle \langle \mathbb{P}_\lambda: \lambda \leq \kappa+1 \rangle, \langle \lusim{\mathbb{Q}}_\lambda: \lambda \leq \kappa \rangle\rangle \] where we force with the trivial forcing notion except when $\lambda\leq \kappa$ is an inaccessible cardinal, in which case $$\Vdash_{\mathbb{P}_\lambda}\text{``} \lusim{\mathbb{Q}}_\lambda=\lusim{\mathrm{Sacks}}(\lambda, \lambda^{++}) \times \lusim{\mathrm{Add}}(\lambda^{+}, \lambda^{+3}) \times \lusim{\mathrm{Add}}(\lambda^{++}, \lambda^{+4})\text{''}.$$ Let $\langle \langle G_\lambda: \lambda \leq \kappa+1 \rangle, \langle H_\lambda: \lambda \leq \kappa \rangle\rangle$ be generic for the forcing, so each $G_\lambda$ is $\mathbb{P}_\lambda$-generic over $V$ and $H_\lambda$ is $\mathbb{Q}_\lambda=\lusim{\mathbb{Q}}_\lambda[G_{\lambda}]$-generic over $V[G_\lambda]$.
Let us also write, for each inaccessible $\lambda \leq \kappa,$ $H_\lambda=H^{0}_\lambda \times H^{1}_\lambda \times H^{2}_\lambda$, which corresponds to $\mathbb{Q}_\lambda=\mathrm{Sacks}(\lambda, \lambda^{++}) \times \mathrm{Add}(\lambda^{+}, \lambda^{+3}) \times \mathrm{Add}(\lambda^{++}, \lambda^{+4}).$ By arguments similar to \cite{friedman-honzik} and \cite{cummings}, the following hold in $V[G_{\kappa+1}]$: \begin{enumerate} \item $\kappa$ is a $(\kappa+2)$-strong cardinal, \item $j: V \to M$ extends to some $j^*: V[G_{\kappa+1}] \to M[G_{\kappa+1} \ast H]$ with $^{\kappa}M[G_{\kappa+1} \ast H] \subseteq M[G_{\kappa+1} \ast H], V_{\kappa+3}[G_{\kappa+1}] \subseteq M[G_{\kappa+1} \ast H],$ and $j^*$ is generated by a $(\kappa, \kappa^{+3})$-extender, \item $i: V \to N$ extends to $i^*: V[G_{\kappa+1}] \to N[i^*(G_{\kappa+1})]$ and $i^*$ is the ultrapower of $V[G_{\kappa+1}]$ by $U^*=\{X \subseteq \kappa: \kappa \in j^*(X) \}$, \item For each inaccessible cardinal $\lambda \leq \kappa,$ we have \[ 2^\lambda=\lambda^{++},\ 2^{\lambda^+}=\lambda^{+3},\ 2^{\lambda^{++}}=\lambda^{+4}, \] \item $M[G_{\kappa+1} \ast H] \models``2^\kappa=\kappa^{++},\ 2^{\kappa^+}=\kappa^{+3},\ 2^{\kappa^{++}}=\kappa^{+4}$'', \item There is an $F\in V[G_{\kappa+1}]$ which is a generic filter over $N[i^*(G_{\kappa+1})]$, by the forcing notion $$(\mathrm{Col}(\kappa^{+4}, < i(\kappa)) \times \mathrm{Add}(\kappa^{+3}, i(\kappa)) \times \mathrm{Add}(\kappa^{+4}, i(\kappa)^+))_{N[i^*(G_{\kappa+1})]}$$ \end{enumerate} Note that $F \in M[G_{\kappa+1} \ast H]$, as it can be coded by an element of $V_{\kappa+2}$, also, if $k^*: N[i^*(G_{\kappa+1})] \to M[G_{\kappa+1} \ast H]$ is the induced elementary
embedding, defined by $k^*([f]_{U^*})=j^*(f)(\kappa),$ then $\mathrm{crit}(k^*)=\kappa^{++}_{N} < \kappa^{++}_{M}=\kappa^{++}$ and $F$ can be transferred along $k^*$, in the sense that $\langle k^*{}''[F] \rangle$, the filter generated by $k^*{}''[F]$, is $(\mathrm{Col}(\kappa^{+4}, < i(\kappa)) \times \mathrm{Add}(\kappa^{+3}, i(\kappa)) \times \mathrm{Add}(\kappa^{+4}, i(\kappa)^+))_{M[G_{\kappa+1} \ast H]}$-generic over $M[G_{\kappa+1} \ast H].$ For notational simplicity let us denote the models $V[G_{\kappa+1}],\ M[G_{\kappa+1} \ast H]$ and $N[i^*(G_{\kappa+1})]$ by $V^*, M^*$ and $N^*$ respectively. Work in $V^*$. Let \[ \mathcal{U}=\langle \mathcal{U}(\alpha, \beta): \alpha \leq \kappa, \beta < o^{\mathcal U}(\alpha) \rangle \] be a coherent sequence of measures of length $\ell^{\mathcal U}=\kappa+1$ and $o^{\mathcal U}(\kappa)=\kappa^+$. For each such $\alpha, \beta$ let \[ i^*_{\alpha, \beta}: V^* \to M^*_{\alpha, \beta} \simeq \mathrm{Ult}(V^*, \mathcal{U}(\alpha, \beta)) \] be the induced embedding. Let also \[ \mathcal{K}= \langle K_{\alpha, \beta}: \alpha \leq \kappa, \beta < o^{\mathcal U}(\alpha) \rangle \] be a sequence of filters such that: \begin{enumerate} \item[(7)] $K_{\alpha, \beta}$ is $\bigg(\mathrm{Col}(\kappa^{+4}, < i_{\alpha, \beta}(\kappa)) \times \mathrm{Add}(\kappa^{+3}, i_{\alpha, \beta}(\kappa)) \times \mathrm{Add}(\kappa^{+4}, i_{\alpha, \beta}(\kappa)^+)\bigg)_{M^*_{\alpha, \beta}}$-generic over $M^*_{\alpha, \beta}$, \item[(8)] the sequence is coherent in the sense that \[ \langle K_{\alpha, \tau}: \tau < \beta \rangle = [\bar\alpha \mapsto \langle K_{\bar\alpha, \tau}: \tau < \beta \rangle]_{\mathcal{U}(\alpha, \beta)}.
\] \end{enumerate} For each $\alpha \leq \kappa$ set $\mathcal{F}(\alpha)=\bigcap_{\beta < o^{\mathcal U}(\alpha)} \mathcal{U}(\alpha, \beta)$ if $o^{\mathcal U}(\alpha) >0,$ and set $\mathcal{F}(\alpha)=\{\emptyset\}$ otherwise. We now use $\mathcal{U}$ and $\mathcal{K}$ to define a variant of Radin forcing. \begin{definition} Let $\mathbb{R}=\mathbb{R}_{\mathcal{U}, \mathcal{K}}$ be the forcing notion consisting of conditions of the form \[ p=\langle \alpha^p_{-1}, (\alpha^p_0, f^p_0, A^p_0, F^p_0), \cdots, (\alpha^p_{n^p}, f^p_{n^p}, A^p_{n^p}, F^p_{n^p}) \rangle \] where \begin{enumerate} \item $n^p\geq 0,$ \item $\alpha^p_{-1} < \alpha^p_0 < \cdots < \alpha^p_{n^p}=\kappa$, \item $f^p_i \in \mathrm{Col}((\alpha^p_{i-1})^{+4}, < \alpha^p_i) \times \mathrm{Add}((\alpha^p_{i-1})^{+3}, \alpha^p_i) \times \mathrm{Add}((\alpha^p_{i-1})^{+4}, (\alpha^p_i)^+),$ \item $A^p_i \in \mathcal{F}(\alpha^p_i)$, \item $F^p_i$ is a function with domain $A^p_i$ such that: \begin{enumerate} \item[(A)] if $o^{\mathcal{U}}(\alpha^p_i) > 0$, then: \begin{enumerate} \item for each $\theta \in A^p_i, \theta > \alpha^p_{i-1}$, \item $f^p_i \in \mathrm{Col}((\alpha^p_{i-1})^{+4}, < \theta) \times \mathrm{Add}((\alpha^p_{i-1})^{+3}, \theta) \times \mathrm{Add}((\alpha^p_{i-1})^{+4}, \theta^+)$, for each $\theta \in A^p_i,$ \item for all $\theta \in A^p_i$, $F^p_i(\theta) \in \mathrm{Col}(\theta^{+4}, < \alpha^p_i) \times \mathrm{Add}(\theta^{+3}, \alpha^p_i) \times \mathrm{Add}(\theta^{+4}, (\alpha^p_i)^+),$ \item $[F^p_i]_{\mathcal{U}(\alpha^p_i, \beta)} \in K_{\alpha^p_i, \beta}$, for all $\beta < o^{\mathcal{U}}(\alpha^p_i)$.
\end{enumerate} \item[(B)] if $o^{\mathcal{U}}(\alpha^p_i) = 0$, then $F^p_i(\emptyset)=f^p_i.$ \end{enumerate} \end{enumerate} The order relations $\leq$ and $\leq^*$ (the Prikry order) on $\mathbb{R}$ are defined in the natural way, see for example \cite{cummings} or \cite{golshani}. \end{definition} Let $\mathbb{R}=\mathbb{R}_{\mathcal{U}, \mathcal{K}}$ and let $K$ be $\mathbb{R}$-generic over $V^*$. Let $C$ be the Radin club added by $K$ and let $\langle \lambda_i: i<\kappa \rangle$ be an increasing enumeration of $C$. For each $i<\kappa$ let $K_i=K_i^C \times K_i^3 \times K_i^4 $ be the $\mathrm{Col}(\lambda_i^{+4}, <\lambda_{i+1}) \times \mathrm{Add}(\lambda_i^{+3}, \lambda_{i+1}) \times \mathrm{Add}(\lambda_i^{+4}, \lambda^+_{i+1})$-generic filter over $V^*$ added by $K$. In the next lemma we collect the main properties of the above forcing notion. \begin{lemma} \label{rforcingprop} \begin{enumerate} \item (the chain condition) $\mathbb{R}$ satisfies the $\kappa^+$-Knaster property, \item (Prikry property) The forcing notion $(\mathbb{R}, \leq, \leq^*)$ satisfies the Prikry property. \item (the factorization lemma): Suppose $p \in \mathbb{R}$ is of length $n> 0$ and let $0 \leq m < n$. Then \begin{enumerate} \item there exists an isomorphism $$\mathbb{R} / p \simeq \mathbb{R} / p^{\leq m} \times \mathbb{R} / p^{>m},$$ where $p^{\leq m}=p \restriction m+1= \langle \alpha^p_{-1}, (\alpha^p_0, f^p_0, A^p_0, F^p_0), \cdots, (\alpha^p_{m}, f^p_{m}, A^p_{m}, F^p_{m}) \rangle$ and $p^{>m}= \langle \alpha^p_m, (\alpha^p_{m+1}, f^p_{m+1}, A^p_{m+1}, F^p_{m+1}), \cdots, (\alpha^p_{n^p}, f^p_{n^p}, A^p_{n^p}, F^p_{n^p}) \rangle$, \item $(\mathbb{R}/ p^{\leq m}, \leq)$ satisfies the $(\alpha^p_m)^+$-c.c. and $(\mathbb{R} / p^{>m}, \leq^*)$ is $(\alpha^p_m)^{+3}$-closed.
\end{enumerate} \item $\kappa$ remains an inaccessible cardinal in $V^*[K]$. \item Let $C$ and $\langle \lambda_i: i<\kappa \rangle$ be as above. Then \begin{enumerate} \item for each $i<\kappa$, we have \[ 2^{\lambda_i}=\lambda_i^{++},\ 2^{\lambda_i^+}=\lambda_i^{+3},\ 2^{\lambda_i^{++}}= \lambda_i^{+4},\ 2^{\lambda_i^{+3}}=\lambda_i^{+5}=\lambda_{i+1},\ 2^{\lambda_i^{+4}}=\lambda_{i+1}^+. \] \item If we force with $\mathrm{Col}(\aleph_0, < \lambda_0) \times \mathrm{Add}(\aleph_0, \lambda_0^+) \times \mathbb{R}$, then in the generic extension, $\kappa$ is the least inaccessible cardinal and for all $\lambda < \kappa, 2^\lambda=\lambda^{++}$. \end{enumerate} \item $V^*[K]=V^*[\langle K_i: i<\kappa \rangle]$. \end{enumerate} \end{lemma} \iffalse We also need the following geometric characterization of the Radin generic filter. \begin{lemma} \label{geometric} \end{lemma} \fi Now suppose that $L_0 \times L_1 \times K$ is $\mathrm{Col}(\aleph_0, < \lambda_0) \times \mathrm{Add}(\aleph_0, \lambda_0^+) \times \mathbb{R}$-generic over $V^*$ and set \[ W=V^*[L_0 \times L_1 \times K] = V[G_{\kappa+1}][L_0 \times L_1 \times K]. \] This completes our construction of the model $W$. Let us note that by Lemma \ref{rforcingprop}, \[ \text{Card}^W \cap [\aleph_0, \kappa)=\{\aleph_0\} \cup \{\lambda_i, \lambda^+_i, \lambda^{++}_i, \lambda^{+3}_i, \lambda^{+4}_i: i<\kappa \}. \] In the next section we show that the model $W$ satisfies the conclusion of Theorem \ref{maintheorem}. \section{No uncountable universal graphs in W} \label{s2} In this section we complete the proof of Theorem \ref{maintheorem}, by showing that in $W$, there is no universal graph of size $\lambda$, where $\lambda$ is an uncountable regular cardinal less than $\kappa$.
\iffalse Before we state the proof, let us make the following observation. \betaegin{lemma} \lambdaabel{dividing} Suppose $i<\kappaappa$, and let $i=i_*+k$, where $i_*$ is a limit ordinal\footnote{$i_*=0$ is allowed} and $k<\omegaega$. Then $W$ is a generic extension of \[ V[G_{\lambdaambda_i}][\lambdaangle K_j: j \lambdaeq i_* \ranglegle][L_0 \times L_1][H_{\lambdaambda_i} \times \prod_{\ell \lambdaeq k} K_{i_*+\ell}] \] by a forcing notion which does not add any new bounded subsets to $$ \end{lemma} \fi The proof is divided into several cases: {\underline{\betaf Case 1. $\lambdaambda=\alphaleph_1$:}} We have $W=V^*[K, L_0][L_1]$, where $V^*[K, L_0]$ satisfies CH and $W$ is an extension of $V^*[K, L_0]$ by $L_1$ which is $\mathrm{Add}(\alphaleph_0, \lambdaambda_0^+)$-generic over $V^*[K, L_0]$. Thus by Lemma \ref{shtm}, there are no universal graphs of size $\alphaleph_1$. {\underline{\betaf Case 2. $\lambdaambda=\lambdaambda_{i+1}^{+n}$ for some $i < \kappaappa$ and some $n < 3$:}} Assume on the contrary that $T$ is a universal graph of size $\lambdaambda$ in $W$. Now we can write $W$ as \[ W=V^*[\lambdaangle K_j: j \lambdaeq i \ranglegle][\lambdaangle K_j: i < j <\kappaappa \ranglegle][L_0 \times L_1] \] and that by Lemma \ref{rforcingprop}(2), $\mathcal{P}^W(\lambdaambda) = \mathcal{P}^{V^*[\lambdaangle K_j: j \lambdaeq i \ranglegle][L_0 \times L_1]}(\lambdaambda)$. It immediately follow that $T \in V^*[\lambdaangle K_j: j \lambdaeq i \ranglegle][L_0 \times L_1].$ Now let $i_*$ be a limit ordinal, with possibly $i_*=0$, such that $i=i_*+k$, for some $k < \omegaega.$ So we have \[ V^*[\lambdaangle K_j: j \lambdaeq i \ranglegle][L_0 \times L_1]=V^*[\lambdaangle K_j: j < i_* \ranglegle][L_0 \times L_1][\prod_{\ell \lambdaeq k}K_{i_*+\ell}]. 
\] Now $V^*=V[G_{\lambdaambda_{i+1}}][X]$, where $X$ is generic over $V[G_{\lambdaambda_{i+1}}]$ for a forcing notion which is $\lambdaambda_{i+1}^*$-closed and $\lambdaambda_{i+1}^*$ is the least inaccessible above $\lambdaambda_{i+1}$. It immediately follow that $\mathcal{U} \restriction \lambdaambda_{i_*}+1 \in V[G_{\lambdaambda_{i+1}}]$ and hence by Lemma \ref{rforcingprop}(2), we may assume that $T$ belongs to $$V[G_{\lambdaambda_{i+1}}][\lambdaangle K_j: j \lambdaeq i \ranglegle][H_{\lambdaambda_{i+1}}][L_0 \times L_1]=V[G_{\lambdaambda_{i+1}}][\lambdaangle K_j: j \lambdaeq i \ranglegle][L_0 \times L_1][\prod_{\ell < 3, \ell \neq n }H^\ell_{\lambdaambda_{i+1}}][ H^n_{\lambdaambda_{i+1}}].$$ Now we have $V[G_{\lambdaambda_{i+1}}][\lambdaangle K_j: j \lambdaeq i \ranglegle][L_0 \times L_1][\prod_{\ell < 3, \ell \neq n }H^\ell_{\lambdaambda_{i+1}}]$ satisfies $2^{\lambdaambda}=\lambdaambda^{+}$ and $ V[G_{\lambdaambda_{i+1}}][\lambdaangle K_j: j \lambdaeq i \ranglegle][H_{\lambdaambda_{i+1}}][L_0 \times L_1]$ is a generic extension of it, by either $\mathrm{Sacks}(\lambdaambda, \lambdaambda^{++})$ if $\lambdaambda=\lambdaambda_{i+1}$ or $\mathrm{Add}(\lambdaambda, \lambdaambda^{++})$ if $\lambdaambda \in \{\lambdaambda_{i+1}^+, \lambdaambda_{i+1}^{++} \}$. This contradicts either Lemma \ref{shtm} or Lemma \ref{sdftm}. {\underline{\betaf Case 3. $\lambdaambda=\lambdaambda_{i}^{+n}$ for some $i < \kappaappa$ and some $2 < n < 5$:}} The argument is essentially the same as above. Suppose towards a contradiction that $T$ is a universal graph of size $\lambdaambda$ in $W$. As in Case 2, we will get that $T \in V[G_{\lambdaambda_{i}+1}][\lambdaangle K_j: j < i \ranglegle][K_{i}][L_0 \times L_1]$ and this model is an extension of $V[G_{\lambdaambda_{i}+1}][\lambdaangle K_j: j < i \ranglegle][K^C_{i} \times K^{\ell}_{i}][L_0 \times L_1]$, where $\ell \in \{3, 4\}, \ell \neq n$. 
This later model satisfies GCH at $\lambdaambda$, and as in Case 2 we get a contradiction to Lemma \ref{shtm}. {\underline{\betaf Case 4. $\lambdaambda=\lambdaambda_{i}^{+n}$ for some limit ordinal $i < \kappaappa$ and some $0< n < 3$:}} Arguing as above, if $T \in W$ is universal graph of size $\lambdaambda$, then $T \in V[G_{\lambdaambda_i}][\lambdaangle K_j: j<i \ranglegle][H^0_{\lambdaambda_i} \times H^1_{\lambdaambda_i} \times H^2_{\lambdaambda_i}][L_0 \times L_1]$ and this model extends $V[G_{\lambdaambda_i}][\lambdaangle K_j: j<i \ranglegle][H^0_{\lambdaambda_i} \times H^\ell_{\lambdaambda_i}][L_0 \times L_1]$, where $\ell \in \{1, 2\}, \ell \neq n$, by $\mathrm{Add}(\lambdaambda, \lambdaambda^{++})$, so again we get a contradiction using Lemma \ref{shtm}. {\underline{\betaf Case 5. $\lambdaambda=\lambdaambda^+_i$ for some limit ordinal $i< \kappaappa$:}} This is the hardest part of the proof. We follow very closely the argument given in \cite{friedman-thompson}. Suppose towards a contradiction that $T$ is a universal graph on $\lambdaambda$. As before, we can conclude that $T \in V[G_{\lambdaambda_i}][\lambdaangle K_j: j<i \ranglegle][L_0 \times L_1][H^0_{\lambdaambda_i} \times H^1_{\lambdaambda_i}]$. Let us identify $H^0_{\lambdaambda_i}$ with the $\kappaappa^{++}$-sequences $\lambdaangle A_\sigma: \sigma < \kappaappa^{++} \ranglegle$ of mutually Sacks-generic subsets of $\lambdaambda_i$. For notational simplicity, set \[ \widetilde{V}=V[G_{\lambdaambda_i}][\lambdaangle K_j: j<i \ranglegle][L_0 \times L_1][H^1_{\lambdaambda_i}], \] and for each $\sigma \lambdaeq \lambdaambda_i^{++}$ set \[ \widetilde{V}_\sigma=V[G_{\lambdaambda_i}][\lambdaangle K_j: j<i \ranglegle][L_0 \times L_1][H^1_{\lambdaambda_i}][\lambdaangle A_\alphalpha: \alphalpha < \sigma \ranglegle]. 
\] Thus $\widetilde{V}=\widetilde{V}_0$ and $\widetilde{V}_{\lambdaambda_i^{++}}=V[G_{\lambdaambda_i}][\lambdaangle K_j: j<i \ranglegle][L_0 \times L_1][H^0_{\lambdaambda_i} \times H^1_{\lambdaambda_i}].$ Also for each $\sigma < \lambdaambda_i^{++}$, the model $\widetilde{V}_\sigma$ satisfies $2^{\lambdaambda_i}=\lambdaambda_i^+.$ The proof of the next claim is essentially the same as in \cite{mohammadpour}. \betaegin{claim} \lambdaabel{restriction} Let $\mathcal{S}$ be the set of ordinals $\sigma < \lambdaambda_i^{++}$ such that: \betaegin{enumerate} \item The restriction of $\mathcal U$ to $\widetilde{V}_\sigma$, i.e., \[ \mathcal{U} \cap \widetilde{V}_\sigma = \lambdaangle \mathcal{U}(\alphalpha, \betaeta) \cap \widetilde{V}_\sigma: \alphalpha \lambdaeq \lambdaambda_i, \betaeta < o^{\mathcal U}(\alphalpha) \ranglegle \] is a coherent sequence of measures in $\widetilde{V}_\sigma$, \item For each $\sigma$ as above, $\alphalpha \lambdaeq \lambdaambda_i$ and $\betaeta < o^{\mathcal U}(\alphalpha)$ set $i_{\alphalpha, \betaeta}^\sigma: V^* \to M_{\alphalpha, \betaeta}^{*, \sigma} \simeq \mathrm{Ult}(V^*, \mathcal{U}(\alphalpha, \betaeta) \cap \widetilde{V}_\sigma)$. 
Then the sequence \[ \mathcal{K} \cap \widetilde{V}_\sigma= \lambdaangle K_{\alphalpha, \betaeta} \cap \widetilde{V}_\sigma : \alphalpha \lambdaeq \lambdaambda_i, \betaeta < o^{\mathcal U}(\alphalpha) \ranglegle \] ia a sequence of filters such that: \betaegin{enumerate} \item $K_{\alphalpha, \betaeta} \cap \widetilde{V}_\sigma$ is $\betaigg(\mathrm{Col}(\kappaappa^{+4}, < i_{\alphalpha, \betaeta}(\kappaappa)) \times \mathrm{Add}(\kappaappa^{+3}, i_{\alphalpha, \betaeta}(\kappaappa)) \times \mathrm{Add}(\kappaappa^{+4}, i_{\alphalpha, \betaeta}(\kappaappa)^+)\betaigg)_{M^{*, \sigma}_{\alphalpha, \betaeta}}$-generic over $M^{*, \sigma}_{\alphalpha, \betaeta}$, \item the sequence is coherent in the sense that \[ \lambdaangle K_{\alphalpha, \tau} \cap \widetilde{V}_\sigma : \tau < \betaeta \ranglegle = [\betaar\alphalpha \mapsto \lambdaangle K_{\betaar\alphalpha, \tau} \cap \widetilde{V}_\sigma: \tau < \betaeta \ranglegle]_{\mathcal{U}(\alphalpha, \betaeta) \cap \widetilde{V}_\sigma}. \] \end{enumerate} \end{enumerate} Then $\mathcal{S}$ is a stationary subset of $\lambdaambda_i^{++}$. \end{claim} Now take $\sigma \in \mathcal{S}$ such that $T \in \widetilde{V}_\sigma$. Let us define the graph $T^* \in \widetilde{V}_{\lambdaambda_i^{++}}$ as follows: \betaegin{itemize} \item $T^*$ has the universe $\lambdaambda_i \cup Y,$ where $Y \subseteq \lambdaambda_i^{++}$ has size $\lambdaambda=\lambdaambda_i^+$, $\min(Y) > \sigma$ and $Y \cap \mathcal{S}$ is cofinal in $\sup(Y)$. \item the pair $(\gammaamma, \betaeta)$ is an edge in $T^*$ iff $\gammaamma \in Y$ and $\betaeta \in A_\gammaamma$ or symmetrically $\betaeta \in Y$ and $\gammaamma \in A_\betaeta.$ \end{itemize} Since $T$ is universal, we can find and embedding $f: T^* \to T$ with $f \in \widetilde{V}_{\lambdaambda_i^{++}}$. 
Take $X \subseteq \lambda_i^{++}$ of size $\leq \lambda_i$ such that $f \restriction \lambda_i \in \widetilde{V}_\sigma[\langle A_\alpha: \alpha \in X \rangle]$. The same arguments as in \cite{friedman-thompson} show that there exists $\gamma \in Y \cap \mathcal{S} \setminus X$ such that $A_\gamma \notin \widetilde{V}_\sigma[\langle A_\alpha: \alpha \in X \rangle].$ By \cite[Lemma 2.6]{friedman-thompson} we can recover $A_\gamma$ using $f \restriction \lambda_i$ and $T$, which implies $A_\gamma \in \widetilde{V}_\sigma[\langle A_\alpha: \alpha \in X \rangle],$ which is impossible. \begin{remark} In the model $W$, for every singular cardinal $\lambda,$ we have $2^{<\lambda}=\lambda$ and hence by classical results in model theory, there is a universal graph of size $\lambda$ in $W$. \end{remark} \begin{remark} In the model $W$, for each uncountable regular cardinal $\lambda,$ the universality number for graphs on $\lambda$ is $\lambda^{++}$. \end{remark} \begin{thebibliography}{100} \bibitem{cummings} Cummings, James A model in which GCH holds at successors but fails at limits. Trans. Amer. Math. Soc. 329 (1992), no. 1, 1--39. \bibitem{friedman-honzik} Friedman, Sy-David; Honzik, Radek Easton's theorem and large cardinals. Ann. Pure Appl. Logic 154 (2008), no. 3, 191--208. \bibitem{friedman-thompson} Friedman, S.-D.; Thompson, K. Negative universality results for graphs. Fund. Math. 210 (2010), no. 3, 269--283. \bibitem{gitik} Gitik, Moti Prikry-type forcings. Handbook of set theory. Vols. 1, 2, 3, 1351--1447, Springer, Dordrecht, 2010. \bibitem{golshani} Golshani, Mohammad (Weak) diamond can fail at the least inaccessible cardinal. Fund. Math. 256 (2022), no. 2, 113--129.
\bibitem{mohammadpour} Golshani, Mohammad; Mohammadpour, Rahman; The tree property at double successors of singular cardinals of uncountable cofinality, Ann. Pure Appl. Logic, 169 (2018), no. 2, 164--175. \bibitem{kojman} Kojman, Menachem; Shelah, Saharon Nonexistence of universal orders in many cardinals. J. Symbolic Logic 57 (1992), no. 3, 875--891. \bibitem{shelah1} Shelah, Saharon On universal graphs without instances of CH. Ann. Pure Appl. Logic 26 (1984), no. 1, 75--87. \bibitem{shelah2} Shelah, Saharon Universal graphs without instances of CH: revisited. Israel J. Math. 70 (1990), no. 1, 69--81. \end{thebibliography} \end{document}
\begin{document} \title[Condensation effect in Kingman's model] {Emergence of condensation in Kingman's model\\ of selection and mutation \centerline{\rm \small \today}} \author[Steffen Dereich and Peter M\"orters ]{Steffen Dereich and Peter M\"orters} \begin{abstract} We describe the onset of condensation in the simple model for the balance between selection and mutation given by Kingman in terms of a scaling limit theorem. Loosely speaking, this shows that the wave moving towards genes of maximal fitness has the shape of a gamma distribution. We conjecture that this wave shape is a universal phenomenon that can also be found in a variety of more complex models, well beyond the genetics context, and provide some further evidence for this. \end{abstract} \maketitle \thispagestyle{empty} \section{Introduction and statement of the result} In~\cite{K78} Kingman proposes and analyses a simple model for the distribution of fitness in a population undergoing selection and mutation. The characteristic feature of this model is that the fitness of genes before and after mutation is modelled as independent, the mutation having destroyed the biochemical `house of cards' built up by evolution. Kingman shows that in his model the distribution of the fitness in the population converges to a limiting distribution. There are two phases: When \emph{mutation} is favoured over selection, the limiting distribution is a skewed version of the fitness distribution of a mutant. But if \emph{selection} is favoured over mutation, a condensation effect occurs, and we find that a positive proportion of the population in late generations has fitness very near the optimal value, leading to the emergence of an atom at the maximal fitness value in the limiting distribution.
Physicists have argued that this is akin to the effect of Bose-Einstein condensation, in which for a dilute gas of weakly interacting bosons at very low temperatures a fraction of the bosons occupy the lowest possible quantum state, see for example~\cite{BFF09}. In the present paper, we focus on the Kingman model and discuss the form of the fitness distribution for that part of the population that eventually form the atom in the limiting distribution. After stating our theorem and giving a proof we will draw comparisons to other models in a discussion section at the end of this paper. Mathematically, Kingman's model consists of a sequence of probability measures $(p_n)$ on the unit interval $[0,1]$ describing the distribution of fitness values in the $n$th generation of a population. The parameters of the model are a mutant fitness distribution $q$ on $[0,1]$ and some $0<\beta<1$ determining the relation between mutation and selection. If $p_n$ is the fitness distribution in the $n$th generation we denote by $$w_n=\int x \, p_n(dx)$$ the mean fitness and define $$p_{n+1}(dx)= (1-\beta) \, w_n^{-1} x \, p_n(dx) + \beta \, q(dx).$$ Loosely speaking, a proportion $1-\beta$ of the genes in the new generation are resampled from the existing population using their fitness as a selective criterion, and the rest have undergone mutation and are therefore sampled from the fitness distribution~$q$. We assume throughout that the mutant fitness distribution near its tip is stochastically larger than the fitness distribution in the inital population, in the sense that the moments $$m_n:=\int x^n \, p_0(dx) \quad \mbox{ and } \mu_n:=\int x^n \, q(dx)$$ satisfy $$\lim_{n{\tau}o\infty} \frac{m_n}{\mu_n}=0.$$ Under this (or, indeed, a weaker) assumption, Kingman showed that $(p_n)$ converges to a limit distribution $p(dx)$, which does not depend on~$p_0$. 
Moreover, $p$ is absolutely continuous with respect to $q$ if and only if $$\beta \int_0^1 \frac{q(dx)}{1-x} \geq 1.$$ Otherwise, \begin{equation}\label{gam} \gamma(\beta):=1-\beta \int_0^1 \frac{q(dx)}{1-x}>0, \end{equation} and this is the case of interest to us. In this case the limiting distribution $p(dx)$ still exists, but it has an atom at the optimal fitness~$1$, an effect called \emph{condensation}. The limiting distribution does not depend on~$p_0$ and equals $$p(dx) = \beta \frac{q(dx)}{1-x} + \gamma(\beta) \, \delta_1 (dx).$$ Our main result describes the dynamics of condensation in terms of a scaling limit theorem which zooms into the neighbourhood of the maximal fitness value and shows the shape of the `wave' eventually forming the condensate, see~Figure~1. \begin{figure} \caption{\footnotesize Schematic picture of $p_n$. On the right the \emph{wave}.} \end{figure} \pagebreak[3] \begin{theorem} Suppose that the fitness distribution $q$ satisfies \begin{equation}\label{tailass} \lim_{h\downarrow 0} \frac{q(1-h,1)}{h^\alpha}=1, \end{equation} where $\alpha>1$, and that~\eqref{gam} holds. Then, for $x>0$, \begin{equation}\label{wave} \lim_{n\uparrow \infty} p_n(1-\frac{x}{n},1) = \frac{\gamma(\beta)}{\Gamma(\alpha)}\int_0^x y^{\alpha-1} e^{-y} \, dy. \end{equation} \end{theorem} We remark that the total mass in the `wave' moving towards the maximal fitness value agrees with the mass of the atom in the limiting distribution~$p(dx)$. Its rescaled shape is that of a \emph{gamma distribution} with shape parameter~$\alpha$. \section{Proof of Theorem~1} Note that $$\mu_n = \int x^n \, q(dx) \sim \Gamma(\alpha+1)\, n^{-\alpha},$$ where the asymptotics is easily derived from~\eqref{tailass}, and note that \begin{align}\label{eq1304-1}\sum_{n=0}^\infty \mu_n = \int_0^1 \frac {q(dx)}{1-x}=\frac1{\beta} \, (1-\gamma(\beta)).
\end{align} Also define $$W_n:=w_1\cdots w_n.$$ Given the family $(W_n)_{n\geq 1}$ the fitness distributions can be obtained as \begin{equation}\label{sol} p_n(dx)=\sigmaum_{r=0}^{n-1} \frac{W_{n-r}}{W_n} \, (1-\beta)^r \beta \, x^r \, q(dx) + \frac{1}{W_n}\,(1-\beta)^n \, x^n \, p_0(dx), \end{equation} see \cite[(2.1)]{K78}. The main tool in the proof is therefore the following lemma. \begin{lemma}\label{thelemma} We have, as $n\uparrow \infty$, $$W_n \sigmaim c\, n^{-\alpha} (1-\beta)^{n-1},$$ where $$c=\frac{\beta}{\gamma(\beta)}\, \Gamma(\alpha+1)\, \sigmaum_{k=1}^\infty W_k \, (1-\beta)^{1-k}.$$ \end{lemma} \begin{proof} Integrating~\eqref{sol} we obtain~\cite[(2.3)]{K78} $$W_n=\sigmaum_{r=1}^{n-1} W_{n-r} \, (1-\beta)^{r-1} \beta \, \mu_r + (1-\beta)^{n-1} \, m_n.$$ Abbreviate $u_n:= W_n \, (1-\beta)^{1-n}.$ Then $u_n$ satisfies the \emph{renewal equation} $$u_n = \sigmafrac{\beta}{1-\beta}\, \sigmaum_{r=1}^{n-1} u_{n-r} \mu_r + m_n, \qquad \mbox{ for } n\geq 1.$$ Using (\ref{eq1304-1}), we obtain $\frac{\beta}{1-\beta}\sigmaum_{n=1}^\infty \mu_n=1-\frac{\gamma(\beta)}{1-\beta}<1$. Hence, the renewal theorem, see e.g.~\cite[XXXIII.10, Theorem~1]{F}, implies that $$\sigmaum_{n=1}^\infty u_n = \frac{\sigmaum_{n=1}^\infty m_n}{1- \frac{\beta}{1-\beta}\, \sigmaum_{n=1}^\infty \mu_n}=\frac{1-\beta}{\gamma(\beta)} \sigmaum_{n=1}^\infty m_n<\infty$$ where the finiteness follows since $m_n$ is bounded by a constant multiple of $\mu_n$ and $$ \sigmaum_{n=0}^\infty \mu_n =\int \frac {q(dx)}{1-x} <\infty. $$ Fix $\deltata>0$ and $0<\varepsilon<\eta<1$ and suppose $n$ is large enough such that $\eta n\leq n-1$ and $$\mu_r\leq (\Gamma(\alpha+1)+\deltata)\, r^{-\alpha} \quad \mbox{ for all } r\geq (1-\eta) n.$$ For an inductive argument suppose that $c_1, \ldots, c_r$ are chosen such that $u_r\leq c_r\, r^{-\alpha}$ for all $\varepsilon n \leq r \leq n-1$. 
Then one has for $r=1,\dots,n-1$ $$ u_r \mu_{n-r} \leq \begin{cases} (1-\varepsilon)^{-\alpha} (\Gamma(\alpha+1)+\deltata) n^{-\alpha} u_r &{\tau}ext{ if }r\leq \varepsilon n,\\ c_r\,(\Gamma(\alpha+1)+\deltata) r^{-\alpha} (n-r)^{-\alpha} &{\tau}ext{ if } \varepsilon n\leq r \leq \eta n,\\ c_{r}\, \eta^{-\alpha} n^{-\alpha} \mu_{n-r} &{\tau}ext{ if } \eta n\leq r, \end{cases} $$ so that \begin{align} u_n & \leq (1-\varepsilon)^{-\alpha} \, \sigmafrac{\beta}{1-\beta}\, (\Gamma(\alpha+1)+\deltata)\, \Bigl(\sigmaum_{r=1}^{\infty} u_r\Bigr) \, n^{-\alpha}\, \notag\\ & \phantom{space} + \sigmafrac{\beta}{1-\beta}\, (\Gamma(\alpha+1)+\deltata)\, \Bigl(\sigmafrac1n \sigmaum_{r=\lfloor \varepsilon n\rfloor +1}^{\lfloor \eta n\rfloor} c_r\, \bigl(\sigmafrac rn\bigr)^{-\alpha}\bigl(1-\sigmafrac rn\bigr)^{-\alpha} \Bigr) n^{1-2\alpha}\, \label{defcn}\\ & \phantom{space} + \sigmafrac{\beta}{1-\beta}\,\eta^{-\alpha} \, \Bigl(\sigmaum_{r=\lfloor \eta n\rfloor+1}^{n-1} c_r \, \mu_{n-r}\Bigr) n^{-\alpha}+ m_n =: c_n n^{-\alpha}.\notag \end{align} By induction this yields a sequence $(c_n)$ with $u_n \leq c_n\, n^{-\alpha}$ for all~$n\geq 1$. 
\smallskip Using that $m_n n^{\alpha} \to 0$ by assumption, and that the term~\eqref{defcn} is bounded by a constant multiple~of $$n^{1-2\alpha}\, \int_{\varepsilon }^{\eta } dr\, r^{-\alpha}(1-r)^{-\alpha} \ll n^{-\alpha},$$ we see that $(c_n)$ converges to the unique solution~$c^*=c^*(\varepsilon, \delta, \eta)$ of $$c^*=(1-\varepsilon)^{-\alpha} \, \sfrac{\beta}{1-\beta}\, (\Gamma(\alpha+1)+\delta)\, \sum_{r=1}^{\infty} u_r + c^*\,\eta^{-\alpha} \, \sfrac{\beta}{1-\beta}\,\sum_{r=1}^\infty \mu_r.$$ Recalling that $\beta \sum_{r=1}^\infty \mu_r = 1-\gamma(\beta)-\beta$, and letting $\varepsilon, \delta\downarrow 0$ and $\eta\uparrow 1$ we see that $c^*(\varepsilon, \delta, \eta)$ converges to $$c=\frac{\beta}{\gamma(\beta)}\, \Gamma(\alpha+1) \, \sum_{k=1}^\infty u_k= \frac{\beta}{\gamma(\beta)}\, \Gamma(\alpha+1)\, \sum_{k=1}^\infty W_k \, (1-\beta)^{1-k},$$ which yields the upper bound. The lower bound can be derived similarly. \end{proof} To complete the proof using the lemma, we look at~\eqref{sol} and get \begin{align*} p_n\big(1- \frac{x}{n},1 \big) & = \sum_{r=0}^{n-1} \frac{W_{n-r}}{W_n} \, (1-\beta)^r \beta \, \int_{1-x/n}^1 y^r \, q(dy) + \frac{1}{W_n}\,(1-\beta)^n \, \int_{1-x/n}^1 y^n \, p_0(dy). \end{align*} The second term vanishes asymptotically, as $$\frac{1}{W_n}\,(1-\beta)^n \, \int_{1-x/n}^1 y^n \, p_0(dy) \sim (1-\beta)\, \frac{m_n}{c n^{-\alpha}} \frac{\int_{1-x/n}^1 y^n \, p_0(dy)}{\int_0^1 y^n \, p_0(dy)}\to 0,$$ using our assumption that $m_n/\mu_n\to 0$.
The first term is asymptotically equivalent to $$n^\alpha \sum_{r=0}^{n-1} W_{n-r} \, c^{-1}\,(1-\beta)^{1-n+r}\beta\, \int_{1-x/n}^1 y^{r} \, q(dy).$$ By choosing a large~$M$, the contribution coming from terms with $r\leq n-Mn^{1/\alpha}$ can be bounded by a constant multiple of $$(n-Mn^{1/\alpha}) \, \Big(\frac{n}{Mn^{1/\alpha}}\Big)^\alpha q\big(1-\sfrac{x}{n},1\big),$$ which is bounded by an arbitrarily small constant. For the remaining terms we can now use that $$\begin{aligned} \int_{1-x/n}^1 y^{sn} \, q(dy) & \sim \int_x^0 e^{-as} \, dq(1-\sfrac{a}n,1) \sim \alpha\, n^{-\alpha} \int_0^x a^{\alpha-1} e^{-as} \, da, \end{aligned}$$ and a change of variables to obtain equivalence to $$\alpha\,\beta\,c^{-1} \Big(\sum_{m=1}^{\infty} W_{m} \,(1-\beta)^{1-m}\Big) \, \int_{0}^x a^{\alpha-1} e^{-a}\, da,$$ and the result follows as, by Lemma~\ref{thelemma}, $$\alpha\,\beta\,c^{-1} \Big(\sum_{m=1}^{\infty} W_{m} \,(1-\beta)^{1-m}\Big) = \frac{\gamma(\beta)}{\Gamma(\alpha)},$$ as required. \section{Discussion} Kingman's model is on the one hand one of the simplest models in which a condensation effect can be observed, on the other hand it is sufficiently rich to study the emergence of condensation as a dynamical phenomenon. The simplicity of the model allows a rigorous treatment with elementary means, but we believe that our calculation has far reaching consequences as a variety of much more complex models in quite diverse areas of science have similar features. Among the models we expect to share many features with Kingman's model are models of the physical phenomenon of Bose--Einstein condensation, of wealth condensation in macroeconomics, or the emergence of traffic jams.
\pagebreak[3] Our \emph{main conjecture} is that in a large universality class of models in which effects similar to mutation and selection compete effectively on a bounded and continuous statespace, the `wave' moving towards the maximal state forming the condensate is of a Gamma shape. Random models which are suitable test cases for our universality claim arise, for example, in the study of random permutations with cycle weights. Here the probability of a permutation~$\sigmaigma$ in the symmetric group on $n$~elements is defined as $$\mathbb P_n(\sigmaigma)= \frac1{n!h_n} \prod_{j\geq 1} {\tau}heta_j^{R_j(\sigmaigma)},$$ where $R_j(\sigmaigma)$ is the number of cycles of length~$j$ in~$\sigmaigma$ and $h_n$ is a normalisation constant. For our investigation we focus on the case that ${\tau}heta_j\sigmaim j^{\gamma}$ for $\gamma\in\mathbb R$. We now discuss results of Betz, Ueltschi and Velenik~\cite{BUV11} and Ercolani and Ueltschi~\cite{EU11} in our context. Our interest is in the \emph{empirical cycle length distribution} which is the random measure on $[0,1]$ given by $$\mu_n= \frac1n \sigmaum_{i=1}^n \lambda_i\, \deltata_{\frac{\lambda_i}{n}},$$ where the integers $\lambda_1\geq \lambda_2 \geq \cdots$ are the ordered cycle lengths of a permutation chosen randomly according to $\mathbb P_n$. The asymptotic behaviour of $\mu_n$ shows three phases depending on the value of the parameter~$\gamma$, see Table~1 in~\cite{EU11}: \begin{itemize} \item If $\gamma<0$ large cycles are preferred and the empirical cycle length distribution concentrates asymptotically in the point $1$, \item if $\gamma=0$ there is no condensation and we have convergence to a beta distribution, \item if $\gamma>0$ we see a preference for short cycles and the empirical cycle length distribution concentrates asymptotically in the point $0$. 
\end{itemize} In the two phases in which see a condensation effect we have partial information on the shape of the wave, which is consistent with our universality claim. Let us first look at the case $\gamma>0$ when the empirical cycle length distribution concentrates in the left endpoint of our domain, i.e. the normalised cycle lengths vanish asymptotically. In this case Theorem~5.1 of~\cite{EU11} shows that, for $\alpha=\frac{\gamma}{\gamma+1}$, $$\lim_{n{\tau}o\infty} \mathbb E\big[\mu_n[0,\sigmafrac{x}{n^\alpha})\big]=\frac1{\Gamma(\gamma+1)}\, \int_0^x y^\gamma e^{-y}\, dy,$$ i.e.\ focusing on the left edge of the domain in the scale $1/n^\alpha$ we see a gamma distributed wave shape with parameter~$\gamma$, at least in the mean. It is a natural conjecture that this convergence holds not only in expectation, but also in probability, and establishing this fact is subject of an ongoing project. If $\gamma<0$ large cycles are preferred. Here the situation is slightly different because the wave sweeping towards the maximal normalised cyclelength is on the critical scale $1/n$ and this means that we expect that the discrete nature of $\mu_n$ is retained in the limit. More precisely, Theorem~3.2 of~\cite{BUV11} implies that $$\lim_{n{\tau}o\infty} \mathbb E\big[\mu_n[1-\sigmafrac{m}{n},1]\big]= \sigmafrac12\, \sigmaum_{n=0}^m e^{-c^*n}h_n,$$ where $c^*$ is a `Malthusian parameter' chosen such that $$\sigmaum_{n=1}^\infty e^{-c^*n}h_n=1.$$ We further note that $h_n\sigmaim C\,n^{\gamma-1}$ by \cite[(7.1)]{EU11} and so we are still able to recognise a discrete form of a gamma distribution with parameter $\gamma$ in this case. The most elaborate model in which we were able to test our hypothesis is a random network model with fitness. We now give an informal preview of forthcoming results of Dereich~\cite{D12}, which are motivated by a problem of Borgs et al.~ \cite{BCDR07}. 
A preferential attachment network model is a sequence of random graphs $(\mathcal G(n))_{n\in\mathbb N}$ that is built dynamically: one starts with a graph $\mathcal G(1)$ consisting of a single vertex $1$ and, in general, the graph $\mathcal G({n+1})$ is built by adding the vertex $n+1$ to the graph $\mathcal G(n)$ and by insertion of edges connecting the new vertex to the graph $\mathcal G(n)$ according to an attachment rule. Typically, the attachment rule rewards vertices that already have a high degre: in most cases the degree of a vertex has an affine influence on its attractiveness in the collection of new edges. In a preferential attachment model with fitness one additionally assigns each vertex an intrinsic fitness, a positive number, which has a linear impact on its attractiveness in the network formation.\sigmamallskip Let us be more precise about the variant of the network model to be considered in the rest of this paper. We consider a sequence of random \emph{directed} graphs $(\mathcal G(n))_{n\in\mathbb N}$ and denote by $$ \mathrm{imp}_{n}(m):= \mathrm{indegree}_{\mathcal G(n)}(m) +1 $$ the \emph{impact} of the vertex $m\in\{1,\dots,n\}$ in $\mathcal G(n)$. Further, let $F_1,F_2,\dots$ denote a sequence of independent $q$-distributed random variables modeling the fitness of the individual vertices $1,2,\ldots$. The attachment rule is as follows: given the graph $\mathcal G(n)$ and all fitnesses, link $n+1$ to each individual vertex $m\in\{1,\dots,n\}$ with an independent Poisson distributed number of edges with parameter $$ \frac 1{n\,Z_n}\, {F_m\, \mathrm{imp}_{n}}(m), $$ where $Z_n$ is a normalisation which depends only on $\mathcal G(n)$ and the fitnesses. Note that all links point from new to old vertices so that orientations can be recovered from the undirected set of edges. 
We consider two types of normalisations: \begin{enumerate} \item \emph{adaptive normalisation}: $Z_n=\frac 1{\lambda n} \sigmaum_{m=1}^n F_m \,\mathrm{imp}_{n}(m)$ for a parameter $\lambda>0$,\\[-2mm] \item \emph{deterministic normalisation}: $(Z_n)$ is a deterministic sequence. \end{enumerate} In the case of adaptive normalisation, the outdegree of $n+1$ is Poisson distributed with parameter~$\lambda$, even when conditioning on the graph $\mathcal G(n)$. Hence, the total number of edges is almost surely of order $\lambda n$ so that $\frac 1{n} \sigmaum_{m=1}^n \mathrm{imp}_{n} (m)$ converges almost surely to $\lambda+1$.\sigmamallskip The analogue of $p_n$ is the \emph{impact measure} given by $$\Xi_n= \frac 1{n} \sigmaum_{m=1}^n \mathrm{imp}_{n} (m)\, \deltata_{F_m}. $$ It measures the contribution of the vertices of a particular fitness to the total impact. \sigmamallskip As observed in \cite{BiBa01} and verified for a different variant of the model in \cite{BCDR07}, network models with fitness show a phase transition similar to Bose-Einstein condensation. The verification of this phase transition in the variant considered here is conducted in \cite{DO12}. \sigmamallskip For \emph{adaptive} normalisation two regimes can be observed \begin{enumerate} \item[{[}FGR{]}] $ \int \frac 1{1-x} \, q(d x)\geq 1+\lambda$: the \emph{fit-get-richer phase},\\[-2mm] \item[{[}BE{]}] $\int \frac 1{1-x} \, q(d x)<1+\lambda$: the \emph{Bose-Einstein phase} or \emph{innovation-pays-off phase}. 
\end{enumerate} In the fit-get-richer phase, the random measures $(\Xi_n)_{n\in\mathbb N}$ converge almost surely in the weak topology to the measure $\Xi$ on $(0,1]$ given by $$\Xi(d x)= \frac{ \lambda^*}{\lambda^*-x}\, q(d x),$$ where $\lambda^*\in[1,\infty)$ denotes the unique solution to $$ \int \frac {\lambda^*}{\lambda^*-x} \, q(d x)= 1+\lambda,$$ whereas, in the Bose-Einstein phase, one observes convergence to $$\Xi(d x)= \frac 1{1-x} \, q(d x) + \Bigl(1+\lambda-\int \frac 1{1-y} \, q(d y)\Bigr) \deltata_1(d x).$$ \sigmamallskip In order to analyse the emergence of the condensation phenomenon, we consider the preferential attachment model with \emph{deterministic} normalisation. We assume that $q$ is regularly varying at~$1$ with representation $$ q(1-h,1)= h^\alpha \,\ell(h), $$ where $\ell:[0,1]{\tau}o(0, \infty)$ is a slowly varying function. In order to replicate the Bose-Einstein phenomenon in the model with deterministic normalisation, one needs to choose $(Z_n)$ appropriately. For $1\leq m\leq n$, let $$ \Upsilon[ m,n] := \sigmaum_{k=\lfloor m\rfloor}^{\lfloor n\rfloor} \frac {1-Z_k}k. $$ The Bose-Einstein phenomenon can be replicated by choosing $(Z_n)$ such that $$ 1- Z_n\sigmaim \alpha (\log n)^{-1} $$ and such that the limit \begin{align}\label{eq1904-1} \gamma:=\lim_{n{\tau}o\infty} \frac {\alpha}{\alpha-1} \Gamma(\alpha) \,\frac{(\log n)^\alpha \cdot \log (\log n)^\alpha}{\ell((\log n)^{-1})} \,\exp\{ \Upsilon[\log n, n]\} \end{align} exists. We stress that such a normalisation can be found for various fitness distributions $q$ and we refer the reader to the article~\cite{D12} for the details. \begin{theorem}Under the above assumptions, one has, for $x>0$, $$ \lim_{n{\tau}o\infty} \Xi_n\Bigl(1-\frac x{\log n},1\Bigr) = \frac{\gamma}{\Gamma(\alpha)}\int_0^x y^{\alpha-1} e^{-y} \, dy, {\tau}ext{ in probability}. 
$$ For any measurable set $A\sigmaubset [0,1]$ with $1\not\in \partial A$, one has $$ \lim_{n{\tau}o\infty} \Xi_n(A) = \Xi(A), {\tau}ext{ in probability}, $$ for the measure $\Xi$ on $[0,1]$ given by $$ \Xi(dx) = \frac 1{1-x} \,q(dx) + \gamma \,\deltata_1(dx). $$ \end{theorem} \begin{rem} {\rm In most cases one cannot give an explicit representation for a normalisation $(Z_n)$ satisfying~(\ref{eq1904-1}). On first sight, this might be suprising since the $(Z_n)$ play a r\^ole analogous to $(W_n)$ in the Kingman model where the analysis is feasible. The difference of both models comes from the stochastic nature of the network model. In order to analyse the network model one could start to work with expectations resulting in a mean field model similar to the Kingman model. However, the expectations for $\Xi_n$ are dominated by configurations that are not seen in typical realisations: vertices of particular high fitness that are born very early contribute most although being not present typically. To compensate this the normalisations in the network model have to be slightly smaller than a mean field model would suggest. Vertices of particularly high fitness have an impact only with a delay. This causes the $\Upsilon[\log n,n]$ term in (\ref{eq1904-1}) and makes explicit representations for $(Z_n)$ in many cases unfeasible.} \end{rem} \sigmamallskip We conclude our discussion with the remark that the case of \emph{unbounded fitness distribution} is also of considerable interest. In this case Park and Krug~\cite{PK08} have studied the analogue of Kingman's model and (in a particular case) observed emergence of a travelling wave of Gaussian shape. They also conjecture that this behaviour is of universal nature. \ \\[-1mm] {\bf Acknowledgments:} The second author acknowledges useful discussions with Daniel Ueltschi at the Oberwolfach workshop \emph{Interplay of analysis and probability in physics}, January 2012. 
We would like to thank Marcel Ortgiese for agreeing to include a preview of~\cite{DO12} in our discussion. \end{document}
\begin{document} \rhead{\thepage} \lhead{\author} \thispagestyle{empty} \raggedbottom \pagenumbering{arabic} \setcounter{section}{0} \title{Characterizing Dehn surgeries on links via trisections} \date{\today} \author{Jeffrey Meier} \address{Department of Mathematics, Indiana University, Bloomington, IN 47408} \email{[email protected]} \urladdr{http://pages.iu.edu/~jlmeier} \author{Alexander Zupan} \address{Department of Mathematics, University of Nebraska-Lincoln, Lincoln, NE 68588} \email{[email protected]} \urladdr{http://www.math.unl.edu/~azupan2} \begin{abstract} We summarize and expand known connections between the study of Dehn surgery on links and the study of trisections of closed, smooth 4-manifolds. In addition, we describe how the potential counterexamples to the Generalized Property R Conjecture given by Gompf, Scharlemann, and Thompson yield genus four trisections of the standard four-sphere that are unlikely to be standard. Finally, we give an analog of the Casson-Gordon Rectangle Condition for trisections that can be used to obstruct reducibility of a given trisection. \end{abstract} \title{Characterizing Dehn surgeries on links via trisections} \section{Outline}\label{sec:outline} The purpose of this note is to use both new and existing results to make clear the significant role of the trisection theory of smooth 4-manifolds in the classification of Dehn surgeries on links. The theory of Dehn surgery on knots has been thoroughly developed over the past forty years. In general, this research has focused on two major questions: First, which manifolds can be obtained by a surgery on a knot in a given manifold $Y$? Second, given a pair of manifolds $Y$ and $Y'$, for which knots $K \subset Y$ does there exist a surgery to $Y'$?
These two questions have contributed to the growth of powerful tools in low-dimensional topology, such as sutured manifold theory, the notion of thin position, and Heegaard Floer homology. For example, over the last 15 years, the Heegaard Floer homology theories of Ozsv\'ath and Szab\'o have dramatically deepened our collective understanding of Dehn surgeries on knots (see, for instance,~\cite{Ozsvath-Szabo_Lectures_2006}). If we replace the word ``knot'' with ``link'' in the preceding paragraph, the situation changes significantly; for example, the classical Lickorish-Wallace Theorem asserts that every 3-manifold $Y$ can be obtained by surgery on a link in $S^3$~\cite{Lickorish_A-representation_1962,Wallace_Modifications_1960}. For the second general question, concerning which links in a given 3-manifold $Y$ yield a surgery to another given 3-manifold $Y'$, we observe the following basic fact: Two framed links that are handleslide equivalent surger to the same 3-manifold~\cite{Kirby_A-calculus_1978}. Thus, surgery classification of links is necessarily considered up to handleslide equivalence, and tools which rely on the topology of a knot exterior $S^3 \setminus \nu(K)$ are not nearly as useful, since handleslides can significantly alter this topology. Understanding link surgeries in particular 3-manifolds is intimately connected to smooth 4-manifold topology. Every smooth 4-manifold $X$ can be described by a handle decomposition, characterized by the attaching link $L$ for the 2-handles, which is contained in the boundary of the union of the 0- and 1-handles. In other words, $X$ is associated to a framed link $L \subset \#^k(S^1 \times S^2)$ such that Dehn surgery on $L$ yields $\#^{k'} (S^1 \times S^2)$. Conversely, such a link $L$ (which we will call \emph{admissible}) describes a handle decomposition of a smooth 4-manifold, which we denote $X_L$.
Thus, classifying all such surgeries would be equivalent to classifying all smooth 4-manifolds. Clearly, this is an insurmountable task, but to make the problem more tractable, we consider various restrictions placed on the parameters $k$, $k'$, and $n$, where $n$ represents the number of components of the link $L$. For example, let $k = k' = 0$. In the case that $n=1$, Gordon and Luecke proved that knots are determined by their complements, and thus the only knot in $S^3$ that admits an integral $S^3$ surgery (a \emph{cosmetic surgery}) is a $\pm 1$-framed unknot~\cite{Gordon-Luecke_Complements_1989}. In this paper, we will describe the proof of the following theorem from~\cite{Meier-Zupan_Genus-two_2017}. \begin{theorem}\label{main1} If $L \subset S^3$ is a two-component link with tunnel number one with an integral surgery to $S^3$, then $L$ is handleslide equivalent to a 0-framed Hopf link or $\pm 1$-framed unlink. \end{theorem} Another significant case occurs when $k = 0$ and $k' = n$. In other words, we wish to understand $n$-component links in $S^3$ with surgeries to $\#^n (S^1 \times S^2)$. We call such a link $L$ an \emph{R-link}, noting that R-links correspond precisely to the collection of geometrically simply-connected homotopy 4-spheres, i.e. homotopy 4-spheres built without 1-handles. The Generalized Property R Conjecture (GPRC), Kirby Problem 1.82~\cite{Kirby_Problems_1978}, contends that every R-link is handleslide equivalent to a 0-framed unlink. The conjecture is known to be true in the case $n=1$ via Gabai's proof of Property R~\cite{Gabai_FoliationsIII_1987}. In~\cite{Meier-Schirmer-Zupan_Classification_2016}, the authors, in collaboration with Trent Schirmer, proved a stable version of the GPRC for a class of links.
\begin{theorem}\label{main2} If $L \subset S^3$ is an $n$-component R-link with tunnel number $n$, then the disjoint union of $L$ with a 0-framed unknot is handleslide equivalent to a 0-framed unlink. \end{theorem} As foreshadowed above, the proofs of these theorems are 4-dimensional in nature, utilizing a prominent new tool: \emph{trisections} of smooth 4-manifolds. A trisection is a decomposition of a 4-manifold $X$ into three simple pieces, a 4-dimensional version of a 3-dimensional Heegaard splitting. Elegantly connecting the two theories, Gay and Kirby proved that every smooth 4-manifold admits a trisection, and every pair of trisections for a given 4-manifold have a common stabilization~\cite{Gay-Kirby_Trisecting_2016}, mirroring the Reidemeister-Singer Theorem~\cite{Reidemeister_Zur-dreidimensionalen_1933, Singer_Three-dimensional_1933} in dimension three. Unlike Heegaard splittings, however, the stabilization operation of Gay and Kirby can be broken into three separate operations, called \emph{unbalanced stabilizations} of types 1, 2, and~3~\cite{Meier-Schirmer-Zupan_Classification_2016}. A trisection is said to be \emph{standard} if it is an unbalanced stabilization of the genus zero trisection of $S^4$, and thus, every trisection of $S^4$ becomes standard after some number of Gay-Kirby stabilizations. In Section~\ref{sec:R-stab}, we describe a process by which an R-link $L$ paired with an \emph{admissible} Heegaard surface $\Sigma$ for its exterior is converted to a trisection $\mathcal T(L,\Sigma)$ of the 4-manifold $X_L$. The new main result of this paper is a technical theorem that connects R-links to properties of these trisections. The terms \emph{$\{2\}$--standard and $\{2,3\}$--standard} refer to trisections that become standard after allowing restricted types of unbalanced stabilizations; we will postpone the rigorous definitions for now.
\begin{theorem}\label{thm:equiv} Suppose $L$ is an R-link and $\Sigma$ is any admissible surface for $L$. {\begin{enumerate} \item If $L$ satisfies the GPRC, then $\mathcal T(L,\Sigma)$ is $\{2\}$--standard. \item The link $L$ satisfies the Stable GPRC if and only if $\mathcal T(L,\Sigma)$ is $\{2,3\}$--standard. \end{enumerate}} \end{theorem} In Section~\ref{sec:GST}, we analyze examples of Gompf-Scharlemann-Thompson, the most prominent possible counterexamples to the GPRC. The first step in a program to disprove the GPRC via Theorem~\ref{thm:equiv} is to find low-genus admissible surfaces for these links, along with diagrams for their induced trisections. We outline this process; extensions of Section~\ref{sec:GST} will appear in forthcoming work~\cite{Meier-Zupan_Fibered_}. In Section~\ref{sec:rect}, we introduce an analog of the Casson-Gordon Rectangle Condition~\cite{Casson-Gordon_Reducing_1987} for trisection diagrams, giving a sufficient condition for a trisection diagram to correspond to an irreducible trisection. \subsection*{Acknowledgements}\ The first author is supported by NSF grants DMS-1400543 and DMS-1664540, and the second author is supported by NSF grant DMS-1664578 and NSF-EPSCoR grant OIA-1557417. \section{Trisections and admissible links}\label{sec:trisections} All manifolds are connected and orientable, unless otherwise stated. We will let $\nu( \cdot )$ refer to an open regular neighborhood in an ambient manifold that should be clear from context. The \emph{tunnel number} of a link $L \subset Y$ is the cardinality of the smallest collection of arcs $a$ with the property that $Y \setminus \nu(L \cup a)$ is a handlebody. In this case, $\partial \nu(L \cup a)$ is a \emph{Heegaard surface} cutting $Y \setminus \nu(L)$ into a handlebody and a compression body. A \emph{framed} link refers to a link with an integer framing on each component.
Let $L$ be a framed link in a 3-manifold $Y$, and let $a$ be a framed arc connecting two distinct components of $L$, call them $L_1$ and $L_2$. The framings of $L_1$, $L_2$ and $a$ induce an embedded surface $S \subset Y$, homeomorphic to a pair of pants, such that $L_1 \cup L_2 \cup a$ is a core of $S$. Note that $S$ has three boundary components, two of which are isotopic to $L_1$ and $L_2$. Let $L_3$ denote the third boundary component, with framing induced by $S$. If $L'$ is the framed link $(L \setminus L_1) \cup L_3$, we say that $L'$ is obtained from $L$ by a \emph{handleslide} of $L_1$ over $L_2$ along $a$. If two links are related by a finite sequence of handleslides, we say they are \emph{handleslide equivalent}. It is well-known that Dehn surgeries on handleslide equivalent framed links yield homeomorphic 3-manifolds~\cite{Kirby_A-calculus_1978}. Recall that an R-link is an $n$-component link in $S^3$ with a Dehn surgery to the manifold $\#^n(S^1 \times S^2)$, which we henceforth denote by $Y_n$. Let $U_n$ denote the $n$-component zero-framed unlink in $S^3$. If an R-link $L$ is handleslide equivalent to $U_n$, we say that $L$ has \emph{Property R}. If the split union $L \sqcup U_r$ is handleslide equivalent to $U_m$ for some integers $r$ and $m$, we say that $L$ has \emph{Stable Property R}. The following conjectures are well-known; the first is Kirby Problem 1.82~\cite{Kirby_Problems_1978}. \begin{GPRC} Every R-link has Property R. \end{GPRC} \begin{SGPRC} Every R-link has Stable Property R. \end{SGPRC} In this section, we explore the relationship between R-links (and a more general family we call admissible links) and trisections of the smooth 4-manifolds that can be constructed from these links. Let $X$ be a smooth, orientable, closed 4-manifold.
A \emph{$(g;k_1,k_2,k_3)$--trisection} $\mathcal T$ of $X$ is a decomposition $X = X_1\cup X_2\cup X_3$ such that \begin{enumerate} \item Each $X_i$ is a four-dimensional 1--handlebody, $\natural^{k_i}(S^1\times B^3)$; \item If $i\not=j$, then $H_{ij} = X_i\cap X_j$ is a three-dimensional handlebody, $\natural^g(S^1\times D^2)$; and \item The common intersection $\Sigma = X_1\cap X_2\cap X_3$ is a closed genus $g$ surface. \end{enumerate} The surface $\Sigma$ is called the \emph{trisection surface}, and the parameter $g$ is called the \emph{genus} of the trisection. The trisection $\mathcal T$ is called \emph{balanced} if $k_1=k_2=k_3=k$, in which case it is called a \emph{$(g;k)$--trisection}; otherwise, it is called \emph{unbalanced}. We call the union $H_{12}\cup H_{23}\cup H_{31}$ the \emph{spine} of the trisection. In addition, we observe that $\partial X_i = Y_{k_i} = H_{ij} \cup_\Sigma H_{li}$ is a genus $g$ Heegaard splitting. Because there is a unique way to cap off $Y_{k_i}$ with $\natural^{k_i}(S^1\times B^3)$~\cite{Laudenbach-Poenaru_A-note_1972,Montesinos_Heegaard_1979}, every trisection is uniquely determined by its spine. Like Heegaard splittings, trisections can be encoded with diagrams. A \emph{cut system} for a genus $g$ surface $\Sigma$ is a collection of $g$ pairwise disjoint simple closed curves that cut $\Sigma$ into a $2g$-punctured sphere. A cut system $\delta$ is said to \emph{define a handlebody} $H_{\delta}$ if each curve in $\delta$ bounds a disk in $H_{\delta}$. A triple $(\alpha,\beta,\gamma)$ of cut systems is called a \emph{$(g;k_1,k_2,k_3)$--trisection diagram} for $\mathcal T$ if $\alpha$, $\beta$, and $\gamma$ define the components $H_{\alpha}, H_{\beta}$, and $H_{\gamma}$ of the spine of $\mathcal T$. We set the convention that $H_{\alpha} = X_3\cap X_1$, $H_{\beta} = X_1\cap X_2$, and $H_{\gamma} = X_2\cap X_3$.
The careful reader will note that this convention differs slightly from~\cite{Meier-Schirmer-Zupan_Classification_2016}. With these conventions, $(\alpha,\beta)$, $(\beta,\gamma)$, and $(\gamma,\alpha)$ are Heegaard diagrams for $Y_{k_1}$, $Y_{k_2}$, and $Y_{k_3}$, respectively. In~\cite{Gay-Kirby_Trisecting_2016}, Gay and Kirby prove that every smooth 4-manifold admits a trisection, and trisection diagrams, modulo handle slides within the three collections of curves, are in one-to-one correspondence with trisections. \begin{examples}\label{exs:trisections} Trisections with genus at most two are well-understood. See Figure~\ref{fig:Diags}. \begin{enumerate} \item There is a unique genus zero trisection; the $(0,0)$--trisection describing $S^4$. \item There are exactly six genus one trisections. Both $\mathbb{C}P^2$ and $\overline{\mathbb{C}P}^2$ admit $(1;0)$--trisections; $S^1\times S^3$ admits a $(1;1)$--trisection; and $S^4$ admits three unbalanced genus one trisections. \item There is a unique irreducible (defined below) genus two trisection~\cite{Meier-Schirmer-Zupan_Classification_2016,Meier-Zupan_Genus-two_2017}, which describes $S^2\times S^2$. \end{enumerate} \end{examples} Given trisections $\mathcal T$ and $\mathcal T'$ for 4-manifolds $X$ and $X'$, we can obtain a trisection for $X\#X'$ by removing a neighborhood of a point in each trisection surface and gluing pairs of components of $\mathcal T$ and $\mathcal T'$ along the boundary of this neighborhood. The resulting trisection is uniquely determined in this manner; we denote it by $\mathcal T\#\mathcal T'$. A trisection $\mathcal T$ is called \emph{reducible} if $\mathcal T = \mathcal T'\#\mathcal T''$, where neither $\mathcal T'$ nor $\mathcal T''$ is the genus zero trisection; otherwise, it is called \emph{irreducible}.
Equivalently, $\mathcal T$ is reducible precisely when there exists a curve $\delta$ in $\Sigma$ that bounds compressing disks in $H_{\alpha}$, $H_{\beta}$, and $H_{\gamma}$. Such a curve $\delta$ represents the intersection of a decomposing 3-sphere with the trisection surface. \begin{figure} \caption{Low-genus trisection diagrams. Top, from left to right: the genus one diagrams $\mathcal S_1$, $\mathcal S_2$, and $\mathcal S_3$ for $S^4$. Bottom, from left to right: the genus one diagrams for $\mathbb{C}P^2$, $\overline{\mathbb{C}P}^2$, and $S^1\times S^3$.} \label{fig:Diags} \end{figure} In dimension three, stabilization of a Heegaard surface may be viewed as taking the connected sum with the genus one splitting of $S^3$, and a similar structure exists for trisections. Let $\mathcal S_i$ denote the unique genus one trisection of $S^4$ satisfying $k_i=1$. Diagrams for these three trisections are shown in Figure~\ref{fig:Diags}. A trisection $\mathcal T$ is called \emph{$i$--stabilized} if $\mathcal T = \mathcal T'\#\mathcal S_i$, and is simply called \emph{stabilized} if it is $i$--stabilized for some $i=1,2,3$. Two trisections $\mathcal T'$ and $\mathcal T''$ are called \emph{stably equivalent} if there is a trisection $\mathcal T$ that is a stabilization of both $\mathcal T'$ and $\mathcal T''$. Gay and Kirby proved that any two trisections of a fixed 4-manifold are stably equivalent~\cite{Gay-Kirby_Trisecting_2016}. We say that a trisection $\mathcal T$ is \emph{standard} if $\mathcal T$ can be expressed as the connected sum of the trisections listed in Examples~\ref{exs:trisections}. Theorems in~\cite{Meier-Schirmer-Zupan_Classification_2016,Meier-Zupan_Genus-two_2017} classify trisections of genus two. \begin{theorem}\label{thm:g2standard} Every trisection $\mathcal T$ with genus $g =2$ is standard. \end{theorem} Below, we see how this theorem implies Theorem~\ref{main1}, and for this purpose, we turn our attention to surgery on links.
\subsection{Admissible links and surfaces}\label{subsec:admissible}\ Recall that $Y_k$ denotes $\#^k(S^1 \times S^2)$, and let $L$ be a framed $n$--component link in $Y_k$ such that Dehn surgery on $L$ yields $Y_{k'}$. We call such a link \emph{admissible}. If $L$ is an admissible link, $L$ describes a closed 4-manifold $X_L$ with a handle decomposition with $k$ 1--handles, $n$ 2--handles, and $k'$ 3--handles. An \emph{admissible} Heegaard surface $\Sigma$ for $L$ is a Heegaard surface cutting $Y_k$ into two handlebodies $H$ and $H'$, such that a core of $H$ contains $L$. As such, $C = H \setminus \nu(L)$ is a compression body and $\Sigma$ may be viewed as a Heegaard surface for the link exterior $E(L) = Y_k \setminus \nu(L)$. Let $H_L$ be the handlebody that results from Dehn filling $C$ (or performing Dehn surgery on $L$ in $H$) along the framing of the link $L$. An \emph{admissible pair} consists of an admissible link together with an admissible Heegaard surface. For completeness, we will also allow the empty link, $L = \emptyset$. An admissible surface $\Sigma$ for the empty link is a (standard) genus $g$ Heegaard surface for $Y_k$. A genus $g$ Heegaard diagram $(\alpha,\beta)$ for $Y_k$ is called \emph{standard} if $\alpha \cap \beta$ contains $k$ curves, and the remaining $g-k$ curves occur in pairs that intersect once and are disjoint from other pairs. A trisection diagram is called \emph{standard} if each pair is a standard Heegaard diagram. \begin{lemma}\label{lem:constr} Let $L$ be an admissible $n$-component link in $Y_k$. Every admissible pair $(L,\Sigma)$ gives rise to a trisection $\mathcal T(L,\Sigma)$ with spine $H' \cup H \cup H_L$. If $g(\Sigma) = g$, then $\mathcal T(L,\Sigma)$ is a $(g;k,g-n,k')$-trisection.
Moreover, there is a trisection diagram $(\alpha,\beta,\gamma)$ for $\mathcal T(L,\Sigma)$ such that \begin{enumerate} \item $H_{\alpha} = H'$, $H_{\beta} = H$, and $H_{\gamma} = H_L$; \item $L$ is a sublink of $\gamma$, where $\gamma$ is viewed as a link framed by $\Sigma$ in $Y_k = H_{\alpha} \cup H_{\beta}$; and \item $(\beta,\gamma)$ is a standard diagram for $Y_{g-n}$, where $\beta \cap \gamma = \gamma \setminus L$. \end{enumerate} \end{lemma} \begin{proof} This is proved (in slightly different formats) for $L \neq \emptyset$ in both~\cite{Gay-Kirby_Trisecting_2016} and~\cite{Meier-Schirmer-Zupan_Classification_2016}. If $L = \emptyset$, then it follows easily that $X_L$ has a handle decomposition without 2-handles, $H = H_L$, and $H' \cup H \cup H_L$ is the spine for the $(g;k,g,k)$-trisection $\mathcal T(L,\Sigma)$ of $X_L$. In this case, there is a diagram such that $\beta = \gamma$, the standard genus $g$ diagram of $Y_g$. \end{proof} This machinery is enough to prove Theorem~\ref{main1}, classifying cosmetic surgeries on tunnel number one links in $S^3$. Note that the conventions $H_{\alpha} = H'$, $H_{\beta} = H$, and $H_{\gamma} = H_L$ agree with our earlier conventions identifying the union of the 0--handle and the 1--handles with $X_1$, the trace of the Dehn surgery on $H_{\beta}$ along $L$ with $X_2$, and the union of the 3--handles and the 4--handle with $X_3$. \begin{proof}[Proof of Theorem~\ref{main1}] Suppose $L \subset S^3$ is a tunnel number one link with an integral Dehn surgery to $S^3$. Then there exists an admissible surface $\Sigma \subset S^3$ and a genus two trisection $\mathcal T(L,\Sigma)$ with a diagram $(\alpha,\beta,\gamma)$, where $H_L = H_{\gamma}$. By Lemma~\ref{lem:constr}, $\mathcal T(L,\Sigma)$ is a $(2,0)$-trisection, the two curves in $\gamma$ are isotopic to the link $L$ in $S^3 = H_{\alpha} \cup H_{\beta}$, and the surface framing of $\gamma$ in $\Sigma$ is the framing of $L$.
By Theorem~\ref{thm:g2standard}, the trisection $\mathcal T(L,\Sigma)$ of $X_L$ is standard, and $(\alpha,\beta,\gamma)$ is handleslide equivalent to a standard diagram $(\alpha',\beta',\gamma')$. Since $\mathcal T(L,\Sigma)$ is a $(2,0)$-trisection, $X_L$ is diffeomorphic to either $S^2 \times S^2$ or $\pm \mathbb{C}P^2 \# \pm \mathbb{C}P^2$. In the first case, $\gamma$ is handleslide equivalent to $\gamma'$, which is a zero-framed Hopf link in $S^3 = H_{\alpha} \cup H_{\beta}$. In the second case, $\gamma$ is handleslide equivalent to $\gamma'$, a 2-component unlink with framings $\pm 1$ and $\pm 1$, completing the proof. \end{proof} We now turn our attention to R-links. Note that if $L$ is an R-link, then the smooth 4-manifold $X_L$ has a handle decomposition with no 1-handles, $n$ 2-handles, and $n$ 3-handles; thus $X_L$ is a simply connected 4-manifold with $\chi(X_L) = 2$, so that $X_L$ is a homotopy $S^4$. We describe an immediate connection between Stable Property R and trisections in the next lemma. \begin{lemma}\label{lem:stdslide} Suppose $L$ is an R-link with admissible surface $\Sigma$ and $\mathcal T(L,\Sigma)$ is a standard trisection of $S^4$. Then $L$ has Stable Property R. \end{lemma} \begin{proof} By Lemma~\ref{lem:constr}, the trisection $\mathcal T(L,\Sigma)$ has a diagram $(\alpha,\beta,\gamma)$ such that $(\beta,\gamma)$ is the standard Heegaard diagram for $Y_{g-n}$. Viewing $\gamma$ as a $g$-component link in $S^3 = H_{\alpha} \cup H_{\beta}$, we have that $(g-n)$ curves in $\gamma$ bound disks in $H_{\beta}$, while the remaining $n$ curves are isotopic to $L$ (and are disjoint from the $(g-n)$ disks). Thus, as a link in $S^3$, we have $\gamma = L \sqcup U_{g-n}$. In addition, the trisection $\mathcal T(L,\Sigma)$ is a standard $(g;0,g-n,n)$-trisection of $S^4$ by hypothesis.
As such, it must be a connected sum of $g-n$ copies of $\mathcal S_2$ and $n$ copies of $\mathcal S_3$, and it has a standard diagram, $(\alpha',\beta',\gamma')$, where $g-n$ curves in $\gamma'$ are also curves in $\beta'$, and the remaining $n$ curves are also curves in $\alpha'$. Thus, in $S^3 = H_{\alpha'} \cup H_{\beta'}$, the curves $\gamma'$ comprise a $g$-component unlink, with surface framing equal to the zero framing on each component. Since $(\alpha,\beta,\gamma)$ and $(\alpha',\beta',\gamma')$ are trisection diagrams for the same trisection, we have that $\gamma$ is handleslide equivalent to $\gamma'$ via slides contained in $\Sigma$. Thus, $\gamma$ and $\gamma'$ are handleslide equivalent links in $S^3$. We conclude that $L$ has Stable Property R, as desired. \end{proof} Theorem~\ref{main2} can be quickly proved using this lemma and the following result from~\cite{Meier-Schirmer-Zupan_Classification_2016} as its main input. \begin{theorem}\label{thm:msz} Every $(g;0,1,g-1)$-trisection is a standard trisection of $S^4$. \end{theorem} \begin{proof}[Proof of Theorem~\ref{main2}] Suppose $L \subset S^3$ is an $n$-component link with tunnel number $n$ with a Dehn surgery to $\#^n(S^1 \times S^2)$. Then by Lemma~\ref{lem:constr} there exists an admissible surface $\Sigma \subset S^3$ and an $(n+1;0,1,n)$-trisection $\mathcal T(L,\Sigma)$. By Theorem~\ref{thm:msz}, the trisection $\mathcal T(L,\Sigma)$ is standard, and by Lemma~\ref{lem:stdslide}, $L$ has Stable Property R. In fact, the proof of Lemma~\ref{lem:stdslide} reveals that $L \sqcup U_1$ has Property R, as desired. \end{proof} \section{R-links and stabilizations}\label{sec:R-stab} In order to prove the third main theorem, we will further develop the connection between R-links, their induced trisections, and the various stabilization operations.
\begin{lemma}\label{lem:emp} If $L = \emptyset$ in $Y_k$ and $g(\Sigma) = g$, then $X_L = \#^k(S^1 \times S^3)$, and $\mathcal T(\emptyset,\Sigma)$ is the connected sum of $k$ copies of the standard $(1;1)$-trisection of $S^1 \times S^3$ and $g-k$ copies of $\mathcal S_2$. \end{lemma} \begin{proof} By Waldhausen's Theorem~\cite{Waldhausen_Heegaard-Zerlegungen_1968}, $Y_k$ has a standard Heegaard diagram, $(\alpha,\beta)$, and by Lemma~\ref{lem:constr}, $(\alpha,\beta,\beta)$ is a trisection diagram for $\mathcal T(\emptyset,\Sigma)$. The $k$ curves in $\alpha \cap \beta$ give rise to $k$ summands of the standard genus one splitting of $S^1 \times S^3$, and the remaining $g-k$ pairs give rise to $g-k$ copies of $\mathcal S_2$. \end{proof} In order to understand operations on an admissible link $L$ and Heegaard surface $\Sigma$ which will correspond to various stabilizations of $\mathcal T(L,\Sigma)$, we introduce several additional definitions. Let $(L_1,\Sigma_1) \subset Y_{k_1}$ and $(L_2,\Sigma_2) \subset Y_{k_2}$ be any admissible pairs, and define the operation $\ast$ by \[ (L_1,\Sigma_1) \ast (L_2,\Sigma_2) = (L_1 \sqcup L_2, \Sigma_1 \# \Sigma_2),\] where the connected sum is taken so that $L_1 \sqcup L_2$ is not separated by the surface $\Sigma_1 \# \Sigma_2$. Note that $(L_1,\Sigma_1) \ast (L_2,\Sigma_2) \subset Y_{k_1+k_2}$. \begin{lemma}\label{lem:sum} If $(L_1,\Sigma_1)$ and $(L_2,\Sigma_2)$ are admissible pairs, then $(L,\Sigma) = (L_1,\Sigma_1) \ast (L_2,\Sigma_2)$ is an admissible pair, and $\mathcal T(L,\Sigma) = \mathcal T(L_1,\Sigma_1) \# \mathcal T(L_2,\Sigma_2)$. \end{lemma} \begin{proof} It is clear that the framed link $L_1 \sqcup L_2 \subset Y_{k_1+k_2}$ has the appropriate surgery. Suppose $\Sigma_i$ bounds a handlebody $H_i$ with core $C_i$ containing $L_i$.
Then there is a core $C$ for $H_1 \natural H_2$ such that $L_1 \sqcup L_2 \subset C_1 \sqcup C_2 \subset C$, and thus $\Sigma_1 \# \Sigma_2$ is admissible as well. For the second claim, note that the curve $\delta$ arising from the connected sum $\Sigma = \Sigma_1 \# \Sigma_2$ is a reducing curve for $\mathcal T(L,\Sigma)$, splitting it into the trisections $\mathcal T(L_1,\Sigma_1)$ and $\mathcal T(L_2,\Sigma_2)$. \end{proof} Let $U$ be a 0-framed unknot in $S^3$, and let $\Sigma_U$ be the genus one splitting of $S^3$ such that one of the solid tori bounded by $\Sigma_U$ contains $U$ as a core. In addition, let $W$ denote the knot $S^1 \times \{\text{pt}\} \subset S^1 \times S^2$ with framing given by the fibering, and let $\Sigma_W$ be the genus one splitting of $S^1 \times S^2$ such that one of the solid tori bounded by $\Sigma_W$ contains $W$ as a core. Note that both $(U,\Sigma_U)$ and $(W,\Sigma_W)$ are admissible pairs. Finally, let $\Sigma_{\emptyset}$ be the genus one Heegaard surface for $S^3$, to be paired with the empty link. \begin{lemma}\label{lem:g1} The links $(W,\Sigma_W)$, $(\emptyset,\Sigma_{\emptyset})$, and $(U,\Sigma_U)$ yield the following trisections: {\begin{enumerate} \item $\mathcal T(W,\Sigma_W) = \mathcal S_1$. \item $\mathcal T(\emptyset,\Sigma_{\emptyset}) = \mathcal S_2$. \item $\mathcal T(U,\Sigma_U) = \mathcal S_3$. \end{enumerate}} \end{lemma} \begin{proof} First, note that each trisection in question has genus one. Since framed surgery on $W \subset Y_1$ yields $S^3$, by Lemma~\ref{lem:constr}, $\mathcal T(W,\Sigma_W)$ is a $(1;1,0,0)$--trisection and must be $\mathcal S_1$. Similarly, $\mathcal T(\emptyset,\Sigma_{\emptyset})$ is a $(1;0,1,0)$--trisection and must be $\mathcal S_2$. Finally, framed surgery on $U$ yields $Y_1$, so $\mathcal T(U,\Sigma_U)$ is a $(1;0,0,1)$--trisection and must be $\mathcal S_3$.
\end{proof} By combining Lemmas~\ref{lem:emp},~\ref{lem:sum}, and~\ref{lem:g1}, we obtain \begin{corollary}\label{cor:stab} Suppose $(L,\Sigma)$ is an admissible link, with $\mathcal T = \mathcal T(L,\Sigma)$. {\begin{enumerate} \item $\mathcal T((L,\Sigma) \ast (W,\Sigma_W))$ is the 1--stabilization of $\mathcal T$. \item $\mathcal T((L,\Sigma) \ast (\emptyset,\Sigma_\emptyset))$ is the 2--stabilization of $\mathcal T$. \item $\mathcal T((L,\Sigma) \ast (U,\Sigma_U))$ is the 3--stabilization of $\mathcal T$. \end{enumerate}} In addition, if $\Sigma_+$ is the stabilization of $\Sigma$ (as a Heegaard surface for $Y_k$), then $(L,\Sigma_+) = (L,\Sigma) \ast (\emptyset,\Sigma_{\emptyset})$. \end{corollary} We say that two trisections $\mathcal T_1$ and $\mathcal T_2$ of a 4-manifold $X$ are \emph{2--equivalent} if there is a trisection $\mathcal T$ that is the result of 2--stabilizations performed on both $\mathcal T_1$ and $\mathcal T_2$. \begin{lemma}\label{lem:equiv} If $\Sigma_1$ and $\Sigma_2$ are two distinct admissible surfaces for an admissible link $L$, then the trisections $\mathcal T(L,\Sigma_1)$ and $\mathcal T(L,\Sigma_2)$ are 2--equivalent. \end{lemma} \begin{proof} Since both $\Sigma_1$ and $\Sigma_2$ are Heegaard surfaces for $E(L)$, they have a common stabilization $\Sigma$ by the Reidemeister-Singer Theorem~\cite{Reidemeister_Zur-dreidimensionalen_1933,Singer_Three-dimensional_1933}. By Lemma~\ref{lem:sum}, the surface $\Sigma$ is admissible, and by Corollary~\ref{cor:stab}, $\mathcal T(L,\Sigma)$ can be obtained by 2--stabilizations of $\mathcal T(L,\Sigma_i)$. \end{proof} Since 2--equivalence is an equivalence relation, Lemma \ref{lem:equiv} implies that every admissible surface $\Sigma$ for an admissible link $L$ belongs to the same 2--equivalence class. Hence, $L$ has a well-defined \emph{2--equivalence class}; namely, the 2--equivalence class of $\mathcal T(L,\Sigma)$.
If two admissible links $L_1$ and $L_2$ give rise to 2--equivalent trisections, we say that $L_1$ and $L_2$ are \emph{2--equivalent}. Suppose that $L$ is an $n$-component admissible link with admissible surface $\Sigma$, so that $\Sigma$ cuts $Y_k$ into $H \cup H'$, and $L$ is isotopic into a core $C \subset H$. As such, there is a collection of $n$ compressing disks $\{D\}$ with the property that each disk meets a unique component of $L$ once and misses the other components. We call $\{D\}$ a set of \emph{dualizing disks}. Note that if $(\alpha,\beta,\gamma)$ is the trisection diagram for $\mathcal T(L,\Sigma)$ guaranteed by Lemma~\ref{lem:constr}, then the $n$ disks bounded by the $n$ curves in $\beta$ that are not in $\gamma$ are a set of dualizing disks for $L$. \begin{lemma}\label{lem:slide} If admissible links $L_1$ and $L_2$ are related by a handleslide, then $L_1$ and $L_2$ are 2--equivalent. \end{lemma} \begin{proof} If $L_i$ is an $n$--component link, then $L_1$ and $L_2$ have $n-1$ components in common and differ by a single component, $L_1' \subset L_1$ and $L_2' \subset L_2$, where a slide of $L_1'$ over another component $L'$ of $L_1$ along a framed arc $a$ yields $L_2'$. Consider $\Gamma = L_1 \cup a$, an embedded graph with $n-1$ components, and let $\Sigma$ be a Heegaard surface cutting $S^3$ into $H \cup H'$, where $\Gamma$ is contained in a core of $H$. Then $L_1$ is also contained in a core of $H$, and $\Sigma$ is admissible (with respect to $L_1$). Let $\{D_1\}$ be a set of dualizing disks for $L_1$. A priori, the arc $a$ might meet some of the disks in $\{D_1\}$; however, if this is the case, we can perform a sequence of stabilizations on $\Sigma$, after which $a$ avoids all of the disks $\{D_1\}$. Thus, we suppose without loss of generality that $a \cap \{D_1\} = \emptyset$.
There is an isotopy taking $\Gamma$ into $\Sigma$, preserving the intersections of $L_i$ with the dualizing disks $\{D_1\}$, so that the framing of $\Gamma$ agrees with its surface framing in $\Sigma$. As such, we can perform the handleslide of $L_1'$ over $L'$ along $a$ within the surface $\Sigma$, so that the resulting link $L_2$ is also contained in $\Sigma$, with framing given by the surface framing. Let $D_1' \in \{D_1\}$ be the disk that meets $L_1'$ once, and let $D' \in \{D_1\}$ be the disk that meets $L'$ once. There is an arc $a'$, isotopic in $\Sigma$ to an arc in $\Gamma$, that connects $D_1'$ to $D'$. See Figure~\ref{fig:slide}. Let $D_2'$ be the compressing disk obtained by banding $D_1'$ to $D'$ along $a'$. Then $\{D_2\} = (\{D_1\} \setminus D')\cup D_2'$ is a set of dualizing disks for $L_2$. Thus, by pushing $L_2$ back into $H$, we see that $\Sigma$ is an admissible surface for $L_2$. \begin{figure} \caption{A schematic diagram showing how one can adjust the disk system $\{D_1\}$ to obtain a set of dualizing disks for $L_2$.} \label{fig:slide} \end{figure} Following Lemma~\ref{lem:constr}, let $H_i \cup H_i' \cup H_{L_i}$ be a spine for $\mathcal T(L_i,\Sigma)$. By construction, $H_1 = H_2$ and $H_1' = H_2'$. Finally, since $H_{L_i}$ is obtained by Dehn surgery on $L_i$ in $H_i$, and $L_1$ and $L_2$ are related by a single handleslide, we have $H_{L_1} = H_{L_2}$. It follows that $\mathcal T(L_1,\Sigma) = \mathcal T(L_2,\Sigma)$, and we conclude that $L_1$ and $L_2$ are 2--equivalent. \end{proof} For the rest of the section, we will restrict our attention to admissible links in $S^3$. Let $U_n$ denote the zero-framed, $n$--component unlink, so $X_{U_n} = S^4$. Recall that a \emph{standard trisection} of $S^4$ is the connected sum of copies of $\mathcal S_1$, $\mathcal S_2$, and $\mathcal S_3$. \begin{lemma}\label{lem:unlink} Let $\Sigma$ be any admissible surface for $U_n$, then $\mathcal T(U_n,\Sigma)$ is standard. \end{lemma} \begin{proof} We induct on $(n,g)$ with the dictionary ordering.
If $n=1$, then $E(U_1)$ is a solid torus. If $g=1$, then $\Sigma = \Sigma_U$, so that $\mathcal T(U_1,\Sigma_U) = \mathcal S_3$ by Lemma~\ref{lem:g1}. If $n=1$ and $g > 1$, then $\Sigma$ is stabilized~\cite{lei} (see also~\cite{scharlemann-thompson}), which means that $\mathcal T(U_1,\Sigma)$ is 2--stabilized by Corollary~\ref{cor:stab}, and as such, $\mathcal T(U_1,\Sigma)$ is standard by induction. In general, note that the Heegaard genus of an $n$--component unlink is $n$; thus $g \geq n$ for all possible pairs $(n,g)$. For $n>1$, we have that $E(U_n)$ is reducible, and so Haken's Lemma~\cite{Haken_Some_1968} implies that $\Sigma$ is reducible, splitting into the connected sum of genus $g_1$ and $g_2$ surfaces $\Sigma_1$ and $\Sigma_2$, where $\Sigma_i$ is a Heegaard surface for $E(U_{n_i})$. Then $\mathcal T(U_n,\Sigma) = \mathcal T(U_{n_1},\Sigma_1) \# \mathcal T(U_{n_2},\Sigma_2)$, where $(n_i,g_i) < (n,g)$. Since both summands are standard trisections by induction, it follows that $\mathcal T(U_n,\Sigma)$ is also standard, completing the proof. \end{proof} A trisection $\mathcal T$ is said to be \emph{2--standard} if it becomes standard after some number of 2--stabilizations. Similarly, $\mathcal T$ is \emph{$\{2,3\}$--standard} if it becomes standard after some number of 2-- and 3--stabilizations. \begin{proof}[Proof of Theorem~\ref{thm:equiv}] Suppose $L$ has Property R. By Lemma~\ref{lem:slide}, $L$ and $U_n$ are 2--equivalent links. Thus, $\mathcal T(L,\Sigma)$ is 2--equivalent to some trisection coming from $U_n$, but all trisections induced by $U_n$ are standard by Lemma~\ref{lem:unlink}, and thus $\mathcal T(L,\Sigma)$ becomes standard after a finite sequence of 2--stabilizations. If $L$ has Stable Property R, then $L \sqcup U_n$ has Property R for some $n$, and thus $\mathcal T((L,\Sigma) \ast (U,\Sigma_U) \ast \dots \ast (U,\Sigma_U))$ is 2--standard by the above arguments.
By Lemma~\ref{lem:g1} and Corollary~\ref{cor:stab}, \[\mathcal T((L,\Sigma) \ast (U,\Sigma_U) \ast \dots \ast (U,\Sigma_U)) = \mathcal T(L,\Sigma) \# \mathcal S_3 \# \dots \# \mathcal S_3;\] hence $\mathcal T(L,\Sigma)$ is $\{2,3\}$--standard. Finally, if the trisection $\mathcal T(L,\Sigma)$ is $\{2,3\}$--standard, then there exist integers $s$ and $t$ such that the connected sum of $\mathcal T(L,\Sigma)$ with $s$ copies of $\mathcal S_2$ and $t$ copies of $\mathcal S_3$ is standard. Let $(L_*,\Sigma_*)$ be the admissible pair given by \[ (L_*,\Sigma_*) = (L,\Sigma) \ast \underbrace{(\emptyset,\Sigma_{\emptyset}) \ast \dots \ast (\emptyset,\Sigma_{\emptyset})}_s \ast \underbrace{(U,\Sigma_U) \ast \dots \ast (U,\Sigma_U)}_t.\] By assumption, $\mathcal T(L_*,\Sigma_*)$ is standard, so by Lemma~\ref{lem:stdslide}, the link $L_*$ has Stable Property R. But by definition of $\ast$, we have $L_* = L \sqcup U_t$, and thus $L$ also has Stable Property R, completing the proof. \end{proof} \section{Trisecting the Gompf-Scharlemann-Thompson Examples}\label{sec:GST} Although this view has changed in the past and may change in the future, it is the current view of the authors that the GPRC is likely false. In light of this opinion, we will outline the first steps one might take to employ Theorem~\ref{thm:equiv} to disprove the GPRC or the Stable GPRC. Let $L$ be an R-link with admissible surface $\Sigma$. By Theorem~\ref{thm:equiv}, if $\mathcal T(L,\Sigma)$ is \textbf{not} 2--standard, then $L$ fails to have Property R. Thus, in this section we will show how to take the most promising potential counterexamples to the GPRC and construct admissible surfaces and their corresponding trisections. The possible counterexamples mentioned in the previous paragraph were produced by Gompf-Scharlemann-Thompson~\cite{Gompf-Scharlemann-Thompson_Fibered_2010}, building on work of Akbulut-Kirby~\cite{Akbulut-Kirby_A-potential_1985}.
We will call this family the \emph{GST links}. In order to describe the construction of the GST links, we need several preliminary details. Let $Q$ denote the square knot, the connected sum of the right-handed and left-handed trefoil knots, and let $F$ denote the genus two fiber surface for the square knot. In~\cite{Scharlemann_Proposed_2012}, Scharlemann depicted an elegant way to think about the monodromy corresponding to the fibration of $E(Q)$ by $F$: We may draw $F$ as a topological annulus $A$, such that \begin{itemize} \item a disk $D$ has been removed from $A$, \item each component of $\partial A$ is split into six edges and six vertices, and \item opposite inside edges and opposite outside edges of $\partial A$ are identified to form $F$. \end{itemize} With respect to $A$, the monodromy $\varphi$ is a 1/6th clockwise rotation of $A$, followed by an isotopy of $D$ returning it to its original position. Let $Y_Q$ be the closed 3-manifold obtained by 0-surgery on $Q$, so that $Y_Q$ is a fibered 3-manifold with fiber surface $\wh F$ and monodromy $\wh \varphi$, called the \emph{closed monodromy} of $Q$. Note that the closed monodromy $\wh \varphi$ is an honest 1/6th rotation of the annulus in Figure~\ref{fig:Hexulus}, since, in this case, the puncture has been filled in by the Dehn surgery. Details can be found in~\cite{Gompf-Scharlemann-Thompson_Fibered_2010} and~\cite{Scharlemann_Proposed_2012}, where the following lemma is first proved. \begin{lemma} For every rational number $p/q$ with $q$ odd, there is a family $\{V_{p/q},V'_{p/q},V''_{p/q}\}$ of curves contained in $\wh F$ that are permuted by $\wh \varphi$. \end{lemma} \begin{proof} We may subdivide $A$ into six rectangular regions as shown in Figure~\ref{fig:Hexulus}. It is proved in~\cite{Scharlemann_Proposed_2012} that $\wh F$ is a 3-fold branched cover of a 2-sphere $S$ with four branch points.
By naturally identifying $S$ with the 4-punctured sphere constructed by gluing two unit squares along their edges, there is a unique isotopy class of curve $c_{p/q}$ with slope $p/q$ in $S$. Let $\rho\colon \wh F \rightarrow S$ denote the covering map. Scharlemann proves that $\rho^{-1}(c_{p/q}) = \{V_{p/q},V'_{p/q},V''_{p/q}\}$, and these curves are permuted by $\wh \varphi$. \end{proof} Figure~\ref{fig:Hexulus} shows the three lifts, $V_{3/7}$, $V_{3/7}'$, and $V_{3/7}''$, of the rational curve $3/7$ to the fiber $F$ of the square knot. Note that $\wh \varphi\, ^6$ is the identity map, and $\wh \varphi\,^3$ maps $V_{p/q}$ to itself but with reversed orientation. \begin{figure} \caption{The curves $V_{3/7}$, $V_{3/7}'$, and $V_{3/7}''$ in the fiber surface of the square knot.} \label{fig:Hexulus} \end{figure} Finally, we can define the GST links. The next lemma is also from~\cite{Scharlemann_Proposed_2012}. \begin{lemma} The GST link $L_n$ is handleslide equivalent to $Q \cup V_{n/(2n+1)}$. The R-link $L_n$ has Property R for $n = 0,1,2$ and is not known to have Property R for $n \geq 3$. \end{lemma} For ease of notation, let $V_n = V_{n/(2n+1)}$ and $V_n' = V'_{n/(2n+1)}$, so that $L_n = Q \cup V_n$. Two links $L$ and $L'$ are said to be \emph{stably handleslide equivalent} or just \emph{stably equivalent} if there are integers $n$ and $n'$ so that $L \sqcup U_n$ is handleslide equivalent to $L' \sqcup U_{n'}$. While we can find admissible surfaces for $L_n$, there is a simpler construction for a family of links $L_n'$ stably equivalent to $L_n$ for each $n$, and we note a link $L$ has Stable Property R if and only if every link stably equivalent to $L$ has Stable Property R. \begin{lemma} The link $L_n = Q \cup V_n$ is stably equivalent to $L'_n = V_n \cup V_n'$. \end{lemma} \begin{proof} We will show that both links are stably equivalent to $Q \cup V_n \cup V_n'$. Since $\wh \varphi(V_n) = V_n'$, we have that $V'_n$ is isotopic to $V_n$ in $Y_Q$.
Carrying this isotopy into $S^3$, we see that after some number of handleslides of $V_n'$ over $Q$, the resulting curve $C'$ is isotopic to $V_n$. Now $C'$ can be slid over $V_n$ to produce a split unknot $U_1$, and $Q \cup V_n \cup V_n'$ is handleslide equivalent to $L_n \sqcup U_1$. On the other hand, $V_n$ and $V_n'$ are homologically independent in the genus two surface $F$. Thus, there is a sequence of slides of $Q$ over $V_n$ and $V_n'$ converting it to a split unknot, so $Q \cup V_n \cup V_n'$ is handleslide equivalent to $L_n' \sqcup U_1$ as well. \end{proof} Next, we will define an admissible surface for $L_n'$. Consider a collar neighborhood $F \times I$ of $F$, and let $N \subset S^3$ denote the embedded 3-manifold obtained by crushing $\partial F \times I$ to a single curve. Letting $\Sigma = \partial N$, we see that $\Sigma$ is two copies of $F$, call them $F_0$ and $F_1$, glued along the curve $Q$. \begin{lemma}\label{lem:admiss} Consider $L_n'$ embedded in $F_0$, and push $L_n'$ slightly into $N$. Then $\Sigma$ is an admissible surface for $L_n'$. \end{lemma} \begin{proof} First, $F \times I$ is a genus four handlebody, as is $N$, since $N$ is obtained by crushing the vertical boundary of $F \times I$. Moreover, since the exterior $E(Q)$ is fibered with fiber $F$, we may view this fibering as an open book decomposition of $S^3$ with binding $Q$, and thus $\overline{S^3 \setminus N}$ is homeomorphic to $N$, so that $\Sigma$ is a Heegaard surface for $S^3$. It remains to be seen that there is a core of $N$ containing $L_n'$, but it suffices to show that there is a pair $D_n$ and $D_n'$ of dualizing disks for $L_n'$ in $N$. Note that for any properly embedded arc $a \subset F_0$, there is a compressing disk $D(a)$ for $N$ obtained by crushing the vertical boundary of the disk $a \times I \subset F \times I$.
Let $a_0$ and $a_0'$ be disjoint arcs embedded in $F_0$ such that $a_0$ meets $V_n$ once and avoids $V_n'$, and $a_0'$ meets $V_n'$ once and avoids $V_n$. Then $D(a_0)$ and $D(a_0')$ are dualizing disks for $L_n'$, completing the proof. \end{proof} Lemma~\ref{lem:admiss} does more than simply prove $\Sigma$ is admissible; it provides the key ingredients we need to construct a diagram for $\mathcal T(L_n',\Sigma)$: Let $a_1$ and $a_1'$ denote parallel copies of $a_0$ and $a_0'$, respectively, in $F_1$, so that $\partial D(a_0) = a_0 \cup a_1$ and $\partial D(a_0') = a_0' \cup a_1'$. By Lemma~\ref{lem:constr}, there is a genus four trisection diagram $(\alpha,\beta,\gamma)$ for $\mathcal T(L_n',\Sigma)$ so that \[ \beta_1 = \partial D(a_0) \qquad \beta_2 = \partial D(a_0') \qquad \gamma_1 = V_n \qquad \gamma_2 = V_n'. \] Noting that $(\beta,\gamma)$ defines a genus four splitting of $Y_2$, it follows that any curve disjoint from $\beta_1 \cup \beta_2 \cup \gamma_1 \cup \gamma_2$ that bounds a disk in either of $H_{\beta}$ or $H_{\gamma}$ also bounds in the other handlebody. Let $b_0$ and $b_0'$ denote non-isotopic disjoint arcs in $F_0$ that are disjoint from $a_0 \cup a_0' \cup L_n'$. Then $b_0 \cup b_1$ and $b_0' \cup b_1'$ bound disks in $N$; thus letting \[\beta_3 = \gamma_3 = b_0 \cup b_1 \qquad \beta_4 = \gamma_4 = b_0' \cup b_1',\] we have that $(\beta,\gamma)$ is a standard diagram, corresponding to two of the cut systems in a diagram for $\mathcal T(L_n',\Sigma)$. To find the curves in $\alpha$, let $N' = \overline{S^3 \setminus N}$, and observe that $N'$ also has the structure of $F \times I$ crushed along its vertical boundary, and $\partial N' = \partial N = F_0 \cup F_1$. \begin{figure} \caption{A trisection diagram for $\mathcal T(L_3',\Sigma)$.
The top row shows two copies of $F_0$, along with arcs: $\varphi(a_0)$ and $\varphi(a_0')$ (red), $\varphi(b_0)$ and $\varphi(b_0')$ (pink); $b_0$ and $b_0'$ (dark blue), $b_1$ and $b_1'$ (light blue), and $V_3$ (dark green) and $V_3'$ (light green). The bottom row shows two copies of $F_1$, along with arcs: $a_1$ and $a_1'$ (red and dark blue) and $b_1$ and $b_1'$ (pink and light blue). The surfaces in the top row are identified with those in the bottom row along the oriented puncture. Thus, each column describes the closed genus four surface $\Sigma$. The left column encodes a 4--tuple of curves on this surface, namely, $\alpha$. The right column encodes the 4--tuple $\beta$ (shades of blue), as well as the two curves $\gamma_1$ and $\gamma_2$. The trisection diagram for $\mathcal T(L_3',\Sigma)$ is obtained by overlaying the two columns. (Note that $\gamma_3 = \beta_3$ and $\gamma_4 = \beta_4$.)} \label{fig:GST} \end{figure} One way to reconstruct $S^3$ from $N$ and $N'$, both of which are homeomorphic to crushed products $F \times I$, is to initially glue $F_1 \subset \partial N'$ to $F_1 \subset \partial N$. The result of this initial gluing is again homeomorphic to a crushed product $F \times I$. The second gluing then incorporates the monodromy, so that $F_0 \subset \partial N'$ is glued to $F_0 \subset \partial N$ via $\varphi$. The result of this gluing is that if $a_1$ is an arc in $F_1 \subset N'$ and $D'(a_1)$ is the corresponding product disk in $N'$, then $\partial D'(a_1) = a_1 \cup \varphi(a_0)$, where $a_0$ is a parallel copy of $a_1$ in $F_0$ (using the product structure of $N$). Thus, in order to find curves in $\alpha$, we can choose any four arcs in $F_1$ cutting the surface into a planar component and construct their product disks.
However, if we wish to find a diagram with relatively little complication with respect to the $\beta$ and $\gamma$ curves we have already chosen, it makes sense to choose those four arcs to be $a_1$, $a_1'$, $b_1$, and $b_1'$. Thus, \[ \alpha_1 = a_1 \cup \varphi(a_0) \qquad \alpha_2 = a_1' \cup \varphi(a_0') \qquad \alpha_3 = b_1 \cup \varphi(b_0) \qquad \alpha_4 = b_1' \cup \varphi(b_0').\] We have proved the following: \begin{proposition} The triple $(\alpha,\beta,\gamma)$ forms a $(4;0,2,2)$-trisection diagram for $\mathcal T(L_n',\Sigma)$. \end{proposition} \section{A rectangle condition for trisection diagrams}\label{sec:rect} In the final section, we introduce a tool for potential future use. This tool is an adaptation of the Rectangle Condition for Heegaard diagrams of Casson and Gordon~\cite{Casson-Gordon_Reducing_1987} to the setting of trisection diagrams. A collection of $3g-3$ pairwise disjoint and nonisotopic curves in a genus $g$ surface $\Sigma$ is called a \emph{pants decomposition}, as the curves cut $\Sigma$ into $2g-2$ thrice-punctured spheres, or pairs of pants. A pants decomposition defines a handlebody in the same way a cut system does, although a cut system is a minimal collection of curves defining a handlebody, whereas a pants decomposition necessarily contains superfluous curves. An \emph{extended Heegaard diagram} is a pair of pants decompositions $(\alpha^+,\beta^+)$ determining a Heegaard splitting $H_{\alpha^+} \cup H_{\beta^+}$. An \emph{extended trisection diagram} is a triple of pants decompositions $(\alpha^+,\beta^+,\gamma^+)$ determining the spine $H_{\alpha^+} \cup H_{\beta^+} \cup H_{\gamma^+}$ of a trisection. Suppose that $\alpha^+$ and $\beta^+$ are pants decompositions of $\Sigma$, and let $P_{\alpha^+}$ be a component of $\Sigma \setminus \nu(\alpha^+)$ and $P_{\beta^+}$ a component of $\Sigma \setminus \nu(\beta^+)$.
Let $a_1$, $a_2$, and $a_3$ denote the boundary components of $P_{\alpha^+}$, and $b_1$, $b_2$, and $b_3$ the boundary components of $P_{\beta^+}$. We say that the pair $(P_{\alpha^+},P_{\beta^+})$ is \emph{saturated} if for all $i,j,k,l \in \{1,2,3\}$, $i\neq j$, $k \neq l$, the intersection $P_{\alpha^+} \cap P_{\beta^+}$ contains a rectangle $R_{i,j,k,l}$ with boundary arcs contained in $a_i$, $b_k$, $a_j$, and $b_l$. We say that the pair of pants $P_{\alpha^+}$ is \emph{saturated with respect to $\beta^+$} if for every component $P_{\beta^+}$ of $\Sigma \setminus \nu(\beta^+)$, the pair $(P_{\alpha^+},P_{\beta^+})$ is saturated. An extended Heegaard diagram $(\alpha^+,\beta^+)$ satisfies the Rectangle Condition of Casson-Gordon if for every component $P_{\alpha^+}$ of $\Sigma \setminus \nu(\alpha^+)$, we have $P_{\alpha^+}$ is saturated with respect to $\beta^+$. Casson and Gordon proved the following. \begin{theorem}[\cite{Casson-Gordon_Reducing_1987}]\label{thm:CG} Suppose that an extended Heegaard diagram $(\alpha^+,\beta^+)$ satisfies the Rectangle Condition. Then the induced Heegaard splitting $H_{\alpha^+} \cup H_{\beta^+}$ is irreducible. \end{theorem} Now, let $(\alpha^+,\beta^+,\gamma^+)$ be an extended trisection diagram. We say that $(\alpha^+,\beta^+,\gamma^+)$ satisfies the \emph{Rectangle Condition} if for every component $P_{\alpha^+}$ of $\Sigma \setminus \nu(\alpha^+)$, we have that either $P_{\alpha^+}$ is saturated with respect to $\beta^+$ or $P_{\alpha^+}$ is saturated with respect to $\gamma^+$. \begin{proposition} Suppose that an extended trisection diagram satisfies the Rectangle Condition. Then the induced trisection $\mathcal T$ with spine $H_{\alpha^+} \cup H_{\beta^+} \cup H_{\gamma^+}$ is irreducible. \end{proposition} \begin{proof} Suppose by way of contradiction that $\mathcal T$ is reducible.
Then there exists a curve $\delta \subset \Sigma = \partial H_{\alpha^+}$ that bounds disks $D_1 \subset H_{\alpha^+}$, $D_2 \subset H_{\beta^+}$, and $D_3 \subset H_{\gamma^+}$. Let $D_{\alpha^+}$ denote the set of $3g-3$ disks in $H_{\alpha^+}$ bounded by the curves $\alpha^+$, and define $D_{\beta^+}$ and $D_{\gamma^+}$ similarly. There are several cases to consider. First, suppose that $\delta \in \alpha^+$, so that $D_1 \in D_{\alpha^+}$, and let $P_{\alpha^+}$ be a component of $\Sigma \setminus \nu(\alpha^+)$ that contains $\delta$ as a boundary component. Suppose without loss of generality that $P_{\alpha^+}$ is saturated with respect to $\beta^+$. Then, for any curve $b \in \beta^+$, we have that $b$ is the boundary of a component $P_{\beta^+}$ of $\Sigma \setminus \nu(\beta^+)$, where $P_{\alpha^+} \cap P_{\beta^+}$ contains a rectangle with boundary arcs in $\delta$ and $b$. It follows that $\delta$ meets every curve $b \in \beta^+$, so $\delta \notin \beta^+$. Suppose that $D_2$ and $D_{\beta^+}$ have been isotoped to intersect minimally, so that these disks meet in arcs by a standard argument. There must be an outermost arc of intersection in $D_2$, which bounds a subdisk of $D_2$ with an arc $\delta' \subset \delta$, and $\delta'$ is a \emph{wave} (an arc with both endpoints on the same boundary curve) contained in a single component $P_{\beta^+}$ of $\Sigma \setminus \nu(\beta^+)$. Let $b_1$ and $b_2$ be the boundary components of $P_{\beta^+}$ disjoint from $\delta'$. Since $P_{\alpha^+}$ is saturated with respect to $\beta^+$, there is a rectangle $R \subset P_{\alpha^+} \cap P_{\beta^+}$ with boundary arcs contained in $b_1$, $\delta$, $b_2$, and some other curve in $\partial P_{\alpha^+}$. Let $\delta''$ be the arc component of $\partial R$ contained in $\delta$.
Since the wave $\delta'$ separates $b_1$ from $b_2$ in $P_{\beta^+}$, it follows that $\delta' \cap \delta'' \neq \emptyset$, a contradiction. In the second case, suppose that $\delta$ is a curve in $\beta^+$. Note that the Heegaard splitting determined by $(\alpha^+,\gamma^+)$ is reducible, and thus by the contrapositive of Casson-Gordon's rectangle condition, there must be some component $P_{\alpha^+}$ of $\Sigma \setminus \nu(\alpha^+)$ such that $P_{\alpha^+}$ is \textbf{not} saturated with respect to $\gamma^+$, so that $P_{\alpha^+}$ is saturated with respect to $\beta^+$. Let $P_{\beta^+}$ be a component of $\Sigma \setminus \nu(\beta^+)$ that contains $\delta$ as a boundary component. By the above argument, $\delta \notin \alpha^+$, and if we intersect $D_1$ with $D_{\alpha^+}$ we can run an argument parallel to the one above to show that $\delta$ has a self-intersection, a contradiction. A parallel argument shows that $\delta \notin \gamma^+$. Finally, suppose that $\delta$ is not contained in any of $\alpha^+$, $\beta^+$, or $\gamma^+$. By intersecting the disks $D_1$ and $D_{\alpha^+}$, we see that there is a wave $\delta' \subset \delta$ contained in some pants component $P_{\alpha^+}$ of $\Sigma \setminus \nu(\alpha^+)$. Suppose without loss of generality that $P_{\alpha^+}$ is saturated with respect to $\beta^+$. By intersecting $D_2$ with $D_{\beta^+}$, we see that there is a wave $\delta'' \subset \delta$ contained in some pants component $P_{\beta^+}$ of $\Sigma \setminus \nu(\beta^+)$. Let $a_1$ and $a_2$ be the components of $\partial P_{\alpha^+}$ that avoid $\delta'$, and let $b_1$ and $b_2$ be the components of $\partial P_{\beta^+}$ that avoid $\delta''$. By the Rectangle Condition, $P_{\alpha^+} \cap P_{\beta^+}$ contains a rectangle $R$ whose boundary is made of arcs in $a_1$, $b_1$, $a_2$, and $b_2$.
As such, $\delta' \cap R$ contains an arc connecting $b_1$ to $b_2$, while $\delta'' \cap R$ contains an arc connecting $a_1$ to $a_2$, but this implies that $\delta' \cap \delta'' \neq \emptyset$, a contradiction. We conclude that no such curve $\delta$ exists. \end{proof} Of course, at this time, the Rectangle Condition is a tool without an application, which elicits the following question: \begin{question} Is there an extended trisection diagram $(\alpha^+,\beta^+,\gamma^+)$ that satisfies the Rectangle Condition? \end{question} Note that while it is easy to find three pants decompositions that satisfy the Rectangle Condition, the difficulty lies in finding three such pants decompositions which also determine a trisection; in pairs, they must be extended Heegaard diagrams for the 3-manifolds $Y_k$. \end{document}
\begin{document} \title{Diffusive-Gutzwiller approach to the quadratically driven photonic lattice} \author{Eduardo Mascarenhas} \affiliation{Institute of Physics, Ecole Polytechnique F\'{e}d\'{e}rale de Lausanne (EPFL), CH-1015 Lausanne, Switzerland} \date{\today} \begin{abstract} We adopt a diffusive-Gutzwiller approach to investigate a phase transition in a quadratically driven-dissipative Bose-Hubbard lattice. Diffusive trajectories may lead to lower average entanglement as compared to jump-like trajectories and have a natural tendency to approach coherent states, therefore the method can be less prone to the bias induced by the fully uncorrelated Gutzwiller ansatz. Averaging over trajectories does lead to classical correlations, and this allows us to address the correlation length of such 2D lattices of open quantum systems, which is the main goal of this work. Under these approximations, we find a negligible correlation length in the low density phase and apparently unbounded growth of the correlation length in the high density phase. Additionally, we show that the effective relaxation times associated with the time scales for synchronisation in the high density phase may also diverge, suggesting the vanishing of the Lindbladian gap.\end{abstract} \maketitle \section{Introduction} Driven-dissipative many-body quantum systems constitute a special class of nonequilibrium systems that has attracted intense activity in the last decade. The possibility of genuine nonequilibrium universality classes~\cite{MarkCrit} suggests that such phenomena may not have equilibrium analogs, which makes the development of nonequilibrium methodology of paramount importance~\cite{VarCui,VarL,Taiwan,iPepsRoman,BiondiCorr,CasteelsHierarchy,ClusterMF,LinkedCluster,ArrigoniImpurity,ArrigoniImpurityNE,CarusottoStochGutz,StochGutz}.
On the experimental side, the realization of dissipative quantum simulators is currently not a far-fetched reality in different architectures using optical or superconducting circuits~\cite{FluidLight,HartmannRev,AngelakisRev}. Recently, we have witnessed experimental evidence of dissipative phase transitions in systems of ultracold atoms~\cite{Esslinger}, superconducting circuits~\cite{Circuits}, and semiconductors~\cite{Semicond}. Lattices of photonic nonlinear resonators have been studied over the last decade, ever since the low temperature analogy to condensed matter was established, suggesting the possibility of simulating the Mott-Superfluid phase transition~\cite{MottSup}. However, the equilibrium Mott-Superfluid transition is not resilient to the typical highly dissipative nature of such systems. The study of this phenomenology has progressed along two main lines. The first is the nonequilibrium synthesis and stabilization of strongly correlated phases such as Mott~\cite{VFriends} and solid phases~\cite{Solid} in analogy to the ground state physics. The second is the identification and characterization of genuine nonequilibrium phases and transitions, such as the emergence of the gas-liquid bistability for the linearly driven case~\cite{BiondiGas,Vicentini}, spontaneous symmetry breaking for the quadratically driven case~\cite{Vincenzo}, and laser-like transitions driven by interactions~\cite{Laser}. In this context, the quadratically driven single Kerr resonator has been studied, both theoretically~\cite{Quad1,Quad2,Quad3,Quad4,Quad5,Quad6,Quad7,Quad8} and experimentally~\cite{QuadEx}. Theoretically addressing two-dimensional driven-dissipative lattices is, in general, a difficult task. The local dimension of the vector space is usually high as compared to small spin systems.
For example, keeping ten Fock states results in a density matrix of local dimension 100, making it hard to model such high occupation systems with tensor-network techniques~\cite{VarCui,VarL,iPepsRoman,FabianBose}. Such techniques may also generate negative density matrices, requiring extra resources to ensure positivity~\cite{Positive}. It is also not clear to what degree quantum correlations are relevant in such bulk-driven-dissipative lattices. An alternative, which captures only classical correlations and scales linearly with system size, is the stochastic Gutzwiller approach~\cite{CarusottoStochGutz,StochGutz}. The method has been recently applied to a spin model~\cite{StochGutz} in which the trajectories are driven by Poisson noise, thus being of jump-like behaviour. The only limitations of the method are (i) that the results can be biased by the fact that quantum correlations are neglected in each trajectory and (ii) that it can happen that the jump trajectories do not spontaneously create a coherent fraction, leading to an unphysical behaviour in which the sites do not ``talk'' to each other under the mean-field-like coupling. However, it is known that Markovian master equations may be unraveled in infinitely many ways and that diffusive local unravelings may lead to lower average entanglement as compared to jump unravelings~\cite{LocalDiff}. In the specific case addressed in~\cite{LocalDiff}, local diffusion leads to the minimum average entanglement obtained with local unravelings. Furthermore, in the specific case of the quadratically driven resonator addressed here, local diffusion tends to generate trajectories with a significant coherent fraction, as shown in~\cite{Quad2}.
Such features make the diffusive version of the stochastic-Gutzwiller approach possibly (i) less prone to biases due to the lower entanglement of diffusive trajectories and (ii) less prone to the unphysical behaviour of the jump version, since the noise in the diffusive trajectories allows for communication between sites through a fluctuating coherent fraction. It should be noted, however, that we do not expect the method to be the ultimate tool for such 2D lattices but rather a useful alternative. In this work we review the theory behind the method, applying the time-dependent variational principle~\cite{TDVP} to the stochastic Schr\"{o}dinger equation while confining each trajectory to the manifold of uncorrelated states (Gutzwiller ansatz). This short overview of the theory, putting the method on firmer grounds, is given in Section II. Subsequently, we present the application of the method to the quadratically driven photonic lattice in Section III, in which we present the results of the numerical analysis, addressing issues of dimensionality, correlation lengths, and relaxation times as we cross the transition. We find that both the correlation length and the relaxation rates present signatures of the phase transition. In Section IV we present our conclusions. \section{Theory} This work is devoted to open quantum systems whose density matrix obeys a Markovian master equation of Lindblad form such as~\cite{QNoise} \begin{equation}\frac{d\rho}{dt}=-i[H,\rho]-\frac{1}{2}\sum_i \left[ K_i^{\dagger}K_i\rho+\rho K_i^{\dagger}K_i -2K_i\rho K^{\dagger}_i\right],\end{equation} in which $H$ is the system Hamiltonian and $K_i$ are so-called jump operators through which the system couples to the environment.
Such a master equation may be unraveled with a stochastic Schr\"{o}dinger equation typically found in homodyne measurements~\cite{QMAC} \begin{eqnarray}d|\tilde{\Psi}\rangle&=&-iH_{\mathrm{eff}}[\mathbf{Q}]dt|\tilde{\Psi}\rangle\nonumber\\ &=&\left[-iHdt -\frac{1}{2}\sum_i K^{\dagger}_iK_idt+\sum_iK_idQ_i\right]|\tilde{\Psi}\rangle,\label{Diff}\end{eqnarray} with $dQ_i=\langle K^{\dagger}_i+ K_i\rangle dt + dW_i$ being the homodyne currents and $dW_i$ being independent Wiener processes. The above equation is written in a format appropriate for efficient numerical integration and does not preserve the norm, such that $|\tilde{\Psi}\rangle$ is an unnormalized version of $|\Psi\rangle$. Typically the state is renormalized after each time step. The time-dependent variational principle is based on the minimization of the functional \begin{equation} f=\left\langle \Psi\left| \frac{\partial}{\partial t}+iH_{\mathrm{eff}}[\mathbf{Q}]\right|{\Psi}\right\rangle\end{equation} such that equation~(\ref{Diff}) is recovered from setting the functional derivative to zero, $\frac{\delta f}{\delta \langle {\Psi}|}=0$. The Gutzwiller ansatz in this context consists of restricting the dynamics of each trajectory to the manifold of uncorrelated states $|\Psi\rangle=\bigotimes_s|\psi_s\rangle$, such that $|\psi_s\rangle$ is a pure state of site $s$. Let us also assume that equation~(\ref{Diff}) only has two-body couplings, such that $H_{\mathrm{eff}}[\mathbf{Q}]=\sum_{s'\le s}H^{(s)}_{s'}[\mathbf{Q}]$ (where the ordered sum excludes double counting).
The functional derivative $\frac{\delta f}{\delta \langle {\psi}_s|}=0$ leads to the following set of coupled equations of motion \begin{equation} d|\tilde{\psi}_s\rangle=-i\left[H^{(s)}_{s} +\sum_{s'\ne s}\langle \psi_{s'}| H^{(s)}_{s'}|\psi_{s'}\rangle\right]|\tilde{\psi}_s\rangle dt,\end{equation} such that $\langle \psi_{s'}| H^{(s)}_{s'}|\psi_{s'}\rangle$ should be immediately identified as the mean-field-like effective Hamiltonian that couples site $s$ to site $s'$. Finally, after averaging over trajectories, or equivalently, averaging over the Gaussian noise, we recover a classically correlated state \begin{equation}\rho_{CC}=\int d\mu(\mathbf{Q})|\Psi[\mathbf{Q}]\rangle\langle\Psi[\mathbf{Q}]|=\overline{|\Psi[\mathbf{Q}]\rangle\langle\Psi[\mathbf{Q}]|}\end{equation} that is an approximation of the full quantum state $\rho=\rho_{CC} +\rho_{QC}$, such that $\rho_{QC}$ represents the quantum correlated component. Now, the method is only expected to be accurate in lattices of high connectivity and in case $\rho_{QC}$ is negligible. Testing this hypothesis remains an open challenge and the classically correlated ansatz for $\rho$ ultimately remains an uncontrolled approximation. \section{The quadratically driven Bose-Hubbard model} The effective Hamiltonian components of the quadratically driven Bose-Hubbard model are given by \begin{eqnarray} H^{(s)}_{s'} &=&\Delta a^{\dagger}_sa_s+\frac{U}{2}a^{\dagger}_sa^{\dagger}_sa_sa_s+\frac{G}{2}(a^{\dagger 2}_s+a^{2}_s)\nonumber \\ &-&\frac{i}{2}\sum_i^2 K^{(s)\dagger}_iK^{(s)}_i+i\sum_i^2K^{(s)}_i\frac{dQ^{(s)}_i}{dt}\quad \mathrm{if} \ s=s'\nonumber\\ &=&-J_{\langle s,s'\rangle}\left(a_sa^{\dagger}_{s'}+a^{\dagger}_sa_{s'}\right) \quad \mathrm{if} \ s\ne s' \label{H} \end{eqnarray} where $a_s$ is the bosonic annihilation operator for the $s$-th site, $J$ is the hopping strength, $U$ is the strength of the Kerr nonlinearity, and $G$ is the amplitude of the two-photon driving.
In the rotating frame, $\Delta=\omega_c-\omega_2/2$, where $\omega_2$ is the frequency of the two-photon driving and $\omega_c$ is the resonator frequency. The jump operators at each site are $K^{(s)}_1=\sqrt{\gamma_1}a_s$ and $K^{(s)}_2=\sqrt{\gamma_2}a^{2}_s$, with $\gamma_1$ and $\gamma_2$ being the single and two photon dissipation rates, respectively. In figure~\ref{photon} we show the asymptotic photon number averaged in real space simulated with the Diffusive-Gutzwiller (DG) method, in reasonable agreement with the mean-field uncorrelated method in~\cite{Vincenzo}, with the estimated transition point being around $J/\gamma\approx 0.6$. A direct comparison between 1D and 2D (square lattice) results suggests that dimensionality plays an important role in this system. However, it must be stressed that the DG results are unreliable in one dimension, as is typically the case with mean-field-like methods. A direct comparison with a matrix-product-state simulation in figure~\ref{MPS} shows that the DG method overestimates the photon number in 1D chains in the higher density (or symmetry broken) phase, also suggesting that short range quantum correlations may be more relevant in the 1D geometry. \begin{figure} \caption{Spatially averaged photon number generated with the DG method as a function of hopping strength for both 1D and 2D geometries. Parameters are chosen as $\gamma_1=\gamma_2=\gamma$, $G=4\gamma$, $U=10\gamma$ and $\Delta=J$.} \label{photon} \end{figure} \begin{figure} \caption{ (Left) Average photon number and (Right) first order correlations generated with an MPS approach. Parameters are chosen as $\gamma_1=\gamma_2=\gamma$, $G=4\gamma$, $U=10\gamma$ and $\Delta=J$.} \label{MPS} \end{figure} \begin{figure} \caption{ DG simulations for a 2D system $L\times L$ with $L=11$. (Left) The average total photon number and (right) the average total coherent component. Dark and bright curves correspond to $J/\gamma=0.3$ and $J/\gamma=1$, respectively.
Parameters are chosen as $\gamma_1=\gamma_2=\gamma$, $G=4\gamma$, $U=10\gamma$ and $\Delta=J$.} \label{TrajJp3J1} \end{figure} Let us now take a closer look at single realizations of the noise process. In figure~\ref{TrajJp3J1} we show several trajectories for one point in the low density phase ($J=0.3\gamma$) and one point in the high density phase ($J=1\gamma$). Here we define the ``macroscopic'' quantities that average local quantities over the 2D spatial degrees of freedom: the average total photon number $N=\frac{1}{L^2}\sum_s^{L^2}\langle N_s\rangle$ and the average total coherent component $\alpha=\frac{1}{L^2}\sum_s^{L^2}\langle a_s\rangle$. The photon number has an obvious local interpretation; however, the coherent component actually captures a global feature regarding the phase synchronization of the oscillators. If the oscillators are out of phase, which is the case in the low density phase, $|\alpha|=0$ since their phases randomly average to zero, while in the high density phase the oscillators synchronize, presenting a global phase locking yielding $|\alpha|>0.$ In figure~\ref{TrajJp3J1} we show the slow buildup of the coherent component for several trajectories allowed by the synchronization in the high density phase and the absence thereof in the low density phase. The simulations are started in the vacuum state, which has a pathological behaviour in the quantum jump approach. Note that none of these features would be observable with the quantum-jump approach since the jump trajectories never spontaneously generate a local coherent component ($\langle a_s\rangle$) under the quadratic driving, as deeply discussed in~\cite{Vincenzo,Quad2}, which forbids communication between sites under the Gutzwiller approximation. In the diffusive approach, the terms $KdQ$ in equation~(\ref{H}) add fluctuations to the local coherent component, allowing for communication between the sites and eventually the synchronization.
It should also be noted that in figure~\ref{TrajJp3J1} we observe the symmetric asymptotic solutions as predicted in~\cite{Vincenzo}; however, while averaging over trajectories we recover a unique steady state with zero coherent component ($\mathrm{tr}\{ \rho a_s \}=0, \forall s$) since the negative and positive trajectories cancel each other. The synchronization does manifest itself in the spatial correlation functions, which we will address below. \begin{figure} \caption{ DG simulations for a 2D system $L\times L$ with $L=11$. (Upper-left) The relaxation dynamics of the average total photon number and (upper-right) the average total coherent component. The effective relaxation times for the (lower-left) photon number and (lower-right) coherent component. Dark curves correspond to lower values of $J/\gamma$ and bright curves correspond to higher values of $J/\gamma$. Parameters are chosen as $\gamma_1=\gamma_2=\gamma$, $G=4\gamma$, $U=10\gamma$ and $\Delta=J$.} \label{Relax} \end{figure} Directly probing the gap of the Lindbladian is a desirable task since it dictates the effective relaxation rate that emerges from the many-body dynamics. However, it is not directly accessible with the DG method. Effective relaxation rates may be inferred from the relaxation of observables such as $O(\infty)-O(t)\approx e^{-\gamma_{\mathrm{eff}}(O)t}$; however, the choice of observable may dramatically influence the results, such that different observables $O$ may have different relaxation rates $\gamma_{\mathrm{eff}}(O)$. Here we study the relaxation of both the photon number and the coherent component. In figure~\ref{Relax} we show the relaxation dynamics and effective relaxation rate for both quantities, and we find that the rates are very small in the high density phase, indicating the vanishing of the Lindblad gap. Furthermore, the relaxation time of the coherent component can be one order of magnitude larger than the photon number relaxation time.
This shows how global quantities that are affected by the synchronization take longer to reach their asymptotics and that global quantities should be used in order to obtain a more accurate estimation of the Lindblad gap. \begin{figure} \caption{ First order correlation function of the central site with the rest of the lattice for a 2D system $L\times L$ with $L=21$ for (Left) $J/\gamma=0.3$ and (Right) $J/\gamma=1$. Parameters are chosen as $\gamma_1=\gamma_2=\gamma$, $G=4\gamma$, $U=10\gamma$ and $\Delta=J$.} \label{g1U10G4L21} \end{figure} Under the DG approximation the first order correlation assumes a classical nature $\langle a^{\dagger}_sa_{s'}\rangle\approx \overline{\langle a^{\dagger}_s\rangle \langle a_{s'}\rangle}$. We are specifically interested in the correlation between the site in the middle of the 2D lattice and all other sites \begin{equation}G_{i,j}=\left|\overline{\langle a^{\dagger}_{0,0}\rangle \langle a_{i,j}\rangle}\right|.\end{equation} In figure~\ref{g1U10G4L21} we show $G$ both in the low and high density phases. In the low density phase we observe essentially only self-correlation, while in the higher density phase the correlation expands over the whole lattice. The difference in photon density between the two phases is very drastic in the parameter regimes studied here. Thus, it is remarkable how the synchronization of the oscillators in the high density phase overcomes the local dissipation mechanisms and generates long range coherence similar to a super-fluid state. \begin{figure} \caption{ DG simulation for a system $L\times L$ with $L=21$. (Left) Exponential correlation length and (Right) the corresponding $g_{0,i}$.} \label{Length11g1U10G4} \end{figure} In order to allow for direct comparisons with the 1D case let us define a 1D projection of the correlation function as \begin{equation} g_{0,i}=\frac{G_{0,i}+G_{i,0}}{2}.\end{equation} We may also define the effective correlation length assuming $g_{0,i}\propto e^{-|i|/\xi}$.
In figure~\ref{Length11g1U10G4} we show the correlation length as a function of the hopping strength and $g_{0,i}$ for several values of $J/\gamma$ as we cross the transition. Negligible correlation length is found in the low density phase, while in the high density phase the correlation length becomes approximately the system size $\xi\approx L$, at least for the maximal system size we have been able to simulate, $L=21$. From our results we see no indication that the correlation assumes a power law shape (as is the case for the equilibrium super-fluid phase). In figure~\ref{Length11g1U10G4} we fit all the correlation functions with an exponential and find the fits to be accurate. For the sake of comparison we also show a power law fit to the correlation function at $J/\gamma=1$. The exponential decay of the correlation is also present in 1D, as shown in figure~\ref{MPS} with an MPS approach. These results also raise the question of whether or not the transition predicted in~\cite{Vincenzo} persists in the presence of correlations or if the universality class is altered. These issues are, however, difficult to address, requiring long simulation campaigns and possibly improved ans\"atze that take quantum correlations into account on short length scales. \section{Conclusions} We have carried out a study of the quadratically driven photonic lattice incorporating classical correlations with the diffusive-Gutzwiller approach. We have observed the growth of both relaxation times and correlation lengths in the high density regime, in remarkable contrast to the low density regime. Determining precisely the transition points and universality classes might require taking quantum correlations into account because these are relevant at short length scales and influence the region at which correlations start to spread and give rise to the emergent transition point.
Since the method does not capture quantum correlations, it could be of considerable relevance to account for quantum correlations at least on short length scales. This could be pursued with different techniques, but also still under the Gutzwiller umbrella considering complementary cluster states. We will pursue this with the variational principle outlined in this work. \end{document}
\begin{document} \title{Density by moduli and Wijsman statistical convergence} \author{Vinod K. Bhardwaj, Shweta Dhawan \and Oleksiy A. Dovgoshey} \date{} \maketitle \begin{abstract} In this paper, we generalize the Wijsman statistical convergence of closed sets in a metric space by introducing the $f$-Wijsman statistical convergence of these sets, where $f$ is an unbounded modulus. It is shown that the Wijsman convergent sequences are precisely those sequences which are $f$-Wijsman statistically convergent for every unbounded modulus $f$. We also introduce a new concept of Wijsman strong Ces\`{a}ro summability with respect to a modulus, and investigate the relationships between the $f$-Wijsman statistically convergent sequences and the Wijsman strongly Ces\`{a}ro summable sequences with respect to $f$. \end{abstract} \noindent\textbf{Keywords and phrases:} modulus function; natural density; statistical convergence; strong Ces\`{a}ro summability; Wijsman convergence. \noindent\textbf{2010 Mathematics subject classification:} 40A35; 46A45; 40G15 \section{Introduction and background} The idea of statistical convergence was first introduced by Fast \cite{hf51} and Steinhaus \cite{hs51} independently in the same year 1951 and since then several generalizations and applications of this concept have been investigated by various authors, namely $\check{S}a$l$\Acute{a}$t \cite{ts80}, Fridy \cite{jf85}, Connor \cite{jc88}, Aizpuru $et~al.$ \cite{ab14}, K\"{u}\c{c}\"{u}kaslan $et~al.$ \cite{md14}, and many others. Statistical convergence depends on the natural density of subsets of the set $\mathbb{N} = \{1,2,3,\ldots\}$. The natural density $d(K)$ of a set $K \subseteq \mathbb{N}$ (see \cite[Chapter~11]{iz80}) is defined by \begin{equation}\label{eq1.1} d(K) = \lim_{n \to \infty}\frac{1}{n}\left|\{k\leq n\colon k \in K\}\right|, \end{equation} where $\left|\{\,k\leq n: k \in K \}\right|$ denotes the number of elements of $K$ not exceeding~$n$.
Obviously we have $d(K) =0$ provided that $K$ is finite. In what follows we write $(x_k) \subset A$ if all elements of the sequence $(x_k)$ belong to $A$. \begin{definition} A sequence $(x_{k}) \subset \mathbb R$ is said to be statistically convergent to $l \in \mathbb R$ if, for each $\varepsilon>0$, the set $\{k \in \mathbb{N}\colon |x_k - l| \geq \varepsilon\}$ has the zero natural density. \end{definition} A new concept of density by moduli was introduced by Aizpuru $et~al.$\cite{ab14} that enabled them to obtain a nonmatrix method of convergence, namely, the $f$-statistical convergence which is a generalization of statistical convergence. We recall that a modulus is a function $f\colon [0, \infty) \to [ 0, \infty)$ such that \begin{enumerate} \item[$(i)$] $f(x) = 0$ if and only if $ x = 0$, \item [$(ii)$] $f(x + y) \leq f(x) + f(y)$ for all $x, y \in [0,\infty)$, \item [$(iii)$] $f$ is increasing, \item [$(iv)$] $f$ is continuous. \end{enumerate} The functions $f$ satisfying condition $(ii)$ are called subadditive. If $f$, $g$ are moduli and $a$, $b$ are positive real numbers, then $$ f\circ g,\quad af+bg, \quad\text{and}\quad f\vee g $$ are moduli. A modulus may be unbounded or bounded. For example, the modulus $f(x) = x^p$, where $0 < p \leq 1$, is unbounded, but $g(x) = \frac{x}{(1+ x)}$ is bounded. It is interesting to note that $f\colon [0, \infty) \to [ 0, \infty)$ is a modulus if and only if there is a uniformly continuous, non-constant function $g\colon [0,\infty) \to [0,\infty)$ such that $$ f(t) = \sup_{\substack{|x-y|\leq t \\ x, y \in [0,\infty)}} |g(x)-g(y)| $$ holds for every $t \in [0, \infty)$. The details can be found in Dovgoshey \emph{et al.} \cite[Theorem~4.3]{DM}. For bounded moduli this characterization has been, in fact, known to Lebesgue~\cite{Le} in~$1910$. The idea of replacing the natural density with a density by moduli has motivated us to look for some new generalizations of statistical convergence \cite{vd15, vd15a}.
Using the density by moduli Bhardwaj $et~ al.$ \cite{vg15} have also introduced the concept of $f$- statistical boundedness which is a generalization of the concept of statistical boundedness \cite{jo97} and intermediate between the usual boundedness and the statistical boundedness. The concept of convergence of sequences of points has been extended by several authors \cite{jf90, mp86, gb85, gb94, IM2015, yz93, yz94, rw64, rw66} to convergence of sequences of sets. One of such extensions considered in this paper is the concept of Wijsman convergence. Nuray and Rhoades \cite{fr12} extended the notion of Wijsman convergence of sequences of sets to that of Wijsman statistical convergence and introduced the notion of Wijsman strong Ces\`{a}ro summability of sequences of sets and discussed its relations with Wijsman statistical convergence. In this paper we extend the Wijsman statistical convergence to a $f$-Wijsman statistical convergence, where $f$ is an unbounded modulus. Let us recall the basic definitions of $f$-density and $f$-statistical convergence. \begin{definition}[\cite{ab14}]\label{D:01} Let $f\colon [0, \infty) \to [0,\infty)$ be an unbounded modulus. The $f$-density $d^f(K)$ of a set $K \subseteq \mathbb{N}$ is defined as \begin{equation}\label{eq1.2} d^{f} (K) := \lim_{n \to \infty} \frac{f(|\{k \leq n\colon k \in K\}|)}{f(n)} \end{equation} if this limit exists. A sequence $(x_k) \subset \mathbb R$ is said to be $f$-statistically convergent to $l \in \mathbb R$ if, for each $\varepsilon >0$, the set $\{k \in \mathbb N\colon |x_k-l|\geq \varepsilon\}$ has the zero $f$-density. \end{definition} \begin{remark} For each unbounded modulus $f$, the finite sets have the zero $f$-density and $$ (d^{f}(K)=0) \Rightarrow (d^{f}(\mathbb{N}-K) = 1) $$ holds for every $K \subseteq \mathbb N$ but, in general, the implication $$ (d^{f} (\mathbb{N} -K) = 1) \Rightarrow (d^{f} (K)=0) $$ does not hold. 
For example if we take $f(x)=\log(1+x)$ and $K=\{2n\colon n\in \mathbb N\}$, then $$ d^f(K)=d^f(\mathbb N-K)=1. $$ \end{remark} \begin{example}\label{E:1.4} A set having the zero natural density may have a non-zero $f$-density. In particular $$ d(K) = 0 \quad \text{and}\quad d^{f}(K) = 1/2 $$ holds for $f(x)= \log{(1+x)}$ and $K = \{n^2\colon n \in \mathbb N\}$. \end{example} Now we pause to collect some definitions related to Wijsman convergence of sequences of sets in a metric space. Let $(X,\rho)$ be a metric space with a metric $\rho$. For any $x \in X$ and any non-empty set $A \subseteq X$, the distance from $x$ to $A$ is defined by \begin{align*} d(x,A)= \inf_{y \in A}\rho(x,y). \end{align*} In what follows we denote by $CL(X)$ the set of all non-empty closed subsets of $(X, \rho)$. \begin{definition}\label{D:07} Let $(X,\rho)$ be a metric space, $(A_k) \subset CL(X)$ and $A \in CL(X)$. Then $(A_k)$ is said to be: \begin{itemize} \item \emph{Wijsman convergent to $A$}, if the numerical sequence $(d(x,A_k))$ is convergent to $d(x,A)$ for each $x \in X$; \item \emph{Wijsman statistically convergent} to $A \in CL(X)$, if for each $x \in X$, the numerical sequence $(d(x,A_k))$ is statistically convergent to $d(x,A)$; \item \emph{Wijsman bounded} if \begin{equation}\label{eq1.3} \sup_k d(x,A_k) < \infty \end{equation} for each $x \in X$; \item \emph{Wijsman Ces\`{a}ro summable to $A$} if, for each $x \in X$, the sequence $(d(x,A_k))$ is Ces\`{a}ro summable to $d(x,A)$, i.e., \begin{align*} \lim_{n \to \infty} \frac{1}{n}\sum_{k=1}^{n}d(x,A_k)=d(x,A); \end{align*} \item \emph{Wijsman strongly Ces\`{a}ro summable to $A$} if, for each $x \in X$, the sequence~$(d(x,A_k))$ is strongly Ces\`{a}ro summable to $d(x,A)$, i.e., \begin{align*} \lim_{n \to \infty}\frac{1}{n}\sum_{k=1}^{n}|d(x,A_k)-d(x,A)|=0. 
\end{align*} \end{itemize} \end{definition} \begin{remark}\label{R1.6} The sets $A_k$ belonging to a Wijsman bounded sequence $(A_k)$ can be unbounded subsets of $(X, \rho)$, i.e., $$ \operatorname{diam} A_k =\sup\{\rho(x,y)\colon x, y \in A_k\}=\infty. $$ Moreover, the triangle inequality implies that $(A_k)$ is Wijsman bounded if there exists at least one point $p \in X$ such that~\eqref{eq1.3} holds with $x=p$. \end{remark} \begin{example}\label{R:03} Let $(X, \rho)$ be the complex plane $\mathbb C$ with the standard metric. Let us consider the sequence $(A_k)$ defined as follows: \[ A_{k} := \begin{cases} \left\{z \in \mathbb C\colon |z-1|=\frac{1}{k}\right\},& \text{if $k$ is a square,}\\ \{0\}, &\text{otherwise}. \end{cases} \] This sequence is Wijsman statistically convergent to $\{0\}$ but not Wijsman convergent. \end{example} \begin{definition}\label{D:1} Let $(X,\rho)$ be a metric space, let $(A_k) \subset CL(X)$ and let $f\colon [0,\infty) \to [0,\infty)$ be an unbounded modulus. The sequence $ (A_{k})$ is said to be $f$-Wijsman statistically convergent to $A \in CL(X)$ if the sequence $(d(x,A_k))$ is $f$-statistically convergent to $d(x,A)$ for each $x \in X$. \end{definition} We write $$ [WS^{f}]-\lim A_k = A $$ if $(A_k)$ is $f$-Wijsman statistically convergent to $A$. In the case where $f(x) = ax$, $a >0$, the $f$-Wijsman statistical convergence reduces to the Wijsman statistical convergence. We prove that the Wijsman convergent sequences are precisely those sequences which are $f$-Wijsman statistically convergent for every unbounded modulus $f$. We also introduce a new concept of Wijsman strong Ces\`{a}ro summability with respect to a modulus and show that if a sequence is Wijsman strongly Ces\`{a}ro summable, then it is Wijsman strongly Ces\`{a}ro summable with respect to all moduli $f$. The moduli $f$ for which the converse is true are investigated. 
Finally, we study a relation between Wijsman strong Ces\`{a}ro summability with respect to a modulus $f$ and $f$-Wijsman statistical convergence. \section{$f$-Wijsman statistical convergence} The results of this section are closely related with paper~\cite{ab14}. \begin{theorem}\label{T:2} Let $f\colon [0,\infty) \to [0,\infty)$ be an unbounded modulus, $(X,\rho)$ be a metric space, $A \in CL(X)$ and let $(A_k) \subset CL(X)$ such that \begin{equation}\label{T:2e1} [WS^f]-\lim A_k = A. \end{equation} Then $(A_k)$ is Wijsman statistically convergent to $A$. \end{theorem} \begin{proof} For all $x \in X$, $\varepsilon>0$ and $n \in \mathbb N$ we write $$ K_{x,\varepsilon}(n):=\{k\leq n\colon |d(x,A_k)-d(x,A)|\geq \varepsilon\}. $$ If $(A_k)$ is not Wijsman statistically convergent to $A$, then there are $x \in X$ and $\varepsilon>0$ such that $$ \limsup_{n\to \infty} \frac{|K_{x,\varepsilon}(n)|}{n}>0. $$ Hence there exist $p \in \mathbb N$ and a sequence $(n_m) \subset \mathbb N$ such that \begin{equation}\label{T:2e2} \lim_{m\to \infty} n_m = \infty \end{equation} and $$ \frac{1}{n_m} |K_{x,\varepsilon}(n_m)| \geq \frac{1}{p} $$ for every $m \in \mathbb N$. The last inequality is equivalent to \begin{equation}\label{T:2e3} n_m \leq p\, |K_{x,\varepsilon}(n_m)|. \end{equation} Using the subadditivity of $f$ and~\eqref{T:2e3} we obtain $$ f(n_m) \leq p\, f(|K_{x,\varepsilon}(n_m)|). $$ Consequently the inequality \begin{equation}\label{T:2e4} \frac{f(|K_{x,\varepsilon}(n_m)|)}{f(n_m)} \geq \frac{1}{p} \end{equation} holds for every $m \in \mathbb N$. Equality~\eqref{T:2e2} and inequality~\eqref{T:2e4} imply $$ \limsup_{n\to \infty} \frac{f(|K_{x,\varepsilon}(n)|)}{f(n)}\geq \frac{1}{p}, $$ contrary to~\eqref{T:2e1}. \end{proof} \begin{remark}\label{R:2.6} Using Example~\ref{E:1.4} it is easy to construct a Wijsman statistically convergent sequence which is not $f$-Wijsman statistically convergent with $f(x)=\log (1+x)$.
\end{remark} \begin{theorem}\label{T:3} Let $(X,\rho)$ be a metric space and $f$, $g$ be unbounded moduli. Then for all $A$, $B \in CL(X)$ and every $(A_k) \subset CL(X)$ the equalities \begin{equation}\label{e2.5} [WS^f]-\lim A_k = A \quad \text{and} \quad [WS^g]-\lim A_k = B \end{equation} imply $A=B$. \end{theorem} \begin{proof} Let $(X,\rho)$ be a metric space, let $(A_k) \subset CL(X)$ and let~\eqref{e2.5} hold. By Theorem~\ref{T:2} the sequence $(A_k)$ is Wijsman statistically convergent to $A$ and to $B$. Using the uniqueness of statistical limits of numerical sequences we obtain that $d(x,A)=d(x,B)$ holds for every $x \in X$. It implies the equality $A=B$ because $A$, $B \in CL(X)$. \end{proof} \begin{corollary} Let $(X,\rho)$ be a metric space and let $(A_k) \subset CL(X)$. Then for every unbounded modulus $f\colon [0,\infty)\to [0,\infty)$, the limit $$ [WS^f]-\lim A_k $$ is unique if it exists. \end{corollary} We will say that a modulus $f\colon [0,\infty)\to [0,\infty)$ is slowly varying if the limit relation \begin{equation}\label{eq2.6} \lim_{x\to \infty} \frac{f(ax)}{f(x)} =1 \end{equation} holds for every $a>0$. (See Seneta \cite[Chapter~1]{Sen} for the properties of slowly varying functions.) It is clear that all bounded modulus are slowly varying. The function $f(x)=\log(1+x)$ is an example of unbounded slowly varying modulus. The following lemma is a refinement of Lemma~3.4 from~\cite{ab14}. \begin{Lemma}\label{L:1} Let $K$ be an infinite subset of $\mathbb N$. Then there is an unbounded, concave and slowly varying modulus $f\colon [0,\infty)\to [0,\infty)$ such that \begin{equation}\label{L:1e1} d^f(K)=1. \end{equation} \end{Lemma} \begin{proof} For every $n \in \mathbb N$ write $$ K(n):=\{m\in K\colon m\leq n\}. 
$$ Since $K$ is infinite, there is a sequence $(n_k) \subset \mathbb N$ such that: \begin{equation}\label{L:1e2} \lim_{k\to \infty}\frac{n_{k+1}}{n_k} = \infty \end{equation} and \begin{equation}\label{L:1e3} n_{k+1} - n_{k} < n_{k+2} - n_{k+1}, \quad 2n_{k} < n_{k+1} \end{equation} and \begin{equation}\label{L:1e4} n_k < \left|K(n_{k+1})\right| \end{equation} hold for every $k \in \mathbb N$. Write $n_0=0$ and define a function $f\colon [0,\infty)\to [0,\infty)$ by the rule: if $x \in [n_{k-1},n_k]$, $k \in \mathbb N$, then \begin{equation}\label{L:1e5} f(x)=\frac{x-n_{k-1}}{n_k - n_{k-1}} + k-1. \end{equation} In particular, we have \begin{equation}\label{L:1e6} f(n_k)=k \end{equation} for every $k \in \mathbb N \cup\{0\}$. We claim that $f$ has all desirable properties. $(i)$ \emph{$f$ is an unbounded modulus}. It is clear that $f(0)=0$ holds and $f$ is strictly increasing and unbounded. For subadditivity of $f$ it suffices to show that the function $\frac{f(t)}{t}$ is decreasing on $(0, \infty)$. Indeed, if $\frac{f(t)}{t}$ is decreasing, then $$ f(x+y)=x \frac{f(x+y)}{x+y} + y \frac{f(x+y)}{x+y} \leq x \frac{f(x)}{x} + y \frac{f(y)}{y} = f(x) + f(y). $$ (See, for example, Timan~\cite[3.2.3]{Tim}.) The function $\frac{f(x)}{x}$ is decreasing on $(0,\infty)$ if and only if this function is decreasing on $(n_{k-1}, n_k)$ for every $k \in \mathbb N$. Using~\eqref{L:1e3} we see that the last condition trivially holds on $(n_0, n_1)$, because in this case, the right hand side in~\eqref{L:1e5} is $$ \frac{x-n_0}{n_1-n_0} + (1-1) = \frac{x}{n_1}. $$ Moreover, for $k \geq 2$ the restriction of $\frac{f(x)}{x}$ to $(n_{k-1}, n_k)$ is decreasing if and only if \begin{equation}\label{L:1e7} \frac{(k-1)(n_k-n_{k-1}) - n_{k-1}}{n_k-n_{k-1}} \geq 0. \end{equation} Since, for $k \geq 2$, we have $$ (k-1)(n_k-n_{k-1}) - n_{k-1} \geq n_k-2n_{k-1}, $$ the second inequality in~\eqref{L:1e3} implies~\eqref{L:1e7}. Thus $f$ is an unbounded modulus. $(ii)$ \emph{$f$ is concave}.
Since $f$ is a piecewise affine function, the one-sided derivatives of $f$ exist at all points $x \in [0, \infty)$. Using~\eqref{L:1e5} and the first inequality in~\eqref{L:1e3} we see that these derivatives are decreasing. Hence $f$ is concave. (For the proof of concavity of functions with decreasing one-sided derivatives see, for example, Artin~\cite[p.~4]{Art}.) $(iii)$ \emph{$f$ is slowly varying}. It is easy to see that~\eqref{eq2.6} holds for all $a>0$ if it holds for all $a>1$. Since $f$ is increasing, the inequality $a>1$ implies that $$ \liminf_{x\to \infty} \frac{f(ax)}{f(x)} \geq 1. $$ Thus $f$ is slowly varying if and only if \begin{equation}\label{L:1e8} \limsup_{x\to \infty} \frac{f(ax)}{f(x)} \leq 1. \end{equation} Let $a > 1$ and $x >0$. Suppose that $$ x \in [n_{k-1}, n_k] \text{ and } ax \in [n_{k+p}, n_{k+p+1}] $$ for some $p$, $k \in \mathbb N$. It implies that \begin{equation}\label{L:1e9} a = \frac{ax}{x} \geq \frac{n_{k+p}}{n_{k}}. \end{equation} Using~\eqref{L:1e7} and~\eqref{L:1e9} we obtain \begin{equation}\label{L:1e10} (x \in [n_{k-1}, n_k]) \Rightarrow (ax \in [n_{k-1}, n_k] \text{ or } ax \in [n_{k}, n_{k+1}]) \end{equation} for all sufficiently large $x$. Now it follows from~\eqref{L:1e5} and~\eqref{L:1e10} that \begin{equation}\label{L:1e11} f(ax) \leq f(x)+2. \end{equation} Since we have $\lim_{x\to \infty} f(x)=\infty$, inequality~\eqref{L:1e11} implies~\eqref{L:1e8}. $(iv)$ \emph{Equality~\eqref{L:1e1} holds}. We must prove the equality \begin{equation}\label{L:1e12} \lim_{m\to \infty} \frac{f(\left|K(m)\right|)}{f(m)} = 1. \end{equation} Let $m \in \mathbb N$ such that $m \geq n_2$. Then there is $k\geq 3$ for which \begin{equation}\label{L:1e13} n_{k-1} \leq m \leq n_k. \end{equation} The last double inequality and~\eqref{L:1e6} imply \begin{equation}\label{L:1e14} k-1 = f(n_{k-1}) \leq f(m) \leq f(n_k) =k. 
\end{equation} From~\eqref{L:1e13} it follows that \begin{equation}\label{L:1e15} \left|K(n_{k-1})\right| \leq \left|K(m)\right| \leq \left|K(n_k)\right|. \end{equation} Using~\eqref{L:1e4}, \eqref{L:1e15} and the inequality $|K(n_k)| \leq n_k$ we obtain $$ n_{k-2} \leq \left|K(m)\right| \leq n_k, $$ which implies \begin{equation}\label{L:1e16} k-2 = f(n_{k-2}) \leq f(\left|K(m)\right|) \leq f(n_{k}) = k. \end{equation} Limit relation~\eqref{L:1e12} follows from~\eqref{L:1e14} and~\eqref{L:1e16}. \end{proof} \begin{example} The ternary Cantor function $G\colon [0,1] \to [0,1]$ leads to an interesting example of an unbounded modulus which is not concave. Indeed, $G$ is subadditive (see, for example, Dobo\v{s}~\cite{Dob} and Timan~\cite[3.2.4]{Tim}) and can be characterized as the unique real-valued, continuous, increasing function $f\colon [0,1]\to \mathbb R$ satisfying the functional equations $$ f\left(\frac{x}{3}\right) = \frac{1}{2} f(x) \text{ and } f(1-x)=1-f(x) $$ (see Chalice~\cite{Ch} for the proof). Now we define a sequence of functions $G_k$, such that $G_1=G$ and, for every $k \geq 2$, $\operatorname{dom}(G_k) = [0,3^{k-1}]$ and $$ G_k(x) = 2G_{k-1}\left(\frac{x}{3}\right), \quad x \in [0,3^{k-1}]. $$ Then the extended Cantor function $$ G_e\colon [0, \infty) \to [0, \infty), \quad G_e(x)=G_k(x), \text{ if } x \in [0,3^{k-1}] $$ is a correctly defined, unbounded modulus which is not concave. \end{example} \begin{figure} \caption{The graph of $G_e$} \end{figure} Let us denote by $MUCS$ the set of all unbounded, concave and slowly varying moduli. \begin{theorem}\label{T:4} Let $(X,\rho)$ be a metric space, $(A_k) \subset CL(X)$ and $A \in CL(X)$. Then the following statements are equivalent: \begin{enumerate} \item [$(i)$] $(A_k)$ is Wijsman convergent to $A$; \item [$(ii)$] The equality \begin{equation}\label{T:4e1} [WS^f] - \lim A_k = A \end{equation} holds for every unbounded modulus $f$; \item [$(iii)$] Equality~\eqref{T:4e1} holds for every $f \in MUCS$.
\end{enumerate} \end{theorem} \begin{proof} $(i) \Rightarrow (ii)$ Let $(i)$ hold. Since $(A_{k})$ is Wijsman convergent to $A$, the set $$ K_{x,\varepsilon}:=\{k \in \mathbb{N}\colon |d(x, A_k) - d(x,A)|\geq \varepsilon\} $$ is finite for all $x \in X$ and $\varepsilon>0$. Let $f\colon [0,\infty) \to [0,\infty)$ be an unbounded modulus. The equality \begin{equation*} \lim_{n \to \infty}\frac{f(|K_{x,\varepsilon}|)}{f(n)} = 0, \end{equation*} holds because $f$ is unbounded and increasing. Thus, $[WS^f]-\lim A_k = A$. $(ii) \Rightarrow (iii)$ It is trivial. $(iii) \Rightarrow (i)$ Let $(iii)$ hold. Suppose $(A_k)$ is not Wijsman convergent to~$A$. Then the set $K_{x,\varepsilon}$ is infinite for some $x \in X$ and $\varepsilon > 0$. Now by Lemma \ref{L:1} there exists $f \in MUCS$ such that $d^f(K_{x,\varepsilon}) = 1$, which contradicts~\eqref{T:4e1}. \end{proof} \begin{remark}\label{R:3} The sequence $(A_k)$ in Example~\ref{R:03} is $f$-Wijsman statistically convergent with $f(x)=x$ but not Wijsman convergent. \end{remark} Theorem~\ref{T:4} allows us to formulate the following problem. \begin{problem}\label{P2.9} Let $M$ be the set of all unbounded moduli. Describe the sets $S \subseteq M$ for which the conditions: \begin{itemize} \item $(A_k)$ is Wijsman convergent to $A$ \end{itemize} and \begin{itemize} \item The equality $[WS^f]-\lim A_k = A$ holds for every $f \in S$ \end{itemize} are equivalent for all metric spaces $(X, \rho)$, $(A_k) \subset CL(X)$ and $A \in CL(X)$. \end{problem} The following theorem is similar to Theorem~3.1 from~\cite{ab14}. \begin{theorem}\label{T:5} Let $(X,\rho)$ be a metric space, $f\colon [0,\infty)\to [0,\infty)$ be an unbounded modulus, $(A_i) \subset CL(X)$ and $A \in CL(X)$. Then $$ [WS^f]-\lim A_i = A $$ holds if and only if, for each $x \in X$, there exists $K_{x} \subseteq \mathbb{N}$ such that $$ d^{f}(K_x)= 0 \quad\text{and}\quad \lim_{i \in \mathbb{N}-K_x} d(x, A_i)= d(x,A).
$$ \end{theorem} \begin{proof} For every $K \subseteq \mathbb N$ and $n \in \mathbb N$ we write $K(n)$ for the set $$ K \cap \{1, \ldots, n\}. $$ Suppose \begin{equation}\label{T:5e1} [WS^f]-\lim A_i = A \end{equation} holds. For every $x \in X$ we must find a set $K_x \subseteq \mathbb N$ such that \begin{equation}\label{T:5e2} \lim_{i\in \mathbb N - K_x} d(x,A_i)=d(x,A) \end{equation} and \begin{equation}\label{T:5e3} \lim_{n \to \infty} \frac{f(|K_x(n)|)}{f(n)} = 0 \end{equation} holds. Let $x \in X$. For every $j \in \mathbb N$ define the set $B_j \subseteq \mathbb N$ by the rule: \begin{equation}\label{T:5e4} (i \in B_j) \Leftrightarrow \left(|d(x, A_i)-d(x,A)|\geq \frac{1}{j}\right). \end{equation} It is clear that $B_{j_1}\subseteq B_{j_2}$ holds whenever $j_2 \geq j_1$. If all $B_j$ are finite, then~\eqref{T:5e2} and~\eqref{T:5e3} are valid with $K_x = \varnothing$. Suppose $B_j$ are infinite for some $j \in \mathbb N$. If there is $B_{j_1}$ satisfying the condition \begin{itemize} \item $B_j - B_{j_1}$ is finite for every $j \in \mathbb N$, \end{itemize} then~\eqref{T:5e2} and~\eqref{T:5e3} follows from~\eqref{T:5e1} with $K_x = B_{j_1}$. (Note that~\eqref{T:5e3} follows from~\eqref{T:5e1}.) Let us consider the case when, for every $B_j$, there is $l$ such that $B_{l+j}-B_j$ is infinite. Define a sequence $(j_k)\subseteq \mathbb N$ recursively by the rule: \begin{itemize} \item if $k=1$, then $j_1$ is the smallest $j$ for which $B_j$ is infinite, \item if $k\geq 2$, then $j_k$ is the smallest $j$ with infinite $B_j - B_{j_{k-1}}$. \end{itemize} Write $B_1^*:=B_{j_1}$ and, for $k\geq 2$, $B_k^*:=B_{j_k} - B_{j_{k-1}}$. It follows from~\eqref{T:5e4} that \begin{equation}\label{T:5e5} (i \in B_1^*) \Leftrightarrow \left(|d(x, A_i)-d(x,A)|\geq \frac{1}{j_1}\right) \end{equation} and, for $k \geq 2$, \begin{equation}\label{T:5e6} (i \in B_k^*) \Leftrightarrow \left(\frac{1}{j_{k}} \leq |d(x, A_i)-d(x,A)|< \frac{1}{j_{k-1}}\right). 
\end{equation} It is easily seen that $B_{k_1}^*$ and $B_{k_2}^*$ are disjoint for all distinct $k_1$, $k_2 \in \mathbb N$. Let $(n_k) \subseteq\mathbb N$ be an infinite strictly increasing sequence. Write \begin{equation}\label{T:5e7} B^* :=\bigcup_{k=1}^{\infty} (B_k^* - \{1, \ldots, n_k\}). \end{equation} We claim that~\eqref{T:5e2} holds with $K_x = B^*$. To prove~\eqref{T:5e2} it suffices to show that the set \begin{equation}\label{T:5e8} K_{x,\varepsilon}^* := \{i\in (\mathbb N-B^*)\colon |d(x,A_i)-d(x,A)|\geq \varepsilon\} \end{equation} is finite for every $\varepsilon >0$. If $\varepsilon >0$, then we have either \begin{equation}\label{T:5e9} \varepsilon \geq \frac{1}{j_1} \end{equation} or there is $k\geq 2$ such that \begin{equation}\label{T:5e10} \frac{1}{j_{k-1}} > \varepsilon \geq \frac{1}{j_k}. \end{equation} Let $\varepsilon \geq \frac{1}{j_1}$ and let $i \in K_{x,\varepsilon}^*$. Then $i\in (\mathbb N-B^*)$ and \begin{equation}\label{T:5e11} |d(x,A_i)-d(x,A)|\geq \frac{1}{j_1} \end{equation} hold. Since $$ \mathbb N-B^* = \bigcap_{k=1}^{\infty} (\{1,\ldots, n_k\} \cup (\mathbb N-B_k^*)), $$ the condition $i \in \mathbb N-B^*$ implies $$ i \in \{1,\ldots, n_1\} \text{ or } i \in (\mathbb N-B_1^*). $$ If $i \in (\mathbb N-B_1^*)$, then using~\eqref{T:5e5} we obtain $$ |d(x,A_i)-d(x,A)|< \frac{1}{j_1}, $$ which contradicts~\eqref{T:5e11}. Hence $i \in \{1,\ldots, n_1\}$ holds. Thus if $\varepsilon \geq \frac{1}{j_1}$, then $K_{x,\varepsilon}^*$ is finite with $|K_{x,\varepsilon}^*| \leq n_1$. Similarly if $$ \frac{1}{j_{k-1}} > \varepsilon \geq \frac{1}{j_k} \text{ with } k \geq 2, $$ then, using~\eqref{T:5e6} instead of~\eqref{T:5e5}, we can prove the inequality $$ |K_{x,\varepsilon}^*| \leq n_k. $$ Limit relation~\eqref{T:5e2} follows. Now we prove that there exists an increasing infinite sequence $(n_k) \subseteq \mathbb N$ such that~\eqref{T:5e3} holds for $K_x=B^*$ with $B^*$ defined by~\eqref{T:5e7}. 
Equality~\eqref{T:5e1} implies that $d^f(B_j)=0$ holds for every $j \in \mathbb N$. Hence for given $\varepsilon_1>0$ there is $n_1 \in \mathbb N$ such that $$ \frac{f(\left|B_{j_1}(n)\right|)}{f(n)} \leq \varepsilon_1 $$ is valid for every $n \geq n_1$. Let $0< \varepsilon_2 \leq \frac{1}{2}\varepsilon_1$. Using the equality $d^f(B_{j_2})=0$ we can find $n_2 > n_1$ such that $$ \frac{f(\left|B_{j_2}(n)\right|)}{f(n)} \leq \varepsilon_2 $$ for all $n \geq n_2$. By induction on $k$ we can find $n_k > n_{k-1}$ which satisfies $$ \frac{f(\left|B_{j_k}(n)\right|)}{f(n)} \leq \frac{1}{2}\varepsilon_{k-1} \leq \left(\frac{1}{2}\right)^{k-1}\varepsilon_{1} $$ for all $n \geq n_k$. It follows from~\eqref{T:5e7} that, for every $k \in\mathbb N$, the inclusion $$ B^*(n) \subseteq B_{j_k} (n) $$ holds if $n \in [n_k, n_{k+1})$. Hence we have $$ \frac{f(\left|B^*(n)\right|)}{f(n)} \leq \left(\frac{1}{2}\right)^{k-1}\varepsilon_{1} $$ if $n \in [n_k, n_{k+1})$, $k \in \mathbb N$. The equality $$ \lim_{n\to\infty} \frac{f(\left|B^*(n)\right|)}{f(n)} = 0 $$ follows. Assume now that, for every $x \in X$, there is $K_x \subset \mathbb N$ such that $$ d^f(K_x)=0 \text{ and } \lim_{i \in \mathbb N-K_x} d(x, A_i) = d(x,A). $$ Let $x \in X$ and $\varepsilon >0$. Then there is $i_0 \in \mathbb N-K_x$ such that $$ \left|d(x,A_i) - d(x,A)\right| \leq \varepsilon $$ for all $i \in (\mathbb N-K_x)-\{1, \ldots, i_0\}$. Hence $$ \{i \in \mathbb N \colon \left|d(x,A_i) - d(x,A)\right| > \varepsilon\} \subseteq K_x \cup \{1, \ldots, i_0\}. $$ Equality $d^f(K_x)=0$ implies $d^f(K_x \cup \{1, \ldots, i_0\})=0$. The limit relation $$ [WS^f]-\lim A_i = A $$ follows. \end{proof} \section{Wijsman statistical convergence and Wijsman Ces\`{a}ro summability} The following example shows that Wijsman statistical convergence does not imply Wijsman Ces\`{a}ro summability. 
\begin{example}\label{E:2} Let $(X, \rho) = \mathbb{R}$ with the standard metric and let $(A_k)$ be defined as \[ A_{k}= \begin{cases} \{k\}, &\text{if $k$ is a square,} \\ \{0\}, &\text{otherwise}. \end{cases} \] This sequence is Wijsman statistically convergent to the set $\{0\}$ since \begin{equation*} \lim_{n \to \infty}\frac{1}{n}|\{k \leq n\colon |d(x,A_k) - d(x, \{0\})| \geq \varepsilon\}| =0 \end{equation*} holds for all $x \in \mathbb R$ and $\varepsilon >0$. Now, we show that this sequence is not Wijsman Ces\`{a}ro summable. For the sequence $(\sigma_{k}(0))$ of Ces\`{a}ro means of order one of the sequence $(d(0, A_k))$ we have \[ \sigma_{k}(0) = \begin{cases} \frac{(1^2+2^2+\cdots+n^2)}{n^2}, &\text{if } k = n^2,\ \text{for some}\ n \in \mathbb{N} \\ \frac{(1^2+2^2+\cdots+n^2)}{k}, & \text{if } n^2 < k < (n+1)^2,\ \text{for some}\ n \in \mathbb{N}. \end{cases} \] The sequence $(\sigma_{k}(0))$ is not convergent because $$ \lim_{n\to \infty} \frac{\sum_{1}^{n} k^2}{n^2} = \lim_{n\to \infty} \frac{1}{6}\, \frac{n(n+1)(2n+1)}{n^2} = \infty. $$ \end{example} We now give an example of sequence $(A_k) \subset CL(X)$ such that the sequence $(\sigma_k(x))$ of Ces\`{a}ro means of the sequence $(d(x, A_k))$ has a finite limit for every $x \in X$ but $(A_k)$ is not Wijsman Ces\`{a}ro summable to $A$ for any $A \in CL(X)$. \begin{example}\label{E:4} Let $(X, \rho) = \mathbb{R}$ with the standard metric and let $(A_k)$ be defined as \[ A_{k}= \begin{cases} \{-1\}, &\text{if k is even}, \\ \{1\}, &\text{if k is odd}. \end{cases} \] Let $x\in \mathbb R$. For the sequence $(\sigma_{k}(x))$ of Ces\`{a}ro means of order one of the sequence $(d(x,A_k))$ we have \[ \sigma_{k}(x)= \begin{cases} |x|, & \text{if $k$ is even and } x \notin [-1,1], \\ 1, & \text{if $k$ is even and } x \in [-1,1], \\ \left|x-\frac{1}{k}\right|, & \text{if $k$ is odd and } x \notin [-1,1], \\ 1+\frac{x}{k}, & \text{if $k$ is odd and } x\in [-1,1]. 
\end{cases} \] Consequently \begin{equation}\label{e2.8} \lim_{k\to\infty} \sigma_{k}(x)= \begin{cases} |x|, & \text{if } x \notin [-1,1],\\ 1, & \text{if } x \in [-1,1]. \end{cases} \end{equation} Now we prove that $(A_k)$ is not Wijsman Ces\`{a}ro summable. Indeed, suppose, on the contrary, that there is $A \in CL(X)$ with \begin{equation}\label{e2.9} \lim_{k\to\infty} \sigma_{k}(x)= d(x,A) \end{equation} for every $x \in \mathbb R$. Since $A$ is non-empty, there is $x_0\in A$. Using~\eqref{e2.8} and \eqref{e2.9} we obtain $$ 0=d(x_0, A) = \lim_{k\to\infty} \sigma_{k}(x_0) \text{ and } \lim_{k\to\infty} \sigma_{k}(x_0) \geq 1. $$ Thus $0\geq 1$ which is a contradiction. \end{example} \begin{remark}\label{R2.11} It seems to be interesting to find a criterion guaranteeing the Wijsman Ces\`{a}ro summability of $(A_k) \subset CL(X)$ to some $A \in CL(X)$ if the sequence $\bigl(\sigma_k(x)\bigr)$ of Ces\`{a}ro means of $\bigl(d(x,A_k)\bigr)$ is Ces\`{a}ro summable for every $x \in X$. \end{remark} In the next theorem we show that the Wijsman statistical convergence implies the Wijsman Ces\`{a}ro summability in the case of Wijsman bounded sequences. \begin{theorem}\label{T:8} Let $(X,\rho)$ be a metric space, let $A \in CL(X)$ and let $(A_k) \subset CL(X)$. If $(A_k)$ is Wijsman bounded and Wijsman statistically convergent to~$A$, then $(A_k)$ is Wijsman Ces\`{a}ro summable to $A$. \end{theorem} \begin{proof} Let $\varepsilon >0$, $x \in X$, and let $(A_k)$ be Wijsman bounded. For every $n \in \mathbb N$ define the sets $K_{x,\varepsilon}(n)$, $K_{x,\varepsilon}'(n)$ and $M_x$ as \begin{align*} K_{x,\varepsilon}(n)& := \{k \leq n\colon |d(x,A_k) - d(x,A)| \geq \varepsilon\},\\ K_{x,\varepsilon}'(n)& := \{1,\ldots,n\} - K_{x,\varepsilon}(n) \text{ and } M_x := \sup_{k} d(x,A_k) + d(x,A). \end{align*} Suppose $(A_k)$ is Wijsman statistically convergent to $A$. Then the limit relation $$ \lim_{n \to \infty} \frac{|K_{x,\varepsilon}(n)|}{n} = 0, $$ holds. 
Now we have \begin{multline*} \left|d(x,A) - \frac{1}{n}\sum_{k=1}^n d(x,A_k)\right| \leq\frac{1}{n}\sum_{k=1}^{n}\left|d(x,A_k) - d(x,A)\right| \\ =\frac{1}{n}\left(\sum_{k \in K_{x,\varepsilon}'(n)}|d(x,A_k) - d(x,A)| + \sum_{k\in K_{x,\varepsilon}(n)} |d(x,A_k) - d(x,A)|\right)\\ \leq \frac{(n- \left|K_{x,\varepsilon}(n)\right|)\varepsilon}{n} + \frac{1}{n}\left|K_{x,\varepsilon}(n)\right|M_x \leq \varepsilon + M_x \frac{\left|K_{x,\varepsilon}(n)\right|}{n}. \end{multline*} This implies the inequality $$ \limsup_{n\to \infty} \left|d(x,A) - \frac{1}{n}\sum_{k=1}^n d(x, A_k)\right| \leq \varepsilon. $$ Letting $\varepsilon$ tend to $0$ we obtain $$ \lim_{n \to \infty}\frac{1}{n}\sum_{k=1}^{n}d(x,A_k)= d(x,A). $$ Since $x$ is an arbitrary point of $X$, $(A_k)$ is Wijsman Ces\`{a}ro summable to~$A$. \end{proof} \begin{corollary}\label{T:9} Let $(X,\rho)$ be a bounded metric space, $A \in CL(X)$, $(A_k) \subset CL(X)$ and let $f\colon [0,\infty)\to [0,\infty)$ be an unbounded modulus. If $$ [WS^f]-\lim A_k = A, $$ then $(A_k)$ is Wijsman Ces\`{a}ro summable to $A$. \end{corollary} It follows from Theorem~\ref{T:2} and Theorem~\ref{T:8} because in each bounded metric space every sequence of non-empty closed sets is Wijsman bounded. \section{Wijsman strong Ces\`{a}ro summability with respect to a modulus} The well-known space $w$ of strongly Ces\`{a}ro summable sequences is defined as: \begin{equation*} w := \left\{(x_k)\colon \lim_{n \to \infty}\frac{1}{n}\sum_{k=1}^{n}|x_{k} - l| =0, \text{ for some } l \in \mathbb R\right\}. \end{equation*} Maddox~\cite{im86} extended the strong Ces\`{a}ro summability to that of strong Ces\`{a}ro summability with respect to a modulus $f$ and studied the space \begin{equation*} w(f) := \left\{(x_k)\colon \lim_{n \to \infty}\frac{1}{n}\sum_{k=1}^{n}f(|x_{k}-l|)=0,\text{ for some $l \in \mathbb R$}\right\}. 
\end{equation*} In the year 2012, Nuray and Rhoades \cite{fr12} introduced the notion of Wijsman strong Ces\`{a}ro summability of sequences of sets and discussed its relation with Wijsman statistical convergence. In this section, we introduce a new concept of Wijsman strong Ces\`{a}ro summability with respect to a modulus $f$. It is shown that, under certain conditions on $f$, Wijsman strong Ces\`{a}ro summability w.r.t. $f$ implies $f$-Wijsman statistical convergence and that the concepts of $f$-Wijsman statistical convergence and of Wijsman strong Ces\`{a}ro summability w.r.t. $f$ are equivalent for Wijsman bounded sequences. \begin{definition}\label{D:3} Let $(X,\rho)$ be a metric space and let $f\colon [0,\infty) \to [0,\infty)$ be a modulus. A sequence $ (A_{k}) \subset CL(X)$ is said to be Wijsman strongly Ces\`{a}ro summable to $A \in CL(X)$ with respect to $f$, if the equality \begin{equation*} \lim_{n \to \infty}\frac{1}{n}\sum_{k=1}^{n}f\left(|d(x,A_k) - d(x,A)| \right) =0 \end{equation*} holds for each $x \in X$. We write $$ [Ww^{f}]-\lim A_k = A $$ if $(A_k)$ is Wijsman strongly Ces\`{a}ro summable to $A$ w.r.t. $f$. \end{definition} \begin{remark}\label{R3.2} For $f(x) = x$, the concept of Wijsman strong Ces\`{a}ro summability w.r.t. $f$ reduces to that of Wijsman strong Ces\`{a}ro summability. \end{remark} \begin{theorem}\label{T:10} Let $(X,\rho)$ be a metric space, $(A_k) \subset CL(X)$, $A \in CL(X)$ and let $f\colon [0,\infty)\to [0,\infty)$ be a modulus. If $(A_k)$ is Wijsman strongly Ces\`{a}ro summable to $A$, then \begin{equation}\label{T:10e1} [Ww^f]-\lim A_k = A. \end{equation} \end{theorem} \begin{proof} Suppose that \begin{equation}\label{T:10e2} \lim_{n \to \infty}\frac{1}{n}\sum_{k=1}^{n}|d(x,A_k) - d(x,A)|=0 \end{equation} holds for each $x \in X$. Let $\varepsilon > 0$ and choose $\delta \in (0,1)$ such that $f(t) < \varepsilon$ for $t \in [0, \delta]$. 
Consider $$ \sum_{k=1}^{n} f(|d(x,A_k) - d(x,A)|) = \sum_{1} + \sum_{2}, $$ where the first summation is over the set $\{k\leq n\colon |d(x,A_k) - d(x,A)| \leq \delta\}$ and the second is over $\{k\leq n\colon |d(x,A_k) - d(x,A)| > \delta\}$. Then $\sum_{1} \leq n \varepsilon$. To estimate $\sum_{2}$ we use the inequality \begin{equation*} \bigl|d(x,A_k) - d(x,A)\bigr| < \frac{\bigl|d(x,A_k) - d(x,A)\bigr|}{\delta} \leq \left\lceil|d(x,A_k) - d(x,A)|\delta^{-1}\right\rceil, \end{equation*} where $\lceil\cdot\rceil$ is the ceiling function. The modulus functions are increasing and subadditive. Hence \begin{multline*} f(\left|d(x,A_k) - d(x,A)\right|) \leq f(1)\left\lceil\left|d(x,A_k) - d(x,A)\right| \delta^{-1}\right\rceil \\ \leq 2f(1) \left|d(x,A_k) - d(x,A)\right|\delta^{-1} \end{multline*} holds whenever $|d(x,A_k) - d(x,A)| > \delta$. Thus we have $$ \sum_{2}\leq 2 f(1) \delta^{-1} \sum_{k=1}^{n} \left|d(x,A_k) - d(x,A)\right|, $$ which together with $\sum_{1} \leq n\varepsilon$ yields \[ \frac{1}{n}\sum_{k=1}^{n} f\bigl(\left|d(x,A_k) - d(x,A)\right|\bigr) \leq \varepsilon + 2\, f(1)\, \delta^{-1}\, \frac{1}{n} \sum_{k=1}^{n} |d(x,A_k) - d(x,A)|. \] Now using~\eqref{T:10e2} we obtain $$ \limsup_{n\to \infty} \frac{1}{n}\sum_{k=1}^{n} f\bigl(|d(x,A_k) - d(x,A)|\bigr) \leq \varepsilon. $$ Equality~\eqref{T:10e1} follows by letting $\varepsilon$ to $0$. \end{proof} The next example shows that~\eqref{T:10e1} does not imply that $(A_k)$ is Wijsman strongly Ces\`{a}ro summable to $A$. \begin{example}\label{E:3} Let $(X, \rho)=[0,\infty)$ with the standard metric and let $f(x) = \log(1+x)$. Let us consider a sequence $(A_k)$ defined by \[ A_{k}= \begin{cases} \{k\}, &\text{if } k \in \{2^r\colon r \in \mathbb N\},\\ \{0\}, & \text{otherwise}. \end{cases} \] Then, for every $x \in [0,\infty)$, we have \begin{equation}\label{eq3.2} d(x,A_k)= \begin{cases} |x-k|, & \text{if } k \in \{2^r\colon r \in \mathbb N\},\\ x, & \text{otherwise}. 
\end{cases} \end{equation} For any numerical sequence $(x_i) \subset [0,\infty)$, the limit relation $$ \lim_{n\to \infty} \frac{1}{n} \sum_{i=1}^n f(x_i)=0 $$ holds if and only if $$ \lim_{r\to \infty} \frac{1}{2^r}\sum_{i=2^r}^{2^{r+1}-1} f(x_i)=0. $$ (See Maddox~\cite[p.~523]{im87}). Hence $$ [Ww^f]-\lim A_k = \{0\} $$ holds if and only if we have \begin{equation}\label{eq3.3} \lim_{r\to \infty} \frac{1}{2^r}\sum_{k=2^r}^{2^{r+1}-1} \log\Bigl(1+\bigl|d(x,A_k)-d(x,\{0\})\bigr|\Bigr)=0 \end{equation} for every $x\in [0,\infty)$. Using~\eqref{eq3.2} we see that $$ \sum_{k=2^r}^{2^{r+1}-1} \log\Bigl(1+\bigl|d(x,A_k)-d(x,\{0\})\bigr|\Bigr) = \log\Bigl(1+\bigl|\left|x-2^r\right|-x\bigr|\Bigr). $$ For sufficiently large $r$ we have $$ 1+ \bigl|\left|x-2^r\right|-x\bigr| = 2^r-2x+1. $$ Consequently the left-hand side of~\eqref{eq3.3} is equal to $$ \lim_{r\to \infty} \frac{1}{2^r}\log(2^r-2x+1). $$ The last limit is $0$. Thus $(A_k)$ is Wijsman strongly Ces\`{a}ro summable to $\{0\}$ w.r.t.\ $f$. Now, using~\eqref{eq3.2} we obtain $$ \frac{1}{2^r}\sum_{k=2^r}^{2^{r+1}-1} \bigl|d(x,A_k) - d(x, \{0\})\bigr| = \frac{2^r-2x}{2^r} $$ for sufficiently large $r$. Thus $$ \lim_{r\to \infty} \frac{1}{2^r}\sum_{k=2^r}^{2^{r+1}-1} \bigl|d(x,A_k) - d(x, \{0\})\bigr| =1, $$ which implies that $(A_k)$ is not Wijsman strongly Ces\`{a}ro summable to $\{0\}$. \end{example} The following lemma was proved by Maddox in~\cite{im87}. \begin{Lemma}\label{L:3.5} Let $f\colon [0, \infty) \to [0, \infty)$ be a modulus. Then the limit $\lim_{t\to \infty} \frac{f(t)}{t}$ exists and is finite, and the equality \begin{equation}\label{L:3.5e1} \lim_{t\to \infty} \frac{f(t)}{t} = \inf\{t^{-1}f(t)\colon t \in (0, \infty)\} \end{equation} holds. \end{Lemma} \begin{proof} Write \begin{equation}\label{L:3.5e2} \beta := \inf\{t^{-1}f(t)\colon t \in (0, \infty)\}. \end{equation} It suffices to show that \begin{equation}\label{L:3.5e3} \limsup_{t\to \infty} \frac{f(t)}{t} \leq \beta. 
\end{equation} Let $\varepsilon>0$ and let $t_0 \in (0, \infty)$ be such that $$ \beta \geq \frac{f(t_0)}{t_0}-\varepsilon. $$ The last inequality is equivalent to \begin{equation}\label{L:3.5e4} f(t_0) \leq t_0 (\beta+\varepsilon). \end{equation} For every $t \in (0,\infty)$ we have \begin{equation}\label{L:3.5e5} t = t_0 \left\lfloor\frac{t}{t_0}\right\rfloor + \left(t-t_0 \left\lfloor\frac{t}{t_0} \right\rfloor\right) \leq t_0\left(\left\lfloor \frac{t}{t_0} \right\rfloor+1\right), \end{equation} where $\lfloor\cdot\rfloor$ is the floor function. Using the monotonicity and subadditivity of $f$ and \eqref{L:3.5e4}--\eqref{L:3.5e5} we obtain $$ \frac{f(t)}{t} \leq \frac{f(t_0) \left\lfloor\frac{t}{t_0}\right\rfloor+f(t_0)}{t} \leq \frac{t_0(\beta+\varepsilon)\left\lfloor\frac{t}{t_0}\right\rfloor + f(t_0)}{t} $$ for all sufficiently large $t$. Hence $$ \limsup_{t\to \infty} \frac{f(t)}{t} \leq (\beta+\varepsilon) \limsup_{t\to \infty} \frac{t_0 \left\lfloor\frac{t}{t_0}\right\rfloor}{t} = \beta+\varepsilon. $$ Inequality~\eqref{L:3.5e3} follows by letting $\varepsilon$ tend to $0$. \end{proof} \begin{theorem}\label{T:11} Let $(X,\rho)$ be a metric space, $A \in CL(X)$ and $(A_k) \subset CL(X)$. If $f\colon [0, \infty) \to [0, \infty)$ is a modulus such that \begin{equation}\label{T:11e1} \beta:=\lim_{t\to \infty} \frac{f(t)}{t} >0 \text{ and } [Ww^f] - \lim A_k =A, \end{equation} then $(A_k)$ is Wijsman strongly Ces\`{a}ro summable to $A$. \end{theorem} \begin{proof} Let a modulus $f$ satisfy condition~\eqref{T:11e1}. By Lemma~\ref{L:3.5} we have $$ \beta = \inf\{t^{-1}f(t)\colon t >0\}. $$ Consequently \begin{equation}\label{T:11e2} f(t)\geq \beta t \end{equation} holds for every $t \geq 0$. It follows from~\eqref{T:11e2} that \[ \frac{1}{n}\sum_{k=1}^{n} |d(x,A_k) - d(x,A)| \leq \beta^{-1} \frac{1}{n}\sum_{k=1}^{n} f(|\,d(x,A_k) - d(x,A)|), \] holds for every $x \in X$. Using the second term of~\eqref{T:11e1} we see that $(A_k)$ is Wijsman strongly Ces\`{a}ro summable to $A$. 
\end{proof} \begin{theorem}\label{T:12} Let $(X,\rho)$ be a metric space, $A \in CL(X)$ and $(A_k)\subset CL(X)$. Suppose that $f\colon [0,\infty) \to [0,\infty)$ is an unbounded modulus which satisfies the inequalities \begin{equation}\label{T:12e1} \lim_{t \to \infty}\frac{f(t)}{t}>0 \text{ and }f(xy)\geq c\,f(x)\,f(y) \end{equation} with some $c \in (0, \infty)$ for all $x$, $y\in [0,\infty)$. Then the following statements hold: \begin{enumerate} \item[$(i)$] If $(A_k)$ is Wijsman strongly Ces\`{a}ro summable to $A$ w.r.t. $f$, then $(A_k)$ is $f$-Wijsman statistically convergent to $A$; \item [$(ii)$] If $(A_k)$ is Wijsman bounded and $f$-Wijsman statistically convergent to~$A$, then $(A_k)$ is Wijsman strongly Ces\`{a}ro summable to $A$ w.r.t. $f$. \end{enumerate} \end{theorem} \begin{proof} Let $$ K_{x,\varepsilon}(n):=\{k \leq n\colon |d(x,A_k)-d(x,A)|\geq \varepsilon\} $$ for all $x \in X$, $\varepsilon \in (0,\infty)$ and $n \in \mathbb N$. $(i)$ Let $[Ww^f]-\lim A_k=A$. By subadditivity of moduli we have $$ \sum_{k=1}^{n} f(|d(x,A_k)- d(x,A)|) \geq f\left(\sum_{k=1}^{n} |d(x,A_k)- d(x,A)|\right) $$ for every $x \in X$. Using the second inequality from~\eqref{T:12e1} we obtain \begin{equation*} f\left(\sum_{k \in K_{x,\varepsilon}(n)} |d(x,A_k)- d(x,A)|\right)\geq f\bigl(\left|K_{x,\varepsilon}(n)\right|\varepsilon\bigr) \geq c f\bigl(\left|K_{x,\varepsilon}(n)\right|\bigr)f(\varepsilon). \end{equation*} Hence \begin{equation}\label{T:12e3} \frac{1}{n}\sum_{k=1}^{n}\,f(|\,d(x,A_k) - d(x,A)|) \geq c\left(\frac{\,f\left(\left|K_{x,\varepsilon}(n)\right|\right)}{f(n)}\right) \left(\frac{f(n)}{n}\right) \,f(\varepsilon). \end{equation} This inequality, the first inequality from~\eqref{T:12e1}, $[Ww^f]-\lim A_k=A$ and $\lim_{\varepsilon \to 0} f(\varepsilon)=0$ imply $[WS^f]-\lim A_k=A$. $(ii)$ Let $(A_k)$ be Wijsman bounded and let $[WS^f]-\lim A_k=A$. 
Since $(A_k)$ is Wijsman bounded, we have \begin{equation}\label{T:12e2} M_x :=\sup_{k} d(x,A_k) + d(x,A)<\infty. \end{equation} For all $n \in \mathbb{N}$, $x\in X$ and $\varepsilon >0$, we write $K_{x,\varepsilon}'(n):= \{1,\ldots,n\} - K_{x,\varepsilon}(n)$. Now, \begin{multline*} \frac{1}{n}\sum_{k=1}^{n}\,f(|\,d(x,A_k) - d(x,A)|)\\ =\frac{1}{n}\sum_{k \in K_{x,\varepsilon}(n)} f(|d(x,A_k) - d(x,A)|) + \frac{1}{n}\sum_{k \in K_{x,\varepsilon}'(n)} f(|d(x,A_k) - d(x,A)|)\\ \leq \frac{\left|K_{x,\varepsilon}(n)\right|}{n} f(M_x)+ \frac{1}{n}n\,f(\varepsilon). \end{multline*} Letting $n \to \infty$ we get \begin{align*} \limsup_{n\to\infty}\frac{1}{n}\sum_{k=1}^{n}\,f(|\,d(x,A_k) - d(x,A)|) \leq f(\varepsilon), \end{align*} in view of Theorem \ref{T:2} and \eqref{T:12e2}. Now the equality $$ [Ww^f]-\lim A_k=A $$ follows from $\lim_{\varepsilon \to 0} f(\varepsilon)=0$. \end{proof} \begin{remark}\label{R:6} If we take $f(x) = x$ in Theorem \ref{T:12}, we obtain Theorem $6$ of Nuray and Rhoades \cite{fr12}. \end{remark} It seems to be interesting to find a solution of the following problem. \begin{problem}\label{P3.9} Find characteristic properties of moduli $f$ for which the equalities $[WS^f] - \lim A_k = A$ and $[Ww^f] - \lim A_k = A$ are equivalent for all bounded metric spaces $(X, \rho)$, $(A_k) \subset CL(X)$ and $A \in CL(X)$. \end{problem} \noindent\textbf{Acknowledgments}. The research of the third author was supported by a grant of the State Fund for Fundamental Research (project F71/20570) and partially supported by grant 0115U000136 of the Ministry of Education and Science of Ukraine. \begin{footnotesize} \end{footnotesize} \noindent Vinod K. Bhardwaj\\ Department of Mathematics, Kurukshetra University,\\ Kurukshetra-$136119$, INDIA\\ email: \texttt{vinodk\[email protected]}\\[.2cm] Shweta Dhawan\\ Department of Mathematics, KVA DAV College for Women,\\ Karnal-$132001$, INDIA\\ email: \texttt{shwetadhawan\[email protected]}\\[.2cm] Oleksiy A. 
Dovgoshey\\ Function Theory Department,\\ Institute of Applied Mathematics and Mechanics of NASU,\\ Dobrovolskogo str.~$1$, Slovyansk $84100$, UKRAINE\\ email: \texttt{[email protected]} \end{document}
\begin{document} \title{The space of stability conditions on the local projective plane} \author{Arend Bayer} \address{Department of Mathematics, University of Connecticut U-3009, 196 Auditorium Road, Storrs, CT 06269-3009, USA} \email{[email protected]} \urladdr{http://www.math.uconn.edu/~bayer/} \author{Emanuele Macr\`i} \address{Department of Mathematics, University of Utah, 155 South 1400 East, Salt Lake City, UT 84112-0090, USA \& Mathematical Institute, University of Bonn, Endenicher Allee 60, D-53115 Bonn, Germany} \email{[email protected]} \urladdr{http://www.math.uni-bonn.de/~macri/} \keywords{Bridgeland stability conditions, Space of stability conditions, Derived category, Mirror symmetry, Local projective plane} \subjclass[2000]{14F05 (Primary); 14J32, 14N35, 18E30 (Secondary)} \date{\today} \begin{abstract} We study the space of stability conditions on the total space of the canonical bundle over the projective plane. We explicitly describe a chamber of geometric stability conditions, and show that its translates via autoequivalences cover a whole connected component. We prove that this connected component is simply-connected. We determine the group of autoequivalences preserving this connected component, which turns out to be closely related to $\Gamma_1(3)$. Finally, we show that there is a submanifold isomorphic to the universal covering of a moduli space of elliptic curves with $\Gamma_1(3)$-level structure. The morphism is $\Gamma_1(3)$-equivariant, and is given by solutions of Picard-Fuchs equations. This result is motivated by the notion of $\ensuremath{\mathbb{P}}i$-stability and by mirror symmetry. \end{abstract} \maketitle \section{Introduction} In this paper, we study the space of stability conditions on the derived category of the local $\ensuremath{\mathbb{P}}^2$. Our approach is based on the chamber decomposition given by the wall-crossing for stable objects of the class of skyscraper sheaves of points. 
\subsection{Motivation} Consider a projective Calabi-Yau threefold $Y$ containing a projective plane $\ensuremath{\mathbb{P}}^2 \subset Y$. Ideally, one would like to study the space of Bridgeland stability conditions on its derived category $\ensuremath{\mathbb{D}}b(Y)$. Understanding the geometry of this space would give insights on the group of autoequivalences of $\ensuremath{\mathbb{D}}b(Y)$ and give a global picture of mirror symmetry. Understanding wall-crossing for counting invariants of semistable objects would have many implication for Donaldson-Thomas type invariants on $Y$. However, no single example of stability condition on a projective Calabi-Yau threefold has been constructed. Instead, in this article we focus on the full subcategory $\ensuremath{\mathbb{D}}b_{\ensuremath{\mathbb{P}}^2}(Y)$ of complexes concentrated on $\ensuremath{\mathbb{P}}^2$. The local model for this situation is the total space $X = \mathop{\mathrm{Tot}}\nolimits \ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}(-3)$ of the canonical bundle of $\ensuremath{\mathbb{P}}^2$, called the ``local $\ensuremath{\mathbb{P}}^2$'': $\ensuremath{\mathbb{D}}b_{\ensuremath{\mathbb{P}}^2}(Y)$ is then equivalent to the derived category $\ensuremath{\mathbb{D}}D_0 := \ensuremath{\mathbb{D}}b_0(X)$ of coherent sheaves supported on the zero-section. Denote by $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$ the space of stability conditions $\sigma=(Z, \ensuremath{\mathbb{P}}P)$ on $\ensuremath{\mathbb{D}}D_0$ (see Appendix \ref{app:BridgelandFramework} for a quick introduction to stability conditions). It is a three-dimensional complex manifold coming with a local homeomorphism $\ensuremath{\mathbb{Z}}Z \colon \mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0) \to \ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D_0), \ensuremath{\mathbb{C}}) \cong \ensuremath{\mathbb{C}}^3$, $\ensuremath{\mathbb{Z}}Z((Z,\ensuremath{\mathbb{P}}P))=Z$. 
The goal of this article is to study the space $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$ as a test case for the properties we would expect in the case of $Y$. This space was first studied in \cite{Bridgeland:stab-CY}, where it was suggested that the space is closely related to the Frobenius manifold of the quantum cohomology of $\ensuremath{\mathbb{P}}^2$. Further, understanding how Donaldson-Thomas type counting invariants of semistable objects depend on the stability conditions $\sigma \in \mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$ (i.e., wall-crossing phenomena) would be highly interesting. For example, due to the derived equivalence $\ensuremath{\mathbb{D}}D_0 \cong \ensuremath{\mathbb{D}}b([\ensuremath{\mathbb{C}}^3/\ensuremath{\mathbb{Z}}_3])$ of \cite{Mukai-McKay} it would give a new explanation for the relation between the Gromov-Witten potentials of $X$ and of $\ensuremath{\mathbb{C}}^3/\ensuremath{\mathbb{Z}}_3$ (``crepant resolution conjecture'', see \cite{Coates-CRC, CCIT:computing}). It could also explain the modularity properties of the Gromov-Witten potential of $X$ observed in \cite{ABK:topological_strings}. While these questions remain open, our results give a good description of a connected component of $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$, explain its relation to autoequivalences of $\ensuremath{\mathbb{D}}D_0$, and do give a global mirror symmetry picture. \subsection{Geometric stability conditions} \label{secti:geomstab} In order to study $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$, we use one of its chamber decompositions. We consider a chamber $U \subset \mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$ consisting of ``geometric'' stability conditions, which have the property that all skyscraper sheaves $k(x)$, $x\in\ensuremath{\mathbb{P}}^2$, are stable of the same phase (see Definition \ref{def:GeomStability} for the precise definition). 
Our first result is a complete description of the geometric chamber (see Theorem \ref{thm:geom-stability}): $U$ is an open, connected, simply-connected, 3-dimensional subset of $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$. Up to shifts, a stability condition $(Z, \ensuremath{\mathbb{P}}P) \in U$ is determined by its central charge $Z$, and we give explicit inequalities cutting out the set $\ensuremath{\mathbb{Z}}Z(U) \subset \ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D_0), \ensuremath{\mathbb{C}}) \cong \ensuremath{\mathbb{C}}^3$ of central charges $Z$ for $(Z, \ensuremath{\mathbb{P}}P) \in U$. The most interesting part of the boundary of $U$ has a fractal-like structure; its shape is determined by the set of Chern classes of semistable vector bundles on $\ensuremath{\mathbb{P}}^2$. Let $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$ be the connected component of $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$ containing $U$ and let $\overline{U}$ be the closure of $U$ in $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$. We can directly construct every \emph{wall} of $U$, i.e., the components of the boundary $\partial U=\overline{U}\setminus U$ of $U$ (see Theorem \ref{thm:boundary}). We use this to prove the following result (see Corollary \ref{cor:ConnectedComponent}): \begin{thm-int}\label{thmi:translates} The translates of $\overline{U}$ under the group of autoequivalences generated by spherical twists at spherical sheaves in $\ensuremath{\mathbb{D}}D_0$ cover the whole connected component $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$. \end{thm-int} The translates of $U$ are disjoint, and each translate is a chamber on which the moduli space of stable objects of class $[k(x)]$ is constant. 
\subsection{Topology of $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$.} In \cite{Bridgeland:stab-CY}, Bridgeland described an open connected subset $\mathop{\mathrm{Stab}}_a$ of $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$ consisting of ``algebraic'' stability conditions that can be described in terms of quivers. We will see that the subset $\mathop{\mathrm{Stab}}_a$ is not dense (in particular, it does not contain the ``large volume limit'' point: see Proposition \ref{prop:geomvsalg}). Nevertheless, by combining Bridgeland's description of $\mathop{\mathrm{Stab}}_a$ with Theorem \ref{thmi:translates}, we prove the following result: \begin{thm-int} \label{thmi:sc} The connected component $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$ is simply-connected. \end{thm-int} \subsection{Autoequivalences} In our situation, the local homeomorphism $\ensuremath{\mathbb{Z}}Z : \mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0) \to \ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D_0), \ensuremath{\mathbb{C}})$ is not a covering of its image. This is a fundamental difference to the case of Calabi-Yau 2-categories (as studied in \cite{Bridgeland:K3,Thomas:stability,Bridgeland:ADE,Ishii-Ueda-Uehara, HMS:generic_K3s}). Further, there is no non-trivial subgroup of autoequivalences of $\ensuremath{\mathbb{D}}D_0$ that acts as a group of deck transformation of the map $\ensuremath{\mathbb{Z}}Z$. But, in any case, using Theorem 1 we can classify all autoequivalences $\mathop{\mathrm{Aut}}\nolimits^\dag(\ensuremath{\mathbb{D}}D_0)$ which preserve the connected component $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$: \begin{thm-int} \label{thmi:auto} The group $\mathop{\mathrm{Aut}}\nolimits^\dag(\ensuremath{\mathbb{D}}D_0)$ is isomorphic to a product $\ensuremath{\mathbb{Z}}\times\Gamma_1(3)\times\mathop{\mathrm{Aut}}\nolimits(\hat X)$. 
\end{thm-int} Recall that the congruence subgroup $\Gamma_1(3) \subset \mathop{\mathrm{SL}}(2, \ensuremath{\mathbb{Z}})$ (see Section \ref{sec:autoequivalences} for the definition) is a group on two generators $\alpha$ and $\beta$ subject to the relation $(\alpha\beta)^3 = 1$. It is isomorphic to the subgroup generated by the spherical twist at the structure sheaf $\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}$ of the zero-section $\ensuremath{\mathbb{P}}^2\ensuremath{\hookrightarrow} X$, and by the tensor product with $\ensuremath{\mathcal O}_X(1)$. The group $\ensuremath{\mathbb{Z}}$ is identified with the subgroup generated by the shift by $1$ functor $[1]$ and $\mathop{\mathrm{Aut}}\nolimits(\hat X)$ denotes the group of automorphisms of the formal completion $\hat X$ of $X$ along $\ensuremath{\mathbb{P}}^2$. \subsection{$\ensuremath{\mathbb{P}}i$-stability and mirror symmetry} Stability conditions on a derived category were originally introduced by Bridgeland in \cite{Bridgeland:Stab} to give a mathematical foundation for the notion of $\ensuremath{\mathbb{P}}i$-stability in string theory, in particular in Douglas' work, see e.g. \cite{Douglas:stability, Aspinwall-Douglas:stability} and references therein. However, it has been understood that only a subset of Bridgeland stability conditions is physically meaningful, i.e., there is a submanifold $M$ of the space of stability conditions on $Y$ that parametrizes $\ensuremath{\mathbb{P}}i$-stability conditions, and that is isomorphic to (the universal covering of) the complex K\"ahler moduli space. In fact, $M$ is (the universal covering of) a slice of the moduli space of SCFTs containing the sigma model associated to $Y$; in the physics literature, it is often referred to as the ``Teichm\"uller space''. By mirror symmetry, $M$ is also isomorphic to the universal covering of the moduli space of mirror partners $\widehat{Y}$ of $Y$. 
As explained in \cite{Bridgeland:spaces}, this leads to a purely algebro-geometric mirror symmetry statement; we prove such a result in Section \ref{sec:MS}: The mirror partner for the local $\ensuremath{\mathbb{P}}^2$ is the universal family over the moduli space $\ensuremath{{\mathcal M}_{\Gamma_1(3)}}$ of elliptic curves with $\Gamma_1(3)$-level structures. Its fundamental group is $\Gamma_1(3)$. Let $\ensuremath{\widetilde{\mathcal M}_{\Gamma_1(3)}}$ be the universal cover, with $\Gamma_1(3)$ acting as the group of deck transformations. \begin{thm-int}\label{thmi:MS} There is an embedding $I \colon \ensuremath{\widetilde{\mathcal M}_{\Gamma_1(3)}} \ensuremath{\hookrightarrow} \mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$ which is equivariant with respect to the action by $\Gamma_1(3)$ on both sides. \end{thm-int} Here the $\Gamma_1(3)$-action on $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$ is induced by the subgroup $\Gamma_1(3) \subset \mathop{\mathrm{Aut}}\nolimits^\dag(\ensuremath{\mathbb{D}}D_0)$ identified in Theorem \ref{thmi:auto}. On the level of central charges, the embedding is given in terms of a Picard-Fuchs differential equation: for a fixed $E \in \ensuremath{\mathbb{D}}D_0$, the function $(\ensuremath{\mathbb{Z}}Z \circ I)(z)(E) \colon \ensuremath{\widetilde{\mathcal M}_{\Gamma_1(3)}} \to \ensuremath{\mathbb{C}}$ is a solution of the Picard-Fuchs equation. In particular, while classical enumerative mirror symmetry gives an interpretation of formal expansions of solutions of Picard-Fuchs equations at special points of $M$ in terms of genus-zero Gromov-Witten invariants on $Y$, the space of stability conditions allows us to interpret these solutions globally. \subsection{Relation to existing work} Various examples of stability conditions in local Calabi-Yau situations have been studied in the literature. 
In particular, the local derived category of curves inside surfaces has been studied in \cite{Thomas:stability,Bridgeland:ADE,Ishii-Uehara:An,Ishii-Ueda-Uehara,Okada:CY2,MMS:inducing,Brav-HThomas:ADE}, and results similar to Theorem \ref{thmi:translates}, Theorem \ref{thmi:sc}, and Theorem \ref{thmi:auto} have been obtained. Some examples of stability conditions on projective spaces were studied in \cite{Macri:Curves,Aaron-Daniele,Ohkawa}. Other local Calabi-Yau threefold cases were studied in \cite{Toda:stab-crepant_res, Toda:CY-fibrations}, and, as already mentioned, an open subset of $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$ has been described in \cite{Bridgeland:stab-CY}. However, our approach follows the ideas in \cite{Bridgeland:K3} more closely than most of the above mentioned articles, as we describe stability conditions in terms of stability of sheaves on $\ensuremath{\mathbb{P}}^2$, rather than in terms of exceptional collections and quivers. Applying this approach in our situation is possible due to the classical results of Dr\'ezet and Le Potier \cite{Drezet-LePotier}; in particular, the fractal boundary of $\ensuremath{\mathbb{Z}}Z(U)$ discussed in Section \ref{secti:geomstab} is directly due to their results. At the same time, Sections \ref{sec:AlgebraicStability1} and \ref{sec:AlgebraicStability2} rely heavily on the work in \cite{Goro-Ruda:Exceptional} on exceptional collection and mutations. Stability conditions around the orbifold point can be understood in terms of stability of quiver representations as studied in \cite{Alastair-Ishii}; in particular our Theorem \ref{thmi:translates} could be understood as a derived version of \cite[Theorem 1.2]{Alastair-Ishii} applied to our situation. There does not seem to be an equivalent of Theorem \ref{thmi:MS} in the literature for a Calabi-Yau 3-category; however, it is motivated by the conjectural picture described in \cite[Section 7]{Bridgeland:spaces}. 
There are many articles in the mathematical physics literature related to $\ensuremath{\mathbb{P}}i$-stability and mirror symmetry for $\ensuremath{\mathbb{C}}^3/\ensuremath{\mathbb{Z}}_3$ and the local $\ensuremath{\mathbb{P}}^2$ (as well as other local del Pezzo surfaces), and our presentation in Section \ref{sec:MS} is very much guided by \cite{Aspinwall:Dbranes-CY} and \cite{ABK:topological_strings}. In particular, Theorem \ref{thmi:MS} is based on the computations of analytic continuations and monodromy for solutions of the Picard-Fuchs equation of the mirror of the local $\ensuremath{\mathbb{P}}^2$ in \cite{AGM:measuring, Aspinwall:Dbranes-CY, ABK:topological_strings}; in some sense, we are just lifting their results from the level of central charges to the level of stability conditions. In order for this to work, the ``central charges predicted by physicists'' had to survive a non-trivial test: they had to satisfy the inequalities of Definition \ref{def:setG} (see Observation (\ref{obs:inequality}), page \pageref{obs:inequality}). The fact that they survived this test is somewhat reassuring for the case of compact Calabi-Yau threefolds: identifying similar inequalities (which would be based on inequalities for Chern classes of stable objects), and checking that the central charges satisfy them, is the major obstacle towards constructing stability conditions on compact Calabi-Yau threefolds. \subsection{Open questions} Bridgeland's conjecture \cite[Conj.\ 1.3]{Bridgeland:stab-CY} remains open; it would identify $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$ with an open subset of the extended Frobenius manifold of the quantum cohomology of $\ensuremath{\mathbb{P}}^2$. 
Theorems \ref{thmi:translates} and \ref{thmi:sc} of this paper essentially complete the study of $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$ as started in \emph{loc.\ cit.}; and Theorem \ref{thmi:MS} clarifies the discussion in \emph{loc.\ cit.} about the ``small quantum cohomology locus'', as this locus corresponds to the image of $\ensuremath{\widetilde{\mathcal M}_{\Gamma_1(3)}}$. What is missing from a proof of the whole conjecture, as pointed out in \emph{loc.\ cit.}, is still a better understanding of the Frobenius manifold side. It seems natural to conjecture that the full group $\mathop{\mathrm{Aut}}\nolimits(\ensuremath{\mathbb{D}}D_0)$ of autoequivalences of $\ensuremath{\mathbb{D}}D_0$ preserves the connected component $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$; in fact, this last one may be the only three-dimensional component of $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$. In this case, Theorem \ref{thmi:auto} would give a complete description of $\mathop{\mathrm{Aut}}\nolimits(\ensuremath{\mathbb{D}}D_0)$. Maybe the most intriguing question about $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$ related to our results is whether there is an intrinsic characterization of the image of the map $I$ of Theorem \ref{thmi:MS}, a question raised in other contexts in \cite{Bridgeland:spaces}. To this end, note that the central charge on the image can also be given in terms of an analytic continuation of the genus zero Gromov-Witten potential of $X$ (see \cite{ABK:topological_strings, Iritani:survey}; that this agrees with our description using the mirror is classical enumerative mirror symmetry). But the genus-zero Gromov-Witten potential is in turn determined by counting invariants of one-dimensional torsion sheaves (\cite{PT1, Toda:generating}), i.e., counting invariants of stable objects close to the large-volume limit. 
It would also be interesting to generalize some of the results of this paper to other ``local del Pezzo surfaces''. In such a case, the starting point would be a generalization of the result in \cite{Drezet-LePotier} on the description of the Chern classes of stable sheaves. Already for $\ensuremath{\mathbb{P}}^1\times\ensuremath{\mathbb{P}}^1$ the situation is more complicated: see \cite{Rudakov:Quadric,Rudakov:DelPezzoSurfaces} for results in this direction. \subsection{Plan of the paper} The paper is organized as follows. In Section \ref{sec:GeoStability} we define geometric stability conditions and state Theorem \ref{thm:geom-stability}, which classifies them. Sections \ref{sec:Constraining} and \ref{sec:constructing} are devoted to the proof of Theorem \ref{thm:geom-stability}. In Section \ref{sec:boundary} we describe the boundary $\partial U$ of the geometric chamber and prove Theorem \ref{thmi:translates}. Algebraic stability conditions are introduced in Section \ref{sec:AlgebraicStability1} in order to prove Theorem \ref{thmi:sc} (whose proof will take Section \ref{sec:AlgebraicStability2}). In Section \ref{sec:autoequivalences} we study the group of autoequivalences and prove Theorem \ref{thmi:auto}. Section \ref{sec:MS} discusses how the previous results fit into expectations from mirror symmetry, and includes the proof of Theorem \ref{thmi:MS}. Finally, three appendices complete the paper. In Appendix \ref{app:DP}, we review the results of Dr\'ezet and Le Potier as we need them in the proof of Theorem \ref{thm:geom-stability}. Appendix \ref{app:BridgelandFramework} is a brief introduction to stability conditions and contains an improved criterion for the existence of Harder-Narasimhan filtrations. In Appendix \ref{app:ineq} we give a sketch of the proof that the central charges we define in Section \ref{sec:MS} satisfy the inequalities in Definition \ref{def:setG}. \subsection{Notation} We work over the complex numbers $\ensuremath{\mathbb{C}}$. 
We let $X$ denote the total space of $\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}(-3)$, and $i\colon\ensuremath{\mathbb{P}}^2\ensuremath{\hookrightarrow} X$ the inclusion of the zero-section. We let $\ensuremath{\mathbb{C}}oh_0:=\ensuremath{\mathbb{C}}oh_{\ensuremath{\mathbb{P}}^2}X\subset\ensuremath{\mathbb{C}}oh X$ be the subcategory of coherent sheaves on $X$ supported (set-theoretically) on the zero-section. We write $\ensuremath{\mathbb{D}}D_0 = \ensuremath{\mathbb{D}}b_0(X)$ for the subcategory of $\ensuremath{\mathbb{D}}b(\ensuremath{\mathbb{C}}oh X)$ of complexes with bounded cohomology, such that all of its cohomology sheaves are in $\ensuremath{\mathbb{C}}oh_0$. (Note that $\ensuremath{\mathbb{D}}D_0 \cong \ensuremath{\mathbb{D}}b(\ensuremath{\mathbb{C}}oh_0)$ as observed in \cite{Ishii-Uehara:An}, Notation and Convention.) The space of stability conditions on $\ensuremath{\mathbb{D}}D_0$ will be denoted by $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$, and its two subsets of geometric and algebraic stability conditions by $U$ and $\mathop{\mathrm{Stab}}_a$, respectively (see Definitions \ref{def:GeomStability} and \ref{def:algebraicstability}). An object $S$ in $\ensuremath{\mathbb{D}}D_0$ is called \emph{spherical} if $\mathop{\mathrm{Ext}}\nolimits^p(S,S)\cong\ensuremath{\mathbb{C}}$ for $p=0,3$ and is zero otherwise. For a spherical object $S$ we denote by ${\mathop{\mathrm{ST}}\nolimits}_S$ the spherical twist associated to $S$, defined by the exact triangle \[ \ensuremath{\mathbb{H}}om^*(S,M)\otimes S\stackrel{ev}{\longrightarrow} M\longrightarrow {\mathop{\mathrm{ST}}\nolimits}_S(M), \] for $M\in\ensuremath{\mathbb{D}}D_0$ (see \cite{Seidel-Thomas:braid}). By abuse of notation, we will write $\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}(n) \in \ensuremath{\mathbb{D}}D_0$ for the spherical objects $i_*\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}(n)$. 
For $x \in \ensuremath{\mathbb{P}}^2$ we denote by $k(x)$ the skyscraper sheaf in $X$ of length one concentrated at $x$. The Grothendieck group of $\ensuremath{\mathbb{D}}D_0$ is denoted by $K(\ensuremath{\mathbb{D}}D_0)$. It is isomorphic to $\ensuremath{\mathbb{Z}}^{\oplus 3}$. For any $E \in\ensuremath{\mathbb{D}}D_0$, we write $r(E), d(E), c(E)$ for the components of the Chern character of its push-forward to $\ensuremath{\mathbb{P}}^2$; more precisely, if $\pi\colon X\to\ensuremath{\mathbb{P}}^2$ is the projection and $\mathop{\mathrm{ch}}\nolimits \colon K(\ensuremath{\mathbb{P}}^2) \to A_*(\ensuremath{\mathbb{P}}^2) \otimes \ensuremath{\mathbb{Q}}$ the Chern character with values in the Chow ring, then we write \[ \mathop{\mathrm{ch}}\nolimits(\pi_*(E)) = r(E) \cdot [\ensuremath{\mathbb{P}}^2] + d(E) \cdot [\mathrm{line}]+ c(E) \cdot [\mathrm{pt}]. \] For a complex number $z\in\ensuremath{\mathbb{C}}$, we write $\ensuremath{\mathbb{R}}e z$ (resp.\ $\Im z$) for its real (resp.\ imaginary) part. \section{Geometric stability conditions}\label{sec:GeoStability} We assume familiarity with the notion of stability conditions on a derived category; see Appendix \ref{app:BridgelandFramework} for a short summary, and \cite{Bridgeland:Stab}, \cite[Section 3.4]{Kontsevich-Soibelman:stability} for a complete reference. We begin by constructing and classifying ``geometric'' stability conditions on $\ensuremath{\mathbb{D}}D_0$. Loosely speaking, geometric stability conditions are those that are most closely connected to the geometry of sheaves on $X$; in the definition below, we require that the simple objects of $\ensuremath{\mathbb{C}}oh_0$ remain stable, but it will also turn out that the semistable objects are at most two-term complexes of sheaves in $\ensuremath{\mathbb{C}}oh_0$. 
\begin{Def}\label{def:GeomStability} A stability condition $\sigma$ on $\ensuremath{\mathbb{D}}D_0$ is called \emph{geometric} if the following two conditions are satisfied: \begin{enumerate} \item All skyscraper sheaves $k(x)$ of closed points $x\in\ensuremath{\mathbb{P}}^2$ are $\sigma$-stable of the same phase. \item \label{enum:full} The connected component of $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$ containing $\sigma$ is full, that is it has maximal dimension equal to $3$. \end{enumerate} \end{Def} We write $U$ for the set of geometric stability conditions, and refer to it as the ``geometric chamber''; in fact, we will see that it is precisely one of the chambers with respect to the chamber decomposition given by the wall-crossing phenomenon for semistable objects of class $[k(x)]$. Part (\ref{enum:full}) of the definition is a technical condition to ensure that the wall-crossing for semistable objects behaves nicely (see \cite[Section 9]{Bridgeland:K3}); it is equivalent to the ``support property'' introduced by Kontsevich-Soibelman (see Proposition \ref{prop:SupportProperty}). We recall that a Bridgeland stability condition can be constructed by giving the heart of a bounded t-structure $\ensuremath{\mathcal A} \subset \ensuremath{\mathbb{D}}D_0$, and a compatible central charge $Z \colon K(\ensuremath{\mathcal A}) = K(\ensuremath{\mathbb{D}}D_0) \to \ensuremath{\mathbb{C}}$ that sends objects in $\ensuremath{\mathcal A}$ to the semi-closed upper half plane (see Remark \ref{rmk:tstruct}). The t-structures appearing in geometric stability conditions are given by the now familiar notion of \emph{tilting} (see \cite{Happel-al:tilting}): For purely 2-dimensional sheaves $\ensuremath{\mathcal F} \in \ensuremath{\mathbb{C}}oh_0$, the slope function $\mu(\ensuremath{\mathcal F}) = \frac{d(\ensuremath{\mathcal F})}{r(\ensuremath{\mathcal F})}$ gives a notion of slope-stability (as in Definition \ref{def:stablesheaf}). 
By the same arguments as in the case of a projective variety, Harder-Narasimhan filtrations exist. Thus we can follow \cite[Lemma 5.1]{Bridgeland:K3} to make the following definition: \begin{Def} For any $B \in \ensuremath{\mathbb{R}}$, let $\left(\ensuremath{\mathbb{C}}oh_0^{>B}, \ensuremath{\mathbb{C}}oh_0^{\le B}\right)$ be the torsion pair in $\ensuremath{\mathbb{C}}oh_0$ determined by: \begin{itemize} \item $\ensuremath{\mathbb{C}}oh_0^{\le B}$ is generated (by extensions) by semistable sheaves of slope $\mu \le B$, and \item $\ensuremath{\mathbb{C}}oh_0^{>B}$ is generated by semistable sheaves of slope $\mu > B$ and zero- or one-dimensional torsion sheaves. \end{itemize} Let $\ensuremath{\mathbb{C}}oh_0^{\sharp(B)} \subset \ensuremath{\mathbb{D}}D_0$ be the tilt of $\ensuremath{\mathbb{C}}oh_0$ at the torsion pair $\left(\ensuremath{\mathbb{C}}oh_0^{>B}, \ensuremath{\mathbb{C}}oh_0^{\le B}\right)$, that is \[ \ensuremath{\mathbb{C}}oh_0^{\sharp(B)}=\left\{E\in\ensuremath{\mathbb{D}}D_0\,\colon\,\begin{array}{l}\bullet\ \ensuremath{\mathbb{H}}H^i(E)=0,\text{ for all }i\neq0,-1\\ \bullet\ \ensuremath{\mathbb{H}}H^0(E)\in\ensuremath{\mathbb{C}}oh_0^{>B}\\ \bullet\ \ensuremath{\mathbb{H}}H^{-1}(E)\in\ensuremath{\mathbb{C}}oh_0^{\le B}\end{array}\right\}. \] \end{Def} The structure of central charges compatible with $\ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$ (and thus, as we will see, the structure of the whole geometric chamber) depends tightly on the set of Chern classes for which there exist stable torsion-free sheaves. In the case of $\ensuremath{\mathbb{P}}^2$, Dr\'ezet and Le Potier have given a complete description of this set (see Appendix \ref{app:DP} for more details). 
It is most naturally described in terms of the discriminant $\ensuremath{\mathbb{D}}elta(\ensuremath{\mathcal F})$, which is defined as \[ \ensuremath{\mathbb{D}}elta(\ensuremath{\mathcal F}) = \frac{d(\ensuremath{\mathcal F})^2}{2r(\ensuremath{\mathcal F})^2} - \frac{c(\ensuremath{\mathcal F})}{r(\ensuremath{\mathcal F})}. \] Recall that a vector bundle $\ensuremath{\mathcal E}$ on $\ensuremath{\mathbb{P}}^2$ is called \emph{exceptional} if $\ensuremath{\mathbb{H}}om(\ensuremath{\mathcal E}, \ensuremath{\mathcal E}) = \ensuremath{\mathbb{C}}$, and $\mathop{\mathrm{Ext}}\nolimits^p(\ensuremath{\mathcal E}, \ensuremath{\mathcal E}) = 0$ for $p > 0$. For an exceptional vector bundle $\ensuremath{\mathcal E}_\alpha$ of rank $r_{\alpha}$ and slope $\alpha$, it follows from Riemann-Roch that the discriminant is given by $\ensuremath{\mathbb{D}}elta_\alpha = \frac 12 - \frac 1{2r_\alpha^2}$. Also note that an exceptional vector bundle can equivalently be characterized by being slope-stable with discriminant smaller than $1/2$: see \cite[Theorem 4.1]{Goro-Ruda:Exceptional} and \cite[Lemme (4.2)]{Drezet-LePotier}. As the slopes of exceptional vector bundles can be constructed explicitly (see Theorem \ref{thm:DP-A}), it remains to describe the slopes and discriminants of non-exceptional stable torsion-free sheaves. As explained in \cite[Section 16]{LePotier}, one can slightly reformulate the results of \cite{Drezet-LePotier} and construct a function $\delta_\mathop{\mathrm{inf}}\nolimitsty^{DP} \colon \ensuremath{\mathbb{R}} \to [\frac 12, 1]$. It is periodic of period $1$ and Lipschitz-continuous with Lipschitz constant $\frac 32$. We refer to Appendix \ref{app:DP} for the precise definition of $\delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}$. 
Its construction is motivated by the following observation: If $\ensuremath{\mathcal F}_\beta$ is a slope-stable sheaf with $\beta < \alpha$, then $\ensuremath{\mathbb{H}}om(\ensuremath{\mathcal E}_\alpha, \ensuremath{\mathcal F}_\beta) = 0$; if we additionally assume $\beta > \alpha - 3$, then, by Serre duality, also $\mathop{\mathrm{Ext}}\nolimits^2(\ensuremath{\mathcal E}_\alpha, \ensuremath{\mathcal F}_\beta) = 0$ and hence $\mathop{\mathrm{ch}}\nolimitsi(\ensuremath{\mathcal E}_\alpha, \ensuremath{\mathcal F}_\beta) \le 0$. Using Riemann-Roch this yields an inequality of the form $\ensuremath{\mathbb{D}}elta(\ensuremath{\mathcal F}_\beta) \ge p_\alpha(\beta)$ for $\alpha - 3 < \beta < \alpha$, with $p_\alpha(x)$ being a quadratic polynomial. The function $\delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}$ is the supremum of all the quadratic polynomials $p_\alpha$ restricted to the ranges where the inequality is valid. The main result of \cite{Drezet-LePotier} is that $\ensuremath{\mathbb{D}}elta \ge \delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}(\mu)$ is not only a necessary, but also a sufficient condition for the existence of a stable torsion-free sheaf of slope $\mu$ and discriminant $\ensuremath{\mathbb{D}}elta$. For later use, we paraphrase their result as follows: Define $S_\mathop{\mathrm{inf}}\nolimitsty \subset \ensuremath{\mathbb{R}}^2$ to be the closed subset lying above the graph of $\delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}$, i.e., \[ S_\mathop{\mathrm{inf}}\nolimitsty = \stv{(\mu, \ensuremath{\mathbb{D}}elta) \in \ensuremath{\mathbb{R}}^2}{\ensuremath{\mathbb{D}}elta \ge \delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}(\mu)}. 
\] \begin{Thm}[\cite{Drezet-LePotier}] \label{thm:DP} Let $S \subset \ensuremath{\mathbb{Q}}^2$ be the set of pairs $(\mu(\ensuremath{\mathcal F}), \ensuremath{\mathbb{D}}elta(\ensuremath{\mathcal F}))$ where $\ensuremath{\mathcal F}$ is any slope-stable torsion-free sheaf on $\ensuremath{\mathbb{P}}^2$. Similarly, let $S_E \subset \ensuremath{\mathbb{Q}}^2$ be the corresponding set for slopes and discriminants of exceptional vector bundles. Then $S$ is the disjoint union \[ S = S_E \ensuremath{\mathaccent\cdot\cup} \left( S_\mathop{\mathrm{inf}}\nolimitsty \cap \ensuremath{\mathbb{Q}}^2 \right). \] The set $S_E$ has no accumulation points in $\ensuremath{\mathbb{R}}^2 \setminus S_\mathop{\mathrm{inf}}\nolimitsty$. \end{Thm} We explain this reformulation of Dr\'ezet and Le Potier's result in Appendix \ref{app:DP}, along with their explicit description of the set $S_E$; see also Figure \ref{fig:DP-plot}. \begin{figure} \caption{$\delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}$ and the set $S_E$} \label{fig:DP-plot} \end{figure} \begin{Def}\label{def:setG} We define the set $G \subset \ensuremath{\mathbb{C}}^2$ as the set of pairs $a, b \in \ensuremath{\mathbb{C}}$ satisfying the following three inequalities (where we set $B := -\frac{\Im b}{\Im a}$ if $a$ satisfies the first inequality): \begin{align} \Im a &> 0, \label{ineq:Ima}\\ \ensuremath{\mathbb{R}}e b &> -B \cdot \ensuremath{\mathbb{R}}e a - \delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}(B) + \frac 12 B^2, \label{ineq:genslope} \intertext{and, in case there exists an exceptional vector bundle of slope $B$ and discriminant $\ensuremath{\mathbb{D}}elta_B$,} \ensuremath{\mathbb{R}}e b &> -B \cdot \ensuremath{\mathbb{R}}e a - \ensuremath{\mathbb{D}}elta_B + \frac 12 B^2. 
\label{ineq:exceptslope} \end{align} \end{Def} \begin{Thm}\label{thm:geom-stability} For $a, b \in \ensuremath{\mathbb{C}}$, denote by $Z_{a, b} \colon K(\ensuremath{\mathbb{D}}D_0) \to \ensuremath{\mathbb{C}}$ the central charge given by \begin{equation}\label{eq:Zab} Z_{a,b}(E)=-c(E)+ad(E)+br(E) \end{equation} for $E \in \ensuremath{\mathbb{D}}D_0$. Then there exists a geometric stability condition $\sigma_{a,b} = (Z_{a,b}, \ensuremath{\mathbb{P}}P_{a,b})$ with $Z_{a, b}$ as above if and only if $(a, b) \in G \subset \ensuremath{\mathbb{C}}^2$. Its heart is, up to shifts, given by $\ensuremath{\mathbb{P}}P_{a,b}((0,1]) = \ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$. The shifts $k(x)[n]$ of skyscraper sheaves are the only stable objects of class $\pm [k(x)]$. Any geometric stability condition is equivalent to a stability condition $\sigma_{a,b}$ up to the action of a unique element in $\ensuremath{\mathbb{C}}$. \end{Thm} The action of $z \in \ensuremath{\mathbb{C}}$ is given in Remark \ref{rmk:GroupAction}: it is the lift to the space of stability conditions of the multiplication by $\exp(z)$ on $\ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D_0), \ensuremath{\mathbb{C}})$. The theorem can be rephrased as stating that $U/\ensuremath{\mathbb{C}} \cong G$, with a section given by $(a, b) \mapsto \sigma_{a,b}$. Later, in Remark \ref{Rem:sc-slice}, we will see that this slice of the $\ensuremath{\mathbb{C}}$-action can be extended to a whole connected component of $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$. The theorem will be proved in the following two sections. The best visualization of the set of allowed central charges is given by the following observation: as long as $\Im a > 0$, the central charge can be thought of as a surjective map $K(\ensuremath{\mathbb{D}}D_0)_\ensuremath{\mathbb{R}} \cong \ensuremath{\mathbb{R}}^3 \to \ensuremath{\mathbb{C}} \cong \ensuremath{\mathbb{R}}^2$. 
Up to the action of $\mathop{\mathrm{GL}}_2^+(\ensuremath{\mathbb{R}})$ on $\ensuremath{\mathbb{R}}^2$ (which does not affect the set of stable objects), this map is determined by its kernel, and by the orientation induced on $K(\ensuremath{\mathbb{D}}D_0)_\ensuremath{\mathbb{R}} / \mathop{\mathrm{Ker}}\nolimits Z$. As long as $\Im a > 0$, the orientation does not change. The kernel intersects the affine hyperplane $r = 1$ of $K(\ensuremath{\mathbb{D}}D_0)_\ensuremath{\mathbb{R}}$ in a single point. The inequalities are equivalent to requiring that this point lies below the graph of $\ensuremath{\mathbb{D}}elta = \delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}(\mu)$, and not on any of the rays going up vertically from a point in $S_E$ (see Figure \ref{fig:DP-plot}). However, for several reasons it is helpful to classify geometric stability conditions up to the action of $\ensuremath{\mathbb{C}}$, rather than the action of $\mathop{\mathrm{GL}}_2^+(\ensuremath{\mathbb{R}})$. The subgroup $\ensuremath{\mathbb{C}}$ acts on $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$ with closed orbits, it has no stabilizers, and it has a well-behaved quotient $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)/\ensuremath{\mathbb{C}}$. The stability conditions $\sigma_{a, b}$ constitute a slice of this action on $U \subset \mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$, and the boundary of $U$ can be identified, up to the $\ensuremath{\mathbb{C}}$-action, with the boundary of the set $\stv{\sigma_{a, b}}{(a,b) \in G}$. None of these statements would hold for the $\mathop{\mathrm{GL}}_2^+(\ensuremath{\mathbb{R}})$-action, and the picture of the preceding paragraph only gives a partial picture of the boundary of $U$: one can see that every ray starting at a point of $S_E$ going up vertically may give two walls in the boundary $\partial U$ of the set $U$, but we cannot see that many of these walls intersect at points where the central charge lies in the real line. 
\section{Constraining geometric stability conditions}\label{sec:Constraining} In this section we will show that geometric stability conditions can only be of the form given in Theorem \ref{thm:geom-stability}. The general idea is the same as in \cite{Bridgeland:K3}: if we assume that the skyscraper sheaves $k(x)$ are stable of phase 1, then $\ensuremath{\mathbb{H}}om$-vanishing helps to constrain the form of objects in $\ensuremath{\mathbb{P}}P((0,1])$, and we can identify $\ensuremath{\mathbb{P}}P((0,1])$ with an explicit tilt of the standard t-structure. By the existence of a well-behaved chamber decomposition for the wall-crossing for stable objects of class $[k(x)]$, the set of geometric stability conditions is open, and we need to prove inequalities for the central charge only when it is defined over $\ensuremath{\mathbb{Q}}$. The proof will be broken into several lemmata and propositions. The following observation shows that the bound of Theorem \ref{thm:DP} translates into bounds for stable objects in $\ensuremath{\mathbb{C}}oh_0$: \begin{Lem}\label{lem:mustabilitypf} A sheaf $\ensuremath{\mathcal F} \in \ensuremath{\mathbb{C}}oh_0$ is a pure slope-stable sheaf if and only if it is the push-forward $\ensuremath{\mathcal F} = i_* \ensuremath{\mathcal F}_0$ of some slope-stable pure sheaf $\ensuremath{\mathcal F}_0 \in \ensuremath{\mathbb{C}}oh \ensuremath{\mathbb{P}}^2$. \end{Lem} \begin{Prf} Since $i_* \colon \ensuremath{\mathbb{C}}oh \ensuremath{\mathbb{P}}^2 \ensuremath{\hookrightarrow} \ensuremath{\mathbb{C}}oh_0$ embeds $\ensuremath{\mathbb{C}}oh \ensuremath{\mathbb{P}}^2$ as a full subcategory, closed under subobjects and quotients, and since $i_*$ preserves the ordering by slopes, it follows that $\ensuremath{\mathcal F}_0 \in \ensuremath{\mathbb{C}}oh \ensuremath{\mathbb{P}}^2$ is stable if and only if $i_* \ensuremath{\mathcal F}_0$ is stable. Now assume that $\ensuremath{\mathcal F}$ is stable. Then $\mathop{\mathrm{End}} \ensuremath{\mathcal F} = \ensuremath{\mathbb{C}} \cdot \mathop{\mathrm{Id}}\nolimits$. 
Let $Z$ be the scheme-theoretic support of $\ensuremath{\mathcal F}$. By definition, its global sections act faithfully on $\ensuremath{\mathcal F}$, so $H^0(\ensuremath{\mathcal O}_Z) \cong \ensuremath{\mathbb{C}}$. Hence $Z$ must be contained scheme-theoretically in the fiber of the origin under the contraction $X \ensuremath{\twoheadrightarrow} \mathop{\mathrm{Spec}} H^0(\ensuremath{\mathcal O}_X) \cong \ensuremath{\mathbb{C}}^3/\ensuremath{\mathbb{Z}}_3$, as otherwise the image of $H^0(\ensuremath{\mathcal O}_X) \to H^0(\ensuremath{\mathcal O}_Z)$ would be non-trivial. But the scheme-theoretic fiber of the origin is exactly $\ensuremath{\mathbb{P}}^2$, and so $\ensuremath{\mathcal F}$ is the push-forward $i_* \ensuremath{\mathcal F}_0$ of some sheaf $\ensuremath{\mathcal F}_0 \in \ensuremath{\mathbb{C}}oh \ensuremath{\mathbb{P}}^2$ on $\ensuremath{\mathbb{P}}^2$. \end{Prf} Now assume we are given a geometric stability condition. After a rescaling by $\ensuremath{\mathbb{C}}$, we may assume that all skyscraper sheaves $k(x)$ of closed points $x\in\ensuremath{\mathbb{P}}^2$ are stable with phase $1$ and $Z(k(x)) = -1$. \begin{Lem}[{\cite[Lemma 10.1]{Bridgeland:K3}}]\label{lem:geom-stability} Let $(Z,\ensuremath{\mathbb{P}}P)$ be a stability condition such that the skyscraper sheaves $k(x)$ are stable of phase 1, with $Z(k(x)) = -1$. \begin{enumerate} \item\label{coh-constraint} For any object $E \in \ensuremath{\mathbb{P}}P((0,1])$, its cohomology sheaves $\ensuremath{\mathbb{H}}H^i(E)$ vanish unless $i = 0, -1$. \item \label{H-1_pure} Further, for any such $E \in \ensuremath{\mathbb{P}}P((0,1])$ the cohomology sheaf $\ensuremath{\mathbb{H}}H^{-1}(E)$ is pure of dimension 2. \item \label{stableinP1} If $E \in \ensuremath{\mathbb{P}}P(1)$ is stable and $E \neq k(x)$ for all $x \in \ensuremath{\mathbb{P}}^2$, then there is a vector bundle $\ensuremath{\mathcal F}$ on $\ensuremath{\mathbb{P}}^2$ such that $E \cong i_* \ensuremath{\mathcal F}[1]$. 
\end{enumerate} \end{Lem} \begin{Prf} If $E \in \ensuremath{\mathbb{P}}P((0,1))$, then $\ensuremath{\mathbb{H}}om(E, k(x)[i]) = 0$ for $i < 0$, and $\ensuremath{\mathbb{H}}om(k(x)[i], E) = \ensuremath{\mathbb{H}}om(E, k(x)[3+i]) = 0$ for $i \ge 0$ and $x \in \ensuremath{\mathbb{P}}^2$. Since $E$ is supported on $\ensuremath{\mathbb{P}}^2$, all homomorphisms with shifts of skyscraper sheaves outside the zero-section are zero. We can therefore apply \cite[Prop.\ 5.4]{Bridgeland-Maciocia:K3Fibrations} and deduce that $E$ is quasi-isomorphic to a 3-term complex of locally free sheaves $E^{-2}\stackrel{d^{-2}}{\longrightarrow} E^{-1} \to E^0$. Hence $\ensuremath{\mathbb{H}}H^{-2}(E)$ is torsion-free on $X$; since $\ensuremath{\mathbb{H}}H^{-2}(E) \in \ensuremath{\mathbb{C}}oh_0$, it must vanish. This implies the first claim for such $E$. This also shows that $\ensuremath{\mathbb{H}}H^{-1}(E)$ is the cokernel of an injective map from a locally free sheaf to a torsion-free sheaf, which implies the second claim. If $E$ is stable of phase 1, then additionally $\ensuremath{\mathbb{H}}om(E, k(x)) = 0$ (as they are both stable objects of the same phase). Hence $E$ is isomorphic to a two-term complex of vector bundles $E^{-2}\stackrel{d^{-2}}{\longrightarrow}E^{-1}$. By the same argument as in the previous case, the map $d^{-2}$ must be injective, so that $E$ is isomorphic to the shift of a sheaf: $E \cong \ensuremath{\mathcal F}'[1]$. Since $E$ is stable, $\ensuremath{\mathcal F}'$ can only have scalar endomorphisms, and thus $\ensuremath{\mathcal F}'$ is the push-forward of a sheaf $\ensuremath{\mathcal F}$ on $\ensuremath{\mathbb{P}}^2$. Using $0 = \ensuremath{\mathbb{H}}om(i_* k(x),i_* \ensuremath{\mathcal F}[1]) \cong \ensuremath{\mathbb{H}}om(k(x) \oplus k(x)[1],\ensuremath{\mathcal F}[1])$, it follows that $\ensuremath{\mathcal F}$ is a vector bundle. Since all the assertions of the lemma are properties that are closed under extensions, this finishes its proof. 
\end{Prf} Recall that, for a full stability condition $\sigma=(Z,\ensuremath{\mathbb{P}}P)$, \begin{equation} \label{eq:def-metric} \| W\|_{\sigma}:=\sup\left\{\frac{|W(E)|}{|Z(E)|}\colon E\text{ is }\sigma\text{-stable}\right\} \end{equation} defines a metric on $\ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D), \ensuremath{\mathbb{C}})$ (see Appendix \ref{app:BridgelandFramework} for more details). The next result is based on \cite[Section 9]{Bridgeland:K3}: \begin{Prop}\label{prop:chambers} Let $\ensuremath{\mathbb{D}}D$ be a triangulated category such that $K(\ensuremath{\mathbb{D}}D)$ is a finite-dimensional lattice, and let $\mathop{\mathrm{Stab}}^* \subset \mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D)$ be a full connected component of its space of stability conditions. Fix a primitive class $\alpha \in K(\ensuremath{\mathbb{D}}D)$, and an arbitrary set $S \subset \ensuremath{\mathbb{D}}D$ of objects of class $\alpha$. Then there exists a collection of walls $W^S_\beta$, $\beta \in K(\ensuremath{\mathbb{D}}D)$, with the following properties: \begin{enumerate} \item Every wall $W^S_\beta$ is a closed submanifold with boundary of real codimension one. \item The collection $W^S_\beta$ is locally finite (i.e., every compact subset $K \subset \mathop{\mathrm{Stab}}^*$ intersects only a finite number of walls). \item \label{enum:semistable-on-wall} For every stability condition $(Z, \ensuremath{\mathbb{P}}P) \in W^S_\beta$, there exists a phase $\phi$ and an inclusion $F_\beta \ensuremath{\hookrightarrow} E_\alpha$ in $\ensuremath{\mathbb{P}}P(\phi)$ with $[F_\beta] = \beta$ and some $E_\alpha \in S$. \item \label{enum:chambers} If $C \subset \mathop{\mathrm{Stab}}^*$ is a connected component of the complement of $\bigcup_{\beta \in K(\ensuremath{\mathbb{D}}D)} W^S_\beta$, and $\sigma_1, \sigma_2 \in C$, then an object $E_\alpha \in S$ is $\sigma_1$-stable if and only if it is $\sigma_2$-stable. 
\end{enumerate} \end{Prop} \begin{Prf} For a class $\beta \in K(\ensuremath{\mathbb{D}}D)$ let $V^S_\beta$ be the set of stability conditions $(Z, \ensuremath{\mathbb{P}}P)$ for which there exists an inclusion as in part (\ref{enum:semistable-on-wall}). Since $\alpha$ is primitive, each $V^S_\beta$ is contained in the codimension-one subset with $\Im \frac{Z(\beta)}{Z(\alpha)} = 0$. We first want to show that there are only finitely many $\beta$ for which $V^S_\beta$ intersects an open ball $B_{\frac 18}(\sigma)$ of radius $\frac 18$ around $\sigma = (Z, \ensuremath{\mathbb{P}}P)$: Given $\sigma$ and $S$, let $I_\sigma(S) \subset K(\ensuremath{\mathbb{D}}D)$ be the set of all classes $\beta$ for which there exists $\phi \in \ensuremath{\mathbb{R}}$ with $Z(\alpha) \in \ensuremath{\mathbb{R}}_{>0}\cdot e^{i\pi \phi}$ and a strict inclusion $F_\beta \ensuremath{\hookrightarrow} E$ in the quasi-abelian category $\ensuremath{\mathbb{P}}P((\phi - \frac 14, \phi + \frac 14))$ with $[F_\beta]=\beta$ and $E \in S$. Since the metric $\|\cdot\|_\sigma$ is finite, and since $K(\ensuremath{\mathbb{D}}D)$ is a discrete subgroup of $K(\ensuremath{\mathbb{D}}D)\otimes \ensuremath{\mathbb{R}} \cong \ensuremath{\mathbb{R}}^n$, there exist only finitely many classes $\gamma \in K(\ensuremath{\mathbb{D}}D)$ that have a $\sigma$-semistable object $F_\gamma$ of class $[F_\gamma] = \gamma$ satisfying $\abs{Z(F_\gamma)} < \abs{Z(\alpha)}$. It follows that the set $I_\sigma(S)$ is also finite (as each HN filtration factor of $F_\beta$ is an object $F_\gamma$ as considered in the previous sentence). But if $V^S_\beta$ intersects $B_{\frac 18}(\sigma)$, then it follows from \cite[Lemma 7.5]{Bridgeland:Stab} that $\beta \in I_\sigma(S)$. 
An object $E$ of class $\alpha$ is $(Z', \ensuremath{\mathbb{P}}P')$-semistable for $(Z', \ensuremath{\mathbb{P}}P') \in B_{\frac 18}(\sigma)$ if and only if $\Im \frac{Z'(\beta)}{Z'(\alpha)} \le 0$ for every $\beta \in I_\sigma(\{E\})$---and it is stable if and only if the inequalities are strict. Repeating this argument for every possible subobject $F_\beta$, it follows that inside the codimension one subset $\Im \frac{Z'(\beta)}{Z'(\alpha)} = 0$, the set $V^S_\beta$ is a finite union of subsets, each of which is cut out by a finite number of inequalities of the form $\Im \frac{Z'(\beta')}{Z'(\alpha)} \le 0$ for some $\beta' \in I_\sigma(S)$. We let $W^S_\beta$ be the union of all codimension-one components of $V^S_\beta$. It remains to prove claim (\ref{enum:chambers}). It is sufficient to consider the case $\sigma_1, \sigma_2 \in B_{\frac 18}(\sigma) \cap C$. Assume that there is an object $E \in S$ that is $\sigma_1$-stable but not $\sigma_2$-stable. Then on every path $\gamma \colon [0,1] \to B_{\frac 18}(\sigma) \cap C$ connecting $\sigma_1$ with $\sigma_2$, there is a point $\gamma(t)$ on which $E$ is strictly semistable, i.e., $\gamma(t) \in V^S_\beta \cap C$ for some $\beta \in I_\sigma(S)$ and $t \in (0, 1]$. But by the definition of the walls $W^S_\beta$, the set $V^S_\beta \cap C$ has codimension at least two, and hence we may choose $\gamma$ such that for $t \in (0, 1)$, it avoids all of the finitely many non-empty subsets $V^S_\beta \cap C \subset C$ for $\beta \in I_\sigma(S)$, in other words we have that $E$ is $\gamma(t)$-stable for $t \in (0, 1)$, and $\sigma_2 \in V^S_\beta \cap C$ for some $\beta \in I_\sigma(S)$. In particular, $\sigma_2$ is contained in the set $\Im \frac{Z(\beta)}{Z(\alpha)} = 0$, and $E$ will not be stable in the subset of $B_{\frac 18}(\sigma) \cap C$ with $\Im \frac{Z(\beta)}{Z(\alpha)} \le 0$. 
On the other hand, the set $C \setminus \bigcup V^S_\beta$ is path-connected, and by the previous argument $E$ is stable on all of it. This is a contradiction. (In other words, we proved in the last step that higher-codimension components of $V^S_\beta$ always come from objects $E_\alpha$ that are semistable on this component, and unstable at any nearby point.) \end{Prf} As can be seen from the proof, the proposition holds for any family of stability conditions satisfying the \emph{support property} of \cite[Section 1.2]{Kontsevich-Soibelman:stability}. In fact, in our situation fullness and the support property are equivalent, see Proposition \ref{prop:SupportProperty}. \begin{Cor}\label{cor:openness} The set $U$ of geometric stability conditions is open in the space of stability conditions $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$. Its boundary $\partial U = \overline{U} \setminus U$ is given by a locally finite union of walls, and each wall is a real submanifold with boundary in $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$ of codimension one. \end{Cor} We proceed to show that any geometric stability condition is necessarily of the form given in Theorem \ref{thm:geom-stability}. By Lemma \ref{lem:geom-stability}, we have $\ensuremath{\mathbb{P}}P((0,1]) \subset \langle \ensuremath{\mathbb{C}}oh_0, \ensuremath{\mathbb{C}}oh_0[1] \rangle$. This implies that $\ensuremath{\mathbb{P}}P((0,1])$ is obtained from $\ensuremath{\mathbb{C}}oh_0$ by tilting at the torsion pair \begin{align*} \ensuremath{\mathcal T} &= \ensuremath{\mathbb{C}}oh_0 \cap \ensuremath{\mathbb{P}}P((0,1]) \\ \ensuremath{\mathcal F} &= \ensuremath{\mathbb{C}}oh_0 \cap \ensuremath{\mathbb{P}}P((-1,0]) \end{align*} (see e.g.\ \cite[Lemma 1.1.2]{Polishchuk:families-of-t-structures}). Since we assume $Z(k(x)) = -1$, the central charge can be written in the form of equation \eqref{eq:Zab}; in particular $\Im Z(E) = d(E) \cdot \Im a + r(E) \Im b$. 
By mimicking the proof of \cite[Prop.\ 10.3]{Bridgeland:K3}, it follows that $\Im a>0$ and, after setting $B=-\frac{\Im b}{\Im a}$, $\ensuremath{\mathbb{P}}P((0,1]) = \ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$. It remains to prove the inequality on $\ensuremath{\mathbb{R}}e b$. We first assume that $B \in \ensuremath{\mathbb{Q}}$. For any semistable torsion-free sheaf $\ensuremath{\mathcal F}$ on $\ensuremath{\mathbb{P}}^2$ of slope $B$ we have $\Im Z(i_*\ensuremath{\mathcal F}) = 0$ and $i_*\ensuremath{\mathcal F}[1] \in \ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$, hence we must have $\ensuremath{\mathbb{R}}e Z(i_*\ensuremath{\mathcal F}) > 0$. It follows that \begin{align*} 0 & < \frac{\ensuremath{\mathbb{R}}e Z(i_*\ensuremath{\mathcal F})}r = \ensuremath{\mathbb{R}}e b + \ensuremath{\mathbb{R}}e a B - \frac cr \\ &= \ensuremath{\mathbb{R}}e b + \ensuremath{\mathbb{R}}e a B + \ensuremath{\mathbb{D}}elta(\ensuremath{\mathcal F}) - \frac 12 B^2. \end{align*} Applying Theorem \ref{thm:DP}, we obtain the inequalities (\ref{ineq:genslope}) and (\ref{ineq:exceptslope}). Finally, we need to treat the case $B \not \in \ensuremath{\mathbb{Q}}$. By Corollary \ref{cor:openness}, there exists an open neighborhood $V \subset \ensuremath{\mathbb{C}}^2$ of $(a,b)$, such that any $(a', b') \in V$ with $\frac{\Im b'}{\Im a'} \in \ensuremath{\mathbb{Q}}$ satisfies inequality (\ref{ineq:genslope}). Hence it holds for $(a, b)$, too. \section{Constructing geometric stability conditions}\label{sec:constructing} We now come to the proof of existence of geometric stability conditions. The main problem is to prove the existence of Harder-Narasimhan filtrations for the stability function $Z_{a, b}$ on $\ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$. We prove this directly in the case where the image of $\Im Z$ is discrete, and then use Bridgeland's deformation result to extend it to the more general case. 
In order to make the extension effective, we have to bound the metric $\|\cdot\|_\sigma$ on $\ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D_0), \ensuremath{\mathbb{C}}) \cong \ensuremath{\mathbb{C}}^3$ defined by equation \eqref{eq:def-metric} from above. To do so, we in turn have to control $\abs{Z(E)}$ for stable objects $E$ from below. Our arguments in this section build on \cite{Bridgeland:K3, Aaron-Daniele}. Given $(a, b) \in G \subset \ensuremath{\mathbb{C}}^2$, let $\gamma_{a,b}\colon\ensuremath{\mathbb{R}}\to\ensuremath{\mathbb{C}}$ be the infinite path $\gamma_{a,b}(t)=x(t)+iy(t)$ defined by \begin{equation*}\label{eq:pathgamma} \begin{split} x(t) &= \ensuremath{\mathbb{R}}e b + \frac 12 (\ensuremath{\mathbb{R}}e a)^2 + \delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}(t) -\frac 12 \left(t - \ensuremath{\mathbb{R}}e a\right)^2 \\ y(t) &= \Im a\cdot t + \Im b \end{split} \end{equation*} and let $S_{a, b}\subset\ensuremath{\mathbb{C}}$ be the closed subset cut out by $\gamma_{a,b}$ that lies on or to the right of $\gamma_{a,b}$, i.e., \[ S_{a, b}:=\stv{x + iy}{\exists t\text{ with }y=y(t),\, x\ge x(t)}. \] Since $\delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}(t) \in [\frac 12, 1]$, the path $\gamma_{a,b}$ is contained between the graphs of two parabolas with horizontal distance $\frac 12$, see Figure \ref{fig:pathgamma}. \begin{figure} \caption{The path $\gamma_{a,b}$}\label{fig:pathgamma} \end{figure} \begin{Lem}\label{lem:ineq} Let $\ensuremath{\mathcal F}\in\ensuremath{\mathbb{C}}oh \ensuremath{\mathbb{P}}^2$ be a torsion-free slope-stable sheaf on $\ensuremath{\mathbb{P}}^2$ of rank $r$ that is not an exceptional vector bundle. Then $\frac{Z_{a,b}(i_*\ensuremath{\mathcal F})}r \in S_{a,b}$. \end{Lem} \begin{Prf} We write $\frac {\mathop{\mathrm{ch}}\nolimits(\ensuremath{\mathcal F})}r = [\ensuremath{\mathbb{P}}^2] + \mu [l] + \frac cr [\mathrm{pt}]$ and $\frac{Z_{a,b}(i_*\ensuremath{\mathcal F})}r = x + iy$. 
Using Theorem \ref{thm:DP}, we obtain: \begin{align*} y &= \Im a \mu + \Im b \\ x &= - \frac cr + \ensuremath{\mathbb{R}}e a \mu + \ensuremath{\mathbb{R}}e b = \ensuremath{\mathbb{D}}elta(\ensuremath{\mathcal F}) - \frac 12 \mu^2 + \ensuremath{\mathbb{R}}e a \mu + \ensuremath{\mathbb{R}}e b \\ & \ge \delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}(\mu) - \frac 12 \mu^2 + \ensuremath{\mathbb{R}}e a \mu + \ensuremath{\mathbb{R}}e b \end{align*} Setting $t = \mu$ yields the claim. \end{Prf} \begin{Lem} For any $(a, b) \in G$, the central charge $Z_{a,b}$ is a stability function for $\ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$: if $0\neq E\in\ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$, then $Z_{a,b}(E)\in\ensuremath{\mathbb{H}}$. \end{Lem} \begin{Prf} It is sufficient to prove the claim for torsion sheaves of dimension $\le 1$, and for (shifts of) slope-stable sheaves. The claim is only non-trivial for objects $\ensuremath{\mathcal F}[1]$, where $\ensuremath{\mathcal F}$ is a purely $2$-dimensional slope-stable sheaf of slope $\mu(\ensuremath{\mathcal F}) = B$. In such a case, $Z_{a,b}(\ensuremath{\mathcal F})$ lies on the real line. By Lemma \ref{lem:mustabilitypf}, $\ensuremath{\mathcal F}$ is the push-forward of a slope-stable sheaf on $\ensuremath{\mathbb{P}}^2$. If $\ensuremath{\mathcal F}$ is not an exceptional vector bundle, the previous lemma shows $Z_{a,b}(i_*\ensuremath{\mathcal F}) \in \ensuremath{\mathbb{R}}_{>0}$. If it is exceptional, the same computation yields this statement from inequality \eqref{ineq:exceptslope}. \end{Prf} In other words, inequality \eqref{ineq:genslope} is equivalent to $0 \notin S_{a,b}$, i.e., the path passes through the real line with positive real part; and inequality \eqref{ineq:exceptslope} is equivalent to $Z(\ensuremath{\mathcal E}) \not\in \ensuremath{\mathbb{R}}_{\le 0}$ for any exceptional vector bundle $\ensuremath{\mathcal E}$ on $\ensuremath{\mathbb{P}}^2$. 
Together, they guarantee that the central charge of a slope-stable sheaf never lies on the negative real line. Due to the above Lemma, as in Remark \ref{rmk:tstruct} we can define a phase function $\phi(E):=(1/\pi)\arg(Z(E))\in(0,1]$, for all $0\neq E\in\ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$. \begin{Lem}[{\cite[Proposition 7.1]{Bridgeland:K3}}] \label{lem:HNFiltrationsDiscrete} Let $a,b\in\ensuremath{\mathbb{C}}$ be such that $B\in\ensuremath{\mathbb{Q}}$ and $Z_{a,b}(\ensuremath{\mathbb{C}}oh_0^{\sharp(B)}\setminus\{0\})\subset\ensuremath{\mathbb{H}}$. Then Harder-Narasimhan filtrations exist for $(Z_{a,b},\ensuremath{\mathbb{C}}oh_0^{\sharp(B)})$. \end{Lem} \begin{Prf} We use Proposition \ref{prop:HNFiltrationsDiscrete}. Since $B\in\ensuremath{\mathbb{Q}}$, the image of $\Im(Z_{a,b})$ is discrete in $\ensuremath{\mathbb{R}}$. Let $F\in\ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$ and let \begin{equation}\label{eq:Bonn17} 0=A_0\subset A_1\subset\ldots\subset A_j\subset A_{j+1}\subset\ldots\subset F, \end{equation} be a sequence of subobjects, with $A_j\in\ensuremath{\mathbb{P}}P'_{a,b}(1)$. As in Proposition \ref{prop:HNFiltrationsDiscrete}, $\ensuremath{\mathbb{P}}P'_{a,b}(1)$ is the full subcategory of $\ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$ whose objects have phase $1$ with respect to $Z_{a,b}$. We need to show that \eqref{eq:Bonn17} stabilizes. To this end, we first observe that \eqref{eq:Bonn17} induces a sequence of inclusions \begin{equation}\label{eq:Bonn18} 0=\ensuremath{\mathbb{H}}H^{-1}(A_0)\ensuremath{\hookrightarrow}\ldots\ensuremath{\mathbb{H}}H^{-1}(A_j)\ensuremath{\hookrightarrow}\ldots\ensuremath{\hookrightarrow}\ensuremath{\mathbb{H}}H^{-1}(F). \end{equation} Since $\ensuremath{\mathbb{C}}oh_0$ is Noetherian, \eqref{eq:Bonn18} must terminate. We can therefore assume that $\ensuremath{\mathbb{H}}H^{-1}(A_j)\cong\ensuremath{\mathcal F}_{-1}$ for all $j$ and some $\ensuremath{\mathcal F}_{-1}\in\ensuremath{\mathbb{C}}oh_0^{\le B}$. 
Let $\ensuremath{\mathcal F}_0$ denote the cokernel of the inclusion $\ensuremath{\mathcal F}_{-1}\ensuremath{\hookrightarrow}\ensuremath{\mathbb{H}}H^{-1}(F)$; then, by the long exact cohomology sequence, we have $\ensuremath{\mathcal F}_0\in\ensuremath{\mathbb{C}}oh_0^{\le B}$ as well. Now, observe that the simple objects of $\ensuremath{\mathbb{P}}P'_{a,b}(1)$ are skyscraper sheaves $k(x)$ ($x\in\ensuremath{\mathbb{P}}^2$) and objects of the form $i_*\ensuremath{\mathcal G}[1]$, for $\ensuremath{\mathcal G}\in\ensuremath{\mathbb{C}}oh \ensuremath{\mathbb{P}}^2$ a locally-free slope-stable sheaf on $\ensuremath{\mathbb{P}}^2$ with $\mu(\ensuremath{\mathcal G})=B$. Indeed, this can be proved in precisely the same way as Lemma \ref{lem:geom-stability}, (\ref{stableinP1}). In particular, $\ensuremath{\mathbb{H}}H^0(A_j)$ is a torsion sheaf of dimension zero. Let $B_j$ be the cokernel in $\ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$ of $A_j\ensuremath{\hookrightarrow} F$; then we have an exact sequence \[ 0\to\ensuremath{\mathcal F}_0 \stackrel{f_j}{\to}\ensuremath{\mathbb{H}}H^{-1}(B_j)\to \ensuremath{\mathbb{H}}H^0(A_j) \stackrel{g_j}{\to}\ensuremath{\mathbb{H}}H^0(F). \] The cokernel $\mathop{\mathrm{coker}} f_j$ is zero-dimensional; since $\ensuremath{\mathcal F}_0$ is fixed and $\ensuremath{\mathbb{H}}H^{-1}(B_j)$ is pure of dimension $2$, the length of $\mathop{\mathrm{coker}} f_j$ is bounded. As the length of the image $\mathop{\mathrm{im}}\nolimits g_j$ is also bounded, we get a bound on the length of $\ensuremath{\mathbb{H}}H^0(A_j)$. At the same time, if $D_j$ denotes the cokernel in $\ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$ of $A_j\ensuremath{\hookrightarrow} A_{j+1}$, we must have $\ensuremath{\mathbb{H}}H^{-1}(D_j)=0$, and thus an inclusion $\ensuremath{\mathbb{H}}H^0(A_j)\ensuremath{\hookrightarrow}\ensuremath{\mathbb{H}}H^0(A_{j+1})$. 
Hence, for $j\gg0$, $\ensuremath{\mathbb{H}}H^0(A_j)\cong\ensuremath{\mathbb{H}}H^0(A_{j+1})$ and so \eqref{eq:Bonn17} stabilizes. \end{Prf} Notice that, in the assumptions of Lemma \ref{lem:HNFiltrationsDiscrete}, the pair $(Z_{a,b},\ensuremath{\mathbb{C}}oh_0^{\sharp(B)})$ defines a locally-finite stability condition on $\ensuremath{\mathbb{D}}D_0$. Indeed this follows immediately from \cite[Lemma 4.4]{Bridgeland:K3}. In the rest of this section, we will use Bridgeland's deformation result to extend the existence of Harder-Narasimhan filtrations to the case where $a, b$ are not rational. In order to make the deformation effective, we need to bound the metric $\|\cdot\|_{\sigma_{a,b}}$ on $\ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D_0), \ensuremath{\mathbb{C}}) \cong \ensuremath{\mathbb{C}}^3$ defined in equation \eqref{eq:def-metric} relative to an arbitrarily chosen metric, with the bound depending continuously on $a, b \in G$. We define the following functions $G \to \ensuremath{\mathbb{R}}_{\ge 0}$: \begin{align*} \gamma_{\mathop{\mathrm{min}}\nolimits}(a, b) &:= \mathop{\mathrm{inf}}\nolimits \stv{\abs{\gamma_{a,b}(t)}} {t \in \ensuremath{\mathbb{R}}} \\ E_{\mathop{\mathrm{min}}\nolimits}(a,b) &:= \mathop{\mathrm{inf}}\nolimits \stv{\abs{ \frac{Z_{a,b}(\ensuremath{\mathcal E})}{r(\ensuremath{\mathcal E})} + t}} {\text{$t \in \ensuremath{\mathbb{R}}_{\ge 0}$, $\ensuremath{\mathcal E}$ exceptional v. bundle}} \\ S_{\mathop{\mathrm{min}}\nolimits}(a,b) &:= \mathop{\mathrm{min}}\nolimits( \gamma_{\mathop{\mathrm{min}}\nolimits}(a,b), E_{\mathop{\mathrm{min}}\nolimits}(a,b)). 
\end{align*} \begin{Lem}\label{lem:Smin} The function $S_{\mathop{\mathrm{min}}\nolimits}$ is continuous and satisfies \[ 0 < S_{\mathop{\mathrm{min}}\nolimits}(a,b) \le \mathop{\mathrm{inf}}\nolimits \stv{\abs{ \frac{Z_{a,b}(\ensuremath{\mathcal F})}{r(\ensuremath{\mathcal F})} + t}} {t \in \ensuremath{\mathbb{R}}_{\ge 0},\, \ensuremath{\mathcal F} \in \ensuremath{\mathbb{C}}oh_0\, \mathrm{slope}\text{-}\mathrm{stable}} \] for all $(a, b) \in G$. \end{Lem} \begin{Prf} The path $\gamma_{a,b}(t)$ depends continuously on $a, b, t$ and has the properties $\gamma_{a,b}(t)\neq0$ for all $a, b, t$, and $\lim_{t \to \pm \mathop{\mathrm{inf}}\nolimitsty} \abs{\gamma_{a,b}(t)} = +\mathop{\mathrm{inf}}\nolimitsty$. It follows that $\gamma_{\mathop{\mathrm{min}}\nolimits}$ is a positive continuous function. Since $Z_{a, b}(\ensuremath{\mathcal E}) \not\in \ensuremath{\mathbb{R}}_{\leq 0}$, the term $\mathop{\mathrm{inf}}\nolimits \stv{\abs{ \frac{Z_{a,b}(\ensuremath{\mathcal E})}{r(\ensuremath{\mathcal E})} + t}} {t \in \ensuremath{\mathbb{R}}_{\ge 0}}$ is positive for every exceptional vector bundle $\ensuremath{\mathcal E}$ on $\ensuremath{\mathbb{P}}^2$. Further, Theorem \ref{thm:DP} together with the computation of Lemma \ref{lem:ineq} shows that every accumulation point of the set \[ \stv {\frac{Z_{a,b}(\ensuremath{\mathcal E})}{r(\ensuremath{\mathcal E})}} {\text{$\ensuremath{\mathcal E}$ exceptional v. bdle}} \] is contained in $S_{a,b}$. Hence $E_{\mathop{\mathrm{min}}\nolimits}$ is also a positive continuous function. It remains to prove $S_{\mathop{\mathrm{min}}\nolimits}(a, b) \le \abs{ \frac{Z_{a,b}(\ensuremath{\mathcal F})}{r(\ensuremath{\mathcal F})} + t}$ for all $\ensuremath{\mathcal F}, t$. It holds by definition when $\ensuremath{\mathcal F}$ is an exceptional vector bundle. Otherwise, the claim follows as $\frac{Z_{a,b}(\ensuremath{\mathcal F})}{r(\ensuremath{\mathcal F})} + t$ is contained in $S_{a,b}$. 
\end{Prf} Let $\abs{\cdot}_\mathop{\mathrm{inf}}\nolimitsty$ be the supremums-norm on $K(\ensuremath{\mathbb{D}}D_0)\otimes \ensuremath{\mathbb{R}} \cong \ensuremath{\mathbb{R}}^3$ in the coordinates $(r, d, c)$, i.e., $\abs{(r,d,c)}_\mathop{\mathrm{inf}}\nolimitsty=\max\{\abs{r},\abs{d},\abs{c}\}$. Let $M(a, b)$ be the matrix \[ M(a,b) := \begin{pmatrix} 1 & 0 & 0 \\ \Im b & \Im a & 0 \\ \ensuremath{\mathbb{R}}e b & \ensuremath{\mathbb{R}}e a & -1 \end{pmatrix}, \] and let $N(a, b) := \left\|M(a,b)^{-1}\right\|_\mathop{\mathrm{inf}}\nolimitsty$ be the norm of its inverse, where $\left\|\cdot\right\|_\mathop{\mathrm{inf}}\nolimitsty$ is the operator norm with respect to the supremums-norm on $\ensuremath{\mathbb{R}}^3$. We claim the following estimate, which is the ``support property'' discussed in Proposition \ref{prop:SupportProperty} with an explicit constant: \begin{Lem}\label{lem:estimate} If $E \in \ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$ is $Z_{a,b}$-stable, then \[ \frac{\abs{Z_{a,b}(E)}}{\abs{E}_\mathop{\mathrm{inf}}\nolimitsty} \ge \frac{\mathop{\mathrm{min}}\nolimits\left(S_{\mathop{\mathrm{min}}\nolimits}(a, b), 1\right)}{N(a,b)}. \] \end{Lem} We first show how to conclude the proof of Theorem \ref{thm:geom-stability} from the lemma: \begin{Cor} There exists a geometric stability condition $\sigma_{a,b}$ for arbitrary pairs $(a,b) \in G \subset \ensuremath{\mathbb{C}}^2$. \end{Cor} \begin{Prf} Let $V\subset U$ be the subset of geometric stability conditions $\sigma = (Z,\ensuremath{\mathbb{P}}P)$ such that all skyscraper sheaves $k(x)$ are stable of phase $1$ with $Z(k(x)) = -1$. For any such stability condition, the central charge is of the form $Z = Z_{a,b}$ of equation \eqref{eq:Zab}, and thus $\ensuremath{\mathbb{Z}}Z$ induces a map $\ensuremath{\mathbb{Z}}Z_V \colon V \to \ensuremath{\mathbb{C}}^2, \sigma \mapsto (a,b)$. 
As proved at the end of Section \ref{sec:Constraining}, the heart of such a stability condition is uniquely determined; thus $\ensuremath{\mathbb{Z}}Z_V$ is injective, and so it is a homeomorphism onto its image. Using Corollary \ref{cor:openness} and the deformation property, we see that the image of $\ensuremath{\mathbb{Z}}Z_V$ is open in $\ensuremath{\mathbb{C}}^2$. By Lemma \ref{lem:HNFiltrationsDiscrete} and Lemma \ref{lem:estimate} (which shows that the stability conditions satisfy condition \eqref{enum:full} in Definition \ref{def:GeomStability}), it contains the dense subset of $(a,b) \in G$ such that $B = - \frac{\Im b}{\Im a} \in \ensuremath{\mathbb{Q}}$ is rational. Hence it suffices to prove that the image is closed in $G$. Assume the contrary, i.e., that $(a, b) \in G$ is in the boundary of the image of $\ensuremath{\mathbb{Z}}Z_V$. By Lemma \ref{lem:Smin} the function $S_{\mathop{\mathrm{min}}\nolimits}$ is continuous and positive. It follows that for all $(a', b') \in G$ sufficiently close to $(a,b)$, we have \[ \left\|Z_{a,b} - Z_{a',b'}\right\|_{\mathop{\mathrm{inf}}\nolimitsty} < \sin\left(\frac{\pi}8\right) \frac{\mathop{\mathrm{min}}\nolimits\left(S_{\mathop{\mathrm{min}}\nolimits}(a', b'), 1\right)}{N(a',b')}. \] By the definition of $\|\cdot\|_{\sigma_{a', b'}}$ (see equation \eqref{eq:def-metric}) and Lemma \ref{lem:estimate}, this implies \[ \left\|Z_{a,b} - Z_{a',b'}\right\|_{\sigma_{a',b'}} < \sin\left(\frac{\pi}8\right) \] for all such $(a', b')$ for which a geometric stability condition $\sigma_{a', b'}$ exists. By Bridgeland's effective deformation result (see Theorem \ref{thm:B-deform}) there exists a stability condition $\sigma_{a,b} = (Z_{a,b},\ensuremath{\mathbb{P}}P_{a,b})$ in the neighborhood of $\sigma_{a', b'}$. 
By choosing $(a', b')$ appropriately, we may assume that $\sigma_{a,b}$ is on one of the walls in the sense of Corollary \ref{cor:openness}; in particular, $k(x)$ is semistable, and there is an inclusion $E \ensuremath{\hookrightarrow} k(x)$ in $\ensuremath{\mathbb{P}}P_{a,b}(1)$ with $E$ being stable. In particular, $0 = \Im Z_{a,b}(E) = \Im a \cdot d(E) + \Im b \cdot r(E)$. Since $k(x)$ is stable with respect to $\sigma_{a', b'}$, we have $\Im Z_{a', b'}(E) \neq 0$, and thus we have $r(E) \neq 0$ or $d(E) \neq 0$. Since $\Im a > 0$, it follows that $r(E) \neq 0$; but then $B = - \frac{\Im b}{\Im a} = \frac{d(E)}{r(E)} \in \ensuremath{\mathbb{Q}}$, and so we already know that there exists a geometric stability condition $\sigma \in V$ with $\ensuremath{\mathbb{Z}}Z_V(\sigma) = (a,b)$. \end{Prf} \begin{Prf} (Lemma \ref{lem:estimate}) Writing $r = r(E)$ etc., we have \begin{align*} \abs{E}_\mathop{\mathrm{inf}}\nolimitsty &= \max(\abs{r},\abs{d},\abs{c}) \\ & = \abs{M(a,b)^{-1} \cdot \bigl(r, \Im a \cdot d + \Im b \cdot r, -c + \ensuremath{\mathbb{R}}e a \cdot d + \ensuremath{\mathbb{R}}e b \cdot r\bigr)}_\mathop{\mathrm{inf}}\nolimitsty \\ & \le N(a,b) \cdot \abs{\left(r, \Im Z_{a,b}(E), \ensuremath{\mathbb{R}}e Z_{a,b}(E)\right)}_\mathop{\mathrm{inf}}\nolimitsty \\ & \le N(a,b) \cdot \max(\abs{r}, \abs{Z_{a,b}(E)}). \end{align*} Thus the claim follows if we can show \[ \abs{\overline Z(E)} > S_{\mathop{\mathrm{min}}\nolimits}(a, b) \] where $E \in \ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$ is any $Z_{a,b}$-stable object with non-zero rank, and where we wrote $\overline Z(E) = \frac{Z_{a,b}(E)}{\abs{r(E)}}$. Assume first that $r(H^0(E)) \ge r(H^{-1}(E))$, with $r(H^0(E)) > 0$. We have \[ \Im Z_{a,b}(E) \ge \Im Z_{a,b}(H^0(E)), \] as $E \ensuremath{\twoheadrightarrow} H^0(E)$ is a quotient in $\ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$ and $Z_{a,b}$ is a stability function, and we have $\abs{r(E)} \le r(H^0(E))$; together, they show \[ \Im \overline Z(E) \ge \Im \overline Z(H^0(E)). 
\] Let $H^0(E) \ensuremath{\twoheadrightarrow} \ensuremath{\mathcal F}$ be a semistable quotient with $\mu(H^0(E)) \ge \mu(\ensuremath{\mathcal F}) > B$ (such a quotient always exists due to the existence of HN-filtrations for slope stability). Then \begin{align*} \Im \overline Z(H^0(E)) & = \frac {\Im a \cdot d(H^0(E)) + \Im b \cdot r(H^0(E))} {r(H^0(E))} = \Im a \left(\mu(H^0(E)) - B \right) \\ & \ge \Im a \left(\mu(\ensuremath{\mathcal F}) - B\right) = \Im \overline Z(\ensuremath{\mathcal F}) \end{align*} (where we used the assumption $\Im a > 0$ in the inequality). On the other hand, as $E$ is $Z_{a,b}$-semistable and $\ensuremath{\mathcal F} \in \ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$, the phase of $Z_{a,b}(\ensuremath{\mathcal F})$ is at least as big as the phase of $Z_{a,b}(E)$. Hence the line segment connecting 0 and $\overline Z(E)$ intersects the ray $\overline Z(\ensuremath{\mathcal F}) + t, t \ge 0$ (see Figure \ref{fig:Z-proof}). By Lemma \ref{lem:Smin}, this implies the claim. \begin{figure} \caption{Location of $\overline Z(\ensuremath{\mathcal F})$}\label{fig:Z-proof} \end{figure} A dual argument holds in case $r(H^{-1}(E)) > r(H^0(E))$, by considering a slope-stable sheaf $\ensuremath{\mathcal F} \ensuremath{\hookrightarrow} H^{-1}(E)$ with $B \ge \mu(\ensuremath{\mathcal F}) \ge \mu(H^{-1}(E))$. Finally, when $H^{-1}(E)$ is zero and $H^0(E)$ has rank zero, there is nothing to prove. \end{Prf} \begin{Rem} The methods used in the last part of this section also apply in the situation of \cite[Section 2]{Aaron-Daniele}: the stability conditions constructed there for rational divisors $D, F$ deform to produce stability conditions for arbitrary $\ensuremath{\mathbb{R}}$-divisors $D, F$ with $F$ ample. 
\end{Rem} \section{Boundary of the geometric chamber}\label{sec:boundary} In this section we will show that the set of boundary walls of the geometric chamber $U$ can be described explicitly using exceptional vector bundles $\ensuremath{\mathcal E}$ on $\ensuremath{\mathbb{P}}^2$. For any such $\ensuremath{\mathcal E}$, the push-forward $i_*\ensuremath{\mathcal E}$ is a spherical object in $\ensuremath{\mathbb{D}}D_0$. We denote by ${\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E} \colon \ensuremath{\mathbb{D}}D_0 \to \ensuremath{\mathbb{D}}D_0$ the spherical twist at $i_* \ensuremath{\mathcal E}$. If $r$ is the rank of $\ensuremath{\mathcal E}$ and $x \in \ensuremath{\mathbb{P}}^2$, we also write $\ensuremath{\mathcal E}^x$ for the kernel of the natural map $i_*\ensuremath{\mathcal E}^{\oplus r} \ensuremath{\twoheadrightarrow} k(x)$. The goal of this section is to prove the following theorem: \begin{Thm}\label{thm:boundary} For every exceptional vector bundle $\ensuremath{\mathcal E}$ there exist two codimension one walls $W_\ensuremath{\mathcal E}^+, W_\ensuremath{\mathcal E}^- \subset \partial U$ with the following properties: \begin{enumerate} \item Stability conditions in $W_\ensuremath{\mathcal E}^+$ are characterized by the property that $i_*\ensuremath{\mathcal E}$ and all skyscraper sheaves $k(x)$ are semistable of the same phase $\phi$, with $i_*\ensuremath{\mathcal E}$ being a subobject of $k(x)$ in $\ensuremath{\mathbb{P}}P(\phi)$; at a general point of $W_\ensuremath{\mathcal E}^+$, the Jordan-H\"older filtration of any skyscraper sheaf $k(x)$ is given by \begin{equation} \label{eq:kx-filtration1} i_*\ensuremath{\mathcal E}^{\oplus r} \to k(x) \to \ensuremath{\mathcal E}^x[1]. \end{equation} Similarly, we have $\sigma \in W_\ensuremath{\mathcal E}^-$ if $i_*\ensuremath{\mathcal E}[2]$ is semistable of the same phase $\phi$ as $k(x)$, and $i_*\ensuremath{\mathcal E}[2]$ is a quotient of $k(x)$ in $\ensuremath{\mathbb{P}}P(\phi)$. 
At a general point in $W_\ensuremath{\mathcal E}^-$, the Jordan-H\"older filtration of $k(x)$ is given by \begin{equation} \label{eq:kx-filtration2} {\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E}^{-1}(\ensuremath{\mathcal E}^x[1]) \to k(x) \to i_*\ensuremath{\mathcal E}^{\oplus r}[2]. \end{equation} \item We have $W_\ensuremath{\mathcal E}^+ = \overline{U} \cap {\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E} \left(\overline{U}\right)$, i.e., $W_\ensuremath{\mathcal E}^+$ is the wall between $U$ and ${\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E}(U)$; similarly, $W_\ensuremath{\mathcal E}^- = \overline{U} \cap {\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E}^{-1} \left(\overline{U}\right)$. \end{enumerate} There are no other walls in $\partial U$. \end{Thm} \begin{Cor}\label{cor:ConnectedComponent} The translates of $\overline{U}$ under the group of autoequivalences generated by ${\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E}$ cover the whole connected component $\mathop{\mathrm{Stab}}^{\dag}(\ensuremath{\mathbb{D}}D_0)$ of $U$ in the space of stability conditions. \end{Cor} \begin{Prf} Let $\alpha \colon [0,1] \to \mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$ be a path of stability conditions with $\alpha(0) \in U$. By Proposition \ref{prop:chambers}, there exists a finite set of walls $W^{[k(x)]}_i$ intersecting $\alpha$, such that the set of stable objects of class $[k(x)]$ is constant in the complement of the intersection points. We may also assume that $\alpha$ intersects each wall transversely and in a generic point of the wall. Using the above theorem, it follows by induction that every open interval in the complement is contained in the translate of $U$ under a sequence of spherical twists. \end{Prf} Most of the existing proofs of statements similar to the above claims are in the situation of a Calabi-Yau 2-category. 
In that situation, Lemma 5.2 of \cite{Bridgeland:K3} applies, which guarantees, via an Euler characteristic computation, that every non-trivial Harder-Narasimhan filtration of a skyscraper sheaf $k(x)$ contains a spherical object. In a Calabi-Yau 3-category, spherical objects cannot be characterized via their Euler characteristic among stable objects. Instead, our proof is obtained by a direct geometric analysis of the boundary of $U$. Consider a stability condition $(Z, \ensuremath{\mathbb{P}}P)$ in the boundary $\partial U$ of $U$. Since the skyscraper sheaves $k(x)$ are semistable, $Z$ satisfies $Z(k(x)) \neq 0$ and, up to the action of $\ensuremath{\mathbb{C}}$, we can still assume $k(x) \in \ensuremath{\mathbb{P}}P(1)$ and $Z = Z_{a,b}$ for $a, b \in \ensuremath{\mathbb{C}}$ as in equation \eqref{eq:Zab}. By Bridgeland's deformation result and Theorem \ref{thm:geom-stability}, $a,b \in \ensuremath{\mathbb{C}}$ must satisfy one of the following conditions: \begin{description} \item[Case $a$] $\Im a = 0$ \item[Case $E$] $\Im a > 0$, there exists an exceptional vector bundle of slope $B = - \frac {\Im b}{\Im a}$, and we have $\delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}(B) > - \ensuremath{\mathbb{R}}e b - B \cdot \ensuremath{\mathbb{R}}e a + \frac 12 B^2 > \ensuremath{\mathbb{D}}elta_B$. \item[Case $\delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}$] $\Im a > 0$ and $- \ensuremath{\mathbb{R}}e b - B \cdot \ensuremath{\mathbb{R}}e a + \frac 12 B^2 = \delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}(B)$. \end{description} We begin by showing that ``Case $\delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}$'' cannot exist. \begin{Lem}\label{lem:constantheart} Let $\sigma_t = (Z_t,\ensuremath{\mathbb{P}}P_t)$ for $t \in I \subset \ensuremath{\mathbb{R}}$ be a path in the space of stability conditions such that $\Im Z_t$ is constant. Then $\ensuremath{\mathbb{P}}P_t((0,1])$ and $\ensuremath{\mathbb{P}}P_t(1)$ are constant, too. 
\end{Lem} \begin{Prf} Let $t_1, t_2 \in I$ be such that $\sigma_{t_1}$ and $\sigma_{t_2}$ are close with respect to the metric on $\mathop{\mathrm{Stab}} (\ensuremath{\mathbb{D}}D_0)$; to be specific, we assume $d(\sigma_{t_1}, \sigma_{t_2}) < \frac 18$. Given $\phi \in (0,1]$, the objects $E \in \ensuremath{\mathbb{P}}P_{t_2}(\phi)$ can be characterized as the $Z_{t_2}$-stable objects in the quasi-abelian category $\ensuremath{\mathbb{P}}P_{t_1}((\phi - \frac 18, \phi + \frac 18))$ (see \cite[Section 7]{Bridgeland:Stab}). We want to show $E \in \ensuremath{\mathbb{P}}P_{t_1}((0,1])$. In case $\frac 18 \le \phi \le \frac 78$ we are done. If $\phi \in (\frac 78, 1]$, then for any $A \in \ensuremath{\mathbb{P}}P_{t_1}((1, \phi + \frac 18))$ we have $\Im Z_{t_2} (A) = \Im Z_{t_1}(A) < 0$, and thus the phase of $A$ with respect to $Z_{t_2}$ is bigger than one. Hence $A$ cannot be a subobject of $E$. By considering the Harder-Narasimhan filtration of $E$ with respect to $\ensuremath{\mathbb{P}}P_{t_1}$, this implies that $E \in \ensuremath{\mathbb{P}}P_{t_1}((\phi - \frac 18, 1])$. A similar argument applies for $0 < \phi < \frac 18$. It follows that $\ensuremath{\mathbb{P}}P_{t_2}((0,1]) \subset \ensuremath{\mathbb{P}}P_{t_1}((0,1])$, and thus they must be equal. The claim about $\ensuremath{\mathbb{P}}P_t(1)$ follows easily. \end{Prf} \begin{Lem} There are no stability conditions $\sigma = (Z_{a,b}, \ensuremath{\mathbb{P}}P)$ in $\partial U$ such that $\Im a > 0$ and $-\ensuremath{\mathbb{R}}e b - B \cdot \ensuremath{\mathbb{R}}e a + \frac 12 B^2 = \delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}(B)$. \end{Lem} \begin{Prf} Due to Corollary \ref{cor:openness}, we may assume that $B$ is irrational. Consider the path $\sigma_t = (Z_{a(t), b(t)}, \ensuremath{\mathbb{P}}P_t), t \in [0,1]$ in $\mathop{\mathrm{Stab}} (\ensuremath{\mathbb{D}}D_0)$ starting at $\sigma$ induced by deforming $Z$ as $Z_{a(t), b(t)}$, with $a(t) = a$ constant, and $b(t) = b + \epsilon t$. 
Due to the form of the inequalities in Theorem \ref{thm:geom-stability}, the stability conditions $\sigma_t$ for $t> 0$ are geometric. By the previous lemma, it follows that $\ensuremath{\mathbb{P}}P_0((0,1]) = \ensuremath{\mathbb{P}}P_1((0,1]) = \ensuremath{\mathbb{C}}oh_0^{\sharp(B)}$ and, since $B$ is irrational, that $\ensuremath{\mathbb{P}}P_0(1)$ is generated by the skyscraper sheaves $k(x)$. The sheaves $k(x)$ have no subobjects in this category, thus they are stable. By Corollary \ref{cor:openness}, this contradicts the assumption that $\sigma$ is in the boundary of $U$. \end{Prf} The following lemma deals with ``Case $a$'': \begin{Lem} Let $(Z_{a,b}, \ensuremath{\mathbb{P}}P) \in \partial U$ be a stability condition with $\Im a = 0$. Then $\Im b = 0$, i.e., the image of $Z$ is contained in the real line. \end{Lem} \begin{Prf} Writing inequality (\ref{ineq:genslope}) without denominators we get \[ (\Im a)^2 \ensuremath{\mathbb{R}}e b > \Im a \cdot \Im b \cdot \ensuremath{\mathbb{R}}e a - (\Im a)^2 \delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}(B) + \frac 12 (\Im b)^2 \] By continuity, this implies $(\Im b)^2 \le 0$ on the boundary with $\Im a = 0$. \end{Prf} In particular, the part of the boundary with $\Im a = 0$ is a codimension two subset; by Corollary \ref{cor:openness}, any such point is contained in the closure of the $\Im a > 0$-part of a wall. We will now consider boundary stability conditions in ``Case $E$''. Let $a, b, B$ be as in the assumption, and let $\ensuremath{\mathcal E}$ be the unique exceptional vector bundle on $\ensuremath{\mathbb{P}}^2$ of slope $B$ (see Theorem \ref{thm:DP-A}); then $Z_{a,b}(i_*\ensuremath{\mathcal E}) \in (-1, 0)$ and $Z_{a, b}(i_* \ensuremath{\mathcal G}) \in \ensuremath{\mathbb{R}}_{> 0}$ for any other slope-stable sheaf $\ensuremath{\mathcal G}$ on $\ensuremath{\mathbb{P}}^2$ of slope $B$. 
This suggests that $i_*\ensuremath{\mathcal E}$ is semistable, of phase $\phi(i_*\ensuremath{\mathcal E}) = \pm 1$ (depending on whether $Z(i_*\ensuremath{\mathcal E})$ approaches the real line from above or below when we approach $Z_{a,b}$ by geometric stability conditions); and in the case $\phi(i_*\ensuremath{\mathcal E}) = +1$, the $t$-structure $\ensuremath{\mathbb{P}}P((0,1])$ should be given as in the following proposition. We will prove this by constructing the stability conditions in the boundary directly, and prove that they deform to geometric stability conditions. \begin{Prop} \label{prop:E-tstruct} Let $B$ be the slope of an exceptional vector bundle $\ensuremath{\mathcal E}$ on $\ensuremath{\mathbb{P}}^2$. Then there is a torsion pair $(\ensuremath{\mathcal T}^\ensuremath{\mathcal E}, \ensuremath{\mathcal F}^\ensuremath{\mathcal E})$ on $\ensuremath{\mathbb{C}}oh_0$ where \begin{itemize} \item $\ensuremath{\mathcal T}^\ensuremath{\mathcal E}$ is the extension-closed subcategory of $\ensuremath{\mathbb{C}}oh_0$ generated by torsion sheaves, by slope-semistable sheaves of slope $\mu > B$, and by $\ensuremath{\mathcal E}$, and \item $\ensuremath{\mathcal F}^\ensuremath{\mathcal E}$ is generated by slope-semistable sheaves $\ensuremath{\mathcal G}$ of slope $\mu \le B$ that also satisfy $\ensuremath{\mathbb{H}}om(i_* \ensuremath{\mathcal E}, \ensuremath{\mathcal G}) = 0$. \end{itemize} \end{Prop} \begin{Prf} From the construction, it is clear that $\ensuremath{\mathbb{H}}om(\ensuremath{\mathcal T}^\ensuremath{\mathcal E},\ensuremath{\mathcal F}^\ensuremath{\mathcal E}) = 0$. 
Now given any $\ensuremath{\mathcal G} \in \ensuremath{\mathbb{C}}oh_0$, let $\ensuremath{\mathcal G}_{> B} \ensuremath{\hookrightarrow} \ensuremath{\mathcal G} \ensuremath{\twoheadrightarrow} \ensuremath{\mathcal G}_{\le B}$ the unique short exact sequence with $\ensuremath{\mathcal G}_{> B} \in \ensuremath{\mathbb{C}}oh_0^{> B}$ and $\ensuremath{\mathcal G}_{\le B} \in \ensuremath{\mathbb{C}}oh_0^{\le B}$. Let $V = \ensuremath{\mathbb{H}}om(i_*\ensuremath{\mathcal E}, \ensuremath{\mathcal G}_{\le B})$ and let $\ensuremath{\mathcal F}$ be the cokernel such that the following sequence is exact on the right: \[ V \otimes i_*\ensuremath{\mathcal E} \stackrel{f}{\longrightarrow} \ensuremath{\mathcal G}_{\le B} \to \ensuremath{\mathcal F} \to 0 \] We claim that $\ensuremath{\mathcal F} \in \ensuremath{\mathcal F}^\ensuremath{\mathcal E}$, and that the kernel $\ensuremath{\mathcal T}$ of the composition $\ensuremath{\mathcal G} \ensuremath{\twoheadrightarrow} \ensuremath{\mathcal G}_{\le B} \ensuremath{\twoheadrightarrow} \ensuremath{\mathcal F}$ lies in $\ensuremath{\mathcal T}^\ensuremath{\mathcal E}$. The image of the evaluation map $f$ is slope-semistable of slope $B$; hence so is the kernel of $f$. Since $i_*\ensuremath{\mathcal E}$ is stable, the kernel is of the form $i_* \ensuremath{\mathcal E} \otimes V'$ for some $V' \subset V$; by the definition of $V$, this forces $V' = 0$, i.e., the above sequence is exact. This shows that $\ensuremath{\mathcal T}$ is an extension of $\ensuremath{\mathcal G}_{> B}$ and $i_*\ensuremath{\mathcal E}$. 
Since $\ensuremath{\mathbb{H}}om(i_*\ensuremath{\mathcal E}, i_*\ensuremath{\mathcal E}) = \ensuremath{\mathbb{C}}$ and $\mathop{\mathrm{Ext}}\nolimits^1(i_*\ensuremath{\mathcal E}, i_*\ensuremath{\mathcal E}) = 0$, the long exact sequence associated to $\ensuremath{\mathbb{H}}om(i_*\ensuremath{\mathcal E}, \underline{\hphantom{M}})$ shows that $\ensuremath{\mathbb{H}}om(i_*\ensuremath{\mathcal E}, \ensuremath{\mathcal F}) = 0$, and thus $\ensuremath{\mathcal F} \in \ensuremath{\mathcal F}^\ensuremath{\mathcal E}$ as desired. \end{Prf} A similar result as Proposition \ref{prop:E-tstruct} is also in \cite[Prop.\ 2.7]{Yoshioka:StabilityFM}. We continue to assume that $a, b \in \ensuremath{\mathbb{C}}$ satisfy $B = - \frac{\Im b}{\Im a}$ and the inequality of boundary ``Case $E$''. Let $\ensuremath{\mathbb{C}}oh_0^\ensuremath{\mathcal E} = \langle \ensuremath{\mathcal T}^\ensuremath{\mathcal E}, \ensuremath{\mathcal F}^\ensuremath{\mathcal E}[1] \rangle$ be the $t$-structure given by tilting at the torsion pair of Proposition \ref{prop:E-tstruct}. From the previous discussion, it follows that $Z_{a, b}$ is a stability function for $\ensuremath{\mathbb{C}}oh_0^\ensuremath{\mathcal E}$. \begin{Prop}\label{prop:HNFiltrsBoundary} Harder-Narasimhan filtrations exist for the stability function $Z_{a,b}$ on $\ensuremath{\mathbb{C}}oh_0^\ensuremath{\mathcal E}$. \end{Prop} \begin{Prf} The proof is similar to Lemma \ref{lem:HNFiltrationsDiscrete}. Indeed, since $B = -\frac{\Im b}{\Im a}$ is the slope of $\ensuremath{\mathcal E}$, it is rational; hence the imaginary part of $Z_{a,b}$ is discrete, and we can again apply Proposition \ref{prop:HNFiltrationsDiscrete}. 
First note that for any $A \in \ensuremath{\mathbb{P}}P'_{a,b}(1)$, the sheaf $\ensuremath{\mathbb{H}}H^0(A)$ is in the category extension-generated by $k(x), x \in \ensuremath{\mathbb{P}}^2$, and by $i_*\ensuremath{\mathcal E}$: otherwise $\ensuremath{\mathbb{H}}H^0(A) \in \ensuremath{\mathcal T}^\ensuremath{\mathcal E}$ would have $0 < \Im Z(\ensuremath{\mathbb{H}}H^0(A)) \le \Im Z(A)$. To adapt the proof of Lemma \ref{lem:HNFiltrationsDiscrete} (where instead we knew that $\ensuremath{\mathbb{H}}H^0(A)$ is a zero-dimensional torsion sheaf), we replace all arguments using the length of a maximal zero-dimensional subsheaf by using the function \[ e \colon \ensuremath{\mathbb{C}}oh_0 \to \ensuremath{\mathbb{Z}}, \quad e( \cdot) := \mathop{\mathrm{dim}}\nolimits(\ensuremath{\mathbb{H}}om(i_*\ensuremath{\mathcal E}, \cdot)). \] In many respects, it has the same formal properties needed (e.g.\ subadditivity on short exact sequences), and the proof goes through: Assume that $F\in\ensuremath{\mathbb{C}}oh_0^\ensuremath{\mathcal E}$ has an infinite sequence \begin{equation}\label{eq:Bonn26} 0=A_0\subset A_1\subset\ldots\subset A_j\subset A_{j+1}\subset\ldots\subset F, \end{equation} of subobjects with $A_j\in\ensuremath{\mathbb{P}}P'_{a,b}(1)$. Denote by $B_j$ the cokernel in $\ensuremath{\mathbb{C}}oh_0^\ensuremath{\mathcal E}$ of $A_j\ensuremath{\hookrightarrow} F$. By arguing as in Lemma \ref{lem:HNFiltrationsDiscrete}, we can assume $\ensuremath{\mathbb{H}}H^{-1}(A_j)=\ensuremath{\mathcal F}_{-1}$ and $\ensuremath{\mathbb{H}}H^0(B_j)=\ensuremath{\mathcal G}_1$, for some $\ensuremath{\mathcal F}_{-1},\ensuremath{\mathcal G}_1\in\ensuremath{\mathbb{C}}oh_0$ and for all $j$. 
Let $\ensuremath{\mathcal F}_0$ be the cokernel in $\ensuremath{\mathbb{C}}oh_0$ of $\ensuremath{\mathcal F}_{-1}\ensuremath{\hookrightarrow}\ensuremath{\mathbb{H}}H^{-1}(F)$ and let $\ensuremath{\mathcal G}_0$ be the kernel of $\ensuremath{\mathbb{H}}H^0(F)\ensuremath{\twoheadrightarrow}\ensuremath{\mathcal G}_1$. Then we have an exact sequence \[ 0\to\ensuremath{\mathcal F}_0\stackrel{g}{\to}\ensuremath{\mathbb{H}}H^{-1}(B_j)\to\ensuremath{\mathbb{H}}H^0(A_j)\stackrel{f}{\to}\ensuremath{\mathcal G}_0\to0, \] and we let $\ensuremath{\mathcal K}_j = \ker f = \mathop{\mathrm{coker}} g$. By the long exact $\ensuremath{\mathbb{H}}om$-sequences, we have \[ e(\ensuremath{\mathbb{H}}H^0(A_j)) \le e(\ensuremath{\mathcal K}_j) + e(\ensuremath{\mathcal G}_0) \le e(\ensuremath{\mathbb{H}}H^{-1}(B_j)) + \mathop{\mathrm{dim}}\nolimits \mathop{\mathrm{Ext}}\nolimits^1(i_*\ensuremath{\mathcal E},\ensuremath{\mathcal F}_0) + e(\ensuremath{\mathcal G}_0). \] By definition of $\ensuremath{\mathcal F}^\ensuremath{\mathcal E}$, we have $\ensuremath{\mathbb{H}}om(i_* \ensuremath{\mathcal E}, \ensuremath{\mathbb{H}}H^{-1}(B_j)) = 0$, and thus $e(\ensuremath{\mathbb{H}}H^0(A_j))$ is bounded. Now consider a filtration step of \eqref{eq:Bonn26}, and let $D_j$ be the cokernel in $\ensuremath{\mathbb{C}}oh_0^\ensuremath{\mathcal E}$ of $A_j\ensuremath{\hookrightarrow} A_{j+1}$. 
From the exact sequence \[ 0\to\ensuremath{\mathbb{H}}H^{-1}(D_j)\to\ensuremath{\mathbb{H}}H^0(A_j)\to\ensuremath{\mathbb{H}}H^0(A_{j+1})\stackrel{\phi}{\to}\ensuremath{\mathbb{H}}H^0(D_j)\to0, \] we argue similarly as before: \begin{align*} e(\ker \phi) = e(\ensuremath{\mathbb{H}}H^{-1}(D_j)) + e(\ker \phi) \ge e(\ensuremath{\mathbb{H}}H^0(A_j)) \end{align*} On the other hand, as $\ensuremath{\mathcal T}^\ensuremath{\mathcal E}$ is closed under quotients, we have $\ker \phi \in \ensuremath{\mathcal T}^\ensuremath{\mathcal E}$, and thus $\ker \phi \in \ensuremath{\mathbb{P}}P'_{a,b}(1)$ and $\mathop{\mathrm{Ext}}\nolimits^1(i_*\ensuremath{\mathcal E}, \ker \phi) = 0$. Thus we have \[ e(\ensuremath{\mathbb{H}}H^0(A_{j+1})) = e(\ker \phi) + e(\ensuremath{\mathbb{H}}H^0(D_j)) \ge e(\ker \phi) \ge e(\ensuremath{\mathbb{H}}H^0(A_j)) \] with equality only if $\ensuremath{\mathbb{H}}H^0(D_j) = 0$. By the boundedness established above, we do have $\ensuremath{\mathbb{H}}H^0(D_j) = 0$ for $j \gg 0$; but then $\ensuremath{\mathbb{H}}H^0(A_j) \ensuremath{\twoheadrightarrow} \ensuremath{\mathbb{H}}H^0(A_{j+1})$ stabilizes as $\ensuremath{\mathbb{C}}oh_0$ is Noetherian. \end{Prf} We denote by $\ensuremath{\mathcal W}_\ensuremath{\mathcal E}^+$ the set of stability conditions constructed in the above proposition. The same methods as in Section \ref{sec:constructing} show that they are full, i.e., that they satisfy condition \ref{enum:full} of Definition \ref{def:GeomStability}; alternatively, this follows from Corollary \ref{cor:BoundaryIsAlgebraic}, where it is shown that they can also be constructed as a stability condition whose heart of finite length are representations of a finite quiver. \begin{Prop} For any $\sigma \in \ensuremath{\mathcal W}_\ensuremath{\mathcal E}^+$, the skyscraper sheaves $k(x)$ are $\sigma$-semistable with Jordan-H\"older filtration given as in \eqref{eq:kx-filtration1}. 
Their images under the spherical twist at $i_*\ensuremath{\mathcal E}$ are also $\sigma$-semistable, of the same phase as $k(x)$, with Jordan-H\"older filtration given by \begin{equation} \label{eq:TEkx} \ensuremath{\mathcal E}^x[1] \to {\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E}(k(x)) \to i_*\ensuremath{\mathcal E}^{\oplus r}. \end{equation} \end{Prop} \begin{Prf} The sheaves $\ensuremath{\mathcal E}^x$ are slope-semistable of the same slope as $\ensuremath{\mathcal E}$, and the long exact $\ensuremath{\mathbb{H}}om$-sequence shows $\ensuremath{\mathbb{H}}om(i_*\ensuremath{\mathcal E}, \ensuremath{\mathcal E}^x) = 0$. Hence $\ensuremath{\mathcal E}^x \in \ensuremath{\mathcal F}^\ensuremath{\mathcal E}$, and $\ensuremath{\mathcal E}^x[1] \in \ensuremath{\mathbb{P}}P(1)$, and we indeed have a short exact sequence as in (\ref{eq:kx-filtration1}) in $\ensuremath{\mathbb{P}}P(1)$. We claim that $\ensuremath{\mathcal E}^x[1]$ is $Z_{a,b}$-stable, i.e., that there are no non-trivial short exact sequences $M \ensuremath{\hookrightarrow} \ensuremath{\mathcal E}^x[1] \ensuremath{\twoheadrightarrow} N$ in the abelian category $\ensuremath{\mathbb{P}}P(1)$: Let $C$ denote the kernel of the composition $k(x) \ensuremath{\twoheadrightarrow} \ensuremath{\mathcal E}^x[1] \ensuremath{\twoheadrightarrow} N$ in $\ensuremath{\mathbb{P}}P(1)$. By the long exact cohomology sequence, $C$ is isomorphic to a sheaf $\ensuremath{\mathbb{C}}C$. As observed in the proof of Proposition \ref{prop:HNFiltrsBoundary}, $\ensuremath{\mathbb{C}}C = \ensuremath{\mathbb{H}}H^0(C)$ lies in the category extension-generated by $i_*\ensuremath{\mathcal E}$ and skyscraper sheaves $k(x)$ for $x \in \ensuremath{\mathbb{P}}^2$. 
We claim that due to $\mathop{\mathrm{Ext}}\nolimits^1(k(x), i_*\ensuremath{\mathcal E}) = \mathop{\mathrm{Ext}}\nolimits^1(i_*\ensuremath{\mathcal E}, i_*\ensuremath{\mathcal E}) = 0$, this implies more strongly that there is a short exact sequence $\ensuremath{\mathcal T} \ensuremath{\hookrightarrow} \ensuremath{\mathbb{C}}C \ensuremath{\twoheadrightarrow} i_*\ensuremath{\mathcal E}^{\oplus k}$ for some zero-dimensional torsion sheaf $\ensuremath{\mathcal T}$. Indeed, by induction on the length of the Jorder-H\"older filtration of $\ensuremath{\mathbb{C}}C$ in this finite-length category, we may assume that there is a sheaf $\ensuremath{\mathbb{C}}C' \in \ensuremath{\mathbb{P}}P(1)$ with a short exact sequence $\ensuremath{\mathcal T}' \ensuremath{\hookrightarrow} \ensuremath{\mathbb{C}}C' \ensuremath{\twoheadrightarrow} i_*\ensuremath{\mathcal E}^{\oplus k'}$, such that $\ensuremath{\mathbb{C}}C$ is an extension of $\ensuremath{\mathbb{C}}C'$ by a simple object, i.e., there is a short exact sequence either of the form $k(x) \ensuremath{\hookrightarrow} \ensuremath{\mathbb{C}}C \ensuremath{\twoheadrightarrow} \ensuremath{\mathbb{C}}C'$ or $i_*\ensuremath{\mathcal E} \ensuremath{\hookrightarrow} \ensuremath{\mathbb{C}}C \ensuremath{\twoheadrightarrow} \ensuremath{\mathbb{C}}C'$. In the former case, the claim for $\ensuremath{\mathbb{C}}C$ follows immediately by considering the composition $\ensuremath{\mathbb{C}}C \ensuremath{\twoheadrightarrow} \ensuremath{\mathbb{C}}C' \ensuremath{\twoheadrightarrow} i_*\ensuremath{\mathcal E}^{\oplus k'}$; in the latter case, the vanishing of $\mathop{\mathrm{Ext}}\nolimits^1(\ensuremath{\mathcal T}, i_*\ensuremath{\mathcal E}) = 0$ implies that there is factorization $\ensuremath{\mathcal T}' \ensuremath{\hookrightarrow} \ensuremath{\mathbb{C}}C$; due to $\mathop{\mathrm{Ext}}\nolimits^1(i_*\ensuremath{\mathcal E}, i_*\ensuremath{\mathcal E}) = 0$, the kernel is of the form $i_*\ensuremath{\mathcal E}^{\oplus k'+1}$. 
The composition $\ensuremath{\mathcal T} \ensuremath{\hookrightarrow} \ensuremath{\mathbb{C}}C \ensuremath{\hookrightarrow} k(x)$ can only be injective in $\ensuremath{\mathbb{P}}P(1)$ if it is injective as a map of sheaves; hence either $\ensuremath{\mathcal T} \cong \ensuremath{\mathbb{C}}C \cong k(x)$, or $\ensuremath{\mathcal T} = 0$ and $\ensuremath{\mathbb{C}}C \cong i_*\ensuremath{\mathcal E}^{\oplus k}$. In the former case we have $N = 0$. In the latter case, note that the inclusion $i_*\ensuremath{\mathcal E}^{\oplus r} \ensuremath{\hookrightarrow} k(x)$ factors via $i_*\ensuremath{\mathcal E}^{\oplus r} \ensuremath{\hookrightarrow} \ensuremath{\mathbb{C}}C \cong i_*\ensuremath{\mathcal E}^{\oplus k} \ensuremath{\hookrightarrow} k(x)$ and induces an isomorphism $\ensuremath{\mathbb{H}}om(i_*\ensuremath{\mathcal E}, i_*\ensuremath{\mathcal E}^{\oplus r}) \cong \ensuremath{\mathbb{H}}om(i_*\ensuremath{\mathcal E}, k(x)) \cong \ensuremath{\mathbb{C}}^r$. Thus we must have $k= r$, and $N \cong \ensuremath{\mathcal E}^x[1]$. So in both cases the exact sequence is trivial. By using adjunction one sees that $\ensuremath{\mathbb{R}}Hom(i_*\ensuremath{\mathcal E}, k(x)) = \ensuremath{\mathbb{C}}^r \oplus \ensuremath{\mathbb{C}}^r[-1]$. The long exact cohomology sequence shows $\ensuremath{\mathbb{H}}H^0({\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E}(k(x))) \cong i_*\ensuremath{\mathcal E}^{\oplus r}$ and $\ensuremath{\mathbb{H}}H^{-1}({\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E}(k(x))) = \ensuremath{\mathcal E}^x$, and so there is an exact triangle as in (\ref{eq:TEkx}). This shows ${\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E}(k(x)) \in \ensuremath{\mathbb{P}}P(1)$, and that (\ref{eq:TEkx}) is a Jordan-H\"older filtration. 
\end{Prf} We can deform $Z$ such that $Z(k(x)) = -1$ remains constant, $Z(i_*\ensuremath{\mathcal E})$ moves to the upper half-plane, and $Z(\ensuremath{\mathcal E}^x[1])$ moving to the lower-half plane; then by Lemma \ref{lem:semitostable}, all $k(x)$ become stable. It follows that the closure $W_\ensuremath{\mathcal E}^+$ of the orbit of $\ensuremath{\mathcal W}_\ensuremath{\mathcal E}^+$ under the action of $\ensuremath{\mathbb{C}}$ is a wall of $\partial U$. The objects $k(x)$ and ${\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E}(k(x))$ become stable on opposite sides of the wall, and thus $W_\ensuremath{\mathcal E}^+ \subset \overline{U} \cap {\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E}(\overline{U})$. If we apply ${\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E}^{-1}$ to $\ensuremath{\mathcal W}_\ensuremath{\mathcal E}^+$, we obtain a wall where the Jordan-H\"older filtration of $k(x)$ is given by the image of (\ref{eq:TEkx}) under ${\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E}^{-1}$, which is indeed the exact triangle (\ref{eq:kx-filtration2}). This finishes the proof of Theorem \ref{thm:boundary}. Note that the proof also implies that two such walls can only intersect at points where the image of the central charge is contained in a line $e^{i\pi\phi}\cdot \ensuremath{\mathbb{R}} \subset \ensuremath{\mathbb{C}}$. In that case, the heart $\ensuremath{\mathbb{P}}P(\phi)$ of the associated t-structure has finite length; in fact, it is one of the ``quivery'' stability conditions constructed in \cite{Bridgeland:stab-CY}, and used in the following section. 
\begin{Lem}\label{lem:semitostable} Let $E \in \ensuremath{\mathbb{D}}D_0$ and $\sigma \in \mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$ be a stability condition such that $E$ is $\sigma$-semistable, and assume that there is a Jordan-H\"older filtration $M^{\oplus r} \ensuremath{\hookrightarrow} E \ensuremath{\twoheadrightarrow} N$ of $E$ such that $M, N$ are $\sigma$-stable, $\ensuremath{\mathbb{H}}om(E, M) = 0$, and $[E]$ and $[M]$ are linearly independent classes in $K(\ensuremath{\mathbb{D}}D_0)$. Then $\sigma$ is in the closure of the set of stability conditions where $E$ is stable. \end{Lem} \begin{Prf} Let $\phi$ be the phase of $M, E, N$ with respect to $\sigma = (Z, \ensuremath{\mathbb{P}}P)$. By similar arguments as in the proof of Proposition \ref{prop:chambers}, we can show that for any stability condition $\sigma'$ sufficiently close to $\sigma$, $E$ can only be destabilized by subobjects $F \ensuremath{\hookrightarrow} E$ in $\ensuremath{\mathbb{P}}P(\phi)$. Now let $\sigma' = (Z', \ensuremath{\mathbb{P}}P')$ be such a stability condition close by with $M, N$ stable and $\phi'(M) < \phi'(E) < \phi'(N)$. Assume that $F \in \ensuremath{\mathbb{P}}P(\phi)$ is a stable destabilizing subobject of $E$ with respect to $Z'$. If the image of the composition $F \to N$ is zero, then $F$ factors via $M$, hence $\phi'(F) < \phi'(M)$. So $F \to N$ must be surjective; its kernel $G \in \ensuremath{\mathbb{P}}P(\phi)$ is a subobject of $M^{\oplus r}$, and thus of the form $M^{\oplus k}$ for some $k < r$. Hence the quotient of $F \ensuremath{\hookrightarrow} E$ is isomorphic to $M^{\oplus r-k}$, in contradiction to $\ensuremath{\mathbb{H}}om(E, M) = 0$. \end{Prf} \section{Algebraic stability conditions}\label{sec:AlgebraicStability1} In this section we study the open subset $\mathop{\mathrm{Stab}}_a$, introduced by Bridgeland in \cite{Bridgeland:stab-CY}, consisting of \emph{algebraic stability conditions}. 
We first introduce open subsets $\Theta_\mathfrak S$, associated to a collection of spherical objects $\mathfrak S$, and study their boundary in $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$. The subset of algebraic stability conditions will then be the union of all $\Theta_\mathfrak S$. Then we study in detail the relation between $\mathop{\mathrm{Stab}}_a$ and $U$. In particular, we show that $\mathop{\mathrm{Stab}}_a$ contains the boundary of $U$ (described in the previous section) and, vice versa, that the intersection of $U$ with $\mathop{\mathrm{Stab}}_a$ is strictly contained in $U$. In the next section we will apply all of this to prove Theorem \ref{thmi:sc}. Let $\ensuremath{\mathcal E}E=\{\ensuremath{\mathcal E}_0,\ensuremath{\mathcal E}_1,\ensuremath{\mathcal E}_2\}$ be an exceptional collection of vector bundles on $\ensuremath{\mathbb{P}}^2$. Recall that (see \cite{Goro-Ruda:Exceptional,Bondal}) a collection $\ensuremath{\mathcal E}E$ of exceptional vector bundles is called \emph{exceptional} if $\mathop{\mathrm{Ext}}\nolimits^p(\ensuremath{\mathcal E}_j,\ensuremath{\mathcal E}_i)=0$, for all $p$ and all $j>i$. On $\ensuremath{\mathbb{P}}^2$, all exceptional collections also satisfy the vanishing (strong exceptional collection) $\mathop{\mathrm{Ext}}\nolimits^p(\ensuremath{\mathcal E}_i,\ensuremath{\mathcal E}_j)=0$, for all $p>0$ and all $i<j$. Moreover, every exceptional vector bundle on $\ensuremath{\mathbb{P}}^2$ is part of an exceptional collection of vector bundles. The subcategory of $\ensuremath{\mathbb{D}}D_0$ generated by extensions by $i_*\ensuremath{\mathcal E}_0[2]$, $i_*\ensuremath{\mathcal E}_1[1]$, and $i_*\ensuremath{\mathcal E}_2$ \[ \ensuremath{\mathcal A}_{\ensuremath{\mathcal E}E}:=\langle i_*\ensuremath{\mathcal E}_2,i_*\ensuremath{\mathcal E}_1[1],i_*\ensuremath{\mathcal E}_0[2]\rangle \] is the heart of a bounded $t$-structure on $\ensuremath{\mathbb{D}}D_0$. 
By \cite{Bridgeland:TStruct}, the category $\ensuremath{\mathcal A}_\ensuremath{\mathcal E}E$ can also be described as the category of nilpotent modules over a certain algebra. \begin{Def}\label{def:quivery} A heart of a bounded $t$-structure on $\ensuremath{\mathbb{D}}D_0$ is called \emph{quivery} if it is of the form $\ensuremath{\mathbb{P}}hi(\ensuremath{\mathcal A}_\ensuremath{\mathcal E}E)$, for some exceptional collection $\ensuremath{\mathcal E}E$ of vector bundles on $\ensuremath{\mathbb{P}}^2$ and for some autoequivalence $\ensuremath{\mathbb{P}}hi$ of $\ensuremath{\mathbb{D}}D_0$ given by composition of spherical twists associated to exceptional vector bundles. \end{Def} Notice that a quivery subcategory is of finite length, with simple objects $\ensuremath{\mathbb{P}}hi(i_*\ensuremath{\mathcal E}_2)$, $\ensuremath{\mathbb{P}}hi(i_*\ensuremath{\mathcal E}_1[1])$, and $\ensuremath{\mathbb{P}}hi(i_*\ensuremath{\mathcal E}_0[2])$, which are also spherical in $\ensuremath{\mathbb{D}}D_0$. A quivery subcategory is called \emph{ordered} if it comes with an ordering of $S_0,S_1,S_2$ of its simple objects compatible with the requirement that $\ensuremath{\mathbb{H}}om^k(S_j,S_l)=0$ unless $0\leq k\leq3$ and $j-l\equiv k$(mod $3$). A collection $\mathfrak S=\{S_0,S_1,S_2\}$ of spherical objects of $\ensuremath{\mathbb{D}}D_0$ is called an \emph{ordered quivery collection} if it arises as an ordered collection of simple objects in an ordered quivery subcategory (which we will denote by $\ensuremath{\mathcal A}_{\mathfrak S}$). By \cite[Theorem 4.11]{Bridgeland:TStruct}, we can define an action on the set of quivery ordered subcategories of $\ensuremath{\mathbb{D}}D_0$ of the affine braid group $B_3$, i.e., the group generated by elements $\tau_j$ ($j\in\ensuremath{\mathbb{Z}}_3$) and $r$ subject to the relations \[ r\tau_jr^{-1}=\tau_{j+1},\qquad\tau_j\tau_{j+1}\tau_j=\tau_{j+1}\tau_j\tau_{j+1},\qquad r^3=1. 
Indeed, to define such an action it is sufficient to specify how the generators of $B_3$ act on the simple objects of an ordered quivery category:
Since all exceptional collections of vector bundles on $\ensuremath{\mathbb{P}}^2$ can be obtained by a sequence of mutations from $\ensuremath{\mathcal E}E_1:=\{\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}(-1),\Omega_{\ensuremath{\mathbb{P}}^2}(1),\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}\}$, all ordered quivery subcategories can be obtained from \[ \ensuremath{\mathcal A}_1:=\ensuremath{\mathcal A}_{\ensuremath{\mathcal E}E_1}=\langle i_*\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2},i_*\Omega_{\ensuremath{\mathbb{P}}^2}(1)[1],i_*\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}(-1)[2]\rangle \] by the action of $B_3$. \end{Rem} \begin{Def}\label{def:algebraicstability} A stability condition $\sigma$ on $\ensuremath{\mathbb{D}}D_0$ is called \emph{algebraic} if there exists $M\in\widetilde{\mathop{\mathrm{GL}}}_2(\ensuremath{\mathbb{R}})$ such that the heart of $\sigma\cdot M$ is quivery. Denote by $\mathop{\mathrm{Stab}}_a$ the subset of $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$ consisting of algebraic stability conditions. \end{Def} Using \cite[Prop.\ 4.10]{Bridgeland:TStruct} and \cite[Cor.\ 3.20]{Macri:Curves} it follows that $\mathop{\mathrm{Stab}}_a$ is an open connected $3$-dimensional submanifold of $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$. Moreover, it is easy to construct stability conditions in $\mathop{\mathrm{Stab}}_a$ in which the skyscraper sheaves are all stable (for example, a stability condition with heart $\ensuremath{\mathcal A}_1$ in which $\phi(i_*\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2})<\phi(i_*\Omega_{\ensuremath{\mathbb{P}}^2}(1)[1])<\phi(i_*\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}(-1)[2])$). Hence $\mathop{\mathrm{Stab}}_a\subset\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$, but the inclusion is strict (this can be deduced from Proposition \ref{prop:geomvsalg} and Remark \ref{rmk:Bonn1107}). 
Finally, by its own definition, $\mathop{\mathrm{Stab}}_a$ is invariant under the subgroup of the autoequivalences of $\ensuremath{\mathbb{D}}D_0$ which is generated by spherical twists ${\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal F}$, with $\ensuremath{\mathcal F}$ an exceptional bundle on $\ensuremath{\mathbb{P}}^2$. \begin{Def}\label{def:theta} Let $\mathfrak S$ be an ordered quivery collection. We denote by $\Theta_{\mathfrak S}$ the open subset of $\mathop{\mathrm{Stab}}_a$ consisting of stability conditions whose heart is, up to the action of $\widetilde{\mathop{\mathrm{GL}}}_2(\ensuremath{\mathbb{R}})$, equivalent to $\ensuremath{\mathcal A}_\mathfrak S$. With a slight abuse of notation, when $\ensuremath{\mathcal E}E$ is an exceptional collection of vector bundles on $\ensuremath{\mathbb{P}}^2$, we denote by $\Theta_{\ensuremath{\mathcal E}E}$ the open subset of $\mathop{\mathrm{Stab}}_a$ consisting of stability conditions whose heart is, up to the action of $\widetilde{\mathop{\mathrm{GL}}}_2(\ensuremath{\mathbb{R}})$, equivalent to $\ensuremath{\mathcal A}_\ensuremath{\mathcal E}E$. \end{Def} \begin{Lem}\label{lem:thetaP2} The region $\Theta_{\mathfrak S} \subset \mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$ is characterized as the subset where $S_0, S_1, S_2$ are stable, and where their phases $\phi_j := \phi(S_j)$ satisfy \begin{equation} \label{eq:phasesclose} \abs{\phi_j - \phi_{j+1}} < 1 \quad \text{for $j= 0, 1, 2$}.\end{equation} It is homeomorphic to \[ \ensuremath{\mathbb{C}}C_{\mathfrak S} = \left\{(m_0,m_1,m_2,\phi_0,\phi_1,\phi_2)\in\ensuremath{\mathbb{R}}^6\colon \text{\rm $m_j>0$ and \eqref{eq:phasesclose} holds for all $j$} \right\} \] \end{Lem} \begin{Prf} Evidently $S_j$ are stable in $\Theta_\mathfrak S$, and satisfy equation \eqref{eq:phasesclose}. 
Conversely, if $S_j$ are stable in $(Z, \ensuremath{\mathbb{P}}P)$ satisfying equation \eqref{eq:phasesclose}, then for $\phi$ slightly smaller than $\mathop{\mathrm{min}}\nolimits \phi_j$ we have $\ensuremath{\mathcal A}_\mathfrak S \subset \ensuremath{\mathbb{P}}P((\phi, \phi+1])$, thus $\ensuremath{\mathcal A}_\mathfrak S = \ensuremath{\mathbb{P}}P((\phi, \phi+1])$ and $(Z, \ensuremath{\mathbb{P}}P) \in \Theta_\mathfrak S$. \end{Prf} Notice that, for later use, ${\mathop{\mathrm{ST}}\nolimits}_{S_{j+1}}(S_{j})$ is an extension of $S_{j+1}$ by $m$ copies of $S_{j}$, where $m=\mathop{\mathrm{dim}}\nolimits\ensuremath{\mathbb{H}}om^1(S_{j+1},S_j)$. Hence, its class in $K(\ensuremath{\mathbb{D}}D_0)$ is given by \[ [{\mathop{\mathrm{ST}}\nolimits}_{S_{j+1}}(S_j)]=m[S_j]+[S_{j+1}]. \] Moreover, it belongs to $\ensuremath{\mathcal A}_\mathfrak S$ and, if $\phi(S_{j+1})>\phi(S_j)$ then it is also $\sigma$-stable. A similar observation holds true for ${\mathop{\mathrm{ST}}\nolimits}^{-1}_{S_{j}}(S_{j+1})$. The next proposition generalizes \cite[Theorem 1.1]{Bridgeland:stab-CY}. \begin{Prop}\label{prop:BoundaryTheta} Let $\mathfrak S=\{S_0,S_1,S_2\}$ be an ordered quivery collection. Then the closure $\overline{\Theta}_{\mathfrak S}$ of $\Theta_{\mathfrak S}$ in $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$ is contained in $\mathop{\mathrm{Stab}}_a$. \end{Prop} \begin{Prf} Let $\overline{\sigma}=(\overline{Z},\overline{\ensuremath{\mathbb{P}}P})\in\overline{\Theta}_{\mathfrak S}\setminus\Theta_{\mathfrak S}$ be the limit of a sequence $\{\sigma_s\}_{s\in\ensuremath{\mathbb{N}}}$, with $\sigma_s\in\Theta_{\mathfrak S}$. Then $S_0$, $S_1$, and $S_2$ are $\overline{\sigma}$-semistable; up to the action of $\ensuremath{\mathbb{C}}$, we have the following possibilities for their phases: \begin{enumerate} \item \label{enum:Zinline} The image of $\overline{Z}$ is a line in the plane. 
\item \label{enum:jj+1} The image of $\overline{Z}$ is not a line in the plane and there exists $j$ such that $0=\overline{\phi}(S_j)=\overline{\phi}(S_{j+1})-1$ (here and in the sequel all the indices are taken modulo $3$). \item \label{enum:jj-1} The image of $\overline{Z}$ is not a line in the plane and there exists $j$ such that $0=\overline{\phi}(S_j)=\overline{\phi}(S_{j-1})-1$. \end{enumerate} We begin with case (\ref{enum:jj+1}). First of all notice that $0<\overline{\phi}(S_{j-1})<1$. Moreover, up to the action of $\ensuremath{\mathbb{C}}$, we can assume every $\sigma_s$ has heart $\ensuremath{\mathcal A}_{\mathfrak S}$. Let $P_m$ be the Kronecker quiver with $m = \mathop{\mathrm{dim}}\nolimits\ensuremath{\mathbb{H}}om^1(S_{j+1},S_j)>0$ , i.e., the quiver with two vertices and $m$ arrows from the first to the second vertex. Consider the faithful functor $I \colon \ensuremath{\mathbb{D}}b(P_m)\to\mathrm{Tr}(S_j,S_{j+1})\subset\ensuremath{\mathbb{D}}D_0$, which maps the two simple quiver representations of $P_m$ corresponding to the two vertices respectively to $S_{j+1}$ and $S_j$. Here $\mathrm{Tr}(S_j,S_{j+1})$ denotes the triangulated subcategory of $\ensuremath{\mathbb{D}}D_0$ generated by $S_j$ and $S_{j+1}$. For $s\gg 0$, the stability condition $\sigma_s$ induces a stability condition on $\mathrm{Tr}(S_j,S_{j+1})$, whose heart is the abelian category generated by extensions by $S_j$ and $S_{j+1}$. Now the functor $I$ restricted to mod-$P_m$ is full and faithful. By \cite[Prop. 2.12]{MMS:inducing}, $\sigma_s$ induces a stability condition $I^{-1}\sigma_s$ in $\ensuremath{\mathbb{D}}b(P_m)$. Hence, by \cite[Lemma 2.9]{MMS:inducing}, $I^{-1}\overline{\sigma}\in\mathop{\mathrm{Stab}}(P_m)$. By \cite[Lemma 4.2]{Macri:Curves}, there exists an integer $k\in\ensuremath{\mathbb{Z}}$ such that $\overline{\sigma}\in\Theta_{\tau_{j+1}^k\mathfrak S}$. 
More explicitly, if $\tau_{j+1}^k\mathfrak S=\{R_0,R_1,R_2\}$, then what we proved is that $R_j$ are stable with respect to $\overline{\sigma}$, and that we have Jordan-H\"older filtrations given by $S_{j-1}=R_{j-1}$ and \begin{equation}\label{eq:Bonn1007} \begin{split} &R_{j}^{\oplus u_j}\to S_j[\epsilon]\to R_{j+1}^{\oplus v_j}\\ &R_{j}^{\oplus u_{j+1}}\to S_{j+1}[\epsilon-1]\to R_{j+1}^{\oplus v_{j+1}}, \end{split} \end{equation} where $\epsilon=0,1$ according to $k$. For possibility (\ref{enum:jj-1}), we have similarly $0<\overline{\phi}(S_{j+1})<1$. Then $\overline{\sigma}\in\Theta_{\tau_{j-1}\mathfrak S}$ and $S_0$, $S_1$, and $S_2$ remain stable in $\overline{\sigma}$. Finally, if the image of $Z$ lies in a line (case (\ref{enum:Zinline})), then we can deform $\overline{\sigma}$ in $\overline{\Theta}_{\mathfrak S}$ in such a way to reduce to the situation of case (\ref{enum:jj+1}). We can apply the previous procedure and find $g_1\in B_3$ such that $\overline{\sigma}\in\overline{\Theta}_{g_1\mathfrak S}$. If $\overline{\sigma}\in\Theta_{g_1\mathfrak S}$ we have finished the proof. Assume not. Then we continue and again deform $\overline{\sigma}$ in $\overline{\Theta}_{g_1\mathfrak S}$ to reduce again to case (\ref{enum:jj+1}). We produce a new element $g_2\in B_3$ and so on. This procedure must eventually terminate at a step $N$: indeed at every step, by \eqref{eq:Bonn1007}, we are constructing a filtration of $S_0$, $S_1$, and $S_2$ into $\overline{\sigma}$-semistable objects of the same phase. But $\overline{\sigma}$ is locally-finite. Hence at a certain point we produce a stable factor and so $\overline{\sigma}\in\Theta_{g_N\mathfrak S}$, as wanted. \end{Prf} We can now study the relation of $\mathop{\mathrm{Stab}}_a$ with $U$. \begin{Lem}\label{lem:StabilitySkyscrapers} Let $\ensuremath{\mathcal E}E$ be an exceptional collection of vector bundles on $\ensuremath{\mathbb{P}}^2$. 
Then $\Theta_{\ensuremath{\mathcal E}E}\cap U\neq\emptyset$ and it is connected. \end{Lem} \begin{Prf} First of all notice that all skyscraper sheaves $k(x)$ for $x \in \ensuremath{\mathbb{P}}^2$ belong to $\ensuremath{\mathcal A}_\ensuremath{\mathcal E}E$. Consider the stability condition $\overline{\sigma}\in\Theta_\ensuremath{\mathcal E}E$ with heart $\ensuremath{\mathcal A}_{\ensuremath{\mathcal E}E}$, whose simple objects have phases \[ \overline{\phi}(i_*\ensuremath{\mathcal E}_2)=\overline{\phi}(i_*\ensuremath{\mathcal E}_1[1])=\overline{\phi}(i_*\ensuremath{\mathcal E}_0[2])=1. \] Let $\ensuremath{\mathcal E}_2^x$ be the kernel of $\ensuremath{\mathcal E}_2^{\oplus r} \ensuremath{\twoheadrightarrow} k(x)$ as in Section \ref{sec:boundary}; then $\ensuremath{\mathcal E}_2^x[1] \in \ensuremath{\mathcal A}_\ensuremath{\mathcal E}E$, and in fact it is contained in the abelian category generated by extensions by $i_*\ensuremath{\mathcal E}_1[1]$ and $i_*\ensuremath{\mathcal E}_0[2]$. We can deform $\overline{\sigma}$ slightly to a stability condition $\sigma\in\Theta_\ensuremath{\mathcal E}E$ with \begin{itemize} \item $\phi(i_*\ensuremath{\mathcal E}_2)=\phi(k(x))=\phi(\ensuremath{\mathcal E}_2^x[1])$, \item $\phi(i_*\ensuremath{\mathcal E}_1[1])<\phi(i_*\ensuremath{\mathcal E}_0[2])$. \end{itemize} We claim that $\ensuremath{\mathcal E}_2^x$ is $\sigma$-stable. Indeed, as in the proof of Proposition \ref{prop:BoundaryTheta}, we can consider the faithful functor $I\colon \ensuremath{\mathbb{D}}b(P_m)\to\mathrm{Tr}(i_*\ensuremath{\mathcal E}_0,i_*\ensuremath{\mathcal E}_1)$, where $m = \mathop{\mathrm{dim}}\nolimits\ensuremath{\mathbb{H}}om(i_*\ensuremath{\mathcal E}_0,i_*\ensuremath{\mathcal E}_1)>0$.
Then $\ensuremath{\mathcal E}_2^x\cong I(\widetilde{\ensuremath{\mathcal E}}_2^x)$, and to prove that $\ensuremath{\mathcal E}_2^x$ is $\sigma$-stable is equivalent to proving that $\widetilde{\ensuremath{\mathcal E}}_2^x$ is $I^{-1}\sigma$-stable in $\ensuremath{\mathbb{D}}b(P_m)$. But the stability of $\widetilde{\ensuremath{\mathcal E}}_2^x$ follows immediately from \cite[Proposition 4.4]{King:QuiverStability}. Hence $k(x)$ is $\sigma$-semistable, and its two Jordan--H\"older factors are $\ensuremath{\mathcal E}_2^x[1]$ and $i_*\ensuremath{\mathcal E}_2^{\oplus r_2}$, where $r_2$ is the rank of $\ensuremath{\mathcal E}_2$. By Lemma \ref{lem:semitostable}, $\sigma\in\Theta_\ensuremath{\mathcal E}E\cap\overline{U}$, and so $\Theta_{\ensuremath{\mathcal E}E}\cap U\neq\emptyset$ since $\Theta_\ensuremath{\mathcal E}E$ is open. To prove connectedness, we may first use the action by $\ensuremath{\mathbb{C}}$ to fix the phase of $k(x)$ to be 1 with $Z(k(x)) = -1$. Then every class of a subobject of $k(x)$ gives a linear inequality for the imaginary part of $Z$, and thus $\Theta_\ensuremath{\mathcal E}E \cap U$ is cut out by a finite number of half-spaces. \end{Prf} \begin{Cor}\label{cor:BoundaryIsAlgebraic} We have \[ {\mathop{\mathrm{Stab}}}^{\dag}(\ensuremath{\mathbb{D}}D_0)={\mathop{\mathrm{Stab}}}_a\cup\bigcup\ensuremath{\mathbb{P}}hi(U), \] where the union is taken over all autoequivalences $\ensuremath{\mathbb{P}}hi$ of $\ensuremath{\mathbb{D}}D_0$ which belong to the subgroup generated by spherical twists ${\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal F}$ ($\ensuremath{\mathcal F}$ an exceptional vector bundle on $\ensuremath{\mathbb{P}}^2$). \end{Cor} \begin{Prf} By Corollary \ref{cor:ConnectedComponent}, we know that $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)=\bigcup\ensuremath{\mathbb{P}}hi(\overline{U})$. We only need to show that the boundary $\partial U$ is contained in $\mathop{\mathrm{Stab}}_a$.
Complete $\ensuremath{\mathcal E}$ to an exceptional collection $\ensuremath{\mathcal E}E=\{\ensuremath{\mathcal E}_0,\ensuremath{\mathcal E}_1,\ensuremath{\mathcal E}_2=\ensuremath{\mathcal E}\}$ of vector bundles on $\ensuremath{\mathbb{P}}^2$. By the proof of Lemma \ref{lem:StabilitySkyscrapers}, there exists a stability condition $\sigma = (Z, \ensuremath{\mathbb{P}}P) \in \Theta_{\ensuremath{\mathcal E}E}\cap \ensuremath{\mathcal W}_\ensuremath{\mathcal E}^+$, where $\ensuremath{\mathcal W}_\ensuremath{\mathcal E}^+$ consists of the stability conditions constructed by Propositions \ref{prop:E-tstruct} and \ref{prop:HNFiltrsBoundary}. For such $\sigma$, the objects $i_*\ensuremath{\mathcal E}_0[1]$ and $i_*\ensuremath{\mathcal E}_1[1]$ belong to $\ensuremath{\mathbb{C}}oh_0^\ensuremath{\mathcal E}$; combined with Lemma \ref{lem:thetaP2} we get \begin{equation}\label{eq:IneqPhases} 0 < \phi(i_*\ensuremath{\mathcal E}_0[1]) < \phi(i_*\ensuremath{\mathcal E}_1[1]) < 1 = \phi(i_* \ensuremath{\mathcal E}_2). \end{equation} By definition $\ensuremath{\mathcal W}_{\ensuremath{\mathcal E}}^+$ is connected. Moreover, $\Theta_{\ensuremath{\mathcal E}E}\cap\ensuremath{\mathcal W}_{\ensuremath{\mathcal E}}^+\neq\emptyset$ is open in $\ensuremath{\mathcal W}_{\ensuremath{\mathcal E}}^+$. We want to show it is also closed. Let $\overline{\sigma}$ be a stability condition on the boundary $\ensuremath{\mathcal W}_{\ensuremath{\mathcal E}}^+ \cap \partial \Theta_{\ensuremath{\mathcal E}E}$. Due to the inequalities \eqref{eq:IneqPhases}, the only inequality of Lemma \ref{lem:thetaP2} that can become an equality for $\overline{\sigma}$ is \[ \overline{\phi}(i_*\ensuremath{\mathcal E}_0[1])= \overline{\phi}(i_*\ensuremath{\mathcal E}_1[1]). 
\] On the other hand, we have $\ensuremath{\mathcal E}_2^x[1] \in \overline{\ensuremath{\mathbb{P}}P}(1)$ by construction of $\ensuremath{\mathcal W}_{\ensuremath{\mathcal E}}^+$ and, at the same time, it lies in the abelian subcategory generated by $i_*\ensuremath{\mathcal E}_0[2]$ and $i_*\ensuremath{\mathcal E}_1[1]$. Therefore, the central charge $\overline{Z}$ is contained in a line, i.e., $\overline{\sigma}\notin\ensuremath{\mathcal W}_{\ensuremath{\mathcal E}}^+$. Hence $W_\ensuremath{\mathcal E}^+ = \overline{\ensuremath{\mathcal W}_{\ensuremath{\mathcal E}}^+} \subset\overline{\Theta}_{\ensuremath{\mathcal E}E}$ and, by Proposition \ref{prop:BoundaryTheta}, $W_\ensuremath{\mathcal E}^+ \subset \mathop{\mathrm{Stab}}_a$. For the case of the boundary of type $W_\ensuremath{\mathcal E}^-$, simply observe that $W_\ensuremath{\mathcal E}^-={\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E}^{-1}(W_\ensuremath{\mathcal E}^+)\subset\mathop{\mathrm{Stab}}_a$. By Theorem \ref{thm:boundary}, $\partial U\subset\mathop{\mathrm{Stab}}_a$, as wanted. \end{Prf} Notice that, in the proof of Corollary \ref{cor:BoundaryIsAlgebraic}, we actually showed that \begin{equation}\label{eq:Utah29909} \partial U\subset\bigcup\overline{\Theta}_\ensuremath{\mathcal E}E, \end{equation} where the union is taken over all exceptional collections of vector bundles on $\ensuremath{\mathbb{P}}^2$. It follows that: \begin{Rem} \label{rem:degenerate} There is a one-to-one correspondence between quivery subcategories and loci in $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$ of codimension 2 where the image of the central charge is contained in a line. \end{Rem} Indeed, such a degenerate stability condition must lie, up to translation by spherical twists, in the boundary $\partial U$. It has a unique heart (up to shifts), which must be $\ensuremath{\mathcal A}_\ensuremath{\mathcal E}E$ for some exceptional collection $\ensuremath{\mathcal E}E$. 
\begin{Cor}\label{cor:IntersectionIsConnected} $\mathop{\mathrm{Stab}}_a\cap U$ is connected. \end{Cor} \begin{Prf} Let $\ensuremath{\mathcal E}E=\{\ensuremath{\mathcal E}_0,\ensuremath{\mathcal E}_1,\ensuremath{\mathcal E}_2\}$ be an exceptional collection of vector bundles on $\ensuremath{\mathbb{P}}^2$. By Lemma \ref{lem:StabilitySkyscrapers}, $\Theta_\ensuremath{\mathcal E}E\cap U$ is nonempty and connected. We first claim that we can connect in $\mathop{\mathrm{Stab}}_a\cap U$ any stability condition in $\Theta_\ensuremath{\mathcal E}E\cap U$ to a stability condition in $\Theta_{\ensuremath{\mathcal E}E_1}\cap U$. We proceed by induction on the length of a mutation from $\ensuremath{\mathcal E}E_1$ to $\ensuremath{\mathcal E}E$. By Remark \ref{rmk:GoroRuda}, we need to show that a stability condition in $\Theta_{\tau_j^{\pm 1}\ensuremath{\mathcal E}E}\cap U$, for $j=1,2$, can be connected to a stability condition in $\Theta_\ensuremath{\mathcal E}E\cap U$. Let $\sigma\in\Theta_{\ensuremath{\mathcal E}E}\cap U$. Then there exists a continuous family $G(t)\in\widetilde{\mathop{\mathrm{GL}}}_2(\ensuremath{\mathbb{R}})$, $t\in\ensuremath{\mathbb{R}}$, such that $\sigma\cdot G(t)\to\overline{\sigma}$, for $t\to+\infty$, where $\overline{\sigma}\in\Theta_{\ensuremath{\mathcal E}E}$ is a stability condition having $\ensuremath{\mathcal A}_{\ensuremath{\mathcal E}E}$ as heart and $\overline{\phi}(i_*\ensuremath{\mathcal E}_2)=\overline{\phi}(i_*\ensuremath{\mathcal E}_1[1])=\overline{\phi}(i_*\ensuremath{\mathcal E}_0[2])=1$. Hence $\overline{\sigma}\in\overline{U}$. By Theorem \ref{thm:boundary}, there exist two stability conditions $\sigma_1\in\Theta_{\ensuremath{\mathcal E}E}\cap\ensuremath{\mathcal W}_{\ensuremath{\mathcal E}_2}^+$ and $\sigma_2\in\ensuremath{\mathcal W}_{\ensuremath{\mathcal E}_0}^-$.
If $\phi_k$ denotes the phase function in $\sigma_k$ ($k=1,2$), we must have \[ \phi_1(i_*\ensuremath{\mathcal E}_0[2])>\phi_1(i_*\ensuremath{\mathcal E}_2)>\phi_1(i_*\ensuremath{\mathcal E}_1[1]) \] and \[ \phi_2(i_*\ensuremath{\mathcal E}_1[1])>\phi_2(i_*\ensuremath{\mathcal E}_0[2])>\phi_2(i_*\ensuremath{\mathcal E}_2). \] But then $\sigma_1\in\Theta_{\ensuremath{\mathcal E}E}\cap\Theta_{\tau_2^{\pm 1}\ensuremath{\mathcal E}E}\cap\overline{U}$ and $\sigma_2\in\Theta_{\ensuremath{\mathcal E}E}\cap\Theta_{\tau_1^{\pm 1}\ensuremath{\mathcal E}E}\cap\overline{U}$. Since the subsets $\Theta$ are open, this is enough to conclude that $\Theta_\ensuremath{\mathcal E}E\cap\Theta_{\tau_j^{\pm 1}\ensuremath{\mathcal E}E}\cap U\neq\emptyset$, for $j=1,2$. This shows the claim. In general, let $\sigma\in\Theta_\mathfrak S\cap U$, for an ordered quivery collection $\mathfrak S=\{S_0,S_1,S_2\}$. Then, proceeding as above, $\Theta_\mathfrak S\cap U$ is connected and we can find a stability condition $\overline{\sigma}$ in the closure of the $\widetilde{\mathop{\mathrm{GL}}}_2(\ensuremath{\mathbb{R}})$-orbit of $\sigma$ such that $S_0$, $S_1$, and $S_2$ are $\overline{\sigma}$-stable of the same phase, that is $\overline{\sigma}\in\Theta_\mathfrak S\cap\overline{U}$. But then, by \eqref{eq:Utah29909}, $\overline{\sigma}\in\overline{\Theta}_{\ensuremath{\mathcal E}E}$, for some exceptional collection $\ensuremath{\mathcal E}E$ of vector bundles on $\ensuremath{\mathbb{P}}^2$. This gives $\Theta_\ensuremath{\mathcal E}E\cap\Theta_\mathfrak S\cap U\neq\emptyset$ and this intersection is connected, which completes the proof. \end{Prf} We conclude the section by making a comparison between $\mathop{\mathrm{Stab}}_a$ and $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$. 
To this end, we define $\mathop{\mathrm{Stab}}_g$ as the set of geometric stability conditions which, up to the action of $\ensuremath{\mathbb{C}}$, are of the form $\sigma_{a,b}$ with \begin{align*} &\Im a>0\\ & \ensuremath{\mathbb{R}}e b>-B\cdot\ensuremath{\mathbb{R}}e a+\frac{1}{2}B^2, \end{align*} where as in Definition \ref{def:setG}, $B:=-\frac{\Im b}{\Im a}$. By Theorem \ref{thm:geom-stability}, all pairs $(a,b)\in\ensuremath{\mathbb{C}}^2$ satisfying the above inequalities are actual stability conditions. This implies that $\mathop{\mathrm{Stab}}_g$ is an open, connected, and simply-connected subset of $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D_0)$. Moreover, up to the action of $\widetilde{\mathop{\mathrm{GL}}}_2(\ensuremath{\mathbb{R}})$, we can assume the central charge of a stability condition in $\mathop{\mathrm{Stab}}_g$ to take the form (see \cite{Aaron-Daniele}) \[ Z^{t,m}(-)=-\int_{\ensuremath{\mathbb{P}}^2}e^{-(t+im)h}\mathop{\mathrm{ch}}\nolimits(-), \] for $t,m\in\ensuremath{\mathbb{R}}$, $m>0$, and $h$ the class of a line in $\ensuremath{\mathbb{P}}^2$. In such a case, we denote the corresponding stability condition by $\sigma^{t,m}$. Let $\ensuremath{\mathcal E}E=\{\ensuremath{\mathcal E}_0,\ensuremath{\mathcal E}_1,\ensuremath{\mathcal E}_2\}$ be an exceptional collection of vector bundles on $\ensuremath{\mathbb{P}}^2$. Set $\mathop{\mathrm{ch}}\nolimits(\ensuremath{\mathcal E}_j)=(r_j,d_j,c_j)$, $\mu_j:=d_j/r_j$, and $\ensuremath{\mathbb{D}}elta_j:=\frac{1}{2}\left(1-\frac{1}{r_j^2}\right)$ ($j=0,1,2$). Note that $\mu_0 < \mu_1 < \mu_2 < \mu_0 + 3$.
\begin{Prop}\label{prop:geomvsalg} We have $\sigma=\sigma^{t,m}\in\Theta_{\ensuremath{\mathcal E}E}\cap\mathop{\mathrm{Stab}}_g$ only if $(t,m)$ is contained in the open semicircle with center $(C,0)$, where \begin{equation}\label{eq:center} C:=\frac{1}{2}(\mu_0+\mu_2)+\frac{\ensuremath{\mathbb{D}}elta_0-\ensuremath{\mathbb{D}}elta_2}{\mu_2-\mu_0} \end{equation} and radius $R:=\sqrt{\rho}$, where \begin{equation}\label{eq:radius} \rho:=\left(\frac{\ensuremath{\mathbb{D}}elta_0-\ensuremath{\mathbb{D}}elta_2}{\mu_2-\mu_0}\right)^2+\frac{1}{4}(\mu_2-\mu_0)^2-\left(\ensuremath{\mathbb{D}}elta_0+\ensuremath{\mathbb{D}}elta_2\right)>0. \end{equation} \end{Prop} \begin{Prf} First of all, let $\sigma=\sigma^{t,m}\in\Theta_{\ensuremath{\mathcal E}E}\cap\mathop{\mathrm{Stab}}_g$. Then, by Lemma \ref{lem:thetaP2}, \begin{enumerate} \item $\phi(i_*\ensuremath{\mathcal E}_0)<\phi(i_*\ensuremath{\mathcal E}_1)<\phi(i_*\ensuremath{\mathcal E}_2)$ and \item \label{enum:tmineq2} $\phi(i_*\ensuremath{\mathcal E}_0)+1=\phi(i_*\ensuremath{\mathcal E}_0[1])<\phi(i_*\ensuremath{\mathcal E}_2)$. \end{enumerate} As a consequence of (\ref{enum:tmineq2}), $(t,m)$ lies in the region bounded by \[ \frac{\Im Z^{t,m}(i_*\ensuremath{\mathcal E}_0)}{\ensuremath{\mathbb{R}}e Z^{t,m}(i_*\ensuremath{\mathcal E}_0)}=\frac{\Im Z^{t,m}(i_*\ensuremath{\mathcal E}_2)}{\ensuremath{\mathbb{R}}e Z^{t,m}(i_*\ensuremath{\mathcal E}_2)}. \] Making it explicit, we have \[ m^2+\left(t-\frac{r_0c_2-r_2c_0}{d_2r_0-d_0r_2}\right)^2=-2\frac{d_0c_2-d_2c_0}{d_2r_0-d_0r_2}+\left(\frac{r_0c_2-r_2c_0}{d_2r_0-d_0r_2}\right)^2. \] As observed in Appendix \ref{app:DP}, \[ \frac{c_j}{r_j}=\frac{1}{2r_j^2}-\frac{1}{2}+\frac{\mu_j^2}{2}=-\ensuremath{\mathbb{D}}elta_j+\frac{\mu_j^2}{2}, \] for $j=0,1,2$. Substituting we immediately deduce \eqref{eq:center} and \eqref{eq:radius}.
The fact that $\rho>0$ is again a straightforward computation, using \[ 0=\chi(\ensuremath{\mathcal E}_2,\ensuremath{\mathcal E}_0)=r_0r_2\left(1-\frac{3}{2}(\mu_2-\mu_0)+\frac{1}{2}(\mu_2-\mu_0)^2-(\ensuremath{\mathbb{D}}elta_0+\ensuremath{\mathbb{D}}elta_2)\right). \] \end{Prf} Using Lemma \ref{lem:StabilitySkyscrapers} and a deformation argument it can be proved that the statement of the previous proposition is actually an \emph{if and only if}. \begin{Rem}\label{rmk:Bonn1107} By \cite[Proposition 5.1]{Goro-Ruda:Exceptional}, we have \[ \frac{\ensuremath{\mathbb{D}}elta_0-\ensuremath{\mathbb{D}}elta_2}{\mu_2-\mu_0}=\frac{3}{2}\cdot\frac{r_0^2-r_2^2}{r_0^2+r_2^2+(c_2 r_0-c_0 r_2)^2}\in\left[-\frac{3}{2},\frac{3}{2}\right]. \] Hence, if $m>3/\sqrt{2}$, then $\sigma^{t,m}\notin\Theta_{\ensuremath{\mathcal E}E}$. \end{Rem} \section{Simply-connectedness}\label{sec:AlgebraicStability2} We can now prove the simply-connectedness of $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$: \begin{Thm}\label{thm:SimplyConn} The connected component $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$ is simply-connected. \end{Thm} The idea of the proof is very simple: by an elementary topological argument, using what we proved in the previous section, we first reduce Theorem \ref{thm:SimplyConn} to proving that $\mathop{\mathrm{Stab}}_a$ is simply-connected. To show this last assertion, we associate to every loop in $\mathop{\mathrm{Stab}}_a$ a word in the generators of the affine braid group $B_3$. Then the simply-connectedness of $\mathop{\mathrm{Stab}}_a$ will be equivalent to the fact that $B_3$ acts freely on the set of ordered quivery subcategories, and that, for every relation in $B_3$, we can find a corresponding loop that is contractible.
The main reason we involve Bridgeland's description of the set $\mathop{\mathrm{Stab}}_a$ is the following: the loci of degenerate stability conditions appearing in Remark \ref{rem:degenerate} are rather implicit in our description of $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$; however, they are essential for the simply-connectedness of the space. \begin{Rem}\label{Rem:sc-slice} Following \cite{Bridgeland:stab-CY}, let $\mathop{\mathrm{Stab}}_n \subset \mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$ be the subset of normalized stability conditions with $Z(k(x)) = -1$. Denote by $\overline{\mathop{\mathrm{Stab}}}$ the quotient $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)/\ensuremath{\mathbb{C}}$, which must also be simply-connected. By the results of the previous section, there always exist semistable objects of class $[k(x)]$ in $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$; hence $Z(k(x))$ is never zero. It follows that the subset $\mathop{\mathrm{Stab}}_n$ already surjects onto $\overline{\mathop{\mathrm{Stab}}}$. This surjection is a Galois covering $\overline{\mathop{\mathrm{Stab}}} \cong \mathop{\mathrm{Stab}}_n/\ensuremath{\mathbb{Z}}$, where the action by $n \in \ensuremath{\mathbb{Z}}$ is given as the shift $[2n]$; by the simply-connectedness of $\overline{\mathop{\mathrm{Stab}}}$, it follows that $\mathop{\mathrm{Stab}}_n \cong \overline{\mathop{\mathrm{Stab}}} \times \ensuremath{\mathbb{Z}}$. In particular, there is a connected component of ``very normalized'' stability conditions $\mathop{\mathrm{Stab}}_{vn} \subset \mathop{\mathrm{Stab}}_n$ containing the geometric stability conditions where the skyscraper sheaves are semistable of phase 1. It is a global slicing with respect to the $\ensuremath{\mathbb{C}}$-action, and simply-connected. 
It is invariant under spherical twists and tensoring with line bundles (i.e., invariant under the subgroup $\Gamma_1(3) \subset \mathop{\mathrm{Aut}}\nolimits \ensuremath{\mathbb{D}}D_0$ of Theorem \ref{thm:autoequiv-group}). Presumably, Bridgeland's Conjecture 1.2 in \cite{Bridgeland:stab-CY} could be modified to use this connected component $\mathop{\mathrm{Stab}}_{vn}$ rather than its open subset $\mathop{\mathrm{Stab}}_{n}^0(X)$ in the notation of \cite{Bridgeland:stab-CY}. \end{Rem} \begin{Lem}\label{lem:VanKampen} Let $X$ be a topological space such that \[ X=A\cup\bigcup_{n\in I}B_n \] where $I$ is an arbitrary set of indices and \begin{itemize} \item $A$ and all $B_n$ are open, connected, and simply-connected; \item $A\cap B_n$ is non-empty and connected, for all $n\in I$; \item $B_n\cap B_m=\emptyset$, for $n\neq m$. \end{itemize} Then $X$ is simply connected. \end{Lem} \begin{Prf} An inductive application of the classical Seifert--Van Kampen Theorem shows that for all finite subsets $N\subset I$ \[ X_N:=A\cup\bigcup_{n\in N}B_n \] is connected and simply-connected. However, by compactness, any loop in $X$ is contained in $X_N$ for some finite subset $N\subset I$. Hence it is contractible, as required. \end{Prf} To prove Theorem \ref{thm:SimplyConn}, we use the previous lemma with $A:=\mathop{\mathrm{Stab}}_a$, and the family $B_n$ as $\ensuremath{\mathbb{P}}hi(U)$, for $\ensuremath{\mathbb{P}}hi$ an autoequivalence of $\ensuremath{\mathbb{D}}D_0$ which belongs to the subgroup generated by spherical twists ${\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal F}$ ($\ensuremath{\mathcal F}$ an exceptional vector bundle on $\ensuremath{\mathbb{P}}^2$). By Theorem \ref{thm:geom-stability}, $U \cong \ensuremath{\mathbb{C}} \times G$ is open, connected and simply-connected; hence the same holds for all $\ensuremath{\mathbb{P}}hi(U)$. Also, we have $U\cap\ensuremath{\mathbb{P}}hi(U)=\emptyset$ unless $U=\ensuremath{\mathbb{P}}hi(U)$. 
Thus Theorem \ref{thm:SimplyConn} follows from Corollary \ref{cor:BoundaryIsAlgebraic}, Corollary \ref{cor:IntersectionIsConnected}, and the following proposition: \begin{Prop}\label{prop:main} $\mathop{\mathrm{Stab}}_a$ is simply-connected. \end{Prop} Before proving Proposition \ref{prop:main}, we need a few lemmata. \begin{Lem}\label{lem:firststabilityP2} Let $\mathfrak S=\{S_0,S_1,S_2\}$ and $\ensuremath{\mathbb{R}}RR=\{R_0,R_1,R_2\}$ be two ordered quivery collections. Assume that $\Theta_\mathfrak S\cap\Theta_\ensuremath{\mathbb{R}}RR\neq\emptyset$. Then, either $\Theta_{\mathfrak S}=\Theta_{\ensuremath{\mathbb{R}}RR}$, or there exists a stability condition $\overline{\sigma}=(\overline{Z},\overline{\ensuremath{\mathbb{P}}P})\in\partial\Theta_{\mathfrak S}\cap\Theta_{\ensuremath{\mathbb{R}}RR}$ such that the image of $\overline{Z}$ is contained in a line. \end{Lem} \begin{Prf} By hypothesis, either $\Theta_{\mathfrak S}=\Theta_{\ensuremath{\mathbb{R}}RR}$, or there exists a stability condition $\sigma\in\partial\Theta_{\mathfrak S}\cap\Theta_{\ensuremath{\mathbb{R}}RR}$. Now, we proceed as in the proof of Corollary \ref{cor:IntersectionIsConnected}: for every stability condition in $\partial\Theta_{\mathfrak S}\cap\Theta_{\ensuremath{\mathbb{R}}RR}$ there exists a sequence $G_k\in\widetilde{\mathop{\mathrm{GL}}}_2(\ensuremath{\mathbb{R}})$ ($k\in\ensuremath{\mathbb{N}}$) such that $\sigma\cdot G_k\to\overline{\sigma}$, where $\overline{\sigma}=(\overline{Z},\overline{\ensuremath{\mathbb{P}}P})$ is a stability condition in $\Theta_{\ensuremath{\mathbb{R}}RR}$ such that the image of $\overline{Z}$ is contained in a line. But then $\overline{\sigma}\in\partial\Theta_{\mathfrak S}\cap\Theta_{\ensuremath{\mathbb{R}}RR}$, as wanted. \end{Prf} \begin{Lem}\label{lem:secondstabilityP2} Let $\mathfrak S=\{S_0,S_1,S_2\}$ and $\ensuremath{\mathbb{R}}RR=\{R_0,R_1,R_2\}$ be two ordered quivery collections. 
If $\overline{\sigma}=(\overline{Z},\overline{\ensuremath{\mathbb{P}}P})\in\partial\Theta_{\mathfrak S}\cap\Theta_{\ensuremath{\mathbb{R}}RR}$ is such that the image of $\overline{Z}$ is contained in a line, then there exists $\gamma = \gamma_s\cdot\ldots\cdot\gamma_1\in B_3$, $\gamma_k\in\{\tau_0^{\pm 1},\tau_1^{\pm 1},\tau_2^{\pm 1}\}$ for all $k\in\{1,\ldots,s\}$, such that, up to reordering, $\ensuremath{\mathbb{R}}RR=\gamma\mathfrak S$, and there exist real numbers $0=a_0<a_1<\ldots<a_s<a_{s+1}=1$ and a continuous path $\alpha \colon [0,1]\to\mathop{\mathrm{Stab}}_a$ such that $\alpha([a_k,a_{k+1}))\subset\Theta_{\gamma_k\ldots\gamma_1\mathfrak S}$ and $\alpha(1)=\overline{\sigma}$. \end{Lem} \begin{Prf} First of all, if the image of $\overline{Z}$ is contained in a line, then the quivery collection is uniquely determined, up to reordering. Then, given $\overline{\sigma}$, we can deform it slightly as in the proof of Proposition \ref{prop:BoundaryTheta}, case (a). In this way we can find a non-trivial $\gamma_1\in\{\tau_0^{\pm 1},\tau_1^{\pm 1},\tau_2^{\pm 1}\}$ such that $\overline{\sigma}\in\overline{\Theta}_{\gamma_1\mathfrak S}\cap\partial\Theta_{\mathfrak S}$. If $\overline{\sigma}\in\Theta_{\gamma_1\mathfrak S}$, then $\ensuremath{\mathbb{R}}RR=\gamma_1\mathfrak S$, up to the action of $r$, and the lemma is proved. Otherwise, we can iterate the previous argument, by replacing $\mathfrak S$ with $\gamma_1\mathfrak S$. This process terminates as in the proof of Proposition \ref{prop:BoundaryTheta}. \end{Prf} \begin{Lem}\label{lem:SimplConn1} Let $\mathfrak S$ be an ordered quivery collection. Then, for all $\gamma\in\{\tau_0^{\pm 1},\tau_1^{\pm 1},\tau_2^{\pm 1}\}$, $\Theta_\mathfrak S\cup\Theta_{\gamma\mathfrak S}$ is simply-connected. \end{Lem} \begin{Prf} For simplicity, we assume $\gamma=\tau_1$. By Lemma \ref{lem:thetaP2} and the Seifert-Van Kampen Theorem, we only need to show that $\Theta_{\mathfrak S}\cap\Theta_{\tau_1\mathfrak S}$ is connected. 
But, using Lemma \ref{lem:thetaP2} again, as well as the remark following it, we have \[ \Theta_{\mathfrak S}\cap\Theta_{\tau_1\mathfrak S}=\ensuremath{\mathbb{C}}C_\mathfrak S\cap\left\{(m_0,m_1,m_2,\phi_0,\phi_1,\phi_2)\in\ensuremath{\mathbb{R}}^6\colon \phi_1>\phi_0,\,\phi_1>\phi_2\right\}, \] which is clearly connected. \end{Prf} \begin{Prf} (Proposition \ref{prop:main}) Take a continuous loop $\alpha \colon [0,1]\to\mathop{\mathrm{Stab}}_a$. By using the previous lemmata, there exist real numbers $0=a_0<a_1<\ldots< a_m=1$, $m\in\ensuremath{\mathbb{N}}$, and ordered quivery collections \[ \{\ensuremath{\mathcal M}M_k=\{M_0^k,M_1^k,M_2^k\}\}_{k\in\{1,\ldots,m\}} \] with $\ensuremath{\mathcal M}M_{k+1}$ obtained from $\ensuremath{\mathcal M}M_k$ by an element $\gamma_{k+1}\in\{\tau_0^{\pm 1},\tau_1^{\pm 1},\tau_2^{\pm 1}\}$, such that, up to replacing $\alpha$ with a homotopic path, $\alpha([a_{k-1},a_k))\subset\Theta_k:=\Theta_{\ensuremath{\mathcal M}M_k}$ for $k\in\{1,\ldots,m\}$ and $\alpha(0)\in\Theta_m\cap\Theta_1$. Thus we can assign a word $W(\alpha) = \gamma_m \ldots \gamma_1$ in the generators of $B_3$ to every loop $\alpha$. Using Lemma \ref{lem:SimplConn1} we deduce that the homotopy class $[\alpha]$ of $\alpha$ is determined by $W(\alpha)$, and that $[\alpha]$ is in fact determined by the element in the free group $L$ generated by $r, \tau_0, \tau_1, \tau_2$ associated to $W(\alpha)$. Now assume more specifically that the stability condition $(Z, \ensuremath{\mathbb{P}}P) = \alpha(0) = \alpha(1)$ is given by $\ensuremath{\mathbb{P}}P((0,1]) = \ensuremath{\mathcal A}_{\ensuremath{\mathcal E}E_1}$, $Z(i_*\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}) = Z(i_*\Omega_{\ensuremath{\mathbb{P}}^2}(1)[1]) = Z(i_*\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}(-1)[2]) = - \frac 13$. Then any heart $\ensuremath{\mathbb{P}}P((\phi, \phi+1])$ is a shift of $\ensuremath{\mathcal A}_{\ensuremath{\mathcal E}E_1}$.
As an ordered quivery collection is determined, up to reordering, by its heart, we have \[ r^j W(\alpha)(\ensuremath{\mathcal M}M_1)=\ensuremath{\mathcal M}M_1, \] for some $j$. Since the braid group $B_3$ acts freely on the set of ordered quivery subcategories (by \cite[Theorem 5.6]{Bridgeland:TStruct}), we have $r^jW(\alpha) = \mathop{\mathrm{id}}\nolimits_{B_3}$ in $B_3$. Due to the description of $B_3$ in terms of generators and relations, it follows that we have an identity in $L$ of the form \[ W(\alpha) = r^{-j}(h_1R_1^{\pm 1}h_1^{-1})\cdots(h_sR_s^{\pm 1}h_s^{-1}), \] with $R_1, \dots, R_s \in\{r\tau_ir^{-1}\tau_{i+1}^{-1},\tau_i\tau_{i+1}\tau_i\tau_{i+1}^{-1}\tau_i^{-1}\tau_{i+1}^{-1},r^3\}$ and $h_1,\ldots,h_s\in L$ arbitrary elements. By Lemma \ref{lem:SimplConn2}, loops with associated words $\tau_i\tau_{i+1}\tau_i\tau_{i+1}^{-1}\tau_i^{-1}\tau_{i+1}^{-1}$ (or its inverse) can be contracted in $\mathop{\mathrm{Stab}}_a$. This implies that $\alpha$ can be contracted in general, and so $\mathop{\mathrm{Stab}}_a$ is simply-connected. \end{Prf} \begin{Lem}\label{lem:SimplConn2} Let $\alpha$ be a loop with word $W(\alpha) = \tau_i\tau_{i+1}\tau_i\tau_{i+1}^{-1}\tau_i^{-1}\tau_{i+1}^{-1}$. Then $\alpha$ is contractible. \end{Lem} \begin{Prf} We may assume $i=1$. We will say that a loop $\alpha$ ``runs through the regions $U_1, \ldots, U_m$'' for open subsets $U_i \subset \mathop{\mathrm{Stab}}_a$ if there are $0=a_0 < a_1 < \ldots <a_{m} < 1$ with $\alpha([a_{k-1}, a_k)) \subset U_k$ and $\alpha([a_m,1]) \subset U_1$. 
By assumption, the loop $\alpha$ runs through the regions $\Theta_1, \dots, \Theta_6$ given by $\Theta_k = \Theta_{\ensuremath{\mathcal M}M_k}$ and \begin{align*} \ensuremath{\mathcal M}M_1 &= \{S_0,S_1,S_2\} & \ensuremath{\mathcal M}M_4 &= \{\Gamma,{\mathop{\mathrm{ST}}\nolimits}_{S_0}^{-1}S_1[1],S_0[2]\} \\ \ensuremath{\mathcal M}M_2 &= \{S_0,{\mathop{\mathrm{ST}}\nolimits}_{S_1}^{-1}S_2,S_1[1]\} & \ensuremath{\mathcal M}M_5 &= \{{\mathop{\mathrm{ST}}\nolimits}_{S_0}^{-1}S_1,{\mathop{\mathrm{ST}}\nolimits}_{S_0}^{-1}S_2,S_0[2]\} \\ \ensuremath{\mathcal M}M_3 &= \{\Gamma,S_0[1],S_1[1]\} & \ensuremath{\mathcal M}M_6 &= \{{\mathop{\mathrm{ST}}\nolimits}_{S_0}^{-1}S_1,S_0[1],S_2\} \end{align*} for some ordered quivery collection $S_0, S_1, S_2$, where $\Gamma={\mathop{\mathrm{ST}}\nolimits}_{S_0}^{-1}{\mathop{\mathrm{ST}}\nolimits}_{S_1}^{-1}S_2$. First of all observe that, by Lemma \ref{lem:thetaP2}, \[ \Theta_1\cap\Theta_2\cap\Theta_3\neq\emptyset \] and it is homeomorphic to the locus in $\Theta_1$ given by those stability conditions having phases such that $\phi(S_1)<\phi(S_0)<\phi(S_2)$ and $\phi(S_0)<\phi({\mathop{\mathrm{ST}}\nolimits}_{S_1}^{-1}S_2)$. This implies that we can replace $\alpha$ by a loop, which we will denote again $\alpha$, such that $\alpha$ runs though the regions $\Theta_1,\Theta_3, \Theta_4, \Theta_5,\Theta_6$. Repeating the same argument on $\Theta_4\cap\Theta_5\cap\Theta_6$, we can replace it by a loop that runs through $\Theta_1, \Theta_3, \Theta_4, \Theta_6$. Let $t_1\in(0,1)$ be such that $\alpha([0,t_1))\subset\Theta_1$ and $\alpha(t_1)=(Z_1,\ensuremath{\mathbb{P}}P_1)\in\Theta_3\cap\partial\Theta_1$. By Lemma \ref{lem:firststabilityP2}, we can assume the image of $Z_1$ to be contained in the real line. In such a case, we have $\phi_1(S_0)=\phi_1(S_1)=0$ and $\phi_1(S_2)=1$. 
At the same time, by definition, we have \begin{equation}\label{eqn:Tianjin1} S_0^{\oplus m}\to{\mathop{\mathrm{ST}}\nolimits}_{S_0}^{-1}S_1\to S_1 \end{equation} and so ${\mathop{\mathrm{ST}}\nolimits}_{S_0}^{-1}S_1$ is semistable as well of phase $0$ whose Jordan-H\"older filtration is given by \eqref{eqn:Tianjin1}. By Lemma \ref{lem:semitostable}, $\alpha(t_1)\in\Theta_3\cap\partial\Theta_6\cap\partial\Theta_1$. In particular, $\Theta_3\cap\Theta_6\neq\emptyset$. Let $t_4\in(t_1,1)$ be such that $\alpha(t_4)\in\Theta_1\cap\partial\Theta_6$ and $\alpha((t_4,1])\subset\Theta_1$. By Lemma \ref{lem:thetaP2}, the intersection $\Theta_1\cap\partial\Theta_6$ is given as the region \[ \ensuremath{\mathbb{C}}C_{\ensuremath{\mathcal M}M_1}\cap\left\{(m_0,m_1,m_2,\phi_0,\phi_1,\phi_2)\in\ensuremath{\mathbb{R}}^6\colon \phi_1=\phi_0,\,\phi_0\leq\phi_2\leq\phi_0+1\right\}. \] in particular, it is connected and simply-connected. Since $\alpha(t_1)\in\overline{\Theta}_1\cap\partial\Theta_6$, we can replace $\alpha$ by a homotopic path for which $\alpha(t_1)=\alpha(t_4)\in\Theta_3\cap\partial\Theta_6\cap\partial\Theta_1$. What we proved so far is that our original loop $\alpha$ is homotopic to a loop which can be decomposed as a loop $\alpha' = \alpha([0, t_1] \cup [t_4, 1])$ contained in $\Theta_1\cup\Theta_3$ and another loop $\beta = \alpha([t_1,t_4])$ which runs through $\Theta_3,\Theta_4,\Theta_6$. Now consider just the loop $\beta$. Let $t_2\in(t_1,t_4)$ be such that $\beta(t_2)\in\Theta_6\cap\partial\Theta_4$ and $t_3\in(t_2,t_4)$ such that $\beta(t_3)\in\Theta_4\cap\partial\Theta_3$. Arguing as above, we can replace $\beta$ by a homotopic loop for which $\beta(t_2)=\beta(t_3)\in\Theta_6\cap\partial\Theta_3\cap\partial\Theta_4$. Hence, the loop $\beta$ can be decomposed as a loop $\alpha'' =\beta([t_2, t_3])$ contained in $\Theta_4\cup\Theta_6$ and another loop $\alpha''' = \beta([t_1, t_2] \cup [t_3, t_4])$ contained in $\Theta_3\cup\Theta_6$. 
Summing up, to prove that $\alpha$ is contractible, we only need to prove that all regions $\Theta_1\cup\Theta_3$, $\Theta_4\cup\Theta_6$, and $\Theta_3\cup\Theta_6$ are simply-connected. Again, by Lemma \ref{lem:thetaP2} and the Seifert--Van Kampen Theorem, it is sufficient to show that the intersections $\Theta_1\cap\Theta_3$, $\Theta_4\cap\Theta_6$, and $\Theta_3\cap\Theta_6$ are connected. For $\Theta_1\cap\Theta_3$, observe that it corresponds to the locus in $\ensuremath{\mathbb{C}}C_{\ensuremath{\mathcal M}M_1}$ in which $\Gamma$ is stable and $\phi(S_0),\phi(S_1)<\phi(\Gamma)$. This can be proved to be connected by proceeding in a similar way as in the last part of the proof of Lemma \ref{lem:StabilitySkyscrapers}: in this situation we use the $\widetilde{\mathop{\mathrm{GL}}}_2(\ensuremath{\mathbb{R}})$-action to fix the values of $Z(\Gamma)$ and $Z(S_1)$. The region $\Theta_1\cap\Theta_3$ is then, up to the action of $\widetilde{\mathop{\mathrm{GL}}}_2(\ensuremath{\mathbb{R}})$, cut out by half-planes, and so it is connected. The intersection $\Theta_4\cap\Theta_6$ is analogous. Finally, $\Theta_3\cap\Theta_6$ corresponds to the locus in $\ensuremath{\mathbb{C}}C_{\ensuremath{\mathcal M}M_3}$ in which $S_2$ is stable, $\phi(S_0[1])<\phi(S_1[1])$, and $\phi(S_2)<\phi({\mathop{\mathrm{ST}}\nolimits}_{S_0}^{-1}S_1)$, which is again connected by a similar argument. This completes the proof of the lemma. \end{Prf} \section{Group of autoequivalences}\label{sec:autoequivalences} Let $\mathop{\mathrm{Aut}}\nolimits \ensuremath{\mathbb{D}}D_0$ be the group of autoequivalences of $\ensuremath{\mathbb{D}}D_0$ up to isomorphism of functors, and let $\mathop{\mathrm{Aut}}\nolimits^\dag \ensuremath{\mathbb{D}}D_0$ be the subgroup of $\mathop{\mathrm{Aut}}\nolimits \ensuremath{\mathbb{D}}D_0$ preserving the connected component $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$. 
The numerical $K$-group of $\ensuremath{\mathbb{D}}D_0$ is $K(\ensuremath{\mathbb{D}}D_0)/K^\perp = K(\ensuremath{\mathbb{D}}D_0)/ \ensuremath{\mathbb{Z}}\cdot[k(x)] \cong \ensuremath{\mathbb{Z}}^{\oplus 2}$. Since the Euler form is skew-symmetric, there is a natural map \begin{equation} \label{eq:AuttoSL2Z} \mathop{\mathrm{Aut}}\nolimits \ensuremath{\mathbb{D}}D_0 \to \mathop{\mathrm{SL}}(2, \ensuremath{\mathbb{Z}}) \end{equation} given by sending an autoequivalence to its induced action on the numerical $K$-group. Crucial for us will be the congruence subgroup $\Gamma_1(3) \subset \mathop{\mathrm{SL}}(2, \ensuremath{\mathbb{Z}})$ of matrices \[ \begin{pmatrix} a & b \\ c & d \end{pmatrix} \equiv \begin{pmatrix} 1 & b \\ 0 & 1 \end{pmatrix} \pmod{3} \] It has generators $T = \begin{pmatrix} 1 & 1 \\ 0 & 1 \end{pmatrix}$ and $S = \begin{pmatrix} 1 & 0 \\ -3 & 1 \end{pmatrix}$ with a single relation given by $(ST)^3 = 1$. As in the introduction, we denote by $\hat X$ the formal completion of $X$ along $\ensuremath{\mathbb{P}}^2$. \begin{Thm}\label{thm:autoequiv-group} \[ \mathop{\mathrm{Aut}}\nolimits^\dag \ensuremath{\mathbb{D}}D_0 \cong \ensuremath{\mathbb{Z}} \times \Gamma_1(3) \times \mathop{\mathrm{Aut}}\nolimits(\hat X). \] \end{Thm} We start by identifying the subgroup $\Gamma_1(3) \subset \mathop{\mathrm{Aut}}\nolimits^\dag(\ensuremath{\mathbb{D}}D_0)$: As observed in \cite[Section 7.3.5]{Aspinwall:Dbranes-CY}, there is a relation \begin{equation}\label{eq:Z3Z-relation} \bigl({\mathop{\mathrm{ST}}\nolimits}_{\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}} \circ (\underline{\hphantom{M}} \otimes \pi^*\ensuremath{\mathcal O}(1))\bigr)^3 \cong \mathop{\mathrm{Id}}\nolimits. \end{equation} Due to the description of $\Gamma_1(3)$ by generators and relations, this induces a map $\Gamma_1(3) \to \mathop{\mathrm{Aut}}\nolimits^\dag \ensuremath{\mathbb{D}}D_0$. 
If we choose $([\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}], [\ensuremath{\mathcal O}_l])$ as a basis of $K(\ensuremath{\mathbb{D}}D_0)/K^\perp$, then the composition with \eqref{eq:AuttoSL2Z} maps the generators to $S$ and $T$, respectively; hence the composition $\Gamma_1(3) \to \mathop{\mathrm{Aut}}\nolimits^\dag \ensuremath{\mathbb{D}}D_0 \to \mathop{\mathrm{SL}}_2(\ensuremath{\mathbb{Z}})$ is the standard inclusion of $\Gamma_1(3)$ as the congruence subgroup given above. In particular, the action on $\ensuremath{\mathbb{D}}D_0$ is faithful, and we obtain: \begin{Prop} \label{prop:gamma1} The subgroup of $\mathop{\mathrm{Aut}}\nolimits^\dag \ensuremath{\mathbb{D}}D_0$ generated by ${\mathop{\mathrm{ST}}\nolimits}_{\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}}$ and $\underline{\hphantom{M}} \otimes \pi^*\ensuremath{\mathcal O}(1)$ is isomorphic to $\Gamma_1(3)$. \end{Prop} Alternatively, one can prove that the composition ${\mathop{\mathrm{ST}}\nolimits}_{\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}} \circ (\underline{\hphantom{M}} \otimes \pi^*\ensuremath{\mathcal O}(1))$ is isomorphic to the generator of the natural $\ensuremath{\mathbb{Z}}_3$-action on $\ensuremath{\mathbb{D}}b_0([\ensuremath{\mathbb{C}}^3/\ensuremath{\mathbb{Z}}_3])$ via the derived McKay correspondence of \cite{Mukai-McKay} (where $\ensuremath{\mathbb{D}}b_0([\ensuremath{\mathbb{C}}^3/\ensuremath{\mathbb{Z}}_3])$ denotes the bounded derived category of $\ensuremath{\mathbb{Z}}_3$-equivariant coherent sheaves on $\ensuremath{\mathbb{C}}^3$ supported at the origin). 
\begin{Lem}\label{lem:ST-in-gamma13} For any exceptional vector bundle $\ensuremath{\mathcal E}$ on $\ensuremath{\mathbb{P}}^2$, its associated spherical twist ${\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal E}$ is contained in the subgroup $\Gamma_1(3)$ generated by ${\mathop{\mathrm{ST}}\nolimits}_{\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}}$ and $\underline{\hphantom{M}} \otimes \pi^* \ensuremath{\mathcal O}(1)$. \end{Lem} \begin{Prf} Since, for all autoequivalences $\ensuremath{\mathbb{P}}hi\in\mathop{\mathrm{Aut}}\nolimits(\ensuremath{\mathbb{D}}D_0)$ and for all spherical objects $F\in\ensuremath{\mathbb{D}}D_0$, \[ \ensuremath{\mathbb{P}}hi\circ{\mathop{\mathrm{ST}}\nolimits}_{F}\circ\ensuremath{\mathbb{P}}hi^{-1}\cong{\mathop{\mathrm{ST}}\nolimits}_{\ensuremath{\mathbb{P}}hi(F)}, \] it is sufficient to show that there exists $g \in \Gamma_1(3)$ with $i_*\ensuremath{\mathcal E} \cong g\left(\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}\right)$. By \cite{Goro-Ruda:Exceptional}, $\ensuremath{\mathcal E}$ is contained in a mutation of the exceptional collection $\ensuremath{\mathcal E}E_1$. Remark \ref{rmk:GoroRuda} completes the proof. \end{Prf} Restricting to the subgroup $\mathop{\mathrm{Aut}}\nolimits^\dag \ensuremath{\mathbb{D}}D_0$ makes it possible to control autoequivalences via the following proposition: \begin{Prop} \label{prop:geom-autos} Let $\ensuremath{\mathbb{P}}hi \in \mathop{\mathrm{Aut}}\nolimits \ensuremath{\mathbb{D}}D_0$ be an autoequivalence such that there exist two geometric stability conditions $\sigma, \sigma'$ with $\ensuremath{\mathbb{P}}hi(\sigma) = \sigma'$. Then $\ensuremath{\mathbb{P}}hi$ is isomorphic to the composition of an automorphism of $\hat X$ with $\underline{\hphantom{M}} \otimes \ensuremath{\mathcal O}(n)[k]$. 
\end{Prop} \begin{Prf} From the description of geometric stability conditions it follows that $\ensuremath{\mathbb{P}}hi$ sends skyscraper sheaves $k(x)$, $x \in \ensuremath{\mathbb{P}}^2$, to shifts of skyscraper sheaves. More precisely, after replacing $\ensuremath{\mathbb{P}}hi$ by $\ensuremath{\mathbb{P}}hi \circ \underline{\hphantom{M}}[k]$ for some $k \in \ensuremath{\mathbb{Z}}$, we may assume that for every $x \in \ensuremath{\mathbb{P}}^2$ there is $x' \in \ensuremath{\mathbb{P}}^2$ with $\ensuremath{\mathbb{P}}hi(k(x)) \cong k(x')$. The proposition follows now directly from the recent results in \cite{Lunts-Orlov:uniqueness, alberto-paolo:FM-supported}. Since the argument is quite standard, we give only a brief sketch. By \cite[Theorem 1.1]{alberto-paolo:FM-supported}, every autoequivalence of $\ensuremath{\mathbb{D}}D_0$ is of \emph{Fourier-Mukai type}, i.e., there exists an object $\ensuremath{\mathcal U}\in\ensuremath{\mathbb{D}}b(\ensuremath{\mathbb{Q}}coh_{\ensuremath{\mathbb{P}}^2\times\ensuremath{\mathbb{P}}^2}(X\times X))$ such that $\ensuremath{\mathbb{P}}hi\cong\ensuremath{\mathbb{P}}hi_\ensuremath{\mathcal U}:=(p_1)_*\left(\ensuremath{\mathcal U}\otimes p_2^*(\underline{\hphantom{M}})\right)$, where $\ensuremath{\mathbb{Q}}coh_{\ensuremath{\mathbb{P}}^2\times\ensuremath{\mathbb{P}}^2}(X\times X)$ is the category of quasi-coherent sheaves on $X\times X$ supported on $\ensuremath{\mathbb{P}}^2\times\ensuremath{\mathbb{P}}^2$, all functors are supposed to be derived, and $p_1,p_2$ denote the two projections. Let $X_n$ be the $n$-th infinitesimal neighborhood of $\ensuremath{\mathbb{P}}^2$ inside $X$. By using \cite[Lemma 4.3]{Bridgeland:EqFMT}, we can show that $\ensuremath{\mathcal U}$ is actually a sheaf and it maps $\ensuremath{\mathbb{C}}oh_0$ (resp.\ $\ensuremath{\mathbb{C}}oh X_n$) to $\ensuremath{\mathbb{C}}oh_0$ (resp.\ $\ensuremath{\mathbb{C}}oh X_n$). 
By tensoring with a line bundle on $X$, we can assume that $\ensuremath{\mathbb{P}}hi(\ensuremath{\mathcal O}_{X_n})\cong\ensuremath{\mathcal O}_{X_n}$, for all $n$. Hence, arguing as in \cite[Corollary 5.23]{Huybrechts:FM}), since $\ensuremath{\mathbb{P}}hi_{\ensuremath{\mathcal U}}(k(x))\cong k(x')$, there exists a family of compatible automorphisms $u_n\colon X_n\to X_n$, $n\in\ensuremath{\mathbb{Z}}_{\geq 0}$, which induces an automorphism $u\colon \widehat{X}\to\widehat{X}$ and such that $\ensuremath{\mathbb{P}}hi_{\ensuremath{\mathcal U}}\cong u^*$, as wanted. \end{Prf} \begin{Prf} (Theorem \ref{thm:autoequiv-group}) Given $\ensuremath{\mathbb{P}}hi\in\mathop{\mathrm{Aut}}\nolimits^\dag \ensuremath{\mathbb{D}}D_0$, pick an arbitrary geometric stability condition $\sigma \in U$. By Corollary \ref{cor:ConnectedComponent}, there exists a stability condition $\sigma' \in \overline{U}$ and a composition $\ensuremath{\mathbb{P}}si$ of spherical twists associated to exceptional vector bundles with $\ensuremath{\mathbb{P}}si \circ \ensuremath{\mathbb{P}}hi(\sigma) = \sigma'$. The stability condition $\ensuremath{\mathbb{P}}si \circ \ensuremath{\mathbb{P}}hi(\sigma)$ has no semistable objects of class $[k(x)]$; thus actually $\sigma' \in U$. By Proposition \ref{prop:geom-autos} and Lemma \ref{lem:ST-in-gamma13}, $\ensuremath{\mathbb{P}}hi$ is contained in the group generated by $\Gamma_1(3)$, shifts, and $\mathop{\mathrm{Aut}}\nolimits(\hat X)$. As the actions by $\ensuremath{\mathbb{Z}}$, by $\Gamma_1(3)$, and by $\mathop{\mathrm{Aut}}\nolimits(\hat X)$ commute, we get a surjective map \[ \ensuremath{\mathbb{Z}} \times \Gamma_1(3) \times \mathop{\mathrm{Aut}}\nolimits(\hat X) \to \mathop{\mathrm{Aut}}\nolimits^\dag \ensuremath{\mathbb{D}}D_0.\] The restriction $\mathop{\mathrm{Aut}}\nolimits(\hat X) \to \mathop{\mathrm{Aut}}\nolimits(\ensuremath{\mathbb{D}}D_0)$ is injective by \cite[Theorem 1.1]{alberto-paolo:FM-supported}. 
Since $\ensuremath{\mathbb{Z}} \times \mathop{\mathrm{Aut}}\nolimits(\hat X)$ acts by $\pm 1$ on $K/K^\perp$, its intersection with $\Gamma_1(3)$ is trivial, and the above map is an isomorphism. \end{Prf} \section{$\ensuremath{\mathbb{P}}i$-stability and Global Mirror Symmetry}\label{sec:MS} In this section, we outline how our results fit into expectations coming from mirror symmetry for the local $\ensuremath{\mathbb{P}}^2$. Mirror symmetry for the local $\ensuremath{\mathbb{P}}^2$ has been discussed in many places of the mathematical physics literature, see e.g. \cite{AGM:measuring, Diaconescu-Goms:fractional}; our presentation follows \cite{ABK:topological_strings} and \cite{Aspinwall:Dbranes-CY} most closely. \subsection{Monodromy and autoequivalences} The family of mirror partners to the local $\ensuremath{\mathbb{P}}^2$ can be constructed explicitly from the following family of genus one curves: The equation \[ X_0^3 + X_1^3 + X_2^3 - 3 \psi X_0 X_1 X_2 = 0 \] cuts out a surface $S \subset \ensuremath{\mathbb{P}}^2 \times \ensuremath{\mathbb{C}}$. At $\psi^3 = 1$ and $\psi = \mathop{\mathrm{inf}}\nolimitsty$, the fibers are singular; all other fibers of $S$ over $\ensuremath{\mathbb{C}}$ are smooth genus one curves. There is a $\mu_3$-action on $S$ given by $X_0 \mapsto \omega^{-1} X_0$ and $\psi \mapsto \omega \psi$, and leaving the other variables invariant, where $\omega=\exp(2\pi i/3)$. Let $\ensuremath{\mathcal Y}$ be the quotient \[ \bigl(S \setminus \{ \psi^3=1 \}\bigr)/\mu_3 \] of the union of the smooth fibers by the group action; then $\ensuremath{\mathcal Y}$ is a family of smooth elliptic curves over $(\ensuremath{\mathbb{C}} - \mu_3)/\mu_3$. In fact, the base is the moduli space $\ensuremath{{\mathcal M}_{\Gamma_1(3)}} \cong (\ensuremath{\mathbb{C}} - \mu_3)/\mu_3$ of elliptic curves with $\Gamma_1(3)$-level structure. 
We can also think of $\ensuremath{{\mathcal M}_{\Gamma_1(3)}}$ as $\ensuremath{\mathbb{P}}^1$ with the points $z = -\frac 1{27}$ and $z= 0$ removed, and a stacky $\ensuremath{\mathbb{Z}}_3$ point at $z= \mathop{\mathrm{inf}}\nolimitsty$ (where we set $z = - \frac 1{(3\psi)^3}$). The fundamental group of $\ensuremath{{\mathcal M}_{\Gamma_1(3)}}$ is $\Gamma_1(3)$. It is generated by the loops $\gamma_{-\frac 1 {27}}, \gamma_0$ around $-\frac 1{27}$ and $0$; as their composition is a loop around $z = \mathop{\mathrm{inf}}\nolimitsty$, they satisfy $\bigl(\gamma_{-\frac 1{27}} \gamma_0 \bigr)^3 = 1$. Given any $z \in \ensuremath{{\mathcal M}_{\Gamma_1(3)}}$, one can determine a basis of first homology $H_1(\ensuremath{\mathcal Y}_z)$ of the fibers by choosing a path from $z$ to $-\frac 1{27}$ and from $z$ to $\mathop{\mathrm{inf}}\nolimitsty$; the basis is then given by the two corresponding vanishing cycles $\bar A_z$ and $\bar B_z$. This basis yields an identification of $\pi_1(\ensuremath{{\mathcal M}_{\Gamma_1(3)}})$ as a subgroup of $SL_2(\ensuremath{\mathbb{Z}})$ by its monodromy action on the first homology $H_1$ of the fibers of $\ensuremath{\mathcal Y}$. Explicitly, we get \[ \gamma_{-\frac 1{27}} \equiv \begin{pmatrix} 1 & 0 \\ -3 & 1 \end{pmatrix}, \quad \gamma_0 \equiv \begin{pmatrix} 1 & 1 \\ 0 & 1 \end{pmatrix}. \] A well-known principle of mirror symmetry states that monodromies in the mirror family $\ensuremath{\mathcal Y}$ lift to autoequivalences in the derived category $\ensuremath{\mathbb{D}}D_0$: it is implied by homological mirror symmetry and has been applied and verified e.g. in \cite{Seidel-Thomas:braid, Horja:autoequivalences}. 
Theorem \ref{thm:autoequiv-group} gives another incarnation of this principle, as the action of $\Gamma_1(3) \subset \mathop{\mathrm{Aut}}\nolimits \ensuremath{\mathbb{D}}D_0$ on $K(\ensuremath{\mathbb{D}}D_0)/K^{\perp} \cong \ensuremath{\mathbb{Z}}^{\oplus 2}$ matches the action of $\Gamma_1(3) \cong \pi_1(\ensuremath{{\mathcal M}_{\Gamma_1(3)}})$ if we identify ${\mathop{\mathrm{ST}}\nolimits}_\ensuremath{\mathcal O}$ with $\gamma_{-\frac 1{27}}$ and $\underline{\hphantom{M}} \otimes \ensuremath{\mathcal O}(1)$ with $\gamma_0$. \subsection{Period integrals and $\mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$} However, in the spirit of \cite{Bridgeland:spaces}, there is also more geometric connection between the mirror moduli space $\ensuremath{{\mathcal M}_{\Gamma_1(3)}}$ and the space of stability conditions. The periods in this mirror construction are given by integrals over the meromorphic differential form $\lambda = \ln \frac{X_2}{X_3} \frac{dX_1}{X_1}$. More precisely, let $\ensuremath{\mathcal Y}^0 \subset \ensuremath{\mathcal Y}$ be the complement of the set of poles of $\lambda$, and $\tilde \ensuremath{\mathcal Y}^0$ be the cover on which $\ln \frac{X_2}{X_3} $ is well-defined. Following \cite{ABK:topological_strings}, one can choose a family of cycles $A_z, B_z \in H_1(\tilde \ensuremath{\mathcal Y}^0_z)$ that project to $\bar A_z, \bar B_z \in H_1(\ensuremath{\mathcal Y}_z)$, and a third family of cycles $C_z \in H_1(\tilde \ensuremath{\mathcal Y}^0_z)$ and define the period integrals as: \[ \ensuremath{\mathbb{P}}i(z) = \begin{pmatrix} \int_B \lambda \\ \int_A \lambda \\ \int_C \lambda \end{pmatrix} \] The authors show that if $A, B, C$ are chosen appropriately, then the action of $\pi_1(\ensuremath{{\mathcal M}_{\Gamma_1(3)}}) \cong \Gamma_1(3)$ on these 3 periods matches the action of $\Gamma_1(3) \subset \mathop{\mathrm{Aut}}\nolimits \ensuremath{\mathbb{D}}D_0$ on $K(\ensuremath{\mathbb{D}}D_0) \cong \ensuremath{\mathbb{Z}}^{\oplus 3}$. 
We will now ignore the construction of period integrals and instead just consider their Picard-Fuchs equation; with $\theta_z := z \frac{d}{dz}$ it is given by \begin{equation} \label{eq:Picard-Fuchs} \bigl( \theta_z^3 + 3z \theta_z (3\theta_z + 1)(3\theta_z + 2) \bigr) \ensuremath{\mathbb{P}}i = 0, \end{equation} and has singularities at $z = 0$, $z = - \frac 1{27}$ and $z = \mathop{\mathrm{inf}}\nolimitsty$. Using an \emph{Ansatz} and solving for the coefficients of the power series, one can find expansions of three linearly independent solutions around $z = 0$ and $\psi = 0$, respectively (see also \cite[Section 6]{ABK:topological_strings} and \cite[Section 7.3]{Aspinwall:Dbranes-CY}). Around $z = 0$, we make the standard branch choice of $\ln z$ for $z \in \ensuremath{\mathbb{C}} \setminus \ensuremath{\mathbb{R}}_{\le 0}$, and get as expansions (compare with \cite[Section 6.2]{ABK:topological_strings}) \begin{align*} \omega_0(z) &= 1 \\ \omega_1(z) &= \frac 1{2\pi i} \left(\ln z +3 \sum_{n=1}^\mathop{\mathrm{inf}}\nolimitsty \frac{(3n-1)!}{n!^3} (-z)^n \right) \\ \omega_2(z) &= \frac 1{(2 \pi i)^2} \left( (\ln z)^2 + 6 \ln z \cdot \sum_{n=1}^\mathop{\mathrm{inf}}\nolimitsty \frac{(3n-1)!}{n!^3} (-z)^n + \sum_{n=1}^\mathop{\mathrm{inf}}\nolimitsty l_n z^n \right) \end{align*} where the differential equation defines the $l_n$ recursively: \begin{multline*} l_n = -\frac 1{n^3} \left( (3n-1)(3n-2)(3n-3) l_{n-1} + 18 \cdot (-1)^n \frac{(3n-1)!}{n!^3} \cdot n^2 \right. \\ \left. 
- 18 \cdot (-1)^n \frac{(3n-4)!}{(n-1)!^3} \left(27 n^2 - 36 n + 11\right) \right) \end{multline*} Similarly, the power series expansion of a basis of solutions nearby $z = \mathop{\mathrm{inf}}\nolimitsty$ are given by $\varpi_0(\psi) = 1$ and: \begin{align*} \varpi_1(\psi) &= \frac 1{2\pi i} \sum_{n=1 \atop 3\nmid n}^\mathop{\mathrm{inf}}\nolimitsty \frac{\Gamma\left(\frac n3 \right)} {\Gamma(n+1) \Gamma\left(1 - \frac n3 \right)^2} (3 \psi)^n \\ \varpi_2(\psi) &= \frac 1{2\pi i} \sum_{n=1 \atop 3\nmid n}^\mathop{\mathrm{inf}}\nolimitsty \frac{\Gamma\left(\frac n3 \right)} {\Gamma(n+1) \Gamma\left(1 - \frac n3 \right)^2} (3 e^{\frac{2\pi i}3} \psi)^n \end{align*} Here we use $\psi = - \frac 1{3\sqrt[3]{z}}$ with the branch choice $\frac{2\pi}3 < \arg \psi < \frac{4\pi}3$ for $\abs{\arg z} < \pi$. Following Aspinwall, we define the solutions $a(z), b(z)$ of \eqref{eq:Picard-Fuchs} for $z \in \ensuremath{\mathbb{C}}^* \setminus \ensuremath{\mathbb{R}}_{<0}$ by setting \begin{equation} \label{eq:ab-exp0} a(z) = \omega_1(z) - \frac 12, \quad b(z) = - \frac 12 \omega_2(z) + \frac 12 \omega_1(z) - \frac 14 \end{equation} for $\abs{z} < \frac 1{27}$ and analytic continuation. This analytic continuation is computed explicitly in \cite[Eqn. (286)]{Aspinwall:Dbranes-CY} and \cite[Eqn. (6.22)]{ABK:topological_strings}, and gives the following expansion of $a(z)$ and $b(z)$ around $z = \mathop{\mathrm{inf}}\nolimitsty$: \begin{equation} \label{eq:ab-expinf} a(z) = \varpi_1(z)-\frac{1}{2}, \quad b(z) = \frac{1}{3}\left(\varpi_1(z)-\varpi_2(z)-1\right) \end{equation} \begin{Thm}\label{thm:PowerSeries} Fix a universal cover $\ensuremath{\widetilde{\mathcal M}_{\Gamma_1(3)}} \to \ensuremath{{\mathcal M}_{\Gamma_1(3)}}$ together with its $\Gamma_1(3)$-action of deck transformations and choose a fundamental domain $D\subset\ensuremath{\widetilde{\mathcal M}_{\Gamma_1(3)}}$ that projects isomorphically onto $\ensuremath{\mathbb{C}}^* \setminus \ensuremath{\mathbb{R}}_{<0}$. 
Then there is an embedding $I \colon \ensuremath{\widetilde{\mathcal M}_{\Gamma_1(3)}} \to \mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$ defined by the following properties: \begin{enumerate} \item For $(Z(z), \ensuremath{\mathbb{P}}P(z)) = I(z)$, the central charge is given by \[ Z(E)(z) = -c(E) + a(z) \cdot d(E) + b(z) \cdot r(E), \] for all $E\in K(\ensuremath{\mathbb{D}}D_0)$ (where we identify $a(z), b(z)$ with their analytic continuations from $D$ to $\ensuremath{\widetilde{\mathcal M}_{\Gamma_1(3)}}$). \item For $z \in D$, the stability condition $I(z)$ is geometric with $k(x)$ having phase 1. \end{enumerate} \end{Thm} On the boundary of $I(D)$ we have several interesting special points: the point $z=0$ corresponds to the \emph{large volume limit point}, where $\Im(a)\to+\mathop{\mathrm{inf}}\nolimitsty$ and the central charge is approximately given by $Z(E) = - \int_{\ensuremath{\mathbb{P}}^2} \mathop{\mathrm{ch}}\nolimits(E) e^{-ah}$, where $h$ is the class of a line in $\ensuremath{\mathbb{P}}^2$; the limit stability condition as $z \to 0$ can be described as a polynomial stability condition of \cite{large-volume}. The point $z=\mathop{\mathrm{inf}}\nolimitsty$ is the \emph{orbifold point}: the heart of the bounded $t$-structure is $\ensuremath{\mathcal A}_1 \cong \ensuremath{\mathbb{C}}oh_0 [\ensuremath{\mathbb{C}}^3/\ensuremath{\mathbb{Z}}_3]$ and the three simple objects of $\ensuremath{\mathcal A}_1$ have the same central charge equal to $-1/3$; this point is fixed under the $\ensuremath{\mathbb{Z}}_3$-action on $\ensuremath{\mathbb{D}}D_0$ given by relation \eqref{eq:Z3Z-relation} (i.e., by tensor product in $\ensuremath{\mathbb{C}}oh_0 [\ensuremath{\mathbb{C}}^3/\ensuremath{\mathbb{Z}}_3]$ with a non-trivial one-dimensional $\ensuremath{\mathbb{Z}}_3$-representation). 
Finally, when $\psi=\omega$ (resp.\ $\psi=\omega^2$) and so $z=-\frac 1{27}$ (these are called \emph{conifold points}), we have a singularity: indeed, $Z(\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2})=0$, resp.\ $Z(\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}(-1))=0$, depending on whether we approach $-\frac 1{27}$ from above or below. The proof of the theorem is based on the following two observations: \begin{enumerate} \item \label{obs:inequality} For all $z \in \ensuremath{\mathbb{C}} \setminus \ensuremath{\mathbb{R}}_{\le 0}$, the complex numbers $a(z), b(z)$ satisfy the inequalities of Definition \ref{def:setG}. \item \label{obs:monodromy} The monodromy action of $\Gamma_1(3)$ on the solutions $a(z),b(z)$ (computed, for example, in \cite{Aspinwall:Dbranes-CY}) is compatible with the $\Gamma_1(3)$-action on $\mathop{\mathrm{Stab}}^\dag$. \end{enumerate} We first verified Observation (\ref{obs:inequality}) by explicit computations using the computer algebra package SAGE \cite{sage}.\footnote{The program used to test the inequalities is available for download from the authors' homepages. It implements the power series expansion around $z=0$ and $\psi=0$ and tests the inequalities for random complex numbers in their respective convergence domains.} A complete argument is sketched in Appendix \ref{app:ineq}. To prove Theorem \ref{thm:PowerSeries} we only need to show Observation \eqref{obs:monodromy}: \begin{Prf} (Theorem \ref{thm:PowerSeries}) By Observation \eqref{obs:inequality} and Theorem \ref{thm:geom-stability}, we obtain an embedding $I\colon D \ensuremath{\hookrightarrow} U \subset \mathop{\mathrm{Stab}}^\dag(\ensuremath{\mathbb{D}}D_0)$. By Bridgeland's deformation result, the extension of $I$ to $\ensuremath{\widetilde{\mathcal M}_{\Gamma_1(3)}}$ is unique, if it exists. Now, we can extend $I$ to the $\Gamma_1(3)$-translates of $D$ uniquely by requiring it to be $\Gamma_1(3)$-equivariant. 
Hence, it remains to check that this extension of $I$ glues along the translates of $\partial D \subset \ensuremath{\widetilde{\mathcal M}_{\Gamma_1(3)}}$, and is compatible with the requirement that $a(z), b(z)$ are solutions to the Picard-Fuchs equation. Let $\gamma_0$ be the loop going in positive direction around the origin $z = 0$, and $\gamma_\mathop{\mathrm{inf}}\nolimitsty$ the loop around $z= \mathop{\mathrm{inf}}\nolimitsty$ acting on $\psi$ by $\psi \mapsto e^{\frac{2\pi i}3} \psi$. Then, by the $\Gamma_1(3)$-equivariance, it is in fact enough to check the glueing along $\overline{D} \cap \gamma_0(\overline{D})$ lying above $(-\frac 1{27}, 0) \subset \ensuremath{\mathbb{C}}$ in the $z$-plane, and along $\overline{D} \cap \gamma_\mathop{\mathrm{inf}}\nolimitsty(\overline{D})$, lying above the line segments $(0,1) \cdot e^{\frac 23 \pi i}$ and $(0,1) \cdot e^{\frac 43 \pi i}$ in the $\psi$-plane. The action of $\gamma_0$ on the solutions is given by \[ \omega_1(z) \mapsto \omega_1(z) + 1, \quad \omega_2(z) \mapsto \omega_2(z) + 2 \omega_1(z) + 1. \] The action of $\underline{\hphantom{M}} \otimes \ensuremath{\mathcal O}(1)$ on the set of geometric stability conditions $\sigma_{a, b}$ of Theorem \ref{thm:geom-stability} is given by $a \mapsto a + 1$ and $b \mapsto b - a - \frac 12$. Using the expansions in equation \eqref{eq:ab-exp0}, we see that the induced action of $\gamma_0$ on $a(z)$ and $b(z)$ matches exactly; hence the definition of $a(z), b(z)$ on $\gamma_0(D)$ by analytic continuation agrees with the implicit definition given by the requirement that $I$ is $\gamma_0$-equivariant; on the other hand, when $\arg(z) = \pi$ we have $B = 0$ and $b(-\frac 1{27}) = 0$ (see Appendix \ref{app:ineq}), and it follows that $a(z), b(z)$ still satisfy the inequalities of Definition \ref{def:setG} for $z \in \overline{D} \cap \gamma_0(\overline{D})$, i.e., for $z = (-\frac 1{27}, 0)$ with $\arg(z) = \pi$. 
Then Theorem \ref{thm:geom-stability} implies that $I$ glues along this boundary component of $D$ within the geometric chamber. Similarly, the action of $\gamma_\mathop{\mathrm{inf}}\nolimitsty$ on the space of solutions is computed in terms of the expansions around $z = \mathop{\mathrm{inf}}\nolimitsty$ as \[ \varpi_1(z) \mapsto \varpi_2(z), \quad \varpi_2(z) \mapsto - \varpi_1(z) - \varpi_2(z). \] The central charges of the three simple objects in the quiver category $\ensuremath{\mathcal A}_1$ are given by \begin{align*} Z(\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2})(z) &= b(z) = \tfrac{1}{3}\varpi_1(z) - \tfrac 13 \varpi_2(z) - \tfrac 13 \\ Z(\Omega_{\ensuremath{\mathbb{P}}^2}(1)[1])(z) &= -2 b(z) + a(z) - \tfrac 12 = \tfrac{1}{3}\varpi_1(z) + \tfrac 23 \varpi_2(z) - \tfrac 13 \\ Z(\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}(-1)[2])(z) &= b(z) - a(z) - \tfrac 12 = -\tfrac{2}{3}\varpi_1(z) - \tfrac 13 \varpi_2(z) - \tfrac 13 \end{align*} The autoequivalence $\bigl({\mathop{\mathrm{ST}}\nolimits}_{\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}} \circ (\underline{\hphantom{M}} \otimes \pi^*\ensuremath{\mathcal O}(1))\bigr)^{-1}$ permutes these 3 objects and preserves the heart of the t-structure $\ensuremath{\mathcal A}_1$; hence it is easy to see that its action on the central charge matches the monodromy $\gamma_\mathop{\mathrm{inf}}\nolimitsty$. \end{Prf} \appendix \section{Bounds on stable Chern classes after Dr\'ezet-Le Potier}\label{app:DP} We give a brief review and a reformulation of the main result of \cite{Drezet-LePotier}. 
We recall that for a torsion-free sheaf $\ensuremath{\mathcal F}$ on $\ensuremath{\mathbb{P}}^2$, its \emph{slope} is defined by $\mu(\ensuremath{\mathcal F}) = \frac{d(\ensuremath{\mathcal F})}{r(\ensuremath{\mathcal F})}$, giving the following notion of stability: \begin{Def} \label{def:stablesheaf} A torsion-free sheaf $\ensuremath{\mathcal F}$ on $\ensuremath{\mathbb{P}}^2$ is called \emph{slope-stable} if the inequality $\mu(\ensuremath{\mathcal F}') < \mu(\ensuremath{\mathcal F})$ holds for all saturated subsheaves $\ensuremath{\mathcal F}' \subset \ensuremath{\mathcal F}$. \end{Def} The \emph{discriminant} of $\ensuremath{\mathcal F}$ is defined by $\ensuremath{\mathbb{D}}elta(\ensuremath{\mathcal F}) = \frac{d(\ensuremath{\mathcal F})^2}{2r(\ensuremath{\mathcal F})^2} - \frac{c(\ensuremath{\mathcal F})}{r(\ensuremath{\mathcal F})}$. Let $\ensuremath{\mathcal A}A$ be the set of all $\alpha \in \ensuremath{\mathbb{Q}}$ such that there exists an exceptional vector bundle on $\ensuremath{\mathbb{P}}^2$ with slope $\alpha$. For any $\alpha \in \ensuremath{\mathcal A}A$, we call its rank $r_\alpha$ the smallest integer $r > 0$ such that $r\alpha \in \ensuremath{\mathbb{Z}}$. We call $\ensuremath{\mathbb{D}}elta_\alpha := \frac 12 \left(1 - \frac 1{r_\alpha^2}\right)$ its discriminant. It follows from Riemann-Roch that the rank and discriminant of an exceptional vector bundle with slope $\alpha$ (if it exists) are uniquely determined by these formulas. Similarly, any non-exceptional stable sheaf satisfies $\ensuremath{\mathbb{D}}elta \ge \frac 12$. For two rational numbers with $3 + \alpha + \beta \neq 0$, Dr\'ezet and Le Potier define the operation \[ \alpha . \beta := \frac{\alpha + \beta}2 + \frac{\ensuremath{\mathbb{D}}elta_\beta - \ensuremath{\mathbb{D}}elta_\alpha}{3 + \alpha - \beta} \] Let $\ensuremath{\mathbb{D}}DD$ be the set of rational numbers of the form $\frac{p}{2^q}$ for $p \in \ensuremath{\mathbb{Z}}, q \in \ensuremath{\mathbb{Z}}_{\ge 0}$. 
One defines a function $\epsilon \colon \ensuremath{\mathbb{D}}DD \to \ensuremath{\mathbb{Q}}$ inductively by $\epsilon(n) := n$ for $n \in \ensuremath{\mathbb{Z}}$ and \[ \epsilon\left(\frac{2p+1}{2^{q+1}}\right) := \epsilon\left(\frac p{2^q}\right) . \epsilon\left(\frac{p+1}{2^q}\right) \] \begin{Thm}[{\cite[Th\'eor\`eme A and chapitre 5]{Drezet-LePotier}}] \label{thm:DP-A} The set $\ensuremath{\mathcal A}A$ of exceptional slopes is equal to the image $\epsilon(\ensuremath{\mathbb{D}}DD)$, and for each slope $\alpha \in \ensuremath{\mathcal A}A$ the exceptional vector bundle of slope $\alpha$ is unique. \end{Thm} Now define \begin{align*} P(X) & := 1 + \frac 32 X + \frac 12 X^2 \\ p(x) & := \begin{cases} P(-\abs{x}) & \abs{x} < 3 \\ 0 & \text{otherwise} \end{cases} \\ \intertext{and, for any $\alpha \in \ensuremath{\mathcal A}A$,} p_\alpha(x) &:= p(x - \alpha) - \ensuremath{\mathbb{D}}elta_\alpha. \end{align*} If $\alpha, \beta$ are of the form given in Theorem \ref{thm:DP-A}, then $p_\alpha$ and $p_\beta$ are monotone decreasing and increasing, respectively; they intersect in the point $(\alpha . \beta, \ensuremath{\mathbb{D}}elta_{\alpha . \beta})$. \begin{Thm}[{\cite{Drezet-LePotier}, \cite[Theorem 16.2.1]{LePotier}}] \label{thm:DP-C} Given an integer $r > 0$ and rationals $\mu, \ensuremath{\mathbb{D}}elta \in \ensuremath{\mathbb{Q}}$, there exists a slope-stable sheaf $\ensuremath{\mathcal E}$ on $\ensuremath{\mathbb{P}}^2$ with rank $r$, slope $\mu$, and discriminant $\ensuremath{\mathbb{D}}elta$ if and only if \begin{enumerate} \item \label{divisibility} $r\mu \in \ensuremath{\mathbb{Z}}$ and $r(P(\mu) - \ensuremath{\mathbb{D}}elta) \in \ensuremath{\mathbb{Z}}$, and \item For every $\alpha \in \ensuremath{\mathcal A}A$ with $r_\alpha < r$ and $\abs{\alpha - \mu} <3$, we have $\ensuremath{\mathbb{D}}elta \ge p_\alpha(\mu)$. 
\end{enumerate} \end{Thm} The only change compared to \cite[Theorem 16.2.1]{LePotier} is that we replaced [Gieseker-]stable with slope-stable, which is justified by \cite[Th\'eor\`eme (4.11)]{Drezet-LePotier}. This leads us to define (cf. \cite[Sect.\ 16.4]{LePotier}) $\delta_\mathop{\mathrm{inf}}\nolimitsty^{DP} \colon \ensuremath{\mathbb{R}} \to [1/2,1]$ as \[ \delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}:= \sup \stv{p_\alpha}{\alpha \in \ensuremath{\mathcal A}A}. \] The necessary and sufficient condition for the existence of non-exceptional slope-stable sheaves can then be written as \[ \ensuremath{\mathbb{D}}elta\geq\delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}(\mu). \] As discussed in \cite[Sect.\ 16.4]{LePotier}, this is equivalent to the formulation in Thereom \ref{thm:DP-C} for purely arithmetic reasons. The first part of Theorem \ref{thm:DP} now follows immediately. For the last assertion, let $(\mu_n, \ensuremath{\mathbb{D}}elta_n)$ be a sequence of distinct points in $S_E$ that converges in $\ensuremath{\mathbb{R}}^2$ to $(\mu, \ensuremath{\mathbb{D}}elta)$. For every $\alpha \in \ensuremath{\mathcal A}A$, we have $\ensuremath{\mathbb{D}}elta_n \ge p_\alpha(\mu_n)$ for all $n \gg 0$ (in fact, this holds whenever $\alpha \neq \mu_n$). By continuity, $\ensuremath{\mathbb{D}}elta \ge p_\alpha(\mu)$, and thus $\ensuremath{\mathbb{D}}elta \ge \delta_\mathop{\mathrm{inf}}\nolimitsty^{DP}(\mu)$, i.e., the accumulation point $(\mu, \ensuremath{\mathbb{D}}elta)$ is contained in $S_\mathop{\mathrm{inf}}\nolimitsty$. \section{Bridgeland's stability conditions}\label{app:BridgelandFramework} In this section we give a brief review of stability conditions on derived categories, following \cite{Bridgeland:Stab}. Let $\ensuremath{\mathbb{D}}D$ be a triangulated category with good properties, e.g.\ the bounded derived category of coherent sheaves on a smooth and projective variety or $\ensuremath{\mathbb{D}}D_0$. 
A \emph{stability condition} $\sigma$ on $\ensuremath{\mathbb{D}}D$ consists in a pair $(Z,\ensuremath{\mathbb{P}}P)$, where $Z: K(\ensuremath{\mathbb{D}}D)\to\ensuremath{\mathbb{C}}$ (\emph{central charge}) is an additive map and $\ensuremath{\mathbb{P}}P(\phi)\subset\ensuremath{\mathbb{D}}D$ are full, additive subcategories ($\phi\in\ensuremath{\mathbb{R}}$) satisfying: \begin{enumerate} \item for any $0 \neq E\in\ensuremath{\mathbb{P}}P(\phi)$ we have $Z(E)\neq0$ and $Z(E)/|Z(E)|=\exp(i\pi\phi)$; \item $\forall\phi\in\ensuremath{\mathbb{R}}$, $\ensuremath{\mathbb{P}}P(\phi+1)=\ensuremath{\mathbb{P}}P(\phi)[1]$; \item if $\phi_1>\phi_2$ and $A_j\in\ensuremath{\mathbb{P}}P(\phi_j)$, $j=1,2$, then $\ensuremath{\mathbb{H}}om(A_1,A_2)=0$; \item \label{enum:HN-filt} for any $E\in\ensuremath{\mathbb{D}}D$ there is a sequence of real numbers $\phi_1>\dots >\phi_n$ and a collection of triangles $E_{j-1}\to E_j\to A_j$ with $E_0=0$, $E_n=E$ and $A_j\in\ensuremath{\mathbb{P}}P(\phi_j)$ for all $j$. \end{enumerate} The collection of exact triangles in (\ref{enum:HN-filt}) is called the \emph{Harder-Narasimhan filtration} of $E$. Each subcategory $\ensuremath{\mathbb{P}}P(\phi)$ is extension-closed and abelian. Its nonzero objects are said to be \emph{semistable} of phase $\phi$ in $\sigma$, and the simple objects (i.e., objects without proper subobjects or quotients) are said to be \emph{stable}. For any interval $I\subset\ensuremath{\mathbb{R}}$, $\ensuremath{\mathbb{P}}P(I)$ is defined to be the extension-closed subcategory of $\ensuremath{\mathbb{D}}D$ generated by the subcategories $\ensuremath{\mathbb{P}}P(\phi)$, for $\phi\in I$. Bridgeland proved that, for all $\phi\in\ensuremath{\mathbb{R}}$, $\ensuremath{\mathbb{P}}P((\phi,\phi+1])$ is the heart of a bounded $t$-structure on $\ensuremath{\mathbb{D}}D$. The category $\ensuremath{\mathbb{P}}P((0, 1])$ is called the \emph{heart} of $\sigma$. 
\begin{Rem}\label{rmk:tstruct} Let $\ensuremath{\mathbb{H}}:=\{z\in\ensuremath{\mathbb{C}}\colon z=|z|\exp(i\pi\phi),\,0<\phi\leq1\}$. If $\ensuremath{\mathcal A}\subset\ensuremath{\mathbb{D}}D$ is the heart of a bounded $t$-structure, then a group homomorphism $Z\colon K(\ensuremath{\mathbb{D}}D)\to\ensuremath{\mathbb{C}}$ gives rise to a unique stability condition when the following two conditions are satisfied (\cite[Prop.\ 5.3]{Bridgeland:Stab}): (i) $Z(\ensuremath{\mathcal A}\setminus0)\subset\ensuremath{\mathbb{H}}$ ($Z$ is a \emph{stability function} on $\ensuremath{\mathcal A}$); (ii) Harder-Narasimhan filtrations exist for objects in $\ensuremath{\mathcal A}$ with respect to $Z$. Condition (i) means that, for all $0\neq A\in\ensuremath{\mathcal A}$, the requirement $Z(A)\in\ensuremath{\mathbb{H}}$ gives a well-defined phase $\phi(A):=(1/\pi)\arg(Z(A))\in(0,1]$. This defines a notion of \emph{phase-stability} for objects in $\ensuremath{\mathcal A}$, and so of (semi)stable objects of $\ensuremath{\mathcal A}$. Then condition (ii) asks for the existence of finite filtrations for every object in $\ensuremath{\mathcal A}$ in semistable ones with decreasing phases. In particular, if $\ensuremath{\mathcal A}$ is an abelian category of finite length (i.e., Artinian and Noetherian) with a finite number of simple objects $\{S_0, \dots, S_m\}$, then any group homomorphism $Z\colon K(\ensuremath{\mathbb{D}}D)\to\ensuremath{\mathbb{C}}$ with $Z(S_i)\in \ensuremath{\mathbb{H}}$ for all $i$ extends to a unique stability condition on $\ensuremath{\mathbb{D}}D$. \end{Rem} We give an improved criterion for the existence of Harder-Narasimhan filtrations: \begin{Prop}\label{prop:HNFiltrationsDiscrete} Let $\ensuremath{\mathcal A}\subset\ensuremath{\mathbb{D}}D$ be the heart of a bounded $t$-structure on $\ensuremath{\mathbb{D}}D$ and let $Z\colon K(\ensuremath{\mathbb{D}}D)\to\ensuremath{\mathbb{C}}$ be a stability function on $\ensuremath{\mathcal A}$. 
Write $\ensuremath{\mathbb{P}}P'(1) \subset \ensuremath{\mathcal A}$ for the full subcategory of objects with phase 1 with respect to $Z$, and assume that: \begin{itemize} \item The image of $\Im(Z)$ is a discrete subgroup of $\ensuremath{\mathbb{R}}$. \item For all $E\in\ensuremath{\mathcal A}$, any sequence of subobjects \[ 0=A_0\subset A_1\subset\ldots\subset A_j\subset A_{j+1}\subset\ldots \subset E, \] with $A_j\in\ensuremath{\mathbb{P}}P'(1)$, stabilizes. \end{itemize} Then Harder-Narasimhan filtrations exist for objects in $\ensuremath{\mathcal A}$ with respect to $Z$. \end{Prop} \begin{Prf} We use the same ideas as in \cite[Prop.\ 7.1]{Bridgeland:K3}, and we want to apply \cite[Prop.\ 2.4]{Bridgeland:Stab}. First of all notice that, if \[ 0\to A\to E\to B\to 0 \] is an exact sequence in $\ensuremath{\mathcal A}$, then \[ 0 \leq \Im Z(A)\leq \Im Z(E)\quad\text{and}\quad 0 \leq \Im Z(B)\leq \Im Z(E). \] Let \[ \ldots\subset E_{j+1}\subset E_j\subset\ldots\subset E_1\subset E_0=E \] be an infinite sequence of subobjects of an object $E$ in $\ensuremath{\mathcal A}$ with $\phi(E_{j+1})>\phi(E_j)$, for all $j$. Since $\Im Z$ is discrete, there exists $N\in\ensuremath{\mathbb{N}}N$ such that \[ 0\leq \Im Z(E_n)=\Im Z(E_{n+1}), \] for all $n\geq N$. Consider the exact sequence in $\ensuremath{\mathcal A}$ \[ 0\to E_{n+1}\to E_n\to F_{n+1}\to 0. \] Then, by additivity of $\Im Z$, we have $\Im Z(F_{n+1})=0$, for all $n\geq N$. But this yields $\phi(F_{n+1})=1$, for all $n\geq N$ and so $\phi(E_{n+1})\leq\phi(E_n)$, a contradiction. In this way, property $(a)$ of \cite[Prop.\ 2.4]{Bridgeland:Stab} is satisfied. Let \[ E=E_0\twoheadrightarrow E_1\twoheadrightarrow\ldots\twoheadrightarrow E_j\twoheadrightarrow E_{j+1}\twoheadrightarrow\ldots \] be an infinite sequence of quotients of $E$ in $\ensuremath{\mathcal A}$ with $\phi(E_j)>\phi(E_{j+1})$, for all $j$. As before, $\Im Z(E_n)=\Im Z(E_{n+1})$, for all $n\geq N$. 
Consider the exact sequence in $\ensuremath{\mathcal A}$ \[ 0\to F_n\to E_N\to E_n\to 0, \] for $n\geq N$. Then $\Im Z(F_n)=0$, i.e., $F_n\in\ensuremath{\mathbb{P}}P'(1)$. Hence we have an infinite sequence of subobjects of $E_N$ belonging to $\ensuremath{\mathbb{P}}P'(1)$, a contradiction. Property $(b)$ of \cite[Prop.\ 2.4]{Bridgeland:Stab} is then verified and the proposition is proved. \end{Prf} A stability condition is called \emph{locally-finite} (see \cite[Sect.\ 5]{Bridgeland:Stab}) if there exists some $\epsilon>0$ such that, for all $\phi\in\ensuremath{\mathbb{R}}$, each quasi-abelian subcategory $\ensuremath{\mathbb{P}}P((\phi-\epsilon,\phi+\epsilon))$ is of finite length. In this way $\ensuremath{\mathbb{P}}P(\phi)$ has finite length so that every object in $\ensuremath{\mathbb{P}}P(\phi)$ has a finite Jordan--H\"older filtration into stable factors of the same phase. The set of stability conditions which are locally-finite will be denoted by $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D)$. The stability conditions we consider also satisfy the additional conditions in the definition given in \cite[Section 2]{Kontsevich-Soibelman:stability} (in particular the support property, as discussed below). The local-finiteness condition will then be automatic. The main result in \cite{Bridgeland:Stab} endows $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D)$ with a topology, induced by a metric $d(-,-)$ (see \cite[Prop.\ 8.1]{Bridgeland:Stab} for the explicit form of $d$), in such a way it becomes a complex manifold whose connected components are locally modeled on linear subspaces of $\ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D), \ensuremath{\mathbb{C}})$ via the map $\ensuremath{\mathbb{Z}}Z$ sending a stability condition $(Z,\ensuremath{\mathbb{P}}P)$ to its central charge $Z$. 
A connected component of $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D)$ is called \emph{full} if it has maximal dimension, i.e., it is modeled on the whole $\ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D), \ensuremath{\mathbb{C}})$. For a stability condition $\sigma=(Z,\ensuremath{\mathbb{P}}P)$ belonging to a full connected component (we will call $\sigma$ \emph{full}), we recall the statement of Bridgeland's deformation result. In this case, the metric \begin{equation*} \| W\|_{\sigma}:=\sup\left\{\frac{|W(E)|}{|Z(E)|}\colon E\text{ is }\sigma\text{-stable}\right\} \end{equation*} is finite, and thus defines a topology on $\ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D), \ensuremath{\mathbb{C}})$. \begin{Thm}[{\cite[Theorem 7.1]{Bridgeland:Stab}, \cite[Lemma 4.5]{Bridgeland:K3}}] \label{thm:B-deform} In the situation of the previous paragraph, let $0<\epsilon<1/8$. Then, for any group homomorphism $W\colon K(\ensuremath{\mathbb{D}}D)\to\ensuremath{\mathbb{C}}$ with \[ \|W-Z\|_{\sigma}<\sin(\pi\epsilon), \] there exists a unique (locally-finite) stability condition $\tau=(W,\ensuremath{\mathbb{Q}}Q)\in\mathop{\mathrm{Stab}}^*(\ensuremath{\mathbb{D}}D)$ with $d(\sigma,\tau)<\epsilon$. \end{Thm} In particular this shows that the map $\ensuremath{\mathbb{Z}}Z \colon \mathop{\mathrm{Stab}}^*(\ensuremath{\mathbb{D}}D) \to \ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D), \ensuremath{\mathbb{C}})$ is a local homeomorphism. Let us also clarify the relation between full stability conditions in the situation of finite-rank $K$-group and the support property introduced in \cite{Kontsevich-Soibelman:stability}. More precisely, assume that $\ensuremath{\mathbb{D}}D$ is a triangulated category such that $K(\ensuremath{\mathbb{D}}D)/\mathrm{torsion}$ is a finite-dimensional lattice, and choose a metric $\abs{\cdot}$ on $K(\ensuremath{\mathbb{D}}D)_\ensuremath{\mathbb{R}}$. 
Then a stability condition $\sigma=(Z,\ensuremath{\mathbb{P}}P)\in\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D)$ has the \emph{support property}\footnote{In the notation of \cite[Section 1.2]{Kontsevich-Soibelman:stability}, we implicilty made the choices $\ensuremath{\mathbb{L}}ambda = K(\ensuremath{\mathbb{D}}D)/\mathrm{torsion}$ and $\mathrm{cl}$ the projection.} if there exists a constant $C>0$ such that \[ C \abs{Z(E)} \ge \abs{E} \] for all $\sigma$-stable $E \in \ensuremath{\mathbb{D}}D$. \begin{Prop}\label{prop:SupportProperty} Assume that $K(\ensuremath{\mathbb{D}}D)$ has finite rank. Then a Bridgeland stability condition $\sigma = (Z, \ensuremath{\mathbb{P}}P)$ is full if and only if it has the support property. \end{Prop} \begin{Prf} Denote by $\abs{\cdot}^\vee$ the induced metric on $\ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D), \ensuremath{\mathbb{C}})$. A stability condition $\sigma=(Z,\ensuremath{\mathbb{P}}P)$ is full if and only if the semi-metric $\| \cdot \|_\sigma$ is finite. Since $\ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D), \ensuremath{\mathbb{C}})$ is finite-dimensional, this holds if and only if it is bounded by a multiple of $\abs{\cdot}^\vee$, i.e., if and only if there exists $C > 0$ such that, for any $W \in \ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D), \ensuremath{\mathbb{C}})$, we have \[ \|W\|_\sigma \le C \cdot \abs{W}^\vee \] Hence if $\sigma$ satisfies the support property, then \begin{equation*} \begin{split} \|W\|_\sigma &=\sup\left\{\frac{|W(E)|}{|Z(E)|}\colon E\text{ is }\sigma\text{-stable}\right\}\\ &\le C \cdot \sup\left\{\frac{|W(E)|}{\abs{E}}\colon E\text{ is }\sigma\text{-stable}\right\}\\ &\le C \cdot \abs{W}^\vee \end{split} \end{equation*} and so $\sigma$ is full. Conversely, assume that $\sigma$ does not satisfy the support property, i.e., there is a sequence $E_n$ of $\sigma$-stable objects with $\abs{Z(E_n)} < \frac{\abs{E_n}}n$. 
Let $W_n\in\ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D), \ensuremath{\mathbb{C}})$ be such that $\abs{W_n}^\vee = 1$ and $\abs{W_n(E_n)} = \abs{E_n}$. Then \[ \|W_n\|_{\sigma} \ge \frac{\abs{W_n(E_n)}}{\abs{Z(E_n)}} > n \cdot \frac{\abs{E_n}}{\abs{E_n}} = n \abs{W_n}^\vee \] and so $\sigma$ is not full. \end{Prf} \begin{Rem}\label{rmk:GroupAction} By \cite[Lemma 8.2]{Bridgeland:Stab}, we have a left action on $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D)$ by the autoequivalence group $\mathop{\mathrm{Aut}}\nolimits(\ensuremath{\mathbb{D}}D)$, and a right action by $\widetilde{\mathop{\mathrm{GL}}}_2(\ensuremath{\mathbb{R}})$, the universal cover of the matrices in $\mathop{\mathrm{GL}}_2(\ensuremath{\mathbb{R}})$ with positive determinant. The first action is defined, for $\ensuremath{\mathbb{P}}hi\in\mathop{\mathrm{Aut}}\nolimits(\ensuremath{\mathbb{D}}D)$, by $\ensuremath{\mathbb{P}}hi(Z,\ensuremath{\mathbb{P}}P)=(Z\circ\phi_*^{-1},\ensuremath{\mathbb{P}}hi(\ensuremath{\mathbb{P}}P))$, where $\phi_*$ is the automorphism induced by $\ensuremath{\mathbb{P}}hi$ at the level of Grothendieck groups. The second one is the lift of the action of $\mathop{\mathrm{GL}}_2(\ensuremath{\mathbb{R}})$ on $\ensuremath{\mathbb{H}}om(K(\ensuremath{\mathbb{D}}D), \ensuremath{\mathbb{C}})$ (by identifying $\ensuremath{\mathbb{C}}\cong\ensuremath{\mathbb{R}}^2$). Notice, in particular, that the additive group $\ensuremath{\mathbb{C}}$ acts on $\mathop{\mathrm{Stab}}(\ensuremath{\mathbb{D}}D)$, via the embedding $\ensuremath{\mathbb{C}}\ensuremath{\hookrightarrow}\widetilde{\mathop{\mathrm{GL}}}_2(\ensuremath{\mathbb{R}})$. 
\end{Rem} \section{Proof of the inequality for central charges}\label{app:ineq} This appendix is a brief sketch of a complete proof of Observation (\ref{obs:inequality}) on page \pageref{obs:inequality}: On the fundamental domain $\ensuremath{\mathbb{C}} \setminus \ensuremath{\mathbb{R}}_{\le 0}$, the functions $a(z), b(z)$ defined by equations \eqref{eq:ab-exp0} and \eqref{eq:ab-expinf} satisfy the inequalities of Definition \ref{def:setG}. The general idea is to deduce the inequalities from inequalities for real or imaginary parts of holomorphic functions, which only need to be tested on the boundary of the fundamental domain. Note that the boundary, expressed in $z$ and $\psi$ according to the convergence domains $\abs{z} \le \frac 1{27}$ and $\abs{\psi} < 1$ of our power series expansions, consists of two copies of $z \in [-1/27, 0]$, with the two natural branch choices of $\ln z$, and of the two ray segments $\psi \in [0, 1] \cdot e^{2\pi i/3}$ and $\psi \in [0, 1] \cdot e^{4 \pi i/3}$. (We will refer to the boundary segments by $\arg(z) = \pm \pi$ etc.) \subsection*{Step 1.} We first show that $\Im(a)>0$. For example by using the integral criteria, it can be shown easily that the series $\sum_{n=1}^\mathop{\mathrm{inf}}\nolimitsty \frac{(3n-1)!}{n!^3} \frac{1}{27^n}$ converges to a real number less than $\frac{1}{\pi\sqrt{3}}$ (the exact value is 0.129...). Thus, for $\abs{z}<1/27$, $z\neq0$, $\abs{\arg(z)}<\pi$, we have \[ \Im(a(z))\geq-\frac{1}{2\pi}\left(\ln\frac 1{27} +3 \sum_{n=1}^\mathop{\mathrm{inf}}\nolimitsty \frac{(3n-1)!}{n!^3} \frac{1}{27^n}\right)>0. \] Passing to the $\psi$-coordinate, the inequality follows trivially on the two boundary rays from \eqref{eq:ab-expinf} and the definition of $\varpi_1(\psi)$. \subsection*{Step 2.} For $B=-\Im(b(z))/\Im(a(z))$ we have $-1 < B < 0$. 
More precisely, we will use $-1/2\leq B < 0$ for $\Im(z) \ge 0$ (i.e., for $2\pi/3 < \arg(\psi)\leq\pi$ or $0\leq\arg(z) < \pi$), and $-1< B\leq -1/2$ for $\Im (z) \le 0$ (i.e., for $\pi\leq\arg(\psi)<4\pi/3$ or $-\pi<\arg(z)\leq0$). To show this, first notice that $\Im(Z(\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2})(z))=\Im(b(z)) > 0$: indeed, for $\arg(z) = \pi$ or $\arg(\psi) = \frac {2\pi}3$, it is trivial to check that $\Im(b(z)) = 0$. Similarly, the inequality holds strictly for $\arg(z) = - \pi$ or $\arg(\psi) = \frac{4\pi}3$, and it also holds around $z = 0$. Thus the strict inequality holds on the interior of the fundamental domain, and thus $B < 0$. Similarly we can show $\Im(Z(\ensuremath{\mathcal O}_{\ensuremath{\mathbb{P}}^2}(-1)[2])(z)) = \Im(b(z) - a(z)) < 0$ and thus $-1 < B$. For the more precise statement, it is sufficient to look at the sign of $\Im(Z(\Omega_{\ensuremath{\mathbb{P}}^2}(1)[1])(z)) = \Im(b(z) - \frac 12 a(z))$: once again the maximum principle shows that $\Im(Z(\Omega_{\ensuremath{\mathbb{P}}^2}(1)[1])(z))\leq0$, for $2\pi/3 < \arg(\psi)\leq\pi$ and $0\leq\arg(z)< \pi$. This implies to $-\frac 12 \leq B$. The case $\Im (z) \le 0$ is analogous. \subsection*{Step 3.} Finally, to check that the other two inequalities of Definition \ref{def:setG} are satisfied, we show the following stronger statement: \begin{equation}\label{eq:UpperSemiCircUtah} -\ensuremath{\mathbb{R}}e(b(z))-B\ensuremath{\mathbb{R}}e(a(z))+\frac{B^2}{2}<\frac{3}{8}=\ensuremath{\mathbb{D}}elta_{-1/2}, \end{equation} for all $-1/2\leq B\leq0$, in the region $\Im(z) \ge 0$ (and an analogous statement, which we will skip, for $\Im(z) \le 0$). By the claims of the previous step, this will imply Observation (\ref{obs:inequality}), as $\delta^{DP}_\mathop{\mathrm{inf}}\nolimitsty(\mu) \ge \frac 12$, and as $\ensuremath{\mathbb{D}}elta_B \ge \frac 38$ for $B \not\in \ensuremath{\mathbb{Z}}$. 
Also note that we only have to prove the inequality above for $B = 0$, and for $B = -\frac 12$. First we note that $b(-1/27)=0$ (with the choice of $\arg(-1/27) = \pi$): this can be deduced from the monodromy. From this, it follows that the series $\sum_{n=1}^\mathop{\mathrm{inf}}\nolimitsty \abs{l_n} \frac{1}{27^n}$ (from the definition, it is easy to see that $l_n=(-1)^n\abs{l_n}$) converges to a real number less than 3 (the exact value is 1.558...). From this we can deduce \eqref{eq:UpperSemiCircUtah} for the cases $\arg(z) = 0$ and $\arg(z)= \pi$: Setting $B = - \frac 12$ and $B = 0$ yields \begin{align*} \tfrac 12 \ensuremath{\mathbb{R}}e(\omega_2(z))+\tfrac 18 &< \tfrac 38, \quad \text{and} \quad \tfrac 12 \ensuremath{\mathbb{R}}e(\omega_2(z))-\tfrac 12 \ensuremath{\mathbb{R}}e(\omega_1(z))+\tfrac 14 <\tfrac 38. \end{align*} As $\ensuremath{\mathbb{R}}e(\omega_1(z))= \frac{1}{2\pi}\Im(\ln(z))$, this would follow from \[ \tfrac 12 \ensuremath{\mathbb{R}}e(\omega_2(z))+\tfrac 18<\tfrac 38 \quad \text{if $z<0$, and } \quad \tfrac 12 \ensuremath{\mathbb{R}}e(\omega_2(z))+\tfrac 14<\tfrac 38 \quad \text{if $z>0.$} \] Finally using the definition of $\omega_2(z)$, both inequalities become \begin{multline*} -\frac{1}{8\pi^2} \left( (\ln\abs{z})^2 + 6 \ln\abs{z} \cdot \sum_{n=1}^\mathop{\mathrm{inf}}\nolimitsty \frac{(3n-1)!}{n!^3} (-z)^n \right. \left. + \sum_{n=1}^\mathop{\mathrm{inf}}\nolimitsty \abs{l_n} (-z)^n \right) <\frac 18. \end{multline*} But the quantity on the left is at most \[ -\frac{1}{8\pi^2} \left( \left(\ln\frac{1}{27}\right)^2 + 6 \ln\frac{1}{27} \cdot \sum_{n=1}^\mathop{\mathrm{inf}}\nolimitsty \frac{(3n-1)!}{n!^3} \frac{1}{27^n} - \sum_{n=1}^\mathop{\mathrm{inf}}\nolimitsty \abs{l_n} \frac{1}{27^n} \right), \] which is smaller than $\frac 18$ by the estimate of the last term mentioned earlier. 
For the cases $\arg(\psi) = \pi$ and $\arg(\psi) = \frac{2\pi}3$, we first observe that, for $0<\rho\leq1$ and for any function $u\colon \ensuremath{\mathbb{Z}}_{>0}\to\{0,1\}$ with $u(1)=1$, we have \begin{equation}\label{eqn:Estimatepsi} \begin{split} \frac 1{2\pi} &\sum_{n=1 \atop 3\nmid n}^\mathop{\mathrm{inf}}\nolimitsty\frac{\Gamma\left(\frac n3 \right)}{\Gamma(n+1) \Gamma\left(1 - \frac n3 \right)^2} (3\rho)^n(-1)^{u(n)}\\ &\leq\rho\left(-\frac{3}{2\pi}\frac{\Gamma\left(\frac 13 \right)}{\Gamma\left(\frac 23 \right)^2}+\frac {1}{2\pi}\sum_{n=2 \atop 3\nmid n}^\mathop{\mathrm{inf}}\nolimitsty\frac{\Gamma\left(\frac n3 \right)}{\Gamma(n+1) \Gamma\left(1 - \frac n3 \right)^2} 3^n\right)<0. \end{split} \end{equation} Setting $B =0$ and $B = - \frac 12$, and using the definition of $\varpi_1(\psi)$, the needed inequalities are \[ \frac 38>\frac 13\ensuremath{\mathbb{R}}e(\varpi_2(-\rho))+\frac 13, \] for $\psi=-\rho$, and \[ \frac 38>\begin{cases} \frac 16 \ensuremath{\mathbb{R}}e(\varpi_2(\rho e^{2\pi i/3}))+\frac 5{24}\\ \frac 23 \ensuremath{\mathbb{R}}e (\varpi_2(\rho e^{2\pi i/3})) + \frac 13 \end{cases}, \] for $\psi=\rho e^{2\pi i/3}$, $0<\rho\leq1$. But, by \eqref{eqn:Estimatepsi}, \[ \ensuremath{\mathbb{R}}e(\varpi_2(\rho e^{2\pi i/3})), \ensuremath{\mathbb{R}}e(\varpi_2(-\rho)) <0. \] Hence, \eqref{eq:UpperSemiCircUtah} holds also for $\arg(\psi) = \pi$ and $\arg(\psi) = \frac{2\pi}3$, and the proof of Observation \eqref{obs:inequality} is complete. \end{document}
\begin{document} \title{Post Markovian Dynamics of Quantum Correlations: Entanglement vs. Discord} \author{Hamidreza Mohammadi} \thanks{[email protected]} \affiliation{Department of Physics, University of Isfahan, Isfahan, Iran} \affiliation{Quantum Optics Group, University of Isfahan, Isfahan, Iran} \begin{abstract} \noindent Dynamics of an open two-qubit system is investigated in the post-Markovian regime, where the environments have a short-term memory. Each qubit is coupled to separate environment which is held in its own temperature. The inter-qubit interaction is modeled by XY-Heisenberg model in the presence of spin-orbit interaction and inhomogeneous magnetic field. The dynamical behavior of entanglement and discord has been considered. The results show that, quantum discord is more robust than quantum entanglement, during the evolution. Also the asymmetric feature of quantum discord can be monitored by introducing the asymmetries due to inhomogeneity of magnetic field and temperature difference between the reservoirs. By employing proper parameters of the model, it is possible to maintain non-vanishing quantum correlation at high degree of temperature. The results can provide a useful recipe for studying of dynamical behavior of two-qubit systems such as trapped spin-electrons in coupled quantum dots. \end{abstract} \pacs{03.67.Hk, 03.65.Ud, 75.10.Jm} \maketitle \section{INTRODUCTION} The weirdness of quantum mechanics lies on the concept of quantum correlation which is originated from the superposition principle. There are two important aspects of quantum correlation: \textit{quantum entanglement} \cite{EPRPRA1935,SchrodingerNat1935}which is defined within the entanglement-separability paradigm and \textit{quantum discord} \cite{ZurekPRL2001,HendersonJPA2001}, defined from an information-theoretic perspective. 
The quantitative and qualitative evaluation of such correlations is a central task in conceptual studies of quantum mechanics, and it also has crucial significance in operational quantum information theory. A mixed state $\rho$ of a bipartite system is an entangled state if it is not separable, i.e., it cannot be prepared by Local Operations and Classical Communication (LOCC) tasks. There are many measures which evaluate the amount of entanglement of a quantum state. The entanglement of formation is one of these measures; it enumerates the resources needed to create a given entangled state. For the case of a two-qubit bipartite system, the formula of the entanglement of formation can be expressed as a smooth function of \textit{the concurrence}, and hence the concurrence can be taken as a measure of entanglement in its own right \cite{WoottersPRL1997}. The concurrence of the state $\rho_{AB}$ can be obtained explicitly as: \begin{equation} C(\rho_{AB})=\max\{0,2\lambda_{max}-\sum_{i=1}^{4}\,\lambda_{i}\},\label{concurrence} \end{equation} where the $\lambda_{i}$ are the eigenvalues of the matrix $R=\sqrt{\sqrt{\rho_{AB}}\tilde{\rho}_{AB}\sqrt{\rho_{AB}}}$, and $\tilde{\rho}_{AB}$ is defined by $\tilde{\rho}_{AB}:=(\sigma^{y}\otimes\sigma^{y})\rho_{AB}^{*}(\sigma^{y}\otimes\sigma^{y})$; here $\sigma^y$ is the Pauli $y$-matrix \cite{WoottersPRL1997}. Until some time ago, entanglement was considered as the only type of quantum correlation that could be found in a composite quantum state. However, it has been discovered that some multi-partite separable states could speed up quantum computation algorithms \cite{KnillPRL1998}, i.e., they possess some quantum features. Therefore, entanglement is not the only aspect of quantum correlation. Datta {\it et al.} \cite{DattaPRL2008} have shown that the resource of this speedup is another important type of quantum correlation, named Quantum Discord (QD). Quantum discord was first introduced by Zurek {\it et.
al.} \cite{ZurekPRL2001} and Henderson {\it et. al.} \cite{HendersonJPA2001}, independently in year 2001. The definition of QD lies on the difference between two classically equivalent definitions of mutual information in the quantum mechanics language. In mathematical sense the quantum discord could be obtained by eliminating the classical correlation from the total correlation measured by quantum mutual information. The classical correlation between the parts of a bipartite system can be obtained by use of the measurement-base conditional density operator. Hence we can write the discord with respect to the $B$ subsystem (right discord) as: \begin{equation} D_{B}(\rho_{AB})=I(\rho_{AB})-CC_{B}(\rho_{AB}).\label{discord} \end{equation} Where $I(\rho_{AB})=S(\rho_{A})+S(\rho_{B})-S(\rho_{AB})$ is the mutual information and $CC_{B}(\rho_{AB})=\underset{\{\Pi_{k}^{(B)}\}}{\sup}\{S(\rho_{A})-S(\rho_{AB}|\{\Pi_{k}^{(B)}\})\}$ is the classical part of correlation. Here $\rho_{A(B)}$ and $\rho_{AB}$ refer to the reduced density matrix of subsystem $A(B)$ and the density matrix of the system as the whole and $S(\rho)=-Tr(\rho\,\log_{2}\rho)$ is Von Neumann entropy. The maximization in the definition of classical correlation is taken over the set of generalized measurements (POVMs) $\{\Pi_{k}^{(B)}\}$, and $S(\rho_{AB}|\{\Pi_{k}^{(B)}\})=\sum_{k=0}^{1}p_{k}S(\rho_{k})$ is the conditional entropy of subsystem $A$, with $\rho_{k}=Tr_{B}((I_{A}\otimes\Pi_{k}^{(B)})\,\:\rho_{AB}\,\:(I_{A}\otimes\Pi_{k}^{(B)}))/p_{k}$ and $p_{k}=Tr(\rho_{AB}\:(I_{A}\otimes\Pi_{k}^{(B)}))$. However, one can swap the role of the subsystems $A$ and $B$ to obtain discord with respect to $A$ subsystem (left discord), i.e. $D_{A}(\rho_{AB})$. Decoherence is the main obstacle to preserving the superposition and hence quantum correlation in real quantum systems. 
Undesired leakage of the coherence of the system to the environment, due to the unavoidable interaction between quantum systems and their environment, leads to decoherence \cite{Schlosshauerbook2007}. Thermal decoherence plays a significant role in destroying the useful quantum correlation between the parts of quantum systems. Although investigating the decoherence process in thermal equilibrium is useful, real systems are not in equilibrium \cite{VedralJP2009} and hence the dynamical behavior of the systems under non-equilibrium conditions has to be elucidated. Furthermore, the formal analysis of open quantum dynamics is usually carried out in the Markovian framework, i.e., by assuming weak system-environment coupling and a forgetful nature of the environmental system. Despite its wide applicability, it should be kept in mind that Markovianity is only an approximation and real physical systems may not fulfill these conditions. This compels one to address the question of quantum feature survival in noisy as well as non-equilibrium conditions in the non-Markovian regime. In order to realize such systems, we consider the non-equilibrium dynamics of a system consisting of two coupled qubits in contact with different thermal baths. This is a system which is interesting both from a theoretical and an empirical point of view.
Because they interact with their bath in completely different manners, they experience different effects from the environment. Nowadays, with the aid of NMR and quantum optical techniques, it is possible to change the temperature of the nuclear spins without affecting the temperature of the electron spins \cite{StepankoPRL2006}. A system consisting of two spin-electrons confined in two coupled quantum dots \cite{LossPRA1998, DiVincenzoPRA1995} is another example. In this system, a qubit is represented by a single spin-electron confined in each quantum dot. These qubits can be initialized, manipulated, and read out by extremely sensitive devices. In comparison with quantum optical and NMR systems, such systems are more scalable and more robust to environmental effects.
Thus the inter-qubit interaction could be modeled by an anisotropic XY Heisenberg system in the presence of an inhomogeneous magnetic field, equipped with spin-orbit interaction in the form of the Dzyaloshinski-Moriya (DM) interaction \cite{KavokinPRB2001, DzyaloshinskiJPCS1958, MoriyaPRL1960}. In the following, the influence of the parameters of the system (i.e. magnetic field ($B$), inhomogeneity of magnetic field ($b$), partial anisotropy ($\chi$), mean coupling ($J$) and the spin-orbit interaction parameter ($D$)) and environmental parameters (i.e. temperatures $T_{1}$ and $T_{2}$, or equivalently $T_{M}$ and $\Delta T$, and the coupling strengths $\gamma_{1}$ and $\gamma_{2}$) on the amount of entanglement and discord of the system is studied. The results show that the dynamics of quantum correlations depends on the geometry of connection; in particular, the geometry of connection can highlight the asymmetric property of quantum discord. Also the results show that, for an exponentially damping memory kernel, there is a steady state in the asymptotically large time limit. The amount of both asymptotic entanglement and asymptotic discord decreases as the temperature increases, but for asymptotic discord sudden death does not occur; asymptotic discord descends exponentially with temperature, while the entanglement suddenly vanishes above a critical temperature, $T_{M}^{cr.}$. The results also reveal that the size of $T_{M}^{cr.}$ (the temperature above which the quantum entanglement ceases to exist) and the amount of both entanglement and discord can be improved by adjusting the value of the spin-orbit interaction parameter $D$. This parameter can be manipulated by adjusting the height of the barrier between the two quantum dots. The paper is organized as follows. In Sec. II, we introduce the Hamiltonian of the whole system-reservoir and then write the post-Markovian master equation governing the system by tracing out the reservoirs' degrees of freedom. 
Ultimately, for X-shaped initial states, the density matrix of the system at a later time is derived exactly. The effects of initial conditions and system parameters on the dynamics of entanglement and the entanglement of the asymptotic state of the system are presented in Sec. III. Finally, in Sec. IV a discussion concludes the paper. \section{THE MODEL AND HAMILTONIAN} A bipartite quantum system coupled to two reservoirs is described by the following Hamiltonian: \begin{eqnarray} \hat{H}=\hat{H}_{S}+\hat{H}_{B1}+\hat{H}_{B2}+\hat{H}_{SB1}+\hat{H}_{SB2},\label{total Hamiltonian} \end{eqnarray} where $\hat{H}_{S}$ is the Hamiltonian of the system, $\hat{H}_{Bj}$ is the Hamiltonian of the $j$th reservoir $(j=1, 2)$ and $\hat{H}_{SBj}$ denotes the interaction Hamiltonian of the system and the $j$th reservoir. According to the previous section, the system under consideration consists of two interacting electron spins confined in two coupled quantum dots. Thus the inter-qubit interaction includes spin-spin interaction and spin-orbit interaction (due to the orbital motion of the electrons). 
Such system could be described by a two-qubit anisotropic Heisenberg XY-model in the presence of an inhomogeneous magnetic field equipped by spin-orbit interaction with the following Hamiltonian (see \cite{HamidPRA2008} and references therein): \begin{equation} \hat{H}_{S}={\textstyle \frac{1}{2}}(J_{x}\,\sigma_{1}^{x}\sigma_{2}^{x}\,+J_{y}\,\sigma_{1}^{y}\sigma_{2}^{y}+\textbf{B}_{1}\cdot\boldsymbol{\sigma}_{1}+\textbf{B}_{2}\cdot\boldsymbol{\sigma}_{2}+\boldsymbol{D}.(\boldsymbol{\sigma}_{1}\times\boldsymbol{\sigma}_{2})),\label{system Hamiltonian} \end{equation} where $\boldsymbol{\sigma}_{j}=(\sigma_{j}^{x},\sigma_{j}^{y},\sigma_{j}^{z})$ is the vector of Pauli matrices, $\textbf{B}_{j}\,(j=1,2)$ is the magnetic field on site j, $J_{\mu}s\,(\mu=x,y)$ are the real coupling coefficients (the interaction is anti-ferromagnetic (AFM) for $J_{\mu}>0$ and ferromagnetic (FM) for $J_{\mu}<0$) and $\boldsymbol{D}$ is Dzyaloshinski-Moriya term of spin-orbit interaction. Reparametrizing the above Hamiltonian with $\textbf{B}_{j}=B_{j}\,\hat{\boldsymbol{z}}$ such that $B_{1}=B+b$ and $B_{2}=B-b$, where b is magnetic field inhomogeneity, and $J:=\frac{J_{x}+J_{y}}{2}$, as the mean coupling coefficient in the XY-plane, $\chi:=\frac{J_{x}-J_{y}}{J_{x}+J_{y}}$, as partial anisotropy, $-1\leq\chi\leq1$, and with the assumption $\boldsymbol{D}=J\, D\hat{\boldsymbol{\, z}}$ we have: \begin{eqnarray} \hat{H}_{S} & = & J\chi(\sigma_{1}^{+}\sigma_{2}^{+}+\sigma_{1}^{-}\sigma_{2}^{-})+J(1+i\, D)\,\sigma_{1}^{+}\sigma_{2}^{-}+J(1-i\, D)\,\sigma_{1}^{-}\sigma_{2}^{+}\nonumber \\ & + & (\frac{{B+b}}{2})\sigma_{1}^{z}+(\frac{{B-b}}{2})\sigma_{2}^{z},\label{system Hamiltonian1} \end{eqnarray} where $\sigma^{\pm}=\frac{1}{2}(\sigma^{x}\pm i\sigma^{y})$, denote the lowering and raising operators. 
The spectrum of $\hat {H}_{S}$, in the standard basis $\{\ket{00},\ket{01},\ket{10},\ket{11}\}$, is easily obtained as \begin{eqnarray} \begin{array}{l} \ket{\varepsilon_{1,2}}=\ket{\Psi^{\pm}}= \sin\theta_{\pm} e^{ i \phi}\ket{01}+\cos\theta_{\pm}\ket{10}\,,\,\;\;\;\;\varepsilon_{1,2}=\pm\xi\,,\\ \\ \ket{\varepsilon_{3,4}}=\ket{\Sigma^{\pm}}=\sin\theta'_{\pm}\ket{00}+\cos\theta'_{\pm}\ket{11}\,,\,\;\;\;\;\;\;\;\varepsilon_{3,4}=\pm\eta\,. \end{array}\label{spectrum} \end{eqnarray} Where $\tan\theta_{\pm}=\sqrt{\pm(\frac{\xi\pm b}{\xi \mp b})}$, $\tan\phi=D$ and $\tan\theta'_{\pm}=\sqrt{\pm(\frac{\eta\pm B}{\eta \mp B})}$ with $\xi=(b^{2}+J^{2}(1+D^{2}))^{1/2}$ and $\eta=(B^{2}+(J \chi)^2)^{1/2}$. The Hamiltonian of the reservoir coupled to jth spin are given by \begin{eqnarray} \hat{H}_{Bj}=\sum_{n}\omega_{n}\hat{a}_{nj}^{\dag}\hat{a}_{nj}\,,\label{Bath Hamiltonian} \end{eqnarray} where $\hat{a}_{nj}^{\dag}$ and $\hat{a}_{nj}$ are the creation and the annihilation operators of the jth bath mode, respectively. In the full dissipative regime and in the absence of dephasing processes the interaction between the system and the jth bath is governed by the following Hamiltonian\cite{HamidEPJD2010}: \begin{eqnarray} \hat{H}_{SBj}=(\sigma_{j}^{+}+\sigma_{j}^{-})(\sum_{n}g_{n}^{(j)}\hat{a}_{n,\, j}+g_{n}^{(j)*}\hat{a}_{n,\, j}^{\dag})=\sum_{\mu}(\hat{\Lambda}{}_{j,\,\mu}^{+}+\hat{\Lambda}{}_{j,\,\mu}^{-})(\hat{G}_{j,\,\mu}+\hat{G}_{j,\,\mu}^{\dagger}),\label{Interaction Hamiltonian} \end{eqnarray} The system operators $\hat{\Lambda}_{j,\,\mu}^{\pm}$ are chosen to satisfy $[\hat{H}_{S},\hat{\Lambda}_{j,\,\mu}^{\pm}]=\pm\omega_{j,\,\mu}\hat{\Lambda}_{j,\,\mu}^{\pm}$, and the $\textit{\^{G}}_{j,\,\mu}$'s are the random operators of reservoirs and act on the bath degrees of freedom. The Greek letter indexes are related to the transitions between the internal levels of the system induced by the bath. 
The irreversibility hypothesis implies that the evolution of the system does not influence the states of the reservoirs and the state of whole system+reservoirs is describing by, $\hat{\sigma}(t)=\hat{\rho}(t)\hat{\rho}_{B1}(0)\hat{\rho}_{B2}(0)$, where $\hat{\rho}(t)$ is the reduced density matrix describing the system and each bath is supposed to be in their thermal state at temperature $ T_j=\frac{1}{\beta_j}$, i.e. $\hat{\rho}_{Bj}=\emph{e}^{-\beta_{j}\hat{H}_{Bj}}/Z$, where $Z=Tr(\emph{e}^{-\beta_{j}\hat{H}_{Bj}})$ is the partition function of the jth bath. Dynamics of the reduced density matrix of system in the Post-Markov approximation is describing with the following master equation\cite{ShabaniPRA2005,DieterichNat2015, CampbellPRA2012, BudiniPRE2014, Sinaysky}: \begin{eqnarray} \frac{d\hat{\rho}}{dt}=-i[\hat{H}_{S},\hat{\rho}]+\mathcal{L}\int_{0}^{t}dt'[k(t')\exp(\mathcal{L}\, t')\hat{\rho}(t-t')],\label{master equation} \end{eqnarray} where $\mathcal{L}=\mathcal{L}_{1}+\mathcal{L}_{2}$ and $\mathcal{L}_{j}(\hat{\rho})\,(j=1,2)$ is \textit{dissipator} or \textit{Lindbladian} given by \begin{eqnarray} \mathcal{L}_{j}(\hat{\rho})\equiv\sum_{\mu,\,\nu}J_{\mu,\,\nu}^{(j)}(\omega_{j,\,\nu})\{[\hat{\Lambda}_{j,\,\mu}^{+},[\hat{\Lambda}_{j,\,\nu}^{-},\hat{\rho}]]-(1-\emph{e}^{\beta_{j}\omega_{j,\,\nu}})[\hat{\Lambda}_{j,\,\mu}^{+},\hat{\Lambda}_{j,\,\nu}^{-}\hat{\rho}]\}.\label{dissipiators 1} \end{eqnarray} Here $J_{\mu,\nu}^{(j)}(\omega_{j,\nu})$ is the spectral density of the jth reservoir given by: \begin{eqnarray} J_{\mu,\,\nu}^{(j)}(\omega_{j,\,\nu})=\int_{0}^{\infty}d\tau\emph{e}^{i\omega_{j,\,\nu}\tau} \langle\textit{\={G}}_{j,\,\mu}(\tau)\,\,\textit{\^G}_{j,\,\nu}\rangle_{\rho_{Bj}},\label{spectral density} \end{eqnarray} with $\textit{\={G}}_{j,\,\nu}(\tau)=\emph{e}^{-iH_{Bj}\tau}\textit{\^G}_{j,\,\mu}^{\,\,\,\dag}\emph{e}^{iH_{Bj}\tau}$. 
For the bosonic thermal bath modeled by an infinite set of harmonic oscillators, the Weisskpof-Wignner-like approximation implies that: $J^{(j)}(\omega_{\mu})=\gamma_{j}(\omega_{\mu})n_{j}(\omega_{\mu})$ with the property of $J^{(j)}(-\omega_{\mu})=\emph{e}^{\beta_{j}\omega_{\mu}}J^{(j)}(\omega_{\mu})$, where $n_{j}(\omega_{\mu})=(\emph{e}^{\beta_{j}\omega_{\mu}}-1)^{-1}$ is the thermal mean value of the number of excitation in the $j$th reservoir at frequency $\omega_{\mu}$ and $\gamma_{j}(\omega_{\mu})$ is the coupling coefficient of system and the $j$th reservoir. Thus, we can write: \begin{eqnarray} \mathcal{L}_{j}(\hat{\rho}) & = & \sum_{\mu=1}^{4}J^{(j)}(-\omega_{\mu})(2\hat{\Lambda}_{j,\,\mu}^{+}\hat{\rho}\hat{\Lambda}_{j,\,\mu}^{-}-\{\hat{\rho},\hat{\Lambda}_{j,\,\mu}^{-}\hat{\Lambda}_{j,\,\mu}^{+}\}_{+}))\nonumber \\ & + & \sum_{\mu=1}^{4}J^{(j)}(\omega_{\mu})(2\hat{\Lambda}_{j,\,\mu}^{-}\hat{\rho}\hat{\Lambda}_{j,\,\mu}^{+}-\{\hat{\rho},\hat{\Lambda}_{j,\,\mu}^{+}\hat{\Lambda}_{j,\,\mu}^{-}\}_{+})), \end{eqnarray} with the transition frequencies \begin{eqnarray} \omega_{1}=\xi-\eta=-\omega_{4},\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\ \omega_{2}=\xi+\eta=-\omega_{3}, \end{eqnarray} and the transition operators \begin{eqnarray} \hat{\Lambda}_{j,\,1}^{+} & = & c_{j,\,1}\outprod{\Psi^{+}}{\Sigma^{+}},\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\ \hat{\Lambda}_{j,\,2}^{+} = c_{j,\,2}\outprod{\Psi^{+}}{\Sigma^{-}}, \nonumber \\ \hat{\Lambda}_{j,\,3}^{+} & = & c_{j,\,3}\outprod{\Psi^{-}}{\Sigma^{+}},\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\,\ \hat{\Lambda}_{j,\,4}^{+}=c_{j,\,4}\outprod{\Psi^{-}}{\Sigma^{-}},\label{operators} \end{eqnarray} where \begin{eqnarray} \mid c_{j,\,1}\mid^{2} & = & \mid c_{j,\,4}\mid^{2}=\frac{1}{2\xi\eta}(\xi\eta+J^{2}\chi+(-1)^{j} B b),\nonumber \\ \mid c_{j,\,2}\mid^{2} & = & \mid c_{j,\,3}\mid^{2}=\frac{1}{2\xi\eta}(\xi\eta-J^{2}\chi-(-1)^{j} B b), \end{eqnarray} and $\hat{\Lambda}_{j,\,\mu}^{-}=(\hat{\Lambda}_{j,\,\mu}^{+})^\dagger$. 
Note that, the transition operators $\hat{\Lambda}_{j,\,\mu}^{\pm}$ defined in Eq. (\ref{operators}) just describe the energy exchange between the system and environment (dissipative coupling), including both excitation and de-excitation of the qubits. The absence of the transitions $\Sigma^{+}\leftrightarrow\Sigma^{-}$ and $\Psi^{+}\leftrightarrow\Psi^{-}$ stems in the omittance of the dephasing processes in the system-bath interaction Hamiltonian. In addition in the rest of the paper, the non-dispersive coupling coefficient is considered i.e. $\gamma_{j}(\omega_{\mu})=\gamma_{j}$. An analytical solution of master equation (\ref{master equation}) can be obtained by solving the eigenvalue equation $\mathcal{L}\rho=\lambda\rho$. In this order, the Lindblad superoperator diagonalized with the aid of its Jordan decomposition form $J$ with $\mathcal{L}=SJS^{-1}$. Fortunately, the master integro-differential equation (\ref{master equation}) has an important property, when the spectrum of $\hat{H}_{s}$ (see eq.(\ref{spectrum})) is non-degenerate, the equations for diagonal elements of density matrix decouple from non-diagonal ones \cite{Breuerbook2002}. Thus for the case $\xi\neq \eta$, where the spectrum (\ref{spectrum}) is not degenerate, we can consider them separately. The Lindbladian for diagonal terms can be written as a time independent $4\times4$ matrix in the energy basis $\{\ket{\varepsilon_{i}}\}_{i=1}^{4}$: \begin{eqnarray} \mathcal{L}^{diag}=\left({\begin{array}{cccc} -(X_{1}^{-}+Y_{2}^{-}) & 0 & X_{1}^{+} & Y_{2}^{+}\\ 0 & -(X_{1}^{+}+Y_{2}^{+}) & Y_{2}^{-} & X_{1}^{-}\\ X_{1}^{-} & Y_{2}^{+} & -(X_{1}^{+}+Y_{2}^{-}) & 0\\ Y_{2}^{-} & X_{1}^{+} & 0 & -(X_{1}^{-}+Y_{2}^{+}) \end{array}}\right),\label{B explicit form} \end{eqnarray} where \begin{eqnarray} X_{\mu}^{\pm}=2\sum_{j=1,2}J^{(j)}(\mp\omega_{\mu})\mid a_{j,\,1}\mid^{2},\nonumber \\ Y_{\mu}^{\pm}=2\sum_{j=1,2}J^{(j)}(\mp\omega_{\mu})\mid a_{j,\,2}\mid^{2}. 
\end{eqnarray} The Jordan form of this matrix can be obtained easily as: \begin{equation} \mathcal{L}^{diag}=SJ^{(d)}S^{-1},\nonumber \end{equation} with \begin{eqnarray*} S=\left({\begin{array}{cccc} \frac{Y_{2}^{+}}{Y_{2}^{-}} & \frac{Y_{2}^{+}}{Y_{2}^{-}} & -1 & -1\\ \frac{X_{1}^{-}}{X_{1}^{+}} & -1 & \frac{X_{1}^{-}}{X_{1}^{+}} & -1\\ \frac{X_{1}^{-}}{X_{1}^{+}}\frac{Y_{2}^{+}}{Y_{2}^{-}} & -\frac{Y_{2}^{+}}{Y_{2}^{-}} & -\frac{X_{1}^{-}}{X_{1}^{+}} & 1\\ 1 & 1 & 1 & 1 \end{array}}\right), \end{eqnarray*} and \begin{eqnarray*} J^{(d)}=diag[J_{11}^{(d)}=0,J_{22}^{(d)}=-X_{1},J_{33}^{(d)}=-Y_{2},J_{44}^{(d)}=-(X_{1}+Y_{2})]. \end{eqnarray*} Knowing the eigenvalues of Linbladian superoperator, $\lambda_{i}^{(d)}=J_{ii}^{d}$ and the memory kernel $k(t)$, the function $\xi_{i}^{(d)}(t)=\xi(\lambda_{i}^{(d)},t)=Lap^{-1}[\frac{1}{s-\lambda_{i}^{(d)}k(s-\lambda_{i}^{(d)})}]$ can be calculated. Thus the solution of the master equation yields the diagonal elements of the density matrix in the energy basis as: \begin{equation} R(t)=S\, diag(\xi(J_{11}^{(d)},t),\xi(J_{22}^{(d)},t),\xi(J_{33}^{(d)},t),\xi(J_{44}^{(d)},t))\, S^{-1}R(0)=P(t)\, R(0),\nonumber \end{equation} where $R(t)=(\rho_{11}(t),\rho_{22}(t),\rho_{33}(t),\rho_{44}(t))^{T}$. In the energy basis, the Lindbladian corresponding to the non-diagonal elements in master equation (\ref{master equation}) is in Jordan( diagonal) form: \begin{equation} \mathcal{L}^{nondiag}=J^{(n)}=diag(J_{11}^{(n)},J_{22}^{(n)},J_{33}^{(n)},J_{44}^{(n)})=diag(-2i\xi,2i\xi,-2i\eta,2i\eta)-\half(X_{1}+Y_{2})I,\nonumber \end{equation} where $I$ denotes a $4\times4$ identity matrix. Thus the eigenvalues of Lindbladian of non-diagonal elements is determined as $\lambda_{i}^{(n)}=J_{ii}^{(n)}$ and hence the function $\xi_{i}^{(n)}(t)=\xi(J_{ii}^{(n)},t)=Lap^{-1}[\frac{1}{s-J_{ii}^{(n)}k(s-J_{ii}^{(n)})}]$ can be obtained. 
The non-diagonal elements of density matrix in the later time and in the energy basis can be calculated as: \begin{equation} Q(t)=diag(\xi(J_{11}^{(n)},t),\xi(J_{22}^{(n)},t),\xi(J_{33}^{(n)},t),\xi(J_{44}^{(n)},t))\, Q(0),\nonumber \end{equation} with $Q(t)=(\rho_{12}(t),\rho_{21}(t),\rho_{34}(t),\rho_{43}(t))^{T}$. Now, the dynamics of reduced density operator of system is determined if the memory function (kernel) is determined. In the following we assume an exponentially damping function for the kernel with the form: \begin{equation} k(t)=\gamma_{0}e^{-\gamma_{0}t},\label{kernel} \end{equation} where $\gamma_{0}^{-1}$ denotes the characteristic time of the environment's memory function (also called ``coarse-graining time''). Therefore we have $\xi(\lambda_{i},t)=\frac{\gamma_{0}e^{\lambda_{i}t}+\lambda_{i}e^{-\gamma_{0}t}}{\lambda_{i}+\gamma_{0}}.$ Consequently, the diagonal terms of density matrix in the energy basis can be obtained as: \begin{eqnarray} \rho_{i\, i}(t)=\sum_{j=1}^{4}p_{i\, j}\,\rho_{j\, j}(0),\label{R t} \end{eqnarray} where $p_{ij}$ are elements of matrix $P(t)=[p_{ij}]_{4\times4}$ and are given in the appendix A, explicitly. The non-diagonal element of density matrix in the energy basis can be written as: \begin{eqnarray}\label{nondiagonal} \,\rho_{1\,2}(t)=\frac{e^{-t\gamma_{0}}\left(X_{1}+Y_{2}-2(e^{-\half t(X_{1}+Y_{2}-2(\gamma_{0}-2i\xi))}\gamma_{0}-2i\xi)\right)}{X_{1}+Y_{2}-2(\gamma_{0}-2i\xi)}\rho_{1\,2}(0),\,\,\,\,\,\,\,\,\rho_{2\,1}(t)=\rho_{1\,2}^{*}(t),\nonumber \\ \rho_{3\,4}(t)=\frac{e^{-t\gamma_{0}}\left(X_{1}+Y_{2}-2(e^{-\half t(X_{1}+Y_{2}-2(\gamma_{0}-2i\eta))}\gamma_{0}-2i\eta)\right)}{X_{1}+Y_{2}-2(\gamma_{0}-2i\eta)}\rho_{3\,4}(0),\,\,\,\,\,\,\,\,\rho_{4\,3}(t)=\rho_{3\,4}^{*}(t).\\ \nonumber \end{eqnarray} The spectrum (\ref{spectrum})becomes degenerate at $\xi=\eta$, for which the above solution is not valid. The state of the system is not well defined at this critical point. 
This critical point assigns a critical value for the parameters of the system such as the critical magnetic field ($B_{c}$), the critical parameter of inhomogeneity of magnetic field ($b_{c}$), the critical spin-orbit interaction parameter ($D_{c}$), etc. Indeed a quantum phase transition may occur at this critical point and hence the amount of quantum correlation of the system changes abruptly when the parameters cross their critical values. The behavior of thermal entanglement at this point is studied in \cite{HamidPRA2008}. For the case of memory-less evolution {\it i.e.} $ k(t)=\delta(t) $ or $ \gamma \rightarrow 0 $ and also for asymptotically large times {\it i.e.} $t \rightarrow \infty$ the evolution reduces to the Markovian case. \subsection*{Asymptotic case} The decoherence induced by the environments does not prevent the creation of a steady state level of quantum correlation, regardless of the initial state of the system. Due to Eq. (\ref{kernel}) the effects of memory decrease with time and hence the evolution becomes Markovian in the large time limit. In the large time limit, the non-diagonal elements (\ref{nondiagonal}) vanish and $\hat{\rho}(t)$ converges to a diagonal density matrix (in the energy basis): \begin{eqnarray} \hat{\rho}^{\infty}=\hat{\rho}_{asymptotic}=\lim_{t\rightarrow\infty}\hat{\rho}(t)=\frac{1}{X_{1}Y_{2}}\,\,\,\textrm{diagonal}(X_{1}^{+}Y_{2}^{+},X_{1}^{-}Y_{2}^{-},X_{1}^{-}Y_{2}^{+},X_{1}^{+}Y_{2}^{-}),\label{rho asym1 } \end{eqnarray} which is time independent. Therefore, there is a stationary state to which the system tends asymptotically. This asymptotic state is independent of the initial conditions due to the forgetful nature of the environment in the Markovian regime. There is an interesting limiting case for which the coupled quantum dots are in contact with the reservoirs at identical temperatures ($\beta_{1}=\beta_{2}=\beta$). 
In this case, it is easy to show that the reduced density matrix $\hat{\rho}^{\infty}$ takes the thermodynamic canonical form for a system described by the Hamiltonian $\hat{H}_{S}$ at temperature $T=\beta^{-1}$ \textit{i.e.} $\hat{\rho}^{\infty}(\Delta T=0)\equiv\hat{\rho}_{T}=\frac{e^{-\beta H_{S}}}{Z}$, where $Z=Tr(e^{-\beta H_{S}})$ is the partition function. Thermal entanglement and thermal discord properties of such systems have been studied substantially in Refs. \cite{HamidPRA2008, WerlangPRA2010}, respectively. \section{Results and Discussion} Knowing the density matrix, one can calculate the concurrence, as a measure of entanglement, and the quantum discord, as a measure of quantum correlation. Evidently, the results depend on the parameters involved. This prevents one from writing an analytic expression for the concurrence and/or the discord, but it is possible to calculate them for a given set of the parameters. The influence of a parameter on the dynamical and asymptotic behavior of quantum correlations can be studied by plotting their variation versus that parameter while the other parameters are fixed. The results are depicted in Figures \ref{figure1}-\ref{figure8}. Figs. \ref{figure1}-\ref{figure6} compare the time evolution of the concurrence and the quantum discord, and Figs. \ref{figure7} and \ref{figure8} depict the asymptotic concurrence and quantum discord versus the system and environment parameters. The results of Figs. \ref{figure1}-\ref{figure6} show that all types of the considered quantum correlations reach a steady value after some coherent oscillations. These coherent oscillations are due to the competition of the unitary and dissipative terms in the master equation (\ref{master equation}). Due to the kernel (\ref{kernel}), the environment loses its memory during the evolution and hence the dynamics tends to the Markovian case in the asymptotically large time limit. 
Figures \ref{figure1}-\ref{figure3} depict the influence of the spin-orbit parameter $D$ on the post-Markovian dynamics of the concurrence, left and right discord, respectively, for the maximally entangled initial state $\ket{\psi^+}=\frac{1}{\sqrt{2}}(\ket{01}+\ket{10})$. The results show that increasing $D$ improves the amount of steady state quantum correlation. Figures \ref{figure4}-\ref{figure6} show the dynamics of the concurrence, left and right discord, respectively, in different dynamical regimes for maximally entangled and also for nonzero-discord separable initial states. The results show that the initial coherent oscillations are an indicator of the Markovianity of the evolution and disappear in the non-Markovian regime; also, the steady state level of quantum correlation is achieved at earlier times in the Markovian case. Figure \ref{figure7} illustrates the asymptotic quantum correlation vs. temperature when the two baths are held at the same temperature. This figure reveals that the asymptotic entanglement vanishes above a critical temperature (entanglement sudden death). But quantum discord sudden death never occurs. This is due to the fact that the set of zero-discord states has no volume in the state space \textit{i.e.} almost all quantum states possess quantum discord \cite{FerraroPRA2010}. Since each qubit experiences a different magnetic field, the symmetry of the asymptotic right and left discords is broken for $T\geq 0$. The variation of the asymptotic quantum correlation is depicted in Figure \ref{figure8} for different ways of connection. Because each qubit is held at its own temperature and experiences a different magnetic field, there are two different ways of connecting the quantum dots to their baths \cite{HamidEPJD2010}: (i) \textquotedbl{}\textit{direct geometry}\textquotedbl{}; where a high temperature bath couples to the quantum dot which is in the stronger magnetic field i.e. 
$b\Delta T>0$ and (ii) \textquotedbl{}\textit{indirect geometry}\textquotedbl{}; where a high temperature bath couples to the quantum dot which is in the weaker magnetic field i.e. $b\Delta T<0$. The results show that the inhomogeneity of the magnetic field removes the degeneracy of the left and right discord, and the amount of this symmetry breaking depends on the temperature difference between the baths. This figure reveals that, if the measurement is performed on the qubit which is in the stronger magnetic field, a higher amount of asymptotic quantum discord can be achieved. So, the geometry of connection determines the amount of asymptotic quantum correlation and hence is important. \section{Conclusion} The dynamics of non-equilibrium thermal entanglement and thermal discord of an open two-qubit system is investigated. The inter-qubit interaction is considered as the Heisenberg interaction in the presence of an inhomogeneous magnetic field and spin-orbit interaction, arising from the Dzyaloshinski-Moriya (DM) anisotropic anti-symmetric interaction. Each qubit interacts with a separate thermal reservoir which is held at its own temperature. For a physical realization of the model we refer to the spin states of two electrons which are confined in two coupled quantum dots, respectively. The dots are assumed to be biased via different sources and drains and hence experience different environments. The effects of the parameters of the model, including the parameters of the system (especially, the parameter of the spin-orbit interaction, $D$, and the magnetic field inhomogeneity, $b$) and environmental parameters (particularly, the mean temperature $T_{M}$ and the temperature difference $\Delta T$), on the dynamics of the system are investigated, by solving the quantum Markov-Born master equation of the system. Tracing the dynamics of the system allowed us to distinguish between the quantum correlation produced by the inter-qubit interaction and/or by the environment. 
Decoherence induced by the thermal baths competes with the inter-qubit interaction terms, leading the system to evolve to an asymptotic steady state. The size of the entanglement and discord of this steady state and also the dynamical behavior of the entanglement depend on the parameters of the model and also on the geometry of connection. The results reveal that increasing the size of the DM interaction, $D$, enhances the amount of all asymptotic quantum correlation measures. Also the results show that the asymptotic entanglement of the system dies above a critical temperature $T_{cr.}$ and entanglement sudden death occurs. The size of $T_{cr.}$ and the amount of asymptotic entanglement can be enhanced by choosing a suitable value of $D$ and the temperature difference $\Delta T$. On the other hand, the results show that thermal discord can survive at higher temperatures than thermal entanglement and quantum discord sudden death does not occur. Also, introducing magnetic field inhomogeneity breaks the symmetry between the left and right discord. The results show that if the magnetic field applied on the right (left) qubit is greater, then the size of the right (left) discord is higher. Furthermore, we find that choosing a proper geometry of connection is important for creating and maintaining the quantum correlation. \begin{acknowledgments} The author wishes to thank The Office of Graduate Studies and Research Vice President of The University of Isfahan for their support. 
\end{acknowledgments} \appendix \section{elements of marix P} The elements of matrix $P(t)=[p_{ij}]_{4\times4}$ in the equation (\ref{R t}) can be written as follow: \begin{eqnarray} p_{1\,1} & = & \frac{1}{X_{1}Y_{2}}[\frac{X_{1}^{-}Y_{2}^{+}(e^{-t\gamma_{0}}X_{1}-e^{-tX_{1}}\gamma_{0})}{X_{1}-\gamma_{0}}+\frac{X_{1}^{+}Y_{2}^{-}\left(e^{-t\gamma_{0}}Y_{2}-e^{-tY_{2}}\gamma_{0}\right)}{Y_{2}-\gamma_{0}}\nonumber\\ & + & \frac{e^{-t\gamma_{0}}X_{1}^{-}Y_{2}^{-}\left(X_{1}+Y_{2}-e^{-t\left(X_{1}+Y_{2}-\gamma_{0}\right)}\gamma_{0}\right)}{X_{1}+Y_{2}-\gamma_{0}}+X_{1}^{+}Y_{2}^{+}],\nonumber\\ p_{1\,2} & = & \frac{X_{1}^{+}Y_{2}^{+}}{X_{1}Y_{2}}[\frac{e^{-t\gamma_{0}}X_{1}Y_{2}(X_{1}+Y_{2}-2\gamma_{0})}{(X_{1}-\gamma_{0})(X_{1}+Y_{2}-\gamma_{0})(\gamma_{0}-Y_{2})}+\frac{e^{-tX_{1}}\gamma_{0}}{X_{1}-\gamma_{0}}+\frac{e^{-tY_{2}}\gamma_{0}}{Y_{2}-\gamma_{0}}\nonumber\\ & - & \frac{e^{-t(X_{1}+Y_{2})}\gamma_{0}}{X_{1}+Y_{2}-\gamma_{0}}+1]\nonumber\\ p_{1\,3} & = & \frac{X_{1}^{+}}{X_{1}Y_{2}}[-\frac{\left(e^{-tX_{1}}\gamma_{0}-e^{-t\gamma_{0}}X_{1}\right)Y_{2}^{+}}{\gamma_{0}-X_{1}}+\frac{Y_{2}^{-}\left(e^{-tY_{2}}\gamma_{0}-e^{-t\gamma_{0}}Y_{2}\right)}{\gamma_{0}-Y_{2}}\nonumber\\ & + & \frac{e^{-t\gamma_{0}}Y_{2}^{-}\left(X_{1}+Y_{2}-e^{-t(X_{1}+Y_{2}-\gamma_{0})}\gamma_{0}\right)}{-X_{1}-Y_{2}+\gamma_{0}}+Y_{2}^{+}],\nonumber\\ p_{1\,4} & = & \frac{Y_{2}^{+}}{X_{1}Y_{2}}[-\frac{\left(e^{-tY_{2}}\gamma_{0}-e^{-t\gamma_{0}}Y_{2}\right)X_{1}^{+}}{\gamma_{0}-Y_{2}}+\frac{X_{1}^{-}\left(e^{-tX_{1}}\gamma_{0}-e^{-t\gamma_{0}}X_{1}\right)}{\gamma_{0}-X_{1}}\nonumber\\ & + & \frac{e^{-t\gamma_{0}}X_{1}^{-}\left(X_{1}+Y_{2}-e^{-t(X_{1}+Y_{2}-\gamma_{0})}\gamma_{0}\right)}{-X_{1}-Y_{2}+\gamma_{0}}+X_{1}^{+}],\nonumber\\ p_{2\,1} & = & 
\frac{X_{1}^{-}Y_{1}^{-}}{X_{1}Y_{2}}[\frac{e^{-t\gamma_{0}}X_{1}Y_{2}(X_{1}+Y_{2}-2\gamma_{0})}{(X_{1}-\gamma_{0})(X_{1}+Y_{2}-\gamma_{0})(\gamma_{0}-Y_{2})}+\frac{e^{-tX_{1}}\gamma_{0}}{X_{1}-\gamma_{0}}+\frac{e^{-tY_{2}}\gamma_{0}}{Y_{2}-\gamma_{0}}\nonumber\\ & - & \frac{e^{-t(X_{1}+Y_{2})}\gamma_{0}}{X_{1}+Y_{2}-\gamma_{0}}+1],\nonumber\\ p_{2\,2} & = & \frac{1}{X_{1}Y_{2}}[\frac{X_{1}^{+}Y_{2}^{-}(e^{-t\gamma_{0}}X_{1}-e^{-tX_{1}}\gamma_{0})}{X_{1}-\gamma_{0}}+\frac{X_{1}^{-}Y_{2}^{+}\left(e^{-t\gamma_{0}}Y_{2}-e^{-tY_{2}}\gamma_{0}\right)}{Y_{2}-\gamma_{0}}\nonumber\\ & + & \frac{e^{-t\gamma_{0}}X_{1}^{+}Y_{2}^{+}\left(X_{1}+Y_{2}-e^{-t\left(X_{1}+Y_{2}-\gamma_{0}\right)}\gamma_{0}\right)}{X_{1}+Y_{2}-\gamma_{0}}+X_{1}^{-}Y_{2}^{-}],\nonumber\\ p_{2\,3} & = & \frac{Y_{2}^{-}}{X_{1}Y_{2}}[\frac{X_{1}^{+}\left(e^{-tX_{1}}\gamma_{0}-e^{-t\gamma_{0}}X_{1}\right)}{\gamma_{0}-X_{1}}-\frac{\left(e^{-tY_{2}}\gamma_{0}-e^{-t\gamma_{0}}Y_{2}\right)X_{1}^{-}}{\gamma_{0}-Y_{2}}\nonumber\\ & + & \frac{e^{-t\gamma_{0}}X_{1}^{+}\left(X_{1}+Y_{2}-e^{-t(X_{1}+Y_{2}-\gamma_{0})}\gamma_{0}\right)}{-X_{1}-Y_{2}+\gamma_{0}}+X_{1}^{-}],\nonumber\\ p_{2\,4} & = & \frac{X_{1}^{-}}{X_{1}Y_{2}}[-\frac{\left(e^{-tX_{1}}\gamma_{0}-e^{-t\gamma_{0}}X_{1}\right)Y_{2}^{-}}{\gamma_{0}-X_{1}}+\frac{Y_{2}^{+}\left(e^{-tY_{2}}\gamma_{0}e^{-t\gamma_{0}}Y_{2}\right)}{\gamma_{0}-Y_{2}}\nonumber\\ & + & \frac{e^{-t\gamma_{0}}Y_{2}^{+}\left(X_{1}+Y_{2}-e^{-t(X_{1}+Y_{2}-\gamma_{0})}\gamma_{0}\right)}{-X_{1}-Y_{2}+\gamma_{0}}+Y_{2}^{-}],\nonumber \end{eqnarray} \begin{eqnarray} p_{3\,1} & = & \frac{X_{1}^{-}}{X_{1}Y_{2}}[-\frac{\left(e^{-tX_{1}}\gamma_{0}-e^{-t\gamma_{0}}X_{1}\right)Y_{2}^{+}}{\gamma_{0}-X_{1}}+\frac{Y_{2}^{-}\left(e^{-tY_{2}}\gamma_{0}-e^{-t\gamma_{0}}Y_{2}\right)}{\gamma_{0}-Y_{2}}\nonumber \\ & + & \frac{e^{-t\gamma_{0}}Y_{2}^{-}\left(X_{1}+Y_{2}-e^{-t(X_{1}+Y_{2}-\gamma_{0})}\gamma_{0}\right)}{-X_{1}-Y_{2}+\gamma_{0}}+Y_{2}^{+}],\nonumber \\ p_{3\,2} & = & 
\frac{Y_{2}^{+}}{X_{1}Y_{2}}[\frac{X_{1}^{+}\left(e^{-tX_{1}}\gamma_{0}-e^{-t\gamma_{0}}X_{1}\right)}{\gamma_{0}-X_{1}}-\frac{\left(e^{-tY_{2}}\gamma_{0}-e^{-t\gamma_{0}}Y_{2}\right)X_{1}^{-}}{\gamma_{0}-Y_{2}}\nonumber \\ & + & \frac{e^{-t\gamma_{0}}X_{1}^{+}\left(X_{1}+Y_{2}-e^{-t(X_{1}+Y_{2}-\gamma_{0})}\gamma_{0}\right)}{-X_{1}-Y_{2}+\gamma_{0}}+X_{1}^{-}],\nonumber \\ p_{3\,3} & = & \frac{1}{X_{1}Y_{2}}[\frac{X_{1}^{+}Y_{2}^{+}(e^{-t\gamma_{0}}X_{1}-e^{-tX_{1}}\gamma_{0})}{X_{1}-\gamma_{0}}+\frac{X_{1}^{-}Y_{2}^{-}\left(e^{-t\gamma_{0}}Y_{2}-e^{-tY_{2}}\gamma_{0}\right)}{Y_{2}-\gamma_{0}}\nonumber \\ & + & \frac{e^{-t\gamma_{0}}X_{1}^{+}Y_{2}^{-}\left(X_{1}+Y_{2}-e^{-t\left(X_{1}+Y_{2}-\gamma_{0}\right)}\gamma_{0}\right)}{X_{1}+Y_{2}-\gamma_{0}}+X_{1}^{-}Y_{2}^{+}],\nonumber \\ p_{3\,4} & = & \frac{X_{1}^{-}Y_{2}^{+}}{X_{1}Y_{2}}[\frac{e^{-t\gamma_{0}}X_{1}Y_{2}(X_{1}+Y_{2}-2\gamma_{0})}{(X_{1}-\gamma_{0})(X_{1}+Y_{2}-\gamma_{0})(\gamma_{0}-Y_{2})}+\frac{e^{-tX_{1}}\gamma_{0}}{X_{1}-\gamma_{0}}+\frac{e^{-tY_{2}}\gamma_{0}}{Y_{2}-\gamma_{0}}\nonumber \\ & - & \frac{e^{-t(X_{1}+Y_{2})}\gamma_{0}}{X_{1}+Y_{2}-\gamma_{0}}+1]\nonumber \\ p_{4\,1} & = & \frac{Y_{2}^{-}}{X_{1}Y_{2}}[-\frac{\left(e^{-tY_{2}}\gamma_{0}-e^{-t\gamma_{0}}Y_{2}\right)X_{1}^{+}}{\gamma_{0}-Y_{2}}+\frac{X_{1}^{-}\left(e^{-tX_{1}}\gamma_{0}-e^{-t\gamma_{0}}X_{1}\right)}{\gamma_{0}-X_{1}}\nonumber \\ & + & \frac{e^{-t\gamma_{0}}X_{1}^{-}\left(X_{1}+Y_{2}-e^{-t(X_{1}+Y_{2}-\gamma_{0})}\gamma_{0}\right)}{-X_{1}-Y_{2}+\gamma_{0}}+X_{1}^{+}],\nonumber \\ p_{4\,2} & = & \frac{X_{1}^{+}}{X_{1}Y_{2}}[-\frac{\left(e^{-tX_{1}}\gamma_{0}-e^{-t\gamma_{0}}X_{1}\right)Y_{2}^{-}}{\gamma_{0}-X_{1}}+\frac{Y_{2}^{+}\left(e^{-tY_{2}}\gamma_{0}e^{-t\gamma_{0}}Y_{2}\right)}{\gamma_{0}-Y_{2}}\nonumber \\ & + & \frac{e^{-t\gamma_{0}}Y_{2}^{+}\left(X_{1}+Y_{2}-e^{-t(X_{1}+Y_{2}-\gamma_{0})}\gamma_{0}\right)}{-X_{1}-Y_{2}+\gamma_{0}}+Y_{2}^{-}],\nonumber \\ p_{4\,3} & = & 
\frac{X_{1}^{+}Y_{2}^{-}}{X_{1}Y_{2}}[\frac{e^{-t\gamma_{0}}X_{1}Y_{2}(X_{1}+Y_{2}-2\gamma_{0})}{(X_{1}-\gamma_{0})(X_{1}+Y_{2}-\gamma_{0})(\gamma_{0}-Y_{2})}+\frac{e^{-tX_{1}}\gamma_{0}}{X_{1}-\gamma_{0}}+\frac{e^{-tY_{2}}\gamma_{0}}{Y_{2}-\gamma_{0}}\nonumber \\ & - & \frac{e^{-t(X_{1}+Y_{2})}\gamma_{0}}{X_{1}+Y_{2}-\gamma_{0}}+1]\nonumber \\ p_{4\,4} & = & \frac{1}{X_{1}Y_{2}}[\frac{X_{1}^{-}Y_{2}^{-}(e^{-t\gamma_{0}}X_{1}-e^{-tX_{1}}\gamma_{0})}{X_{1}-\gamma_{0}}+\frac{X_{1}^{+}Y_{2}^{+}\left(e^{-t\gamma_{0}}Y_{2}-e^{-tY_{2}}\gamma_{0}\right)}{Y_{2}-\gamma_{0}}\nonumber \\ & + & \frac{e^{-t\gamma_{0}}X_{1}^{-}Y_{2}^{+}\left(X_{1}+Y_{2}-e^{-t\left(X_{1}+Y_{2}-\gamma_{0}\right)}\gamma_{0}\right)}{X_{1}+Y_{2}-\gamma_{0}}+X_{1}^{+}Y_{2}^{-}].\label{p i j} \end{eqnarray} Here we have defined $X_{\mu}=X_{\mu}^{+}+X_{\mu}^{-}$ and $Y_{\mu}=Y_{\mu}^{+}+Y_{\mu}^{-}$. {} \begin{figure} \caption{(Color online): Dynamical behavior of the concurrence for a maximally entangled initial state for different values of $D$ around $D_c \approx 1.68$. Here $J = 1 $, $\chi = 0.9 $, $B = 2 $, $b = 1 $, $T_ 1 = 1.25 $, $T_ 2 = 0.75 $ and $\frac{\gamma_0} \label{figure1} \end{figure} \begin{figure} \caption{(Color online): Dynamical behavior of the left discord for a maximally entangled initial state for different values of $D$ around $D_c \approx 1.68 $. Here $J = 1 $, $\chi = 0.9 $, $B = 2 $, $b = 1 $, $T_ 1 = 1.25 $, $T_ 2 = 0.75 $ and $\frac{\gamma_0} \label{figure2} \end{figure} \begin{figure} \caption{(Color online): Dynamical behavior of the right discord for a maximally entangled initial state for different values of $D$ around $D_c \approx 1.68 $. 
Here $J = 1 $, $\chi = 0.9 $, $B = 2 $, $b = 1 $, $T_ 1 = 1.25 $, $T_ 2 = 0.75 $ and $\frac{\gamma_0} \label{figure3} \end{figure} \begin{figure} \caption{(Color online): Dynamics of the concurrence for different values of $\frac{\gamma_0} \label{figure4} \end{figure} \begin{figure} \caption{(Color online): Dynamics the left discord for different values of $\frac{\gamma_0} \label{figure5} \end{figure} \begin{figure} \caption{(Color online): Dynamics of the right discord for different values of $\frac{\gamma_0} \label{figure6} \end{figure} \begin{figure} \caption{(Color online): (Color online) The asymptotic concurrence (black solid line), the asymptotic left discord (red dotted line) and the asymptotic \ right discord (blue dashed line) vs. temperature, $T=T_1=T_2$. Inset : $D_B^\infty - D_A^\infty$ vs. $T$. Here $J = 1 $, $\chi = 0.9 $, $B = b = 2 $ and $D=2$. All parameters are dimensionless. } \label{figure7} \end{figure} \begin{figure} \caption{(Color online): Influence of geometry of connection on the asymptotic quantum correlations : the asymptotic concurrence (black solid line), the asymptotic left discord (red dotted line) and the asymptotic right discord (blue dashed line). Insets : $D_B^\infty-D_A^\infty$ vs. b for different values of $\Delta T$. The last graph depicts the behavior of the asymptotic concurrence (black solid line) and the averaged asymptotic discord ($\frac {D_B^\infty + D_A^\infty} \label{figure8} \end{figure} \end{document}
\begin{document} \title{Finite 2-distance transitive graphs} \begin{abstract} A non-complete graph $\Gamma$ is said to be $(G,2)$-distance transitive if $G$ is a subgroup of the automorphism group of $\Gamma$ that is transitive on the vertex set of $\Gamma$, and for any vertex $u$ of $\Gamma$, the stabilizer $G_u$ is transitive on the sets of vertices at distance~1 and~2 from $u$. This paper investigates the family of $(G,2)$-distance transitive graphs that are not $(G,2)$-arc transitive. Our main result is the classification of such graphs of valency not greater than~5. \end{abstract} \section{Introduction} Graphs that satisfy certain symmetry conditions have been a focus of research in algebraic graph theory. We usually measure the degree of symmetry of a graph by studying if the automorphism group is transitive on certain natural sets formed by combining vertices and edges. For instance, $s$-arc transitivity requires that the automorphism group should be transitive on the set of $s$-arcs (see Section~\ref{sect:def} for precise definitions). The class of $s$-arc transitive graphs have been studied intensively, beginning with the seminal result of Tutte \cite{Tutte-1} that cubic $s$-arc transitive graphs must have $s\leqslant 5$. Later, in 1981, Weiss \cite{weiss}, using the finite simple group classification, showed that there are no $8$-arc transitive graphs of valency at least 3. For a survey on $s$-arc transitive graphs, see~\cite{seress}. Recently, several papers have considered conditions on undirected graphs that are similar to, but weaker than, $s$-arc transitivity. For examples of such conditions, we mention local $s$-arc transitivity, local $s$-distance transitivity, $s$-geodesic transitivity, and $2$-path transitivity. Devillers et al.~\cite{locallysdist} studied the class of locally $s$-distance transitive graphs, using the normal quotient strategy developed for $s$-arc transitive graphs in~\cite{Praeger-4}. 
The condition of $s$-geodesic transitivity was investigated in several papers~\cite{DJLP-2,DJLP-prime,DJLP-compare}. A characterization of $2$-path transitive, but not $2$-arc transitive graphs was given by Li and Zhang~\cite{LZ-2path-2013}. In this paper we study the class of $2$-distance transitive graphs. If $G$ is a subgroup of the automorphism group of a graph $\Gamma$, then $\Gamma$ is said to be $(G,2)$-distance transitive if $G$ acts transitively on the vertex set of $\Gamma$, and a vertex stabilizer $G_u$ is transitive on the neighborhood $\Gamma(u)$ of $u$ and on the second neighborhood $\Gamma_2(u)$ (see Section~\ref{sect:def}). The class of $(G,2)$-distance transitive graphs is larger than the class of $(G,2)$-arc transitive graphs, and in this paper we study the $(G,2)$-distance transitive graphs that are not $(G,2)$-arc transitive. Our first theorem links the structure of $(G,2)$-distance transitive, but not $(G,2)$-arc transitive graphs to their valency and the value of the constant $c_2$ in the intersection array (see Definition \ref{definition: intersectionarray}). \begin{theorem}\label{thm:valency stuff} Let $\Gamma$ be a connected $(G,2)$-distance transitive, but not $(G,2)$-arc transitive graph of girth $4$ and valency $k\geqslant 3$. Then $2\leqslant c_2\leqslant k-1$ and the following are valid. \begin{enumerate} \item If $c_2=k-1$, then $\Gamma \cong \gridcomp{(k+1)}$ and $G$ satisfies Condition~\ref{gridcondition}. \item If $c_2=2$, then $k$ is a prime-power such that $k\equiv 3 \pmod 4$ and $G_u$ acts $2$-homogeneously, but not $2$-transitively on $\Gamma(u)$ for each $u\in V\Gamma$. \end{enumerate} \end{theorem} The following corollary is a characterization of the family of connected $(G,2)$-distance transitive, but not $(G,2)$-arc transitive graphs of girth $4$ and prime valency. 
\begin{corollary}\label{thm:primeval} Let $\Gamma$ be a connected $(G,2)$-distance transitive, but not $(G,2)$-arc transitive graph of girth $4$ and prime valency $p$, and let $u\in V \Gamma$. Then the following are valid. \begin{enumerate} \item Either $\Gamma\cong \overline{\grid 2{(p+1)}}$, or $c_2 |p-1$ and $2\leqslant c_2\leqslant (p-1)/2$. \item If $ c_2=2$, then $p\equiv 3 \pmod 4$ and $G_u$ is $2$-homogeneous, but not $2$-transitive on $\Gamma(u)$. \item If $ c_2=(p-1)/2$, then $|\Gamma_2(u)|=2p$, and $G_u$ is imprimitive on $\Gamma_2(u)$. \end{enumerate} \end{corollary} Finally, our third main result determines all the possible $(G,2)$-distance transitive, but not $(G,2)$-arc transitive graphs of valency at most 5. \begin{theorem}\label{thm:small val} Let $\Gamma$ be a connected $(G,2)$-distance transitive, but not $(G,2)$-arc transitive graph of valency $k\leqslant 5$. Then $\Gamma$ and $G$ must be as in one of the rows of Table~\ref{maintable}. \end{theorem} { \begin{table} \begin{center} \begin{tabular}{|l|c|c|l|l|} \hline $\Gamma$ & valency & girth & $G$ & Reference \\ \hline $\gridcomp{4}$ & $3$ & $4$ & satisfies Condition~\ref{gridcondition} & Section \ref{sec:gridcomp} \\ \hline Octahedron & $4$ & $3$ & {\setlength\extrarowheight{0pt}\begin{tabular}{l} $G\leqslant S_2\wr S_3$,\\ $|S_2\wr S_3:G|\in\{1,2\}$,\\ $G$ projects onto $S_3$ \end{tabular}} & Lemma \ref{lem:Octahedron} \\ \hline $\Hamming(2,3)$ & $4$ & $3$ & {\setlength\extrarowheight{0pt}\begin{tabular}{l} $G\leqslant S_3\wr S_2$,\\ $|S_3\wr S_2:G|\in\{1,2\}$\\ $G$ projects onto $S_2$ \end{tabular}} & Proposition \ref{2dtval4-girth3}\\ \hline {\setlength\extrarowheight{0pt}\begin{tabular}{l} the line graph of a connected\\ $(G,3)$-arc transitive graph \end{tabular}} & 4 & 3 & & Proposition \ref{2dtval4-girth3} \\ \hline $\gridcomp 5$ & $4$ & $4$ & satisfies Condition~\ref{gridcondition} & Section \ref{sec:gridcomp}\\ \hline Icosahedron & $5$ & $3$ & $G\in\{A_5,A_5\times C_2\}$ & Lemma 
\ref{lem:Icosahedron}\\ \hline $\gridcomp 6$ & $5$ & $4$ & satisfies Condition~\ref{gridcondition} & Section \ref{sec:gridcomp}\\ \hline \end{tabular} \end{center} \caption{$(G,2)$-distance transitive, but not $(G,2)$-arc transitive graphs of valency at most 5} \label{maintable} \end{table}} In Section~\ref{sect:def} we state the most important definitions and some basic results related to $2$-distance transitivity. In Section~\ref{sect:exam}, we study some examples, such as grids, their complements, Hamming graphs, complete bipartite graphs, and platonic solids from the point of view of $2$-distance transitivity. In Section~\ref{sect:girth4}, we consider $2$-distance transitive graphs of girth 4. Finally the proofs of our main results are given in Section~\ref{sect:proofs}. \subsection*{Acknowledgment} During the preparation of this article, the first and second authors held {\em Ci\^encia sem Fronteiras/Jovem Talento} and {\em Programa Nacional de P\'os-doutorado} fellowships, respectively, awarded by the Brazilian Government. In addition, the second author was also awarded the NNSF grant 11301230 (China). The third author was supported by the research projects 302660/2013-5 {\em (CNPq, Produtividade em Pesquisa)}, 475399/2013-7 {\em (CNPq, Universal)}, and the APQ-00452-13 {\em (Fapemig, Universal)}. \section{Basic Definitions and useful facts}\label{sect:def} In this paper, graphs are finite, simple, and undirected. For a graph $\Gamma$, let $\VGamma$ and $\Aut\Gamma$ denote its vertex set and automorphism group, respectively. Let $\Gamma$ be a graph and let $u$ and $v$ be vertices in $\Gamma$ that belong to the same connected component. Then the {\em distance} between $u$ and $v$ is the length of a shortest path between $u$ and $v$ and is denoted by $d_{\Gamma}(u,v)$. We denote by $\Gamma_s(u)$ the set of vertices at distance $s$ from $u$ in $\Gamma$ and we set $\Gamma(u)=\Gamma_1(u)$. 
The \emph{diameter} $\diam\Gamma$ of $\Gamma$ is the greatest distance between vertices in $\Gamma$. Let $G\leqslant\Aut\Gamma$ and let $s\leqslant\diam\Gamma$. We say that $\Gamma$ is \emph{$(G,s)$-distance transitive} if $G$ is transitive on $\VGamma$ and $G_u$ is transitive on $\Gamma_i(u)$ for all $i\leqslant s$. If $\Gamma$ is $(G,s)$-distance transitive for $s= \diam \Gamma$, then we simply say that it is \emph{$G$-distance transitive}. By our definition, if $s>\diam\Gamma$, then $\Gamma$ is not $(G,s)$-distance transitive. For instance, the complete graph is not $(G,2)$-distance transitive for any group $G$. In the characterization of $(G,s)$-distance transitive graphs, the following constants are useful. Our definition is inspired by the concept of intersection arrays defined for the distance regular graphs (see \cite{BCN}). \begin{definition}\label{definition: intersectionarray} Let $\Gamma$ be a $(G,s)$-distance transitive graph, $u\in\VGamma$, and let $v\in\Gamma_i(u)$, $i\leqslant s$. Then the number of edges from $v$ to $\Gamma_{i-1}(u)$, $\Gamma_i(u)$, and $\Gamma_{i+1}(u)$ does not depend on the choice of $v$ and these numbers are denoted, respectively, by $c_i$, $a_i$, $b_i$. \end{definition} Clearly we have that $a_i+b_i+c_i$ is equal to the valency of $\Gamma$ whenever the constants are well-defined. Note that for $(G,2)$-distance transitive graphs, the constants are always well-defined for $i=1,\ 2$. A sequence $(v_0,\ldots,v_{s})$ of vertices of a graph is said to be an {\em $s$-arc} if $v_i$ is connected to $v_{i+1}$ for all $i\in\{0,\ldots,s-1\}$ and $v_i\neq v_{i+2}$ for all $i\in\{0,\ldots,s-2\}$. A graph $\Gamma$ is called \emph{$(G,s)$-arc transitive} if $G$ acts transitively on the set of vertices and on the set of $s$-arcs of $\Gamma$. (We note that some authors define $(G,s)$-arc transitivity only requiring that $G$ should be transitive on the set of $s$-arcs.) 
It is well-known that $\Gamma$ is $(G,2)$-arc transitive if and only if $G$ is transitive on $\VGamma$, and the stabilizer $G_u$ is 2-transitive on $\Gamma(u)$ for some, and hence for all, $u\in\VGamma$. We will use this fact without further reference in the rest of the paper. The {\em girth} of a graph $\Gamma$ is the length of a shortest cycle in $\Gamma$. Let $\Gamma$ be a connected $(G,2)$-distance transitive graph. If $\Gamma$ has girth at least 5, then for any two vertices $u$ and $v$ with $d_{\Gamma}(u,v)=2$, there exists a unique 2-arc between $u$ and $v$. Hence if $\Gamma$ is $(G,2)$-distance transitive, then it is $(G,2)$-arc transitive. On the other hand, if the girth of $\Gamma$ is 3, and $\Gamma$ is not a complete graph, then some $2$-arcs are contained in a triangle, while some are not. Hence $\Gamma$ is not $(G,2)$-arc transitive. We record the conclusion of this argument in the following lemma. \begin{lemma}\label{lem:girth5} Suppose that $\Gamma$ is a $(G,2)$-distance transitive graph. If $\Gamma$ has girth at least $5$, then $\Gamma$ is $(G,2)$-arc transitive. If $\Gamma$ has girth~$3$, then $\Gamma$ is not $(G,2)$-arc transitive. \end{lemma} If $\Gamma$ has girth 4, then $\Gamma$ can be $(G,2)$-distance transitive, but not $(G,2)$-arc transitive. An infinite family of examples can be constructed using Lemma~\ref{lem:gridcomp}. We close this section with two results on permutation group theory and another one on $2$-geodesic transitive graphs. They will be needed in our analysis in Sections~\ref{sect:girth4}--\ref{sect:proofs}. Recall that a permutation group $G$ acting on $\Omega$ is said to be {\em $2$-homogeneous} if $G$ is transitive on the set of $2$-subsets of $\Omega$. \begin{lemma}[\cite{kantor}] \label{2dt-2homonot2t} Let $G$ be a $2$-homogeneous permutation group of degree $n$ which is not $2$-transitive. 
Then the following statements are valid: \begin{enumerate} \item $n=p^e\equiv 3 \pmod 4$ where $p$ is a prime; \item $|G|$ is odd and is divisible by $p^e(p^e-1)/2$; \end{enumerate} \end{lemma} \begin{lemma}{\rm(\cite[Theorem 1.51]{Gorenstein-1})}\label{val-2p-1} If $G$ is a primitive, but not $2$-transitive permutation group on $2p$ letters where $p$ is a prime, then $p=5$ and $G\cong A_5$ or $S_5$. \end{lemma} An \emph{$s$-geodesic} in a graph $\Gamma$ is a shortest path of length $s$ between vertices in $\Gamma$. In particular, a vertex triple $(u,v,w)$ with $v$ adjacent to both $u$ and $w$ is called a \emph{$2$-geodesic} if $u$ and $w$ are not adjacent. A non-complete graph $\Gamma$ is said to be \emph{ $(G,2)$-geodesic transitive} if $G$ is transitive on both the arc set and on the set of 2-geodesics of $\Gamma$. Recall that the {\em line graph} $L(\Gamma)$ of a graph $\Gamma$ is the graph whose vertices are the edges of $\Gamma$ and two vertices of $L(\Gamma)$ are adjacent if and only if they are adjacent to a common vertex of $\Gamma$. For a natural number $n$, we denote by $\K_n$ the {\em complete graph} on $n$ vertices. \begin{lemma}{\rm (\cite[Theorem 1.3]{DJLP-2})}\label{2gt-val4} Let $\Gamma$ be a connected, non-complete graph of valency $4$ and girth $3$. Then $\Gamma$ is $(G,2)$-geodesic transitive if and only if, either $\Gamma=L(\K_4)$ or $\Gamma=L(\Sigma)$ where $\Sigma$ is a connected cubic $(G,3)$-arc transitive graph. \end{lemma} We observe that the line graph of $\K_4$ is precisely the octahedral graph (see Lemma \ref{lem:Octahedron}). \section{Constructions, Examples \& non-Examples}\label{sect:exam} \subsection{Complements of grids and complete bipartite graphs}\label{sec:gridcomp} For $n,\ m\geqslant 2$, we define the $\grid{n}{m}$ as the graph having vertex set $\{(i,j) \mid 1 \leqslant i\leqslant n,\ 1\leqslant j \leqslant m\}$, and two distinct vertices $(i,j)$ and $(r,s)$ are adjacent if and only if $i=r$ or $j=s$. 
The automorphism group of the $\grid{n}{m}$, when $n \neq m$, is the direct product $S_n \times S_m$; when $n=m$, it is $S_n \wr S_2$. The complement $\overline{\Gamma}$ of a graph $\Gamma$ is the graph with vertex set $V\Gamma$, and two vertices are adjacent in $\overline{\Gamma}$ if and only if they are not adjacent in $\Gamma$. Clearly, $\Aut\Gamma=\Aut\overline\Gamma$. Of particular interest to us is the complement graph $\gridcomp{m}$. The graph in Figure \ref{fig:gridcomp} is the $\gridcomp{4}$. Observe that for $\Gamma=\gridcomp{m}$, we have $\diam\Gamma = 3$, and \[ c_1 = 1,\ a_1 = 0,\ b_1 = m-2,\ c_2 = m-2,\ a_2 = 0,\ b_2 = 1. \] \begin{figure} \caption{The grid complement $\gridcomp{4}$.} \label{fig:gridcomp} \end{figure} \begin{condition}\label{gridcondition} Let $m\geqslant 3$ and let $\pi: S_2\times S_m\rightarrow S_2$ be the natural projection. We say that a subgroup $G$ of $S_2\times S_m$ satisfies Condition~\ref{gridcondition} if $G\pi=S_2$ and $G\cap S_m$ is a $2$-transitive, but not $3$-transitive subgroup of $S_m$. \end{condition} \begin{lemma}\label{lem:gridcomp} Let $\Gamma = \gridcomp{m}$ with $m\geqslant 4$, and let $G\leqslant \Aut\Gamma =S_2\times S_m$. Then $\Gamma$ is $(G,2)$-distance transitive, but not $(G,2)$-arc transitive if and only if $G$ satisfies Condition \ref{gridcondition}. \end{lemma} \begin{proof} Let $\Delta_1=\{(1,i)\mid i=1,2,\ldots,m\}$ and $\Delta_2=\{(2,i)\mid i=1,2,\ldots,m\}$ be the two biparts of $V\Gamma$. Let $u=(1,1)\in \Delta_1$. Suppose first that $\Gamma$ is $(G,2)$-distance transitive, but not $(G,2)$-arc transitive. Since $G$ is transitive on $V\Gamma$, $G$ projects onto $S_2$, that is, $G\pi=S_2$. Let $H=G\cap S_m$. Then $G_u=H_1$, $\Delta_2=\Gamma(u)\cup \{(2,1)\}$ and $\Gamma_2(u)=\Delta_1\setminus \{u\}$. Since $\Gamma$ is $(G,2)$-distance transitive, $G_u=H_1$ is transitive on both $\Gamma(u)$ and $\Gamma_2(u)$. 
Hence $H_1$ is transitive on $\{2,\ldots,m\}$, and so $H$ is a $2$-transitive subgroup of $S_m$. Since $\Gamma$ is not $(G,2)$-arc transitive, $G_u=H_1$ is not 2-transitive on $\{2,3,\ldots,m\}$, so $H$ is not $3$-transitive. Thus $G$ satisfies Condition \ref{gridcondition}. Conversely, suppose that $G$ satisfies Condition \ref{gridcondition}. Then $H=G\cap S_m$ is transitive on $\Delta_1$ and $\Delta_2$, and $G$ swaps these two sets. Thus $G$ is transitive on $\VGamma$. As $H$ is a 2-transitive, but not 3-transitive subgroup of $S_m$, $H_1$ is transitive, but not 2-transitive on $\Gamma(u)=\{(2,i)\mid i=2,\ldots,m\}$ and on $\Gamma_2(u)=\{(1,i)\mid i=2,\ldots,m\}$. Hence $\Gamma$ is $(G,2)$-distance transitive, but not $(G,2)$-arc transitive. \end{proof} A list of $2$-transitive, but not $3$-transitive permutation groups can be found in~\cite[pp.~194-197]{cameron}. Complete bipartite graphs appear frequently in this paper. Since $\K_{m,n}$ with $m\neq n$ is not regular, we study $\K_{m,m}$. The full automorphism group of $\K_{m,m}$ is $S_m \wr S_2$, and this automorphism group acts $2$-arc transitively on $\K_{m,m}$. In the lemma below, we show that there is no $2$-distance transitive action on $\K_{m,m}$ which is not $2$-arc transitive. \begin{lemma}\label{lem:complete bipartite} Let $\Gamma\cong \K_{m,m}$ with $m\geqslant 2$ and let $G\leqslant\Aut \Gamma$. Then $\Gamma$ is $(G,2)$-distance transitive if and only if it is $(G,2)$-arc transitive. \end{lemma} \begin{proof}[Proof] If $\Gamma$ is $(G,2)$-arc transitive, then, by definition, it is $(G,2)$-distance transitive. Conversely, suppose that $\Gamma$ is $(G,2)$-distance transitive with some $G\leqslant\Aut \Gamma$. Let $\VGamma=\Delta_1\cup\Delta_2$ be the bipartition of $V \Gamma$ where $\Delta_1=\{(1,i)\mid i=1,\ldots,m\}$ and $\Delta_2=\{(2,i)\mid i=1,\ldots,m\}$. The full automorphism group of $\Gamma$ is $S_m\wr S_2$. 
Since $G\leqslant\Aut \Gamma$ is assumed to be vertex transitive, $G_{\Delta_1}=G_{\Delta_2}$ is transitive on both $\Delta_1$ and $\Delta_2$. Set $G_0=G_{\Delta_1}$. Thus $G_0$ is a subdirect subgroup in $M^{(1)}\times M^{(2)}$ where $M^{(i)}\leqslant S_m$ and $M^{(i)}$ is the image of $G_0$ under the $i$-th coordinate projection $S_m\times S_m\rightarrow S_m$. Further, $G$ projects onto $S_2$ under the natural projection $\Aut \Gamma\rightarrow S_2$. If $x=(x_1,x_2)\sigma\in G$ with $x_i\in S_m$ and $\sigma=(1,2)\in S_2$, then $(M^{(1)})^{x_1}=M^{(2)}$, and so $M^{(1)}$ and $M^{(2)}$ are conjugate subgroups of $S_m$. Hence possibly replacing $G$ with its conjugate $G^{(x_1,1)}$, we may assume without loss of generality that $M^{(1)}=M^{(2)}=M$. Let $u=(1,1)\in\VGamma$. Then $\Gamma(u)=\Delta_2$ and $\Gamma_2(u)=\Delta_1\setminus \{u\}$. Further, $G_u$ stabilizes $\Delta_1$, and hence $G_u\leqslant G_0$. Since $\Gamma$ is $(G,2)$-distance transitive, it follows that $G_u$ is transitive on both $\Delta_2$ and $\Delta_1\setminus \{u\}$. Set $H=M_1$. Since $G_u\leqslant H\times M$, the stabilizer $H$ must be transitive on $\{2,\ldots,m\}$, and hence $M$ is a 2-transitive subgroup of $S_m$. In particular, $M$ contains a unique minimal normal subgroup $N$ and this minimal normal subgroup is either elementary abelian or simple. Since $N$ is transitive, we can write $M=NH$. We have that $G_0$ contains $1\times N$ if and only if it contains $N\times 1$. Hence we need to consider two cases: the first is when $G_0$ contains $N\times N$ and the second is when it does not. Suppose first that $G_0$ contains $N\times N$. In particular, $1\times N\leqslant G_u$. For all $h_2\in H$, there is some $n_1h_1\in M$ with $n_1\in N$ and $h_1\in H$ such that $(n_1h_1,h_2)\in G_0$. Since $N\times 1\leqslant G_0$, this implies that $(h_1,h_2)\in G_0$ and also $(h_1,h_2)\in G_u$. Thus $G_u$ projects onto $NH=M$ by the second projection. 
Hence $G_u$ is 2-transitive on $\Delta_2=\Gamma(u)$, which shows that $\Gamma$ is $(G,2)$-arc transitive. Suppose now that $N\times N$ is not contained in $G_0$. Since $G_0\cap (1\times M)$ is a normal subgroup of $M$ and $N$ is the unique minimal normal subgroup of $M$, we find that $G_0\cap (1\times M)=1$ and, similarly, that $G_0\cap (M\times 1)=1$. Therefore $G_0$ is a diagonal subgroup; that is, $$ G_0=\{(t,\alpha(t))\mid t\in M\} $$ with some $\alpha\in\Aut M$. As $H$ is the stabilizer of $1$ in $M$, we have that $G_u=\{(t,\alpha(t))\mid t\in H\}$. On the other hand, $G_u$ is transitive on $\Delta_2$, and hence $\alpha(H)$ is a transitive subgroup of $M$. Thus we obtain the factorization $M=H\alpha(H)$. The following possibilities are listed in~\cite[Theorem~1.1]{baum}. \begin{enumerate} \item[(a)] Either $M$ is affine and is isomorphic to $[(\F{2})^3\rtimes \PSL(3,2)]\wr X$ where $X$ is a transitive permutation group; \item[(b)] or $\Soc M\cong \pomegap 8q$, $\Sp(4,q)$ ($q$ even with $q\geqslant 4$), $A_6$, $M_{12}$. \end{enumerate} In case~(a), if $X\neq 1$, then $M$ is contained in a wreath product in product action, and such a wreath product is never 2-transitive. Thus $X=1$, $m=8$, $M=(\F{2})^3\rtimes \PSL(3,2)$, and $G_u\cong\PSL(3,2)$ acting transitively on $\Delta_2$. However, this transitive action of $\PSL(3,2)$ is $2$-transitive, which gives that $\Gamma$ is $(G,2)$-arc transitive. In case~(b), inspecting the list of almost simple 2-transitive groups in~\cite{cameron}, we find that there are no 2-transitive groups with socle $\pomegap 8q$ or $\Sp(4,q)$ with $q$ even and $q\geqslant 4$. Hence $\Soc M= A_6$ or $M_{12}$. Then $G_u$ is either $A_5$, $S_5$ or $M_{11}$ acting transitively on $\Delta_2$. These actions are all 2-transitive, which implies that $\Gamma$ is $(G,2)$-arc transitive. 
\end{proof} \subsection{Hamming graphs and platonic solids} For $d,\ q\geqslant 2$, the vertex set of the {\em Hamming graph} $\Hamming(d,q)$ is the set $\{1,\ldots,q\}^d$ and two vertices $u=(\alpha_1,\ldots,\alpha_d)$ and $v=(\beta_1,\ldots,\beta_d)$ are adjacent if and only if their Hamming distance is one; that is, they differ in precisely one coordinate. The Hamming graph has diameter $d$ and has girth $4$ when $q=2$ and girth $3$ when $q > 2$. The wreath product $W = S_q\wr S_d$ is the full automorphism group of $\Hamming(d,q)$, acting distance transitively, see \cite[Section 9.2]{BCN}. The Hamming graphs are well studied, due in part to their applications to coding theory. Hamming graphs arise in two cases of our research. The first case is the cube $\Gamma=\Hamming(3,2)$. The standard construction of the cube graph is precisely the same as for the Hamming graphs with $d=3$ and $q=2$, and so this graph is the `standard' cube with $8$ vertices (the cube $\Hamming(3,2)$ is also isomorphic to the grid complement $\gridcomp{4}$). The second case is $\Gamma=\Hamming(d,2)$ when $d>2$; see Lemma~\ref{lem:cube1}. Some platonic solids (cube, octahedron and icosahedron) appear in some form in our investigation. The cube appears as the $\gridcomp{4}$. We discuss in more detail the octahedron and the icosahedron. The octahedron (see Figure \ref{fig:Octahedron}) has 6 vertices and diameter 2. Its automorphism group $S_2 \wr S_3$ acts imprimitively preserving the partition of vertices into antipodal pairs. We denote by $\pi$ the natural projection $S_2 \wr S_3\rightarrow S_3$. \begin{figure} \caption{The octahedron, displayed according to its distance-partition.} \label{fig:Octahedron} \end{figure} \begin{lemma}\label{lem:Octahedron} Let $\Gamma$ be the octahedron, and let $G \leqslant \Aut\Gamma $. Then $\Gamma$ is not $(G,2)$-arc transitive. 
Further, $\Gamma$ is $(G,2)$-distance transitive if and only if either $G= S_2\wr S_3$, or $G$ is an index $2$ subgroup of $S_2\wr S_3$ and $G\pi =S_3$. \end{lemma} \begin{proof} Since $\Gamma$ is non-complete of girth 3, $\Gamma$ is not $(G,2)$-arc transitive. Now assume that $\Gamma$ is $(G,2)$-distance transitive. Let $u=a$ be the vertex in the graph of Figure \ref{fig:Octahedron}. Since $G_u$ is transitive on $\Gamma(u)$ and $|\Gamma(u)|=4$, $|G_u|$ is divisible by 4. Further, $|G:G_u|=6$, and so $|G|$ is divisible by 24. Suppose that $G$ is a proper subgroup of $\Aut \Gamma=S_2\wr S_3$. Then $|G|=24$. As $|\Aut \Gamma|=48$, $G$ is an index $2$ subgroup of $S_2\wr S_3$. The three antipodal blocks of $V\Gamma$ in the graph of Figure \ref{fig:Octahedron} are $\Delta_1=\{a,a'\}$, $\Delta_2=\{b,b'\}$ and $\Delta_3=\{c,c'\}$. Since $G$ is transitive on $V\Gamma$, $G$ is transitive on the three antipodal blocks. Thus the image $G\pi$ of $G$ in $S_3$ is $\mathbb{Z}_3$ or $S_3$. Assume $G\pi=\mathbb{Z}_3$. Then $G_u$ acts on the three antipodal blocks trivially. Hence $G_u$ does not map $\Delta_2$ to $\Delta_3$, contradicting that $G_u$ is transitive on $\Gamma(u)$. Therefore $G\pi=S_3$. Simple calculation shows that the conditions stated in the lemma are sufficient for $2$-distance transitivity. \end{proof} The icosahedron has automorphism group $S_2 \times A_5$ acting arc transitively. \begin{lemma}\label{lem:Icosahedron} Let $\Gamma$ be the icosahedron, and let $G \leqslant \Aut\Gamma $. The graph $\Gamma$ is $(G,2)$-distance transitive if and only if $G = S_2 \times A_5$ or $G=A_5$. In particular, $\Gamma$ is not $(G,2)$-arc transitive. \end{lemma} \begin{proof} By \cite[Theorem 1.5]{DJLP-prime}, $\Aut\Gamma\cong S_2 \times A_5$. It is easy to see that for $G\in\{S_2\times A_5,A_5\}$, $\Gamma$ is $(G,2)$-distance transitive. Suppose that $\Gamma$ is $(G,2)$-distance transitive. 
Then $G$ is transitive on $V\Gamma$ and $G_u$ is transitive on $\Gamma(u)$, and so $12=|V\Gamma|$ divides $|G|$ and $|\Gamma(u)|=5$ divides $|G_u|$. Thus 60 divides $|G|$. Since $G\leqslant \Aut\Gamma\cong S_2 \times A_5$, it follows that $G = S_2 \times A_5$ or $G=A_5$. Finally, as $\Gamma$ is a non-complete graph of girth 3, $\Gamma$ is not $(G,2)$-arc transitive. \end{proof} \begin{figure} \caption{The icosahedron, displayed according to its distance-partition.} \label{fig:Icosahedron} \end{figure} \section{Graphs of girth $4$}\label{sect:girth4} By the assertion of Lemma \ref{lem:girth5}, to study the family of $(G,2)$-distance transitive, but not $(G,2)$-arc transitive graphs, we only need to consider the graphs with girth $3$ or $4$. This section is devoted to the girth $4$ case, and the structure of such graphs depends strongly upon the value of the constant $c_2$ as in Definition~\ref{definition: intersectionarray}. We begin with a simple combinatorial result: \begin{lemma}\label{lem:sizeofgamma2} Let $\Gamma$ be a $(G,2)$-distance transitive graph with valency $k$ and girth at least $4$. Let $u\in V\Gamma$. Then there are $k(k-1)$ edges between $\Gamma(u)$ and $\Gamma_2(u)$, and $k(k-1) = c_2 |\Gamma_2(u)|$. \end{lemma} \begin{proof} Consider a vertex $v \in \Gamma(u)$. Since $\Gamma$ has girth more than $3$, all of the neighbors of $v$, except for $u$, lie in $\Gamma_2(u)$. Thus, there are $k-1$ edges from $v$ to $\Gamma_2(u)$. Since there are $k$ such vertices $v$, there are $k(k-1)$ edges between $\Gamma(u)$ and $\Gamma_2(u)$. As $\Gamma$ is $(G,2)$-distance transitive, the equation $k(k-1) = c_2 |\Gamma_2(u)|$ follows by counting the same quantity from the other side: each vertex in $\Gamma_2(u)$ is incident with exactly $c_2$ edges between $\Gamma_2(u)$ and $\Gamma(u)$. \end{proof} For a vertex $u\in\VGamma$, we denote by $G_u^{\Gamma_i(u)}$ the permutation group induced by $G_u$ on $\Gamma_i(u)$. 
\begin{lemma}\label{lem:c_2=2} Let $\Gamma$ be a $(G,2)$-distance transitive, but not $(G,2)$-arc transitive graph with valency $k$ and girth $4$, and suppose that $c_2=2$. Then $G_u$ acts $2$-homogeneously, but not $2$-transitively on $\Gamma(u)$ for each $u\in \VGamma$. Further, $k=p^e\equiv 3 \pmod 4$ where $p$ is a prime. \end{lemma} \begin{proof} Since $c_2=2$, each vertex $w\in\Gamma_2(u)$ uniquely determines a $2$-subset in $\Gamma(u)$, namely the intersection $\Gamma(w) \cap \Gamma(u)$. We claim that the map $\psi:w\mapsto\Gamma(w)\cap\Gamma(u)$ is a bijection between $\Gamma_2(u)$ and the set of 2-subsets of $\Gamma(u)$. Suppose that $\psi(w_1)=\psi(w_2)=\{v_1,v_2\}$. Then $u,\ w_1,\ w_2\in\Gamma(v_1)$ and $v_2\in\Gamma_2(v_1)$. On the other hand, as $v_2$ is adjacent to $u,\ w_1,\ w_2$, there are three edges from $v_2$ to $\Gamma(v_1)$, which is impossible, as $c_2=2$. Hence $\psi$ is injective. Since $\Gamma$ has girth 4, it follows from Lemma \ref{lem:sizeofgamma2} that $|\Gamma_2(u)| = k(k-1)/2=\binom{k}{2}$, and so the map $\psi$ is a bijection. Hence $G_u$ is transitive on $\Gamma_2(u)$ if and only if it is transitive on the set of $2$-subsets in $\Gamma(u)$, that is, $G_u^{\Gamma(u)}$ acts $2$-homogeneously on $\Gamma(u)$. Since $\Gamma$ is not $(G,2)$-arc transitive, $G_u^{\Gamma(u)}$ is not $2$-transitive on $\Gamma(u)$. Thus by Lemma \ref{2dt-2homonot2t}, $k=p^e\equiv 3 \pmod 4$ where $p$ is a prime. \end{proof} In the following lemma we characterize $(G,2)$-distance transitive, but not $(G,2)$-arc transitive Hamming graphs over an alphabet of size $2$. \begin{lemma}\label{lem:cube1} Let $\Gamma = \Hamming(d,2)$ with $d > 2$, and let $G \leqslant\Aut\Gamma \cong S_2\wr S_{d}$. Then $\Gamma$ is $(G,2)$-distance transitive, but not $(G,2)$-arc transitive if and only if $G=S_2\wr H$ where $H$ is a $2$-homogeneous, but not $2$-transitive subgroup of $S_d$. Further, in this case, $d=p^e\equiv 3 \pmod 4$. 
\end{lemma} \begin{proof} By \cite[p.~222]{BCN}, $\Gamma$ is $\Aut \Gamma$-distance transitive of girth 4, valency $d$, and $c_2=2$. Assume that the action of $G$ on $\Gamma$ is 2-distance transitive, but not $2$-arc transitive. Then by Lemma~\ref{lem:c_2=2}, $G_u$ is 2-homogeneous, but not $2$-transitive on $\Gamma(u)$, for all $u$. Further, $d=p^e\equiv 3 \pmod 4$ where $p$ is a prime. Let $A=\Aut\Gamma=M\rtimes S_d$ where $M=(S_2)^d$. Let $u$ be the vertex $(1,\ldots,1)$ and set $H=G_u$. If $g\in G$, then $g=mh$ where $m\in M$ and $h\in S_d$, and so $h\in H$. Hence $G\leqslant MH$. Then, by Dedekind's Modular Law, $(G\cap M)H=G\cap (MH)=G$. Thus $G\cap M$ is a transitive subgroup of $G$. Since $M$ is regular, $G\cap M=M$, and so $M\leqslant G$. Thus $G=M\rtimes H=S_2\wr H$. As the action of $H$ on $\Gamma(u)$ is faithful, $H=G_u^{\Gamma(u)}$. Conversely, assume that $G=S_2\wr H$ and $H$ is a $2$-homogeneous, but not $2$-transitive subgroup of $S_d$. Then $G$ is transitive on $V\Gamma$. Since $G_u^{\Gamma(u)}=G_u=H$, $G_u^{\Gamma(u)}$ acts $2$-homogeneously, but not $2$-transitively on $\Gamma(u)$ for each $u\in \VGamma$. Hence $\Gamma$ is not $(G,2)$-arc transitive and $G_u^{\Gamma(u)}$ is transitive on the set of 2-subsets of $\Gamma(u)$. Since $\Gamma$ has girth 4 and $c_2=2$, we can construct a one-to-one correspondence between the 2-subsets of $\Gamma(u)$ and vertices of $\Gamma_2(u)$ as in the proof of Lemma~\ref{lem:c_2=2}. Thus $G_u$ is transitive on $\Gamma_2(u)$, so $\Gamma$ is $(G,2)$-distance transitive. \end{proof} We have treated the case where $c_2=2$. When $c_2$ is `large' (that is, close to the valency) we can say a lot about the structure of $\Gamma$. \begin{lemma}\label{lem:c_2=k} If $\Gamma$ is a connected $(G,2)$-distance transitive graph with valency $k$ and girth $4$, then the following are valid. \begin{enumerate} \item If $c_2=k$, then $\Gamma = \K_{k,k}$. \item If $k\geqslant 3$ and $c_2=k-1$, then $\Gamma = \gridcomp{(k+1)}$. 
\end{enumerate} \end{lemma} \begin{proof} (i) Let $(u,v,w)$ be a $2$-arc. Since $\Gamma$ has girth 4, $u$ and $w$ are nonadjacent, so $w$ has $k$ neighbors in $\Gamma(u)$, as $c_2=k$. Since the valency of $\Gamma$ is $k$, this forces $\Gamma(u) = \Gamma(w)$. By the $(G,2)$-distance transitivity of $\Gamma$, every vertex in $\Gamma_2(u)$ has all its neighbors in $\Gamma(u)$, and this implies that $\Gamma_3(u)$ is empty and there are no edges in $\Gamma_2(u)$. Thus $\Gamma$ is a bipartite graph and the two biparts are $\Gamma(u)$ and $\{ u \} \cup \Gamma_2(u)$. Every edge between the two biparts is present, so $\Gamma$ is a complete bipartite graph. Since $\Gamma$ is regular of valency $k$, we have $\Gamma = \K_{k,k}$. (ii) Let $(u,v,w)$ be a 2-arc. Since $\Gamma$ has girth 4 and $c_2=k-1$, by Lemma~\ref{lem:sizeofgamma2}, we have $|\Gamma_2(u)| = k$. Let $w'$ be the unique vertex in $\Gamma_2(u)$ that is not adjacent to $v$. Assume that the induced subgraph $[\Gamma_2(u)]$ contains an edge. As $G_u$ is transitive on $\Gamma_2(u)$, every vertex of $\Gamma_2(u)$ is adjacent to some vertex of $\Gamma_2(u)$. Since $\Gamma$ has girth 4, the $k-1$ vertices in $\Gamma_2(u)\cap \Gamma(v)$ are pairwise nonadjacent, so every vertex of $\Gamma_2(u)\cap \Gamma(v)$ is adjacent to $w'$, which is impossible, as $|\Gamma(u)\cap \Gamma(w')|=k-1$. Thus there are no edges in $[\Gamma_2(u)]$. Thus each vertex in $\Gamma_2(u)$ is adjacent to a unique vertex in $\Gamma_3(u)$. Let $z\in \Gamma_3(u)\cap \Gamma(w)$. Since $c_2=k-1$, every pair of vertices at distance $2$ have $k-1$ common neighbors, so $|\Gamma(v)\cap \Gamma(z)|=k-1$. Hence $z$ is adjacent to all vertices of $\Gamma_2(u)$ that are adjacent to $v$. If for all $v'\in \Gamma(u)$, $\Gamma_2(u)\cap \Gamma(v)=\Gamma_2(u)\cap \Gamma(v')$, then $|\Gamma_2(u)|=k-1$, which is a contradiction. Thus $\Gamma(u)$ contains a vertex $v'$ such that $\Gamma_2(u)\cap \Gamma(v)\neq \Gamma_2(u)\cap \Gamma(v')$. 
In particular, $\Gamma_2(u)= \Gamma_2(u)\cap (\Gamma(v)\cup \Gamma(v'))$. Now $v'$ and $z$ must have a common neighbor in $\Gamma_2(u)$, and so $v'$ and $z$ are at distance $2$. Thus, as $c_2=k-1$, $z$ is adjacent to all vertices of $\Gamma_2(u)$ that are adjacent to $v'$. Thus $z$ is adjacent to all vertices of $\Gamma_2(u)$. Since $|\Gamma_2(u)|=k$, we find that there are no more vertices in $\Gamma$. Therefore, we have determined $\Gamma$ completely, and $\Gamma = \gridcomp{(k+1)}$. \end{proof} \section{Proof of Main Results}\label{sect:proofs} We first prove Theorem~\ref{thm:valency stuff}. \begin{proof}[{\bf Proof of Theorem \ref{thm:valency stuff}}] Since $\Gamma$ has girth $4$, it follows that $2\leqslant c_2\leqslant k$. If $c_2=k$, then, by Lemma \ref{lem:c_2=k}, $\Gamma = \K_{k,k}$. However, by Lemma \ref{lem:complete bipartite}, $\Gamma$ is $(G,2)$-arc transitive, whenever it is $(G,2)$-distance transitive, and hence this case cannot arise. Thus $2\leqslant c_2\leqslant k-1$. Statement~(i) now follows from Lemmas~\ref{lem:c_2=k}(ii) and~\ref{lem:gridcomp}, while statement~(ii) follows from Lemma~\ref{lem:c_2=2} \end{proof} Next we prove Corollary~\ref{thm:primeval}. \begin{proof}[{\bf Proof of Corollary \ref{thm:primeval}}] If $p=2$, then $\Gamma$ is a cycle graph, so $\Gamma$ is $(G,2)$-distance transitive if and only if it is $(G,2)$-arc transitive, which is a contradiction. Thus $p\geqslant 3$. Then by Theorem \ref{thm:valency stuff}, either $\Gamma \cong \gridcomp{(p+1)}$, or $2\leqslant c_2 \leqslant p-2$. Assume that $2\leqslant c_2 \leqslant p-2$. It follows from Lemma \ref{lem:sizeofgamma2} that $p(p-1) = c_2 |\Gamma_2(u)|$. Since $2\leqslant c_2 \leqslant p-2$, $p$ and $c_2$ are coprime, so $c_2$ divides $p-1$. As $c_2<p-1$, we get $2\leqslant c_2\leqslant (p-1)/2$ and this proves~(i). Statement~(ii) follows from Theorem \ref{thm:valency stuff}(ii). Assume that $c_2= (p-1)/2$. By Lemma~\ref{lem:sizeofgamma2}, $|\Gamma_2(u)|=2p$. 
If $G_u$ were primitive on $\Gamma_2(u)$, then by Lemma \ref{val-2p-1}, we would have, $p=5$, and hence $c_2=2$. However, In this case $p\equiv 3 \pmod 4$, which is a contradiction. Thus $G_u$ is imprimitive on $\Gamma_2(u)$ and this shows~(iii). \end{proof} One can form an infinite family of examples that satisfy the conditions of Corollary \ref{thm:primeval} from Hamming graphs $\Hamming(p,2)$ using Lemma~\ref{lem:cube1}. In the following, we prove Theorem \ref{thm:small val}, that is, we determine all $(G,2)$-distance transitive, but not $(G,2)$-arc transitive graphs of valency at most $5$. We split the proof into two parts, as we consider the girth 4 and~3 cases separately in Propositions~\ref{lem:valency 3} and~\ref{2dtval4-girth3}, respectively. \begin{proposition}\label{lem:valency 3} Let $\Gamma$ be a connected $(G,2)$-distance transitive, but not $(G,2)$-arc transitive graph of girth $4$ and valency $k\in\{3,4,5\}$. Then $\Gamma \cong \gridcomp{k+1}$, and $G$ satisfies Condition \ref{gridcondition}. \end{proposition} \begin{proof} We claim that $c_2=k-1$ in all cases. By Theorem \ref{thm:valency stuff}, $c_2\leqslant k-1$. If $k=3$, then $c_2\geqslant 2=k-1$ follows from the girth condition, and so $c_2=k-1$. If $k\in\{4,5\}$ and $c_2\leqslant k-2$, then we must have that $c_2=2$ (use Corollary~\ref{thm:primeval} for $k=5$). Hence, by Lemma \ref{lem:c_2=2}, $k \equiv 3 \pmod{4}$: a contradiction, as $k\in\{4,5\}$. Now the rest follows from Theorem~\ref{thm:valency stuff}(i). \end{proof} \begin{proposition}\label{2dtval4-girth3} Let $\Gamma$ be a connected $(G,2)$-distance transitive graph of girth $3$ and valency $4$ or $5$, and let $u\in V\Gamma$. Then one of the following is valid. 
\begin{enumerate} \item $\Gamma$ is the octahedron and either $G=S_2\wr S_3$ or $G$ is an index $2$ subgroup of $S_2\wr S_3$ and $G$ projects onto $S_3$; \item $\Gamma\cong \Hamming(2,3)$ and either $G=S_3\wr S_2$ or $G$ is an index $2$ subgroup of $S_3\wr S_2$ and $G$ projects onto $S_2$; \item $|\Gamma_2(u)|=8$ and $\Gamma$ is the line graph of a connected cubic $(G,3)$-arc transitive graph; \item $\Gamma$ is the icosahedron and $G = A_5$ or $A_5 \times S_2$. \end{enumerate} In cases (i)--(iii), the valency of $\Gamma$ is $4$, while in case~(iv), the valency is $5$. \end{proposition} \begin{proof} Suppose first that the valency is~4. Since $\Gamma$ is $(G,2)$-distance transitive of valency $4$ and girth $3$, it follows that the induced graph $[\Gamma(u)]$ is a vertex transitive graph with 4 vertices of valency $k$ where $1\leqslant k\leqslant 3$. If $[\Gamma(u)]$ has valency 3, then $[\Gamma(u)]$ is complete, and so $\Gamma$ is complete, which is a contradiction. If $[\Gamma(u)]$ has valency 2, then $[\Gamma(u)]\cong C_4$. Hence $|\Gamma_2(u)\cap \Gamma(v)|=1$ for any arc $(u,v)$, so $G_{u,v}$ is transitive on $\Gamma_2(u)\cap \Gamma(v)$, that is, $\Gamma$ is $(G,2)$-geodesic transitive. Thus by \cite[Corollary 1.4]{DJLP-2}, $\Gamma$ is the octahedron. It follows from Lemma \ref{lem:Octahedron} that either $G= S_2\wr S_3$, or $G$ is an index 2 subgroup of $S_2\wr S_3$ and $G$ projects onto $S_3$. Hence, case~(i) is valid. Now suppose that $[\Gamma(u)]$ has valency 1. Then $[\Gamma(u)]\cong 2\K_2$ and there are 8 edges between $\Gamma(u)$ and $\Gamma_2(u)$. Further, each arc lies in a unique triangle. Let $\Gamma(u)=\{v_1,v_2,v_3,v_4\}$ be such that $(v_1,v_2)$ and $(v_3,v_4)$ are two arcs. Then $|\Gamma_2(u)\cap \Gamma(v_1)|=2$, say $\Gamma_2(u)\cap \Gamma(v_1)=\{w_1,w_2\}$. Since $[\Gamma(v_1)]\cong 2\K_2$, it follows that $v_2$ is adjacent to neither $w_1$ nor $w_2$. As $|\Gamma_2(u)\cap \Gamma(v_2)|=2$, we have $|\Gamma_2(u)|\geqslant 4$. 
Since there are 8 edges between $\Gamma(u)$ and $\Gamma_2(u)$ and since $G_u$ is transitive on $\Gamma_2(u)$, we obtain that $8\mid |\Gamma_2(u)|$, and so $|\Gamma_2(u)|\in\{4,8\}$. Suppose first that $|\Gamma_2(u)|=4$. As noted above, $v_2$ is not adjacent to $w_1$ or $w_2$. Set $\Gamma_2(u)\cap\Gamma(v_2)=\{w_3,w_4\}$. Then $\Gamma_2(u)=\{w_1,w_2,w_3,w_4\}$. Since $[\Gamma(v_1)]\cong [\Gamma(v_2)] \cong 2\K_2$, it follows that $w_1,w_2$ are adjacent and, similarly, $w_3,w_4$ are adjacent. Since $|\Gamma_2(u)|=4$ and there are 8 edges between $\Gamma(u)$ and $\Gamma_2(u)$, we must have $|\Gamma(u)\cap \Gamma(w_i)|=2$. Since $v_2,w_1$ are nonadjacent, $w_1$ is adjacent either to $v_3$ or to $v_4$, say $v_3$. Then $\Gamma(u)\cap \Gamma(w_1)=\{v_1,v_3\}$. As each arc lies in a unique triangle and $(v_1,w_1,w_2)$ is a triangle, it follows that $v_3$ is not adjacent to $w_2$. Hence $v_3$ is adjacent to either $w_3$ or $w_4$, say $w_3$. Then $\Gamma(v_3)=\{u,v_4,w_1,w_3\}$. Since $[\Gamma(v_3)]\cong 2\K_2$ and $u,v_4$ are adjacent, it follows that $w_1,w_3$ are adjacent. Thus, $\Gamma(w_1)=\{v_1,w_2,v_3,w_3\}$. Finally, as $|\Gamma_2(u)\cap \Gamma(v_4)|=2$ and $v_4$ is adjacent to neither $w_1$ nor $w_3$, $v_4$ is adjacent to both $w_2$ and $w_4$. Since $[\Gamma(v_4)] \cong 2\K_2$ and $(v_3,u,v_4)$ is a triangle, it follows that $w_2$, $w_4$ are adjacent. Now, the graph $\Gamma$ is completely determined and $\Gamma\cong \Hamming(2,3)$. By \cite[Theorem 9.2.1]{BCN}, $\Gamma$ is $(\Aut\Gamma,2)$-distance transitive where $\Aut\Gamma\cong S_3\wr S_2$. Suppose that $G$ is a proper subgroup of $\Aut\Gamma$. Since $G_u$ is transitive on $\Gamma(u)$ and $|\Gamma(u)|=4$, $|G_u|$ is divisible by 4, so $|G|$ is divisible by $4|\VGamma|=36$. It follows that $|G|=36$, so $G$ is an index $2$ subgroup of $S_3\wr S_2$. Finally, as $G_u$ is transitive on $\Gamma(u)$, $G_u$ projects onto $S_2$. Thus~(ii) is valid. Let us now consider the case when $|\Gamma_2(u)|=8$. 
Then for each $z\in \Gamma_2(u)$, there is a unique 2-geodesic between $u$ and $z$. Hence there is a one-to-one correspondence between the set of 2-geodesics starting from $u$ and the set of vertices in $\Gamma_2(u)$. Since $G_u$ is transitive on $\Gamma_2(u)$, it follows that $G_u$ is transitive on the set of 2-geodesics starting from $u$, so $\Gamma$ is $(G,2)$-geodesic transitive. Therefore by Lemma \ref{2gt-val4}, $\Gamma$ is the line graph of a connected cubic $(G,3)$-arc transitive graph. Therefore~(iii) is valid. Assume now that the valency is 5. Let $(u,v)$ be an arc. Since $\Gamma$ is $G$-arc transitive, the induced subgraph $[\Gamma(u)]$ is vertex transitive. As $\Gamma$ has girth 3 and non-complete, the valency $k$ of $[\Gamma(u)]$ is at most $3$. Since $[\Gamma(u)]$ is undirected, it follows that $[\Gamma(u)]$ has $5k/2$ edges, and so $k$ is even; that is, $k=2$. Thus $[\Gamma(u)]\cong C_5$. Set $\Gamma(u)=\{v_1,v_2,v_3,v_4,v_5\}$ with $v_1=v$ and assume $(v_1,\ldots,v_5)$ is a 5-cycle. Then $|\Gamma_2(u)\cap \Gamma(v_1)|=2$ and say $\Gamma_2(u)\cap \Gamma(v_1)=\{w_1,w_2\}$. Then $\Gamma(v_1)=\{u,v_2,v_5,w_1,w_2\}$. As $[\Gamma(v_1)]\cong C_5$ and $(v_2,u,v_5)$ is a 2-arc, it follows that $w_1,w_2$ are adjacent, $v_2$ is adjacent to one of $w_1$ and $w_2$ and $v_5$ is adjacent to the other. Without loss of generality, assume $v_2$ is adjacent to $w_1$ and $v_5$ is adjacent to $w_2$. In particular, $v_2$ and $w_2$ are not adjacent. Moreover, $2\leqslant c_2\leqslant 4$. Since there are 10 edges between $\Gamma(u)$ and $ \Gamma_2(u)$, we have $10=c_2|\Gamma_2(u)|$, so $c_2=2$ and $|\Gamma_2(u)|=5$. Since $|\Gamma_2(u)\cap \Gamma(v_2)|=2$, there exists $w_3$ in $\Gamma_2(u)$ which is adjacent to $v_2$, and so $\Gamma(v_2)=\{u,v_1,v_3,w_1,w_3\}$. Note that $(w_1,v_1,u,v_3)$ is a 3-arc, and as $[\Gamma(v_2)]\cong C_5$, it follows that $w_3$ is adjacent to both $v_3$ and $w_1$. 
Since $G_u$ is transitive on $\Gamma_2(u)$, $[\Gamma_2(u)]$ is a vertex transitive graph. Recall that $w_1$ is adjacent to $w_2$ and $w_3$. It follows that $[\Gamma_2(u)]\cong C_5$. Thus $|\Gamma_3(u)\cap \Gamma(w_1)|=1$, say $\Gamma_3(u)\cap \Gamma(w_1)=\{e\}$. Then $(v_1,w_1,e)$ and $(v_2,w_1,e)$ are two 2-geodesics. As $c_2=2$, $|\Gamma(v_1)\cap \Gamma(e)|=|\Gamma(v_2)\cap \Gamma(e)|=2$. Hence $\{w_1,w_2,w_3\}\subseteq \Gamma_2(u)\cap \Gamma(e)$. Since $|\Gamma_2(u)\cap \Gamma(v_3)|=2$, there exists $w_4(\neq w_3)\in \Gamma_2(u)$ such that $v_3,w_4$ are adjacent. Noting that $\Gamma(u)\cap \Gamma(w_1)=\{v_1,v_2\}$ and $\Gamma(u)\cap \Gamma(w_2)=\{v_1,v_5\}$, we find $w_4\notin \{w_1,w_2,w_3\}$. Since $[\Gamma(v_3)]\cong C_5$ and $(w_3,v_2,u,v_4)$ is a 3-arc, it follows that $w_4$ is adjacent to both $v_4$ and $w_3$. As $(v_3,w_3,e)$ is a 2-geodesic, $|\Gamma(v_3)\cap \Gamma(e)|=2$, so $w_4\in \Gamma_2(u)\cap \Gamma(e)$. Now $(v_4,w_4,e)$ is a 2-geodesic, so $|\Gamma(v_4)\cap \Gamma(e)|=2$, hence $\Gamma_2(u)\cap \Gamma(v_4)\subset \Gamma(e)$. Let the remaining vertex of $\Gamma_2(u)$ be $w_5$. Since $|\Gamma(u)\cap \Gamma(w_5)|=2$, it follows that $w_5$ is adjacent to both $v_4,v_5$. Hence $\Gamma_2(u)\cap \Gamma(v_4)=\{w_4,w_5\}\subset \Gamma(e)$. Thus $\Gamma_2(u)=\Gamma(e)$, so $\Gamma_3(u)=\{e\}$. Now we have completely determined the graph $\Gamma$, and this graph is the icosahedron. Finally, by Lemma \ref{lem:Icosahedron}, $G\cong S_2\times A_5$ or $A_5$. \end{proof} \begin{proof}[The proof of Theorem~\ref{thm:small val}] If the valency of $\Gamma$ is 2 or the girth is greater than 4, then $\Gamma$ cannot be $(G,2)$-distance transitive, but not $(G,2)$-arc transitive. Hence the valency is at least 3. If the valency and the girth are both equal to 3, then $\Gamma=\K_4$. Hence Theorem~\ref{thm:small val} follows from Proposition~\ref{lem:valency 3}, in the case of girth 4, and from Proposition~\ref{2dtval4-girth3} in the case of girth~4. 
\end{proof} \end{document}
\begin{document} \title{High-order Foldy-Wouthuysen transformations of the Dirac and Dirac-Pauli Hamiltonians in the weak-field limit} \author{Tsung-Wei Chen} \email{[email protected]}\affiliation{Department of Physics, National Sun Yat-sen University, Kaohsiung 80424, Taiwan} \author{Dah-Wei Chiou} \email{[email protected]} \affiliation{Department of Physics and Center for Condensed Matter Sciences, National Taiwan University, Taipei 10617, Taiwan} \begin{abstract} The low-energy and weak-field limit of Dirac equation can be obtained by an order-by-order block diagonalization approach to any desired order in the parameter $\boldsymbol{\pi}/mc$ ($\boldsymbol{\pi}$ is the kinetic momentum and $m$ is the mass of the particle). In the previous work, it has been shown that, up to the order of $(\boldsymbol{\pi}/mc)^8$, the Dirac-Pauli Hamiltonian in the Foldy-Wouthuysen (FW) representation may be expressed as a closed form and consistent with the classical Hamiltonian, which is the sum of the classical relativistic Hamiltonian for orbital motion and the Thomas-Bargmann-Michel-Telegdi (T-BMT) Hamiltonian for spin precession. In order to investigate the exact validity of the correspondence between classical and Dirac-Pauli spinors, it is necessary to proceed to higher orders. In this paper, we investigate the FW representation of the Dirac and Dirac-Pauli Hamiltonians by using Kutzelnigg's diagonalization method. We show that the Kutzelnigg diagonalization method can be further simplified if nonlinear effects of static and homogeneous electromagnetic fields are neglected (in the weak-field limit). Up to the order of $(\boldsymbol{\pi}/mc)^{14}$, we find that the FW transformation for both Dirac and Dirac-Pauli Hamiltonians is in agreement with the classical Hamiltonian with the gyromagnetic ratio given by $g=2$ and $g\neq2$ respectively. 
Furthermore, with higher-order terms at hand, it is demonstrated that the unitary FW transformation admits a closed form in the low-energy and weak-field limit. \end{abstract} \pacs{03.65.Pm, 11.10.Ef, 71.70.Ej} \maketitle \section{Introduction} The relativistic quantum theory for a spin-1/2 particle is described by a spinor satisfying the Dirac equation \cite{Dirac1928, Dirac1982}. The four-component spinor of the Dirac particle is composed of two two-component Weyl spinors which correspond to the particle and antiparticle parts. Rigourously, because of the non-negligible probability of creation/annihilation of particle-antiparticle pairs, the Dirac equation is self-consistent only in the context of quantum field theory. For the purpose of obtaining the low-energy limit of Dirac equation without accounting for the field-theory particle-antiparticle interaction, the Dirac equation is converted to a two-component equation. The Pauli substraction method eliminates the two small components from the four-component spinor of Dirac equation and leads to the block-diagonal but energy-dependent effective Hamiltonian in which some non-hermitian terms may appear. Apart from the difficulties, in the seminal paper \cite{Foldy1950}, Foldy and Wouthuysen (FW) established a series of successive unitary transformations via decomposing the Hamiltonian into even and odd matrices; a block-diagonalized effective Hamiltonian can be constructed up to a certain order of $\boldsymbol{\pi}/mc$. The series of successive unitary transformations in the FW method can be replaced by a single transformation via the L\"{o}wding partitioning method \cite{Lowdin1951,Winkler2003}.\footnote{It should be emphasized that The FW transformation is not meant to be used for the second quantization, and furthermore it exists only in the weak-field limit or in some special cases (as studied in \secref{sec:exact solution}). 
If we ruthlessly try to second quantize the theory in the FW representation, whether we can succeed or not, we should perform the second quantization in a way very different from the conventional approach. This is because, in the FW representation, we encounter the non-locality due to \emph{zitterbewegung} (see also P.\ Strange in Ref.~\cite{Winkler2003}).} Furthermore, Eriksen developed a systematic derivation of the unitary transformation and gave an exact FW transformation for a charged spin-1/2 particle in interaction with non-explicitly time-dependent field~\cite{Eriksen1958}. The validity of the Eriksen method is investigated in Ref.\ \cite{Vries1968}. In Ref.\ \cite{Kutz1990}, Kutzelnigg developed a single unitary transformation that allows one to obtain the block-diagonalized Dirac Hamiltonian without evoking the decomposition of even and odd matrices used in FW method. Alternatively, the Dirac Hamiltonian can also be diagonalized via expansion in powers of Planck constant $\hbar$ \cite{Silenko03,Bliokh05,Goss07}, which enables us to investigate the influences of quantum corrections on the classical dynamics in strong fields \cite{Silenko08}. On the other hand, the classical relativistic dynamics for a charged particle with intrinsic spin in static and homogeneous electromagnetic fields is well understood. The orbital motion is governed by the classical relativistic Hamiltonian \begin{equation}\label{H orbit} H^c_\mathrm{orbit} =\sqrt{c^2\boldsymbol{\pi}^2+m^2c^4}\, +V(\mathbf{x}), \end{equation} where $\boldsymbol{\pi}=\mathbf{p}-q\mathbf{A}/c$ is the kinetic momentum operator with $\mathbf{A}$ being the magnetic vector potential~\cite{Jackson} and $V(\mathbf{x})$ the electric potential energy. 
The spin motion is governed by the Thomas-Bargmann-Michel-Telegdi (T-BMT) equation
For a free Dirac particle, it has been shown that the exactly diagonalized Dirac Hamiltonian corresponds to the classical relativistic Hamiltonian \cite{Foldy1950}. In Refs.~\cite{Rubinow1963,Rafa1964}, it was shown that the T-BMT equation may be derived from the WKB wavefunction solutions to the Dirac equation. In the presence of external electromagnetic fields, the Dirac Hamiltonian in the FW representation has been block-diagonalized up to the order of $(\boldsymbol{\pi}/mc)^4$, but the connection is not explicit \cite{Froh1993}. Recently, in Ref.~\cite{TWChen2010}, it has been shown that up to $(\boldsymbol{\pi}/mc)^8$, the resulting FW transformed Dirac, or more generic, Dirac-Pauli \cite{Pauli1941} Hamiltonian in the presence of static and homogeneous electromagnetic fields may agree with the classical Hamiltonian [Eq.~(\ref{H classical})] in the weak-field limit. The order-by-order block-diagonalization methods to higher orders of $\boldsymbol{\pi}/mc$ can be used to investigate the validity of the connection. Furthermore, if the connection is indeed establishable (in a closed form), corrections to the classical T-BMT equation due to field inhomogeneity, if any, could also be included. Motivated by these regards, we adopt a systematic method that can substantially simplify the calculation of FW transformation to any higher orders in the FW representation of Dirac Hamiltonian. It must be stressed that block diagonaliation of a four-component Hamiltonian into two uncoupled two-component Hamiltonian is not unique, as any composition with additional unitary transformations that act separately on the positive and negative energy blocks will also do the job. 
Different block-diagonalization transformations are however unitarily equivalent to one another, and thus yield the same physics.\footnote{Once the Hamiltonian is block-diagonalized, further unitary transformations that do not mix the positive and negative energy blocks merely rotate the $2\times2$ Pauli matrices independently for the two blocks, keeping the physics unchanged.} The truly vexed question is: whether does the unitary bock-diagonalization transformation exist at all? In the absence of electric fields, we will show that the answer is affirmative. On the other hand, in the presence of electric fields, the answer seems to be negative, as the energy interacting with electromagnetic fields renders the probability of creation/annihilation of particle-antiparticle pairs non-negligible and thus the particle-antiparticle separation inconsistent. Nevertheless, in the weak-field limit, the interacting energy is well below the Dirac energy gap ($2mc^2$) and we will demonstrate that the unitary transformation exists and indeed admits a closed form in the low-energy and weak-field limit. In this article, we derive the FW transformed Dirac Hamiltonian up to the order of $(\boldsymbol{\pi}/mc)^{14}$ by using Kutzelnigg's diagonalization method \cite{Kutz1990}. The key feature of the Kutzelnigg approach is that it provides an exact block-diagonalized form of Dirac Hamiltonian involving a self-consistent equation [see Eq.~(\ref{EqX})]. The explicit form of the FW transformed Dirac Hamiltonian can be obtained by solving the self-consistent equation. We will show that the Kutzelnigg method can be further simplified in the weak-field limit, and this simplification enables us to obtain the higher-order terms systematically. 
We will show that the block diagonalization of Dirac and Dirac-Pauli Hamiltonians up to the order of $(\boldsymbol{\pi}/mc)^{14}$ in the Foldy-Wouthuysen representation is in agreement with classical Hamiltonian, and the closed form of the unitary transformation can be found. This article is organized as follows. In Sec.~\ref{sec:method}, we construct a unitary operator based on the Kutzelnigg method to obtain the exact FW transformed Dirac Hamiltonian and the self-consistent equation. The exact solution of the self-consistent equation is discussed. The FW transformed Dirac Hamiltonian in the presence of inhomogeneous electromagnetic fields are derived in Sec.~\ref{sec:fields}. The effective Hamiltonian up to $(\boldsymbol{\pi}/mc)^4$ for the inhomogeneous electromagnetic field is in agreement with the previous result shown in Refs.~\cite{Foldy1950,Froh1993}. The static and homogeneous electromagnetic fields are considered in Sec.~\ref{sec:HFW}, where the simplification of the effective Hamiltonian is discussed and the FW transformed Dirac Hamiltonian is obtained up to $(\boldsymbol{\pi}/mc)^{14}$. In Sec.~\ref{sec:TBMT}, the comparison with the classical relativistic Hamiltonian and T-BMT equation with $g=2$ is discussed. The FW transformed Dirac-Pauli Hamiltonian is shown in Sec.~\ref{sec:TBMT2}. In Sec.~\ref{sec:EUT}, we demonstrate that the exact unitary transformation matrix in the low-energy and weak-field limit can be formally obtained. The conclusions are summarized in Sec.~\ref{sec:conclusions}. Some calculational details are supplemented in Appendices. \section{Kutzelnigg diagonalization method for Dirac Hamiltonian}\label{sec:method} In this section, we use the unitary operator based on the Kutzelnigg diagonalization method \cite{Kutz1990} and apply the unitary operator to the Dirac Hamiltonian. We obtain the formally exact Foldy-Wouthuysen transformed Hamiltonian by requiring that the unitary transformation yields a block-diagonal form. 
The Dirac Hamiltonian in the presence of electromagnetic fields can be written as \begin{equation}\label{Dirac} \begin{split} H_D&=\left(\begin{array}{cc} V+mc^2& c\boldsymbol{\sigma}\cdot\boldsymbol{\pi}\\ c\boldsymbol{\sigma}\cdot\boldsymbol{\pi}&V-mc^2 \end{array}\right)\\ &\equiv\left(\begin{array}{cc}h_+& h_0\\ h_0&h_- \end{array}\right) \end{split} \end{equation} where $\boldsymbol{\pi}=\mathbf{p}-q\mathbf{A}/c$ is the kinetic momentum operator and $V=q\phi$. The electric field and magnetic field are $\mathbf{E}=-\nabla\phi$ and $\mathbf{B}=\nabla\times\mathbf{A}$, respectively. We note that in the static case, $\nabla\times\mathbf{E}=0$, and thus, $\boldsymbol{\pi}\times\mathbf{E}=-\mathbf{E}\times\boldsymbol{\pi}$. The wave function of the Dirac equation $H_D\psi=i\hbar\frac{\partial}{\partial t}\psi$ is a two two-spinors \begin{equation} \psi=\left(\begin{array}{c} \psi_+\\ \psi_- \end{array}\right). \end{equation} A unitary operator $U$ which \emph{formally} decouples positive and negative energy states can be written as the following form \cite{Kutz1990} \begin{equation}\label{U} U=\left(\begin{array}{cc} Y&YX^{\dag}\\ -ZX&Z \end{array}\right), \end{equation} where operators $Y$ and $Z$ are defined as \begin{equation}\label{Def:YandZ} Y=\frac{1}{\sqrt{1+X^{\dag}X}}, \qquad Z=\frac{1}{\sqrt{1+XX^{\dag}}}. 
\end{equation} Applying the unitary transformation Eq.~(\ref{U}) to Eq.~(\ref{Dirac}), $UH_DU^{\dag}$ is of the form \begin{equation}\label{UHU} UH_DU^{\dag}=\left(\begin{array}{cc} H_{\mathrm{FW}}&H_{X^{\dag}}\\ H_{X}&H' \end{array}\right) \end{equation} The unitary transformation transforms the wave function $\psi$ to a two-spinor, \begin{equation} U\left(\begin{array}{c} \psi_+\\ \psi_- \end{array}\right)=\left(\begin{array}{c} \psi_{\mathrm{FW}}\\ 0 \end{array}\right), \end{equation} where the wave function for the negative energy state must be zero and the FW transformed wave function is given by \begin{equation} \psi_{\mathrm{FW}}=\sqrt{1+X^{\dag}X}\,\psi_+. \end{equation} We require that the transformed Hamiltonian takes the block-diagonal form: \begin{equation}\label{UHU-D} UH_DU^{\dag}=\left(\begin{array}{cc} H_{\mathrm{FW}}&0\\ 0&H' \end{array}\right). \end{equation} We find that the requirement of the vanishing off-diagonal term $H_{X}=0$ yields the constraint on the $X$ operator: \begin{equation}\label{EqX} X=\frac{1}{2mc^2}\left\{-Xh_0X+h_0+[V,X]\right\}. \end{equation} Equation (\ref{EqX}) is a self-consistent formula for operator $X$. The resulting FW transformed Hamiltonian $H_{\mathrm{FW}}$ is given by \begin{equation}\label{Un-HFW} \begin{split} H_{\mathrm{FW}}=Y\left(h_++X^{\dag}h_0+h_0X+X^{\dag}h_-X\right)Y. \end{split} \end{equation} Because the operator $X$ plays an important role in generating the FW transformed Hamiltonian and the corresponding unitary operator, the operator $X$ for the Dirac Hamiltonian is hereafter called the \emph{Dirac generating operator}. To our knowledge, the exact solution of Eq.\ (\ref{EqX}) for a general potential is still unknown except for the two cases: a free particle and a particle subject only to magnetic fields. For the case with a nontrivial electric potential, we assume that the solution of Eq. 
(\ref{EqX}) can be obtained by using series expansion.\footnote{The series solution of the Dirac generating operator $X$ is not unique because any unitary transformation would lead to a satisfactory $X$ as long as it does not mix positive and negative energy states. This implies that the form of the block-diagonalized Hamiltonian is not unique. In this regard, we focus only on the series solution of $X$ that can correctly generate the FW transformed Dirac Hamiltonian linear in EM fields and up to order of $(\boldsymbol{\pi}/mc)^4$, as shown in Sec.\ref{sec:fields}.} \subsection{Exact solution of Dirac generating operator}\label{sec:exact solution} For a free particle ($\mathbf{A}=0$ and $V=0$), it can be shown that Eq. (\ref{EqX}) has an exact solution \begin{equation}\label{free-X} X=\frac{c(\boldsymbol{\sigma}\cdot\mathbf{p})}{mc^2+E_p}, \end{equation} where $E_p=\sqrt{m^2c^4+\mathbf{p}^2c^2}$. Using Eqs. (\ref{U}) and (\ref{Def:YandZ}), the unitary transformation matrix can be written as \begin{equation}\label{U-free} U=\frac{1}{\sqrt{2E_p(E_p+mc^2)}}\left(\begin{array}{cc} E_p+mc^2&c\boldsymbol{\sigma}\cdot\mathbf{p}\\ -c\boldsymbol{\sigma}\cdot\mathbf{p}&E_p+mc^2\\ \end{array}\right). \end{equation} Equation (\ref{U-free}) is the same with the result obtained from the standard FW transformation~\cite{Foldy1950}. The resulting FW transformed free-particle Dirac Hamiltonian is block-diagonalized, \begin{equation}\label{FWfree} H_{\mathrm{FW}}=\left(\begin{array}{cc} E_p&0\\ 0&-E_p\\ \end{array}\right). \end{equation} It is interesting to note that in the absence of electric field (i.e., $V=\text{const}$), Eq. (\ref{EqX}) also admits an exact solution \begin{equation}\label{Magnetic-X} X=\frac{1}{mc^2+E_{\pi}}(c\boldsymbol{\sigma}\cdot\boldsymbol{\pi}), \end{equation} where $E_{\pi}=\sqrt{m^2c^4+c^2(\boldsymbol{\sigma}\cdot\boldsymbol{\pi})^2}$. This can be proved by directly substituting Eq. (\ref{Magnetic-X}) into Eq. (\ref{EqX}) with $V=\text{const}$. 
The exact unitary transformation matrix can be formally constructed and the resulting FW transformed Hamiltonian can be obtained. In the presence of a nontrivial electric potential, it is difficult to obtain an exact solution because the term $[V,X]$ does not vanish. Therefore, the diagonalization procedure for the Dirac Hamiltonian must be performed order-by-order. It is necessary to choose a dimensionless quantity as the order-expanding parameter. We note that the form of Eq. (\ref{Magnetic-X}) can be rewritten as $\{1/[1+(\boldsymbol{\sigma}\cdot\boldsymbol{\xi})^2]\}\boldsymbol{\sigma}\cdot\boldsymbol{\xi}$ with the dimensionless quantity $\boldsymbol{\xi}=\boldsymbol{\pi}/mc$. The order-by-order block-diagonalized Hamiltonian can be expressed in terms of the order parameter $\boldsymbol{\xi}$. In this paper, we further focus on the weak-field limit in order to compare the block-diagonalizaed Hamiltonian with the classical counterpart. \subsection{Series expansion of Dirac generating operator} The upper-left diagonal term of $UH_DU^{\dag}$ is the FW transformed Hamiltonian $H_{\mathrm{FW}}$ under the constraint Eq.~(\ref{EqX}) and it can be written as (see Appendix \ref{App:Ham}) \begin{equation}\label{HFW} \begin{split} H_{\mathrm{FW}}=mc^2+e^{G/2}Ae^{-G/2}, \end{split} \end{equation} where operators $A$ (hereafter called the \emph{Dirac energy operator}) and $G$ (hereafter called the \emph{Dirac exponent operator}) are defined as \begin{equation}\label{AG} A\equiv V+h_0X, \qquad G\equiv\ln\left(1+X^{\dag}X\right). \end{equation} The other requirement $H_{X^{\dag}}=0$ gives the constraint on the hermitian of the Dirac generating operator $X^{\dag}$, which is simply the hermitian conjugate of Eq.~(\ref{EqX}), namely, $H_{X}^{\dag}=H_{X^{\dag}}$. 
It can be shown that $H_{X}=0$ and $H_{X^{\dag}}=0$ imply that (see Appendix \ref{App:Ham}) \begin{equation}\label{HFWdag} H_{\mathrm{FW}}^{\dag}=mc^2+e^{-G/2}A^{\dag}e^{G/2} \end{equation} and then Eq.~(\ref{HFW}) is a hermitian operator since the Dirac exponent operator is a hermitian operator. Equation (\ref{HFW}) can be simplified if we rewrite the Dirac energy operator $A$ as the sum of its hermitian ($A^H$) and anti-hermitian ($A^N$) parts, $A=A^H+A^N$, where \begin{equation} \begin{split} A^H=\frac{A+A^{\dag}}{2}, \qquad A^N=\frac{A-A^{\dag}}{2}. \end{split} \end{equation} Combining Eqs.~(\ref{HFW}) together with (\ref{HFWdag}), the FW transformed Dirac Hamiltonian $H_{\mathrm{FW}}$ is made up of $H_{\mathrm{FW}}=\left(H_{\mathrm{FW}}+H_{\mathrm{FW}}^{\dag}\right)/2$, and it can be written as \begin{equation}\label{HFW NE} H_{\mathrm{FW}}=mc^2+A^H+S, \end{equation} where the \emph{Dirac string operator} $S$ is given by \begin{equation}\label{EqS NE} \begin{split} S&=\frac{1}{2}[G,A^N]+\frac{1}{2!2^2}[G,[G,A^H]]+\frac{1}{3!2^3}[G,[G,[G,A^N]]]\\ &~~+\frac{1}{4!2^4}[G,[G,[G,[G,A^H]]]]+\cdots, \end{split} \end{equation} where we have used the Baker-Campbell-Hausdorff formula \cite{Sak}: $e^{B}De^{-B}=D+[B,D]+[B,[B,D]]/2!+[B,[B,[B,D]]]/3!\cdots$. We note that the anti-hermitian part of the Dirac energy operator always appears in those terms with odd numbers of Dirac exponent operators, and the hermitian part of the Dirac energy operator always appears in those terms with even numbers of Dirac exponent operators. Since the commutator of two hermitian operators must be an anti-hermitian operator, it can be shown that the Dirac string operator is a hermitian operator. In order to compare FW transformed Dirac Hamiltonian to the classical Hamiltonian, we solve the self-consistent equation for $X$ [Eq.~(\ref{EqX})] by power series expansion in terms of orders of $1/c$, \begin{equation}\label{SeriesX} X=\frac{X_1}{c}+\frac{X_2}{c^2}+\frac{X_3}{c^3}+\cdots. 
\end{equation} $X_1$ is the first order of the Dirac generating operator, $X_2$ the second, and so on. Substituting Eq.~(\ref{SeriesX}) into Eq.~(\ref{EqX}), we can obtain each order of the Dirac generating operator $X_{\ell}$. For order of $1/c$ and $1/c^2$, we have \begin{equation}\label{App:EqX1} \begin{split} &2mX_1=\boldsigma\cdot\boldpi,\\ &2mX_{2}=0. \end{split} \end{equation} The expanding terms of the Dirac generating operator with third and higher orders can be determined by the following equations, \begin{equation}\label{App:EqXoe} \begin{split} 2mX_{2j}&=-\sum_{k_1+k_2=2j-1}X_{k_1}\boldsigma\cdot\boldpi X_{k_2}+[V,X_{2j-2}],\\ 2mX_{2j+1}&=-\sum_{k_1+k_2=2j}X_{k_1}\boldsigma\cdot\boldpi X_{k_2}+[V,X_{2j-1}], \end{split} \end{equation} where $j=1,2,3,\cdots$. Consider terms of even order of $1/c$, namely, $X_{2j}$. The fourth order of the Dirac generating operator $X_{4}$ is determined by $2mX_4=-(X_{1}\boldsigma\cdot\boldpi X_{2}+X_{2}\boldsigma\cdot\boldpi X_{1})+[V,X_2]$. Since the second order of $X$ is zero ($X_2=0$), the fourth order of $X$ also vanishes, i.e., $X_4=0$. The sixth order of $X$ is obtained by $2mX_6=-(X_{1}\boldsigma\cdot\boldpi X_{4}+X_{2}\boldsigma\cdot\boldpi X_{3}+X_{3}\boldsigma\cdot\boldpi X_{2}+X_{4}\boldsigma\cdot\boldpi X_{1})+[V,X_4]$. Because both the second and fourth order of the Dirac generating operators vanish, we find that the sixth order of Dirac generating operator $X_6$ is also zero, as well as $X_8$, $X_{10}$, and so on. Therefore, we have \begin{equation} X_2=X_4=X_6=\cdots=0, \end{equation} and the non-zero terms are those expanding terms of the Dirac generating operators with odd subscripts, namely, $X=X_1/c+X_3/c^3+X_5/c^5+\cdots$. 
Furthermore, since the operator $h_0=c\boldsigma\cdot\boldpi$ is of the order of $c$, the series expansion of $A=V+h_0X$ has only even powers of $c$: \begin{equation} A=A_0+\frac{A_2}{c^2}+\frac{A_4}{c^4}+\cdots, \end{equation} where the $\ell$th order of the Dirac energy operator $A_{\ell}$ is related to the $\ell$th order of Dirac generating operator $X_{\ell}$ by \begin{equation}\label{EqA} \begin{split} &A_0=V+\frac{h_0}{c}X_1,\\ &A_{\ell}=\frac{h_0}{c}X_{\ell+1}, \end{split} \end{equation} where $\ell=2,4,6,\cdots$. On the other hand, the series expansion of $\ln(1+y)$ is $\ln(1+y)=y-y^2/2+y^3/3-y^4/4+\cdots$. Because $y=X^{\dag}X$, the power series of $y$ contains only even powers of $c$: $y=y_2/c^2+y_4/c^4+y_6/c^6+\cdots$, where $y_{\ell}$ are given by \begin{equation}\label{y ell} y_{\ell}=\sum_{k_1+k_2=\ell}X^{\dag}_{k_1}X_{k_2}. \end{equation} For example, $y_6=X_1^{\dag}X_5+X_3^{\dag}X_3+X_5^{\dag}X_1$. Consequently, the Dirac exponent operator can only have terms with even powers of $c$ (we note that $G_0=0$) \begin{equation} G=\frac{G_2}{c^2}+\frac{G_4}{c^4}+\frac{G_6}{c^6}+\cdots, \end{equation} where the $\ell$th order of the Dirac exponent operator $G_{\ell}$ can be expressed in terms of $y_{\ell}$: \begin{equation}\label{G ell} \begin{split} G_{\ell}&=y_{\ell}-\frac{1}{2}\sum_{k_1+k_2=\ell}y_{k_1}y_{k_2}+\frac{1}{3}\sum_{k_1+k_2+k_3=\ell}y_{k_1}y_{k_2}y_{k_3}\\ &~~-\frac{1}{4}\sum_{k_1+\cdots+k_4=\ell}y_{k_1}y_{k_2}y_{k_3}y_{k_4}+\cdots. \end{split} \end{equation} For example, $G_6=y_6-(y_2y_4+y_4y_2)/2+y_2^3/3$. Therefore, the FW transformed Dirac Hamiltonian can be expanded in terms of $A_{\ell}$ and $G_{\ell}$ and has only even powers of $c$. 
That is, \begin{equation}\label{HFW sum} H_{\mathrm{FW}}=mc^2+\sum_{\ell}H^{(\ell)}_{\mathrm{FW}}, \end{equation} where the $\ell$th order of the FW transformed Dirac Hamiltonian denoted as $H_{\mathrm{FW}}^{(\ell)}$ ($\ell=0,2,4,6,\cdots$) are given by (up to $1/c^{12}$) \begin{equation}\label{EqExpandH} \begin{split} H_{\mathrm{FW}}^{(0)}&=A^H_0,\\ c^{\ell}H_{\mathrm{FW}}^{(\ell)}&=A^H_{\ell}+S_{\ell},\\ \end{split} \end{equation} where $\ell=2,4,6,\cdots,12$. The $\ell$th order of Dirac string operator $S_{\ell}$ is given by \begin{equation}\label{EqS} \begin{split} &S_{\ell}=\frac{1}{2}\mathop{\sum_{\ell_1+\ell_2=\ell}}[G_{\ell_1},A^N_{\ell_2}]\\ &~~+\frac{1}{2!2^2}\sum_{\ell_1+\ell_2+\ell_3=\ell}[G_{\ell_1},[G_{\ell_2},A^H_{\ell_3}]]\\ &~~+\frac{1}{3!2^3}\sum_{\ell_1+\cdots+\ell_4=\ell}[G_{\ell_1},[G_{\ell_2},[G_{\ell_3},A^N_{\ell_4}]]]\\ &~~+\frac{1}{4!2^4}\sum_{\ell_1+\cdots+\ell_5=\ell}[G_{\ell_1},[G_{\ell_2},[G_{\ell_3},[G_{\ell_4},A^H_{\ell_5}]]]]\\ &~~+\cdots. \end{split} \end{equation} As mentioned above, any unitary transformation would lead to a satisfactory generating operator as long as it does not mix positive and negative energy states. The non-uniqueness property of the generating operator can be easily seen as follows. If we perform the Kutzelnigg diagonalization method upon Eq.\ (\ref{UHU-D}) again, then we obtain another block-diagonalized Hamiltonian with a new operator equation for the generating operator. The new diagonalized Hamiltonian $H_{\mathrm{FW}}'$ is determined by Eq.\ (\ref{Un-HFW}) with the replacements: $h_-\rightarrow H'$, $h_0=0$, and $h_+ \rightarrow H_{\mathrm{FW}}$. The form of the new diagonalized Hamiltonian depends on the solution of the new generating operator. We use series expansion to construct the generating operator and require that the resulting generating operator can go back to the exact solution in the free-particle case where the Hamiltonian is block-diagonalized to Eq. (\ref{FWfree}). 
In this representation, the positive and negative energies are decoupled and have classical relativistic energy representation [\textit{c.f.}\ Eqs.\ (\ref{FWfree}) and (\ref{H orbit})], which is \emph{the} FW representation obtained in this article. Importantly, we will show that the series expansion of generating operator [Eq.\ (\ref{SeriesX})] can indeed generate the FW representation. In this sense, interestingly, we can obtain an exact solution of generating operator and find that the spin part of the resulting block-diagonalized Hamiltonian is equivalent to the T-BMT Hamiltonian. In the next section, we will show that by using Eqs.\ (\ref{EqExpandH}) and (\ref{EqS}) the effective Hamiltonian resulting from Foldy-Wouthuysen diagonalization method is equivalent to that from the Kutzelnigg diagonalization method up to terms with order of $(\boldsymbol{\pi}/mc)^4$, from which the fine structure, Darwin term and spin-orbit interaction can be deduced. \section{Inhomogeneous fields}\label{sec:fields} Up to this step, only two assumptions are made: (1) the electromagnetic fields are static, and (2) the Dirac generating operator $X$ can be solved by series expansion. We calculate the first two terms $H^{(0)}_{\mathrm{FW}}+H^{(2)}_{\mathrm{FW}}$ and show that the resulting Hamiltonian $H_{\mathrm{FW}}=mc^2+H^{(0)}_{FW}+H^{(2)}_{\mathrm{FW}}$ is in agreement with the previous result. The zeroth order of the FW transformed Dirac Hamiltonian is \begin{equation}\label{H0FW_1} H_{\mathrm{FW}}^{(0)}=A^H_0, \end{equation} where $A_0=V+(h_0/c)X_1$. The first order of the Dirac generating operator $X_1$ is given in Eq.~(\ref{App:EqX1}), which is valid for inhomogeneous fields. 
Using $[\pi_i,\pi_j]=\frac{iq\hbar}{c}\epsilon_{ijk}B_k$, we have $(\boldsigma\cdot\boldpi)^2=\boldsymbol{\pi}^2-\frac{q\hbar}{c}\boldsigma\cdot\mathbf{B}$, and $H_{\mathrm{FW}}^{(0)}$ [Eq.~(\ref{H0FW_1})] becomes \begin{equation}\label{H0FW_2} H^{(0)}_{\mathrm{FW}}=V+\frac{\boldsymbol{\pi}^2}{2m}-\frac{q\hbar}{2mc}\boldsigma\cdot\mathbf{B}. \end{equation} We note that $A_0$ is already a hermitian operator, and thus $A^N_0=0$. The second and third terms of Eq.~(\ref{H0FW_2}) are the kinetic energy and Zeeman energy. The second order $H_{\mathrm{FW}}^{(2)}$ is given by \begin{equation}\label{H2FW_1} c^2H^{(2)}_{\mathrm{FW}}=A^H_2+S_2, \end{equation} where $A_2=(h_0/c)X_3$, $S_2=[G_2,A_0^N]/2$. For $X_3$, from Eq.~(\ref{App:EqXoe}) we have $2mX_3=-X_1\boldsigma\cdot\boldpi X_1+[V,X_1]$. Using $[V,\,\boldsigma\cdot\boldpi]=-iq\hbar\boldsigma\cdot\mathbf{E}$, we obtain \begin{equation}\label{App:EqX3} X_3=-\frac{1}{4}\frac{T}{m^2}\boldsigma\cdot\boldpi-\frac{1}{4}\frac{iq\hbar}{m^2}\boldsymbol{\sigma}\cdot\mathbf{E}, \end{equation} where \begin{equation} T\equiv(\boldsigma\cdot\boldpi)^2/2m \end{equation} is the kinetic energy operator. The operator $X_3$ is valid for inhomogeneous fields. From Eqs.~(\ref{y ell}) and (\ref{G ell}), the operator $G_2$ is $X^{\dag}_1X_1=T/2m$. Since $A^N_0=0$, we have $S_2=[G_2,A^N_0]/2=0$. Substituting Eq.~(\ref{App:EqX3}) into $A_2$, we have \begin{equation} A_2=-\frac{(\boldsigma\cdot\boldpi)^4}{8m^3}-\frac{iq\hbar}{4m^2}(\boldsigma\cdot\boldpi)(\boldsigma\cdot\mathbf{E}). \end{equation} The hermitian part of $A_2$ is given by $A^H_2=(A_2+A^{\dag}_2)/2$, \begin{equation}\label{H2FW_2} A^H_2=-\frac{(\boldsigma\cdot\boldpi)^4}{8m^3}-\frac{iq\hbar}{8m^2}[\boldsigma\cdot\boldpi,\boldsigma\cdot\mathbf{E}]. 
\end{equation} Using $\sigma_i\sigma_j=\delta_{ij}+i\epsilon_{ijk}\sigma_k$, we have $[\boldsigma\cdot\boldpi,\boldsigma\cdot\mathbf{E}]=-i\hbar\nabla\cdot\mathbf{E}+i\boldsymbol{\sigma}\cdot\boldsymbol{\pi}\times\mathbf{E}-i\boldsymbol{\sigma}\cdot\mathbf{E}\times\boldsymbol{\pi}$. For the static case, we have $\boldsymbol{\pi}\times\mathbf{E}=-\mathbf{E}\times\boldsymbol{\pi}$. Therefore, neglecting terms of second order in the magnetic field, Eq.~(\ref{H2FW_2}) becomes \begin{equation}\label{H2FW_3} \begin{split} H^{(2)}_{\mathrm{FW}}&=-\frac{\boldsymbol{\pi}^4}{8m^3c^2}+\frac{q\hbar}{8m^3c^3}\left[\boldsymbol{\pi}^2(\boldsigma\cdot\mathbf{B})+(\boldsigma\cdot\mathbf{B})\boldsymbol{\pi}^2\right]\\ &~~-\frac{q\hbar}{4m^2c^2}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})-\frac{q\hbar^2}{8m^2c^2}\nabla\cdot\mathbf{E}. \end{split} \end{equation} The first term of Eq.~(\ref{H2FW_3}) is the relativistic correction to the kinetic energy. The second term of Eq.~(\ref{H2FW_3}) is the relativistic correction to the Zeeman energy. The third and fourth terms of Eq.~(\ref{H2FW_3}) are the spin-orbit interaction and the Darwin term which provides heuristic evidence of the Zitterbewegung phenomenon \cite{Darwin1928}. Combining Eqs.\ (\ref{H0FW_2}) and (\ref{H2FW_3}), we obtain the Foldy-Wouthuysen transformed Dirac Hamiltonian up to terms with $(\boldsymbol{\pi}/mc)^4$: \begin{equation}\label{HFW02} \begin{split} H_{\mathrm{FW}}&=mc^2+H^{(0)}_{\mathrm{FW}}+H^{(2)}_{\mathrm{FW}}\\ &=mc^2+V+\frac{\boldsymbol{\pi}^2}{2m}-\frac{q\hbar}{2mc}\boldsigma\cdot\mathbf{B}-\frac{\boldsymbol{\pi}^4}{8m^3c^2}\\ &~~+\frac{q\hbar}{8m^3c^3}\left[\boldsymbol{\pi}^2(\boldsigma\cdot\mathbf{B})+(\boldsigma\cdot\mathbf{B})\boldsymbol{\pi}^2\right]\\ &~~-\frac{q\hbar^2}{8m^2c^2}\nabla\cdot\mathbf{E}-\frac{q\hbar}{4m^2c^2}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}). 
\end{split} \end{equation} Equation (\ref{HFW02}) is in agreement with the earlier results \cite{Foldy1950, Froh1993} which are obtained by the standard FW method. If we take into account the terms of the second order in electromagnetic fields, our result gives $-\frac{e^2\hbar^2}{8m^3c^4}\mathbf{B}^2$. However, the FW diagonalization method shows that the terms of the second order in electromagnetic field should be $\frac{e^2\hbar^2}{8m^3c^4}(\mathbf{E}^2-\mathbf{B}^2)$. In comparison with the standard FW transformation method, this discrepancy suggests that the assumption that the series expansion of $X$ [Eq.~(\ref{SeriesX})] exists is valid only in the low-energy and weak-field limit or in the absence of an electric field. In the following sections, we will obtain the FW transformed Dirac and Dirac-Pauli Hamiltonians in the low-energy and weak-field limit. \section{FW transformed Dirac Hamiltonian}\label{sec:HFW} The previous section shows that the Kutzelnigg diagonalization method is valid when we consider only terms with linear electromagnetic fields. We focus only on linear terms of electromagnetic fields in comparison with the T-BMT equation. In this section, we consider the static and homogeneous electromagnetic field and neglect the product of fields in the FW transformed Dirac Hamiltonian. The FW transformed Dirac Hamiltonian contains the Dirac energy operator and Dirac string operator [see Eqs.\ (\ref{EqExpandH}) and (\ref{EqS})]. We will calculate $H_{\mathrm{FW}}^{(\ell)}$ from $\ell=0$ to $\ell=12$. Nevertheless, we have to emphasize that Eq.~(\ref{EqA}) implies that the $\ell$th order of Dirac energy operator is obtained from the next order of the Dirac generating operator. Therefore, we have to obtain the term of the generating operator up to the order of $1/c^{13}$, i.e. $X_{13}$. The explicit forms of the expanding terms of the generating operators can be derived by using Eqs.\ (\ref{App:EqX1}) and (\ref{App:EqXoe}). 
Up to the order of $1/c^{13}$, we have \begin{widetext} \begin{equation}\label{App:SolveX} \begin{split} &X_1=\frac{\boldsigma\cdot\boldpi}{2m},~X_3=-\frac{1}{4}\frac{T}{m^2}\boldsigma\cdot\boldpi-\frac{1}{4}\frac{iq\hbar}{m^2}\boldsymbol{\sigma}\cdot\mathbf{E},~X_5=\frac{1}{4}\frac{T^2}{m^3}\boldsigma\cdot\boldpi+\frac{3}{16}\frac{iq\hbar}{m^4}\boldsymbol{\pi}^2(\boldsigma\cdot\mathbf{E})+\frac{1}{8}\frac{iq\hbar}{m^4}(\mathbf{E}\cdot\boldsymbol{\pi})(\boldsigma\cdot\boldpi),\\ &X_7=-\frac{5}{16}\frac{T^3}{m^4}\boldsigma\cdot\boldpi-\frac{5}{32}\frac{iq\hbar}{m^6}\boldsymbol{\pi}^4(\boldsigma\cdot\mathbf{E})-\frac{3}{16}\frac{iq\hbar}{m^6}\boldsymbol{\pi}^2(\mathbf{E}\cdot\boldsymbol{\pi})(\boldsigma\cdot\boldpi),\\ &X_9=\frac{7}{16}\frac{T^4}{m^5}\boldsigma\cdot\boldpi+\frac{35}{256}\frac{iq\hbar}{m^8}\boldsymbol{\pi}^6(\boldsigma\cdot\mathbf{E})+\frac{29}{128}\frac{iq\hbar}{m^8}\boldsymbol{\pi}^4(\mathbf{E}\cdot\boldsymbol{\pi})(\boldsigma\cdot\boldpi),\\ &X_{11}=-\frac{21}{32}\frac{T^5}{m^6}\boldsigma\cdot\boldpi-\frac{63}{1024}\frac{iq\hbar}{m^{10}}\boldsymbol{\pi}^8(\boldsigma\cdot\mathbf{E})-\frac{65}{256}\frac{iq\hbar}{m^{10}}\boldsymbol{\pi}^6(\mathbf{E}\cdot\boldsymbol{\pi})(\boldsigma\cdot\boldpi),\\ &X_{13}=\frac{33}{32}\frac{T^6}{m^7}\boldsigma\cdot\boldpi+\frac{231}{2048}\frac{iq\hbar}{m^{12}}\boldsymbol{\pi}^{10}(\boldsigma\cdot\mathbf{E})+\frac{281}{1024}\frac{iq\hbar}{m^{12}}\boldsymbol{\pi}^{8}(\mathbf{E}\cdot\boldsymbol{\pi})(\boldsigma\cdot\boldpi), \end{split} \end{equation} \end{widetext} where $T=(\boldsigma\cdot\boldpi)^2/2m$. The forms of $X_1$ and $X_3$ in Eq.~(\ref{App:SolveX}) are also valid for inhomogeneous fields. The expanding terms of the Dirac generating operator from $X_5$ to $X_{13}$ in Eq.~(\ref{App:SolveX}) are valid only for homogeneous fields. Inserting Eqs.~(\ref{App:SolveX}) into Eq.~(\ref{EqA}), we can obtain each order of the Dirac energy operator. 
Furthermore, we rewrite each order of the Dirac energy operator $A_{\ell}$ as the combination of the hermitian part ($A^H_{\ell}$) and anti-hermitian part ($A^{N}_{\ell}$), \begin{equation}\label{EqAHN} A_{\ell}=A^{H}_{\ell}+A^{N}_{\ell}, \end{equation} where $A^H_{\ell}$ and $A^N_{\ell}$ satisfy $A^{H\dag}_{\ell}=A^H_{\ell}$ and $A^{N\dag}_{\ell}=-A^N_{\ell}$. The hermitian parts of the Dirac energy operator from $A^H_0$ to $A^H_{12}$ are given by \begin{equation}\label{EqAsH} \begin{split} &A^H_0=T+V,~A^H_2=-\frac{T^2}{2m}-\frac{q\hbar}{4m^2}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &A^H_4=\frac{T^3}{2m^2}+\frac{3}{16}\frac{q\hbar}{m^4}\boldsymbol{\pi}^2\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &A^H_6=-\frac{5}{8}\frac{T^4}{m^3}-\frac{5}{32}\frac{q\hbar}{m^6}\boldsymbol{\pi}^4\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &A^H_8=\frac{7}{8}\frac{T^5}{m^4}+\frac{35}{256}\frac{q\hbar}{m^8}\boldsymbol{\pi}^6\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &A^H_{10}=-\frac{21}{16}\frac{T^6}{m^5}-\frac{63}{512}\frac{q\hbar}{m^{10}}\boldsymbol{\pi}^8\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &A^H_{12}=\frac{33}{16}\frac{T^7}{m^6}+\frac{231}{2048}\frac{q\hbar}{m^{12}}\boldsymbol{\pi}^{10}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ \end{split} \end{equation} The anti-hermitian parts of the Dirac energy operator from $A^N_0$ to $A^N_{12}$ are given by \begin{equation}\label{EqAsN} \begin{split} &A^N_0=0,~A^N_2=-\frac{iq\hbar}{4m^2}\mathbf{E}\cdot\boldsymbol{\pi},~A^N_4=+\frac{5}{16}\frac{iq\hbar}{m^4}\boldsymbol{\pi}^2\mathbf{E}\cdot\boldsymbol{\pi},\\ &A^N_6=-\frac{11}{32}\frac{iq\hbar}{m^6}\boldsymbol{\pi}^4\mathbf{E}\cdot\boldsymbol{\pi},~A^N_8=+\frac{93}{256}\frac{iq\hbar}{m^8}\boldsymbol{\pi}^6\mathbf{E}\cdot\boldsymbol{\pi},\\ 
&A^N_{10}=-\frac{193}{512}\frac{iq\hbar}{m^{10}}\boldsymbol{\pi}^8\mathbf{E}\cdot\boldsymbol{\pi},~A^N_{12}=+\frac{793}{2048}\frac{iq\hbar}{m^{12}}\boldsymbol{\pi}^{10}\mathbf{E}\cdot\boldsymbol{\pi}.\\ \end{split} \end{equation} We emphasize that the second and higher order of electromagnetic field will be neglected in Eqs.\ (\ref{EqAsH}) and (\ref{EqAsN}). In order to simplify the present expression, the form of $A_{\ell}$ still contains terms with non-linear electromagnetic fields because the operator $T$ can be written as $T=(\boldsigma\cdot\boldpi)^2/2m=\frac{1}{2m}(\boldsymbol{\pi}^2-\frac{q\hbar}{c}\boldsigma\cdot\mathbf{B})$. We will neglect these higher order terms when constructing Hamiltonian. On the other hand, to evaluate the Dirac string operator, we have to obtain the Dirac exponent operator by expanding $\ln(1+X^{\dag}X)$. After straightforward calculations, the expanding terms of the Dirac exponent operator $G_{\ell}$ (up to $1/c^{12}$) are given by \begin{equation}\label{EqGs} \begin{split} &G_2=\frac{T}{2m},~G_4=-\frac{5}{8}\frac{T^2}{m^2}-\frac{1}{4}\frac{q\hbar}{m^3}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &G_6=\frac{11}{12}\frac{T^3}{m^3}+\frac{5}{16}\frac{q\hbar}{m^5}\boldsymbol{\pi}^2\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &G_8=-\frac{93}{64}\frac{T^4}{m^4}-\frac{11}{32}\frac{q\hbar}{m^7}\boldsymbol{\pi}^4\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &G_{10}=\frac{193}{80}\frac{T^5}{m^5}+\frac{93}{256}\frac{q\hbar}{m^9}\boldsymbol{\pi}^6\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &G_{12}=-\frac{793}{192}\frac{T^6}{m^6}-\frac{193}{512}\frac{q\hbar}{m^{11}}\boldsymbol{\pi}^8\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}). 
\end{split} \end{equation} Since we always neglect terms with $E^2$, $B^2$, $EB$ and multiple products of them, the kinetic energy operator $T$ commutes with $\boldsymbol{\pi}^{2k}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})$ and $\boldsymbol{\pi}^{2k}\mathbf{E}\cdot\boldsymbol{\pi}$, and we have \begin{equation}\label{T comm} \begin{split} &[T,\boldsymbol{\pi}^{2k}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})]=0+o(f^2),\\ &[T,\boldsymbol{\pi}^{2k}\mathbf{E}\cdot\boldsymbol{\pi}]=0+o(f^2),\\ &[\boldsymbol{\pi}^{2k}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\boldsymbol{\pi}^{2n}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})]=0+o(f^2), \end{split} \end{equation} where $o(f^2)$ represents the second order and higher orders of homogeneous electromagnetic fields. Applying Eqs.\ (\ref{EqAsH}), (\ref{EqAsN}), (\ref{EqGs}) and (\ref{T comm}) to the Dirac string operators [Eq.~(\ref{EqS})], we find that all the non-vanishing Dirac string operators $S_{\ell}$ (from $\ell=2$ to $\ell=12$) are proportional to second and higher orders of electric and magnetic fields which are being neglected. This can also be proved as follows. Firstly, consider the Dirac string operator with only one Dirac exponent operator, $S=[G,A^N]/2+o(G^2)$. The Dirac exponent operator is $G=\sum_{\ell}G_{\ell}/c^{\ell}=G_T+G_{\mathrm{so}}$, where $G_T$ is the term with collections of the kinetic energy operator $T$, i.e., $G_T=T/2mc^2-(5/8)T^2/m^2c^4+(11/12)T^3/m^3c^6+\cdots$, and $G_{\mathrm{so}}=(-1/4m^3c^4+5\boldsymbol{\pi}^2/16m^5c^6-11\boldsymbol{\pi}^4/32m^7c^8+\cdots)q\hbar\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})=F(\boldsymbol{\pi}^2)\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})$, where $F(\boldsymbol{\pi}^2)$ represents the power series of $\boldsymbol{\pi}^2$. 
The anti-hermitian part of the Dirac energy operator is $A^N=\sum_{\ell}A^{N}_{\ell}/c^{\ell}=(-1/4m^2c^2+5\boldsymbol{\pi}^2/16m^4c^4-11\boldsymbol{\pi}^4/32m^6c^6+\cdots)iq\hbar\mathbf{E}\cdot\boldsymbol{\pi}=g(\boldsymbol{\pi}^2)\mathbf{E}\cdot\boldsymbol{\pi}$, where $g(\boldsymbol{\pi}^2)$ represents the power series of $\boldsymbol{\pi}^2$. Therefore, $[G,A^N]$ can be written as $[G,A^N]=[G_T,A^N]+[G_{\mathrm{so}},A^N]$. Since we have $[T,\boldsymbol{\pi}^{2k}]=[T,\mathbf{E}\cdot\boldsymbol{\pi}]=0+o(f^2)$, we obtain $[G_T,A^N]=0+o(f^2)$. The commutator $[G_{\mathrm{so}},A^N]=[F(\boldsymbol{\pi}^2)\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),g(\boldsymbol{\pi}^2)\mathbf{E}\cdot\boldsymbol{\pi}]$ also vanishes up to second-order terms of homogeneous electromagnetic field, because we have $[\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),g(\boldsymbol{\pi}^2)]=0+o(f^2)$, $[\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\mathbf{E}\cdot\boldsymbol{\pi}]=0+o(f^2)$, $[F(\boldsymbol{\pi}^2),\mathbf{E}\cdot\boldsymbol{\pi}]=0+o(f^2)$ and $[F(\boldsymbol{\pi}^2),g(\boldsymbol{\pi}^2)]=0$. We obtain $[G,A^N]=0+o(f^2)$, and thus, the terms containing odd numbers of $G$ in the Dirac string operator [see Eq.~(\ref{EqS NE})] always vanish up to second-order terms of homogeneous electromagnetic fields. Secondly, consider the term containing two Dirac exponent operators in the Dirac string operator, $[G,[G,A^H]]$. The hermitian part of the Dirac energy operator can be written as $A^H=\sum_{\ell}A^{H}_{\ell}/c^{\ell}=V+A^H_T+A^H_{\mathrm{so}}$, where $A^H_{T}=T-T^2/2mc^2+T^3/2m^2c^4-5T^4/8m^3c^6+\cdots$ and $A^H_{\mathrm{so}}=K(\boldsymbol{\pi}^2)\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})$, where $K(\boldsymbol{\pi}^2)$ is the power series of $\boldsymbol{\pi}^2$. The commutator $[G,A^H]$ becomes $[G,A^H]=[G_T,V]+[G_T,A^H_T]+[G_T,A^{H}_{\mathrm{so}}]+[G_{\mathrm{so}},V]+[G_{\mathrm{so}},A^H_T]+[G_{\mathrm{so}},A^H_{\mathrm{so}}]$. 
Since we have $[T,\boldsymbol{\pi}^{2k}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})]=0+o(f^2)$ and $[\boldsymbol{\pi}^{2k}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\boldsymbol{\pi}^{2n}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})]=0+o(f^2)$, the commutators $[G_T,A^{H}_{\mathrm{so}}]$, $[G_{\mathrm{so}},A^H_T]$ and $[G_{\mathrm{so}},A^H_{\mathrm{so}}]$ vanish up to second-order terms of homogeneous electromagnetic fields as well as $[G_{\mathrm{so}},V]$. For the commutator $[G_T,V]$, using $[T,V]=iq\hbar\mathbf{E}\cdot\boldsymbol{\pi}/m$, we find that $[G_T,V]=R(\boldsymbol{\pi}^2)\mathbf{E}\cdot\boldsymbol{\pi}+o(f^2)$, where $R(\boldsymbol{\pi}^2)$ is the power series of $\boldsymbol{\pi}^2$. That is, $[G,A^H]=R(\boldsymbol{\pi}^2)\mathbf{E}\cdot\boldsymbol{\pi}+o(f^2)$. Similar to the commutator $[G,A^N]$, where $A^N=g(\boldsymbol{\pi}^2)\mathbf{E}\cdot\boldsymbol{\pi}$, we find that this implies that $[G,[G,A^H]]=0+o(f^2)$. Therefore, the terms containing even numbers of the Dirac exponent operators in the Dirac string operator [see Eq.~(\ref{EqS NE})] always vanish up to second-order terms of homogeneous electromagnetic fields. In short, it can be shown that from $\ell=0$ to $\ell=12$, the expanding terms of the Dirac string operator satisfy \begin{equation}\label{EqVanS} S_{\ell}=0+o(f^2), \end{equation} where $o(f^2)$ represents the second and higher orders of electromagnetic fields. Considering Eq.~(\ref{EqExpandH}) together with (\ref{EqVanS}), we find that $H^{(\ell)}_{\mathrm{FW}}$ is exactly equal to $A^H_{\ell}$, i.e., \begin{equation}\label{EqHFW} c^{\ell}H_{\mathrm{FW}}^{(\ell)}=A^H_{\ell}+o(f^2), \end{equation} where $\ell=0,2,4,\cdots,12$. Equation (\ref{EqHFW}) is the main result of this paper. This implies that the FW transformed Dirac Hamiltonian is only determined by the hermitian part of the Dirac energy operator regardless of the Dirac exponent operator $G$. We have shown that Eq.~(\ref{EqHFW}) is valid at least up to $1/c^{12}$. 
We believe that this result is valid to all higher orders of $1/c$. Equation (\ref{EqHFW}) enables us to solely focus on the hermitian part of the Dirac energy operator since the anti-hermitian part can be exactly cancelled by the remaining string operators. As a consequence, this result provides us with a method to obtain higher order terms faster than the traditional Foldy-Wouthuysen transformation. Comparing the form of the resulting FW transformed Dirac Hamiltonian with the classical Hamiltonian, we define the magnetic moment $\boldsymbol{\mu}$ and the scaled kinetic momentum $\boldsymbol{\xi}$ as \begin{equation}\label{def} \boldsymbol{\mu}=\frac{q\hbar}{2mc}\boldsymbol{\sigma}, \qquad \boldsymbol{\xi}=\frac{\boldsymbol{\pi}}{mc}. \end{equation} On the other hand, the kinetic energy operator $T$ in Eq.~(\ref{EqAsH}) can be replaced by $T=\frac{\boldsymbol{\pi}^2}{2m}-\frac{q\hbar}{2mc}\boldsigma\cdot\mathbf{B}$, and after neglecting second and higher orders of electromagnetic fields, the FW transformed Hamiltonian [Eq.~(\ref{EqHFW})] becomes \begin{widetext} \begin{equation}\label{HFWs} \begin{split} H^{(0)}_{\mathrm{FW}}&=V+\frac{1}{2}mc^2\boldsymbol{\xi}^2-\boldmu\cdot\mathbf{B},\\ H^{(2)}_{\mathrm{FW}}&=-\frac{1}{8}mc^2\boldsymbol{\xi}^4+\frac{1}{2}\boldsymbol{\xi}^2\boldmu\cdot\mathbf{B}-\frac{1}{2}\boldmu\cdot(\mathbf{E}\times\boldxi),\\ H^{(4)}_{\mathrm{FW}}&=+\frac{1}{16}mc^2\boldsymbol{\xi}^6-\frac{3}{8}\boldsymbol{\xi}^4\boldmu\cdot\mathbf{B}+\frac{3}{8}\boldsymbol{\xi}^2\boldmu\cdot(\mathbf{E}\times\boldxi),\\ H^{(6)}_{\mathrm{FW}}&=-\frac{5}{128}mc^2\boldsymbol{\xi}^8+\frac{5}{16}\boldsymbol{\xi}^6\boldmu\cdot\mathbf{B}-\frac{5}{16}\boldsymbol{\xi}^4\boldmu\cdot(\mathbf{E}\times\boldxi),\\ H^{(8)}_{\mathrm{FW}}&=+\frac{7}{256}mc^2\boldsymbol{\xi}^{10}-\frac{35}{128}\boldsymbol{\xi}^8\boldmu\cdot\mathbf{B}+\frac{35}{128}\boldsymbol{\xi}^6\boldmu\cdot(\mathbf{E}\times\boldxi),\\ 
H^{(10)}_{\mathrm{FW}}&=-\frac{21}{1024}mc^2\boldsymbol{\xi}^{12}+\frac{63}{256}\boldsymbol{\xi}^{10}\boldmu\cdot\mathbf{B}-\frac{63}{256}\boldsymbol{\xi}^8\boldmu\cdot(\mathbf{E}\times\boldxi),\\ H^{(12)}_{\mathrm{FW}}&=+\frac{33}{2048}mc^2\boldsymbol{\xi}^{14}-\frac{231}{1024}\boldsymbol{\xi}^{12}\boldmu\cdot\mathbf{B}+\frac{231}{1024}\boldsymbol{\xi}^{10}\boldmu\cdot(\mathbf{E}\times\boldxi). \end{split} \end{equation} \end{widetext} After substituting Eq.~(\ref{HFWs}) into Eq.~(\ref{HFW sum}), the FW transformed Dirac Hamiltonian becomes a sum of two terms: \begin{equation}\label{HFW OandS} \begin{split} H_{\mathrm{FW}}&=\sum_{\ell}H^{(\ell)}_{\mathrm{FW}}\\ &=H_{\mathrm{orbit}}+H_{\mathrm{spin}}, \end{split} \end{equation} where the orbital Hamiltonian $H_{\mathrm{orbit}}$ is the kinetic energy (including the rest mass energy) plus the potential energy, \begin{equation}\label{EqHo} \begin{split} H_{\mathrm{orbit}}&=mc^2(1+\frac{1}{2}\boldsymbol{\xi}^2-\frac{1}{8}\boldsymbol{\xi}^4+\frac{1}{16}\boldsymbol{\xi}^6-\frac{5}{128}\boldsymbol{\xi}^8\\ &~~+\frac{7}{256}\boldsymbol{\xi}^{10}-\frac{21}{1024}\boldsymbol{\xi}^{12}+\frac{33}{2048}\boldsymbol{\xi}^{14})+V, \end{split} \end{equation} and the spin Hamiltonian $H_{\mathrm{spin}}$ is the Hamiltonian of intrinsic magnetic moment in electromagnetic fields, \begin{equation}\label{EqHs} \begin{split} H_{\mathrm{spin}}&=-(1-\frac{1}{2}\boldsymbol{\xi}^2+\frac{3}{8}\boldsymbol{\xi}^4-\frac{5}{16}\boldsymbol{\xi}^6+\frac{35}{128}\boldsymbol{\xi}^8-\frac{63}{256}\boldsymbol{\xi}^{10}\\ &~~+\frac{231}{1024}\boldsymbol{\xi}^{12})\boldmu\cdot\mathbf{B}+(-\frac{1}{2}+\frac{3}{8}\boldsymbol{\xi}^2-\frac{5}{16}\boldsymbol{\xi}^4+\frac{35}{128}\boldsymbol{\xi}^6\\ &~~-\frac{63}{256}\boldsymbol{\xi}^8+\frac{231}{1024}\boldsymbol{\xi}^{10})\boldmu\cdot(\mathbf{E}\times\boldxi).\\ \end{split} \end{equation} In the following section, we will show that the FW transformed Dirac Hamiltonian is equivalent to the Hamiltonian obtained 
from T-BMT equation with $g=2$. \section{FW transformed Dirac Hamiltonian and classical Hamiltonian}\label{sec:TBMT} The orbital Hamiltonian $H_{\mathrm{orbit}}$ [Eq.~(\ref{EqHo})] is expected to be equivalent to the classical relativistic energy $\gamma mc^2+V$. However, the boost velocity in the T-BMT equation is not $\boldsymbol{\xi}$ \cite{TWChen2010}. Taking the series expansion of $(1+\boldsymbol{\xi}^2)^{1/2}$ into account, \begin{equation}\label{series_xi} \begin{split} (1+\boldsymbol{\xi}^2)^{1/2}&=1+\frac{1}{2}\boldsymbol{\xi}^2-\frac{1}{8}\boldsymbol{\xi}^4+\frac{1}{16}\boldsymbol{\xi}^6-\frac{5}{128}\boldsymbol{\xi}^8\\ &~~+\frac{7}{256}\boldsymbol{\xi}^{10}-\frac{21}{1024}\boldsymbol{\xi}^{12}+\frac{33}{2048}\boldsymbol{\xi}^{14}\\ &~~-\frac{429}{32768}\boldsymbol{\xi}^{16}+\cdots, \end{split} \end{equation} we find that the series in $\boldsymbol{\xi}^2$ in Eq.~(\ref{EqHo}) is exactly equal to Eq.~(\ref{series_xi}) up to $\boldsymbol{\xi}^{14}$. This enables us to define the boost operator $\widehat{\boldsymbol{\beta}}$ via the Lorentz operator $\widehat{\gamma}$, \begin{equation}\label{xiboost} (1+\boldsymbol{\xi}^2)^{1/2}=\widehat{\gamma}=\frac{1}{\sqrt{1-\widehat{\boldsymbol{\beta}}^2}}. \end{equation} In this sense, the orbital Hamiltonian can now be written as $H_{\mathrm{orbit}}=\widehat{\gamma} mc^2+V$. In classical relativistic theory, the Lorentz factor $\gamma$ is related to boost velocity by $\gamma=1/\sqrt{1-\beta^2}$. However, in relativistic quantum mechanics, since different components of the kinetic momentum operator $\boldsymbol{\pi}$ do not commute with one another, the boost operator $\widehat{\boldsymbol{\beta}}$ should not simply satisfy the form $\widehat{\gamma}=1/\sqrt{1-\widehat{\boldsymbol{\beta}}^2}$. We will go back to this point when discussing the spin Hamiltonian.
The boost operator $\widehat{\boldsymbol{\beta}}$ plays an important role in showing the agreement between the spin Hamiltonian $H_{\mathrm{spin}}$ and the T-BMT equation. The spin Hamiltonian can be written as a sum of Zeeman Hamiltonian $H_{\mathrm{ze}}$ and spin-orbit interaction $H_{\mathrm{so}}$, \begin{equation} H_{\mathrm{spin}}=H_{\mathrm{ze}}+H_{\mathrm{so}}. \end{equation} The Zeeman Hamiltonian $H_{\mathrm{ze}}$ is the relativistic correction to the Zeeman energy: \begin{equation}\label{Hze} \begin{split} H_{\mathrm{ze}}&=-(1-\frac{1}{2}\boldsymbol{\xi}^2+\frac{3}{8}\boldsymbol{\xi}^4-\frac{5}{16}\boldsymbol{\xi}^6+\frac{35}{128}\boldsymbol{\xi}^8-\frac{63}{256}\boldsymbol{\xi}^{10}\\ &~~+\frac{231}{1024}\boldsymbol{\xi}^{12})\boldmu\cdot\mathbf{B}. \end{split} \end{equation} The spin-orbit interaction $H_{\mathrm{so}}$ is the interaction of electric field and the electric dipole moment arising from the boost on the intrinsic spin magnetic moment: \begin{equation}\label{Hso} \begin{split} H_{\mathrm{so}}&=-(\frac{1}{2}-\frac{3}{8}\boldsymbol{\xi}^2+\frac{5}{16}\boldsymbol{\xi}^4-\frac{35}{128}\boldsymbol{\xi}^6\\ &~~+\frac{63}{256}\boldsymbol{\xi}^8-\frac{231}{1024}\boldsymbol{\xi}^{10})\boldmu\cdot(\mathbf{E}\times\boldxi). \end{split} \end{equation} We first focus on the series in the Zeeman Hamiltonian. Considering the series expansion of $(1+\boldsymbol{\xi}^2)^{-1/2}$, \begin{equation}\label{Series1} \begin{split} (1+\boldsymbol{\xi}^2)^{-1/2}&=1-\frac{1}{2}\boldsymbol{\xi}^2+\frac{3}{8}\boldsymbol{\xi}^4-\frac{5}{16}\boldsymbol{\xi}^6+\frac{35}{128}\boldsymbol{\xi}^8\\ &~~-\frac{63}{256}\boldsymbol{\xi}^{10}+\frac{231}{1024}\boldsymbol{\xi}^{12}-\frac{429}{2048}\boldsymbol{\xi}^{14}+\cdots, \end{split} \end{equation} we find that the series in $H_{\mathrm{ze}}$ is exactly equal to $(1+\boldsymbol{\xi}^2)^{-1/2}$ up to $\boldsymbol{\xi}^{12}$.
Therefore, the Zeeman Hamiltonian Eq.~(\ref{Hze}) can be written as \begin{equation}\label{Hze_boost} H_{\mathrm{ze}}=-\frac{1}{\widehat{\gamma}}\boldmu\cdot\mathbf{B}. \end{equation} On the other hand, the spin-orbit term in the T-BMT Hamiltonian transforms like $[g/2-\gamma/(1+\gamma)]$ and $g=2$ for the Dirac Hamiltonian. Therefore, considering the series expansion of $(1-\widehat{\gamma}/(1+\widehat{\gamma}))(1/\widehat{\gamma})$, we have \begin{equation}\label{Series2} \begin{split} \left(1-\frac{\widehat{\gamma}}{1+\widehat{\gamma}}\right)\frac{1}{\widehat{\gamma}}&=\frac{1}{\sqrt{1+\boldsymbol{\xi}^2}}-\frac{1}{1+\sqrt{1+\boldsymbol{\xi}^2}}\\ &=\frac{1}{2}-\frac{3}{8}\boldsymbol{\xi}^2+\frac{5}{16}\boldsymbol{\xi}^4-\frac{35}{128}\boldsymbol{\xi}^6\\ &~~+\frac{63}{256}\boldsymbol{\xi}^8-\frac{231}{1024}\boldsymbol{\xi}^{10}+\frac{429}{2048}\boldsymbol{\xi}^{12}+\cdots, \end{split} \end{equation} where $\widehat{\gamma}(1+\widehat{\gamma})^{-1}=(1+\widehat{\gamma})^{-1}\widehat{\gamma}$ was used.\footnote{The identity can be shown as follows. $\widehat{\gamma}(1+\widehat{\gamma})^{-1}=[(1+\widehat{\gamma})\widehat{\gamma}^{-1}]^{-1}=(\widehat{\gamma}^{-1}+1)^{-1}=[\widehat{\gamma}^{-1}(1+\widehat{\gamma})]^{-1}=(1+\widehat{\gamma})^{-1}\widehat{\gamma}$.} The series in Eq.~(\ref{Hso}) is in agreement with Eq.~(\ref{Series2}) up to $\boldsymbol{\xi}^{10}$. Therefore, we have \begin{equation}\label{Hso_boost} H_{\mathrm{so}}=-\left(1-\frac{\widehat{\gamma}}{1+\widehat{\gamma}}\right)\frac{1}{\widehat{\gamma}}\boldmu\cdot(\mathbf{E}\times\boldxi). \end{equation} We note that if Eq.~(\ref{Hso_boost}) is to be in complete agreement with the T-BMT equation, the boost velocity operator $\widehat{\boldsymbol{\beta}}$ must be defined by $\widehat{\boldsymbol{\beta}}=\frac{1}{\widehat{\gamma}}\boldsymbol{\xi}$. In general, the commutator $[\boldsymbol{\xi},1/\widehat{\gamma}]$ is not equal to zero, and Eq.~(\ref{xiboost}) cannot be satisfied.
However, since we require that the FW transformed Dirac Hamiltonian $H_{\mathrm{FW}}$ is linear in electromagnetic fields, the magnetic field obtained from the operator $[\boldsymbol{\xi},1/\widehat{\gamma}]$ should be neglected. In that sense, the commutator $[\boldsymbol{\xi},1/\widehat{\gamma}]$ should be identified as zero in this case, and the boost operator can be written as \begin{equation}\label{xiboost1} \widehat{\boldsymbol{\beta}}=\frac{1}{\widehat{\gamma}}\boldsymbol{\xi}=\boldsymbol{\xi}\frac{1}{\widehat{\gamma}}. \end{equation} It can be shown that Eq.~(\ref{xiboost1}) satisfies Eq.~(\ref{xiboost}). Therefore, the spin Hamiltonian Eq.~(\ref{EqHs}) with substitutions of Eqs.~(\ref{Hze_boost}) and (\ref{Hso_boost}) becomes \begin{equation}\label{Hspin} \begin{split} H_{\mathrm{spin}}&=H_{\mathrm{ze}}+H_{\mathrm{so}}\\ &=-\boldsymbol{\mu}\cdot\left[\frac{1}{\widehat{\gamma}}\mathbf{B}-\left(1-\frac{\widehat{\gamma}}{1+\widehat{\gamma}}\right)\widehat{\boldsymbol{\beta}}\times\mathbf{E}\right]. \end{split} \end{equation} Up to the twentieth order there is a complete agreement between the spin part of the FW transformed Dirac Hamiltonian and the T-BMT equation with $g=2$. The FW transformed Hamiltonian is given by \begin{equation}\label{HFW total} \begin{split} H_{\mathrm{FW}}&=H_{\mathrm{orbit}}+H_{\mathrm{spin}}\\ &=V+\widehat{\gamma} mc^2-\boldsymbol{\mu}\cdot\left[\frac{1}{\widehat{\gamma}}\mathbf{B}-\left(1-\frac{\widehat{\gamma}}{1+\widehat{\gamma}}\right)\widehat{\boldsymbol{\beta}}\times\mathbf{E}\right], \end{split} \end{equation} which is in agreement with the classical Hamiltonian with $g=2$. In the next section, we take into account the Pauli anomalous magnetic moment and show that the classical correspondence of the Dirac-Pauli Hamiltonian is the classical Hamiltonian with $g\neq2$. 
\section{FW transformation for Dirac-Pauli Hamiltonian}\label{sec:TBMT2} In the previous section, the agreement to the classical Hamiltonian is shown to be complete up to terms of the order $(\boldsymbol{\pi}/mc)^{14}$ in the absence of anomalous electron magnetic moment, i.e., $g=2$. The Dirac electron including the Pauli anomalous magnetic moment can be described by the Dirac-Pauli Hamiltonian denoted by $\mathcal{H}$ which contains the Dirac Hamiltonian as well as anomalous magnetic interaction $V_B$ and anomalous electric interaction $V_E$, \begin{equation} \begin{split} \mathcal{H}&=H_D+\left(\begin{array}{cc} V_B&iV_E\\ -iV_E&-V_B \end{array}\right)\\ &=\left(\begin{array}{cc} H_+&H_0\\ H_0^{\dag}&H_- \end{array}\right) \end{split} \end{equation} where $H_+=V+V_B+mc^2$, $H_-=V-V_B-mc^2$ and $H_0=h_0+iV_E$. The Dirac Hamiltonian $H_D$ is given in Eq.~(\ref{Dirac}), and \begin{equation} V_B=-\mu'\boldsigma\cdot\mathbf{B},~V_E=\mu'\boldsigma\cdot\mathbf{E}. \end{equation} The coefficient $\mu'$ is defined as \begin{equation} \mu'=\left(\frac{g}{2}-1\right)\frac{q\hbar}{2mc}. \end{equation} For an electron with $g=2$, we have $V_B=0$ and $V_E=0$. Applying the unitary transformation Eq.~(\ref{U}) \begin{equation}\label{UHDPU} U=\left(\begin{array}{cc} \mathcal{Y}&\mathcal{Y}\mathcal{X}^{\dag}\\ -\mathcal{Z}\mathcal{X}&\mathcal{Z} \end{array}\right) \end{equation} to the Dirac-Pauli Hamiltonian, the self-consistent equation for the Dirac-Pauli generating operator $\mathcal{X}$ is given by the requirement of vanishing off-diagonal term of $U\mathcal{H}U^{\dag}$, i.e. \begin{equation}\label{DP X} \begin{split} 2mc^2\mathcal{X}&=[V,\mathcal{X}]+h_0-\mathcal{X}h_0\mathcal{X}\\ &~~-iV_E-i\mathcal{X}V_E\mathcal{X}-\{\mathcal{X},V_B\}, \end{split} \end{equation} where $h_0=c\boldsigma\cdot\boldpi$. 
The FW transformed Dirac-Pauli Hamiltonian can be obtained from the upper-left block diagonal term of $U\mathcal{H}U^{\dag}$, and it is given by \begin{equation}\label{HFWDP0} \mathcal{H}_{\mathrm{FW}}= \mathcal{Y}\left(H_++\mathcal{X}^{\dag}H_0^{\dag} +H_0\mathcal{X}+\mathcal{X}^{\dag}H_-\mathcal{X}\right)\mathcal{Y}. \end{equation} Similar to the derivation of Eq.~(\ref{HFW}), we find that the FW transformed Dirac-Pauli Hamiltonian [Eq.~(\ref{HFWDP0})] can also be simplified as (see Appendix~\ref{App:Ham2}) \begin{equation}\label{H FWDP} \mathcal{H}_{\mathrm{FW}}=mc^2+e^{\mathcal{G}/2}\mathcal{A}e^{-\mathcal{G}/2}, \end{equation} where the Dirac-Pauli energy operator $\mathcal{A}$ and the Dirac-Pauli exponent operator $\mathcal{G}$ are given by \begin{equation}\label{DP AG} \begin{split} &\mathcal{A}=V+h_0\mathcal{X}+V_B+iV_E\mathcal{X},\\ &\mathcal{G}=\ln\left(1+\mathcal{X}^{\dag}\mathcal{X}\right). \end{split} \end{equation} Similar to Eq.~(\ref{HFW NE}) obtained from the requirement of hermiticity of $H_{\mathrm{FW}}$, we find that the FW transformed Dirac-Pauli Hamiltonian also satisfies $\mathcal{H}_{\mathrm{FW}}=\mathcal{H}_{\mathrm{FW}}^{\dag}=mc^2+e^{-\mathcal{G}/2}\mathcal{A}e^{\mathcal{G}/2}$ and the FW transformed Dirac-Pauli Hamiltonian can be rewritten as \begin{equation} \mathcal{H}_{\mathrm{FW}}=mc^2+\mathcal{A}^H+\mathcal{S}, \end{equation} where $\mathcal{A}^H$ is the hermitian part of the Dirac-Pauli energy operator and the Dirac-Pauli string operator $\mathcal{S}$ is the same as Eq.~(\ref{EqS NE}) by the replacements $A\rightarrow\mathcal{A}$ and $G\rightarrow\mathcal{G}$, i.e. \begin{equation}\label{EqS DP NE} \begin{split} \mathcal{S}&=\frac{1}{2}[\mathcal{G},\mathcal{A}^N]+\frac{1}{2!2^2}[\mathcal{G},[\mathcal{G},\mathcal{A}^H]]+\frac{1}{3!2^3}[\mathcal{G},[\mathcal{G},[\mathcal{G},\mathcal{A}^N]]]\\ &~~+\frac{1}{4!2^4}[\mathcal{G},[\mathcal{G},[\mathcal{G},[\mathcal{G},\mathcal{A}^H]]]]+\cdots.
\end{split} \end{equation} Similar to the Dirac string operator, the anti-hermitian part of the Dirac-Pauli energy operator always appears in those terms with odd numbers of Dirac-Pauli exponent operators, and the hermitian part of the Dirac-Pauli energy operator always appears in those terms with even numbers of Dirac-Pauli exponent operators. The power series solutions to the Dirac-Pauli generating operator can be obtained by means of Eq.~(\ref{DP X}) via substitution of the series expansion $\mathcal{X}=\sum_k\mathcal{X}_k/c^k$, $k=1,2,3,\cdots$, and each order of Dirac-Pauli energy operator $\mathcal{A}_k$ can be obtained from $\mathcal{A}=\sum_k\mathcal{A}_k/c^k$ by using Eq.~(\ref{DP AG}). Each order of Dirac-Pauli energy operators can be decomposed into hermitian ($\mathcal{A}^H_k$) and anti-hermitian ($\mathcal{A}^N_k$) parts, $\mathcal{A}_k=\mathcal{A}^H_k+\mathcal{A}^N_k$. As a result, the FW transformed Dirac-Pauli Hamiltonian can be written as \begin{equation} \mathcal{H}_{FW}=mc^2+\sum_{k=0,1,2,\cdots}\mathcal{H}^{(k)}_{FW} \end{equation} with \begin{equation} c^k\mathcal{H}^{(k)}_{FW}=\mathcal{A}^H_{k}+\mathcal{S}_k. \end{equation} To obtain the FW transformed Dirac-Pauli Hamiltonian up to $k=12$, the largest order of the Dirac-Pauli generating operator must have the order of $k=13$. This is because the operator $h_0=c\boldsymbol{\sigma}\cdot\boldsymbol{\pi}$ is of the order of $c$, so the order of the Dirac-Pauli energy operator is one lower than that of the Dirac-Pauli generating operator. Furthermore, since each order of the Dirac-Pauli generating operator must equal that of the Dirac generating operator when $g=2$, we can rewrite the Dirac-Pauli generating operator ($\mathcal{X}_k$) as the sum of the Dirac generating operator ($X_k$) and the anomalous generating operator ($X_k'$), namely, \begin{equation}\label{XX'} \mathcal{X}_k=X_k+X_k'+o(f^2), \end{equation} where the anomalous generating operator $X_k'$ vanishes when $g=2$.
Similar to the derivation of power series solution to the Dirac generating operator shown in the previous section, the explicit forms of different orders of the anomalous generating operator $X_k'$ are given by ($k=1,2,\cdots,13$) \begin{widetext} \begin{equation}\label{X'k} \begin{split} &X_1'=0,~X_2'=0,~X_3'=-\frac{i\mu''}{2m}\boldsigma\cdot\mathbf{E},~X_4'=\frac{\mu''}{2m^2}\mathbf{B}\cdot\boldsymbol{\pi},~X_5'=\frac{3}{8}\frac{i\mu''}{m^3}\boldsymbol{\pi}^2(\boldsigma\cdot\mathbf{E})-\frac{i\mu''}{4m^3}(\boldsigma\cdot\boldpi)(\mathbf{E}\cdot\boldsymbol{\pi})\\ &X_6'=-\frac{3}{8}\frac{\mu''}{m^4}\boldsymbol{\pi}^2(\mathbf{B}\cdot\boldsymbol{\pi}),~X_7'=-\frac{5}{16}\frac{i\mu''}{m^5}\boldsymbol{\pi}^4(\boldsigma\cdot\mathbf{E})+\frac{1}{4}\frac{i\mu''}{m^5}\boldsymbol{\pi}^2(\boldsigma\cdot\boldpi)(\mathbf{E}\cdot\boldsymbol{\pi}),\\ &X_8'=\frac{5}{16}\frac{\mu''}{m^6}\boldsymbol{\pi}^4(\mathbf{B}\cdot\boldsymbol{\pi}),~X_9'=\frac{35}{128}\frac{i\mu''}{m^7}\boldsymbol{\pi}^6(\boldsigma\cdot\mathbf{E})-\frac{15}{64}\frac{i\mu''}{m^7}\boldsymbol{\pi}^4(\boldsigma\cdot\boldpi)(\mathbf{E}\cdot\boldsymbol{\pi}),\\ &X_{10}'=-\frac{35}{128}\frac{\mu''}{m^8}\boldsymbol{\pi}^6(\mathbf{B}\cdot\boldsymbol{\pi}),~X_{11}'=-\frac{63}{256}\frac{i\mu''}{m^9}\boldsymbol{\pi}^8(\boldsigma\cdot\mathbf{E})+\frac{7}{32}\frac{i\mu''}{m^9}\boldsymbol{\pi}^6(\boldsigma\cdot\boldpi)(\mathbf{E}\cdot\boldsymbol{\pi}),\\ &X_{12}'=\frac{63}{256}\frac{\mu''}{m^{10}}\boldsymbol{\pi}^8(\mathbf{B}\cdot\boldsymbol{\pi}),~X_{13}'=\frac{231}{1024}\frac{i\mu''}{m^{11}}\boldsymbol{\pi}^{10}(\boldsigma\cdot\mathbf{E})-\frac{105}{512}\frac{i\mu''}{m^{11}}\boldsymbol{\pi}^8(\boldsigma\cdot\boldpi)(\mathbf{E}\cdot\boldsymbol{\pi}),\\ \end{split} \end{equation} \end{widetext} where $\mu''=(g/2-1)q\hbar/2m=c\mu'$. 
We note that since the gyromagnetic ratio always accompanies linear-order terms of electric or magnetic fields, the operator $X_k'$ is proportional to electromagnetic fields and contains the kinetic momentum operator. Substituting Eq.~(\ref{XX'}) into Eq.~(\ref{DP AG}), the Dirac-Pauli energy operator ($\mathcal{A}_k$) can be written as the sum of the energy operator for the Dirac Hamiltonian ($A_k$) and anomalous energy operator ($A'_k$), \begin{equation} \mathcal{A}_k=A_k+A'_k+o(f^2), \end{equation} where the expanding terms of the Dirac energy operator $A_k$ from $k=0$ to $k=12$ are given in Eqs.~(\ref{EqAsH}) and (\ref{EqAsN}). The $k$th order of the anomalous energy operator is related to $k$th orders of Dirac generating operator and anomalous generating operator by \begin{equation} \begin{split} &A'_0=0,~A'_1=cV_B,\\ &A'_{k}=(h_0/c)X'_{k+1}+icV_EX_{k-1}, k=2,4,\cdots,12,\\ &A'_{k}=(h_0/c)X'_{k+1}, k=3,5,\cdots,11. \end{split} \end{equation} Using Eqs.~(\ref{App:SolveX}) and (\ref{X'k}), the hermitian parts of the expanding terms of the anomalous energy operator from zeroth order to twentieth orders are given by \begin{equation}\label{A'k H} \begin{split} &A'^H_0=0,~A'^H_1=-\mu''\boldsigma\cdot\mathbf{B},\\ &A'^H_2=-\frac{\mu''}{m}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &A'^H_3=\frac{1}{2}\frac{\mu''}{m^2}(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),\\ &A'^H_4=\frac{1}{2}\frac{\mu''}{m^3}\boldsymbol{\pi}^2\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &A'^H_5=-\frac{3}{8}\frac{\mu''}{m^4}\boldsymbol{\pi}^2(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),\\ &A'^H_6=-\frac{3}{8}\frac{\mu''}{m^5}\boldsymbol{\pi}^4\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &A'^H_7=\frac{5}{16}\frac{\mu''}{m^6}\boldsymbol{\pi}^4(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),\\ &A'^H_8=\frac{5}{16}\frac{\mu''}{m^7}\boldsymbol{\pi}^6\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\
&A'^H_9=-\frac{35}{128}\frac{\mu''}{m^8}\boldsymbol{\pi}^6(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),\\ &A'^H_{10}=-\frac{35}{128}\frac{\mu''}{m^9}\boldsymbol{\pi}^8\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &A'^H_{11}=\frac{63}{256}\frac{\mu''}{m^{10}}\boldsymbol{\pi}^8(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),\\ &A'^H_{12}=\frac{63}{256}\frac{\mu''}{m^{11}}\boldsymbol{\pi}^{10}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}).\\ \end{split} \end{equation} For example, consider the twentieth order of the anomalous operator $A'_{12}$, which is given by $A'_{12}=(h_0/c)X'_{13}+icV_EX_{11}$. Substituting $V_E$, $X'_{13}$ and $X_{11}$ into $A'_{12}$ and neglecting the second order of homogeneous electromagnetic fields, we find that \begin{equation} \begin{split} A'_{12}&=\frac{231}{1024}\frac{i\mu''}{m^{11}}\boldsymbol{\pi}^{10}(\boldsigma\cdot\boldpi)(\boldsigma\cdot\mathbf{E})-\frac{21}{32}\frac{i\mu''}{m^6}T^5(\boldsigma\cdot\mathbf{E})(\boldsigma\cdot\boldpi)\\ &~~-\frac{105}{512}\frac{i\mu''}{m^{11}}\boldsymbol{\pi}^{10}\boldsigma\cdot\mathbf{E}\\ &=\left(\frac{231}{1024}+\frac{21}{32}\times\frac{1}{32}\right)\frac{\mu''}{m^{11}}\boldsymbol{\pi}^{10}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})\\ &~~+\left(-\frac{105}{512}+\frac{231}{1024}-\frac{21}{32}\times\frac{1}{32}\right)\frac{i\mu''}{m^{11}}\boldsymbol{\pi}^{10}\boldsigma\cdot\mathbf{E}\\ &=\frac{63}{256}\frac{\mu''}{m^{11}}\boldsymbol{\pi}^{10}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}), \end{split} \end{equation} where in the second equality we have used $(\boldsigma\cdot\mathbf{E})(\boldsigma\cdot\boldpi)=\mathbf{E}\cdot\boldsymbol{\pi}+i\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})$ and $\mathbf{E}\times\boldsymbol{\pi}=-\boldsymbol{\pi}\times\mathbf{E}$ for homogeneous fields, and the kinetic energy operator is replaced by $T\rightarrow\boldsymbol{\pi}^2/2m$. 
The anti-hermitian part of $A'_{12}$ is $i\mu''\boldsymbol{\pi}^{10}\boldsigma\cdot\mathbf{E}/m^{11}$ and its numerical coefficient is zero. Interestingly, we find that all the anti-hermitian parts of $A'_k$ from $k=0$ to $k=12$ vanish up to second-order terms of homogeneous electromagnetic fields, i.e. \begin{equation}\label{A'k N} A'^N_k=0+o(f^2). \end{equation} On the other hand, the series expansion of the Dirac-Pauli exponent operator $\mathcal{G}=\ln(1+\mathcal{X}^{\dag}\mathcal{X})$ can also be written as $\mathcal{G}=\sum_{k}\mathcal{G}_k/c^k$ and $\mathcal{G}_{k}=G_k+G'_k$, where $G_k$ (the $k$th order of the Dirac exponent operator) is given in Eq.~(\ref{EqGs}) and $G'_k$ is the $k$th order of the anomalous exponent operator. The expanding terms of the anomalous exponent operators $G'_k$ from $k=1$ to $k=12$ are as follows: \begin{equation}\label{G'k} \begin{split} &G'_1=0,~G'_2=0,~G'_3=0,~G'_4=-\frac{\mu''}{2m^2}\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &G'_5=\frac{\mu''}{2m^3}(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),~G'_6=\frac{5}{8}\frac{\mu''}{m^4}\boldsymbol{\pi}^2\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &G'_7=-\frac{5}{8}\frac{\mu''}{m^5}\boldsymbol{\pi}^2(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),\\ &G'_8=-\frac{11}{16}\frac{\mu''}{m^6}\boldsymbol{\pi}^4\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &G'_9=\frac{11}{16}\frac{\mu''}{m^7}\boldsymbol{\pi}^4(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),\\ &G'_{10}=\frac{93}{128}\frac{\mu''}{m^8}\boldsymbol{\pi}^6\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ &G'_{11}=-\frac{93}{128}\frac{\mu''}{m^9}\boldsymbol{\pi}^6(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),\\ &G'_{12}=-\frac{193}{256}\frac{\mu''}{m^{10}}\boldsymbol{\pi}^8\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\\ \end{split} \end{equation} which all vanish when $g=2$ and each order of the anomalous exponent operator must be proportional to electromagnetic fields.
Substituting Eqs.~(\ref{A'k H}), (\ref{A'k N}) and (\ref{G'k}) into Eq.~(\ref{EqS DP NE}), it can be shown that similar to the result of the Dirac string operator, the Dirac-Pauli string operator also vanishes up to second-order terms of homogeneous electromagnetic fields; i.e., we have \begin{equation} \mathcal{S}_{k}=0+o(f^2). \end{equation} This can be proved as follows. Firstly, consider the term containing only one Dirac-Pauli exponent operator in the Dirac-Pauli string operator [see Eq.~(\ref{EqS DP NE})]. It is given by $[\mathcal{G},\mathcal{A}^{N}]/2$. Since $\mathcal{G}=G+G'$ and $\mathcal{A}^{N}=A^N+A'^N$, we have $[\mathcal{G},\mathcal{A}^{N}]/2=[G,A^N]/2+[G,A'^N]/2+[G',A^N]/2+[G',A'^N]/2$, where $[G,A^N]/2$ is the Dirac string operator containing only one Dirac exponent operator and it has been shown that $[G,A^N]/2=0+o(f^2)$. The anomalous exponent operator can be written as $G'=\sum_{k}G'_k/c^k=F_1(\boldsymbol{\pi}^2)\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})+F_2(\boldsymbol{\pi}^2)(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi)$, where $F_1(\boldsymbol{\pi}^2)$ represents a power series of $\boldsymbol{\pi}^2$ as well as $F_2(\boldsymbol{\pi}^2)$. We note that $A'^N=\sum_{k}A'^N_k/c^k=0+o(f^2)$ [see Eq.~(\ref{A'k N})]. It is obvious that the second term $[G,A'^N]$ and fourth term $[G',A'^N]$ vanish up to second-order terms of homogeneous electromagnetic fields.
The third term $[G',A^N]=[F_1(\boldsymbol{\pi}^2)\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})+F_2(\boldsymbol{\pi}^2)(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),g(\boldsymbol{\pi}^2)\mathbf{E}\cdot\boldsymbol{\pi}]$ also vanishes because we have $[F_1(\boldsymbol{\pi}^2),g(\boldsymbol{\pi}^2)]=[F_2(\boldsymbol{\pi}^2),g(\boldsymbol{\pi}^2)]=0$ and $[\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),\mathbf{E}\cdot\boldsymbol{\pi}]=[(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),\mathbf{E}\cdot\boldsymbol{\pi}]=[\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),g(\boldsymbol{\pi}^2)]=[(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),g(\boldsymbol{\pi}^2)]=[F_1(\boldsymbol{\pi}^2),\mathbf{E}\cdot\boldsymbol{\pi}]=[F_2(\boldsymbol{\pi}^2),\mathbf{E}\cdot\boldsymbol{\pi}]=0+o(f^2)$. Therefore, we have $[\mathcal{G},\mathcal{A}^N]/2=0+o(f^2)$. Since the commutator $[\mathcal{G},\mathcal{A}^N]/2$ always appears in those terms with odd numbers of the Dirac-Pauli exponent operators [see Eq.~(\ref{EqS DP NE})], this implies that the terms with odd numbers of $\mathcal{G}$ always vanish up to second-order terms of homogeneous electromagnetic fields. Secondly, consider the terms with two Dirac-Pauli exponent operators in the Dirac-Pauli string operator. It is given by $[\mathcal{G},[\mathcal{G},\mathcal{A}^{H}]]/2!2^2=[G,[G,A^H]]/2!2^2+[G,[G,A'^H]]/2!2^2+[G,[G',A^H]]/2!2^2+[G',[G,A^H]]/2!2^2+o(f^2)$, where we have neglected the second-order terms of electromagnetic fields, such as $[G,[G',A'^H]]$, $[G',[G,A'^H]]$, $[G',[G',A^H]]$ and $[G',[G',A'^H]]$. The first term $[G,[G,A^H]]$ is the Dirac string operator containing only two Dirac exponent operators and it has been shown that $[G,[G,A^H]]=0+o(f^2)$.
The anomalous energy operator can be written as $A'^H=\sum_kA'^H_k/c^k=K_1(\boldsymbol{\pi}^2)\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})+K_2(\boldsymbol{\pi}^2)(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi)$, where $K_1(\boldsymbol{\pi}^2)$ represents the power series of $\boldsymbol{\pi}^2$ as well as $K_2(\boldsymbol{\pi}^2)$. The commutator $[G,A'^H]$ can be written as $[G,A'^H]=[G_T+G_{\mathrm{so}},A'^H]=[G_T,A'^H]+[G_{\mathrm{so}},A'^H]$, where $G_{\mathrm{so}}=F(\boldsymbol{\pi}^2)\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})$ and $G_T=T/2m-(5/8)T^2/m^2+(11/12)T^3/m^3+\cdots$. Using Eq.~(\ref{T comm}), it can be shown that $[G_T,A'^H]=0+o(f^2)$ and $[G_{\mathrm{so}},A'^H]=0+o(f^2)$, and thus, the second term $[G,[G,A'^H]]$ vanishes up to second-order terms of homogeneous electromagnetic fields. Consider the third term $[G,[G',A^H]]$, where the commutator $[G',A^H]$ becomes $[G',A^H]=[F_1(\boldsymbol{\pi}^2)\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi})+F_2(\boldsymbol{\pi}^2)(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),V+A^H_T+A^H_{\mathrm{so}}]=[F_1(\boldsymbol{\pi}^2)\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),V]+[F_2(\boldsymbol{\pi}^2)(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),V]+o(f^2)$. However, the two commutators $[F_1(\boldsymbol{\pi}^2)\boldsigma\cdot(\mathbf{E}\times\boldsymbol{\pi}),V]$ and $[F_2(\boldsymbol{\pi}^2)(\boldsigma\cdot\boldpi)(\mathbf{B}\cdot\boldpi),V]$ are proportional to the second order of homogeneous electromagnetic fields since $[\boldsymbol{\pi},V]=iq\hbar\mathbf{E}$, and thus we have $[G,[G',A^H]]=0+o(f^2)$. The fourth term $[G',[G,A^H]]$ also vanishes up to $o(f^2)$ since it has been shown that $[G,A^H]=R(\boldsymbol{\pi}^2)\mathbf{E}\cdot\boldsymbol{\pi}+o(f^2)$ and $G'$ contains first-order terms of homogeneous electromagnetic fields. Therefore, we have shown that $[\mathcal{G},[\mathcal{G},\mathcal{A}^{H}]]/2!2^2=0+o(f^2)$. 
Since the commutator $[\mathcal{G},[\mathcal{G},\mathcal{A}^H]]$ always appears in those terms with even numbers of Dirac-Pauli exponent operators [see Eq.~(\ref{EqS DP NE})], this implies that the terms with even numbers of $\mathcal{G}$ always vanish up to second order of homogeneous electromagnetic fields. As a consequence, the FW transformed Dirac-Pauli Hamiltonian is determined only by the hermitian part of Dirac-Pauli energy operator, i.e. \begin{equation} c^k\mathcal{H}^{(k)}_{\mathrm{FW}}=\mathcal{A}_k^H+o(f^2). \end{equation} Since the Dirac-Pauli energy operator is composed of the Dirac energy operator and anomalous energy operator, $\mathcal{A}^{H}_k=A^H_k+A'^H_k$, and the Dirac energy operator is related to the FW transformed Dirac Hamiltonian by $c^kH^{(k)}_{\mathrm{FW}}=A^H_k$, the FW transformed Dirac-Pauli Hamiltonian can be written as the sum of the FW transformed Dirac Hamiltonian and the anomalous Hamiltonian: \begin{equation} \mathcal{H}_{\mathrm{FW}}=H_{\mathrm{FW}}+H'_{\mathrm{FW}}. \end{equation} The $k$th order of the FW transformed Dirac-Pauli Hamiltonian can be written as \begin{equation} \mathcal{H}^{(k)}_{\mathrm{FW}}=H^{(k)}_{\mathrm{FW}}+H'^{(k)}_{\mathrm{FW}}, \end{equation} where the $k$th order of the anomalous Hamiltonian $H'_{\mathrm{FW}}$ denoted by $H'^{(k)}_{\mathrm{FW}}$ is determined by the $k$th order of the anomalous energy operator: \begin{equation} c^kH'^{(k)}_{\mathrm{FW}}=A'^H_k.
\end{equation} Using Eqs.~(\ref{A'k H}), (\ref{A'k N}) and (\ref{def}), the terms $H'^{(k)}_{\mathrm{FW}}$ from $k=0$ to $k=12$ are given by \begin{equation} \begin{split} &H'^{(0)}_{\mathrm{FW}}=0,~H'^{(1)}_{\mathrm{FW}}=-\left(\frac{g}{2}-1\right)\boldsymbol{\mu}\cdot\mathbf{B},\\ &H'^{(2)}_{\mathrm{FW}}=-\left(\frac{g}{2}-1\right)\boldmu\cdot(\mathbf{E}\times\boldxi),\\ &H'^{(3)}_{\mathrm{FW}}=\frac{1}{2}\left(\frac{g}{2}-1\right)(\boldsymbol{\mu}\cdot\boldsymbol{\xi})(\mathbf{B}\cdot\boldsymbol{\xi}),\\ &H'^{(4)}_{\mathrm{FW}}=\frac{1}{2}\left(\frac{g}{2}-1\right)\boldsymbol{\xi}^2\boldmu\cdot(\mathbf{E}\times\boldxi),\\ &H'^{(5)}_{\mathrm{FW}}=-\frac{3}{8}\left(\frac{g}{2}-1\right)\boldsymbol{\xi}^2(\boldsymbol{\mu}\cdot\boldsymbol{\xi})(\mathbf{B}\cdot\boldsymbol{\xi}),\\ &H'^{(6)}_{\mathrm{FW}}=-\frac{3}{8}\left(\frac{g}{2}-1\right)\boldsymbol{\xi}^4\boldmu\cdot(\mathbf{E}\times\boldxi),\\ &H'^{(7)}_{\mathrm{FW}}=\frac{5}{16}\left(\frac{g}{2}-1\right)\boldsymbol{\xi}^4(\boldsymbol{\mu}\cdot\boldsymbol{\xi})(\mathbf{B}\cdot\boldsymbol{\xi}),\\ &H'^{(8)}_{\mathrm{FW}}=\frac{5}{16}\left(\frac{g}{2}-1\right)\boldsymbol{\xi}^6\boldmu\cdot(\mathbf{E}\times\boldxi),\\ &H'^{(9)}_{\mathrm{FW}}=-\frac{35}{128}\left(\frac{g}{2}-1\right)\boldsymbol{\xi}^6(\boldsymbol{\mu}\cdot\boldsymbol{\xi})(\mathbf{B}\cdot\boldsymbol{\xi}),\\ &H'^{(10)}_{\mathrm{FW}}=-\frac{35}{128}\left(\frac{g}{2}-1\right)\boldsymbol{\xi}^8\boldmu\cdot(\mathbf{E}\times\boldxi),\\ &H'^{(11)}_{\mathrm{FW}}=\frac{63}{256}\left(\frac{g}{2}-1\right)\boldsymbol{\xi}^8(\boldsymbol{\mu}\cdot\boldsymbol{\xi})(\mathbf{B}\cdot\boldsymbol{\xi}),\\ &H'^{(12)}_{\mathrm{FW}}=\frac{63}{256}\left(\frac{g}{2}-1\right)\boldsymbol{\xi}^{10}\boldmu\cdot(\mathbf{E}\times\boldxi).
\end{split} \end{equation} By using Eqs.~(\ref{Series1}) and (\ref{Series2}), the anomalous Hamiltonian can be written as \begin{equation}\label{HFW' total} \begin{split} H'_{\mathrm{FW}}&=\sum_{k=0}^{12}H'^{(k)}_{\mathrm{FW}}\\ &=-\left(\frac{g}{2}-1\right)\boldsymbol{\mu}\cdot\mathbf{B}-\left(\frac{g}{2}-1\right)\frac{1}{\widehat{\gamma}}\boldmu\cdot(\mathbf{E}\times\boldxi)\\ &~~+\left(\frac{g}{2}-1\right)\left(1-\frac{\widehat{\gamma}}{1+\widehat{\gamma}}\right)\frac{1}{\widehat{\gamma}}(\boldsymbol{\mu}\cdot\boldsymbol{\xi})(\mathbf{B}\cdot\boldsymbol{\xi}). \end{split} \end{equation} Combining the FW transformed Dirac Hamiltonian [Eq.~(\ref{HFW total})] and the anomalous Hamiltonian [Eq.~(\ref{HFW' total})], we have (up to terms of $(\boldsymbol{\pi}/mc)^{14}$) \begin{equation}\label{HDP FW} \begin{split} \mathcal{H}_{\mathrm{FW}}&=H_{\mathrm{FW}}+H_{\mathrm{FW}}'\\ &=V+\widehat{\gamma} mc^2-\left(\frac{g}{2}-1+\frac{1}{\widehat{\gamma}}\right)\boldsymbol{\mu}\cdot\mathbf{B}\\ &~~+\left(\frac{g}{2}-\frac{\widehat{\gamma}}{1+\widehat{\gamma}}\right)\boldsymbol{\mu}\cdot(\widehat{\boldsymbol{\beta}}\times\mathbf{E})\\ &~~+\left(\frac{g}{2}-1\right)\frac{\widehat{\gamma}}{1+\widehat{\gamma}}(\boldsymbol{\mu}\cdot\widehat{\boldsymbol{\beta}})(\mathbf{B}\cdot\widehat{\boldsymbol{\beta}}). \end{split} \end{equation} Equation (\ref{HDP FW}) is in agreement with the classical Hamiltonian with $g\neq2$. The FW transformed Dirac-Pauli Hamiltonian [Eq.~(\ref{HDP FW})] can also be obtained by directly evaluating Eq.~(\ref{HFWDP0}). Since Eq.~(\ref{HFWDP0}) is explicitly hermitian, the calculation can be done without accounting for the Dirac-Pauli string operator and the separation of hermitian and anti-hermitian parts of the Dirac-Pauli energy operator \cite{CLChang}. Up to $(\boldsymbol{\pi}/mc)^{14}$, we find that the result shown in Ref.~\cite{CLChang} is in agreement with the present result.
To find the classical correspondence of the quantum theory of a charged spin-1/2 particle, we have to perform the FW transformation on the quantum Hamiltonian. The procedure presented in this paper provides us with a more systematic and efficient method to obtain higher order expansion in the FW representation. \section{Exact unitary transformation}\label{sec:EUT} We now turn to the discussion of the \emph{exact} series expansions of the Dirac and Dirac-Pauli generating operators. The exact unitary transformation of a free particle Dirac Hamiltonian has been given in Eq.~(\ref{U-free}). In the presence of electromagnetic fields, the series of successive FW transformations becomes much more complicated. However, it is still possible to obtain the exact unitary transformation by deducing the closed form from the finite-order series expansion, if the order we obtained is high enough. For example, the exact unitary transformation of the free particle Dirac Hamiltonian can be obtained from the successive FW transformations, if there are enough terms in the series expansion to determine the closed form. Therefore, in order to find the closed form for generic cases, we must proceed to higher orders. On the other hand, it has been proposed that the low-energy and weak-field limit of the Dirac (resp. Dirac-Pauli) Hamiltonian is consistent with the classical Hamiltonian, which is the sum of the classical relativistic Hamiltonian and T-BMT Hamiltonian with $g=2$ (resp. $g\neq2$). This suggests that there exists an exact unitary transformation for the low-energy and weak-field limit. In this section we will find the closed form of the unitary transformation from the high-order series expansions of the generating operators. The unitary transformation matrix is related to the generating operator by Eqs. (\ref{U}) and (\ref{Def:YandZ}) in Kutzelnigg's diagonalization method. If the closed form of the generating operator is found, the exact unitary transformation matrix can be obtained.
For the low-energy and weak-field limit of the Dirac Hamiltonian, the Dirac generating operator can be written as \begin{equation}\label{EUT-X} \begin{split} X=\frac{X_1}{c}+\frac{X_3}{c^3}+\frac{X_5}{c^5}+\cdots. \end{split} \end{equation} In Sec.~\ref{sec:HFW}, we have obtained the terms $X_{\ell}$ up to order of $\ell=13$, which are given in Eq. (\ref{App:SolveX}). We find that Eq. (\ref{EUT-X}) with Eq. (\ref{App:SolveX}) can be incorporated into the closed form \begin{widetext} \begin{equation}\label{EUT-X-exact} \begin{split} X&=\frac{1}{1+\sqrt{1+(\boldsymbol{\sigma}\cdot\boldsymbol{\xi})^2}}\boldsymbol{\sigma}\cdot\boldsymbol{\xi}+\left(\frac{1}{\sqrt{1+\boldsymbol{\xi}^2}}-\frac{1}{1+\sqrt{1+\boldsymbol{\xi}^2}}\right)\frac{-i}{mc^2}\boldsymbol{\mu}\cdot\mathbf{E}+\left(\frac{1}{\sqrt{1+\boldsymbol{\xi}^2}}\frac{1}{1+\sqrt{1+\boldsymbol{\xi}^2}}\right)^2(\boldsymbol{\mu}\cdot\boldsymbol{\xi})(\mathbf{E}\cdot\boldsymbol{\xi}), \end{split} \end{equation} \end{widetext} The magnetic field generated from the operator $(\boldsymbol{\sigma}\cdot\boldsymbol{\pi})^2$ is included in the first term of Eq. (\ref{EUT-X-exact}). In the absence of electromagnetic fields, the kinetic momentum $\boldsymbol{\pi}$ is replaced by the canonical momentum $\mathbf{p}$. In this case, Eq. (\ref{EUT-X-exact}) becomes $c\boldsymbol{\sigma}\cdot\mathbf{p}/[mc^2+\sqrt{m^2c^4+c^2\mathbf{p}^2}]$, which is the same as Eq. (\ref{free-X}), {and} the resulting unitary transformation is exactly Eq. (\ref{U-free}). We also note that in the absence of an electric field, Eq. (\ref{EUT-X-exact}) becomes Eq. (\ref{Magnetic-X}). Taking the anomalous magnetic moment into account, the Dirac-Pauli generating operator can be written as \begin{equation} \mathcal{X}=X+X', \end{equation} where $X$ is given in Eq. (\ref{EUT-X-exact}). We find that the anomalous generating operator $X'$ with Eq. 
(\ref{X'k}) can be incorporated into the closed form \begin{widetext} \begin{equation}\label{EUT-X'-exact} \begin{split} X'&=\frac{X_3'}{c^3}+\frac{X_4'}{c^4}+\frac{X_5'}{c^5}+\cdots\\ &=\left(\frac{1}{\sqrt{1+\boldsymbol{\xi}^2}}-\frac{1}{1+\sqrt{1+\boldsymbol{\xi}^2}}\right)\left(\frac{g}{2}-1\right)\frac{1}{mc^2}\left(-i\boldsymbol{\mu}\cdot\mathbf{E}+\frac{q\hbar}{2mc}\mathbf{B}\cdot\boldsymbol{\xi}\right)\\ &~~+\frac{1}{\sqrt{1+\boldsymbol{\xi}^2}}\left(\frac{1}{1+\sqrt{1+\boldsymbol{\xi}^2}}\right)^2\left[-\frac{i}{mc^2}\left(\frac{g}{2}-1\right)(\boldsymbol{\mu}\cdot\boldsymbol{\xi})(\mathbf{E}\cdot\boldsymbol{\xi})\right]. \end{split} \end{equation} \end{widetext} The closed forms of $X$ and $X'$ have been deduced from the high-order series expansions Eq. (\ref{App:SolveX}) and Eq. (\ref{X'k}) respectively, but the rigorous proofs are still missing. The merit of obtaining the closed forms is nevertheless enormous: it allows us to guess the generic forms of $X_\ell$ and $X'_\ell$ in the series expansions, which in turn enable us to conduct rigorous proofs by mathematical induction \cite{DWChiou2014}. With Eqs. (\ref{EUT-X-exact}) and (\ref{EUT-X'-exact}) at hand, we can formally construct the exact unitary transformation. However, the main problem to be addressed is that the resulting exact unitary matrix is valid only in the low-energy and weak-field limit. In this regard, when we apply the exact unitary transformation to the Dirac or Dirac-Pauli Hamiltonian, we have to neglect nonlinear electromagnetic effects. In strong fields, the particle's energy interacting with electromagnetic fields could exceed the Dirac energy gap ($2mc^2$) and it is no longer adequate to describe the relativistic quantum dynamics without taking into account the field-theory interaction to the antiparticle. In fact, some doubts have been thrown on the mathematical rigour of the FW transformation \cite{Thaller1992}. 
The study of this paper nevertheless suggests that the exact FW transformation indeed exists and is valid in the low-energy and weak-field limit and furthermore the FW transformed Hamiltonian agrees with the classical counterpart (see \cite{DWChiou2014} for closer investigations). \section{Conclusions and Discussion}\label{sec:conclusions} The motion of a particle endowed with charge and intrinsic spin is governed by the classical Lorentz equation and the T-BMT equation. Assuming that the canonical relation of classical spins (via Poisson brackets) is the same as that of quantum spins (via commutators), the T-BMT equation can be recast as Hamilton's equation and the T-BMT Hamiltonian is obtained. By treating positions, momenta and spins as independent variables in phase space, the classical Hamiltonian describing the motion of spin-1/2 charged particle is the sum of the classical relativistic Hamiltonian and T-BMT Hamiltonian. On the other hand, the correspondence between the classical Hamiltonian and the low-energy and weak-field limit of Dirac equation has been investigated by several authors. For a free particle, the Foldy-Wouthuysen transformation of Dirac equation was shown to exactly lead to the classical relativistic Hamiltonian of a free particle. Intriguingly, when spin precession and interaction with electromagnetic fields are also taken into account, it was found that the connection between Dirac equation and classical Hamiltonian becomes explicit if the order-by-order block diagonalization of the Dirac Hamiltonian can be carried out to higher-order terms. The low-energy and weak-field limit of the relativistic quantum theory of spin-1/2 charged particle is investigated by performing the Kutzelnigg diagonalisation method on the Dirac Hamiltonian. 
We show that in the presence of inhomogeneous electromagnetic fields the Foldy-Wouthuysen transformed Dirac Hamiltonian up to terms with $(\boldsymbol{\pi}/mc)^4$ can be reproduced by the Kutzelnigg diagonalisation method. When the electromagnetic fields are homogeneous and nonlinear effects are neglected, the Foldy-Wouthuysen transformation of the Dirac Hamiltonian is obtained up to terms of $(\boldsymbol{\pi}/mc)^{14}$. The series expansion of the orbital part of the transformed Dirac Hamiltonian in terms of the kinetic momentum enables us to define the boost velocity operator. According to the correspondence between the kinetic momentum and the boost velocity operator, we found that up to terms of $(\boldsymbol{\pi}/mc)^{14}$ the Foldy-Wouthuysen transformed Dirac Hamiltonian is consistent with the classical Hamiltonian with the gyromagnetic ratio given by $g=2$. Furthermore, when the anomalous magnetic moment is considered as well, we found that up to terms of $(\boldsymbol{\pi}/mc)^{14}$ the Foldy-Wouthuysen transformed Dirac-Pauli Hamiltonian is in agreement with the classical Hamiltonian with $g\neq2$. The investigation in this paper reveals the fact that the classical Hamiltonian (classical relativistic Hamiltonian plus the T-BMT Hamiltonian) must be the low-energy and weak-field limit of the Dirac-Pauli equation. As shown in the above sections, we can establish the connection order-by-order in the FW representation. Moreover, this implies that, in the low-energy and weak-field limit, there must exist an exact FW transformation that can block-diagonalize the Dirac-Pauli Hamiltonian to the form corresponding to the classical Hamiltonian. For a free particle, the exact unitary transformation has been obtained by Foldy and Wouthuysen, which alternatively can also be obtained by the order-by-order method. 
We found that the generating operators can be written as closed forms, and consequently we can formally construct the exact unitary transformation that block-diagonalizes the Dirac and Dirac-Pauli Hamiltonians. However, it should be emphasized that the exact unitary transformation is valid only in the low-energy and weak-field limit and existence of the exact unitary transformation demands a rigorous proof \cite{DWChiou2014}. On the other hand, it is true that even if the unitary FW transformation exists, it is far from unique, as one can easily perform further unitary transformations which preserve the block decomposition upon the block-diagonalized Hamiltonian (see also \secref{sec:method}). While different block-diagonalization transformations are unitarily equivalent to one another and thus yield the same physics, the pertinent operators $\boldsymbol{\sigma}$, $\mathbf{x}$, and $\mathbf{p}$ may represent very different physical quantities in different representations. To figure out the operators' physical interpretations, it is crucial to compare the resulting FW transformed Hamiltonian to the classical counterpart in a certain classical limit via the \emph{correspondence principle}. In Kutzelnigg's method, $\boldsymbol{\sigma}$, $\mathbf{x}$, and $\mathbf{p}$ simply represent the spin, position, and conjugate momentum of the particle (as decoupled from the antiparticle) in the resulting FW representation. In other words, Kutzelnigg's method does not give rise to further transformations that obscure the operators' interpretations other than block diagonalization. The correspondence we observed may be extended to the case of inhomogeneous electromagnetic fields (except that the Darwin term has no classical correspondence) \cite{TWChen2013}, but inhomogeneity gives rise to complications which make it cumbersome to obtain the FW transformation in an order-by-order scenario, including the Kutzelnigg method. 
We wish to tackle this problem in further research. \begin{acknowledgments} The authors are grateful to C.-L.\ Chang for sharing his calculations. T.W.C.\ would like to thank G.\ Y.\ Guo, R.\ Winkler and M.-C.\ Chang for valuable discussions. T.W.C.\ is supported by the National Science Council of Taiwan under Contract No.\ NSC 101-2112-M-110-013-MY3; D.W.C.\ is supported by the Center for Advanced Study in Theoretical Sciences at National Taiwan University. \end{acknowledgments} \appendix \section{Hermiticity of FW transformed Dirac Hamiltonian}\label{App:Ham} Under the unitary transformation [Eq.~(\ref{UHU})], the Foldy-Wouthuysen transformed Dirac Hamiltonian is given by the upper-left term of $UH_DU^{\dag}$, which is \begin{equation}\label{App:HFW} \begin{split} H_{\mathrm{FW}}&=\left(Yh_++YX^{\dag}h_0\right)Y+\left(Yh_0+YX^{\dag}h_-\right)XY\\ &=Y\left(h_++X^{\dag}h_0+h_0X+X^{\dag}h_-X\right)Y. \end{split} \end{equation} Since the operators $Y$, $h_+$ and $h_0$ are hermitian, it is easy to show that $H_{FW}$ also satisfies $H_{FW}=H_{FW}^{\dag}$. The two off-diagonal terms are given by \begin{equation}\label{App:HFWX} \begin{split} &H_{X}=Z\left(-Xh_++h_0-Xh_0X+h_-X\right)Y,\\ &H_{X^{\dag}}=Y\left(-h_+-X^{\dag}h_0X^{\dag}+h_0+X^{\dag}h_-\right)Z. \end{split} \end{equation} Equation (\ref{App:HFW}) can be further simplified by using $H_{X}=0$ and $H_{X^{\dag}}=0$. The brackets in the second equality of Eq.~(\ref{App:HFW}) can be rewritten as \begin{equation}\label{App:HFW1} \begin{split} &\left(h_++X^{\dag}h_0+h_0X+X^{\dag}h_-X\right)\\ &=V+mc^2+X^{\dag}h_0+h_0X+X^{\dag}\left(V-mc^2\right)X\\ &=V+h_0X+mc^2\left(1-X^{\dag}X\right)+\left(X^{\dag}VX+X^{\dag}h_0\right). 
\end{split} \end{equation} On the other hand, we have \begin{equation}\label{App:HFW2} \begin{split} &\left(X^{\dag}VX+X^{\dag}h_0\right)\\ &=X^{\dag}\left(VX+h_0\right)\\ &=X^{\dag}\left([V,X]+XV+h_0\right)\\ &=X^{\dag}\left(2mc^2X+Xh_0X+XV\right)\\ &=2mc^2X^{\dag}X+X^{\dag}Xh_0X+X^{\dag}XV, \end{split} \end{equation} where Eq.~(\ref{EqX}) was used in the third equality of Eq.~(\ref{App:HFW2}). Substituting Eq.~(\ref{App:HFW2}) into Eq.~(\ref{App:HFW1}), we have \begin{equation}\label{App:HFW3} \begin{split} &\left(h_++X^{\dag}h_0+h_0X+X^{\dag}h_-X\right)\\ &=V+h_0X+mc^2\left(1-X^{\dag}X\right)+2mc^2X^{\dag}X\\ &~~+X^{\dag}Xh_0X+X^{\dag}XV\\ &=\left(1+X^{\dag}X\right)V+\left(1+X^{\dag}X\right)h_0X+mc^2\left(1+X^{\dag}X\right)\\ &=Y^{-2}\left(V+h_0X+mc^2\right). \end{split} \end{equation} Inserting Eq.~(\ref{App:HFW3}) into Eq.~(\ref{App:HFW}), we obtain \begin{equation}\label{App:HFW4} \begin{split} H_{\mathrm{FW}}&=YY^{-2}\left(V+h_0X+mc^2\right)Y\\ &=mc^2+Y^{-1}\left(V+h_0X\right)Y. \end{split} \end{equation} The condition $H_{X^{\dag}}=0$ implies \begin{equation}\label{App:EqXdag} X^{\dag}=\frac{1}{2mc^2}\left(h_0-X^{\dag}h_0X^{\dag}+[X^{\dag},V]\right). \end{equation} Applying Eq.~(\ref{App:EqXdag}) to Eq.~(\ref{App:HFW}), we have \begin{equation} \begin{split} H_{\mathrm{FW}}&=Y\left(h_++X^{\dag}h_0+h_0X+X^{\dag}h_-X\right)Y\\ &=Y\left[V+X^{\dag}h_0+mc^2(1-X^{\dag}X)+\left(X^{\dag}V+h_0\right)X\right]Y\\ &=Y[V+X^{\dag}h_0+mc^2(1-X^{\dag}X)+2mc^2X^{\dag}X\\ &~~+VX^{\dag}X+X^{\dag}h_0X^{\dag}X]Y\\ &=Y\left(VY^{-2}+X^{\dag}h_0Y^{-2}+mc^2Y^{-2}\right)Y, \end{split} \end{equation} where Eq.~(\ref{App:EqXdag}) was used in the second equality. We obtain \begin{equation}\label{App:HFW5} H_{\mathrm{FW}}=mc^2+Y\left(V+X^{\dag}h_0\right)Y^{-1}. 
\end{equation} Because $Y$, $h_0$ and $V$ are hermitian operators, this implies that the hermitian conjugate of Eq.~(\ref{App:HFW4}) is $H^{\dag}_{\mathrm{FW}}=mc^2+Y\left(V+X^{\dag}h_0\right)Y^{-1}$, and this is the same as Eq.~(\ref{App:HFW5}). As a consequence, we have $H_{\mathrm{FW}}^{\dag}=H_{\mathrm{FW}}$. \section{Hermiticity of FW transformed Dirac-Pauli Hamiltonian}\label{App:Ham2} In this appendix, we will show that the FW transformed Dirac-Pauli Hamiltonian can be written as Eq.~(\ref{H FWDP}) and show that Eq.~(\ref{H FWDP}) is a hermitian operator. Under the unitary transformation [Eq.~(\ref{UHDPU})], the Foldy-Wouthuysen transformed Dirac-Pauli Hamiltonian is given by the upper-left term of $U\mathcal{H}U^{\dag}$: \begin{equation}\label{App:HDP FW} \mathcal{H}_{\mathrm{FW}}=\mathcal{Y}\left(H_++\mathcal{X}^{\dag}H^{\dag}_0+H_0\mathcal{X}+\mathcal{X}^{\dag}H_-\mathcal{X}\right)\mathcal{Y}, \end{equation} where $H_+=V+V_B+mc^2$, $H_0=h_0+iV_E$ and $H_-=V-V_B-mc^2$. The operator $h_0$ is $h_0=c\,\boldsigma\cdot\boldpi$. Since the operator $\mathcal{Y}$ is hermitian, it is easy to show that Eq.~(\ref{App:HDP FW}) also satisfies $\mathcal{H}_{\mathrm{FW}}=\mathcal{H}_{\mathrm{FW}}^{\dag}$. The two off-diagonal terms are required to vanish and they are given by \begin{equation}\label{App:HDP FWX1} -\mathcal{X}H_++H_0^{\dag}-\mathcal{X}H_0\mathcal{X}+H_-\mathcal{X}=0, \end{equation} and \begin{equation}\label{App:HDP FWX2} -H_+\mathcal{X}^{\dag}-\mathcal{X}^{\dag}H_0^{\dag}\mathcal{X}^{\dag}+H_0+\mathcal{X}^{\dag}H_-=0. \end{equation} By multiplying $\mathcal{X}^{\dag}$ on the left-hand side of Eq.~(\ref{App:HDP FWX1}), we have \begin{equation}\label{App:HDP FWX3} \left(\mathcal{X}^{\dag}H_0^{\dag}+\mathcal{X}^{\dag}H_-\mathcal{X}\right)=\mathcal{X}^{\dag}\mathcal{X}H_++\mathcal{X}^{\dag}\mathcal{X}H_0\mathcal{X}. 
\end{equation} Substituting Eq.~(\ref{App:HDP FWX3}) into Eq.~(\ref{App:HDP FW}) by eliminating $\left(\mathcal{X}^{\dag}H_0^{\dag}+\mathcal{X}^{\dag}H_-\mathcal{X}\right)$, we obtain \begin{equation}\label{App:HDP FW1} \mathcal{H}_{\mathrm{FW}}=\mathcal{Y}^{-1}\left(H_++H_0\mathcal{X}\right)\mathcal{Y}, \end{equation} where the definition of the operator $\mathcal{Y}=1/\sqrt{1+\mathcal{X}^{\dag}\mathcal{X}}$ was used. On the other hand, multiplying $\mathcal{X}$ on the right-hand side of Eq.~(\ref{App:HDP FWX2}), we have \begin{equation}\label{App:HDP FWX4} \left(H_0\mathcal{X}+\mathcal{X}^{\dag}H_-\mathcal{X}\right)=H_+\mathcal{X}^{\dag}\mathcal{X}+\mathcal{X}^{\dag}H_0^{\dag}\mathcal{X}^{\dag}\mathcal{X}. \end{equation} Substituting Eq.~(\ref{App:HDP FWX4}) into Eq.~(\ref{App:HDP FW}) and eliminating the term $\left(H_0\mathcal{X}+\mathcal{X}^{\dag}H_-\mathcal{X}\right)$, we obtain \begin{equation}\label{App:HDP FW2} \mathcal{H}_{\mathrm{FW}}=\mathcal{Y}\left(H_++\mathcal{X}^{\dag}H_0^{\dag}\right)\mathcal{Y}^{-1}. \end{equation} Because $\mathcal{Y}$ is a hermitian operator and so is $H_+$, this implies that the hermitian conjugate of Eq.~(\ref{App:HDP FW1}) is $\mathcal{H}^{\dag}_{\mathrm{FW}}=\mathcal{Y}\left(H_++\mathcal{X}^{\dag}H_0^{\dag}\right)\mathcal{Y}^{-1}$, which is the same as Eq.~(\ref{App:HDP FW2}). As a consequence, we have $\mathcal{H}_{\mathrm{FW}}^{\dag}=\mathcal{H}_{\mathrm{FW}}$. On the other hand, $H_++H_0\mathcal{X}$ can be written as $\left(H_++H_0\mathcal{X}\right)=mc^2+V+V_B+(h_0+iV_E)\mathcal{X}$. Equation (\ref{App:HDP FW1}) can be simplified as \begin{equation} \mathcal{H}_{\mathrm{FW}}=mc^2+e^{\mathcal{G}/2}\mathcal{A}e^{-\mathcal{G}/2}, \end{equation} where the operators $\mathcal{A}$ and $\mathcal{G}$ are defined as $\mathcal{A}=V+h_0\mathcal{X}+V_B+iV_E\mathcal{X}$ and $\mathcal{G}=\ln\left(1+\mathcal{X}^{\dag}\mathcal{X}\right)$, respectively. \end{document}
\begin{document} \title[An extension of orthogonality relations]{An extension of orthogonality relations based\\ on norm derivatives} \author[A. Zamani and M.S. Moslehian]{Ali Zamani \MakeLowercase{and} Mohammad Sal Moslehian} \address [A. Zamani]{Department of Mathematics, Farhangian University, Tehran, Iran} \email{[email protected]} \address [M. S. Moslehian]{Department of Pure Mathematics, Ferdowsi University of Mashhad, P.O. Box 1159, Mashhad 91775, Iran} \email{[email protected], [email protected]} \subjclass[2010]{Primary 46B20; Secondary 47B49, 46C50.} \keywords{Norm derivative; orthogonality; orthogonality preserving mappings; smoothness.} \begin{abstract} We introduce the relation ${\rho}_{\lambda}$-orthogonality in the setting of normed spaces as an extension of some orthogonality relations based on norm derivatives, and present some of its essential properties. Among other things, we give a characterization of inner product spaces via the functional ${\rho}_{\lambda}$. Moreover, we consider a class of linear mappings preserving this new kind of orthogonality. In particular, we show that a linear mapping preserving ${\rho}_{\lambda}$-orthogonality has to be a similarity, that is, a scalar multiple of an isometry. \end{abstract} \maketitle \section{Introduction} In an inner product space $\big(H, \langle \cdot, \cdot\rangle\big)$, an element $x\in H$ is said to be orthogonal to $y\in H$ (written as $x\perp y$) if $\langle x, y\rangle = 0$. In the general setting of normed spaces, numerous notions of orthogonality have been introduced. Let $(X, \|\cdot\|)$ be a real normed linear space of dimension at least 2. One of the most important ones is the concept of the Birkhoff--James orthogonality ($B$-orthogonality) that reads as follows: If $x$ and $ y$ are elements of $X$, then $x$ is orthogonal to $y$ in the Birkhoff--James sense \cite{B, J}, in short $x\perp_By$, if \begin{align*} \|x + \lambda y\| \geq \|x\| \qquad (\lambda\in\mathbb{R}). 
\end{align*} Also, for $x, y\in X$ the isosceles-orthogonality ($I$-orthogonality) relation in $X$ (see \cite{J}) is defined by \begin{align*} x \perp_{I} y \Leftrightarrow \|x + y\| = \|x - y\|. \end{align*} One of the possible notions of orthogonality is connected with the so-called norm's derivatives, which are defined by \begin{align*} \rho_{-}(x,y):=\|x\|\lim_{t\rightarrow0^{-}}\frac{\|x+ty\|-\|x\|}{t} \end{align*} and \begin{align*} \rho_{+}(x,y):=\|x\|\lim_{t\rightarrow0^{+}}\frac{\|x+ty\|-\|x\|}{t}. \end{align*} Convexity of the norm yields that the above definitions are meaningful. The following properties, which will be used in the present paper can be found, for example, in \cite{A.S.T}. \begin{itemize} \item[(i)] For all $x, y \in X$, $\rho_{-}(x, y)\leq \rho_{+}(x, y)$ and $|\rho_{\pm}(x,y)| \leq \|x\|\|y\|.$ \item[(ii)] For all $x, y \in X$ and all $\alpha \in \mathbb{R}$, it holds that \begin{align*} \rho_{\pm}(\alpha x,y) = \rho_{\pm}(x,\alpha y)=\left\{\begin{array}{ll} \alpha \rho_{\pm}(x,y), &\alpha \geq 0,\\ \alpha \rho_{\mp}(x,y), &\alpha< 0.\end{array}\right. \end{align*} \item[(iii)] For all $x, y \in X$ and all $\alpha \in \mathbb{R}$, \begin{align*} \rho_{\pm}(x,\alpha x + y) = \alpha {\|x\|}^2 + \rho_{\pm}(x,y). \end{align*} \end{itemize} Recall that a support functional $F_x$ at a nonzero $x \in X$ is a norm one functional such that $F_x(x) = \|x\|$. By the Hahn--Banach theorem, there always exists at least one such functional for every $x \in X$. Recall also that $X$ is smooth at the point $x$ in $X$ if there exists a unique support functional at $x$, and it is called smooth if it is smooth at every $x \in X$. It is well known that $X$ is smooth at $x$ if and only if $\rho_{+}(x,y) = \rho_{-}(x,y)$ for all $y\in X$; see \cite{A.S.T}. It turns out that the smoothness is closely related to the Gateaux differentiability. 
Recall that the norm $\|\cdot\|$ is said to be Gateaux differentiable at $x \in X$ if the limit \begin{align*} f_x(y) = \lim_{t\rightarrow0}\frac{\|x+ty\|-\|x\|}{t} \end{align*} exists for all $y\in X$. We call such $f_x$ as the Gateaux differential at $x$ of $\|\cdot\|$. It is not difficult to verify that $f_x$ is a bounded linear functional on $X$. When $x$ is a smooth point, it is easy to see that $\rho_{+}(x,y) = \rho_{-}(x,y) = \|x\|f_x(y)$ for all $y\in X$. Therefore $X$ is smooth at $x$ if and only if the norm is the Gateaux differentiable at $x$. The orthogonality relations related to $\rho_{\pm}$ are defined as follows; see \cite{A.S.T, Mil}: \begin{align*} x\perp_{\rho_{\pm}}y \Leftrightarrow \rho_{\pm}(x, y) = 0 \end{align*} and \begin{align*} x\perp_{\rho}y \Leftrightarrow \rho(x,y):=\frac{\rho_-(x,y) + \rho_+(x,y)}{2} = 0. \end{align*} Also, the notion of $\rho_*$-orthogonality is introduced in \cite{C.L, M.Z.D} as \begin{align*} x\perp_{\rho_*}y \Leftrightarrow \rho_*(x,y) := \rho_-(x,y)\rho_+(x,y) = 0. \end{align*} Note that $\perp_{\rho_{\pm}}, \perp_{\rho}, \perp_{\rho_*} \subset \perp_B$. Furthermore, it is obvious that for a real inner product space all the above relations coincide with the standard orthogonality given by the inner product. For more information about the norm derivatives and their properties, interested readers are referred to \cite{A.S.T, C.W.2, C.W.3, Dra, W}. More recently, further properties of the relation $\perp_{\rho_*}$ are presented in \cite{M.Z.D}. Now, we introduce an orthogonality relation as an extension of orthogonality relations based on norm derivatives ${\rho_{\pm}}$. \begin{definition} Let $(X, \|\cdot\|)$ be a normed space, and let $\lambda \in [0, 1]$. The element $x\in X$ is a ${\rho}_{\lambda}$-orthogonal to $y\in X$, denoted by $x\perp_{{\rho}_{\lambda}}y$, if \begin{align*} {\rho}_{\lambda}(x, y): = \lambda\rho_-(x,y) + (1 - \lambda)\rho_+(x,y) = 0. 
\end{align*} \end{definition} The main aim of the present work is to investigate the ${\rho}_{\lambda}$-orthogonality in a normed space $X$. In Section 2, we first give basic properties of the functional ${\rho}_{\lambda}$. In particular, we give a characterization of inner product spaces based on ${\rho}_{\lambda}$. Moreover, we give some characterizations of smooth spaces in terms of ${\rho}_{\lambda}$-orthogonality. In Section 3, we consider a class of linear mappings preserving this kind of orthogonality. In particular, we show that a linear mapping preserving ${\rho}_{\lambda}$-orthogonality has to be a similarity, that is, a scalar multiple of an isometry. \section{${\rho}_{\lambda}$-orthogonality and characterization of inner product spaces} We start this section with some properties of the functional ${\rho}_{\lambda}$. The following lemma will be used. \begin{lemma}\cite[Theorem 1]{Mal}\label{L22} For any nonzero elements $x$ and $y$ in a normed space $(X, \|\cdot\|)$, it is true that \begin{align*} \|x + y\| \leq \|x\| + \|y\| - \left(2 - \left\|\frac{x}{\|x\|} + \frac{y}{\|y\|}\right\|\right)\min\{\|x\|, \|y\|\}. \end{align*} \end{lemma} \begin{theorem}\label{T23} Let $(X, \|\cdot\|)$ be a normed space, and let $\lambda \in [0, 1]$. Then \begin{itemize} \item[(i)] ${\rho}_{\lambda}(tx, y) = {\rho}_{\lambda}(x, ty) = t{\rho}_{\lambda}(x, y)$ for all $x, y\in X$ and all $t\geq0$. \item[(ii)] ${\rho}_{\lambda}(tx, y) = {\rho}_{\lambda}(x, ty) = t{\rho}_{1 - \lambda}(x, y)$ for all $x, y\in X$ and all $t<0$. \item[(iii)] ${\rho}_{\lambda}(x, tx + y) = t{\|x\|}^2 + {\rho}_{\lambda}(x, y)$ for all $x, y\in X$ and all $t\in\mathbb{R}$. \item[(iv)] If $x$ and $y$ are nonzero elements of $X$ such that $x\perp_{{\rho}_{\lambda}}y$, then $x$ and $y$ are linearly independent. \item[(v)] $(\|x\| - \|x - y\|)\|x\|\leq {\rho}_{\lambda}(x, y) \leq(\|x + y\| - \|x\|)\|x\|$ for all $x, y\in X$. 
\item[(vi)] $\big|{\rho}_{\lambda}(x, y)\big| \leq \|x\|\|y\|$ for all $x, y\in X$. \item[(vii)] If $x$ and $y$ are nonzero elements of $X$, then \begin{align*} \left(1 - \left\|\frac{x}{\|x\|} - \frac{y}{\|y\|}\right\|\right)\|x\|\|y\| \leq {\rho}_{\lambda}(x, y) \leq\left(\left\|\frac{x}{\|x\|} + \frac{y}{\|y\|}\right\| - 1\right)\|x\|\|y\|. \end{align*} \end{itemize} \end{theorem} \begin{proof} The statements (i)--(vi) follow directly from the definition of the functional ${\rho}_{\lambda}$. To establish (vii) suppose that $x$ and $y$ are nonzero elements of $X$ and that $0<t<\frac{\|x\|}{\|y\|}$. Applying Lemma \ref{L22} to $x$ and $ty$, we get \begin{align*} \left(2 - \left\|\frac{x}{\|x\|} + \frac{ty}{\|ty\|}\right\|\right) \min\{\|x\|, \|ty\|\} \leq \|x\| + \|ty\| - \|x + ty\|, \end{align*} and hence \begin{align*} \left(2 - \left\|\frac{x}{\|x\|} + \frac{y}{\|y\|}\right\|\right)t\|y\| \leq \|x\| + t\|y\| - \|x + ty\|. \end{align*} Thus \begin{align*} \frac{\|x + ty\| - \|x\|}{t}\leq \left(\left\|\frac{x}{\|x\|} + \frac{y}{\|y\|}\right\| - 1\right)\|y\|. \end{align*} It follows that \begin{align}\label{I21} \rho_{+}(x, y) \leq \left(\left\|\frac{x}{\|x\|} + \frac{y}{\|y\|}\right\| - 1\right)\|x\|\,\|y\|. \end{align} Putting $-y$ instead of $y$ in \eqref{I21}, we get \begin{align}\label{I22} \rho_{-}(x, y)\geq \left(1 - \left\|\frac{x}{\|x\|} - \frac{y}{\|y\|}\right\|\right)\|x\|\,\|y\|. \end{align} Since $\rho_{-}(x, y) \leq \rho_{+}(x, y)$, from (\ref{I21}) and (\ref{I22}), we reach \begin{align}\label{I23} \left(1 - \left\|\frac{x}{\|x\|} - \frac{y}{\|y\|}\right\|\right)\|x\|\,\|y\| \leq \rho_{+}(x, y) \leq \left(\left\|\frac{x}{\|x\|} + \frac{y}{\|y\|}\right\| - 1\right)\|x\|\,\|y\| \end{align} and \begin{align}\label{I24} \left(1 - \left\|\frac{x}{\|x\|} - \frac{y}{\|y\|}\right\|\right)\|x\|\,\|y\| \leq \rho_{-}(x, y) \leq \left(\left\|\frac{x}{\|x\|} + \frac{y}{\|y\|}\right\| - 1\right)\|x\|\,\|y\|. 
\end{align} Now, from (\ref{I23}), (\ref{I24}), and the definition of ${\rho}_{\lambda}$, the proof is completed. \end{proof} \begin{remark} Since $- 1 \leq 1 - \left\|\frac{x}{\|x\|} - \frac{y}{\|y\|}\right\|$ and $\left\|\frac{x}{\|x\|} + \frac{y}{\|y\|}\right\| - 1 \leq 1$, the inequality (vii) of Theorem \ref{T23} is an improvement of the known inequality $\big|{\rho}_{\pm}(x, y)\big| \leq \|x\|\|y\|$. \end{remark} We recall the following lemma which gives a characterization of Birkhoff--James orthogonality. \begin{lemma}\cite[Theorem 50]{Dra}\label{L21} Let $X$ be a normed space, and let $x, y\in X$. Then the following conditions are equivalent: \begin{itemize} \item[(i)] $x\perp_B y$. \item[(ii)] $\rho_-(x,y)\leq 0 \leq \rho_+(x,y)$. \end{itemize} \end{lemma} \begin{theorem}\label{th.001} Let $X$ be a normed space, and let $\lambda \in [0, 1]$. Then $\perp_{{\rho}_{\lambda}} \subseteq \perp_{B}$. \end{theorem} \begin{proof} Let $x, y\in X$ and $x \perp_{{\rho}_{\lambda}}y$. Thus $\lambda\rho_-(x, y) = (\lambda - 1)\rho_+(x, y)$. Since $\rho_-(x, y) \leq \rho_+(x, y)$, we get $\rho_-(x, y) \leq 0 \leq \rho_+(x, y)$. Therefore, by Lemma \ref{L21}, we conclude that $x \perp_{B} y$. Hence $\perp_{{\rho}_{\lambda}} \subseteq \perp_{B}$. \end{proof} To get our next result, we need the following lemma. \begin{lemma}\cite[Corollary 11]{Dra}\label{L24} Let $X$ be a normed space and let $x, y\in X$ with $x\neq 0$. Then there exists a number $t\in\mathbb{R}$ such that $x\perp_{B} tx + y$. \end{lemma} \begin{theorem} Let $(X, \|\cdot\|)$ be a normed space, and let $\lambda \in [0, 1]$. The following conditions are equivalent: \begin{itemize} \item[(i)] $\perp_{B}\subseteq \perp_{{\rho}_{\lambda}}$. \item[(ii)] $\perp_{B} = \perp_{{\rho}_{\lambda}}$. \item[(iii)] $X$ is smooth. \end{itemize} \end{theorem} \begin{proof} (i)$\Rightarrow$(ii) This implication follows immediately from Theorem \ref{th.001}. (ii)$\Rightarrow$(iii) Suppose (ii) holds. 
If $\lambda = \frac{1}{2}$, then \cite[Proposition 2.2.4]{A.S.T} implies that $X$ is smooth. Now, let $\lambda \neq \frac{1}{2}$ and $x, y\in X$. We should show that $\rho_-(x,y)=\rho_+(x,y)$. We may assume that $x\neq0$, otherwise $\rho_-(x,y)=\rho_+(x,y)$ trivially holds. By Lemma \ref{L24}, there exists a number $t\in\mathbb{R}$ such that $x\perp_{B} tx + y$. From the assumption, we have ${\rho}_{\lambda}(x, tx + y) = 0$. Hence $t{\|x\|}^2 + {\rho}_{\lambda}(x, y) = 0$, or equivalently, \begin{align}\label{I25} t{\|x\|}^2 + \lambda\rho_-(x, y) + (1 - \lambda) \rho_+(x, y) = 0. \end{align} We also have $-x\perp_{B} tx + y$, and so ${\rho}_{\lambda}(-x, tx + y) = 0$. Thus $-t{\|x\|}^2 - {\rho}_{1 - \lambda}(x, y) = 0$, or equivalently, \begin{align}\label{I26} -t{\|x\|}^2 - (1 - \lambda)\rho_-(x, y) - \lambda \rho_+(x, y) = 0. \end{align} Therefore, by (\ref{I25}) and (\ref{I26}), we have \begin{align*} (2\lambda - 1)\rho_-(x, y) + (1 - 2\lambda) \rho_+(x, y)= 0. \end{align*} Consequently, $\rho_-(x,y)=\rho_+(x,y)$. Therefore $X$ is smooth. (iii)$\Rightarrow$(i) Suppose that $X$ is smooth and that $x, y\in X$ such that $x\perp_{B}y$. It follows from Lemma \ref{L21} that $\rho_-(x, y) = \rho_+(x, y) = 0$, and this yields that $x\perp_{{\rho}_{\lambda}}y$. \end{proof} For nonsmooth spaces, the orthogonalities $\perp_{{\rho}_{\lambda}}$ and $\perp_{B}$ may not coincide. \begin{example} Consider the real space $X = \mathbb{R}^2$ equipped with the norm $\|(\alpha, \beta)\| = \max\{|\alpha|, |\beta|\}$. Let $x = (1, 1)$ and $y = (0, -1)$. Then, for every $\gamma \in \mathbb{R}$, we have \begin{align*} \|x + \gamma y\| = \|(1, 1 - \gamma)\| = \max\{1, |1 - \gamma|\}\geq 1 = \|x\|. \end{align*} Hence $x \perp_{B} y$. On the other hand, straightforward computations show that $\rho_-(x, y) = -1$ and $\rho_+(x, y) = 0$. It follows that ${\rho}_{\lambda}(x, y) = -\lambda$. Thus $x \not\perp_{{\rho}_{\lambda}}y$. 
\end{example} The following result is proved in \cite[Theorem 1]{C.W.2} and \cite[Theorem 3.1]{M.Z.D}. \begin{theorem} Let $(X, \|\cdot\|)$ be a real normed space. Then the following conditions are equivalent: \begin{align*} &(1) \perp_{\rho_-}\subseteq\perp_{\rho_+}.\quad (2) \perp_{\rho_+}\subseteq\perp_{\rho_-}.\quad (3) \perp_{\rho}\subseteq\perp_{\rho_-}.\\ &(4) \perp_{\rho_-}\subseteq\perp_{\rho}.\quad \,\,(5) \perp_{\rho} \subseteq\perp_{\rho_+}.\quad \,\,(6) \perp_{\rho_+}\subseteq\perp_{\rho}.\\ &(7) \perp_{\rho_*}\subseteq\perp_{\rho_-}.\quad (8) \perp_{\rho_*}\subseteq\perp_{\rho_+}.\quad (9) \perp_{\rho_*}\subseteq\perp_{\rho}.\\ &(10) \perp_{\rho}\subseteq\perp_{\rho_*}.\quad (11) \perp_{B}\subseteq\perp_{\rho_*}.\quad(12)\mbox{ $X$ is smooth}. \end{align*} \end{theorem} The relations $\perp_{\rho_-}$, $\perp_{\rho_+}$, $\perp_{\rho}$, and $\perp_{{\rho}_{\lambda}}$ are generally incomparable. The following example illustrates this fact. \begin{example}\label{ex.001} Consider the real normed space $X = \mathbb{R}^2$ with the norm $\|(\alpha, \beta)\| = \max\{|\alpha|, |\beta|\}$. (i) Let $x = (1, 1)$ and $y = (-\frac{1}{2\lambda}, \frac{1}{2(1 - \lambda)})$. Simple computations show that \begin{align*} \rho_-(x, y) = -\frac{1}{2\lambda} \quad \mbox{and} \quad \rho_+(x, y) = \frac{1}{2(1 - \lambda)}. \end{align*} So we get \begin{align*} \rho(x, y) = \frac{2\lambda - 1}{4\lambda(1- \lambda)} \quad \mbox{and} \quad {\rho}_{\lambda}(x, y) = 0. \end{align*} Hence $\perp_{{\rho}_{\lambda}}\nsubseteq \perp_{\rho_-}$, $\perp_{{\rho}_{\lambda}}\nsubseteq \perp_{\rho_+}$, and $\perp_{{\rho}_{\lambda}}\nsubseteq \perp_{\rho}$. (ii) Let $z = (1, 1)$, $w = (0, 1)$, $u = (0, -1)$, and $v = (1, -1)$. 
It is not hard to compute \begin{align*} \rho_-(z, w) = 0, \quad \rho_+(z, w) = 1, \quad {\rho}_{\lambda}(z, w) = 1 - \lambda, \end{align*} \begin{align*} \rho_-(z, u) = -1, \quad \rho_+(z, u) = 0, \quad {\rho}_{\lambda}(z, u) = -\lambda, \end{align*} and \begin{align*} \rho_-(z, v) = -1, \quad \rho_+(z, v) = 1, \quad \rho(z, v) = 0, \quad {\rho}_{\lambda}(z, v) = 1 - 2\lambda. \end{align*} Thus $\perp_{\rho_-}\nsubseteq \perp_{{\rho}_{\lambda}}$, $\perp_{\rho_+}\nsubseteq \perp_{{\rho}_{\lambda}}$, and $\perp_{\rho}\nsubseteq \perp_{{\rho}_{\lambda}}$. \end{example} The following result gives some characterizations of the smooth normed spaces based on the ${\rho}_{\lambda}$-orthogonality. \begin{theorem}\label{th.0101} Let $(X, \|\cdot\|)$ be a normed space, and let $\frac{1}{2} \neq \lambda \in [0, 1]$. The following conditions are equivalent: \begin{itemize} \item[(i)] $\perp_{\rho}\subseteq \perp_{{\rho}_{\lambda}}$. \item[(ii)] $\perp_{{\rho}_{\lambda}}\subseteq\perp_{\rho}$. \item[(iii)] $\perp_{{\rho}_{\lambda}} = \perp_{\rho}$. \item[(iv)] $X$ is smooth. \end{itemize} \end{theorem} \begin{proof} (i)$\Rightarrow$(iv) Let $x, y\in X\setminus\{0\}$. We have $x\perp_{\rho}\left(-\frac{\rho(x,y)}{\|x\|^2}x + y\right)$. It follows from (i) that $x\perp_{{\rho}_{\lambda}}\left(-\frac{\rho(x,y)}{\|x\|^2}x + y\right)$. From Theorem \ref{T23} (iii), we deduce that \begin{align*} -\rho(x,y) + {\rho}_{\lambda}(x,y) = {\rho}_{\lambda}\left(x, -\frac{\rho(x, y)}{\|x\|^2}x + y\right) = 0. \end{align*} Thus ${\rho}_{\lambda}(x, y) = \rho(x,y)$. It ensures that $(2\lambda - 1)\rho_-(x, y) = (2\lambda - 1)\rho_+(x, y)$, and therefore we get $\rho_-(x,y)=\rho_+(x,y)$. It follows that $X$ is smooth. The other implications can be proved similarly. 
\end{proof} If we consider $x\perp_{\rho_+}\left(-\frac{\rho_+(x,y)}{\|x\|^2}x + y\right)$ instead of $x\perp_{\rho}\left(-\frac{\rho(x,y)}{\|x\|^2}x + y\right)$, then, using the same reasoning as in the proof of Theorem \ref{th.0101}, we get the next result. \begin{theorem} Let $(X, \|\cdot\|)$ be a normed space, and let $\lambda \in (0, 1]$. The following conditions are equivalent: \begin{itemize} \item[(i)] $\perp_{\rho_+} \subseteq \perp_{{\rho}_{\lambda}}$ \item[(ii)] $\perp_{{\rho}_{\lambda}}\subseteq\perp_{\rho_+}$. \item[(iii)] $\perp_{{\rho}_{\lambda}} = \perp_{\rho_+}$. \item[(iv)] $X$ is smooth. \end{itemize} \end{theorem} In the following result we establish another characterizations of smooth spaces. \begin{theorem} Let $(X, \|\cdot\|)$ be a normed space, and let $\lambda \in [0, 1)$. The following conditions are equivalent: \begin{itemize} \item[(i)] $\perp_{\rho_-} \subseteq \perp_{{\rho}_{\lambda}}$ \item[(ii)] $\perp_{{\rho}_{\lambda}}\subseteq\perp_{\rho_-}$. \item[(iii)] $\perp_{{\rho}_{\lambda}} = \perp_{\rho_-}$. \item[(iv)] $X$ is smooth. \end{itemize} \end{theorem} \begin{proof} The proof is similar to the proof of Theorem \ref{th.0101}, so we omit it. \end{proof} It is easy to see that, in a real inner product space $X$, the equality \begin{align}\label{I27} {\|x + y\|}^4 - {\|x - y\|}^4 = 8\Big({\|x\|}^2\langle x, y\rangle + {\|y\|}^2\langle y, x\rangle\Big) \qquad (x, y \in X) \end{align} holds, which is equivalent to the parallelogram equality \begin{align*} {\|x + y\|}^2 + {\|x - y\|}^2 = 2\big({\|x\|}^2 + {\|y\|}^2\big) \qquad (x, y \in X). \end{align*} In normed spaces, the equality \begin{align*} {\|x + y\|}^4 - {\|x - y\|}^4 = 8\Big({\|x\|}^2{\rho}_{\lambda}(x, y) + {\|y\|}^2 {\rho}_{\lambda}(y, x)\Big) \qquad (x, y \in X). \end{align*} is a generalization of the equality \eqref{I27}. In the following result we give a sufficient condition for a normed space to be smooth. We use some ideas of \cite[Theorem 5]{Mil}. 
\begin{theorem} Let $(X, \|\cdot\|)$ be a normed space and $\lambda \in [0, 1]$. Let \begin{align}\label{I28} {\|x + y\|}^4 - {\|x - y\|}^4 = 8\Big({\|x\|}^2{\rho}_{\lambda}(x, y) + {\|y\|}^2 {\rho}_{\lambda}(y, x)\Big) \qquad (x, y \in X). \end{align} Then $X$ is smooth. \end{theorem} \begin{proof} Let $x, y\in X\setminus \{0\}$ and $\lambda \in (0, 1]$. It follows from \eqref{I28} that \begin{align*} 8\Big({\|x\|}^2{\rho}_{\lambda}(x, y) &+ {\|y\|}^2 {\rho}_{\lambda}(y, x)\Big) \\& = {\|x + y\|}^4 - {\|x - y\|}^4 \\& = \lim_{t\rightarrow0^{+}}\Big(\big\|(x + \frac{t}{2}y) + y\big\|^4 - \big\|(x + \frac{t}{2}y) - y\big\|^4\Big) \\& = \lim_{t\rightarrow0^{+}}8\Big(\big\|x + \frac{t}{2}y\big\|^2{\rho}_{\lambda}(x + \frac{t}{2}y, y) + {\|y\|}^2 {\rho}_{\lambda}(y, x + \frac{t}{2}y)\Big) \\& = \lim_{t\rightarrow0^{+}}8\Big(\big\|x + \frac{t}{2}y\big\|^2{\rho}_{\lambda}(x + \frac{t}{2}y, y) + {\|y\|}^2 \big(\frac{t}{2}{\|y\|}^2 + {\rho}_{\lambda}(y, x)\big)\Big) \\& = 8\Big({\|x \|}^2\lim_{t\rightarrow0^{+}}{\rho}_{\lambda}(x + \frac{t}{2}y, y) + {\|y\|}^2 {\rho}_{\lambda}(y, x)\Big). \end{align*} Therefore \begin{align}\label{I29} \lim_{t\rightarrow0^{+}}{\rho}_{\lambda}(x + \frac{t}{2}y, y) = {\rho}_{\lambda}(x, y). 
\end{align} The equalities \eqref{I28} and \eqref{I29} imply that \begin{align*} \rho_+(x, y) &= \|x\|\lim_{t\rightarrow0^{+}}\frac{\|x + ty\| - \|x\|}{t} \\& = \|x\|\lim_{t\rightarrow0^{+}}\frac{8\Big({\|x + \frac{t}{2}y\|}^2{\rho}_{\lambda}(x + \frac{t}{2}y, \frac{t}{2}y) + {\|\frac{t}{2}y\|}^2 {\rho}_{\lambda}(\frac{t}{2}y, x + \frac{t}{2}y)\Big)}{t(\|x +ty\| + \|x\|)({\|x + ty\|}^2 + {\|x\|}^2)} \\& = \|x\|\lim_{t\rightarrow0^{+}}\frac{4{\|x + \frac{t}{2}y\|}^2{\rho}_{\lambda}(x + \frac{t}{2}y, y) + \frac{t^3}{2}{\|y\|}^4 + t^2{\|y\|}^2 {\rho}_{\lambda}(y, x)}{(\|x +ty\| + \|x\|)({\|x + ty\|}^2 + {\|x\|}^2)} \\& = \|x\|\frac{4{\|x\|}^2{\rho}_{\lambda}(x, y)}{(2\|x\|)(2{\|x\|}^2)} = {\rho}_{\lambda}(x, y), \end{align*} and hence $\rho_+(x, y) = {\rho}_{\lambda}(x, y)$. Since ${\rho}_{\lambda}(x, y) = \lambda \rho_-(x, y) + (1 - \lambda)\rho_+(x, y)$, we get $\rho_-(x, y) = \rho_+(x, y)$. It follows that $X$ is smooth. Now, let $\lambda = 0$. Then, by (\ref{I28}) we have \begin{align}\label{I280} {\|x + y\|}^4 - {\|x - y\|}^4 = 8\Big({\|x\|}^2{\rho}_+(x, y) + {\|y\|}^2 {\rho}_+(y, x)\Big) \qquad (x, y \in X). \end{align} If we replace $y$ by $-y$ in (\ref{I280}), then we obtain \begin{align*} {\|x - y\|}^4 - {\|x + y\|}^4 = 8\Big(-{\|x\|}^2{\rho}_-(x, y) - {\|y\|}^2 {\rho}_-(y, x)\Big), \end{align*} or equivalently, \begin{align}\label{I281} {\|x + y\|}^4 - {\|x - y\|}^4 = 8\Big({\|x\|}^2{\rho}_-(x, y) + {\|y\|}^2 {\rho}_-(y, x)\Big) \qquad (x, y \in X). \end{align} Add (\ref{I280}) and (\ref{I281}) to get \begin{align}\label{I282} {\|x + y\|}^4 - {\|x - y\|}^4 = 8\Big({\|x\|}^2{\rho}(x, y) + {\|y\|}^2 {\rho}(y, x)\Big) \qquad (x, y \in X). \end{align} Now, by (\ref{I282}) and the same reasoning as in the first part, we conclude that $X$ is smooth. 
\end{proof} Recall that a normed space $(X, \|\cdot\|)$ is uniformly convex whenever, for all $\varepsilon > 0$, there exists a $\xi > 0$ such that if $\|x\| = \|y\| = 1$ and $\|x - y\|\geq \varepsilon$, then $\left\|\frac{x + y}{2}\right\| \leq 1 - \xi$; see, for example, \cite{Dra}. In the following theorem we state a characterization of uniformly convex spaces via ${\rho}_{\lambda}$. \begin{theorem} Let $(X, \|\cdot\|)$ be a normed space, and let $\lambda \in [0, 1]$. Then the following conditions are equivalent: \begin{itemize} \item[(i)] $X$ is uniformly convex. \item[(ii)] For all $\varepsilon > 0$, there exists a number $\delta > 0$ such that if $\|x\| = \|y\| = 1$ and $\|x - y\|\geq \varepsilon$, then ${\rho}_{\lambda}(x, y) \leq \frac{1- \delta^2}{1 + \delta^2}$. \end{itemize} \end{theorem} \begin{proof} (i)$\Rightarrow$(ii) Let $X$ be uniformly convex, and let $\varepsilon > 0$. There exists a number $\xi > 0$ such that if $\|x\| = \|y\| = 1$ and $\|x - y\|\geq \varepsilon$, then $\left\|\frac{x - y}{2}\right\| \leq 1 - \xi$. Thus, by Theorem \ref{T23}(v), we obtain \begin{align*} {\rho}_{\lambda}(x, y) \leq \|x + y\| - 1 \leq 2(1 - \xi) - 1 = \frac{1- \frac{\xi}{1 - \xi}}{1 + \frac{\xi}{1 - \xi}}. \end{align*} Put $\delta = \sqrt{\frac{\xi}{1 - \xi}}$. It follows from the above inequality that ${\rho}_{\lambda}(x, y) \leq \frac{1- \delta^2}{1 + \delta^2}$. (ii)$\Rightarrow$(i) Suppose (ii) holds. Let $\varepsilon > 0$, and choose a number $\delta > 0$ such that if $\|u\| = \|v\| = 1$ and $\|u - v\|\geq \frac{\varepsilon}{4}$, then ${\rho}_{\lambda}(u, v) \leq \frac{1- \delta^2}{1 + \delta^2}$. Put $\xi = \min\{\frac{\varepsilon}{4}, \frac{\delta^2}{1 + \delta^2}\}$. Now, let $\|x\| = \|y\| = 1$ and $\|x - y\|\geq \varepsilon$. If $\left\|\frac{x + y}{2}\right\| = 0$, then $\left\|\frac{x + y}{2}\right\| \leq 1 - \xi$ is evident. Therefore, let $\left\|\frac{x + y}{2}\right\| > 0$. 
So either $(2 - \|x + y\|) \geq 2\xi$ or $\|x + y\|\left\|\frac{x + y}{\|x + y\|} - x\right\|\geq \varepsilon - 2\xi$. (Indeed, otherwise we obtain \begin{align*} \|x - y\| = \left\|(2 - \|x + y\|)x - \|x + y\|\left(\frac{x + y}{\|x + y\|} - x\right)\right\| < 2\xi + \varepsilon - 2\xi = \varepsilon, \end{align*} contradicting our assumption.) If $(2 - \|x + y\|) \geq 2\xi$, then we get $\left\|\frac{x + y}{2}\right\| \leq 1 - \xi$. In addition, if $\|x + y\|\left\|\frac{x + y}{\|x + y\|} - x\right\|\geq \varepsilon - 2\xi$, then we reach \begin{align*} \left\|\frac{x + y}{\|x + y\|} - x\right\|\geq \frac{\varepsilon - 2\xi}{\|x + y\|} \geq \frac{\varepsilon - 2\xi}{2}\geq \frac{\varepsilon}{4}. \end{align*} Since $\|x\| = \left\|\frac{x + y}{\|x + y\|}\right\| = 1$ and $\left\|\frac{x + y}{\|x + y\|} - x\right\|\geq \frac{\varepsilon}{4}$, our assumption yields \begin{align}\label{I210} {\rho}_{\lambda}\left(\frac{x + y}{\|x + y\|}, x\right) \leq \frac{1- \delta^2}{1 + \delta^2}. \end{align} By Theorem \ref{T23}(v) and (\ref{I210}), we conclude that \begin{align*} \left\|\frac{x + y}{2}\right\| &= \frac{1}{2}\Big( 1 + \big(\|x + y\| - \|(x + y) - x\|\big)\Big) \\& \leq\frac{1}{2}\left( 1 + \frac{1}{\|x + y\|}{\rho}_{\lambda}(x + y, x)\right) \\& \leq\frac{1}{2}\left( 1 + \frac{1- \delta^2}{1 + \delta^2}\right) = 1 - \frac{\delta^2}{1 + \delta^2} \leq 1 - \xi. \end{align*} Thus $\left\|\frac{x + y}{2}\right\|\leq 1 - \xi$ and the proof is completed. \end{proof} We finish this section by applying our definition of the functional ${\rho}_{\lambda}$ to give a new characterization of inner product spaces. \begin{theorem}\label{T26} Let $(X, \|\cdot\|)$ be a normed space, and let $\lambda \in [0, 1]$. Then the following conditions are equivalent: \begin{itemize} \item[(i)] ${\rho}_{\lambda}(x, y) = {\rho}_{\lambda}(y, x)$ for all $x, y \in X$. \item[(ii)] The norm in $X$ comes from an inner product. 
\end{itemize} \end{theorem} \begin{proof} Obviously, (ii)$\Rightarrow$(i). Suppose (i) holds. This condition implies that ${\rho}_{1 - \lambda}(x, y) = {\rho}_{1 - \lambda}(y, x)$ for all $x, y \in X$. Indeed Theorem \ref{T23}(ii) implies \begin{align*} {\rho}_{1 - \lambda}(x, y) = -{\rho}_{\lambda}(-x, y) = -{\rho}_{\lambda}(y, -x) = {\rho}_{1 - \lambda}(y, x). \end{align*} Now, let $P$ be any two dimensional subspace of $X$. Define a mapping $\langle \cdot, \cdot\rangle:X\times X\rightarrow\mathbb{R}$ by \begin{align*} \langle x, y\rangle := \frac{{\rho}_{\lambda}(x, y) + {\rho}_{1 - \lambda}(x, y)}{2}, \qquad (x, y\in X). \end{align*} We will show that $\langle \cdot, \cdot\rangle$ is an inner product in $P$. It is easy to see that the mapping $\langle \cdot, \cdot\rangle$ is non-negative, symmetric, and homogeneous. Therefore, it is enough to show the additivity respect to the second variable. Take $x, y, z\in P$. We consider two cases: $\mathbf{Case \,1.}$ $x$ and $y$ are linearly dependent. Thus $y = tx$ for some $t\in\mathbb{R}$ and so \begin{align*} \langle x, y + z\rangle& = \langle x, tx + z\rangle \\&= \frac{{\rho}_{\lambda}(x, tx + z) + {\rho}_{1 - \lambda}(x, tx + z)}{2} \\ & = \frac{2t{\|x\|}^2 + {\rho}_{\lambda}(x, z) + {\rho}_{1 - \lambda}(x, z)}{2} \\& = \langle x, tx\rangle + \langle x, z\rangle = \langle x, y\rangle + \langle x, z\rangle. \end{align*} $\mathbf{Case \,2.}$ $x$ and $y$ are linearly independent. Hence $z = tx + ry$ for some $t, r\in\mathbb{R}$. 
We have \begin{align*} \langle x, y + z\rangle& = \langle x, tx + (1 + r)y\rangle \\& = \frac{{\rho}_{\lambda}\big(x, tx + (1 + r)y\big) + {\rho}_{1 - \lambda}\big(x, tx + (1 + r)y\big)}{2} \\ & = \frac{2t{\|x\|}^2 + {\rho}_{\lambda}\big(x, (1 + r)y\big) + {\rho}_{1 - \lambda}\big(x, (1 + r)y\big)}{2} \\ & = \langle x, tx\rangle + \langle x, (1 + r)y\rangle \\ & = \langle x, tx\rangle + (1 + r)\langle x, y\rangle \\& = \langle x, y\rangle + \big(\langle x, tx\rangle + \langle x, ry\rangle\big) \qquad (\mbox{by case 1}) \\&= \langle x, y \rangle + \langle x, tx + ry\rangle = \langle x, y \rangle + \langle x, z\rangle. \end{align*} Thus $\langle \cdot, \cdot\rangle$ is an inner product in $P$. So, by \cite[Theorem 1.4.5]{A.S.T}, the norm in $X$ comes from an inner product. \end{proof} \section{Linear mappings preserving ${\rho}_{\lambda}$-orthogonality} A mapping $T:H\rightarrow K$ between two inner product spaces $H$ and $K$ is said to be orthogonality preserving if $x\perp y$ ensures $Tx\perp Ty$ for every $x,y\in H$. It is well known that an orthogonality preserving linear mapping between two inner product spaces is necessarily a similarity, that is, there exists a positive constant $\gamma$ such that $\|Tx\| = \gamma\|x\|$ for all $x\in H$; see \cite{Ch.1, Z.M.F, Z.C.H.K}. Now, let $X$ and $Y$ be normed spaces, and let $\diamondsuit \in \{B, I, \rho_-, \rho_+, \rho, \rho_*, {\rho}_{\lambda}\}$. Let us consider the linear mappings $T:X\rightarrow Y$, which preserve the $\diamondsuit$-orthogonality in the following sense: \begin{align*} x\perp_{\diamondsuit} y\Rightarrow Tx\perp_{\diamondsuit} Ty \qquad(x,y\in X). \end{align*} \begin{remark} Such mappings can be very irregular, far from being continuous or linear; see \cite{Ch.1}. Therefore we restrict ourselves to linear mappings only. 
\end{remark} It is proved by Koldobsky \cite{K} (for real spaces) and Blanco and Turn\v{s}ek \cite{B.T} (for real and complex ones) that a linear mapping $T\colon X\to Y$ preserving $B$-orthogonality has to be a similarity. Martini and Wu \cite{M.W} proved the same result for mappings preserving $I$-orthogonality. In \cite{C.W.2,C.W.3, W}, for $\diamondsuit \in \{\rho_-, \rho_+, \rho\}$, Chmieli\'{n}ski and W\'{o}jcik proved that a linear mapping, which preserves $\diamondsuit$-orthogonality, is a similarity. Recently, the authors of the paper \cite{M.Z.D} studied $\rho_*$-orthogonality preserving mappings between real normed spaces. In particular, they showed that every linear mapping that preserves $\rho_*$-orthogonality is necessarily a similarity (The same result is obtained in \cite{C.L} by using a different approach for real and complex spaces). In this section, we show that every ${\rho}_{\lambda}$-orthogonality preserving linear mapping is necessarily a similarity as well. Throughout, we denote by $\mu^n$ the Lebesgue measure on $\mathbb{R}^n$. When $n = 1$ we simply write $\mu$. \begin{lemma}\cite[Theorem 1.18]{Ph}\label{L41} Every norm on $\mathbb{R}^n$ is Gateaux differentiable $\mu^n$--a.e. on $\mathbb{R}^n$. \end{lemma} The following lemma plays a crucial role in the proof of the next theorem. \begin{lemma}\cite[Lemma 2.4]{B.T}\label{L42} Let $\|\cdot\|$ be any norm on $\mathbb{R}^2$, and let $D\subseteq \mathbb{R}^2$ be a set of all nonsmooth points. Then there exists a path $\gamma : [0, 2] \rightarrow \mathbb{R}^2$ of the form: \begin{align*} \gamma(t):= \Bigg \{\begin{array}{ll} (1, t\xi), & t\in[0, 1], \\\\ \big(1, (2 - t)\xi + (t - 1)\big), & t\in[1, 2], \end{array} \end{align*} for some $\xi \in \mathbb{R}$, so that $\mu\{t: \gamma(t) \in D\} = 0$. \end{lemma} We are now in the position to establish the main result of this section. 
\begin{theorem}\label{T41} Let $X$ and $ Y$ be normed spaces, and let $T\,:X\longrightarrow Y$ be a nonzero linear bounded mapping. Then the following conditions are equivalent: \begin{itemize} \item[(i)] $T$ preserves ${\rho}_{\lambda}$-orthogonality. \item[(ii)] $\|Tx\| = \|T\|\,\|x\|$ for all $x\in X$. \item[(iii)] ${\rho}_{\lambda}(Tx, Ty) = \|T\|^2\,{\rho}_{\lambda}(x, y)$ for all $x, y\in X$. \end{itemize} \end{theorem} \begin{proof} The implications (ii)$\Rightarrow$(iii) and (iii)$\Rightarrow$(i) are clear and it remains to prove (i)$\Rightarrow$(ii). Now we adopt some techniques used by Blanco and Turn\v{s}ek \cite[Theorem 3.1]{B.T}. Suppose that (i) holds. Clearly we can assume $T \neq 0$. Let us first show that $T$ is injective. Suppose on the contrary that $Tx = 0$ for some $x\in X \setminus \{0\}$. Let $y$ be a element in $X$ which is independent of $x$. Then we can choose a number $n \in \mathbb{N}$ such that $\frac{\|y\|}{n\|x + \frac{1}{n}y\|} < 1$. Put $z = x + \frac{1}{n}y$. Therefore Theorem \ref{T23}(vi) implies that \begin{align}\label{I41} 0 < 1 - \frac{\|y\|}{n\|z\|} = 1 - \frac{\|z\|\,\|y\|}{n{\|z\|}^2} \leq 1 - \frac{{\rho}_{\lambda}(z, y)}{n{\|z\|}^2}. \end{align} On the other hand, ${\rho}_{\lambda}(z, -\frac{{\rho}_{\lambda}(z, y)}{{\|z\|}^2}z + y) = -\frac{{\rho}_{\lambda}(z, y)}{{\|z\|}^2}{\|z\|}^2 + {\rho}_{\lambda}(z, y) = 0$. Since $T$ preserves ${\rho}_{\lambda}$-orthogonality, it follows that \begin{align}\label{I42} \frac{1}{n}\left(1 -\frac{{\rho}_{\lambda}(z, y)}{{n\|z\|}^2}\right){\|Ty\|}^2 = {\rho}_{\lambda}(Tz, -\frac{{\rho}_{\lambda}(z, y)}{{\|z\|}^2}Tz + Ty) = 0. \end{align} Relations (\ref{I41}) and (\ref{I42}) yield $Ty = 0$. Hence $T = 0$, a contradiction. We show next that \begin{align*} \|x\| = \|y\| \,\Rightarrow \,\|Tx\| = \|Ty\| \qquad (x, y \in X), \end{align*} which gives (ii). If $x$ and $y$ are linearly dependent, then $x = ty$ for some $t\in \mathbb{R}$ with $|t| = 1$. Thus $\|Tx\| = \|tTy\| = \|Ty\|$. 
Now let us suppose that $x$ and $y$ are linearly independent. Let $M$ be the linear subspace spanned by $x$ and $y$. For $u \in M$, define ${\|u\|}_T : = \|Tu\|$. Since $T$ is injective, ${\|\cdot\|}_T$ is a norm on $M$. Let $\Delta$ be the set of all those points $u \in M$ at which at least one of the norms, $\|\cdot\|$ or ${\|\cdot\|}_T$, is not Gateaux differentiable. For $u \in M\setminus \Delta$, let $F_u$ and $G_u$ denote the support functionals at $u$ of $\|\cdot\|$ and ${\|\cdot\|}_T$ on $M$, respectively. Let $v \in \ker F_u$. Since $(M, \|\cdot\|)$ is smooth at $u$, we obtain ${\rho}_{\lambda}(u, v) = 0$, and hence ${\rho}_{\lambda}(Tu, Tv) = 0$. Moreover, since $(M, {\|\cdot\|}_T)$ is smooth at $u$, we have \begin{align}\label{I44} {\rho}_{\lambda}(Tu, Tv) = \lambda {\|u\|}_TG_u(v) + (1 - \lambda){\|u\|}_TG_u(v) = \|Tu\|G_u(v), \end{align} whence $G_u(v) = 0$. So, we have $\ker F_u \subseteq \ker G_u$ for all $u \in M\setminus \Delta$, or equivalently there exists a function $\varphi : M\setminus \Delta \rightarrow \mathbb{R}$ such that $G_u = \varphi(u) F_u$ for all $u \in M\setminus \Delta$. By \eqref{I44} we get $\|Tu\| = \varphi(u)\|u\|$ for all $u\in M\setminus \Delta$. So, we conclude that $g_u = \varphi(u) f_u$, where $f_u$ and $g_u$ are the Gateaux differentials at $u$ of $\|\cdot\|$ and ${\|\cdot\|}_T$, respectively. Define $L : \mathbb{R}^2 \rightarrow M$ by $L(r, t) := rx + t(y - x)$. Clearly, $L$ is a linear isomorphism. Set $D = L^{-1}(M)$. Then $D$ is the set of those points $(r, t) \in \mathbb{R}^2$ at which at least one of the functions $(r, t)\mapsto \|L((r, t)\|$ or $(r, t)\mapsto {\|L((r, t)\|}_T$ is not Gateaux differentiable. Both these functions are norms on $\mathbb{R}^4$. Hence, by Lemma \ref{L41}, $\mu^4(D) = 0$. Let $\gamma : [0, 2] \rightarrow \mathbb{R}^2$ be the path obtained in Lemma \ref{L42}. 
Then $\Phi : [0, 2] \rightarrow M$ defined by \begin{align*} \Phi(t) := \frac{\|x\|}{\|L(\gamma(t))\|} L(\gamma(t)) \qquad (t\in [0, 2]), \end{align*} is a path from $x$ to $y$ such that $\|\Phi(t)\| = \|x\|$ and $\mu\{t:\,\Phi(t)\in \Delta\} = \mu\{t:\,\gamma(t)\in D\} = 0$. Note that $t \mapsto \|L(\gamma(t))\|$ and $t \mapsto {\|L(\gamma(t))\|}_T$ are Lipschitz functions and, therefore, are absolutely continuous. Indeed, if $t_1, t_2\in[0, 1]$, then \begin{align*} \Big|\|L(\gamma(t_1))\| - \|L(\gamma(t_2))\|\Big| \leq |\xi||t_1 - t_2|\|y - x\|. \end{align*} In addition, if $t_1, t_2\in[1, 2]$, then $\Big|\|L(\gamma(t_1))\| - \|L(\gamma(t_2))\|\Big| \leq |1 - \xi||t_1 - t_2|\|y - x\|$. Finally, if $t_1\in[0, 1]$ and $t_2\in[1, 2]$, then \begin{align*} \Big|\|L(\gamma(t_1))\| - \|L(\gamma(t_2))\|\Big| \leq (1 + |\xi|)|t_1 - t_2|\,\|y - x\|. \end{align*} So $t \mapsto \|L(\gamma(t))\|$ satisfies Lipschitz conditions. Similarly, $t \mapsto {\|L(\gamma(t))\|}_T$ satisfies Lipschitz conditions. It follows that ${\|\Phi(t)\|}_T = \frac{\|x\|{\|L(\gamma(t))\|}_T}{\|L(\gamma(t))\|}$ is absolutely continuous and that $\mu\big\{t:\, \Phi'(t)\,\,\mbox{does not exist}\big\} = \mu\big\{t:\,\, {\|L(\gamma(t))\|}'\,\mbox{does not exist}\big\} = 0$. Since $t \mapsto \|\Phi(t)\| = \|x\|$ is a constant function, we obtain ${{\|\Phi(t)\|}'}_T = 0$ $\mu$--a.e. on $[0, 2]$. Thus $t \mapsto {\|\Phi(t)\|}_T$ is a constant function, and we arrive at $\|Tx\| = \|Ty\|$. \end{proof} Finally, taking $X = Y$ and $T = id$, one obtains, from Theorem \ref{T41}, the following result. \begin{corollary} Let $X$ be a normed space endowed with two norms ${\|\cdot\|}_1$ and ${\|\cdot\|}_2$, which generate respective functionals ${\rho}_{\lambda, 1}$ and ${\rho}_{\lambda, 2}$. 
Then the following conditions are equivalent: \begin{itemize} \item[(i)] There exist constants $0 < m \leq M$ such that \begin{align*} m|{\rho}_{\lambda, 1}(x, y)| \leq |{\rho}_{\lambda, 2}(x, y)| \leq M |{\rho}_{\lambda, 1}(x, y)| \qquad (x, y\in X). \end{align*} \item[(ii)] The spaces $(X, {\|\cdot\|}_1)$ and $(X, {\|\cdot\|}_2)$ are isometrically isomorphic. \end{itemize} \end{corollary} \textbf{Acknowledgement.} This research is supported by a grant from the Iran National Science Foundation (INSF- No. 95013683). \end{document}
\begin{document} \title{Fast and simple quantum state estimation} \author{Daniel Uzc\'ategui Contreras} \affiliation{Departamento de F\'{i}sica, Facultad de Ciencias B\'{a}sicas, Universidad de Antofagasta, Casilla 170, Antofagasta, Chile} \author{Gabriel Senno} \affiliation{ICFO-Institut de Ciencies Fotoniques, The Barcelona Institute of Science and Technology, Castelldefels (Barcelona), 08860, Spain} \author{Dardo Goyeneche} \affiliation{Departamento de F\'{i}sica, Facultad de Ciencias B\'{a}sicas, Universidad de Antofagasta, Casilla 170, Antofagasta, Chile} \date{December 14, 2020} \begin{abstract} We present an iterative method to solve the multipartite quantum state estimation problem. We demonstrate convergence for any informationally complete set of generalized quantum measurements in every finite dimension. Our method exhibits fast convergence in high dimension and strong robustness under the presence of realistic errors both in state preparation and measurement stages. In particular, for mutually unbiased bases and tensor product of generalized Pauli observables it converges in a single iteration. We show outperformance of our algorithm with respect to the state-of-the-art of maximum likelihood estimation methods both in runtime and fidelity of the reconstructed states. \end{abstract} \maketitle \section{Introduction} \emph{Quantum state estimation} is the process of reconstructing the density matrix from measurements performed over an ensemble of identically prepared quantum systems. In the early days of quantum theory, W. Pauli posed the question of whether position and momentum probability distributions univocally determine the state of a quantum particle \cite{P1933}, something that holds in classical mechanics. However, quantum states belong to an abstract Hilbert space whose dimension exponentially increases with the number of particles of the system. Thus, more information than classically expected is required to determine the state. 
Since then, there has been increasing interest in estimating the state of a quantum system from a given set of measurements, and several solutions have appeared.
For relevant families of informationally-complete set of measurements, analytical expressions for the inverse maps are known, significantly speeding up the whole reconstruction effort, see e.g. \cite{guctua2020fast}. MLE aims to find the state that maximizes the probability of obtaining the given experimental data set, among the entire set of density matrices. Within the different implementations of this basic last idea, those currently achieving the best runtimes are variants of a projected-gradient-descent scheme, see \cite{shang2017superfast,bolduc2017projected}. Algorithms based on variants of linear inversion \cite{kaznady2009numerical,acharya2019comparative} are typically faster than those implementing MLE when the inversion process is taken from already existing expressions \cite{GKKT20}. On the other hand, when restrictions on the rank of the state being reconstructed apply, techniques based on the probabilistic method of compressed-sensing have proven to be very satisfactory \cite{gross2010quantum,cramer2010efficient,acharya2017statistical}. In particular, the statistics based on five rank-one projective measurements is good enough to have high fidelity reconstruct of rank-one quantum states, even under the presence of errors in both state preparation and measurement stages \cite{G15}. It is natural to wonder whether one can find a method achieving fidelities as good as those based on MLE, with markedly better runtimes and without rank restrictions. In this work, we present a general method for quantum state estimation achieving better runtimes and fidelities than the state-of-the-art implementations of MLE. This paper is organized as follows. In Section \ref{sec:pio}, we introduce the main ingredient of our algorithm: the \emph{Physical Imposition Operator}, a linear operator having an intuitive geometrical interpretation. 
In Section \ref{sec:algorithm}, we present our iterative algorithm for quantum state estimation based on the physical imposition operator and prove its convergence. In Section \ref{sec:ultra-convergence} we show that for a wide class of quantum measurements, which include mutually unbiased bases and tensor product of generalized Pauli observables for $N$ qudit systems, convergence is achieved in a single iteration. In Section \ref{sec:simulations}, we numerically study the performance of our algorithm in terms of runtime and fidelity estimation, finding an improvement with respect to the most efficient MLE-based method, as far as we know. Finally, in Section \ref{sec:conclusions} we provide conclusions and future lines of research. Proofs of all our results are presented in Appendix \ref{proofs}. \section{Imposing physical information}\label{sec:pio} Consider an experimental procedure $\mathcal{P}$ that prepares a quantum system in some \emph{unknown} state. Let us assume that, given some prior knowledge about $\mathcal{P}$, our best guess for $\rho$ is the state $\rho_0$, which could be even the maximally mixed state in absence of prior information. Next, we perform a POVM measurement $A$ composed by $m_A$ outcomes, i.e $A=\{E_i\}_{i\leq m_A}$ on an ensemble of systems independently prepared according to $\mathcal{P}$, obtaining the outcome statistics $\vec{p}=\{p_i\}_{i\leq m_A}$. Given this newly acquired information, \begin{quote} \emph{how can we update $\rho_0$ to reflect our new state of knowledge about the system?} \end{quote} To tackle this question, we introduce the \emph{physical imposition operator}, a linear map that replaces the initial predictions about observable $A$ contained in $\rho_0$ with an experimentally observed probability $p_i$. 
\begin{defi}[Physical imposition operator]\label{def:PIOO} Let $A=\{E_i\}_{i\leq {m_A}}$ be a POVM acting on a $d$-dimensional Hilbert space $\mathcal{H}_d$ and let $\vec{p}\in\mathbb{R}^{m_A}$ be a probability vector. The physical imposition operator associated to $E_i$ and $p_i$ is the linear map \begin{equation}\label{def:pio} T^{p_i}_{E_i}(\rho)=\rho+\frac{(p_i-\mathrm{Tr}[\rho E_i])E_i}{\mathrm{Tr}(E_i^2)}, \end{equation} for every $i\leq m_A$. \end{defi} In order to clarify the meaning of the physical imposition operator (\ref{def:pio}) let us assume for the moment that $A$ is a projective measurement. In such a case, operator $T^{p_i}_{E_i}(\rho)$ takes a quantum state $\rho$, removes the projection along the direction $E_i$, i.e. it removes the physical information about $E_i$ stored in the state $\rho$, and imposes a new projection along this direction weighted by the probability $p_i$. Here, $p_i$ can be either taken from experimental data or simulated by Born's rule with respect to a target state to reconstruct. Note that operator $\rho'=T^{p_i}_{E_i}(\rho)$ reflects the experimental knowledge about the quantum system. As we will show in Section \ref{sec:algorithm}, a successive iteration of PIO along an informationally complete set of quantum measurements allows us to reconstruct the quantum state. For POVM in general, operator (\ref{def:pio}) does not entirely impose the information about the outcome. However, after several imposition of all involved operators PIO the sequence of quantum states successfully converges to a quantum states containing all the physical information, as we demonstrate in Theorem \ref{thm:convergence}. To simplify notation, along the work we drop the superscript $p_i$ in $T_{E_i}^{p_i}$ when the considered probability $p_i$ is clear from the context. Let us now state some important facts about PIOs that easily arise from Definition \ref{def:PIOO}. 
From now on, $\mathfrak{D}(\rho,\sigma):=\mathrm{Tr}[(\rho-\sigma)^2]$ denotes the Hilbert-Schmidt distance between states $\rho$ and $\sigma$. \begin{prop}\label{prop:pioproperties} The following properties hold for any POVM $\{E_i\}_{i\leq m_A}$ and any $\rho,\sigma$ acting on $\mathcal{H}_d$: \begin{enumerate} \item Imposition of physical information: $\mathrm{Tr}[T^{p_i}_{E_i}(\rho)E_i]=p_i.$ \item Composition: $T^{p_j}_{E_j}\circ T^{p_i}_{E_i}(\rho)=T^{p_i}_{E_i}(\rho)+T^{p_j}_{E_j}(\rho)-\rho-\bigl(p_i-\mathrm{Tr}(\rho E_i)\bigr)\mathrm{Tr}(E_iE_j)E_j/\mathrm{Tr}(E_j)^2.$ \item Non-expansiveness: $\mathfrak{D}(T^{p_j}_{E_j}(\rho),T^{p_j}_{E_j}(\sigma))\leq\mathfrak{D}(\rho,\sigma).$ \end{enumerate} \end{prop} Some important observations arise from Prop. \ref{prop:pioproperties}. First, for $j=i$ in the above item \emph{2} we find that \begin{equation}\label{projection} T^2_{E_i}(\rho)=T_{E_i}(\rho), \end{equation} for any $\rho$, so operator $T_{E_i}$ is an \emph{orthogonal projection}, for every $i\leq m_A$ and any POVM $\{E_i\}_{i\leq m_A}$. Note that any quantum state $\sigma=T_{E_i}(\rho)$ is a fixed point of $T_{E_i}$, i.e. $T_{E_i}(\sigma)=\sigma $, which simply arises from (\ref{projection}). Roughly speaking, quantum states already having the physical information we want to impose are fixed points of the map $T_{E_j}$. This key property allows us to apply dynamical systems theory \cite{S94} to study the tomographic problem. We consider the alternating projection method, firstly studied by Von Neumann \cite{N49} for the case of two alternating projections and generalized by Halperin to any number of projections \cite{H62}. In Theorem \ref{thm:convergence}, we will show that composition of all physical imposition operators associated to an informationally complete set of POVM produces a linear map having a unique attrative fixed point, i.e., the solution to the quantum state tomography problem. 
The uniqueness of the fixed point guarantees a considerable speed up of the method in practice, as any chosen seed monotonically approaches to the solution of the problem. To simplify notation, we consider a single physical imposition operation $\mathcal{T}_A$ for an entire POVM A, defined as follows \begin{equation}\label{pio2} \mathcal{T}_A=T_{E_{m_A}}\circ\dots\circ T_{E_1}. \end{equation} Up to a constant factor proportional to identity, that we omit, operator $\mathcal{T}_A$ reduces to \begin{equation}\label{piopvm} \mathcal{T}_A(\rho)=\sum_{i=1}^{m_A}T_{E_i}(\rho), \end{equation} for any PVM $A$, what follows from considering (\ref{pio2}) and Prop.\ref{prop:pioproperties}. This additive property holding for PVM measurements plays an important role, as it helps to reduce the runtime of our algorithm. Precisely, this fact allows us to apply \emph{Kaczmarz method} \cite{K1937} instead of Halpering alternating projection method, for any informationally complete set of PVM. Kaczmarz method considers projections over the subspace generated by the intersection of all associated hyperplanes, defined by the linear system of equations (Born's rule). Let us introduce another relevant concept \begin{defi}[Generator state] Given a POVM $A=\{E_i\}_{i\leq m_A}$ and a probability vector $\vec{p}\in\mathbb{R}^{m_A}$, a quantum state $\rho_{gen}$ is called \emph{generator state} for $\vec{p}$ if $\mathrm{Tr}(\rho_{gen} E_i)=p_i$, for every $i\leq m_A$ . \end{defi} Note that $\rho_{gen}$ is a fixed point of $\mathcal{T}_{E_i}$, according to (\ref{pio2}) and Prop. \ref{prop:pioproperties}. State $\rho_{gen}$ plays an important role to implement numerical simulations, as it guarantees to generate sets of probability distributions compatible with the existence of a positive semidefinite solution to the quantum state tomography problem. 
To end this section, note that map $\mathcal{T}_A$ defined in (\ref{piopvm}) has a simple interpretation in the Bloch sphere for a qubit system, see Fig. \ref{Fig1}. The image of $\mathcal{T}_A$, i.e. $\mathcal{T}_A[\textrm{Herm}(\mathcal{H}_2)]$, is a plane that contains the disk \[D^{\vec{p}}_{A}=\{z=p_2-p_1\mid z=\mathrm{Tr}(\rho \sigma_z),\,p_i=\mathrm{Tr}(\rho E_i),\,\rho\geq0,\mathrm{Tr}(\rho)=1\},\] i.e., the disk contains the full set of generator states $\rho_{gen}$. Note that $\mathcal{T}_A$ is not a completely positive trace preserving (CPTP) map, as $\mathcal{T}_A[\textrm{Herm}(\mathcal{H}_2)]$ extends beyond the disk $D^{\vec{p}}_{A}$, i.e. outside the space of states. Indeed, for any state $\rho$ that is not a convex combination of projectors $E_i$, there exists a probability distribution $\vec{p}$ such that $\mathcal{T}_A(\rho)$ is not positive semi-definite. Roughly speaking, any point inside the Bloch sphere from Fig. \ref{Fig1} but outside the blue vertical line is projected by $\mathcal{T}_A$ outside the sphere, for a sufficiently small disk $D^{\vec{p}}_A$. \begin{figure} \caption{Bloch sphere representation for a single qubit system and PVM measurements. The blue arrows define eigenvectors of $\sigma_z$. The disk shown represents the entire set of quantum states $\rho_{gen}$.} \label{Fig1} \end{figure} \section{Algorithm for quantum state estimation}\label{sec:algorithm} In the practice of quantum state tomography, one collects a set of probability distributions $\vec{p_1},\dots,\vec{p_{\ell}}$ from a set of $\ell$ POVM measurements $A_1=\{E_i^{(1)}\}_{i\leq m_1},\dots,A_{\ell}=\{E_i^{(\ell)}\}_{i\leq m_{\ell}}$, implemented over an ensemble of physical systems identically prepared in a quantum state $\rho_{gen}$. The statistics collected allow a unique state reconstruction when considering an \emph{informationally-complete} (IC) set of observables $A_1,\dots,A_\ell$.
Our algorithm for quantum state estimation, Algorithm \ref{alg:pio1} below, defines a sequence of Hermitian operators $\rho_n$, not necessarily composed of quantum states, that converges to the unique quantum state that is solution to the tomography problem, i.e. $\rho_{gen}$. For the moment, we assume error-free state tomography in our statements. The algorithm applies to any finite dimensional Hilbert space $\mathcal{H}_d$, and any informationally complete set of quantum observables. \begin{algorithm}[H]\caption{Quantum state estimation algorithm.}\label{alg:pio1} \begin{algorithmic} \Require dimension $d\in\mathbb{N}$, POVMs $A_1,\dots, A_{\ell}$ acting on $\mathcal{H}_{d}$, \\ \hspace{1cm} experimental frequencies $\vec{f}_1,\dots,\vec{f}_{\ell}\in \mathbb{R}^m$ and accuracy $\epsilon\in [0,1]$. \Ensure estimate $\rho_{\rm est}\in\mathcal{B}(\mathcal{H}_d)$. \State{$\rho_{0} = \mathbb{I}/d$} \State{$\rho = \mathcal{T}_{A_{\ell}}\circ\cdots\circ \mathcal{T}_{A_1}(\rho_{0})$} \Repeat{\\ \hspace*{0.5cm}$\rho_{\rm old} = \rho$\\ \hspace*{0.5cm}$\rho = \mathcal{T}_{A_{\ell}}\circ\cdots\circ \mathcal{T}_{A_1}(\rho_{\rm old})$} \Until{ $\mathfrak{D}(\rho,\rho_{\rm old})\leq\epsilon$ }\\ \Return{ $\argmin_{ \rho_{\rm est} \in \mathcal{D}(\mathcal{H}_{d}) } \mathfrak{D}(\rho,\rho_{\rm est})$ } \end{algorithmic} \end{algorithm} In Algorithm \ref{alg:pio1}, $\mathcal{D}(\mathcal{H}_{d})$ denotes the set of density operators over $\mathcal{H}_{d}$. Theorem \ref{thm:convergence} below asserts the convergence of Algorithm \ref{alg:pio1} when the input frequencies are exact, i.e. Born-rule probabilities of an IC set of POVMs. \begin{thm}\label{thm:convergence} Let $A_1,\dots,A_{\ell}$ be a set of informationally complete POVMs acting on a Hilbert space $\mathcal{H}_d$, associated to a compatible set of probability distributions $\vec{p_1},\dots,\vec{p_{\ell}}$.
Therefore, Algorithm \ref{alg:pio1} converges to the unique solution to the quantum state tomography problem. \end{thm} Here, compatibility refers to the existence of a quantum state associated to exact probability distributions $\vec{p_1},\dots,\vec{p_{\ell}}$, which is guaranteed when probabilities come from a generator state $\rho_{gen}$. Theorem \ref{thm:convergence} asserts that the composite map $\mathcal{T}_{A_{\ell}}\circ\cdots\circ \mathcal{T}_{A_1}$ defines a dynamical system having a unique attractive fixed point. The successive iterations of Algorithm \ref{alg:pio1} define a \emph{Picard sequence} \cite{KCG90}: \begin{align}\label{eq:picard-sequence} \rho_0&=\mathbb{I}/d,\nonumber\\ \rho_n&= \mathcal{T}_{A_{\ell}}\circ\dots\circ \mathcal{T}_{A_1}(\rho_{n-1}),~n\geq 1. \end{align} Note that for an arbitrarily chosen set of observables, the composition of physical imposition operators depends on its ordering. According to Theorem \ref{thm:convergence}, this ordering does not affect the success of the convergence in infinitely many steps. However, in practice one is restricted to a finite sequence, where different orderings produce different quantum states as an output. Nonetheless, such a difference tends to zero when the state $\rho_n$ is close to the attractive fixed point, i.e. solution to the state tomography problem. According to our experience from numerical simulations, we did not find any advantage from considering a special ordering for composition of operators. Figure \ref{Fig2} shows the convergence of $\rho_n$ in the Bloch sphere representation for a single qubit system and three PVMs taken at random. For certain families of measurements, e.g. mutually unbiased bases and tensor product of Pauli matrices, the resulting Picard sequences and, therefore, Algorithm \ref{alg:pio1} converge in a single iteration, see Prop. \ref{prop_singlestep}. That is, $\rho_n = \rho_{1}$ for every $n\geq 1$.
We numerically observed this same behaviour for the $3^N$ product Pauli eigenbases in the space of $N$-qubits, with $1\leq N\leq 8$, conjecturing that it holds for every $N\in\mathbb{N}$, see Section \ref{sec:simulations-pauli}. \begin{figure} \caption{Graphical representation of the convergence of Algorithm \ref{alg:pio1}.} \label{Fig2} \end{figure} In a previous work \cite{GdlT2014}, a related algorithm was introduced for quantum state estimation. However, it has several disadvantages with respect to our work, namely: (\emph{i}) it works for pure states only; (\emph{ii}) the dynamics is non-linear, requiring a large runtime to converge; (\emph{iii}) convergence to the target state is not guaranteed. The main reason behind this last property is the existence of a large amount of undesired basins of attraction, as the solution to the problem is not the only attractive fixed point; finally, (\emph{iv}) realistic state reconstruction is not possible due to the impossibility to introduce realistic noise, as it destroys purity. Note that Algorithm \ref{alg:pio1} does not reduce to the one defined in Ref. \cite{GdlT2014} when reconstructing pure states, as our imposition operator is linear. \subsection{Ultra-fast convergence}\label{sec:ultra-convergence} When considering maximal sets of mutually unbiased bases, the Picard sequences featuring in Algorithm \ref{alg:pio1} converge in a single iteration. This is so because the associated imposition operators commute for MUB. This single-iteration convergence is easy to visualize in the Bloch sphere for a qubit system, as the three disks associated to three MUB are mutually orthogonal, and orthogonal projections acting over orthogonal planes keep the impositions within the intersection of the disks. The same argument also holds in every dimension. Let us formalize this result. \begin{prop}\label{MUBcommute} Let $T_A$ and $T_B$ be two physical imposition operators associated to two mutually unbiased bases $A$ and $B$.
Therefore, \begin{equation} T_B\circ T_A=T_A\circ T_B=T_A+T_B-\mathbb{I}. \end{equation} In particular, note that $T_A$ and $T_B$ commute. \end{prop} Also, it is easy to see from Item \emph{2}, Prop. \ref{prop:pioproperties} that operators $T_{E_i}$ commute when considering $E_i$ equal to the tensor product local Pauli group. In this case, operators $E_i$ do not form a POVM, but given that they define an orthogonal basis in the matrix space, they are an informationally complete set of observables. Let us now show the main result of this section: \begin{prop}\label{prop_singlestep} Algorithm \ref{alg:pio1} converges in a single iteration to the unique solution of the quantum state tomography problem for product of generalized Pauli operators and also for $d+1$ mutually unbiased bases, in any prime power dimension $d$. \end{prop} We observe from simulations that the speedup predicted by Prop. \ref{prop_singlestep} has no consequences in the reconstruction fidelity of our method, which is actually higher than the one provided by MLE. \section{Numerical study}\label{sec:simulations} Theoretical developments from Sections \ref{sec:pio} and \ref{sec:algorithm} apply to the ideal case of error free probabilities coming from an exact generator state $\rho_{gen}$. In practice, probabilities are estimated from frequencies, carrying errors due to finite statistics. Moreover, the states being prepared in each repetition of the experiment are affected by unavoidable systematic errors. These sources of errors imply that the output of Algorithm \ref{alg:pio1} is typically outside the set of quantum states when considering experimental data. We cope with this situation by finding the closest quantum state to the output in Hilbert-Schmidt (a.k.a. Frobenius) distance, called $\rho_{\rm est}$, for which there are closed-form expressions \cite{guctua2020fast}.
In the following, we provide numerical evidence for robustness of our method in the finite-statistics regime with white noise affecting the generator states, i.e. errors at the preparation stage. That is, we consider noisy states of the form $\tilde{\rho}(\lambda)=(1-\lambda)\rho+\lambda\mathbb{I}/d$, where $\lambda$ quantifies the amount of errors. We understand there are more sophisticated techniques to consider errors, e.g. ill-conditioned measurement matrices \cite{bolduc2017projected}. Nonetheless, we believe the consideration of another model to simulate a small amount of errors would not substantially change the exhibited results. We reconstruct the state for $N$-qubit systems with $1\leq N\leq 8$, by considering the following sets of measurements: a) Mutually unbiased bases, b) Tensor product of local Pauli bases and c) A set of $d+1$ informationally complete bases taken at random with Haar distribution. The last case does not have a physical relevance but illustrates performance of our algorithm for a set of measurements defined in an unbiased way. As a benchmark, we compare the performance of our method with the conjugate gradient, accelerated-gradient-descent (CG-AGP) implementation of Maximum Likelihood Estimation (MLE) \cite{shang2017superfast}. Computations were conducted on an Intel Core i5-8265U laptop with 8 GB RAM. For the CG-AGP algorithm, we used the implementation provided by authors of Ref. \cite{shang2017superfast}, see Ref. \cite{superfast-implementation}. We provide an implementation of our Algorithm \ref{alg:pio1} in Python \cite{pio-implementation}, together with the code to run the simulations presented in the current section. \subsection{Mutually unbiased bases}\label{sec:simulations-MUB} Figure \ref{fig:MUB} shows performance of Algorithm \ref{alg:pio1} in the reconstruction of $N$-qubit density matrices from the statistics of a maximal set of $2^N+1$ MUBs.
We consider a generator state $\rho_{gen}$ in dimension $d$, taken at random according to the Haar measure distribution, with the addition of a $10\%$ level of white noise, i.e. $\tilde{\rho}(\lambda)=(1-\lambda)\rho+\lambda\mathbb{I}/2^N$, with $\lambda=0.1$. Here, it is important to remark that fidelities are compared with respect to the generator state $\rho_{gen}$, so that the additional white noise reflects the presence of systematic errors in the state preparation process. Probabilities are estimated from frequencies, i.e. $f_j=\mathcal{N}_j/\mathcal{N}$ with $\mathcal{N}_j$ the number of counts for outcome $j$ of some POVM and $\mathcal{N}=\sum_j \mathcal{N}_j$ the total number of counts. Our simulations consider $\mathcal{N}=100\times 2^N$ samples per measurement basis. Our figure of merit is the fidelity $F(\rho_n,\rho_{gen})=\bigl(\mathrm{Tr}\sqrt{\sqrt{\rho_{gen}}\rho_n\sqrt{\rho_{gen}}}\bigr)^2$ between the reconstructed state after $n$ iterations $\rho_n$ and the generator state $\rho_{gen}$. Runtime of the algorithm is averaged over 50 independent runs, each of them considering a generator state $\rho_{gen}$ chosen at random according to the Haar measure. \begin{figure} \caption{Performance of Algorithm \ref{alg:pio1} and the CG-AGP Super-Fast MLE method from \cite{shang2017superfast}.} \label{Fig3a} \label{Fig3b} \label{fig:MUB} \end{figure} \subsection{$N$-qubit Pauli bases}\label{sec:simulations-pauli} Here, we consider the reconstruction of $N$-qubit density matrices from the $3^N$ PVMs determined by all the products of single qubit Pauli eigenbases, for $N=1,\dots,8$. Similarly to the case of MUBs, Picard sequences $\rho_n=T_{\mathrm{Pauli}}^n(\rho_0)$ converge in a single iteration when product of Pauli measurements are considered, for any generator state $\rho_{gen}$ and any initial state $\rho_0$. Figure \ref{fig:pauli} shows performance of a single iteration of these Picard sequences, where the generator state $\rho_{gen}$ is taken at random, according to the Haar measure.
Algorithm CG-AGP exploits the product structure of the $N$-qubit Pauli bases to speed up its most computationally expensive part: the computation of the probabilities given by the successive estimates in the MLE optimization. It does so by working with reduced density matrices which, in turn, imply an efficient use of memory. In order to have a fair comparison with our method, we decided to include the time to compute the $N$-qubit observables from the single Pauli observables in the total runtime of our algorithm. In practice, however, one would preload them into memory, as they are, of course, not a function of the input, i.e. of the observed probabilities. Nonetheless, Fig. \ref{fig:pauli} shows that our Algorithm \ref{alg:pio1} has a considerable reduction of runtime and better fidelities with respect to the algorithm provided in Ref. \cite{shang2017superfast}. \subsection{Random measurements for $N$-qubit systems}\label{sec:simulations-random} The simulations in the preceding subsections correspond to informationally complete sets of measurements for which Algorithm \ref{alg:pio1} converges in a single iteration. To test whether the advantage over \cite{shang2017superfast} hinges critically on this fact, we have numerically tested our algorithm with sets of PVMs selected at random, with respect to the Haar measure. In Fig. \ref{fig:random-bases} we show that in this case, the fidelity advantage increases substantially, compared to Figs. \ref{fig:MUB} and \ref{fig:pauli}. \begin{figure} \caption{Performance of Algorithm \ref{alg:pio1}.} \label{fig:pauli-a} \label{fig:pauli-b} \label{fig:pauli} \end{figure} \begin{figure} \caption{Performance of Algorithm \ref{alg:pio1}.} \label{fig:random-a} \label{fig:random-b} \label{fig:random-bases} \end{figure} Finally, we would like to mention the \emph{Projective Least Squares} (PLS) quantum state reconstruction \cite{GKKT20}. This method outperforms our Algorithm \ref{alg:pio1} both in runtime and fidelity.
This occurs when the linear inversion procedure required by the method \emph{is not} solved but taken from an analytically known reconstruction formula. Existing inversion formulas are known for complex projective 2-designs, measurements composed of stabilizer states, Pauli observables and uniform/covariant POVM, see \cite{GKKT20}. However, when taking into account the cost of solving the linear inversion procedure, our method has a considerable advantage over PLS. For instance, PLS does not have such efficient speed up for a number of physically relevant observables for which there is no explicit inversion known, including the following cases: a) discrete Wigner functions reconstruction for arbitrary dimensional boson and fermion quantum systems from discrete quadratures, that can be treated as observables by considering Ramsey techniques \cite{L96}, b) reconstruction of single quantized cavity mode from magnetic dipole measurements with Stern-Gerlach apparatus \cite{WCZ96}, c) minimal state reconstruction of $d$-dimensional quantum systems from POVM consisting of $d^2$ elements, inequivalent to SIC-POVM \cite{W06}, d) spin $s$ density matrix state reconstruction from Stern-Gerlach measurements \cite{WA99}, e) Quantum state tomography for multiparticle spin $1/2$ systems \cite{DMP03}, neither reduced to mutually unbiased bases nor local Pauli measurements. \section{Discussion and conclusions}\label{sec:conclusions} We introduced an iterative method for quantum state estimation of density matrices from any informationally complete set of quantum measurements in any finite dimensional Hilbert space. We demonstrated convergence to the unique solution for any informationally complete or overcomplete set of POVMs, see Theorem \ref{thm:convergence}. The method, based on dynamical systems theory, exhibited a simple and intuitive geometrical interpretation in the Bloch sphere for a single qubit system, see Figs. \ref{Fig1} and \ref{Fig2}.
Our algorithm revealed an ultra-fast convergence for a wide class of measurements, including mutually unbiased bases and tensor product of generalized Pauli observables for an arbitrarily large number of particles having $d$ internal levels. These results considerably improved both the runtime and fidelities reported by the CG-AGP Super-Fast MLE estimation \cite{shang2017superfast} for all the studied cases, see Section \ref{sec:ultra-convergence}. Furthermore, numerical simulations revealed strong robustness under the presence of realistic errors in both state preparation and measurement stages, see Figs. \ref{fig:MUB} to \ref{fig:random-bases}. We provided an easy-to-use code developed in Python to implement our algorithm, see \cite{pio-implementation}. As interesting future lines of research, we pose the following list of open issues: (\emph{i}) Find an upper bound for fidelity reconstruction of Algorithm \ref{alg:pio1} as a function of errors and number of iterations; (\emph{ii}) Characterize the full set of quantum measurements for which Algorithm \ref{alg:pio1} converges in a single iteration; (\emph{iii}) Extend our method to quantum process tomography. \textbf{Acknowledgements} It is a pleasure to thank Gustavo Ca\~nas C\'ardona, Zdenek Hradil, Felix Huber, Santiago G\'omez L\'opez, Kamil Korzekwa, Andrew Scott, Oliver Reardon-Smith, Stephen Walborn, Andreas Winter and Karol \.{Z}yczkowski for valuable comments. DG and DU are supported by Grant FONDECYT Iniciaci\'{o}n number 11180474, Chile. DU also acknowledges support from Project ANT1956, Universidad de Antofagasta, Chile. GS acknowledges support from the Government of Spain (FIS2020-TRANQI and Severo Ochoa CEX2019-000910-S), Fundació Cellex, Fundació Mir-Puig, Generalitat de Catalunya (CERCA, AGAUR SGR 1381) and the EU project QRANGE. This work was supported by MINEDUC-UA project, code ANT 1856.
As regards the authorship of the different sections, DUC and DG provided both the theoretical background as well as the new mathematical results, whereas DUC and GS contributed with numerical simulations. \appendix \section{Proof of results}\label{proofs} In this section we provide the proofs of all our results. \subsection{Algorithm for quantum state estimation} \textbf{Proposition II.1} \emph{The following properties hold for any POVM $\{E_i\}_{i\leq m}$ and any $\rho,\sigma$ acting on $\mathcal{H}_d$: \begin{enumerate} \item Imposition of physical information: $\mathrm{Tr}[T^{p_i}_{E_i}(\rho)E_i]=p_i.$ \item Composition: $T^{p_j}_{E_j}\circ T^{p_i}_{E_i}(\rho)=T^{p_i}_{E_i}(\rho)+T^{p_j}_{E_j}(\rho)-\rho-\bigl(p_i-\mathrm{Tr}(\rho E_i)\bigr)\mathrm{Tr}(E_iE_j)E_j/\mathrm{Tr}(E_j)^2.$ \item Non-expansiveness: $\mathfrak{D}(T^{p_j}_{E_j}(\rho),T^{p_j}_{E_j}(\sigma))\leq\mathfrak{D}(\rho,\sigma).$ \end{enumerate} } \begin{proof} Items \emph{1} and \emph{2} easily arise from Definition \ref{def:pio}. In order to show the non-expansiveness stated in Item \emph{3}, let us apply Definition \ref{def:pio} to two states $\rho$ and $\sigma$, acting on $\mathcal{H}_d$, i.e. \begin{equation}\label{pio1Ap} T^{p_i}_{E_i}(\rho)=\rho+\frac{(p_i-\mathrm{Tr}[\rho E_i])E_i}{\mathrm{Tr}(E_i^2)}, \end{equation} \begin{equation}\label{pio2Ap} T^{p_i}_{E_i}(\sigma)=\sigma +\frac{(p_i-\mathrm{Tr}[\sigma E_i])E_i}{\mathrm{Tr}(E_i^2)}. \end{equation} Subtracting \eqref{pio2Ap} from \eqref{pio1Ap} gives \begin{equation} T_{E_i}(\rho) - T_{E_i}(\sigma) = (\rho - \sigma) - \dfrac{\mathrm{Tr}[(\rho - \sigma) E_i]E_i}{\mathrm{Tr}(E_i^2)}, \end{equation} where we dropped the upper index $p_i$ from $T^{p_i}_{E_i}$.
Now, let us compute $$\mathfrak{D}(T_{E_i}(\rho),T_{E_i}(\sigma))^2 = \mathrm{Tr}\bigl[ \bigl( T_{E_i}(\rho) - T_{E_i}(\sigma) \bigr)\bigl( T_{E_i}(\rho) - T_{E_i}(\sigma) \bigr)^{\dagger} \bigr].$$ Thus, \begin{eqnarray} \mathfrak{D}(T_{E_i}(\rho),T_{E_i}(\sigma))^2 &=& \mathfrak{D}(\rho, \sigma)^2 - 2\dfrac{\mathrm{Tr}[(\rho - \sigma) E_i]\mathrm{Tr}[(\rho - \sigma) E_i]}{\mathrm{Tr}(E_i^2)} + \dfrac{\bigl( \mathrm{Tr}[(\rho - \sigma) E_i]\bigr)^2 \mathrm{Tr}(E_i^2)}{\bigl( \mathrm{Tr}(E_i^2) \bigr)^2} \nonumber \\ &=&\mathfrak{D}(\rho, \sigma)^2 - \dfrac{\bigl( \mathrm{Tr}[(\rho - \sigma) E_i]\bigr)^2}{\mathrm{Tr}(E_i^2)}, \end{eqnarray} where $\mathfrak{D}(\rho, \sigma)^2 = \mathrm{Tr}\bigl[(\rho - \sigma)(\rho - \sigma)^{\dagger} \bigr]$. Therefore, $\mathfrak{D}(T_{E_i}(\rho),T_{E_i}(\sigma)) \leq \mathfrak{D}(\rho, \sigma)$ and item \emph{3} holds. \end{proof} \textbf{Theorem III.1} \emph{Let $A_1,\dots,A_{\ell}$ be a set of informationally complete POVMs acting on a Hilbert space $\mathcal{H}_d$, associated to a compatible set of probability distributions $\vec{p_1},\dots,\vec{p_{\ell}}$. Therefore, Algorithm \ref{alg:pio1} converges to the unique solution to the quantum state tomography problem.} \begin{proof} First, from item \emph{1} in Prop. \ref{prop:pioproperties} the generator state $\rho_{gen}$ is a fixed point of each imposition operator $\mathcal{T}_{A_i}$, for every chosen POVM measurement $A_1,\dots,A_{\ell}$. Hence, $\rho_{gen}$ is a fixed point of the composition of all involved operators. Moreover, this fixed point is unique, as there is no other quantum state having the same probability distributions for the considered measurements, since $A_1,\dots,A_{\ell}$ are informationally complete. Here, we are assuming error-free probability distributions.
Finally, convergence of our sequences is guaranteed by the alternating projections method developed by Halperin, which states that successive iterations of non-expansive projections converge to a common fixed point of the involved maps, see Theorem 1 in \cite{H67}. \end{proof} \subsection{Single-step convergence} \textbf{Proposition III.1} \emph{Let $\mathcal{T}_A$ and $\mathcal{T}_B$ be physical imposition operators associated to two mutually unbiased bases $A$ and $B$, for $n$ qudit systems. Therefore \begin{equation} \mathcal{T}_A\circ \mathcal{T}_B=\mathcal{T}_A+\mathcal{T}_B-\mathbb{I}. \end{equation} In particular, notice that $\mathcal{T}_A$ and $\mathcal{T}_B$ commute.} \begin{proof} First, it is simple to show that $\mathcal{T}_A(\rho)=\rho_0+\sum_{j=0}^{m_A-1}\Pi_j(\rho-\rho_0)\Pi_j$ for any PVM $A$, where $\Pi_j=E_j$ are the subnormalized rank-one PVM elements. Thus, we have \begin{eqnarray*} T_B\circ T_A(\rho_0)&=&\rho_0+\sum_{j=0}^{m_A-1}\Pi^A_j(\rho-\rho_0)\Pi^A_j+\sum_{k=0}^{m_B-1}\Pi^B_k\left[\rho-\left(\rho_0+\sum_{j=0}^{m_A-1}\Pi^A_j(\rho-\rho_0)\Pi^A_j\right)\right]\Pi^B_k\\ &=&\rho_0+\sum_{j=0}^{m_A-1}\Pi^A_j(\rho-\rho_0)\Pi^A_j+\sum_{k=0}^{m_B-1}\Pi^B_k(\rho-\rho_0)\Pi^B_k+\sum_{j,k}\Pi^B_k\Pi^A_j(\rho-\rho_0)\Pi^A_j\Pi^B_k \end{eqnarray*} On the other hand, \begin{eqnarray*} \sum_{j,k}\Pi^B_k\Pi^A_j(\rho-\rho_0)\Pi^A_j\Pi^B_k&=&\sum_{j,k}\mathrm{Tr}(\Pi^A_j\Pi^B_k)\mathrm{Tr}\bigl((\rho-\rho_0)\Pi^A_j\bigr)\Pi^B_k\\ &=&\gamma(A,B)\sum_{j,k}\mathrm{Tr}\bigl((\rho-\rho_0)\Pi^A_j\bigr)\Pi^B_k\\ &=&\gamma(A,B)\mathrm{Tr}(\rho-\rho_0)\\ &=&0. \end{eqnarray*} Therefore, we have \begin{eqnarray} T_B\circ T_A(\rho_0)&=&\rho_0+\sum_{j=0}^{m_A-1}\Pi^A_j(\rho-\rho_0)\Pi^A_j+\sum_{k=0}^{m_B-1}\Pi^B_k(\rho-\rho_0)\Pi^B_k\\ &=&T_A(\rho_0)+T_B(\rho_0)-\rho_0, \end{eqnarray} for any initial state $\rho_0$. So, we have $T_B\circ T_A=T_A\circ T_B=T_A+T_B-\mathbb{I}$. 
\end{proof} \textbf{Proposition III.2} \emph{Algorithm \ref{alg:pio1} converges in a single iteration to the unique solution of the quantum state tomography problem for product of generalized Pauli operators and also for $d+1$ mutually unbiased bases, in any prime power dimension $d$.} \begin{proof} For generalized Pauli operators, commutativity of imposition operators comes from the orthogonality condition $\mathrm{Tr}(E_iE_j)=0$ for $i\neq j$, see item \emph{2} in Prop. \ref{prop:pioproperties}. Thus, we have \begin{eqnarray}\label{seq} \rho_n&=&(T_{E_{d^2}}\circ\cdots\circ T_{E_1})^n(\rho_0)\nonumber\\ &=&T^n_{E_{d^2}}\circ\cdots\circ T^n_{E_1}(\rho_0)\nonumber\\ &=&T_{E_{d^2}}\circ\cdots\circ T_{E_1}(\rho_0), \end{eqnarray} where the second step considers commutativity and the last step the fact that every $T_{E_j}$, $j=1,\dots,d^2$, is a projection. On the other hand, from Theorem \ref{thm:convergence} we know that $\rho_n\rightarrow\rho_{gen}$ when $n\rightarrow\infty$, for any generator state $\rho_{gen}$. From combining this result with (\ref{seq}) we have \begin{equation} T_{E_{d^2}}\circ\cdots\circ T_{E_1}(\rho_0)=\rho_{gen}, \end{equation} for any seed $\rho_0$ and any generator state $\rho_{gen}$, in any prime power dimension $d$. For MUB the result holds in the same way, where commutativity between the imposition operators associated to every PVM arises from Prop. \ref{MUBcommute}. \end{proof} \section{An additional model of errors for the measurement process}\label{sec:simulations-Gaussian_Noisse} \begin{figure} \caption{A new error model for the measurement process, which considers a Gaussian perturbation of the spin direction to be measured together with finite statistics errors. Fidelity is averaged over 100 trials, having a randomly chosen generator state $\rho_{gen}$.} \label{Fig6} \end{figure} Throughout this work, we implemented simulations considering errors in both state preparation and those arising from finite statistics.
In this section, we consider an additional source of errors in the measurement process. Specifically, we consider errors in the measurement apparatus, which is modeled by adding Gaussian perturbations in the direction of spin observables. In Figure \ref{Fig6}, we show fidelity for quantum state reconstruction for a spin $1/2$ particle from three spin observables along orthogonal directions. For the Gaussian noise model, such directions are affected by a Gaussian probability distribution having standard deviation $\nu$, centered at the ideally expected direction. That is, we consider the Gaussian probability distribution $p(x) \propto e^{-(x-\mu)^2/(2\nu^2)}$ with $\mu = 0$, for entries of a spin direction $\vec{n}$, associated to the observable $S = \vec{n}\cdot\vec{\sigma}$, where $\vec{\sigma}=(\sigma_x,\sigma_y,\sigma_z)$ is a vector composed of the three Pauli matrices. The amplitude of fluctuations can be controlled by adjusting the standard deviation $\nu$. \end{document}
\begin{document} \baselineskip .3in \title{\textbf{Predictive mean matching imputation in survey sampling}} \author{Shu Yang \and Jae Kwang Kim} \maketitle \begin{abstract} \textcolor{black}{Predictive mean matching imputation is popular for handling item nonresponse in survey sampling. In this article, we study the asymptotic properties of the predictive mean matching estimator of the population mean. }For variance estimation, the conventional bootstrap inference for matching estimators with fixed matches has been shown to be invalid due to the nonsmooth nature of the matching estimator. \textcolor{black}{We propose asymptotically valid replication variance estimation. The key }strategy is to construct replicates of the estimator directly based on linear terms, instead of individual records of variables. Extension to nearest neighbor imputation is also discussed. A simulation study confirms that the new procedure provides valid variance estimation. \end{abstract} {\em Key Words:} Bootstrap; Jackknife variance estimation; Martingale central limit theorem; Missing at random. \section*{1. Introduction} Predictive mean matching imputation \citep{rubin1986statistical,little1988missing} is popular for handling item nonresponse in survey sampling. Hot deck imputation within imputation cells is a special case, where the predictive mean function is constant within cells. On the other hand, predictive mean matching is a version of nearest neighbor imputation. In nearest neighbor imputation, the vector of the auxiliary variables $x$ is directly used in determining the nearest neighbor, while in predictive mean matching imputation, a scalar predictive mean function is used in determining the nearest neighbor. The nearest neighbor is then used as a donor for hot deck imputation.
Although these imputation methods have a long history of application, there are relatively few papers on investigating their asymptotic properties.\textcolor{black}{{} \citet{kim2011variance} presented an application of nearest neighbor imputation for the US census long form data. \citet{vink2014predictive} and \citet{morris2014tuning} investigated using predictive mean matching as a tool for multiple imputation via simulation studies. \citet{chen2000nearest,chen2001jackknife} have developed a nice set of asymptotic theories for the nearest neighbor imputation estimator. In econometrics, \citet{abadie2006large,abadie2008failure,abadie2011bias,abadie2016matching} studied the matching estimator for causal effect estimation from observational studies. }To the best of our knowledge, there is no literature on theoretical investigation of \textcolor{black}{estimated predictive mean matching} for mean estimation in survey sampling, which motivates this article. Predictive mean matching is implemented in two steps. First, the predictive mean function is estimated. Second, for each nonrespondent, the nearest neighbor is identified among the respondents based on the predictive mean function, and then the observed outcome value of the nearest neighbor is used for imputation. Because the predictive mean function is estimated prior to matching, it is necessary to account for the uncertainty due to parameter estimation. Because of the non-smooth nature of matching, our derivation is based on the technique developed by \citet{andreou2012alternative}, which offers a general approach for deriving the limiting distribution of statistics that involve estimated nuisance parameters. \textcolor{black}{This technique has been successfully used in \citet{abadie2016matching} for the matching estimators of the average causal effects based on the estimated propensity score. We extend their results to the matching estimator in the survey sampling context.
In addition, we establish robustness of the predictive mean matching estimator which is consistent if the mean function satisfies a certain Lipschitz continuity condition.} Lack of smoothness also makes the conventional replication methods invalid for variance estimation for the predictive mean matching estimator. \textcolor{black}{\citet{abadie2008failure} demonstrated the failure of the bootstrap for matching estimators with a fixed number of matches. We propose new replication variance estimation for the predictive mean matching estimator in survey sampling. Based on the martingale representation} of the predictive mean matching estimator, we construct replicates of the estimator directly based on its linear terms. In this way, the distribution of the number of times that each unit is used as a match can be preserved, which leads to a valid variance estimation. Furthermore, our replication variance method is flexible and can accommodate bootstrap, jackknife, among others. The rest of this paper is organized as follows. In Section 2, we introduce the basic set-up in the context of survey data and the predictive mean matching procedure. In Section 3, we establish and compare the asymptotic distributions of the predictive mean matching estimator when the predictive mean function is known or is estimated. In Section 4, we\textcolor{black}{{} propose the new replication variance estimators and establish their consistency. }In Section 5, we evaluate the finite sample performance of the proposed estimators via a simulation study. We end with a brief discussion in Section 6. All proofs are deferred to the Appendix. \section*{2. Basic Set-up\label{sec:Basic-Setup}} Let $\mathcal{F}_{N}=\{(x_{i},y_{i},\delta_{i}):i=1,\ldots,N\}$ denote a finite population, where $x_{i}$ is always observed, $y_{i}$ has missing values, and $\delta_{i}$ is the response indicator of $y_{i}$, i.e., $\delta_{i}=1$ if $y_{i}$ is observed and $0$ if it is missing. 
The $\delta_{i}$'s are defined throughout the finite population, as in Fay (1992), \citet{shao1999variance}, and \citet{kim2006replication}. We assume that $ {\mathcal{F}}_{N}$ is a random sample from a superpopulation model $\zeta$, and $N$ is known. Our objective is to estimate the finite population mean $\mu=N^{-1}\sum_{i=1}^{N}y_{i}$. Let $A$ denote an index set of the sample selected by a probability sampling design. Let $I_{i}$ be the sampling indicator, i.e., $I_{i}=1$ if unit $i$ is selected into the sample, and $I_{i}=0$ otherwise. Suppose that $\pi_{i}$, the probability of selection of $i$, is positive and known throughout the sample. We make the following assumption for the missing data process. \begin{assumption}[Missing at random and positivity]\label{asmp:MAR}The missing data process satisfies $\mbox{{\rm pr}}(\delta=1\mid x,y)=\mbox{{\rm pr}}(\delta=1\mid x)$, which is denoted by $p(x)$, and with probability $1$, $p(x)>\epsilon$ for a constant $\epsilon>0$. \end{assumption} In order to construct the imputed values, we assume that \begin{equation} E(y_{i}\mid x_{i})=m(x_{i};\beta^{*}),\label{eq:mean} \end{equation} holds for every unit in the population, where $m(\cdot)$ is a function of $x$ known up to $\beta^{*}$. Under Assumption \ref{asmp:MAR}, let the normalized estimating equation for $\beta$ be \begin{equation} S_{N}(\beta)=\frac{n^{1/2}}{N}\sum_{i\in A}\frac{1}{\pi_{i}}\delta_{i}g(x_{i};\beta)\{y_{i}-m(x_{i};\beta)\}=0,\label{eq:pesdo score} \end{equation} where $g(x;\beta)$ is any function with which the solution to (\ref{eq:pesdo score}) exists uniquely. To simplify the presentation, let $g(x;\beta)$ be $\dot{m}(x;\beta)=\partial m(x;\beta)/\partial\beta$. 
General functions $g(x;\begin{equation}ta)$ can be considered at the expense of heavier notation. Under certain regularity conditions (e.g. \citealp{fuller2009sampling},\textcolor{black}{{} Ch. }2), the solution $\hat{\begin{equation}ta}$ converges to $\begin{equation}ta^{*}$ in probability. Here, the probability distribution is the joint distribution of the sampling distribution and the superpopulation model (\ref{eq:mean}). The sampling weight $\pi_{i}^{-1}$ is used to obtain a consistent estimator of $\begin{equation}ta^{*}$ even under informative sampling \citep{berg2015}. Under the model (\ref{eq:mean}), the \textcolor{black}{predictive mean matching} method can be described as follows: \begin{equation}gin{description} \item [{Step$\ $1.}] Obtain a consistent estimator of $\begin{equation}ta$, denoted by $\hat{\begin{equation}ta}$, by solving (\ref{eq:pesdo score}). For each unit $i$ with $\delta_{i}=0$, obtain a predicted value of $y_{i}$ as $\hat{m}_{i}=m(x_{i};\hat{\begin{equation}ta})$. Find the nearest neighbor of unit $i$ from the respondents with the minimum distance between $\hat{m}_{j}$ and $\hat{m}_{i}$. Let $i(1)$ be the index of the nearest neighbor of unit $i$, which satisfies $d(\hat{m}_{i(1)},\hat{m}_{i})\le d(\hat{m}_{j},\hat{m}_{i}),$ for any $j\in A_{R}=\{i\in A:\delta_{i}=1\}$, where $d(\cdot,\cdot)$ denotes a generic distance function, e.g., $d(m_{i},m_{j})=|m_{i}-m_{j}|$ for scalar $m_{i}$ and $m_{j}$. \item [{Step$\ $2.}] The imputation estimator based on \textcolor{black}{predictive mean matching} is computed by \begin{equation}gin{equation} \hat{\mu}_{ {\mathrm{PMM}}}=\frac{1}{N}\sum_{i\in A}\frac{1}{\pi_{i}}\left\{ \delta_{i}y_{i}+(1-\delta_{i})y_{i(1)}\right\} .\label{eq:pmm} \end{equation}d{equation} \end{equation}d{description} In (\ref{eq:pmm}), the imputed values are real observations. The imputation model is used only for identifying the nearest neighbor, but not for creating the imputed values. 
Variance estimation of $\hat{\mu}_{ {\mathrm{PMM}}}$ is challenging because of the nonsmoothness of the matching mechanism in Step 1. In the next section, we formally discuss the asymptotic properties of the predictive mean matching estimator. \section*{3. Main Result\label{sec:Main-Result}} \subsection*{3.1 Predictive mean matching} We introduce additional notation. Let $A=A_{R}\cup A_{M}$, where $A_{R}$ and $A_{M}$ are the sets of respondents and nonrespondents, respectively. Define $d_{ij}=1$ if $y_{j(1)}=y_{i}$, i.e., unit $i$ is used as a donor for unit $j\in A_{M}$, and $d_{ij}=0$ otherwise. We write $\hat{\mu}_{ {\mathrm{PMM}}}=\hat{\mu}_{ {\mathrm{PMM}}}(\hat{\begin{equation}ta})$, where \begin{equation}gin{eqnarray} \hat{\mu}_{ {\mathrm{PMM}}}(\begin{equation}ta) & = & \frac{1}{N}\sum_{i\in A}\frac{1}{\pi_{i}}\{\delta_{i}y_{i}+(1-\delta_{i})y_{i(1)}\}\noindentnumber \\ & = & \frac{1}{N}\left(\sum_{i\in A}\frac{1}{\pi_{i}}\delta_{i}y_{i}+\sum_{j\in A}\frac{1-\delta_{j}}{\pi_{j}}\sum_{i\in A}\delta_{i}d_{ij}y_{i}\right)\noindentnumber \\ & = & \frac{1}{N}\sum_{i\in A}\frac{\delta_{i}}{\pi_{i}}(1+k_{\begin{equation}ta,i})y_{i},\label{eq:expression} \end{equation}d{eqnarray} with \begin{equation}gin{equation} k_{\begin{equation}ta,i}=\sum_{j\in A}\frac{\pi_{i}}{\pi_{j}}(1-\delta_{j})d_{ij}.\label{eq:ki} \end{equation}d{equation} Under simple random sampling, $k_{\begin{equation}ta,i}=\sum_{j\in A}(1-\delta_{j})d_{ij}$ is the number of times that unit $i$ is used as the nearest neighbor for nonrespondents, where determination of the nearest neighbor is based on the predictive mean function $m(x_{i};\begin{equation}ta)$. We first consider the case when $\begin{equation}ta^{*}$, and hence $m(x_{i})=m(x_{i};\begin{equation}ta^{*})$, is known. Suppose that the superpopulation model satisfies the following assumption. 
\begin{assumption}\label{asmp:m} (i) The matching variable $m(x)$ has a compact and convex support, with its density bounded and bounded away from zero. Denote $m_{i}=m(x_{i})$. Let $g_{1}(m_{i})$ and $g_{0}(m_{i})$ be the conditional density of $m_{i}$ given $\delta_{i}=1$ and $\delta_{i}=0$, respectively. Suppose that there exist constants $C_{1L}$ and $C_{1U}$ such that $C_{1L}\leq g_{1}(m_{i})/g_{0}(m_{i})\leq C_{1U}$; (ii) there exists $\delta>0$ such that $E(|y|^{2+\delta}\mid x)$ is uniformly bounded for any $x$. \end{assumption} Assumption \ref{asmp:m} (i) is a convenient regularity condition \citep{abadie2006large}. Assumption \ref{asmp:m} (ii) is a moment condition for establishing the central limit theorem. Let $E_{p}(\cdot)$ and $ {\mathrm{var}}_{p}(\cdot)$ denote the expectation and the variance under the sampling design, respectively. We impose the following regularity conditions on the sampling design. \begin{assumption}\label{asmp:sampling} (i) There exist positive constants $C_{1}$ and $C_{2}$ such that $C_{1}\le\pi_{i}Nn^{-1}\le C_{2},$ for $i=1,\ldots,N$; (ii) $nN^{-1}=o(1)$; (iii) the sequence of the Horvitz-Thompson estimators $\hat{\mu}_{ {\mathrm{HT}}}=N^{-1}\sum_{i\in A}\pi_{i}^{-1}y_{i}$ satisfies $ {\mathrm{var}}_{p}(\hat{\mu}_{ {\mathrm{HT}}})=O(n^{-1})$ and $\{ {\mathrm{var}}_{p}(\hat{\mu}_{ {\mathrm{HT}}})\}^{-1/2}(\hat{\mu}_{ {\mathrm{HT}}}-\mu)\mid\mathcal{F}_{N}\rightarrow {\mathcal{N}}(0,1)$ in distribution, as $n\rightarrow\infty$. \end{assumption} Assumption \ref{asmp:sampling} is a widely accepted assumption in survey sampling (\citealp{fuller2009sampling}, \textcolor{black}{Ch. 1}). 
To study the asymptotic properties of the predictive mean matching estimator, we use the following decomposition: \begin{equation} n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}(\beta)-\mu\}=D_{N}(\beta)+B_{N}(\beta),\label{eq:decomposition} \end{equation} where \begin{equation} D_{N}(\beta)=\frac{n^{1/2}}{N}\left(\sum_{i\in A}\frac{1}{\pi_{i}}\left[m(x_{i};\beta)+\delta_{i}(1+k_{\beta,i})\{y_{i}-m(x_{i};\beta)\}\right]-\mu\right),\label{eq:Dn} \end{equation} and \begin{equation} B_{N}(\beta)=\frac{n^{1/2}}{N}\sum_{i\in A}\frac{1}{\pi_{i}}(1-\delta_{i})\{m(x_{i(1)};\beta)-m(x_{i};\beta)\}.\label{eq:Bn} \end{equation} The difference $m(x_{i(1)};\beta^{*})-m(x_{i};\beta^{*})$ accounts for the matching discrepancy, and $B_{N}(\beta^{*})$ contributes to the asymptotic bias of the matching estimator. In general, if the matching variable $x$ is $p$-dimensional, \citet{abadie2006large} showed that $d(x_{i(1)},x_{i})=O_{p}(n^{-1/p})$. Therefore, for nearest neighbor imputation with $p\geq2$, the bias $B_{N}(\beta^{*})=O_{p}(n^{1/2-1/p})\neq o_{p}(1)$ is not negligible; whereas, for predictive mean matching, the matching variable is a scalar function $m(x)$, and hence $B_{N}(\beta^{*})=O_{p}(n^{-1/2})=o_{p}(1)$. We establish the asymptotic distribution of $\hat{\mu}_{ {\mathrm{PMM}}}(\beta^{*})$. \begin{theorem}\label{Thm:1}Under Assumptions \ref{asmp:MAR}\textendash \ref{asmp:sampling}, suppose that $m(x)=E(y\mid x)=m(x;\beta^{*})$ and $\sigma^{2}(x)=\mathrm{var}(y\mid x)$. 
Then, $n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}(\begin{equation}ta^{*})-\mu\}\rightarrow {\mathcal{N}}(0,V_{1})$ in distribution, as $n\rightarrow\infty$, where \begin{equation}gin{equation} V_{1}=V^{m}+V^{e}\label{eq:V1} \end{equation}d{equation} with \begin{equation}gin{eqnarray*} V^{m} & = & \lim_{n\rightarrow\infty}nN^{-2}E[ {\mathrm{var}}_{p}\{\sum_{i\in A}\pi_{i}^{-1}m(x_{i})\}],\\ V^{e} & = & \lim_{n\rightarrow\infty}nN^{-2}E\{\sum_{i=1}^{N}\pi_{i}^{-1}(1-\pi_{i})\delta_{i}(1+k_{\begin{equation}ta^{*},i}){}^{2}\sigma^{2}(x_{i})\}, \end{equation}d{eqnarray*} and $k_{\begin{equation}ta,i}$ is defined in (\ref{eq:ki}). \end{equation}d{theorem} In practice, $\begin{equation}ta^{*}$ is unknown and therefore has to be estimated prior to matching. \textcolor{black}{Following \citet{abadie2016matching}, }the following theorem presents the approximate asymptotic distribution of $\hat{\mu}_{ {\mathrm{PMM}}}(\hat{\begin{equation}ta})$. \begin{equation}gin{theorem} \label{Thm:2} Under Assumptions \ref{asmp:MAR}\textendash \ref{asmp:sampling} and certain regularity conditions specified in the Appendix, $n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}(\hat{\begin{equation}ta})-\mu\}\rightarrow {\mathcal{N}}(0,V_{2})$ in distribution, as $n\rightarrow\infty$, where $\hat{\begin{equation}ta}$ is the solution to the estimating equation (\ref{eq:pesdo score}) and \begin{equation}gin{equation} V_{2}=V_{1}-\gamma_{1}^{\mathrm{\scriptscriptstyle T}}V_{s}^{-1}\gamma_{1}+\gamma_{2}^{\mathrm{\scriptscriptstyle T}}\left( {\tau}_{\begin{equation}ta^{*}}^{-1}V_{s} {\tau}_{\begin{equation}ta^{*}}^{-1}\right)\gamma_{2},\label{eq:sig2_adj} \end{equation}d{equation} $\gamma_{1}=\lim_{n\rightarrow\infty}nN^{-2}E\{\sum_{i=1}^{N}\pi_{i}^{-1}(1-\pi_{i})\delta_{i}(1+k_{\begin{equation}ta^{*},i})g(x_{i};\begin{equation}ta^{*})\sigma^{2}(x_{i})\},$ $\gamma_{2}=E\{\dot{m}(x;\begin{equation}ta^{*})\}$, $V_{1}$ is defined in (\ref{eq:V1}), $V_{s}= {\mathrm{var}}\{S_{N}(\begin{equation}ta^{*})\}$, $ 
{\tau}_{\begin{equation}ta}=E\{p(x)\dot{m}(x;\begin{equation}ta)$ $\dot{m}(x;\begin{equation}ta)^{\mathrm{\scriptscriptstyle T}}\}$, and $p(x)=\mbox{{\rm pr}}(\delta=1\mid x)$. \end{equation}d{theorem} The difference between $V_{2}$ and $V_{1}$, $-\gamma_{1}^{\mathrm{\scriptscriptstyle T}}V_{s}^{-1}\gamma_{1}+\gamma_{2}^{\mathrm{\scriptscriptstyle T}}( {\tau}_{\begin{equation}ta^{*}}^{-1}V_{s} {\tau}_{\begin{equation}ta^{*}}^{-1})\gamma_{2}$, can be positive or negative. Thus, the estimation error in the predictive mean function should not be ignored. This is different from the result in \textcolor{black}{\citet{abadie2016matching} that matching on the estimated propensity score always improves the estimation efficiency when matching on the true propensity score. To explain the difference, we note that the propensity score is auxiliary for estimating the population mean of outcome; whereas the predictive mean function is not.} \subsection*{3.2 Nearest neighbor imputation } Nearest neighbor imputation can be described in the following steps: \begin{equation}gin{description} \item [{Step$\ $1.}] For each unit $i$ with $\delta_{i}=0$, find the nearest neighbor from the respondents with the minimum distance between $x_{j}$ and $x_{i}$. Let $i(1)$ be the index set of its nearest neighbor, which satisfies $d(x_{i(1)},x_{i})\le d(x_{j},x_{i}),$ for $j\in A_{R}$. \item [{Step$\ $2.}] The nearest neighbor imputation estimator of $\mu$ is computed by \begin{equation}gin{equation} \hat{\mu}_{ {\mathrm{NNI}}}=\frac{1}{N}\sum_{i\in A}\frac{1}{\pi_{i}}\left\{ \delta_{i}y_{i}+(1-\delta_{i})y_{i(1)}\right\} =\frac{1}{N}\sum_{i\in A}\frac{1}{\pi_{i}}\delta_{i}(1+k_{i})y_{i},\label{eq:nni} \end{equation}d{equation} where $k_{i}$ is defined similarly as in (\ref{eq:ki}), but with the matching variable $x$. 
\end{equation}d{description} Following (\ref{eq:decomposition}), write $n^{1/2}(\hat{\mu}_{ {\mathrm{NNI}}}-\mu)=D_{N}+B_{N},$ where \[ D_{N}=n^{1/2}\left(\frac{1}{N}\sum_{i\in A}\frac{1}{\pi_{i}}\left[m(x_{i})+\delta_{i}(1+k_{i})\{y_{i}-m(x_{i})\}\right]-\mu\right), \] and \begin{equation}gin{equation} B_{N}=\frac{n^{1/2}}{N}\sum_{i\in A}\frac{1}{\pi_{i}}(1-\delta_{i})\{m(x_{i(1)})-m(x_{i})\}.\label{eq:bias} \end{equation}d{equation} Because the matching is based on a $p$-vector matching variable, the bias term $B_{N}=O_{p}(n^{1/2-1/p})$ with $p\geq2$ is not negligible. For bias correction, let $\hat{m}(x)$ be a consistent estimator of $m(x)=E(y\mid x)$. Then, we can estimate $B_{N}$ by $\hat{B}_{N}=n^{-1/2}N\sum_{i\in A}\pi_{i}^{-1}(1-\delta_{i})\{\hat{m}(x_{i(1)})-\hat{m}(x_{i})\}.$ A bias-corrected nearest neighbor imputation estimator of $\mu$ is \begin{equation}gin{equation} \tilde{\mu}_{ {\mathrm{NNI}}}=\frac{1}{N}\sum_{i\in A}\frac{1}{\pi_{i}}\{\delta_{i}y_{i}+(1-\delta_{i})y_{i}^{*}\},\label{eq:NNI} \end{equation}d{equation} where $y_{i}^{*}=\hat{m}(x_{i})+y_{i(1)}-\hat{m}(x_{i(1)})$. Under certain regularity conditions imposed on the nonparametric estimator $\hat{m}(x)$, $\hat{B}_{N}$ is consistent for $B_{N}$, i.e., $\hat{B}_{N}-B_{N}=o_{p}(1).$ \textcolor{black}{Then, the bias-corrected nearest neighbor imputation estimator has the same limiting distribution as the predictive mean matching estimator with known $\begin{equation}ta^{*}$ has. } \subsection*{3.3 Robustness against the predictive mean function specification} To discuss the robustness of the predictive mean matching estimator against the predictive mean function specification, let $m(x;\begin{equation}ta)$ be a working model for $E(y\mid x)$, $\hat{\begin{equation}ta}$ be the estimator of $\begin{equation}ta$ solving (\ref{eq:pesdo score}), and $\begin{equation}ta^{*}$ be its probability limit. We also use $m=m(x;\begin{equation}ta^{*})$ for shorthand. 
We require the following assumption to hold for the working model. \begin{assumption} \label{assumption-working model} \textcolor{black}{$E(y\mid m)$ is Lipschitz continuous in $m$; i.e., there exists a constant $C_{3}$ such that $|E(y\mid m_{i})-E(y\mid m_{j})|\leq C_{3}|m_{i}-m_{j}|$, for any $i,j$. } \end{assumption} Assumption \ref{assumption-working model} is trivial when $m(x;\beta)$ is correctly specified for $E(y\mid x)$, because in this case $E(y\mid m)=m$. \begin{theorem} Under Assumptions \ref{asmp:MAR}\textendash \ref{assumption-working model}, the predictive mean matching estimator based on the working model $m(x;\beta^{*})$ is consistent for $\mu$. \end{theorem} The result can be obtained directly from the decomposition (\ref{eq:decomposition}) by replacing $m(x;\beta)$ in $D_{N}(\beta)$ and $B_{N}(\beta)$ with $E\{y\mid m(x;\beta)\}$. The new term $D_{N}(\beta^{*})$ is still consistent for zero; by Assumption \ref{assumption-working model}, the new bias term becomes \begin{eqnarray*} |B_{N}(\beta^{*})| & = & \left|\frac{n^{1/2}}{N}\sum_{i\in A}\frac{1}{\pi_{i}}(1-\delta_{i})\left[E\{y\mid m(x_{i(1)};\beta^{*})\}-E\{y\mid m(x_{i};\beta^{*})\}\right]\right|\\ & \leq & \frac{n^{1/2}}{N}C_{3}\sum_{i\in A}\frac{1}{\pi_{i}}(1-\delta_{i})|m(x_{i(1)};\beta^{*})-m(x_{i};\beta^{*})|=O_{p}(n^{-1/2}). \end{eqnarray*} \section*{4. Replication Variance Estimation\label{sec:Replication-variance-estimation} } We consider replication variance estimation \citep{rust1996variance,wolter2007introduction} for the predictive mean matching estimator. 
Let $\hat{\mu}$ be the Horvitz-Thompson estimator of $\mu.$ The replication variance estimator of $\hat{\mu}$ takes the form of \begin{equation}gin{equation} \hat{V}_{ {\mathrm{rep}}}(\hat{\mu})=\sum_{k=1}^{L}c_{k}(\hat{\mu}^{(k)}-\hat{\mu})^{2},\label{eq:replication variance} \end{equation}d{equation} where $L$ is the number of replicates, $c_{k}$ is the $k$th replication factor, and $\hat{\mu}^{(k)}$ is the $k$th replicate of $\hat{\mu}$. When $\hat{\mu}=\sum_{i\in A}\omega_{i}y_{i}$, we can write the replicate of $\hat{\mu}$ as $\hat{\mu}^{(k)}=\sum_{i\in A}\omega_{i}^{(k)}y_{i}$ with some $\omega_{i}^{(k)}$ for $i\in A$. The replications are constructed such that $E\{\hat{V}_{ {\mathrm{rep}}}(\hat{\mu})\}= {\mathrm{var}}(\hat{\mu})\{1+o(1)\}$.\textcolor{black}{{} For example, in delete-1 jackknife under }probability proportional to size sampling with $\omega_{i}=N^{-1}\pi_{i}^{-1}$, we have $L=n$, $c_{k}=(n-1)/n$, and $\omega_{i}^{(k)}=n\omega_{i}/(n-1)$ if $i\neq k$, and $\omega_{k}^{(k)}=0$. We propose a new replication variance estimation for the predictive mean matching estimator. We first consider $\hat{\mu}_{ {\mathrm{PMM}}}(\begin{equation}ta^{*})$ with a known $\begin{equation}ta^{*}$ given in (\ref{eq:expression}). For simplicity, we suppress the dependence of quantities on $\begin{equation}ta^{*}$. Write $\hat{\mu}_{ {\mathrm{PMM}}}-\mu=(\hat{\mu}_{ {\mathrm{PMM}}}-\hat{\psi}_{ {\mathrm{HT}}})+(\hat{\psi}_{ {\mathrm{HT}}}-\mu_{\psi})+(\mu_{\psi}-\mu),$ where $\hat{\psi}_{ {\mathrm{HT}}}=\sum_{i\in A}\omega_{i}\psi_{i}$, $\psi_{i}=m(x_{i})+\delta_{i}(1+k_{i})\{y_{i}-m(x_{i})\}$, $\mu_{\psi}=N^{-1}\sum_{i=1}^{N}\psi_{i}$. By Theorem \ref{Thm:1}, $\mu_{ {\mathrm{PMM}}}-\hat{\psi}_{ {\mathrm{HT}}}=o_{p}(n^{-1/2})$. Together with the fact that $\mu_{\psi}-\mu=O_{p}(N^{-1/2})$ and $nN^{-1}=o(1)$, $\hat{\mu}_{ {\mathrm{PMM}}}-\mu=\hat{\psi}_{ {\mathrm{HT}}}-\mu_{\psi}+o_{p}(n^{-1/2})$. 
Therefore, with negligible sampling fractions, it is sufficient to estimate the variance of $\hat{\psi}_{ {\mathrm{HT}}}-\mu_{\psi}$. Because $E_{p}(\hat{\psi}_{ {\mathrm{HT}}}-\mu_{\psi})=0$, we have $ {\mathrm{var}}(\hat{\psi}_{ {\mathrm{HT}}}-\mu_{\psi})=E\{ {\mathrm{var}}_{p}(\hat{\psi}_{ {\mathrm{HT}}}-\mu_{\psi})\},$ which is essentially the sampling variance of $\hat{\psi}_{ {\mathrm{HT}}}$. This suggests that we can treat $\{\psi_{i}:i\in A\}$ as pseudo observations in applying the replication variance estimator. \citet{otsu2015bootstrap} used a similar idea to develop a wild bootstrap technique for a matching estimator. To be specific, we construct replicates of $\hat{\psi}_{ {\mathrm{HT}}}$ as follows: $\hat{\psi}_{ {\mathrm{HT}}}^{(k)}=\sum_{i\in A}\omega_{i}^{(k)}\psi_{i},$ where $\omega_{i}^{(k)}$ is the replication weight that account for complex sampling design. The replication variance estimator of $\hat{\psi}_{ {\mathrm{HT}}}$ is obtained by applying $\hat{V}_{ {\mathrm{rep}}}(\cdot)$ in (\ref{eq:replication variance}) for the above replicates $\hat{\psi}_{ {\mathrm{HT}}}^{(k)}$. It follows that $E\{\hat{V}_{ {\mathrm{rep}}}(\hat{\psi}_{ {\mathrm{HT}}})\}= {\mathrm{var}}(\hat{\psi}_{ {\mathrm{HT}}}-\mu_{\psi})\{1+o(1)\}= {\mathrm{var}}(\hat{\mu}_{ {\mathrm{PMM}}}-\mu)\{1+o(1)\}$. 
We now consider $\hat{\mu}_{ {\mathrm{PMM}}}(\hat{\begin{equation}ta})$, which can be expressed as $\hat{\mu}_{ {\mathrm{PMM}}}(\hat{\begin{equation}ta})=\sum_{i\in A}\omega_{i}[m(x_{i};\hat{\begin{equation}ta})+\delta_{i}(1+k_{\hat{\begin{equation}ta},i})\{y_{i}-m(x_{i};\hat{\begin{equation}ta})\}]+o_{p}(n^{-1/2}).$ To compute the replicates of $\hat{\mu}_{ {\mathrm{PMM}}}(\hat{\begin{equation}ta})$, we propose two steps: \begin{equation}gin{description} \item [{Step$\ $1.}] Obtain the $k$th replicate of $\hat{\begin{equation}ta}$, denoted as $\hat{\begin{equation}ta}^{(k)}$, by solving $S_{N}^{(k)}(\begin{equation}ta)=\sum_{i\in A}\omega_{i}^{(k)}\delta_{i}$ $\times g(x_{i};\begin{equation}ta)\{y_{i}-m(x_{i};\begin{equation}ta)\}=0$. \item [{Step$\ $2.}] Obtain the $k$th replicate as \begin{equation}gin{equation} \hat{\mu}_{ {\mathrm{PMM}}}^{(k)}(\hat{\begin{equation}ta}^{(k)})=\sum_{i\in A}\omega_{i}^{(k)}[m(x_{i};\hat{\begin{equation}ta}^{(k)})+\delta_{i}(1+k_{\hat{\begin{equation}ta}^{(k)},i})\{y_{i}-m(x_{i};\hat{\begin{equation}ta}^{(k)})\}].\label{eq:k-th rep} \end{equation}d{equation} \end{equation}d{description} If $\begin{equation}ta^{*}$ is known, we do not need to reflect the effect of estimating $\begin{equation}ta^{*}$, and the above procedure with two steps reduces to the one we proposed for the case when $\begin{equation}ta^{*}$ is known. On the other hand, when $\begin{equation}ta^{*}$ is estimated, Step 1 is necessary, because as shown in Theorem \ref{Thm:2}, the predictive mean matching estimators by matching on the true and estimated predictive mean function may have different asymptotic distributions. The consistency of the replication variance estimator is presented in the following theorem. \begin{equation}gin{theorem} \label{Thm: ve}Under the assumptions in Theorem \ref{Thm:2}, suppose that $\hat{V}_{ {\mathrm{rep}}}(\hat{\mu})$ in (\ref{eq:replication variance}) is consistent for $ {\mathrm{var}}_{p}(\hat{\mu})$. 
Then, if $nN^{-1}=o(1)$, the replication variance estimator for $\hat{\mu}_{ {\mathrm{PMM}}}(\hat{\beta})$ is consistent, i.e., $n\hat{V}_{ {\mathrm{rep}}}\{\hat{\mu}_{ {\mathrm{PMM}}}(\hat{\beta})\}/V_{2}\rightarrow1$ in probability, as $n\rightarrow\infty$, where the replicates of $\hat{\mu}_{ {\mathrm{PMM}}}(\hat{\beta})$ are given in (\ref{eq:k-th rep}), and $V_{2}$ is given in (\ref{eq:sig2_adj}). \end{theorem} \section*{5. A Simulation Study\label{sec:A-Simulation-Study}} In this simulation study, we investigate the performance of the proposed replication variance estimator. For generating finite populations of size $N=50,000$: first, let $x_{1i}$, $x_{2i}$ and $x_{3i}$ be generated independently from Uniform$[0,1]$, and $x_{4i}$, $x_{5i}$, $x_{6i}$ and $e_{i}$ be generated independently from $ {\mathcal{N}}(0,1)$; then, let $y_{i}$ be generated as (P1) $y_{i}=-1+x_{1i}+x_{2i}+e_{i}$, (P2) $y_{i}=-1.167+x_{1i}+x_{2i}+(x_{1i}-0.5)^{2}+(x_{2i}-0.5)^{2}+e_{i}$, and (P3) $y_{i}=-1.5+x_{1i}+\cdots+x_{6i}+e_{i}$. The covariates are fully observed, but $y_{i}$ is not. The response indicator of $y_{i}$, $\delta_{i}$, is generated from Bernoulli$(p_{i})$ with $\mathrm{logit}\{p(x_{i})\}=0.2+x_{1i}+x_{2i}$. This results in an average response rate of about $75\%$. The parameter of interest is $\mu=N^{-1}\sum_{i=1}^{N}y_{i}$. To generate samples, we consider two sampling designs: (S1) simple random sampling with $n=400$; (S2) probability proportional to size sampling. In (S2), for each unit in the population, we generate a size variable $s_{i}$ as $\log(|y_{i}+\nu_{i}|+4)$, where $\nu_{i}\sim {\mathcal{N}}(0,1)$. The selection probability is specified as $\pi_{i}=400s_{i}/\sum_{i=1}^{N}s_{i}$. Therefore, (S2) is informative, where units with larger $y_{i}$ values have larger probabilities to be selected into the sample. 
For estimation, we consider predictive mean matching imputation, nearest neighbor imputation, and stochastic regression imputation. In stochastic regression imputation, for units with $\delta_{i}=0$, the imputation of $y_{i}$ is obtained as $y_{i}^{*}=\hat{y}_{i}+\hat{e}_{i}^{*}$, where $\hat{y}_{i}=m(x_{i};\hat{\begin{equation}ta})$ and $\hat{e}_{i}^{*}$ is randomly selected from the observed residuals $\{\hat{e}_{i}=y_{i}-\hat{y}_{i}:\delta_{i}=1\}$. For (P1) and (P2), we specify the predictive mean function to be $m(x;\begin{equation}ta)=\begin{equation}ta_{0}+\begin{equation}ta_{1}x_{1}+\begin{equation}ta_{2}x_{2}$. Note that for (P1), $m(x;\begin{equation}ta)$ is correctly specified; whereas for (P2), $m(x;\begin{equation}ta)$ is misspecified. For (P3), we specify the mean function to be $m(x;\begin{equation}ta)=\begin{equation}ta_{0}+\begin{equation}ta^{\mathrm{\scriptscriptstyle T}}x$, where $x=(x_{1},\cdots,x_{6})$. We construct $95\%$ confidence intervals using $(\hat{\mu}_{I}-z_{0.975}\hat{V}_{I}^{1/2},\hat{\mu}_{I}+z_{0.975}\hat{V}_{I}^{1/2})$, where $\hat{\mu}_{I}$ is the point estimate and $\hat{V}_{I}$ is the variance estimate obtained by the proposed jackknife variance estimation. For stochastic regression imputation, the $k$th replicate of $\mu$ is given by $\hat{\mu}_{ {\mathrm{REG}}}^{(k)}(\hat{\begin{equation}ta}^{(k)})=\sum_{i\in A}\omega_{i}^{(k)}[m(x_{i};\hat{\begin{equation}ta}^{(k)})+\delta_{i}(1+k_{i})\{y_{i}-m(x_{i};\hat{\begin{equation}ta}^{(k)})\}],$ where $\hat{\begin{equation}ta}^{(k)}$ is obtained from the estimating equation of $\begin{equation}ta$ based on the replication weights, and $k_{i}$ is the number of times that $\hat{e}_{i}$ is selected to impute the missing values of $y$ based on the original data. Table \ref{tab:Sim1} presents the simulation results based on $2,000$ Monte Carlo samples. When the covariate is $2$-dimensional, all three imputation estimators have small biases, even when the mean function is misspecified. 
In addition, the proposed jackknife method provides valid coverage of confidence intervals for the predictive mean matching and stochastic regression imputation estimators in all scenarios. This suggests that the proposed replication method can be used widely even for stochastic regression imputation. When the covariate is $6$-dimensional, nearest neighbor imputation presents large biases and low coverage rates. \begin{equation}gin{table} \begin{equation}gin{centering} {\scriptsize{}\caption{{\scriptsize{}\label{tab:Sim1}}Simulation results: Bias ($\times10^{2}$) and S.E. ($\times10^{2}$) of the point estimator, Relative Bias of jackknife variance estimates ($\times10^{2}$) and Coverage Rate ($\%$) of $95\%$ confidence intervals.} } \par\end{equation}d{centering}{\scriptsize \par} \centering{} \begin{equation}gin{centering} \begin{equation}gin{tabular}{ccccccccccccc} \hline & \multicolumn{2}{c}{PMM} & \multicolumn{2}{c}{NNI} & \multicolumn{2}{c}{SRI} & \multicolumn{2}{c}{PMM} & \multicolumn{2}{c}{NNI} & \multicolumn{2}{c}{SRI}\tabularnewline & Bias & S.E. & Bias & S.E. & Bias & S.E. 
& RB & CR & RB & CR & RB & CR \tabularnewline \hline \multicolumn{13}{c}{Simple Random Sampling }\tabularnewline (P1) & -0.15 & 6.46 & -0.21 & 6.54 & -0.23 & 6.44 & 4 & 95.2 & 3 & 95.1 & 5 & 95.8\tabularnewline (P2) & -0.22 & 6.54 & -0.25 & 6.55 & -0.37 & 6.46 & 6 & 95.5 & 3 & 95.3 & 5 & 95.6\tabularnewline (P3) & 1.90 & 11.85 & 18.59 & 11.06 & 0.11 & 11.17 & 5 & 95.1 & 4 & 63.8 & 4 & 95.5\tabularnewline \multicolumn{13}{c}{Probability Proportional to Size Sampling}\tabularnewline (P1) & 0.05 & 6.46 & 0.13 & 6.37 & 0.18 & 6.53 & 3 & 95.3 & 3 & 94.8 & 2 & 94.9\tabularnewline (P2) & 0.30 & 6.52 & 0.12 & 6.47 & 0.16 & 6.60 & 2 & 95.3 & 0 & 95.3 & 3 & 94.9\tabularnewline (P3) & 1.33 & 10.99 & 17.53 & 10.70 & 0.40 & 11.10 & 6 & 95.6 & 3 & 65.5 & -3 & 95.6\tabularnewline \hline \end{equation}d{tabular} \par\end{equation}d{centering} \textsc{\textcolor{black}{\scriptsize{}PMM: predictive mean matching; NNI: nearest neighbor imputation; SRI: stochastic regression imputation.}}{\scriptsize{} }{\scriptsize \par} \end{equation}d{table} \section*{6. Discussion\label{sec:Discussion}} Propensity score matching has been recently proposed for inferring causal effects of treatments in the context of survey data; however, their asymptotic properties are underdeveloped \citep{lenis2017s}. Because causal inference is inherently a missing data problem (e.g., \citealp{ding2017causal}), the proposed methodology here can be easily generalized to investigate the asymptotic properties of propensity score matching estimators with survey weights. Instead of choosing the nearest neighbor as a donor for missing items, we can consider fractional imputation \citep{kim2004fractional,yang2016fi} using $K$ $(K>1)$ nearest neighbors. Such extension remains an interesting topic for future research. 
\section*{Appendix} \global\long\defA\arabic{equation}{A\arabic{equation}} \setcounter{equation}{0} \global\long\defA\arabic{section}{A\arabic{section}} \setcounter{equation}{0} \global\long\defA\arabic{table}{A\arabic{table}} \setcounter{equation}{0} \global\long\defA\arabic{example}{A\arabic{example}} \setcounter{equation}{0} \global\long\defA\arabic{theorem}{A\arabic{theorem}} \setcounter{equation}{0} \global\long\defA\arabic{condition}{A\arabic{condition}} \setcounter{equation}{0} \global\long\defA\arabic{remark}{A\arabic{remark}} \setcounter{equation}{0} \global\long\defA\arabic{step}{A\arabic{step}} \setcounter{equation}{0} \global\long\defA\arabic{assumption}{A\arabic{assumption}} \setcounter{equation}{0} \global\long\defA\arabic{proof}{A\arabic{proof}} \setcounter{equation}{0} \section*{A1 Proof for Theorem 1} Based on the decomposition in (6), write \begin{equation}gin{equation} n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}(\begin{equation}ta^{*})-\mu\}=D_{N}(\begin{equation}ta^{*})+B_{N}(\begin{equation}ta^{*}),\label{eq:A1} \end{equation}d{equation} where $D_{N}(\begin{equation}ta)$ and $B_{N}(\begin{equation}ta)$ are defined in (7) and (8), respectively. \textcolor{black}{For simplicity, we introduce the following notation: $m_{i}=m(x_{i};\begin{equation}ta^{*})$ and $e_{i}=y_{i}-m_{i}$.} Under Assumption 2, for the predictive mean matching estimator, $m_{i(1)}-m_{i}=O_{p}(1)$. Together with Assumption 3, we derive the order of $B_{N}(\begin{equation}ta^{*})$ as \[ B_{N}(\begin{equation}ta^{*})=\frac{n^{1/2}}{N}\sum_{i\in A}\frac{1}{\pi_{i}}(1-\delta_{i})(m_{i(1)}-m_{i})=O_{p}(n^{-1/2})=o_{p}(1). \] Therefore, (\ref{eq:A1}) reduces to \[ n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}(\begin{equation}ta^{*})-\mu\}=D_{N}(\begin{equation}ta^{*})+o_{p}(1). \] Then, to study the asymptotic properties of $n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}(\begin{equation}ta^{*})-\mu\}$, we only need to study the asymptotic properties of $D_{N}(\begin{equation}ta^{*})$. 
We express \begin{equation}gin{eqnarray*} D_{N}(\begin{equation}ta^{*}) & = & \frac{n^{1/2}}{N}\left[\sum_{i\in A}\frac{1}{\pi_{i}}\left\{ m_{i}+\delta_{i}(1+k_{\begin{equation}ta^{*},i})e_{i}\right\} -\mu\right] \end{equation}d{eqnarray*} \begin{equation}gin{multline} =\frac{n^{1/2}}{N}\sum_{i=1}^{N}\left(\frac{I_{i}}{\pi_{i}}-1\right)m_{i}+\frac{n^{1/2}}{N}\sum_{i=1}^{N}\left(\frac{I_{i}}{\pi_{i}}-1\right)\delta_{i}(1+k_{\begin{equation}ta^{*},i})e_{i}\\ +\frac{n^{1/2}}{N}\sum_{i=1}^{N}(m_{i}-\mu)+\frac{n^{1/2}}{N}\sum_{i=1}^{N}\delta_{i}(1+k_{\begin{equation}ta^{*},i})e_{i}\\ =\frac{n^{1/2}}{N}\sum_{i=1}^{N}\left(\frac{I_{i}}{\pi_{i}}-1\right)m_{i}+\frac{n^{1/2}}{N}\sum_{i=1}^{N}\left(\frac{I_{i}}{\pi_{i}}-1\right)\delta_{i}(1+k_{\begin{equation}ta^{*},i})e_{i}+o_{p}(1),\label{eq:A2} \end{equation}d{multline} given $nN^{-1}=o(1)$. We can verify that the covariance of the two terms in (\ref{eq:A2}) is zero. Thus, the asymptotic variance of $D_{N}(\begin{equation}ta^{*})$ is \[ {\mathrm{var}}\left\{ \frac{n^{1/2}}{N}\sum_{i=1}^{N}\left(\frac{I_{i}}{\pi_{i}}-1\right)m_{i}\right\} + {\mathrm{var}}\left\{ \frac{n^{1/2}}{N}\sum_{i=1}^{N}\left(\frac{I_{i}}{\pi_{i}}-1\right)\delta_{i}(1+k_{\begin{equation}ta^{*},i})e_{i}\right\} . \] The first term, as $n\rightarrow\infty$, becomes \[ V^{m}=\lim_{n\rightarrow\infty}\frac{n}{N^{2}}E\left\{ {\mathrm{var}}_{p}\left(\sum_{i\in A}\frac{m_{i}}{\pi_{i}}\right)\right\} , \] and the second term, as $n\rightarrow\infty$, becomes \[ V^{e}= {\mathrm{plim}}\frac{n}{N^{2}}\sum_{i=1}^{N}\frac{1-\pi_{i}}{\pi_{i}}\delta_{i}(1+k_{\begin{equation}ta^{*},i})^{2} {\mathrm{var}}(e_{i}\mid x_{i}). \] The remaining is to show that $V^{e}=O(1)$. To do this, the key is to show that the moments of $k_{\begin{equation}ta^{*},i}$ are bounded. 
Under Assumption 3, it is easy to verify that \begin{equation} \underbar{\ensuremath{\omega}}\tilde{k}_{\beta^{*},i}\leq k_{\beta^{*},i}\leq\bar{\omega}\tilde{k}_{\beta^{*},i},\label{eq:A3} \end{equation} for some constants $\underbar{\ensuremath{\omega}}$ and $\bar{\omega}$, where $\tilde{k}_{\beta^{*},i}=\sum_{j=1}^{n}(1-\delta_{j})d_{ij}$ is the number of times unit $i$ is used as a match for the nonrespondents. Under Assumption 2, $\tilde{k}_{\beta^{*},i}=O_{p}(1)$ and $E(\tilde{k}_{\beta^{*},i})$ and $E(\tilde{k}_{\beta^{*},i}^{2})$ are uniformly bounded over $n$ (\citealp{abadie2006large}, Lemma 3); therefore, together with (\ref{eq:A3}), we have $k_{\beta^{*},i}=O_{p}(1)$ and $E(k_{\beta^{*},i})$ and $E(k_{\beta^{*},i}^{2})$ are uniformly bounded over $n$. Therefore, simple algebra yields $V^{e}=O(1)$. Combining all results, the asymptotic variance of $n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}(\beta^{*})-\mu\}$ is $V^{m}+V^{e}$. By the central limit theorem, the result in Theorem 1 follows. \section*{A2 Le Cam's third Lemma} Consider two sequences of probability measures $(Q^{(N)})_{N=1}^{\infty}$ and $(P^{(N)})_{N=1}^{\infty}$. Assume that under $P^{(N)}$, a statistic $T_{N}$ and the likelihood ratios $dQ^{(N)}/dP^{(N)}$ satisfy \[ \left(\begin{array}{c} T_{N}\\ \log(dQ^{(N)}/dP^{(N)}) \end{array}\right)\rightarrow {\mathcal{N}}\left\{ \left(\begin{array}{c} 0\\ -\sigma^{2}/2 \end{array}\right),\left(\begin{array}{cc} \tau^{2} & c\\ c & \sigma^{2} \end{array}\right)\right\} \] in distribution, as $N\rightarrow\infty$. Then, under $Q^{(N)}$, \[ T_{N}\rightarrow {\mathcal{N}}(c,\tau^{2}) \] in distribution, as $N\rightarrow\infty$. 
See \citet{le1990asymptotics}, \citet{bickel1993efficient} and \citet{van1998asymptotic} for textbook discussions. \section*{A3 Proof for Theorem 2\label{sec:Proof-for-Theorem2} } Let\textcolor{blue}{{} }\textcolor{black}{$P$ be the distribution of $(x_{i},y_{i},\delta_{i},I_{i})$, for $i=1,\ldots,N$, }induced by the marginal distribution of $x_{i}$, the conditional distribution of $y_{i}$ given $x_{i}$, the conditional distribution of $\delta_{i}$ given $(x_{i},y_{i})$, and the conditional distribution of $I_{i}$ given $(x_{i},y_{i},\delta_{i})$. Consider $P$ to be restricted by the moment condition through the predictive mean function (1) with the true parameter value $\beta^{*}$. We can treat the consistent estimator $\hat{\beta}$ as the solution to the normalized estimating equation \begin{equation} S_{N}(\beta)=\frac{n^{1/2}}{N}\sum_{i=1}^{N}\frac{I_{i}}{\pi_{i}}\delta_{i}g(x_{i};\beta)\{y_{i}-m(x_{i};\beta)\}=0.\label{eq:pesdo score-1} \end{equation} To discuss the asymptotic properties of $\hat{\mu}_{ {\mathrm{PMM}}}(\hat{\beta})$, we rely on Le Cam's third lemma\textcolor{blue}{{} }and consider an auxiliary parametric model $P^{\beta}$ defined locally around $\beta^{*}$ with a density \begin{equation} \frac{\exp\left\{ n^{1/2}(\beta-\beta^{*})^{\mathrm{\scriptscriptstyle T}} {\tau}_{\beta^{*}}V_{s}^{-1}S_{N}(\beta^{*})-2^{-1}n(\beta-\beta^{*})^{\mathrm{\scriptscriptstyle T}}\Lambda^{-1}(\beta-\beta^{*})\right\} }{E\left[\exp\left\{ n^{1/2}(\beta-\beta^{*})^{\mathrm{\scriptscriptstyle T}} {\tau}_{\beta^{*}}V_{s}^{-1}S_{N}(\beta^{*})-2^{-1}n(\beta-\beta^{*})^{\mathrm{\scriptscriptstyle 
T}}\Lambda^{-1}(\beta-\beta^{*})\right\} \right]}.\label{eq:par} \end{equation} Because under $P^{\beta^{*}}$, $S_{N}(\beta^{*})\rightarrow {\mathcal{N}}(0,V_{s})$ in distribution, the normalizing constant in the denominator converges to $1$ as $n\rightarrow\infty$. The Fisher information under the parametric model (\ref{eq:par}) is $n\Lambda^{-1}.$ Therefore, $\hat{\beta}$ is efficient under (\ref{eq:par}). We now consider sequences that are local to $\beta^{*}$, $\beta_{N}=\beta^{*}+n^{-1/2}h$, indexed by $N$. In our context, the population size $N$ goes to infinity with the sample size $n$. \textcolor{black}{Consider $(x_{i},y_{i},\delta_{i},I_{i})$, for $i=1,\ldots,N$,} with the local shift $P^{\beta_{N}}$ (\citealp{bickel1993efficient}). We make the following regularity assumptions: \begin{assumption}\label{asump:leCam3rd}(i) The superpopulation model is regular (\citealp{bickel1993efficient}, pp 12\textendash 13); (ii) under $P^{\beta_{N}}$: $S_{N}(\beta_{N})\rightarrow {\mathcal{N}}(0,V_{s})$ in distribution, as $n\rightarrow\infty$; (iii) $ {\tau}_{\beta}$ is nonsingular around $\beta^{*}$, and $n^{1/2}(\hat{\beta}-\beta_{N})= {\tau}_{\beta^{*}}^{-1}S_{N}(\beta_{N})+o_{p}(1)$; (iv) for all bounded continuous functions $h(x,y,\delta,I)$, the conditional expectation $E_{\beta_{N}}\{h(x,y,\delta,I)\mid x,\delta=1\}$ converges in distribution to $E\{h(x,y,$ $\delta,I)\mid x,\delta=1\}$, where $E_{\beta_{N}}$ is the expectation with respect to $P^{\beta_{N}}$. \end{assumption} We now give a sketch of the proof for Theorem 2. 
Under (\ref{eq:par}), the likelihood ratio under $P^{\beta_{N}}$ is \begin{eqnarray*} \log(dP^{\beta^{*}}/dP^{\beta_{N}}) & = & -h^{\mathrm{\scriptscriptstyle T}} {\tau}_{\beta^{*}}V_{s}^{-1}S_{N}(\beta^{*})+\frac{1}{2}h^{\mathrm{\scriptscriptstyle T}}\Lambda^{-1}h+o_{p}(1)\\ & = & -h^{\mathrm{\scriptscriptstyle T}} {\tau}_{\beta^{*}}V_{s}^{-1}S_{N}(\beta_{N})-\frac{1}{2}h^{\mathrm{\scriptscriptstyle T}}\Lambda^{-1}h+o_{p}(1), \end{eqnarray*} where the second equality follows by the Taylor expansion of $S_{N}(\beta^{*})$ at $\beta_{N}$. We can derive that under $P^{\beta_{N}}${\small{}, \begin{multline} \left(\begin{array}{c} n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}(\beta_{N})-\mu(\beta_{N})\}\\ n^{1/2}(\hat{\beta}-\beta_{N})\\ \log(dP^{\beta^{*}}/dP^{\beta_{N}}) \end{array}\right)\\ \rightarrow {\mathcal{N}}\left\{ \left(\begin{array}{c} 0\\ 0\\ \frac{-1}{2}h^{\mathrm{\scriptscriptstyle T}}\Lambda^{-1}h \end{array}\right),\left(\begin{array}{ccc} V_{1} & \gamma_{1}^{\mathrm{\scriptscriptstyle T}} {\tau}_{\beta^{*}}^{-1} & -\gamma_{1}^{\mathrm{\scriptscriptstyle T}}V_{s}^{-1} {\tau}_{\beta^{*}}h\\ {\tau}_{\beta^{*}}^{-1}\gamma_{1} & \Lambda & -h\\ -h^{\mathrm{\scriptscriptstyle T}} {\tau}_{\beta^{*}}V_{s}^{-1}\gamma_{1} & -h^{\mathrm{\scriptscriptstyle T}} & h^{\mathrm{\scriptscriptstyle T}}\Lambda^{-1}h \end{array}\right)\right\} \label{eq:(11)} \end{multline} }in distribution, as $n\rightarrow\infty$. Here, we write $\mu=\mu(\beta_{N})$ to reflect its dependence on $\beta_{N}$. 
We then express $\mu(\beta_{N})=\mu(\beta^{*})+\gamma_{2}^{\mathrm{\scriptscriptstyle T}}(n^{-1/2}h)+o(n^{-1/2})$, and use the shorthand $\mu$ for $\mu(\beta^{*})$. By Le Cam's third lemma, under $P^{\beta^{*}}$, we have{\small{} \[ \left(\begin{array}{c} n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}(\beta_{N})-\mu\}\\ n^{1/2}(\hat{\beta}-\beta_{N}) \end{array}\right)\rightarrow {\mathcal{N}}\left\{ \left(\begin{array}{c} -\gamma_{1}^{\mathrm{\scriptscriptstyle T}}V_{s}^{-1} {\tau}_{\beta^{*}}h-\gamma_{2}^{\mathrm{\scriptscriptstyle T}}h\\ -h \end{array}\right),\left(\begin{array}{cc} V_{1} & \gamma_{1}^{\mathrm{\scriptscriptstyle T}} {\tau}_{\beta^{*}}^{-1}\\ {\tau}_{\beta^{*}}^{-1}\gamma_{1} & \Lambda \end{array}\right)\right\} \] }in distribution, as $n\rightarrow\infty$. Replacing $\beta_{N}$ by $\beta^{*}+n^{-1/2}h$ yields that under $P^{\beta^{*}}$\textcolor{black}{\small{}, \[ \left(\begin{array}{c} n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}(\beta^{*}+n^{-1/2}h)-\mu\}\\ n^{1/2}(\hat{\beta}-\beta^{*}) \end{array}\right)\rightarrow {\mathcal{N}}\left\{ \left(\begin{array}{c} -\gamma_{1}^{\mathrm{\scriptscriptstyle T}}V_{s}^{-1} {\tau}_{\beta^{*}}h-\gamma_{2}^{\mathrm{\scriptscriptstyle T}}h\\ 0 \end{array}\right),\left(\begin{array}{cc} V_{1} & \gamma_{1}^{\mathrm{\scriptscriptstyle T}} {\tau}_{\beta^{*}}^{-1}\\ {\tau}_{\beta^{*}}^{-1}\gamma_{1} & \Lambda \end{array}\right)\right\} \] }in distribution, as $n\rightarrow\infty$. 
Heuristically, if the normal distribution were exact, then \begin{equation} n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}(\beta^{*}+n^{-1/2}h)-\mu\}\mid n^{1/2}(\hat{\beta}-\beta^{*})=h\sim {\mathcal{N}}\left(-\gamma_{2}^{\mathrm{\scriptscriptstyle T}}h,V_{1}-\gamma_{1}^{\mathrm{\scriptscriptstyle T}}V_{s}^{-1}\gamma_{1}\right).\label{eq:(12)} \end{equation} Given $n^{1/2}(\hat{\beta}-\beta^{*})=h$, we have $\beta^{*}+n^{-1/2}h=\hat{\beta}$, and hence $\hat{\mu}_{ {\mathrm{PMM}}}(\beta^{*}+n^{-1/2}h)=\hat{\mu}_{ {\mathrm{PMM}}}(\hat{\beta})$. Integrating (\ref{eq:(12)}) over the asymptotic distribution of $n^{1/2}(\hat{\beta}-\beta^{*})$, we derive \begin{equation} n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}(\hat{\beta})-\mu\}\sim {\mathcal{N}}\left(0,V_{1}-\gamma_{1}^{\mathrm{\scriptscriptstyle T}}V_{s}^{-1}\gamma_{1}+\gamma_{2}^{\mathrm{\scriptscriptstyle T}}\Lambda\gamma_{2}\right).\label{eq:(13)} \end{equation} The formal technique to derive (\ref{eq:(13)}) can be found in \citet{andreou2012alternative}. Equation (\ref{eq:(13)}) gives the result in Theorem 2. In the following, we provide the proof of (\ref{eq:(11)}). Asymptotic normality of $n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}(\beta_{N})-\mu\}$ under $P^{\beta_{N}}$ follows from Theorem 1. Asymptotic joint normality of $n^{1/2}(\hat{\beta}-\beta_{N})$ and $\log(dP^{\beta^{*}}/dP^{\beta_{N}})$ follows from Assumption \ref{asump:leCam3rd}. 
Therefore, it remains to show that, under $P^{\beta_{N}}$: \begin{equation} \left(\begin{array}{c} D_{N}(\beta_{N})\\ S_{N}(\beta_{N}) \end{array}\right)\rightarrow {\mathcal{N}}\left\{ \left(\begin{array}{c} 0\\ 0 \end{array}\right),\left(\begin{array}{cc} V_{1} & \gamma_{1}^{T}\\ \gamma_{1} & V_{s} \end{array}\right)\right\} \label{eq:dist1} \end{equation} in distribution, as $n\rightarrow\infty$. To prove (\ref{eq:dist1}), consider the linear combination $c_{1}D_{N}(\beta_{N})+c_{2}^{\mathrm{\scriptscriptstyle T}}S_{N}(\beta_{N})$, which has the same limiting distribution as\textcolor{black}{{} } \begin{eqnarray*} C_{N} & = & c_{1}\frac{n^{1/2}}{N}\sum_{i=1}^{N}\left(\frac{I_{i}}{\pi_{i}}-1\right)m(x_{i};\beta_{N})\\ & & +c_{1}\frac{n^{1/2}}{N}\sum_{i=1}^{N}\left(\frac{I_{i}}{\pi_{i}}-1\right)\delta_{i}(1+k_{\beta_{N},i})\{y_{i}-m(x_{i};\beta_{N})\}\\ & & +c_{2}^{\mathrm{\scriptscriptstyle T}}\frac{n^{1/2}}{N}\sum_{i=1}^{N}\left(\frac{I_{i}}{\pi_{i}}-1\right)\delta_{i}g(x_{i};\beta_{N})\{y_{i}-m(x_{i};\beta_{N})\}, \end{eqnarray*} \textcolor{black}{given $nN^{-1}=o(1)$.} We analyze $C_{N}$ using martingale theory. First, we rewrite $C_{N}=\sum_{k=1}^{N}\xi_{N,k},$ where \[ \xi_{N,k}=c_{1}\frac{n^{1/2}}{N}\left(\frac{I_{k}}{\pi_{k}}-1\right)m(x_{k};\beta_{N}) \] \begin{eqnarray*} & & +c_{1}\frac{n^{1/2}}{N}\left(\frac{I_{k}}{\pi_{k}}-1\right)\delta_{k}(1+k_{\beta_{N},k})\{y_{k}-m(x_{k};\beta_{N})\}\\ & & +c_{2}^{\mathrm{\scriptscriptstyle T}}\frac{n^{1/2}}{N}\left(\frac{I_{k}}{\pi_{k}}-1\right)\delta_{k}g(x_{k};\beta_{N})\{y_{k}-m(x_{k};\beta_{N})\}. 
\end{eqnarray*} Consider the $\sigma$-fields $ {\mathcal{F}}_{N,k}=\sigma\{x_{1},\ldots,x_{N},\delta_{1},\ldots,\delta_{N},y_{1},\ldots,y_{k},I_{1},\ldots,I_{k}\}$ for $1\leq k\leq N$. Then, $\{\sum_{k=1}^{i}\xi_{N,k}, {\mathcal{F}}_{N,i},1\leq i\leq N\}$ is a martingale for each $N\geq1$. Therefore, the limiting distribution of $C_{N}$ can be studied using the martingale central limit theorem (Theorem 35.12, \citealp{billingsley1995probability}). Under Assumption 2, and the fact that $k_{\beta_{N},k}$ has uniformly bounded moments, it follows that $\sum_{k=1}^{N}E_{\beta_{N}}(|\xi_{N,k}|^{2+\delta})\rightarrow0$ for some $\delta>0$. It then follows that Lindeberg's condition in Billingsley's theorem holds. As a result, we obtain that under $P^{\beta_{N}}$, $C_{N}\rightarrow {\mathcal{N}}(0,\sigma^{2})$ in distribution, as $n\rightarrow\infty$, where $\sigma^{2}= {\mathrm{plim}}\sum_{k=1}^{N}E_{\beta_{N}}(\xi_{N,k}^{2}\mid {\mathcal{F}}_{N,k-1})$. 
Assumption \ref{asump:leCam3rd} further implies the following expressions:{\small{} \begin{eqnarray*} \sigma^{2} & = & {\mathrm{plim}}\sum_{k=1}^{N}E_{\beta_{N}}(\xi_{N,k}^{2}\mid {\mathcal{F}}_{N,k-1})\\ & = & c_{1}^{2} {\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}E_{\beta_{N}}\left[\left\{ \left(\frac{I_{k}}{\pi_{k}}-1\right)m(x_{k};\beta_{N})\right\} ^{2}\mid {\mathcal{F}}_{N,k-1}\right]\\ & & +c_{1}^{2} {\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}E_{\beta_{N}}\left(\left[\left(\frac{I_{k}}{\pi_{k}}-1\right)\delta_{k}(1+k_{\beta_{N},k})\{y_{k}-m(x_{k};\beta_{N})\}\right]^{2}\mid {\mathcal{F}}_{N,k-1}\right)\\ & & +2c_{2}^{\mathrm{\scriptscriptstyle T}} {\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}E_{\beta_{N}}\left[\left(\frac{I_{k}}{\pi_{k}}-1\right)^{2}\delta_{k}(1+k_{\beta_{N},k})g(x_{k};\beta_{N})\{y_{k}-m(x_{k};\beta_{N})\}^{2}\mid {\mathcal{F}}_{N,k-1}\right]c_{1}\\ & & +c_{2}^{\mathrm{\scriptscriptstyle T}} {\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}E_{\beta_{N}}\left[\left(\frac{I_{k}}{\pi_{k}}-1\right)^{2}\delta_{k}g(x_{k};\beta_{N})g(x_{k};\beta_{N})^{\mathrm{\scriptscriptstyle T}}\{y_{k}-m(x_{k};\beta_{N})\}^{2}\mid {\mathcal{F}}_{N,k-1}\right]c_{2} \end{eqnarray*} } \begin{eqnarray*} & = & c_{1}^{2} {\mathrm{plim}}\frac{n}{N^{2}} {\mathrm{var}}_{p}\left(\sum_{k\in A}\frac{m_{k}}{\pi_{k}}\right)+c_{1}^{2} {\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}\frac{1-\pi_{k}}{\pi_{k}}\delta_{k}(1+k_{\beta^{*},k})^{2}\sigma^{2}(x_{k})\\ & & +2c_{2}^{\mathrm{\scriptscriptstyle T}} {\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}\frac{1-\pi_{k}}{\pi_{k}}\delta_{k}(1+k_{\beta^{*},k})g(x_{k};\beta^{*})\sigma^{2}(x_{k})c_{1}\\ & & +c_{2}^{\mathrm{\scriptscriptstyle T}} 
{\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}\frac{1-\pi_{k}}{\pi_{k}}\delta_{k}g(x_{k};\beta^{*})g(x_{k};\beta^{*})^{\mathrm{\scriptscriptstyle T}}\sigma^{2}(x_{k})c_{2}\\ & = & c_{1}^{2}V^{m}+c_{1}^{2}V^{e}+2c_{2}^{\mathrm{\scriptscriptstyle T}}\gamma_{1}c_{1}+c_{2}^{\mathrm{\scriptscriptstyle T}}V_{s}c_{2}. \end{eqnarray*} By the martingale central limit theorem, under $P^{\beta_{N}},$ (\ref{eq:dist1}) follows. \section*{A4 Proof for Theorem 4} \textcolor{black}{The replication method implicitly induces replication weights $\omega_{i}^{*}$ and random variables $u_{i}$ such that $E^{*}(\omega_{i}^{*}u_{i})=N^{-1}\pi_{i}^{-1}$ and $ {\mathrm{var}}^{*}(\omega_{i}^{*}u_{i})=N^{-2}(1-\pi_{i})\pi_{i}^{-2}$, for $i=1,\ldots,N$, where $E^{*}(\cdot)$ and $ {\mathrm{var}}^{*}(\cdot)$ denote the expectation and variance for the resampling given the observed data. For example, in delete-1 jackknife under probability proportional to size sampling with $nN^{-1}=o(1)$, we have $\omega_{i}^{(k)}=(n-1)^{-1}n\omega_{i}$ if $i\neq k$, and $\omega_{k}^{(k)}=0$. 
}Then, the induced random variables $u_{i}$ follow a two-point mass distribution as \[ u_{i}=\begin{cases} 1, & \text{with probability \ensuremath{\frac{n-1}{n}},}\\ 0, & \text{with probability }\frac{1}{n}, \end{cases} \] and weights $\omega_{i}^{*}=(n-1)^{-1}n\omega_{i}.$ It is straightforward to verify that $E^{*}(\omega_{i}^{*}u_{i})=\omega_{i}=N^{-1}\pi_{i}^{-1}$ and $ {\mathrm{var}}^{*}\{(\omega_{i}^{*}u_{i})^{2}\}=(n-1)^{-1}\omega_{i}^{2}\approx n^{-1}N^{-2}(1-\pi_{i})\pi_{i}^{-2}$.\textcolor{black}{{} } The $k$th replication of $\hat{\beta}$, $\hat{\beta}^{(k)}$, can be viewed as one realization of $\hat{\beta}^{*}$, which is the solution to the estimating equation \begin{equation} S_{N}^{*}(\beta)=n^{1/2}\sum_{i\in A}\omega_{i}^{*}u_{i}\delta_{i}g(x_{i};\beta)\{y_{i}-m(x_{i};\beta)\}=0.\label{eq:pesdo score-1-1} \end{equation} Let $P^{*}$ be the distribution of \textcolor{black}{$z_{i}^{*}=(\omega_{i}^{*}u_{i}x_{i},\omega_{i}^{*}u_{i}y_{i},\omega_{i}^{*}u_{i}\delta_{i},\omega_{i}^{*}u_{i}I_{i})$}, for $i=1,\ldots,N$, given the observed data induced by bootstrap resampling satisfying \begin{eqnarray*} E^{*}\{S_{N}^{*}(\hat{\beta})\} & = & n^{1/2}E^{*}\left[\sum_{i\in A}\omega_{i}^{*}u_{i}\delta_{i}g(x_{i};\hat{\beta})\{y_{i}-m(x_{i};\hat{\beta})\}\right]\\ & = & \frac{n^{1/2}}{N}\sum_{i\in A}\frac{1}{\pi_{i}}\delta_{i}g(x_{i};\hat{\beta})\{y_{i}-m(x_{i};\hat{\beta})\}=0, \end{eqnarray*} and \begin{eqnarray*} & & E^{*}\left\{ S_{N}^{*}(\hat{\beta})S_{N}^{*}(\hat{\beta})^{\mathrm{\scriptscriptstyle T}}\right\} \\ & = & E^{*}\left[\left\{ S_{N}^{*}(\hat{\beta})-S_{N}(\hat{\beta})\right\} \left\{ S_{N}^{*}(\hat{\beta})-S_{N}(\hat{\beta})\right\} 
^{\mathrm{\scriptscriptstyle T}}\right]\\ & = & nE^{*}\left[\sum_{i\in A}\left(\omega_{i}^{*}u_{i}-\frac{1}{N\pi_{i}}\right)^{2}\delta_{i}g(x_{i};\hat{\beta})g(x_{i};\hat{\beta})^{\mathrm{\scriptscriptstyle T}}\{y_{i}-m(x_{i};\hat{\beta})\}^{2}\right] \end{eqnarray*} \begin{eqnarray*} & = & \frac{n}{N^{2}}\sum_{i\in A}\frac{1-\pi_{i}}{\pi_{i}^{2}}\delta_{i}g(x_{i};\hat{\beta})g(x_{i};\hat{\beta})^{\mathrm{\scriptscriptstyle T}}\{y_{i}-m(x_{i};\hat{\beta})\}^{2}. \end{eqnarray*} We consider an auxiliary parametric model $P^{\beta}$ defined locally around $\hat{\beta}$ with a density \begin{equation} \frac{\exp\left\{ n^{1/2}(\beta-\hat{\beta})^{\mathrm{\scriptscriptstyle T}} {\tau}_{\beta^{*}}V_{s}^{-1}S_{N}^{*}(\hat{\beta})-2^{-1}n(\beta-\hat{\beta})^{\mathrm{\scriptscriptstyle T}}\Lambda^{-1}(\beta-\hat{\beta})\right\} }{E^{*}\left[\exp\left\{ n^{1/2}(\beta-\hat{\beta})^{\mathrm{\scriptscriptstyle T}} {\tau}_{\beta^{*}}V_{s}^{-1}S_{N}^{*}(\hat{\beta})-2^{-1}n(\beta-\hat{\beta})^{\mathrm{\scriptscriptstyle T}}\Lambda^{-1}(\beta-\hat{\beta})\right\} \right]}.\label{eq:par-1} \end{equation} Consider sequences that are local to $\hat{\beta}$, $\beta_{N}^{*}=\hat{\beta}+n^{-1/2}h$, indexed by $N$\textcolor{black}{, and $z_{i}^{*}$, for $i=1,\ldots,N$,} with the local shift $P^{\beta_{N}^{*}}$. 
We make the following regularity assumptions: \begin{assumption}\label{asump:leCam3rd-1}(i) Model (\ref{eq:par-1}) is regular; (ii) under $P^{\beta_{N}^{*}}$: $S_{N}^{*}(\beta_{N}^{*})\rightarrow {\mathcal{N}}(0,V_{s})$ in distribution, as $n\rightarrow\infty$; (iii) $n^{1/2}(\hat{\beta}^{*}-\beta_{N}^{*})= {\tau}_{\beta^{*}}^{-1}S_{N}^{*}(\beta_{N}^{*})+o_{p}(1)$; (iv) for all bounded continuous functions $h(z_{i}^{*})$, the conditional expectation $E_{\beta_{N}^{*}}^{*}\{h(z_{i}^{*})\}$ converges in distribution to $E_{\hat{\beta}}^{*}\{h(z_{i}^{*})\}$, where $E_{\beta_{N}^{*}}$ is the expectation with respect to $P^{\beta_{N}^{*}}$. \end{assumption} Under (\ref{eq:par-1}), the likelihood ratio under $P^{\beta_{N}^{*}}$ is \begin{eqnarray*} \log(dP^{\hat{\beta}}/dP^{\beta_{N}^{*}}) & = & -h^{\mathrm{\scriptscriptstyle T}} {\tau}_{\beta^{*}}V_{s}^{-1}S_{N}^{*}(\hat{\beta})+\frac{1}{2}h^{\mathrm{\scriptscriptstyle T}} {\tau}_{\beta^{*}}V_{s}^{-1} {\tau}_{\beta^{*}}h+o_{p}(1)\\ & = & -h^{\mathrm{\scriptscriptstyle T}} {\tau}_{\beta^{*}}V_{s}^{-1}S_{N}^{*}(\beta_{N}^{*})-\frac{1}{2}h^{\mathrm{\scriptscriptstyle T}} {\tau}_{\beta^{*}}V_{s}^{-1} {\tau}_{\beta^{*}}h+o_{p}(1), \end{eqnarray*} where the second equality follows by the Taylor expansion of $S_{N}^{*}(\hat{\beta})$ at $\beta_{N}^{*}$. 
The $k$th replication of $\hat{\mu}_{ {\mathrm{PMM}}}(\hat{\beta})$, $\hat{\mu}_{ {\mathrm{PMM}}}^{(k)}(\hat{\beta}^{(k)})$, can be viewed as one realization of \begin{equation} \hat{\mu}_{ {\mathrm{PMM}}}^{*}(\hat{\beta}^{*})=\sum_{i\in A}\omega_{i}^{*}u_{i}[m(x_{i};\hat{\beta}^{*})+\delta_{i}(1+k_{\hat{\beta}^{*},i})\{y_{i}-m(x_{i};\hat{\beta}^{*})\}].\label{eq:k-th rep-1} \end{equation} We can derive that under $P^{\beta_{N}^{*}}$, the sequence $[\begin{array}{c} n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}^{*}(\beta_{N}^{*})-\hat{\mu}_{ {\mathrm{PMM}}}(\beta_{N}^{*})\}\end{array}$ $n^{1/2}(\hat{\beta}^{*}-\beta_{N}^{*})^{\mathrm{\scriptscriptstyle T}}$ $\begin{array}{c} \log(dP^{\hat{\beta}}/dP^{\beta_{N}^{*}})]^{\mathrm{\scriptscriptstyle T}}\end{array}$ has the same limiting distribution as in (\ref{eq:(11)}). Then, following the same argument in the proof of Theorem 2, we can obtain that the asymptotic conditional variance of $n^{1/2}\hat{\mu}_{ {\mathrm{PMM}}}^{*}(\hat{\beta}^{*})$, given the observed data, is $V_{2}$. It remains to show that, under $P^{\beta_{N}^{*}}$ given the observed data:{\small{} \begin{equation} \left(\begin{array}{c} n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}^{*}(\beta_{N}^{*})-\hat{\mu}_{ {\mathrm{PMM}}}(\beta_{N}^{*})\}\\ S_{N}^{*}(\beta_{N}^{*}) \end{array}\right)\rightarrow {\mathcal{N}}\left\{ \left(\begin{array}{c} 0\\ 0 \end{array}\right),\left(\begin{array}{cc} V_{1} & \gamma_{1}^{T}\\ \gamma_{1} & V_{s} \end{array}\right)\right\} \label{eq:dist2} \end{equation} }in distribution, as $n\rightarrow\infty$. 
To prove (\ref{eq:dist2}), given the observed data, consider the linear combination $c_{1}n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}^{*}(\beta_{N}^{*})-\hat{\mu}_{ {\mathrm{PMM}}}(\beta_{N}^{*})\}+c_{2}^{\mathrm{\scriptscriptstyle T}}S_{N}^{*}(\beta_{N}^{*})$, which has the same limiting distribution as \begin{eqnarray*} C_{N}^{*} & = & c_{1}n^{1/2}\sum_{i=1}^{N}I_{i}\left(\omega_{i}^{*}u_{i}-\frac{1}{N\pi_{i}}\right)m(x_{i};\beta_{N}^{*})\\ & & +c_{1}n^{1/2}\sum_{i=1}^{N}I_{i}\left(\omega_{i}^{*}u_{i}-\frac{1}{N\pi_{i}}\right)\delta_{i}(1+k_{\beta_{N}^{*},i})\{y_{i}-m(x_{i};\beta_{N}^{*})\}\\ & & +c_{2}^{\mathrm{\scriptscriptstyle T}}n^{1/2}\sum_{i=1}^{N}I_{i}\left(\omega_{i}^{*}u_{i}-\frac{1}{N\pi_{i}}\right)\delta_{i}g(x_{i};\beta_{N}^{*})\{y_{i}-m(x_{i};\beta_{N}^{*})\}. \end{eqnarray*} This is because under $P^{\beta_{N}^{*}}$, the extra term in $C_{N}^{*}$ compared with $c_{1}n^{1/2}\{\hat{\mu}_{ {\mathrm{PMM}}}^{*}(\beta_{N}^{*})-\hat{\mu}_{ {\mathrm{PMM}}}(\beta_{N}^{*})\}+c_{2}^{\mathrm{\scriptscriptstyle T}}S_{N}^{*}(\beta_{N}^{*})$ is \begin{eqnarray*} & & n^{1/2}\sum_{i=1}^{N}\frac{I_{i}}{N\pi_{i}}\delta_{i}g(x_{i};\beta_{N}^{*})\{y_{i}-m(x_{i};\beta_{N}^{*})\}\\ & = & \frac{n^{1/2}}{N}\sum_{i=1}^{N}\frac{I_{i}}{\pi_{i}}\delta_{i}g(x_{i};\hat{\beta})\{y_{i}-m(x_{i};\hat{\beta})\}+O_{p}(\beta_{N}^{*}-\hat{\beta})\\ & = & 0+O_{p}(n^{-1/2})=o_{p}(1). \end{eqnarray*} We analyze $C_{N}^{*}$ using martingale theory. 
First, we rewrite $C_{N}^{*}=\sum_{k=1}^{N}\xi_{N,k}^{*},$ where \begin{eqnarray*} \xi_{N,k}^{*} & = & c_{1}n^{1/2}I_{k}\left(\omega_{k}^{*}u_{k}-\frac{1}{N\pi_{k}}\right)m(x_{k};\beta_{N}^{*})\\ & & +c_{1}n^{1/2}I_{k}\left(\omega_{k}^{*}u_{k}-\frac{1}{N\pi_{k}}\right)\delta_{k}(1+k_{\beta_{N}^{*},k})\{y_{k}-m(x_{k};\beta_{N}^{*})\} \end{eqnarray*} \begin{eqnarray*} & & +c_{2}^{\mathrm{\scriptscriptstyle T}}n^{1/2}I_{k}\left(\omega_{k}^{*}u_{k}-\frac{1}{N\pi_{k}}\right)\delta_{k}g(x_{k};\beta_{N}^{*})\{y_{k}-m(x_{k};\beta_{N}^{*})\}, \end{eqnarray*} for $1\leq k\leq N$. Consider the $\sigma$-fields \[ {\mathcal{F}}_{N,k}^{*}=\sigma\{x_{1},\ldots,x_{N},I_{1},\ldots,I_{N},\delta_{1},\ldots,\delta_{N},y_{1},\ldots,y_{N},\omega_{1}^{*}u_{1},\ldots,\omega_{k}^{*}u_{k}\} \] for $1\leq k\leq N$. Then, $\{\sum_{k=1}^{i}\xi_{N,k}^{*}, {\mathcal{F}}_{N,i}^{*},1\leq i\leq N\}$ is a martingale for each $N\geq1$. 
As a result, we obtain that under $P^{\beta_{N}^{*}}$, $C_{N}^{*}\rightarrow {\mathcal{N}}(0,\tilde{\sigma}^{2})$ in distribution, as $n\rightarrow\infty$, where{\small{} \begin{eqnarray*} \tilde{\sigma}^{2} & = & {\mathrm{plim}}\sum_{k=1}^{N}E_{\beta_{N}^{*}}^{*}(\xi_{N,k}^{*2}\mid {\mathcal{F}}_{N,k-1})\\ & = & c_{1}^{2} {\mathrm{plim}} n\sum_{k=1}^{N}E_{\beta_{N}^{*}}^{*}\left[\left\{ I_{k}\left(\omega_{k}^{*}u_{k}-\frac{1}{N\pi_{k}}\right)m(x_{k};\beta_{N}^{*})\right\} ^{2}\mid {\mathcal{F}}_{N,k-1}\right]\\ & & +c_{1}^{2} {\mathrm{plim}} n\sum_{k=1}^{N}E_{\beta_{N}^{*}}^{*}\left(\left[I_{k}\left(\omega_{k}^{*}u_{k}-\frac{1}{N\pi_{k}}\right)\delta_{k}(1+k_{\beta_{N}^{*},k})\{y_{k}-m(x_{k};\beta_{N}^{*})\}\right]^{2}\mid {\mathcal{F}}_{N,k-1}\right)\\ & & +2c_{2}^{\mathrm{\scriptscriptstyle T}} {\mathrm{plim}} n\sum_{k=1}^{N}E_{\beta_{N}^{*}}^{*}\left[I_{k}\left(\omega_{k}^{*}u_{k}-\frac{1}{N\pi_{k}}\right)^{2}\delta_{k}(1+k_{\beta_{N}^{*},k})g(x_{k};\beta_{N}^{*})\{y_{k}-m(x_{k};\beta_{N}^{*})\}^{2}c_{1}\mid {\mathcal{F}}_{N,k-1}\right]\\ & & +c_{2}^{\mathrm{\scriptscriptstyle T}} {\mathrm{plim}} n\sum_{k=1}^{N}E_{\beta_{N}^{*}}^{*}\left[I_{k}\left(\omega_{k}^{*}u_{k}-\frac{1}{N\pi_{k}}\right)^{2}\delta_{k}g(x_{k};\beta_{N}^{*})g(x_{k};\beta_{N}^{*})^{\mathrm{\scriptscriptstyle T}}\{y_{k}-m(x_{k};\beta_{N}^{*})\}^{2}\mid {\mathcal{F}}_{N,k-1}\right]c_{2}\\ & = & c_{1}^{2} {\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}\frac{I_{k}(1-\pi_{k})}{\pi_{k}^{2}}m(x_{k};\hat{\beta})^{2}+c_{1}^{2} {\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}\frac{I_{k}(1-\pi_{k})}{\pi_{k}^{2}}\delta_{k}(1+k_{\hat{\beta},k})^{2}\{y_{k}-m(x_{k};\hat{\beta})\}^{2}\\ & & +2c_{2}^{\mathrm{\scriptscriptstyle T}} 
{\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}\frac{I_{k}(1-\pi_{k})}{\pi_{k}^{2}}\delta_{k}(1+k_{\hat{\beta},k})g(x_{k};\hat{\beta})\{y_{k}-m(x_{k};\hat{\beta})\}^{2}c_{1}\\ & & +c_{2}^{\mathrm{\scriptscriptstyle T}} {\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}\frac{I_{k}(1-\pi_{k})}{\pi_{k}^{2}}\delta_{k}g(x_{k};\hat{\beta})g(x_{k};\hat{\beta})^{\mathrm{\scriptscriptstyle T}}\{y_{k}-m(x_{k};\hat{\beta})\}^{2}c_{2}\\ & = & c_{1}^{2} {\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}\frac{1-\pi_{k}}{\pi_{k}}m(x_{k};\beta^{*})^{2}+c_{1}^{2} {\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}\frac{1-\pi_{k}}{\pi_{k}}\delta_{k}(1+k_{\beta^{*},k})^{2}\sigma^{2}(x_{k})\\ & & +2c_{2}^{\mathrm{\scriptscriptstyle T}} {\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}\frac{1-\pi_{k}}{\pi_{k}}\delta_{k}(1+k_{\beta^{*},k})g(x_{k};\beta^{*})\sigma^{2}(x_{k})c_{1}\\ & & +c_{2}^{\mathrm{\scriptscriptstyle T}} {\mathrm{plim}}\frac{n}{N^{2}}\sum_{k=1}^{N}\frac{1-\pi_{k}}{\pi_{k}}\delta_{k}g(x_{k};\beta^{*})g(x_{k};\beta^{*})^{\mathrm{\scriptscriptstyle T}}\sigma^{2}(x_{k})c_{2}. \end{eqnarray*} }Therefore, by the martingale central limit theorem, conditional on the observed data under $P^{\beta_{N}^{*}},$ (\ref{eq:dist2}) follows. \end{document}
\begin{document} \title{Construction of scalar and vector finite element families on polygonal and polyhedral meshes} \author{Andrew Gillette\thanks{Department of Mathematics, University of Arizona, Tucson, AZ, USA, {\tt [email protected]}} \and Alexander Rand\thanks{CD-adapco, Austin, TX, USA, {\tt [email protected]}} \and Chandrajit Bajaj\thanks{Department of Computer Science, Institute for Computational Engineering and Sciences, University of Texas at Austin, Austin, TX, USA, {\tt [email protected]}} } \maketitle \begin{abstract} We combine theoretical results from polytope domain meshing, generalized barycentric coordinates, and finite element exterior calculus to construct scalar- and vector-valued basis functions for conforming finite element methods on generic convex polytope meshes in dimensions 2 and 3. Our construction recovers well-known bases for the lowest order N{\'e}d{\'e}lec, Raviart-Thomas, and Brezzi-Douglas-Marini elements on simplicial meshes and generalizes the notion of Whitney forms to non-simplicial convex polygons and polyhedra. We show that our basis functions lie in the correct function space with regards to global continuity and that they reproduce the requisite polynomial differential forms described by finite element exterior calculus. We present a method to count the number of basis functions required to ensure these two key properties. \end{abstract} \section{Introduction} In this work, we join and expand three threads of research in the analysis of modern finite element methods: polytope domain meshing, generalized barycentric coordinates, and families of finite-dimensional solution spaces characterized by finite element exterior calculus. It is well-known that on simplicial meshes, standard barycentric coordinates provide a local basis for the lowest-order $H^1$-conforming scalar-valued finite element spaces, commonly called the Lagrange elements. 
Further, local bases for the lowest-order vector-valued Brezzi-Douglas-Marini~\cite{BDM85}, Raviart-Thomas~\cite{RT1977}, and N{\'e}d{\'e}lec~\cite{BDDM87,N1980,N1986} finite element spaces on simplices can also be defined in a canonical fashion from an associated set of standard barycentric functions. Here, we use generalized barycentric coordinates in an analogous fashion on meshes of convex polytopes, in dimensions 2 and 3, to construct local bases with the same global continuity and polynomial reproduction properties as their simplicial counterparts. We have previously analyzed linear order, scalar-valued methods on polygonal meshes~\cite{GRB2011,RGB2011b} using four different types of generalized barycentric coordinates: Wachspress~\cite{W1975,W2011}, Sibson~\cite{F1990,S1980}, harmonic~\cite{C2008,JMRGS07,MKBWG2008}, and mean value~\cite{F2003,FHK2006,FKR2005}. The analysis was extended by Gillette, Floater and Sukumar in the case of Wachspress coordinates to convex polytopes in any dimension~\cite{FGS2013}, based on work by Warren and colleagues~\cite{JSWD2005,W1996,WSHD2007}. We have also shown how taking pairwise products of generalized barycentric coordinates can be used to construct quadratic order methods on polygons~\cite{RGB2011a}. Applications of generalized barycentric coordinates to finite element methods have primarily focused on scalar-valued PDE problems~\cite{MP2008,RS2006,SM2006,ST2004,WBG07}. 
\begin{table} \centering \begin{tabular}{c|c|c} ~n & k & functions\\ \hline ~2 & 0 & $\lambda_i$ \\ & 1 & $\lnl ij$\\ & & $\mathcal{W}_{ij}$ \\ & & $\textnormal{rot}~\lnl ij$ \\ & & $\textnormal{rot}~\mathcal{W}_{ij}$ \\ & 2 & $\lnldrnl ijk$ \\ & & $\mathcal{W}_{ijk}$ \\ \end{tabular} \qquad \qquad \begin{tabular}{c|c|c} ~n & k & functions\\ \hline ~3 & 0 & $\lambda_i$ \\ & 1 & $\lnl ij$ \\ & & $\mathcal{W}_{ij}$ \\ & 2 & $\lnlxnl ijk$ \\ & & $\mathcal{W}_{ijk}$ \\ & 3 & $\lnldnlxnl ijk\ell$ \\ & & $\mathcal{W}_{ijk\ell}$ \\ \end{tabular} \caption{For meshes of convex $n$-dimensional polytopes in $\R^n$, $n=2$ or $3$, computational basis functions for each differential form order $0\leq k\leq n$ are listed. The notation is defined in Section~\ref{sec:bkgd}. } \label{tab:comp-bases} \end{table} Our expansion in this paper to vector-valued methods is inspired by Whitney differential forms, first defined in~\cite{W1957}. Bossavit recognized that Whitney forms could be used to construct basis functions for computational electromagnetics~\cite{B1988a}. The theory of finite element exterior calculus unified subsequent research in this area~\cite{AFW2006}. In particular, Arnold, Falk and Winther showed how functions like those appearing in Table~\ref{tab:comp-bases} can be used to build spanning sets and bases for any of the ${\mathcal P}_r\Lambda^k$ and ${\mathcal P}_r^-\Lambda^k$ spaces on simplices~\cite{AFW2009}. The FENiCS Project~\cite{Aetal2015} has implemented these functions on simplices as part of a broadly applicable open source finite element software package. Some prior work has explored the possibility of Whitney functions over non-simplicial elements in specific cases of rectangular grids~\cite{G2002}, square-base pyramids~\cite{GH1999}, and prisms~\cite{B2008}.
Other authors have examined the ability of generalized Whitney functions to recover constant-valued forms in certain cases~\cite{ESW2006,KRS2011}, whereas here we show their ability to reproduce \textbf{\textit{all}} the elements of the spaces denoted ${\mathcal P}_1^-\Lambda^k$ in finite element exterior calculus. Gillette and Bajaj considered the use of generalized Whitney forms on polytope meshes defined by duality from a simplicial mesh~\cite{GB2010,GB2011}, which illustrated potential benefits to discrete exterior calculus~\cite{H2003}, computational magnetostatics, and Darcy flow modeling. Recent work~\cite{MRS2014} has also shown generalized barycentric coordinates to be effective when used in tandem with virtual element methods~\cite{dVBCMMR2013}, which are developed in a similar fashion to traditional mimetic methods~\cite{LMS2014}. \textbf{e}gin{table}[h] \centering \textbf{e}gin{tabular}{c|c|l|l} n & k & global continuity & polynomial reproduction\\ \hline 2 & 0 & $H^1(\mesh)$ & ${\mathcal P}_1 \Lambda^0(\mesh)$ \\ & 1 & $H(\textnormal{curl\,},\mesh)$, by Theorem~\ref{thm:hcurl-conf} & ${\mathcal P}_1 \Lambda^1(\mesh)$, by Theorem~\ref{thm:pr-lnl} \\ & & $H(\textnormal{curl\,},\mesh)$, by Theorem~\ref{thm:hcurl-conf} & ${\mathcal P}_1^- \Lambda^1(\mesh)$, by Theorem~\ref{thm:pr-whit-ij}\\ & & $H(\textnormal{div\,},\mesh)$, see Remark~\ref{rmk:rot-for-cty} & ${\mathcal P}_1 \Lambda^1(\mesh)$, by Corollary~\ref{cor:pr-lrnl} \\ & & $H(\textnormal{div\,},\mesh)$, see Remark~\ref{rmk:rot-for-cty} & ${\mathcal P}_1^- \Lambda^1(\mesh)$, by Corollary~\ref{cor:pr-rotWhit} \\ & 2 & none (piecewise linear) & ${\mathcal P}_1 \Lambda^2(\mesh)$, by Theorem~\ref{thm:pr-lnldrnl} \\ & & none (piecewise constant) & ${\mathcal P}_1^- \Lambda^2(\mesh)$, see Remark~\ref{rmk:topdim} \\ \hline 3 & 0 & $H^1(\mesh)$ & ${\mathcal P}_1 \Lambda^0(\mesh)$ \\ & 1 & $H(\textnormal{curl\,},\mesh)$, by Theorem~\ref{thm:hcurl-conf} & ${\mathcal P}_1 \Lambda^1(\mesh)$, by 
Theorem~\ref{thm:pr-lnl} \\ & & $H(\textnormal{curl\,},\mesh)$, by Theorem~\ref{thm:hcurl-conf} & ${\mathcal P}_1^- \Lambda^1(\mesh)$, by Theorem~\ref{thm:pr-whit-ij}\\ & 2 & $H(\textnormal{div\,},\mesh)$, by Theorem~\ref{thm:hdiv-conf} & ${\mathcal P}_1 \Lambda^2(\mesh)$, by Theorem~\ref{thm:pr-lnlxnl}\\ & & $H(\textnormal{div\,},\mesh)$, by Theorem~\ref{thm:hdiv-conf} & ${\mathcal P}_1^- \Lambda^2(\mesh)$, by Theorem~\ref{thm:pr-whit-ijk}\\ & 3 & none (piecewise linear) & ${\mathcal P}_1 \Lambda^3(\mesh)$, see Remark~\ref{rmk:topdim} \\ & & none (piecewise constant) & ${\mathcal P}_1^- \Lambda^3(\mesh)$, see Remark~\ref{rmk:topdim} \\ \hline \end{tabular} \caption{Summary of the global continuity and polynomial reproduction properties of the spaces considered.} \label{tab:results-summary} \end{table} Using the bases defined in Table~\ref{tab:comp-bases}, our main results are summarized in Table~\ref{tab:results-summary}. On a mesh of convex $n$-dimensional polytopes in $\R^n$ with $n=2$ or $3$, we construct computational basis functions associated to the polytope elements for each differential form order $k$ as indicated. Each function is built from generalized barycentric coordinates, denoted $\lambda_i$, and their gradients; formulae for the Whitney-like functions, denoted $\mathcal{W}$, are given in Section~\ref{subset:whit-forms}. In the vector-valued cases ($0<k<n$), we prove that the functions agree on tangential or normal components at inter-element boundaries, providing global continuity in $H(\textnormal{curl})$ or $H(\textnormal{div})$. The two families of polynomial differential forms that are reproduced, ${\mathcal P}_r\Lambda^k$ and ${\mathcal P}_r^-\Lambda^k$, were shown to recover and generalize the classical simplicial finite element spaces mentioned previously, via the theory of finite element exterior calculus~\cite{AFW2006,AFW2010}. The outline of the paper is as follows. 
In Section~\ref{sec:bkgd}, we describe relevant theory and prior work in the areas of finite element exterior calculus, generalized barycentric coordinates, and Whitney forms. In Section~\ref{sec:global-cnty}, we show how the functions listed in Table~\ref{tab:comp-bases} can be used to build piecewise-defined functions with global continuity in $H^1$, $H(\textnormal{curl})$ or $H(\textnormal{div})$, as indicated. In Section~\ref{sec:poly-repro}, we show how these same functions can reproduce the requisite polynomial differential forms from ${\mathcal P}_1\Lambda^k$ or ${\mathcal P}_1^-\Lambda^k$, as indicated in Table~\ref{tab:comp-bases}, by exhibiting explicit linear combinations whose coefficients depend only on the location of the vertices of the mesh. In Section~\ref{sec:polyg-fams}, we count the basis functions constructed by our approach on generic polygons and polyhedra and explain how the size of the basis could be reduced in certain cases. \section{Background and prior work} \label{sec:bkgd} \subsection{Spaces from Finite Element Exterior Calculus} \label{subsec:feec} Finite element spaces can be broadly classified according to three parameters: $n$, the spatial dimension of the domain, $r$, the order of error decay, and $k$, the differential form order of the solution space. 
The $k$ parameter can be understood in terms of the classical finite element sequence for a domain $\Omega\subset\R^n$ with $n=2$ or $3$, commonly written as \[\xymatrix @R=.02in{ n=2: & {H^1} \ar[rr]^-{\text{grad}} && {H(\textnormal{curl})} \ar@{<->}[rr]^-{\text{rot}} && {H(\textnormal{div})} \ar[rr]^-{\text{div}} && {L^2}\\ n=3: & {H^1} \ar[rr]^-{\text{grad}} && {H(\textnormal{curl})} \ar[rr]^-{\text{curl}} && {H(\textnormal{div})} \ar[rr]^-{\text{div}} && {L^2} }\] Note that for $n=2$, given $\vec F(x,y) := \twovec{F_1(x,y)}{F_2(x,y)}$, we use the definitions: \[\textnormal{curl\,}\,\vec F := \frac{\partial F_1}{\partial y}- \frac{\partial F_2}{\partial x},\quad\textnormal{rot}\,\vec F := \begin{bmatrix} 0 & {-1} \\ 1 & 0 \end{bmatrix}\vec F\quad\text{and}\quad \textnormal{div\,}\vec F := \frac{\partial F_1}{\partial x}+ \frac{\partial F_2}{\partial y}.\] \text{}\\ Thus, in $\R^2$, we have both $\textnormal{curl\,}\nabla\phi =0$ and $\textnormal{div\,}\textnormal{rot}\,\nabla\phi=0$ for any $\phi\in H^2$. Put differently, $\textnormal{rot}$ gives an isomorphism from $H(\textnormal{curl})$ to $H(\textnormal{div})$ in $\R^2$. In some cases we will write $H(\textnormal{curl\,},\Omega)$ and $H(\textnormal{div\,},\Omega)$ if we wish to emphasize the domain in consideration. In the terminology of differential topology, the applicable sequence is described more simply as the $L^2$ deRham complex of $\Omega$. 
The spaces are re-cast as differential form spaces $H\Lambda^k$ and the operators as instances of the exterior derivative $d_k$, yielding \[\xymatrix @R=.02in{ n=2: & {H\Lambda^0} \ar[rr]^-{d_0} && {H\Lambda^1} \ar[rr]^-{d_1} && {H\Lambda^2} \\ n=3: & {H\Lambda^0} \ar[rr]^-{d_0} && {H\Lambda^1} \ar[rr]^-{d_1} && {H\Lambda^2} \ar[rr]^-{d_2} && {H\Lambda^3} }\] Finite element methods seek approximate solutions to a PDE in finite dimensional subspaces $\Lambda^k_h$ of the $H\Lambda^k$ spaces, where $h$ denotes the maximum diameter of a domain element associated to the subspace. The theory of finite element exterior calculus classifies two families of suitable choices of $\Lambda^k_h$ spaces on meshes of simplices, denoted ${\mathcal P}_r\Lambda^k$ and ${\mathcal P}_r^-\Lambda^k$~\cite{AFW2006,AFW2010}. The space ${\mathcal P}_r\Lambda^k$ is defined as ``those differential forms which, when applied to a constant vector field, have the indicated polynomial dependence''~\cite[p. 328]{AFW2010}. This can be interpreted informally as the set of differential $k$ forms with polynomial coefficients of total degree at most $r$. The space ${\mathcal P}_r^-\Lambda^k$ is then defined as the direct sum \textbf{e}gin{equation} \label{eq:prminus-decomp} {\mathcal P}_r^-\Lambda^k := {\mathcal P}_{r-1}\Lambda^k \oplus \kappa{\mathcal H}_{r-1}\Lambda^{k+1}, \end{equation} where $\kappa$ is the Koszul operator and ${\mathcal H}_r$ denotes homogeneous polynomials of degree $r$~\cite[p. 331]{AFW2010}. We will use the coordinate formulation of $\kappa$, given in~\cite[p. 329]{AFW2010} as follows. Let $\omega\in\Lambda^k$ and suppose that it can be written in local coordinates as $\omega_x=a(x)dx_{\sigma_1}\wedge\cdots\wedge dx_{\sigma_k}$. 
Then $\kappa\omega$ is written as \textbf{e}gin{equation} \label{eq:def-kappa} (\kappa\omega)_x := \sum_{i=1}^k (-1)^{i+1}a(x)x_{\sigma(i)}dx_{\sigma_1}\wedge\cdots\wedge\widehat{dx_{\sigma_i}}\wedge\cdots\wedge dx_{\sigma_k}, \end{equation} where $\wedge$ denotes the wedge product and $\widehat{dx_{\sigma_i}}$ means that the term is omitted. For example, let $n=3$ and write $x,y,z$ for $x_1,x_2,x_3$. Then $dydz\in{\mathcal H}_0\Lambda^2$ and $\kappa dydz = ydz-zdy\in{\mathcal H}_1\Lambda^1$. We summarize the relationship between the spaces ${\mathcal P}_1\Lambda^k$, ${\mathcal P}_1^-\Lambda^k$ and certain well-known finite element families in dimension $n=2$ or $3$ in Table~\ref{tab:fe-spaces}. \textbf{e}gin{table}[ht] \textbf{e}gin{center} \textbf{e}gin{tabular}{c|c|c|c|l|c} n & k & dim & space & classical description & reference\\ \hline 2 & 0 & 3 & ${\mathcal P}_1 \Lambda^0(\mathcal{T})$ & Lagrange, degree $\leq 1$ &\\ & & 3 & ${\mathcal P}_1^- \Lambda^0(\mathcal{T})$ & Lagrange, degree $\leq 1$ &\\ & 1 & 6 & ${\mathcal P}_1 \Lambda^1(\mathcal{T})$ & Brezzi-Douglas-Marini, degree $\leq 1$ & \cite{BDM85}\\ & & 3 & ${\mathcal P}_1^- \Lambda^1(\mathcal{T})$ & Raviart-Thomas, order $0$ & \cite{RT1977}\\ & 2 & 3 & ${\mathcal P}_1 \Lambda^2(\mathcal{T})$ & discontinuous linear &\\ & & 1 & ${\mathcal P}_1^- \Lambda^2(\mathcal{T})$ & discontinuous piecewise constant &\\ \hline 3 & 0 & 4 & ${\mathcal P}_1 \Lambda^0(\mathcal{T})$ & Lagrange, degree $\leq 1$ &\\ & & 4 & ${\mathcal P}_1^- \Lambda^0(\mathcal{T})$ & Lagrange, degree $\leq 1$ &\\ & 1 & 12 & ${\mathcal P}_1 \Lambda^1(\mathcal{T})$ & N{\'e}d{\'e}lec~second kind $H(\textnormal{curl})$, degree $\leq 1$ & \cite{N1986,BDDM87}\\ & & 6 & ${\mathcal P}_1^- \Lambda^1(\mathcal{T})$ & N{\'e}d{\'e}lec~first kind $H(\textnormal{curl})$, order $0$ & \cite{N1980}\\ & 2 & 12 & ${\mathcal P}_1 \Lambda^2(\mathcal{T})$ & N{\'e}d{\'e}lec~second kind $H(\textnormal{div})$, degree $\leq 1$ &\cite{N1986,BDDM87}\\ & & 4 & ${\mathcal 
P}_1^- \Lambda^2(\mathcal{T})$ & N{\'e}d{\'e}lec~first kind $H(\textnormal{div})$, order $0$ & \cite{N1980}\\ & 3 & 4 & ${\mathcal P}_1 \Lambda^3(\mathcal{T})$ & discontinuous linear & \\ & & 1 & ${\mathcal P}_1^- \Lambda^3(\mathcal{T})$ & discontinuous piecewise constant & \\ \hline \end{tabular} \end{center} \caption{Correspondence between ${\mathcal P}_1\Lambda^k(\mathcal{T})$, ${\mathcal P}_1^-\Lambda^k(\mathcal{T})$ and common finite element spaces associated to a simplex $\mathcal{T}$ of dimension $n$. Further explanation of these relationships can be found in~\cite{AFW2006,AFW2010}. Our constructions, when reduced to simplices, recover known local bases for each of these spaces.} \label{tab:fe-spaces} \end{table} A crucial property of ${\mathcal P}_r\Lambda^k$ and ${\mathcal P}_r^-\Lambda^k$ is that each includes in its span a sufficient number of polynomial differential $k$-forms to ensure an \textit{a priori} error estimate of order $r$ in $H\Lambda^k$ norm. In the classical description of finite element spaces, this approximation power is immediate; any computational or `local' basis used for implementation of these spaces must, by definition, span the requisite polynomial differential forms. The main results of this paper are proofs that generalized barycentric coordinates can be used as local bases on polygonal and polyhedral element geometries to create analogues to the lowest order ${\mathcal P}_r\Lambda^k$ and ${\mathcal P}_r^-\Lambda^k$ spaces with the same polynomial approximation power and global continuity properties. 
In the remainder of the paper, we will frequently use standard vector proxies~\cite{AMR1988} in place of differential form notation, as indicated here: \textbf{e}gin{align*} \twovecT {u_1}{u_2} & \;\;\longleftrightarrow \;\; u_1dx_1+u_2dx_2\in\Lambda^1(\R^2), \\ \threevecT {v_1}{v_2}{v_3} & \;\;\longleftrightarrow\;\; v_1dx_1+v_2dx_2+v_3dx_3\in\Lambda^1(\R^3),\\ \threevecT {w_1}{w_2}{w_3} & \;\;\longleftrightarrow\;\; w_1dx_2dx_3+w_2dx_3dx_1+w_3dx_1dx_2\in\Lambda^2(\R^3). \end{align*} \subsection{Generalized Barycentric Coordinates} \label{subsec:gbcs} Let $\mathfrak m$ be a convex $n$-dimensional polytope in $\R^n$ with vertex set $\{\textbf{v}_i\}$, written as column vectors. A set of non-negative functions $\{\lambda_i\}:\mathfrak m\rightarrow\R$ are called \textbf{generalized barycentric coordinates} on $\mathfrak m$ if for any linear function $L:\mathfrak m\rightarrow\R$, we can write \textbf{e}gin{equation} \label{eq:lin-comp} L = \sum_i L(\textbf{v}_i)\lambda_i, \end{equation} We will use the notation $\mathbb I$ to denote the $n\times n$ identity matrix and $\textbf{x}$ to denote the vector ${\textbf{e}gin{bmatrix} x_1 & x_2 & \cdots & x_n\end{bmatrix}}^T$ where $x_i$ is the $i$th coordinate in $\R^n$. We have the following useful identities: \textbf{e}gin{align} \sum_i\lambda_i(\textbf{x}) &= 1 \label{eq:pof1} \\ \sum_i\textbf{v}_i\lambda_i(\textbf{x}) &= \textbf{x} \label{eq:pofx}\\ \sum_i\nabla\lambda_i (\textbf{x})& = 0 \label{eq:gradsum0} \\ \sum_i\textbf{v}_i\nabla\lambda_i^T(\textbf{x}) & = \mathbb I \label{eq:vgradsumI} \end{align} Equations (\ref{eq:pof1}) and (\ref{eq:pofx}) follow immediately from (\ref{eq:lin-comp}) while (\ref{eq:gradsum0}) and (\ref{eq:vgradsumI}) follow by taking the gradient of equations (\ref{eq:pof1}) and (\ref{eq:pofx}), respectively. 
If $\textbf{x}$ is constrained to an $n-1$ dimensional facet of $\mathfrak m$ and the index set of the summations are limited to those vertices that define $\mathfrak m$, then (\ref{eq:pof1})-(\ref{eq:vgradsumI}) still hold; in particular, this implies that generalized barycentric coordinates on a polyhedron restrict to generalized barycentric coordinates on each of its polygonal faces. As mentioned in the introduction, there are many approaches to defining generalized barycentric coordinates. In regards to applications in finite element methods, the Wachspress coordinates~\cite{W1975,W2011} are commonly used as they are rational functions in both 2D and 3D with explicit formulae; code for their implementation in MATLAB is given in the appendix of~\cite{FGS2013}. Other practical choices of generalized barycentric coordinates for finite elements include mean value~\cite{F2003}, maximum entropy~\cite{HS2008,Su04}, and moving least squares~\cite{MS10}. The results of this work do not rely on any properties of the coordinates other than their non-negativity and linear reproduction property (\ref{eq:lin-comp}). \subsection{Whitney forms} \label{subset:whit-forms} Let $\mathfrak m$ be a convex $n$-dimensional polytope in $\R^n$ with vertex set \textnormal{$\{\textbf{v}_i\}$} and an associated set of generalized barycentric coordinates $\{\lambda_i\}$. Define associated sets of index pairs and triples by \textbf{e}gin{align} E & := \{(i,j) ~:~ \textbf{v}_i,\textbf{v}_j\in\mathfrak m\} \label{eq:Em-def},\\ T & := \{(i,j,k) ~:~ \textbf{v}_i,\textbf{v}_j,\textbf{v}_k\in\mathfrak m\} \label{eq:Tm-def}. \end{align} If $\mathfrak m$ is a \textit{simplex}, the elements of the set \[\left\{\lnl ij - \lnl ji~:~(i,j)\in E\right\}\] are called Whitney 1-forms and are part of a more general construction~\cite{W1957}, which we now present. 
Again, if $\mathfrak m$ is a $\textit{simplex}$, the Whitney $k$-forms are elements of the set \begin{equation} \label{eq:whit-def} \left\{k!\sum_{i=0}^k(-1)^i\; \lambda_{j_i} \;d\lambda_{j_0}\wedge\ldots\wedge\widehat{d\lambda_{j_i}}\wedge\ldots\wedge d\lambda_{j_k}\right\}, \end{equation} where $j_0,\ldots,j_k$ are indices of vertices of $\mathfrak m$. As before, $\wedge$ denotes the wedge product and $\widehat{dx_{\sigma_i}}$ means that the term is omitted. Up to sign, this yields a set of $n+1\choose k+1$ distinct functions and provides a local basis for ${\mathcal P}_1^-\Lambda^k$~\cite{AFW2009}. We now generalize these definitions to the case where $\mathfrak m$ is not necessarily a simplex. For any $(i,j)\in E$, define a generalized Whitney 1-form on $\mathfrak m$ by \begin{align} \label{eq:whit-edge-def} \mathcal{W}_{ij} & := \lambda_i\nabla\lambda_j-\lambda_j\nabla\lambda_i. \end{align} If $n=3$, then for any $(i,j,k)\in T$, define a generalized Whitney 2-form on $\mathfrak m$ by \begin{align} \label{eq:whit-tri-def} \mathcal{W}_{ijk} & := (\mathcal{W}C ijk) + (\mathcal{W}C jki) + (\mathcal{W}C kij). \end{align} Note that $\mathcal{W}_{ii}=0$ and if $i$, $j$, and $k$ are not distinct then $\mathcal{W}_{ijk}=0$. Whitney forms have natural interpretations as vector fields when $k=1$ or $n-1$. Interpolation of vector fields requires less data regularity than the canonical scalar interpolation theory using nodal values. Averaged interpolation developed for scalar spaces~\cite{Cl75,SZ90} has been extended to families of spaces from finite element exterior calculus~\cite{CW08}. Recent results on polygons and polyhedra can be extended to less regular data with average interpolation following the framework in~\cite{Ra12}, based on affine invariance of the coordinates.
\section{Global Continuity Results} \label{sec:global-cnty} We first present results about the global continuity properties of vector-valued functions defined in terms of generalized barycentric coordinates and their gradients over a mesh of $n$-dimensional polytopes in $\R^n$ with $n=2$ or $3$. By `mesh' we mean a cellular complex in which each cell is a polygon (for $n=2$) or polyhedron (for $n=3$); for more on cellular complexes see e.g.~\cite{C2008}. Voronoi meshes are examples of cellular complexes since they are composed of $n$-dimensional polytopes that meet along their $n-1$ dimensional facets. We say that a function is defined `piecewise with respect to a mesh' when the definition of the function on the interior of a mesh element depends only on geometrical properties of the element (as opposed to depending on adjacent elements, for instance). We begin with a general result about global continuity in such a setting.\\ \textbf{e}gin{prop} \label{prop:trace2conf} Fix a mesh $\mesh$ of $n$-dimensional polytopes in $\R^n$ with $n=2$ or $3$. Let $\textnormal{$\textbf{u}$}$ be a vector field defined piecewise with respect to $\mesh$. Let $\mathfrak f$ be a face of codimension 1 with $\textnormal{$\textbf{u}$}_1$, $\textnormal{$\textbf{u}$}_2$ denoting the values of $\textnormal{$\textbf{u}$}$ on $\mathfrak f$ as defined by the two $n$-dimensional mesh elements sharing $\mathfrak f$. Write $\textnormal{$\textbf{u}$}_i = T_\mathfrak f(\textnormal{$\textbf{u}$}_i) + N_\mathfrak f(\textnormal{$\textbf{u}$}_i)$ where $T_\mathfrak f(\textnormal{$\textbf{u}$}_i)$ and $N_\mathfrak f(\textnormal{$\textbf{u}$}_i)$ are the vector projections of $\textnormal{$\textbf{u}$}_i$ onto $\mathfrak f$ and its outward normal, respectively. 
\renewcommand{\labelenumi}{(\roman{enumi}.)} \begin{enumerate} \item If $T_\mathfrak f(\textnormal{$\textbf{u}$}_1)=T_\mathfrak f(\textnormal{$\textbf{u}$}_2)$ for all $\mathfrak f\in \mesh$ then $\textnormal{$\textbf{u}$}\in H(\textnormal{curl\,},\mesh)$. \item If $N_\mathfrak f(\textnormal{$\textbf{u}$}_1)=N_\mathfrak f(\textnormal{$\textbf{u}$}_2)$ for all $\mathfrak f\in \mesh$ then $\textnormal{$\textbf{u}$}\in H(\textnormal{div\,},\mesh)$. \end{enumerate} \end{prop} \text{}\\ The results of Proposition~\ref{prop:trace2conf} are well-known in the finite element community; see e.g.~Ern and Guermond~\cite[Section 1.4]{EG04}.\\ \begin{prop} \label{prop:bctrcfaceprop} Let $\mathfrak m$ be a convex $n$-dimensional polytope in $\R^n$ with vertex set \textnormal{$\{\textbf{v}_i\}_{i\in I}$} and an associated set of generalized barycentric coordinates $\{\lambda_i\}_{i\in I}$. Let $\mathfrak f$ be a face of $\mathfrak m$ of codimension 1 whose vertices are indexed by $J\subsetneq I$. If $k\not\in J$ then $\lambda_k\equiv 0$ on $\mathfrak f$ and $\nabla\lambda_k$ is normal to $\mathfrak f$ on $\mathfrak f$, pointing inward. \end{prop} \begin{proof} Fix a point $\textbf{x}_0\in\mathfrak m$. Observe that $\sum_{i\in I}\textbf{v}_i\lambda_i(\textbf{x}_0)$ is a point in $\mathfrak m$ lying in the interior of the convex hull of those $\textbf{v}_i$ for which $\lambda_i(\textbf{x}_0)>0$, since the $\lambda_i$ are non-negative by definition. By (\ref{eq:pofx}), this summation is equal to $\textbf{x}_0$. Hence, if $\textbf{x}_0\in \mathfrak f$, then $\lambda_k\equiv 0$ on $\mathfrak f$ unless $k\in J$, proving the first claim. The same argument implies that for any $k\not\in J$, $\mathfrak f$ is part of the zero level set of $\lambda_k$. Hence, for $k\not\in J$, $\nabla\lambda_k$ is orthogonal to $\mathfrak f$ on $\mathfrak f$. 
In that case, $\nabla\lambda_k$ points inward since $\lambda_k$ has support inside $\mathfrak m$ but not on the other side of $\mathfrak f$. \end{proof} \begin{figure} \begin{center} \includegraphics[height=.3\textwidth]{fig/l3g2-right} ~\includegraphics[height=.3\textwidth]{fig/l4g1-left} \end{center} \caption{The $H(\textnormal{curl})$ conformity condition of Proposition~\ref{prop:trace2conf} is satisfied automatically by the $\lambda_i\nabla\lambda_j$ functions, as shown in the example above. When the elements are brought together, the vector fields will agree on the projection to the shared edge at any point along the shared edge. Here, $i$ and $j$ are the indices for the vertices at the top and bottom, respectively, of the shared edge. For this example, we used the Wachspress functions to compute the vector functions on each element and MATLAB to visualize the result.} \label{fig:conf} \end{figure} We now show that generalized barycentric coordinates and their gradients defined over individual elements in a mesh of polytopes naturally stitch together to build conforming finite elements with global continuity of the expected kind. Figure~\ref{fig:conf} presents an example of two vector functions agreeing on their tangential projections along a shared edge. To be clear about the context, we introduce notation for generalized barycentric hat functions, defined piecewise over a mesh of polytopes $\{\mathfrak m\}$ by \[\hat\lambda_i(\textbf{x})= \begin{cases} \lambda_i(\textbf{x})~\text{as defined on $\mathfrak m$} & \text{if $\textbf{x}\in\mathfrak m$ and $\textbf{v}_i\in \mathfrak m$;} \\ 0 & \text{if $\textbf{x}\in\mathfrak m$ but $\textbf{v}_i \not\in \mathfrak m$.} \end{cases}\] Note that generalized barycentric coordinates $\lambda_i$ are usually indexed locally on a particular polytope while the $\hat\lambda_i$ require a global indexing of the vertices to consistently identify matching functions across element boundaries. 
Further, $\hat\lambda_i$ is well-defined at vertices and edges of the mesh as any choice of generalized barycentric coordinates on a particular element will give the same value at such points. If $\textbf{x}$ belongs to the interior of shared faces between polyhedra in $\R^3$ (or higher order analogues), $\hat\lambda_i(\textbf{x})$ is well-defined so long as the same \textit{kind} of generalized barycentric coordinates are chosen on each of the incident polyhedra (e.g.\ Wachspress or mean value). Our first result about global continuity concerns functions of the form $\hlnhl ij$, where $i$ and $j$ are indices of vertices belonging to at least one fixed mesh element $\mathfrak m$. Note that the vertices $\textbf{v}_i$ and $\textbf{v}_j$ need not define an edge of $\mathfrak m$.\\ \textbf{e}gin{theorem} \label{thm:hcurl-conf} Fix a mesh $\mesh$ of $n$-dimensional polytopes $\{\mathfrak m\}$ in $\R^n$ with $n=2$ or $3$ and assign some ordering $\textnormal{$\textbf{v}_1,\ldots,\textbf{v}_p$}$ to all the vertices in the mesh. Fix an associated set of generalized barycentric coordinate hat functions $\textnormal{$\hat\lambda_1,\ldots,\hat\lambda_p$}$. Let \[\textnormal{$\textbf{u}$}\in\textnormal{span}~\left\{\hlnhl ij~:~\textnormal{$\exists~\mathfrak m\in\mesh$ such that $\textbf{v}_i,\textbf{v}_j\in\mathfrak m$}\right\}.\] Then $\textnormal{$\textbf{u}$}\in H(\textnormal{curl\,},\mesh)$. \end{theorem} \textbf{e}gin{proof} Following the notation of Proposition~\ref{prop:trace2conf}, it suffices to show that $T_\mathfrak f(\textnormal{$\textbf{u}$}_1)=T_\mathfrak f(\textnormal{$\textbf{u}$}_2)$ for an arbitrary face $\mathfrak f\in \mesh$ of codimension 1. Consider an arbitrary term $c_{ij}\hlnhl ij$ in the linear combination defining $\textnormal{$\textbf{u}$}$. Observe that if $\textbf{v}_i\not\in \mathfrak f$, then by Proposition~\ref{prop:bctrcfaceprop}, $\hat \lambda_i\equiv 0$ on $\mathfrak f$ and hence $\textnormal{$\textbf{u}$}\equiv 0$ on $\mathfrak f$. 
Further, if $\textbf{v}_j\not\in \mathfrak f$, then $\nabla\hat \lambda_j$ is orthogonal to $\mathfrak f$. Therefore, without loss of generality, we can reduce to the case where $\textbf{v}_i,\textbf{v}_j\in \mathfrak f$. Since $\hat \lambda_i$ and $\hat \lambda_j$ are both $C^0$ on $\mesh$, their well-defined values on $\mathfrak f$ suffice to determine the projection of $\hlnhl ij$ to $\mathfrak f$. Since the choice of pair $ij$ was arbitrary, we have $T_\mathfrak f(\textnormal{$\textbf{u}$}_1)=T_\mathfrak f(\textnormal{$\textbf{u}$}_2)$, completing the proof. \end{proof} \text{} \textbf{e}gin{remark} \label{rmk:rot-for-cty} {\em When $n=2$, we may replace $\hlnhl ij$ in the statement Theorem~\ref{thm:hcurl-conf} by $\textnormal{rot}~\hlnhl ij$ and conclude that $\textnormal{$\textbf{u}$}\in H(\textnormal{div\,},\mesh)$. This is immediate since $\textnormal{rot}$ gives an isomorphism between $H(\textnormal{curl})$ and $H(\textnormal{div})$ in $\R^2$, as discussed in Section~\ref{subsec:feec}. When $n=3$, we construct functions in $H(\textnormal{div\,},\mesh)$ using triples of indices associated to vertices of mesh elements, according to the next result. } \end{remark} \text{} \textbf{e}gin{theorem} \label{thm:hdiv-conf} Fix a mesh $\mesh$ of polyhedra $\{\mathfrak m\}$ in $\R^3$ and assign some ordering $\textnormal{$\textbf{v}_1,\ldots,\textbf{v}_p$}$ to all the vertices in the mesh. Fix an associated set of generalized barycentric coordinate hat functions $\textnormal{$\hat\lambda_1,\ldots,\hat\lambda_p$}$. Let \[\textnormal{$\textbf{u}$}\in\textnormal{span}~\left\{\hatlnlxnl ijk~:~\textnormal{$\exists~\mathfrak m\in\mesh$ such that $\textbf{v}_i,\textbf{v}_j,\textbf{v}_k\in\mathfrak m$}\right\}.\] Then $\textnormal{$\textbf{u}$}\in H(\textnormal{div\,},\mesh)$. 
\end{theorem} \begin{proof} Again following the notation of Proposition~\ref{prop:trace2conf}, it suffices to show that $N_\mathfrak f(\textnormal{$\textbf{u}$}_1)=N_\mathfrak f(\textnormal{$\textbf{u}$}_2)$ for an arbitrary face $\mathfrak f\in \mesh$ of codimension one whose vertices are indexed by $J$. We will use the shorthand notation \[\xi_{ijk} := \hatlnlxnl ijk. \] Consider an arbitrary term $c_{ijk}\xi_{ijk}$ in the linear combination defining $\textnormal{$\textbf{u}$}$. We will first show that $\xi_{ijk}$ has a non-zero normal component on $\mathfrak f$ only if $i,j,k\in J$. If $i\not\in J$ then $\hat\lambda_i\equiv0$ on $\mathfrak f$ by Proposition~\ref{prop:bctrcfaceprop}, making $\xi_{ijk}\equiv 0$ on $\mathfrak f$, as well. If $i\in J$ but $j,k\not\in J$, then $\nabla\hat\lambda_j$ and $\nabla\hat\lambda_k$ are both normal to $\mathfrak f$ on $\mathfrak f$ by Proposition~\ref{prop:bctrcfaceprop}. Hence, their cross product is zero and again $\xi_{ijk}\equiv 0$ on $\mathfrak f$. If $i,j\in J$ but $k\not\in J$ then again $\nabla\hat\lambda_k\perp \mathfrak f$ on $\mathfrak f$. Since $\nabla\hat\lambda_j\times\nabla\hat\lambda_k\perp \nabla\hat\lambda_k$, we conclude that $\xi_{ijk}$ has no normal component on $\mathfrak f$. The same argument holds for the case $i,k\in J$, $j\not\in J$. The only remaining case is $i,j,k\in J$, proving the claim. Thus, without loss of generality, we assume that $i,j,k\in J$. Since $\hat\lambda_j$ and $\hat\lambda_k$ are both $C^0$ on $\mesh$, their well-defined values on $\mathfrak f$ suffice to determine the projection of $\nabla\hat\lambda_j$ and $\nabla\hat\lambda_k$ to $\mathfrak f$, which then uniquely defines the normal component of $\nabla\hat\lambda_j\times\nabla\hat\lambda_k$ on $\mathfrak f$. Since $\hat\lambda_i$ is also $C^0$ on $\mesh$, and the choice of $i,j,k$ was arbitrary, we have $N_\mathfrak f(\textnormal{$\textbf{u}$}_1)=N_\mathfrak f(\textnormal{$\textbf{u}$}_2)$, completing the proof. 
\end{proof} \text{}\\ \section{Polynomial Reproduction Results} \label{sec:poly-repro} We now show how generalized barycentric coordinate functions $\lambda_i$ and their gradients can reproduce all the polynomial differential forms in ${\mathcal P}_1\Lambda^k$ and ${\mathcal P}_1^-\Lambda^k$ for $0\leq k\leq n$ with $n=2$ or $3$. The results for the functions $\lnl ij$ and $\mathcal{W}_{ij}$ extend immediately to any value of $n\geq 2$ since those functions do not use any dimension-specific operators like $\times$ or $\textnormal{rot}$. \\ \textbf{e}gin{theorem} \label{thm:pr-lnl} Fix $n\geq 2$. Let $\mathfrak m$ be a convex $n$-dimensional polytope in $\R^n$ with vertex set \textnormal{$\{\textbf{v}_i\}$}. Given any set of generalized barycentric coordinates $\{\lambda_i\}$ associated to $\mathfrak m$, \textbf{e}gin{equation} \label{eq:lnl-id} \textnormal{$ \sum_{i,j}\lnl ij(\textbf{v}_j-\textbf{v}_i)^T= \mathbb I, $} \end{equation} where $\mathbb I$ is the $n\times n$ identity matrix. Further, for any $n\times n$ matrix $\A$, \textbf{e}gin{equation} \label{eq:lnl-Ax} \textnormal{$ \sum_{i,j}(\A\textbf{v}_i \cdot\textbf{v}_j)(\lnl ij) =\A\textbf{x}. $} \end{equation} Thus, $\displaystyle\textnormal{span}\left\{\lnl ij\;:\;\textnormal{$\textbf{v}_i,\textbf{v}_j\in\mathfrak m$}\right\}\supseteq {\mathcal P}_1\Lambda^1(\mathfrak m).$ \end{theorem} \textbf{e}gin{proof} From (\ref{eq:pof1}) - (\ref{eq:vgradsumI}), we see that \textbf{e}gin{align*} \sum_{i,j}\lnl ij(\textbf{v}_j-\textbf{v}_i)^T & = \left(\sum_{i}\lambda_i\right)\left(\sum_j\nabla\lambda_j\textbf{v}_j^T\right) - \left(\sum_{j}\nabla\lambda_j\right)\left(\sum_i\lambda_i\textbf{v}_i^T\right) \\ & = 1(\mathbb I^T) - 0(\textbf{x}^T) = \mathbb I, \end{align*} establishing (\ref{eq:lnl-id}). 
Similarly for (\ref{eq:lnl-Ax}), a bit of algebra yields \textbf{e}gin{align*} \sum_{i,j}(\A\textbf{v}_i \cdot\textbf{v}_j)(\lnl ij) & = \sum_{i,j}(\lnl ij) \textbf{v}_j^T \A\textbf{v}_i = \sum_{i,j} \nabla\lambda_j \textbf{v}_j^T \A\textbf{v}_i \lambda_i\\ & = \left(\sum_{j} \nabla\lambda_j \textbf{v}_j^T\right)\A\left(\sum_i \textbf{v}_i \lambda_i\right) = \mathbb I^T\A\textbf{x} = \A\textbf{x} \end{align*} We have shown that any vector of linear polynomials can be written as a linear combination of $\lnl ij$ functions, hence the span of these functions contains the vector proxies for all elements of ${\mathcal P}_1\Lambda^1(\mathfrak m)$. \end{proof} \text{}\\ \textbf{e}gin{cor} \label{cor:pr-lrnl} Let $\mathfrak m$ be a convex polygon in $\R^2$ with vertex set \textnormal{$\{\textbf{v}_i\}$}. Given any set of generalized barycentric coordinates $\{\lambda_i\}$ associated to $\mathfrak m$, \textbf{e}gin{equation} \label{eq:lrnl-id} \textnormal{$ \sum_{i,j}\textnormal{rot}\,\lnl ij(\textnormal{rot}(\textbf{v}_j-\textbf{v}_i))^T= \mathbb I, $} \end{equation} where $\mathbb I$ is the $2\times 2$ identity matrix. Further, for any $2\times 2$ matrix $\A$, \textbf{e}gin{equation} \label{eq:lrnl-Ax} \textnormal{$ \sum_{i,j}(-\textnormal{rot}\,\A\,\textbf{v}_i \cdot\textbf{v}_j)(\textnormal{rot}\, \lnl ij) =\A\textbf{x}. $} \end{equation} Thus, $\displaystyle\textnormal{span}\left\{\textnormal{rot}\lnl ij\;:\;\textnormal{$\textbf{v}_i,\textbf{v}_j\in\mathfrak m$}\right\}\supseteq {\mathcal P}_1\Lambda^1(\mathfrak m).$ \end{cor} \textbf{e}gin{proof} For (\ref{eq:lrnl-id}), observe that for any $\textbf{w},\textbf{y}\in\R^2$, $\displaystyle\textbf{w}\textbf{y}^T = \textbf{e}gin{bmatrix} a & b \\ c & d\end{bmatrix}$ implies $\displaystyle(\textnormal{rot}~\textbf{w})(\textnormal{rot}~\textbf{y})^T = \textbf{e}gin{bmatrix} d & -c \\ -b & a \end{bmatrix}$. Hence, the result follows immediately from (\ref{eq:lnl-id}). 
For (\ref{eq:lrnl-Ax}), note $\textnormal{rot}^{-1}=-\textnormal{rot}$ and define $\B:=-\textnormal{rot}\,\A$. Using $\B$ as the matrix in (\ref{eq:lnl-Ax}), we have \[\sum_{i,j}(\B\textbf{v}_i \cdot\textbf{v}_j)(\lnl ij) =\B\textbf{x}.\] Applying $\textnormal{rot}$ to both sides of the above yields the result. \end{proof} \text{}\\ \textbf{e}gin{theorem} \label{thm:pr-lnlxnl} Let $\mathfrak m$ be a convex polyhedron in $\R^3$ with vertex set \textnormal{$\{\textbf{v}_i\}$}. Given any set of generalized barycentric coordinates $\{\lambda_i\}$ associated to $\mathfrak m$, \textbf{e}gin{equation} \label{eq:lnlxnl-id} \textnormal{$ \overline{\textbf{f}}rac 12\sum_{i,j,k}\lnlxnl ijk\left((\textbf{v}_j-\textbf{v}_i)\times(\textbf{v}_k-\textbf{v}_i)\right)^T= \mathbb I , $} \end{equation} where $\mathbb I$ is the $3\times 3$ identity matrix. Further, for any $3\times 3$ matrix $\A$, \textbf{e}gin{equation} \label{eq:lnlxnl-Ax} \textnormal{$ \overline{\textbf{f}}rac 12\sum_{i,j,k}(\A\textbf{v}_i \cdot(\textbf{v}_j\times\textbf{v}_k))(\lnlxnl ijk) =\A\textbf{x}. $} \end{equation} Thus, $\displaystyle\textnormal{span}\left\{\lnlxnl ijk\;:\;\textnormal{$\textbf{v}_i,\textbf{v}_j,\textbf{v}_k\in\mathfrak m$}\right\}\supseteq {\mathcal P}_1\Lambda^2(\mathfrak m).$ \end{theorem} \textbf{e}gin{proof} We start with (\ref{eq:lnlxnl-id}). First, observe that \[(\textbf{v}_j-\textbf{v}_i)\times(\textbf{v}_k-\textbf{v}_i) = \textbf{v}_i\times\textbf{v}_j +\textbf{v}_j\times\textbf{v}_k + \textbf{v}_k\times\textbf{v}_i.\] By (\ref{eq:gradsum0}), we have that \textbf{e}gin{align*} \sum_{i,j,k}\lnlxnl ijk\left(\textbf{v}_i\times\textbf{v}_j\right)^T & = \sum_{i,j} \lambda_i \left(\nabla \lambda_j \times \left( \sum_k \nabla \lambda_k \right)\right) \left(\textbf{v}_i\times\textbf{v}_j\right)^T =0. \end{align*} A similar argument shows that replacing $\textbf{v}_i\times\textbf{v}_j$ with $\textbf{v}_k\times\textbf{v}_i$ also yields the zero matrix.
Hence, \textbf{e}gin{align*} \sum_{i,j,k}\lnlxnl ijk\left((\textbf{v}_j-\textbf{v}_i)\times(\textbf{v}_k-\textbf{v}_i)\right)^T &= \sum_{i,j,k}\lnlxnl ijk\left(\textbf{v}_j\times\textbf{v}_k\right)^T \end{align*} \textbf{e}gin{align*} = \sum_i \lambda_i \sum_{j,k}\left(\nabla \lambda_j \times \nabla \lambda_k\right) \left(\textbf{v}_j\times\textbf{v}_k\right)^T & =\sum_{j,k}\left(\nabla \lambda_j \times \nabla \lambda_k\right) \left(\textbf{v}_j\times\textbf{v}_k\right)^T. \end{align*} To simplify this further, we use the Kronecker delta symbol $\delta_{i_1i_2}$ and the 3D Levi-Civita symbol $\varepsilon_{i_1 i_2 i_3}$. It suffices to show that the entry in row $r$, column $c$ of the matrix $\sum_{j,k}(\nlxnl jk)\left(\textbf{v}_j\times \textbf{v}_k\right)^T$ is $2\delta_{rc}$. We see that \textbf{e}gin{align*} \left[\sum_{j,k}(\nlxnl jk)\left(\textbf{v}_j\times \textbf{v}_k\right)^T\right]_{rc} & = \sum_{j,k}\varepsilon_{r\ell m}(\nl j)_\ell(\nl k)_m \varepsilon_{cpq} (\textbf{v}_j)_p(\textbf{v}_k)_q \\ & = \varepsilon_{r\ell m}\varepsilon_{cpq} \sum_{j}(\textbf{v}_j)_p(\nl j)_\ell \sum_{k} (\textbf{v}_k)_q(\nl k)_m \\ & = \varepsilon_{r\ell m}\varepsilon_{cpq} \delta_{\ell p}\delta_{mq}. \end{align*} The last step in the above chain of equalities follows from (\ref{eq:vgradsumI}). Observe that $\varepsilon_{r\ell m}\varepsilon_{cpq} \delta_{\ell p}\delta_{mq}= \varepsilon_{r\ell m}\varepsilon_{c\ell m} = 2\delta_{r c}$, as desired. For (\ref{eq:lnlxnl-Ax}), observe that \textbf{e}gin{align*} \sum_{i,j,k}(\A\textbf{v}_i \cdot(\textbf{v}_j\times\textbf{v}_k))(\lnlxnl ijk) & = \left(\sum_{i}\A\textbf{v}_i\lambda_i\right)\cdot \sum_{j,k} (\textbf{v}_j\times\textbf{v}_k)(\nlxnl jk) \\ & = \sum_{j,k} (\nlxnl jk)(\textbf{v}_j\times\textbf{v}_k)^T \left(\A \sum_{i}\textbf{v}_i\lambda_i\right) \\ & = 2\;\mathbb I\;\A\textbf{x} = 2\A\textbf{x}. \end{align*} Note that we used the proof of (\ref{eq:lnlxnl-id}) to rewrite the sum over $j,k$ as $2\mathbb I$. 
We have shown that any vector of linear polynomials can be written as a linear combination of $\lnlxnl ijk$ functions, hence the span of these functions contains the vector proxies for all elements of ${\mathcal P}_1\Lambda^2(\mathfrak m)$. \end{proof} \text{}\\ \textbf{e}gin{theorem} \label{thm:pr-lnldrnl} Let $\mathfrak m$ be a convex polygon in $\R^2$ with vertex set \textnormal{$\{\textbf{v}_i\}$}. Given any set of generalized barycentric coordinates $\{\lambda_i\}$ associated to $\mathfrak m$, \textbf{e}gin{equation} \label{eq:lnldrnl-one} \textnormal{$ \overline{\textbf{f}}rac 12\sum_{i,j,k}\lnldrnl ijk\left((\textbf{v}_j-\textbf{v}_i)\cdot \textnormal{rot}(\textbf{v}_k-\textbf{v}_i)\right)= 1. $} \end{equation} Further, for any vector $\textbf{a} \in \R^2$, \textbf{e}gin{equation} \label{eq:lnldrnl-Ax} \textnormal{$ \overline{\textbf{f}}rac 12\sum_{i,j,k}(\textbf{a}^T\textbf{v}_i (\textbf{v}_j\cdot \textnormal{rot}\textbf{v}_k))(\lnldrnl ijk) =\textbf{a}^T\textbf{x}. $} \end{equation} Thus, $\displaystyle\textnormal{span}\left\{\lnldrnl ijk\;:\;\textnormal{$\textbf{v}_i,\textbf{v}_j,\textbf{v}_k\in\mathfrak m$}\right\}\supseteq {\mathcal P}_1\Lambda^2(\mathfrak m).$ \end{theorem} \textbf{e}gin{proof} The proof is essentially identical to that of Theorem~\ref{thm:pr-lnlxnl}. First, \[(\textbf{v}_j-\textbf{v}_i) \cdot \textnormal{rot}(\textbf{v}_k-\textbf{v}_i) = \textbf{v}_i\cdot \textnormal{rot}\textbf{v}_j +\textbf{v}_j\cdot \textnormal{rot}\textbf{v}_k + \textbf{v}_k\cdot \textnormal{rot}\textbf{v}_i,\] and by (\ref{eq:gradsum0}), \textbf{e}gin{align*} \sum_{i,j,k}\lnldrnl ijk & \left(\textbf{v}_i\cdot \textnormal{rot}\textbf{v}_j\right) = \sum_{i,j} \lambda_i \left(\nabla \lambda_j \cdot \textnormal{rot} \left( \sum_k \nabla \lambda_k \right)\right) \left(\textbf{v}_i\cdot \textnormal{rot} \textbf{v}_j\right) =0. 
\end{align*} A similar argument shows that replacing $\textbf{v}_i\cdot \textnormal{rot} \textbf{v}_j$ with $\textbf{v}_k\cdot \textnormal{rot}\textbf{v}_i$ also yields zero. Hence as before, \textbf{e}gin{align*} \sum_{i,j,k}\lnldrnl ijk & \left((\textbf{v}_j-\textbf{v}_i)\cdot \textnormal{rot}(\textbf{v}_k-\textbf{v}_i)\right)^T =\sum_{j,k}\left(\nabla \lambda_j \cdot \textnormal{rot} \nabla \lambda_k\right) \left(\textbf{v}_j\cdot \textnormal{rot}\textbf{v}_k\right)^T. \end{align*} Finally, the same argument holds using the 2D Levi-Civita symbol: \textbf{e}gin{align*} \sum_{j,k}(\nldrnl jk)\left(\textbf{v}_j\cdot\textnormal{rot} \textbf{v}_k\right) & = \sum_{j,k}\varepsilon_{\ell m}(\nl j)_\ell(\nl k)_m \varepsilon_{p q} (\textbf{v}_j)_p(\textbf{v}_k)_q \\ & = \varepsilon_{\ell m}\varepsilon_{pq} \sum_{j}(\textbf{v}_j)_p(\nl j)_\ell \sum_{k} (\textbf{v}_k)_q(\nl k)_m \\ & = \varepsilon_{\ell m}\varepsilon_{pq} \delta_{\ell p}\delta_{mq} = \varepsilon_{\ell m}\varepsilon_{\ell m} = 2, \end{align*} establishing (\ref{eq:lnldrnl-one}). For (\ref{eq:lnldrnl-Ax}), observe that \textbf{e}gin{align*} \sum_{i,j,k}(\textbf{a}^T\textbf{v}_i & (\textbf{v}_j\cdot \textnormal{rot} \textbf{v}_k)) (\lnldrnl ijk) \\ & = \left(\sum_{i}\textbf{a}^T\textbf{v}_i\lambda_i\right) \sum_{j,k} (\textbf{v}_j\cdot \textnormal{rot} \textbf{v}_k)(\nldrnl jk) \\ & = \sum_{j,k} (\nldrnl jk)(\textbf{v}_j\cdot \textnormal{rot} \textbf{v}_k)^T \left(\textbf{a}^T \sum_{i}\textbf{v}_i\lambda_i\right) = 2\textbf{a}^T\textbf{x}. \end{align*} \end{proof} \textbf{e}gin{remark} {\em The proof of Theorem~\ref{thm:pr-lnldrnl} can also be obtained by augmenting the 2D vectors and matrices with zeros to make 3D vectors and matrices and recognizing (\ref{eq:lnldrnl-one}) as the element equality in the third row and third column of (\ref{eq:lnlxnl-id}). 
} \end{remark} \text{}\\ We also have polynomial reproduction results using the Whitney-like basis functions (\ref{eq:whit-edge-def}) and (\ref{eq:whit-tri-def}). Recall that ${\mathcal H}_r$ denotes homogeneous polynomials of degree $r$ and let $\M_{n\times n}$ denote $n\times n$ matrices. We have the following theorems.\\ \textbf{e}gin{theorem} \label{thm:pr-whit-ij} Fix $n\geq 2$. Let $\mathfrak m$ be a convex $n$-dimensional polytope in $\R^n$ with vertex set \textnormal{$\{\textbf{v}_i\}$} and an associated set of generalized barycentric coordinates $\{\lambda_i\}$. Then \textbf{e}gin{equation} \label{eq:whit1-id} \textnormal{$ \sum_{i<j}\mathcal{W}_{ij}(\textbf{v}_j-\textbf{v}_i)^T=\mathbb I. $} \end{equation} Further, define a map $\Phi:{\mathcal H}_1\Lambda^1(\R^n)\rightarrow \M_{n\times n}$ by \[\textnormal{$ \sum_{i=1}^n\left(\sum_{j=1}^n a_{ij}x_j\right) dx_i \longmapsto \left [ \text{sign }(a_{ij}) \right ] . $}\] Then for all $\omega\in {\mathcal H}_0\Lambda^2(\R^n)$, \textbf{e}gin{equation} \label{eq:whit1-x} \textnormal{$ \sum_{i<j}\left(\Phi(\kappa\omega)\textbf{v}_i)\cdot \textbf{v}_j\right) \mathcal{W}_{ij} = (\Phi(\kappa\omega))\textbf{x}. $} \end{equation} Thus, $\displaystyle\textnormal{span}\left\{\mathcal{W}_{ij}\;:\;\textnormal{$\textbf{v}_i,\textbf{v}_j\in\mathfrak m$}\right\}\supseteq {\mathcal P}_1^-\Lambda^1(\mathfrak m).$ \end{theorem} \textbf{e}gin{proof} For (\ref{eq:whit1-id}), we reorganize the summation and apply (\ref{eq:lnl-id}) to see that \textbf{e}gin{align*} \sum_{i<j}\mathcal{W}_{ij}(\textbf{v}_j-\textbf{v}_i)^T & = \sum_{i<j}\lnl ij(\textbf{v}_j-\textbf{v}_i)^T - \sum_{i<j}\lnl ji(\textbf{v}_j-\textbf{v}_i)^T \\ & = \sum_{i<j}\lnl ij(\textbf{v}_j-\textbf{v}_i)^T + \sum_{j<i}\lnl ij(\textbf{v}_j-\textbf{v}_i)^T \\ & =\sum_{i,j}\lnl ij(\textbf{v}_j-\textbf{v}_i)^T = \mathbb I. 
\end{align*} For (\ref{eq:whit1-x}), fix $\omega\in {\mathcal H}_0\Lambda^2(\R^n)$ and express it as \[\omega = \sum_{i<j}a_{ij}dx_idx_j,\] for some coefficients $a_{ij}\in\R$. Then \[\kappa\omega = \sum_{i<j}a_{ij}(x_idx_j-x_jdx_i).\] The entries of the matrix $\Phi(\kappa\omega)$ are thus given by \textbf{e}gin{equation} \label{eq:phi-entries} \left[\Phi(\kappa\omega)\right]_{ij} = \textbf{e}gin{cases} \text{sign }(a_{ij}) & \mbox{if } i<j, \\ -\text{sign }(a_{ij}) & \mbox{if } i>j, \\ 0 & \mbox{if } i=j. \\ \end{cases} \end{equation} From (\ref{eq:lnl-Ax}), we have that \[\sum_{i,j}\left(\Phi(\kappa\omega)\textbf{v}_i)\cdot \textbf{v}_j\right) \lnl ij = (\Phi(\kappa\omega))\textbf{x},\qquad \overline{\textbf{f}}orall \omega\in {\mathcal H}_0\Lambda^2(\R^n)\] Since $\Phi(\kappa\omega)$ is anti-symmetric by (\ref{eq:phi-entries}), we have that \textbf{e}gin{align*} \sum_{i,j}\left(\Phi(\kappa\omega)\textbf{v}_i)\cdot \textbf{v}_j\right) & \lnl ij \\ & = \sum_{i<j}\left(\Phi(\kappa\omega)\textbf{v}_i)\cdot \textbf{v}_j\right) \lnl ij + \sum_{j<i}\left(\Phi(\kappa\omega)\textbf{v}_i)\cdot \textbf{v}_j\right) \lnl ij \\ & = \sum_{i<j}\left(\Phi(\kappa\omega)\textbf{v}_i)\cdot \textbf{v}_j\right) \mathcal{W}_{ij}. \end{align*} We have shown that any vector proxy of an element of ${\mathcal P}_0\Lambda^1(\mathfrak m)$ or $\kappa{\mathcal H}_{0}\Lambda^{2}(\mathfrak m)$ can be written as a linear combination of $\mathcal{W}_{ij}$ functions. By (\ref{eq:prminus-decomp}), we conclude that the span of the $\mathcal{W}_{ij}$ functions contains the vector proxies for all elements of ${\mathcal P}_1^-\Lambda^1(\mathfrak m)$. \end{proof} \text{}\\ \textbf{e}gin{cor} \label{cor:pr-rotWhit} Let $\mathfrak m$ be a convex polygon in $\R^2$ with vertex set \textnormal{$\{\textbf{v}_i\}$}. 
Given any set of generalized barycentric coordinates $\{\lambda_i\}$ associated to $\mathfrak m$, \textbf{e}gin{equation} \label{eq:rotWhit-id} \textnormal{$ \sum_{i<j}\textnormal{rot}~\mathcal{W}_{ij}~\textnormal{rot}(\textbf{v}_j-\textbf{v}_i)^T=\mathbb I, $} \end{equation} where $\mathbb I$ is the $2\times 2$ identity matrix. Further, \textbf{e}gin{equation} \label{eq:rotWhit-x} \textnormal{$ \sum_{i<j}\left((\textnormal{rot}~\textbf{v}_i)\cdot \textbf{v}_j\right) \textnormal{rot}~\mathcal{W}_{ij} = \textbf{x}. $} \end{equation} Thus, $\displaystyle\textnormal{span}\left\{\textnormal{rot}\;\mathcal{W}_{ij}\;:\;\textnormal{$\textbf{v}_i,\textbf{v}_j\in\mathfrak m$}\right\}\supseteq {\mathcal P}_1^-\Lambda^1(\mathfrak m).$ \end{cor} \textbf{e}gin{proof} By the same argument as the proof of (\ref{eq:lrnl-id}) in Corollary~\ref{cor:pr-lrnl}, the identity (\ref{eq:rotWhit-id}) follows immediately from (\ref{eq:whit1-id}). For (\ref{eq:rotWhit-x}), observe that setting $\omega:=1\in {\mathcal H}_0\Lambda^2(\R^2)$, we have that $\Phi(\kappa\omega)=\textnormal{rot}$. Therefore, (\ref{eq:whit1-x}) implies that \[\sum_{i<j}\left(\textnormal{rot}~\textbf{v}_i)\cdot \textbf{v}_j\right) \mathcal{W}_{ij} = \textnormal{rot}~\textbf{x}.\] Applying $\textnormal{rot}$ to both sides of the above equation completes the proof. \end{proof} \text{}\\ \textbf{e}gin{theorem} \label{thm:pr-whit-ijk} Let $\mathfrak m$ be a convex polyhedron in $\R^3$ with vertex set \textnormal{$\{\textbf{v}_i\}$} and an associated set of generalized barycentric coordinates $\{\lambda_i\}$. Then \textbf{e}gin{equation} \label{eq:whit3-id} \textnormal{$ \sum_{i<j<k}\mathcal{W}_{ijk} \left((\textbf{v}_j-\textbf{v}_i)\times(\textbf{v}_k-\textbf{v}_i)\right)^T = \mathbb I, $} \end{equation} and \textbf{e}gin{equation} \label{eq:whit3-x} \textnormal{$ \sum_{i<j<k}(\textbf{v}_i\cdot(\textbf{v}_j\times\textbf{v}_k))\mathcal{W}_{ijk}=\textbf{x}. 
$} \end{equation} Thus, $\displaystyle\textnormal{span}\left\{\mathcal{W}_{ijk}\;:\;\textnormal{$\textbf{v}_i,\textbf{v}_j,\textbf{v}_k\in\mathfrak m$}\right\}\supseteq {\mathcal P}_1^-\Lambda^2(\mathfrak m).$ \end{theorem} \textbf{e}gin{proof} We adopt the shorthand notations \[\xi_{ijk} := \lambda_i\nabla\lambda_j\times\nabla\lambda_k, \quad \textbf{z}_{ijk}:= (\textbf{v}_j-\textbf{v}_i)\times(\textbf{v}_k-\textbf{v}_i), \quad \textbf{v}_{ijk}:= \textbf{v}_i\cdot(\textbf{v}_j\times\textbf{v}_k).\] For (\ref{eq:whit3-id}), we re-write (\ref{eq:lnlxnl-id}) as \[ \sum_{i,j,k}\xi_{ijk}{\textbf{z}_{ijk}}^T= 2\mathbb I. \] Observe that $\xi_{ijk}{\textbf{z}_{ijk}}^T=(-\xi_{ikj})(-\textbf{z}_{ikj})^T=\xi_{ikj}{\textbf{z}_{ikj}}^T$ and $\textbf{z}_{ijk}=0$ if $i$, $j$, $k$ are not distinct. Thus, \textbf{e}gin{align*} 2\mathbb I & = \sum_{\substack{i<j<k \\ k<i<j \\ j<k<i}}\xi_{ijk}{\textbf{z}_{ijk}}^T + \sum_{\substack{i<k<j \\ k<j<i \\ j<i<k}}\xi_{ikj}{\textbf{z}_{ikj}}^T. \end{align*} The two summations have different labels for the indices but are otherwise identical. Therefore, \textbf{e}gin{align*} \mathbb I & = \sum_{i<j<k}\xi_{ijk}{\textbf{z}_{ijk}}^T + \sum_{k<i<j}\xi_{ijk}{\textbf{z}_{ijk}}^T + \sum_{j<k<i}\xi_{ijk}{\textbf{z}_{ijk}}^T \\ & = \sum_{i<j<k}\xi_{ijk}{\textbf{z}_{ijk}}^T + \xi_{jki}{\textbf{z}_{jki}}^T + \xi_{kij}{\textbf{z}_{kij}}^T \\ & = \sum_{i<j<k}(\xi_{ijk}+\xi_{jki}+\xi_{kij}){\textbf{z}_{ijk}}^T \\ & = \sum_{i<j<k}\mathcal{W}_{ijk}\left((\textbf{v}_j-\textbf{v}_i)\times(\textbf{v}_k-\textbf{v}_i)\right)^T. \end{align*} For (\ref{eq:whit3-x}), we take $\A$ as the identity, and re-write (\ref{eq:lnlxnl-Ax}) as \[ \sum_{i,j,k}\textbf{v}_{ijk}\xi_{ijk} =2\textbf{x}. \] Observe that $\textbf{v}_{ijk}\xi_{ijk}=(-\textbf{v}_{ikj})(-\xi_{ikj})=\textbf{v}_{ikj}\xi_{ikj}$ and $\textbf{v}_{ijk}=0$ if $i$, $j$, $k$ are not distinct. 
Thus, \textbf{e}gin{align*} 2\textbf{x} & = \sum_{\substack{i<j<k \\ k<i<j \\ j<k<i}}\textbf{v}_{ijk}\xi_{ijk} + \sum_{\substack{i<k<j \\ k<j<i \\ j<i<k}}\textbf{v}_{ikj}\xi_{ikj} . \end{align*} The rest of the argument follows similarly, yielding \textbf{e}gin{align*} \textbf{x} & = \sum_{i<j<k}\textbf{v}_{ijk}\xi_{ijk} + \sum_{k<i<j}\textbf{v}_{ijk}\xi_{ijk} + \sum_{j<k<i}\textbf{v}_{ijk}\xi_{ijk} \\ & = \sum_{i<j<k}(\textbf{v}_i\cdot(\textbf{v}_j\times\textbf{v}_k))\mathcal{W}_{ijk}. \end{align*} Note that ${\mathcal H}_{0}\Lambda^{3}(\mathfrak m)$ is generated by the volume form $\eta=dxdydz$ and that $\kappa\eta$ has vector proxy $\textbf{x}$. Thus, by (\ref{eq:prminus-decomp}), we have shown that the span of the $\mathcal{W}_{ijk}$ functions contains the vector proxy of any element of ${\mathcal P}_1^-\Lambda^2(\mathfrak m)$. \end{proof} \text{}\\ \textbf{e}gin{remark} \label{rmk:topdim} \em There are some additional constructions in this same vein that could be considered. On a polygon in $\R^2$, we can define $\mathcal{W}_{ijk}$ in the same way as (\ref{eq:whit-tri-def}), interpreting $\times$ as the two dimensional cross product. Likewise, on a polyhedron in $\R^3$, we can define $\mathcal{W}_{ijk\ell}$ according to formula (\ref{eq:whit-def}), yielding functions that are summations of terms like $\lambda_i(\nabla\lambda_j\cdot(\nlxnl k\ell)$. These constructions will yield the expected polynomial reproduction results, yet they are not of practical interest in finite element contexts, as we will see in the next section. \end{remark} \section{Polygonal and Polyhedral Finite Element Families} \label{sec:polyg-fams} Let $\mesh$ be a mesh of convex $n$-dimensional polytopes $\{\mathfrak m\}$ in $\R^n$ with $n=2$ or $3$ and assign some ordering $\textnormal{$\textbf{v}_1,\ldots,\textbf{v}_p$}$ to all the vertices in the mesh. 
Fix an associated set of generalized barycentric hat functions $\textnormal{$\hat\lambda_1,\ldots,\hat\lambda_p$}$ as in Section~\ref{sec:global-cnty}. In Table~\ref{tab:comp-bases}, we list all the types of scalar-valued and vector-valued functions that we have defined in this setting. When used over all elements in a mesh of polygons or polyhedra, these functions have global continuity and polynomial reproduction properties as indicated in Table~\ref{tab:results-summary}. These two properties -- global continuity and polynomial reproduction -- are essential and intertwined necessities in the construction of $H\Lambda^k$-conforming finite element methods on \textit{any} type of domain mesh. Global continuity of type $H\Lambda^k$ ensures that the piecewise-defined approximate solution is an element of the function space $H\Lambda^k$ in which a solution is sought. Polynomial reproduction of type ${\mathcal P}_1\Lambda^k$ or ${\mathcal P}_1^-\Lambda^k$ ensures that the error between the true solution and the approximate solution decays linearly with respect to the maximum diameter of a mesh element, as measured in $H\Lambda^k$ norm. On meshes of simplicial elements, the basis functions listed in Table~\ref{tab:comp-bases} are known and often used as local bases for the corresponding classical finite element spaces listed in Table~\ref{tab:fe-spaces}, meaning our approach recapitulates known methods on simplicial meshes.
\textbf{e}gin{table}[t] \centering \textbf{e}gin{tabular}{c|c|c|c|c|c} n & k & space & \# construction & \# boundary & \# polynomial \\ \hline &&&&&\\[-2mm] 2 & 0 & ${\mathcal P}_1 \Lambda^0(\mathfrak m)$ & $v$ & $v$ & $3$\\ & & ${\mathcal P}_1^- \Lambda^0(\mathfrak m)$ & $v$ & $v$ & $3$\\ &&&&&\\[-2mm] & 1 & ${\mathcal P}_1 \Lambda^1(\mathfrak m)$ & $\displaystyle 2{v \choose 2}$ & $2e$ & $6$\\ &&&&&\\[-2mm] & & ${\mathcal P}_1^- \Lambda^1(\mathfrak m)$ & $\displaystyle {v \choose 2}$ & $e$ & $3$\\ &&&&&\\[-2mm] & 2 & ${\mathcal P}_1 \Lambda^2(\mathfrak m)$ & $\displaystyle 3{v \choose 3}$ & $0$ & $3$\\ &&&&&\\[-2mm] & & ${\mathcal P}_1^- \Lambda^2(\mathfrak m)$ & $\displaystyle {v \choose 3}$ & $0$ & $1$\\ [3mm] \hline \end{tabular}\\ \caption{Dimension counts relevant to serendipity-style reductions in basis size are shown. Here, $v$ and $e$ denote the number of vertices and edges in the polygonal element $\mathfrak m$. The column `\# construction' gives the number of basis functions we define (cf.\ Table~\ref{tab:comp-bases}), `\# boundary' gives the number of basis functions related to inter-element continuity, and `\# polynomial' gives the dimension of the contained space of polynomial differential forms.} \label{tab:sizes-2d} \end{table} \partialaragraph*{\underline{Relation to quadrilateral and serendipity elements}} Consider the scalar bi-quadratic element on \textit{rectangles}, which has nine degrees of freedom: one associated to each vertex, one to each edge midpoint, and one to the center of the square. It has long been known that the `serendipity' element, which has only the eight degrees of freedom associated to the vertices and edge midpoints of the rectangle, is also an $H^1$-conforming, quadratic order method. In this case, polynomial reproduction requires the containment of ${\mathcal P}_2\Lambda^0(\mathfrak m)$ in the span of the basis functions, meaning at least six functions are required per element $\mathfrak m\in\mesh$. 
To ensure global continuity of $H^1$, however, the method must agree `up to quadratics' on each edge, which necessitates the eight degrees of freedom associated to the boundary. Therefore, the serendipity space associated to the scalar bi-quadratic element on a rectangle has dimension eight. In a previous paper~\cite{RGB2011a}, we generalized this `serendipity' reduction to ${\mathcal P}_2\Lambda^0(\mesh)$ where $\mesh$ is a mesh of strictly convex polygons in $\R^2$. For a simple polygon with $n$ vertices (and thus $n$ edges), polynomial reproduction still only requires $6$ basis functions, while global continuity of $H^1$ still requires reproduction of quadratics on edges, leading to a total of $2n$ basis functions required per element $\mathfrak m\in\mesh$. Given a convex polygon, our approach takes the $n+{n \choose 2}$ pairwise products of all the $\lambda_i$ functions and forms explicit linear combinations to yield a set of $2n$ basis functions with the required global $H^1$ continuity and polynomial reproduction properties. \partialaragraph*{\underline{Reduction of basis size}} A similar reduction procedure can be applied to the polygonal and polyhedral spaces described in Table~\ref{tab:comp-bases}. A key observation is that the continuity results of Theorems \ref{thm:hcurl-conf} and \ref{thm:hdiv-conf} only rely on the agreement of basis functions whose indices are of vertices on a shared boundary edge (in 2D) or face (in 3D). For example, if vertices $\textbf{v}_i$ and $\textbf{v}_j$ form the edge of a polygon in a 2D mesh, $H(\textnormal{curl\,},\mesh)$ continuity across the edge comes from identical tangential contributions in the $\lnl ij$ and $\lnl ji$ functions from either element containing this edge and zero tangential contributions from all other basis functions. 
Thus, basis functions whose indices do not belong to a single polygon edge (in 2D) or polyhedral face (in 3D) do not contribute to inter-element continuity, allowing the basis size to be reduced. \textbf{e}gin{table}[ht] \centering \textbf{e}gin{tabular}{c|c|c|c|c|c} n & k & space & \# construction & \# boundary & \# polynomial \\ \hline &&&&&\\[-2mm] 3 & 0 & ${\mathcal P}_1 \Lambda^0(\mathfrak m)$ & $v$ & $v$ & $4$\\ & & ${\mathcal P}_1^- \Lambda^0(\mathfrak m)$ & $v$ & $v$ & $4$\\ & 1 & ${\mathcal P}_1 \Lambda^1(\mathfrak m)$ & $\displaystyle 2 {v \choose 2}$ & $\displaystyle \left(\sum_{a=1}^f v_a(v_a-1)\right) - 2e$ & $12$\\ & & ${\mathcal P}_1^- \Lambda^1(\mathfrak m)$ & $\displaystyle {v \choose 2}$ & $\displaystyle \left(\sum_{a=1}^f {v_a\choose 2}\right) - e$ & $6$\\ &&&&&\\[-3mm] & 2 & ${\mathcal P}_1 \Lambda^2(\mathfrak m)$ & $\displaystyle 3{v \choose 3}$ & $\displaystyle \sum_{a=1}^f \overline{\textbf{f}}rac{v_a(v_a-1)(v_a-2)}{2}$ & $12$\\ &&&&&\\[-3mm] & & ${\mathcal P}_1^- \Lambda^2(\mathfrak m)$ & $\displaystyle {v \choose 3}$ & $\displaystyle \sum_{a=1}^f {v \choose 3}$ & $4$\\ & 3 & ${\mathcal P}_1 \Lambda^3(\mathfrak m)$ & $\displaystyle 4{v \choose 4}$ & $0$ & $4$\\ &&&&&\\[-2mm] & & ${\mathcal P}_1^- \Lambda^3(\mathfrak m)$ & $\displaystyle {v \choose 4}$ & $0$ & $1$\\[3mm] \hline \end{tabular}\\ \caption{The $n=3$ version of Table~\ref{tab:sizes-2d}. Here, $f$ denotes the number of faces on a polyhedral element $\mathfrak m$ and $v_a$ denotes the number of vertices on a particular face $\mathfrak f_a$. The entries of the `\# boundary' column are determined by counting functions associated to each face of the polyhedron and, in the $k=1$ cases, accounting for double-counting by subtraction. } \label{tab:sizes-3d} \end{table} To quantify the extent to which the bases we have defined could be reduced without affecting the global continuity properties, we count the number of functions associated with codimension 1 faces for each space considered. 
For a polygon in 2D, the results are summarized in Table~\ref{tab:sizes-2d}. The $k=0$ case is optimal in the sense that every basis function $\lambda_i$ contributes to the $H^1$-continuity in some way, meaning no basis reduction is available. In the $k=1$ cases, the number of basis functions we construct is quadratic in the number of vertices, $v$, of the polygon, but the number associated with the boundary is only linear in the number of edges, $e$. Since $e=v$ for a simple polygon, this suggests a basis reduction procedure would be both relevant and useful; the description of such a reduction will be the focus of a future work. In the $k=2$ cases, our procedure constructs $O(v^3)$ basis functions but no inter-element continuity is required; in these cases, a discontinuous Galerkin or other type of finite element method would be more practical. For a polyhedron $\mathfrak m$ in 3D, the results are summarized in Table~\ref{tab:sizes-3d}. As in 2D, the basis for the $k=0$ case cannot be reduced while the bases for the $k=n$ cases would not be practical for implementation since no inter-element continuity is required. In the $k=1$ cases, the number of basis functions we construct is again quadratic in $v$, while the number of basis functions required for continuity can be reduced for non-simplicial polyhedra. For instance, if $\mathfrak m$ is a hexahedron, our construction for ${\mathcal P}_1\Lambda^1$ gives 56 functions but only 48 are relevant to continuity; in the ${\mathcal P}_1^-\Lambda^1$ case, we construct 28 functions but only 20 are relevant to continuity. In the $k=2$ cases, a similar reduction is possible for non-simplicial polyhedra. Again in the case of a hexahedron, we construct 168 functions for ${\mathcal P}_1\Lambda^2$ and 56 functions for ${\mathcal P}_1^-\Lambda^2$, but the elements require only 72 and 24 functions, respectively, for inter-element continuity.
\partialaragraph*{\underline{Current and future directions}} It remains to discover additional properties of Whitney-like basis functions built from generalized barycentric coordinates and their use in finite element methods. In the time since this manuscript first appeared online, Chen and Wang~\cite{CW2015} have presented an approach for constructing `minimal dimension' local basis sets based on the results of this paper. Their theoretical and numerical results indicate that minimal spaces can, indeed, be constructed using the methods presented here with expected rates of convergence on certain classes of polygons and polyhedra. We expect that the ideas introduced here will continue to influence the rapidly expanding use of polytopal finite element methods in scientific and engineering applications.\\ \noindent \textbf{Acknowledgements.} The authors would like to thank the anonymous referees for their helpful suggestions to improve the paper. AG was supported in part by NSF Award 1522289 and a J.\ Tinsley Oden Fellowship. CB was supported in part by a grant from NIH (R01-GM117594) and contract (BD-4485) from Sandia National Labs. Sean Stephens helped produce the figure. \end{document}
\begin{equation}gin{document} \title{Eventual regularization of the slightly supercritical fractional Burgers equation} \begin{equation}gin{abstract} We prove that a weak solution of a slightly supercritical fractional Burgers equation becomes H\"older continuous for large time. \end{abstract} \section{Introduction} We consider the fractional Burgers equation \begin{equation}gin{equation} \label{e:burgers} \theta_t + \theta \cdot \theta_x + (-\lap)^s \theta = 0. \end{equation} It is well known that solutions $\theta$ of the subcritical ($s>1/2$) and critical ($s=1/2$) Burgers equation are smooth \cite{kiselev-blow}, \cite{DongDuLi}, \cite{chan2008regularity}. There are parallel results for the quasi-geostrophic equation. In the subcritical case, the solutions are smooth \cite{MR1709781}. In the critical case the solutions are also smooth, which was proved independently by Kiselev, Nazarov and Volberg \cite{kiselev2007global} and Caffarelli and Vasseur \cite{caffarelli2006drift} using different methods. The proof by Kiselev, Nazarov and Volberg is based on their previous work on the Burgers equation and consists of showing that certain modulus of continuity (that is essentially Lipschitz for nearby points) is preserved by the flow. The proof by Caffarelli and Vasseur is more involved and consists in proving a H\"older continuity result using classical ideas of De Giorgi. The two different methods were also used in the context of the critical Burgers equation. The method of modulus of continuity was used in \cite{kiselev-blow} to show smoothness of solutions in the periodic setting. On the other hand, the parabolic De Giorgi method developed in \cite{caffarelli2006drift} was used in \cite{chan2008regularity} to show smoothness of solutions in the non-periodic setting. 
For the case of the supercritical quasi-geostrophic equation, it was shown that the solutions are smooth for large time if $s = 1/2 - \varepsilon$ for a small $\varepsilon$ \cite{silvestre2008eventual} extending the methods of Caffarelli and Vasseur. More precisely the idea is to use the \emph{extra room} in the improvement of oscillation lemma to compensate for the bad scaling. In this article, we prove that the solutions of a slightly supercritical fractional Burger's equation become regular for large time. It is a similar result to the one shown in \cite{silvestre2008eventual} for the quasi-geostrophic equation. It is important to point out that in \cite{kiselev-blow},\cite{ADV},\cite{DongDuLi} it was shown that singularities indeed occur for any $s < 1/2$. What we show here is that they disappear after a certain amount of time. Even though singularities may (and sometimes do) appear during an interval of time $[0,T]$, for $t>T$ they do not occur any more. The amount of time $T$ that we need to wait depends on the initial data and the value of $s$. For any given initial data, $T \to 0$ as $s \to 1/2$. The essential idea of the proof is to combine the ideas from \cite{chan2008regularity} and \cite{silvestre2008eventual}. On the other hand, we can present a completely self contained proof which has been simplified considerably. The idea in the proofs in this paper is still to make the improvement of oscillation in parabolic cylinders compete with the deterioration of the equation due to scaling. The improvement of oscillation lemma is the lemma which allows us to show H\"older continuity when we iterate it at different scales (as in the classical methods of De Giorgi). We present a simple and completely self contained proof of this crucial lemma in this paper (section \ref{s:oscillation}). An alternative approach could be to redo the proof in \cite{chan2008regularity} adapted to general powers of the Laplacian using the extension in \cite{caffarelli2007extension}. 
We find a few advantages in the choice of presenting this new proof of the oscillation lemma in this article. One is that it makes the paper self contained. It also provides a proof that does not use the extension argument and thus it could be generalized to other integral operators instead of the fractional Laplacian. The new proof is essentially a parabolic adaptation of the ideas in \cite{silvestre2006holder}. This proof uses strongly that the equation is non-local. This idea is also used in \cite{hj-new} to obtain a H\"older estimate for critical advection diffusion equations for bounded flows that are not necessarily divergence free. We now state the main result. \begin{thm}\label{mainthm} There exists a universal constant $\alpha\in (0,\frac 12)$ such that if $\theta$ is a solution of \eqref{e:burgers} in $\mathbb R \times [0,+\infty)$ with $\frac{1-\alpha}{2}<s\leq \frac 12$ and initial data $\theta_{0}\in L^{2}$, then there exists $T^{\ast}>0$ such that when $t>T^{\ast}$, $\theta(t)$ is $C^{\alpha}$ ($T^*$ depending only on $\norm{\theta_0}_{L^2}$). \end{thm} \begin{remark} We note that we believe this could be extended to data in any $L^{p}, 1\leq p < \infty$, but for simplicity we do not pursue this here. \end{remark} \noindent Notation: \\ $Q_r = [-r,r] \times [-r^{2s},0]$.\\ $\osc_{Q_r} \theta = \sup_{Q_r} \theta - \inf_{Q_r} \theta$. \section{Preliminaries} \subsection{The notion of a solution and vanishing viscosity approximation} By a \emph{solution} of \eqref{e:burgers} we mean a weak solution (a solution in the sense of distributions) that can be obtained through the vanishing viscosity method. 
In other words it is a limit as $\varepsilon_1 \rightarrow 0$ of solutions satisfying \begin{equation}gin{equation} \label{e:burger-approx} \begin{equation}gin{aligned} \theta_t + \theta \cdot \theta_x + (-\lap)^s \theta - \varepsilon_1 \lap \theta &= 0,\\ \theta(\cdot,0)&=\theta_0 \in L^2(\mathbb R), \end{aligned} \end{equation} where $\theta_0$ is an initial data for \eqref{e:burgers}. For every $\varepsilon_1>0$ and $\theta_0 \in L^2$, the equation \eqref{e:burger-approx} has a solution $\theta$ which is $C^\infty$ for all $t>0$. We list the properties of such solution in the next elementary lemma. \begin{equation}gin{lemma}\label{l:properties-approx} For every $\varepsilon_1>0$ and $\theta_0 \in L^2$, the equation \eqref{e:burger-approx} is well posed and its solution $\theta$ satisfies \begin{equation}gin{enumerate} \item $\theta(\cdot, t) \in C^{\infty}$ for every $t>0$. \item Energy equality: \[\norm{\theta(\cdot, t)}_{L^2(\mathbb R)}^2 + \int_0^t \norm{\theta(\cdot, t)}_{\dot H^s(\mathbb R)}^2 + \varepsilon_1 \norm{\theta(\cdot, t)}_{\dot H^1(\mathbb R)}^2 \dd t = \norm{\theta_0}_{L^2(\mathbb R)}^2.\] where $\dot H^s$ stands for the homogeneous Sobolev space. \item For every $t>0$, $\theta(x,t) \to 0$ as $x \to \pm \infty$. \end{enumerate} \end{lemma} \begin{equation}gin{proof} We consider the operator that maps $\theta$ to the solution of \[ \tilde \theta_t + (-\lap)^s \tilde \theta - \varepsilon_1 \lap \tilde \theta =- \theta \ \theta_x. \] Then we see that the map $A : \theta \mapsto \tilde \theta$ is a contraction in the norm \[ |||\theta||| = \sup_{[0,T] } \norm{\theta(\cdot,t)}_{L^2} + t^{1/2} \norm{\partial_x \theta(\cdot,t)}_{L^2} \] To see that we note \[ |||e^{-t((-\lap)^{s}-\varepsilon_{1}\lap)}\theta_{0}|||\leq C\norm{\theta_{0}}_{L^{2}}. \] (This is an elementary computation using Fourier transform). 
Given $\theta_1$ and $\theta_2$ such that $||| \theta_i ||| \leq R$ for $i = 1,2$, we estimate $|||A\theta_1 - A \theta_2|||$ using Duhamel formula. On one hand we have \begin{equation}gin{align*} || A \theta_1(\cdot,t) - A \theta_2(\cdot,t) ||_{L^2} &\leq C \int_0^t \norm{\theta_1(\cdot,r) \partial_x \theta_1(\cdot,r) - \theta_2(\cdot,r) \partial_x \theta_2(\cdot,r)}_{L^2} \dd r \\ &\leq C \int_0^t \norm{\theta_1-\theta_2}_{L^\infty} \norm{\partial_x \theta_1}_{L^2} + \norm{\theta_2}_{L^\infty} \norm{\partial_x \theta_1 - \partial_x \theta_2}_{L^2} \dd r \\ \intertext{Using the interpolation inequality: $||f||_{L^\infty} \leq ||f||_{L^2}^{1/2} ||f'||_{L^2}^{1/2}$,} &\leq C R \ |||\theta_1 - \theta_2||| \int_0^t (t-r)^{-1/4} \dd r \leq C R |||\theta_1 - \theta_2||| t^{3/4}. \end{align*} On the other hand, we also estimate \begin{equation}gin{align*} t^{1/2} || \partial_x A\theta_1(\cdot,t) - \partial_x A \theta_2(\cdot,t) ||_{L^2} &\leq C t^{1/2} \int_0^t (t-r)^{-{1/2}} \norm{\theta_1(\cdot,r) \partial_x \theta_1(\cdot,r) - \theta_2(\cdot,r) \partial_x \theta_2(\cdot,r)}_{L^2} \dd r \\ &\leq C R t^{1/2} \ |||\theta_1 - \theta_2||| \int_0^t (t-r)^{-3/4} \dd r \leq C R |||\theta_1 - \theta_2||| t^{3/4} \end{align*} Thus, if we choose $T$ small enough (depending on $R$), $A$ will be a contraction in the ball of radius $R$ with respect to the norm $||| \cdot |||$. Therefore, the equation \eqref{e:burger-approx} has a unique solution locally in time for which the norm $|||\cdot|||$ is bounded. A standard bootstrap argument proves that moreover $|||\partial_x^k \theta|||_{L^2} \leq C t^{-k/2}$ for all $k \geq 0$. This proves 1. and 3. for short time. The energy equality 2. follows immediately by multiplying equation \eqref{e:burger-approx} by $\theta$ and integrating by parts. Since the $L^2$ norm of the solution is non increasing, the solution can be continued forever, thus 1. and 3. hold for all time. 
\end{proof} If we let $\varepsilon_1 \to 0$, the energy estimate allows us to obtain a subsequence of solutions of the approximated problem that converges weakly in $L^\infty(L^2) \cap L^2(\dot H^s)$ to a weak solution for which the energy inequality holds. In a later section, we will also prove a bound of the $L^\infty$ norm of $\theta(\cdot, t)$ for $t>0$, that is also independent of $\varepsilon_1$, thus we can also find a subsequence that converges weak-$\ast$ in $L^{\infty}((t,+\infty) \times \mathbb R)$ for every $t>0$. \subsection{A word about scaling} There is a one-parameter group of scalings that keeps the equation invariant. It is given by $\theta_r = r^{2s-1}\theta(rx, r^{2s}t )$. If $\theta$ solves \eqref{e:burgers}, then so does $\theta_r$. In the critical case $s=1/2$, the scaling of the equation keeps the $L^\infty$ norm fixed. This case is critical because the scaling coincides with the a priori estimate given by the maximum principle. We can consider a one parameter scaling that preserves H\"older spaces. The function $\theta_r = r^{-\alpha}\theta(rx, r^{2s}t)$ has the same $C^\alpha$ semi-norm as $\theta$. If we want to prove that $\theta \in C^\alpha$, we will have to deal with this type of scaling, but in this case the equation is not conserved. Instead, if $\theta$ satisfies \eqref{e:burgers}, $\theta_r$ satisfies \[ \partial_t \theta_r + r^{2s - 1 + \alpha} \theta_r \cdot \partial_x \theta_r + (-\lap)^s \theta_r = 0. \] We have an extra factor in front of the nonlinear term. Note that if $\alpha > 1-2s$ (only slightly supercritical) and $r < 1$ (zoom in), this factor is smaller than one. In the case of the equation with the extra term $\varepsilon_1 \lap \theta$, the viscosity will have a larger effect in smaller scales. Indeed, if $\theta$ satisfies \eqref{e:burger-approx}, $\theta_r$ satisfies \[ \partial_t \theta_r + r^{2s - 1 + \alpha} \theta_r \cdot \partial_x \theta_r + (-\lap)^s \theta_r + r^{2s-2} \varepsilon_1 \lap \theta_r= 0. 
\] \section{$L^\infty$ Decay} First, as an immediate consequence of the energy equality in Lemma \ref{l:properties-approx} we have the following lemma. \begin{lemma}\label{l:l2} If $\theta$ is a solution of \eqref{e:burgers}, then \[ \norm{\theta(t)}_{L^2(\mathbb R)}\leq \norm{\theta_0}_{L^2(\mathbb R)}. \] \end{lemma} Nonincreasing properties of $L^p$ norms as above for general $1<p\leq \infty$ for the quasi-geostrophic equations were shown in \cite{Resnickthesis},\cite{CordobaDouble}. Now we have a theorem about the decay of the $L^\infty$ norm. See also \cite{kiselev-blow},\cite{caffarelli2006drift},\cite{chan2008regularity},\cite{silvestre2008eventual}. \begin{thm}\label{thmdecay} If $\theta$ is a solution of \eqref{e:burgers}, then \begin{align} \sup_{x\in \mathbb R}\abs{\theta(x,t)}\leq C(s) t^{-\frac{1}{4s}}\norm{\theta_0}_{L^2(\mathbb R)}, \end{align} where $C(s)=\frac{2s}{C_s^{1/4s}}\sqrt{\tfrac{2}{1+4s}}$, and $C_s$ is the constant appearing in the integral formulation of the fractional Laplacian below. \end{thm} \begin{proof} Let $T>0$ and suppose $\theta$ is a solution of \eqref{e:burger-approx}. Define \[ F(x,t)=t^\frac{1}{p}\theta(x,t), \] for some $p$ to be chosen later. By Lemma \ref{l:properties-approx} there must exist a point $(x_0,t_0)$ such that \begin{align*} \sup_{\mathbb R\times[0,T]}F(x,t)=F(x_0,t_0)<\infty. \end{align*} Observe that $F$ satisfies the following equation \begin{equation}\label{Feq} F_t-\varepsilon_1 \lap F+(-\lap)^sF=\frac{1}{pt}F-\frac{1}{t^\frac1p}F\cdot F_x. \end{equation} At $(x_0,t_0)$ we have \begin{align*} F_t\geq 0,\quad F_x=0,\quad-\lap F\geq 0. \end{align*} Then by \eqref{Feq} \begin{equation}\label{Feq1} (-\lap)^s F(x_0,t_0)\leq \frac{1}{pt_0}F(x_0,t_0). 
\end{equation} Using $F(x_0,t_0)-F(y,t_0)\geq 0$ for all $y\in \mathbb R$, we compute a lower bound for $(-\lap)^sF(x_0,t_0)$ as follows \begin{equation}gin{align} (-\lap)^sF(x_0,t_0)&=C_s \int_\mathbb R \frac{F(x_0,t_0)-F(y,t_0)}{\abs{x_0-y}^{1+2s}}dy\nonumber\\ &\geq C_s \int_{\abs{x_0-y}>R} \frac{F(x_0,t_0)-F(y,t_0)}{\abs{x_0-y}^{1+2s}}dy,\quad\mbox{for any $R>0$}\nonumber\\ &= \frac{C_s}{sR^{2s}}F(x_0,t_0)- C_s\int_{\abs{x_0-y}>R} \frac{F(y,t_0)}{\abs{x_0-y}^{1+2s}}dy\label{Feq2}. \end{align} Next by Cauchy Schwarz \begin{equation}gin{align} \int_{\abs{x_0-y}>R} \frac{F(y,t_0)}{\abs{x_0-y}^{1+2s}}dy&\leq \frac{\tilde {C_s}}{R^{1/2+2s}}\norm{F(t_0)}_{L^2(\mathbb R)}\nonumber\\ &=\frac{\tilde {C_s}t_0^\frac{1}{p}}{R^{1/2+2s}}\norm{\theta(t_0)}_{L^2(\mathbb R)}\leq\frac{\tilde {C_s}t_0^\frac{1}{p}}{R^{1/2+2s}}\norm{\theta_0}_{L^2(\mathbb R)},\label{Feq3} \end{align} where the last inequality follows from Lemma \ref{l:l2} and $\tilde {C_s}=(\tfrac{2}{1+4s})^\frac 12.$ Combine \eqref{Feq1}-\eqref{Feq3} to obtain \[ \frac{1}{pt_0}F(x_0,t_0)\geq C_s(\frac{1}{sR^{2s}}F(x_0,t_0)-\frac{\tilde{C_s}t_0^\frac{1}{p} }{R^{1/2+2s}}\norm{\theta_0}_{L^2(\mathbb R)}), \] or equivalently \[ (\frac{C_s}{sR^{2s}}-\frac{1}{pt_0})F(x_0,t_0)\leq \frac{\tilde{C_s}C_st_0^\frac1p}{R^{1/2+2s}}\norm{\theta_0}_{L^2(\mathbb R)}. \] Let $p=4s$, and choose $R$ so that $\frac{C_s}{R^{2s}}=\frac{1}{2t_0}$. Rearranging we have \[ F(x_0,t_0)\leq C(s)\norm{\theta_0}_{L^2(\mathbb R)}, \] with $C(s)$ as in the statement of the theorem. Finally, from the definition of $F$ \[ \sup_{\mathbb R\times[0,T]}t^\frac{1}{4s}\theta(x,t)\leq C(s)\norm{\theta_0}_{L^2(\mathbb R)}, \] or \[ \sup_{\mathbb R\times[0,T]}\theta(x,t)\leq t^{-\frac{1}{4s}}C(s)\norm{\theta_0}_{L^2(\mathbb R)}, \] and since the estimate is independent of $\varepsilonilon_1$ and $T$ is arbitrary, the theorem follows (note this gives an upper bound for $\theta$. 
To obtain a lower bound we can redo the proof with $F$ defined by $-t^\frac{1}{p}\theta(x,t)$.). \end{proof} \begin{remark} Note that an estimate like \eqref{Feq3} could be obtained using any $L^p$ norm instead of $L^2$. We chose to use $L^2$ because it is the norm that is easiest to show that it stays bounded (using the energy inequality). \end{remark} \section{The oscillation lemma} \label{s:oscillation} \begin{lemma} \label{l:measureToPointEstimate} Let $M_{0} \geqslant 2$ and $s \in [\frac{1}{4} , \frac{1}{2}]$. Assume $\theta\leq 1$ in $\mathbb{R} \times [-\frac{2}{M_{0}} , 0]$ and $\theta$ is a subsolution of \[ \theta_{t} + M \theta \cdot \theta_{x} + (-\triangle )^{s} \theta - \varepsilon_{1} \triangle \theta \leq \varepsilon_{0} , \] in the set $[-5, 5 ] \times [-\frac{2}{M_{0}} , 0] $ where $|M| \leq M_{0}$ and $0 < \varepsilon_{1} \leq 10^{3/2}$. Assume also that \[ |\{\theta \leq 0\}\cap ( [-1,1] \times [-\frac{2}{M_{0}} , -\frac{1}{M_{0}}]) | \geq \mu. \] Then, if $\varepsilon_0$ is small enough (depending only on $\mu$ and $M_0$) there is a $\lambda>0$ (depending only on $\mu$ and $M_0$) such that $\theta \leq 1-\lambda$ in $[-1,1] \times [-\frac{1}{M_{0}} , 0]$. \end{lemma} We will apply the lemma above only to the case when $M$ is constant in $Q_1$. This is not necessary to prove the lemma as it will be apparent in the proof. We are not aware of any possible application of the lemma with variable $M$ (even discontinuous). \begin{proof} Let $m: [-\frac{2}{M_{0}} , 0] \to \mathbb R$ be the solution of the following ODE: \begin{equation} \label{e:ODEform} \begin{aligned} m(- \frac{2}{M_{0}} ) &= 0, \\ m'(t) &= c_0 | \{x \in [-1,1]: \theta(x,t) \leq 0\}| - C_1 m(t). \end{aligned} \end{equation} The above ODE can be solved explicitly and $m(t)$ has the formula \[ m(t) = \int_{- \frac{2}{M_{0}} }^t c_0 | \{x : \theta(x,s) \leq 0\} \cap B_1 | e^{-C_1 (t-s)} \dd s. 
\] We will show that if $c_0$ is small and $C_1$ is large, then $\theta \leq 1 - m(t) + \varepsilon_0$ in $[-1,1] \times [- \frac{1}{M_{0}} ,0]$. This naturally implies the result of the lemma since for $t \in [- \frac{1}{M_{0}} ,0]$, \[ m(t) \geq c_0 e^{- \frac{2C_{1}}{M_{0}} } |\{ \theta \leq 0 \} \cap [-1,1] \times [-\frac{2}{M_{0}} , - \frac{1}{M_{0}} ] | \geq c_0 e^{- \frac{2C_{1}}{M_{0}} }\mu.\] So we can set $\lambda = c_0 e^{- \frac{2C_{1}}{M_{0}} }\mu/2$ for $\varepsilon_0$ small. Let $\beta : \mathbb R \to \mathbb R$ be a fixed smooth nonincreasing function such that $\beta(x)=1$ if $x \leq 1$ and $\beta(x)=0$ if $x \geq 2$. Moreover, we can take $\beta$ with only one inflection point between $0$ and $2$, so that if $\beta \leq \beta_0$ then $\beta'' \geq 0$. Let $b(x,t) = \beta(|x|+ M_{0} t) = \beta (|x| - M_{0} |t|)$. As a function of $x$, $b(x,t)$ looks like a bump function for every fixed $t$. By construction $b_{xx} \geq 0$ if $b \leq \beta_0$. Moreover, at those points where $b = 0$ (precisely where $|x| \geq 2-M_{0}t = 2 + M_{0} |t|)$, $(-\lap)^s b < 0$. Since $b$ is smooth, $(-\lap)^s b$ is continuous and it remains negative for $b$ small enough. Thus, there is some constant $\beta_1$ such that $b_{xx} \geq 0$ and $(-\lap)^s b \leq 0$ if $b \leq \beta_1$. Assume that $\theta(x,t) > 1 - m(t) + \varepsilon_0 (1+t)$ for some point $(x,t) \in [-1,1] \times [-\frac{1}{M_{0}},0]$. We will arrive at a contradiction by looking at the maximum of the function \[ w(x,t) = \theta(x,t) + m(t) b(x,t) - \varepsilon_0 (1+t). \] We are assuming that there is one point in $[-1,1] \times [-\frac{1}{M_{0}},0]$ where $w(x,t) > 1$. 
Let $(x_0,t_0)$ be the point that realizes the maximum of $w$: \[ w(x_0,t_0) = \max_{\mathbb R \times [-\frac{2}{M_{0}},0]} w(x,t).\] (Note $(x_0,t_0)$ exists by the definition of $w$ and Lemma \ref{l:properties-approx}.) Since $w(x_0,t_0) > 1$, by using the fact that $\theta (x_0 , t_0) \leq 1$, we deduce $m(t_0) b(x_0, t_0) > \varepsilon_0 (1+t_0) >0$, which further implies $m(t_0) >0$ (this tells us that $t_{0} > -\frac{2}{M_{0}}$) and $b(x_0 , t_0) > 0$, so $|x_0| < 2 + M_{0} |t_{0}| \leq 4 $. Since the function $w$ realizes a maximum at $(x_0,t_0)$, we have the following elementary inequalities: \begin{equation}gin{align*} w(x_0,t_0) &> 1\\ w_t(x_0,t_0) &\geq 0 \\ w_x(x_0,t_0) &= 0 \\ \lap w(x_0,t_0) &\leq 0 \\ (-\lap)^s w(x_0,t_0) &\geq 0 \end{align*} The last inequality can be turned into a more useful estimate by recalling the integral formula of $(-\lap)^s w$ and looking at the set of points where $\theta \leq 0$. \[ \begin{equation}gin{aligned} (-\lap)^s w(x_0,t_0) &= C_s \int_{\mathbb R} \frac{ w(x_0,t_0) - w(y,t_0) }{|x_0-y|^{1+2s}} \dd y \qquad \text{(Note the integrand is nonnegative)} \\ &\geq C_s \int_{\{y \in [-1,1] : \theta(y,t_0) \leq 0\}} (w(x_0,t_0) - w(y,t_0)) 5^{-1-2s} \dd y\\ &\geq C_s (1-m(t_0)) 5^{-1-2s} |\{y \in [-1,1] : \theta(y,t_0) \leq 0\}| \\ &\geq \frac{C_{s}}{25} (1-m(t_0))|\{y \in [-1,1] : \theta(y,t_0) \leq 0\}| , \end{aligned} \] where the last inequality is valid since $5^{1+2s} \leq 25$ for $\frac{1}{4} \leq s \leq \frac{1}{2}$. We choose the constant $c_0$ in order to make sure that $m(t)$ stays below $1/4$ (simply by choosing $c_0 < 1/8$), and we choose $c_0 \leq \frac{3}{4}\frac{C_{s}}{25} $, so that \begin{equation}gin{equation} \label{e:bound-fraclap} (-\lap)^s w(x_0,t_0) \geq c_0 |\{y \in [-1,1] : \theta(y,t) \leq 0\}|. \end{equation} Note that the constant $C_s$ in the integral form of the fractional Laplacian stays bounded and away from zero as long as $s$ stays away from $0$ and $1$. 
We can consider $C_s$ bounded above and below independently of $s$ as long as $s$ stays in a range away from $0$ and $1$, like for example $s \in [1/4,1/2]$. Now we recall that $w = \theta + mb - \varepsilon_0 (1+t)$ and we rewrite the inequalities in terms of $\theta$. \begin{equation}gin{align*} 1 \geq \theta(x_0,t_0) &\geq 1 - m(t_0) b(x_0,t_0) \geq 3/4 \\ \theta_t (x_0,t_0) &\geq -m'(t_0) b(t_0,x_0) + m(t_0)M_{0} |b_x(x_0,t_0)| + \varepsilon_0\\ \theta_x (x_0,t_0) &= -m(t_0) b_x(x_0 ,t_0) \\ \lap \theta(x_0,t_0) &\leq -m(t_0) \lap b(x_0,t_0) \\ (-\lap)^s \theta(x_0,t_0) &\geq -m(t_0) (-\lap)^s b(x_0,t_0) + c_0 |\{y \in [-1,1] : \theta(y,t_0) \leq 0\}| \end{align*} We consider two cases and obtain a contradiction in both. Either $b(x_0,t_0) > \begin{equation}ta_1$ or $b(x_0,t_0) \leq \begin{equation}ta_1$. Let us start with the latter. If $b(x_0,t_0) \leq \begin{equation}ta_1$, then $\lap b(x_0,t_0) \geq 0$ and $(-\lap)^s b(x_0,t_0) \leq 0$, then \begin{equation}gin{align*} \lap \theta(x_0,t_0) &\leq -m(t_0) \lap b(x_0,t_0) \leq 0 \\ (-\lap)^s \theta(x_0,t_0) &\geq c_0 |\{y \in [-1,1] : \theta(y,t_0) \leq 0\}| \end{align*} Therefore \[ \varepsilon_0 \geq \theta_t + M \theta \theta_x + (-\lap)^s \theta - \varepsilon_1 \lap \theta \geq \varepsilon_0 - m'(t_0) b(x_0) + c_0 |\{y \in [-1,1] : \theta(y,t_0) \leq 0\}| , \] where in the last inequality, we have implicitly use the fact that \[m(t_0)\big(M_0|b_x(x_0 ,t_0)| - M\theta (x_0 ,t_0 ) b_x(x_0 ,t_0)\big) \geq 0,\] since $1\geq\theta (x_0 ,t_0)\geq\frac 34$ and $|M|\leq M_0$. So we obtain \begin{equation}gin{equation*} - m'(t_0) b(x_0) + c_0 |\{y \in [-1,1] : \theta(y,t_0) \leq 0\}| \leq 0, \end{equation*} but this is a contradiction with \eqref{e:ODEform} for any $C_1 \geq 0$. Let us now analyze the case $b(x_0,t_0) > \begin{equation}ta_1$. Since $b$ is a smooth, compactly supported function, there is some constant $C$ (depending on $M_{0}$), such that $|\lap b| \leq C$ and $|(-\lap)^s b| \leq C$. 
Then we have the bounds \begin{equation}gin{align*} \lap \theta(x_0,t_0) &\leq -m(t_0) \lap b(x_0,t_0) \leq C m(t_0) \\ (-\lap)^s \theta(x_0,t_0) &\geq c_0 |\{y \in [-1,1] : \theta(y,t_0) \leq 0\}| - C m(t_0) \end{align*} Therefore \[ \varepsilon_0 \geq \theta_t + M \theta \theta_x + (-\lap)^s \theta - \varepsilon_1 \lap \theta \geq \varepsilon_0 - m'(t_0) b(x_0 ,t_0 ) - C m(t_0) + c_0 |\{y \in [-1,1] : \theta(y,t_0) \leq 0\}| \] and we have \begin{equation}gin{equation*} - m'(t_0) b(x_0,t_0) - C m(t_0) + c_0 |\{y \in [-1,1] : \theta(y,t_0) \leq 0\}| \leq 0. \end{equation*} We replace the value of $m'(t_0)$ in the above inequality using \eqref{e:ODEform} and obtain \[ (C_1 b(x_0,t_0)- C) m(t_0) + c_0 (1- b(x_0,t_0)) |\{y \in [-1,1] : \theta(y,t) \leq 0\}| \leq 0. \] Recalling that $b(x_0,t_0) \geq \begin{equation}ta_1$, we arrive at a contradiction if $C_1$ is chosen large enough. \end{proof} \begin{equation}gin{lemma} \label{l:oscillation-approx} Let $s \in [\frac{1}{4} , \frac{1}{2}]$, and let $\theta$ be a solution of \begin{equation}\label{e:osc2} \theta_t + M \theta \cdot \theta_x + (-\lap)^s \theta - \varepsilon_1 \lap \theta \leq 0, \end{equation} where $|M|\leq 1$ and $\varepsilon_1 \leq 1$. Assume that $|\theta| \leq 1$ in $Q_1$ and $|\theta(x)| \leq |500x|^{2\alpha}$ for $|x|>1$. Then if $\alpha$ is small enough, there is a $\lambda>0$ (which does not depend on $\varepsilon_1$) such that $\osc_{Q_{1/400}} \theta \leq 2-\lambda$. \end{lemma} There is no deep reason for the choice of the number $500$ in the above lemma. But the smaller the cube is, say $Q_{\frac{1}{400}}$, on which the improved oscillation occurs, we need a number, say $500$, which is greater than $400$ in order to make inequality \eqref{c2} hold. In principle, $500$ can be replaced by any number greater than $400$. \begin{equation}gin{proof} We want to apply Lemma \ref{l:measureToPointEstimate} to $\theta$. We check if we have the required hypothesis. We set $M_0=2\cdot10^{1/2}$. 
(The reason for this choice will become clear shortly.) Next, $\theta$ will be either nonnegative or nonpositive in half of the points in $[-10,10] \times [-\frac{2}{M_{0}} , -\frac{1}{M_{0}}] $ (in measure). Let us assume $| \{ (x,t) \in [-1,1] \times [-\frac{2}{M_{0}} , -\frac{1}{M_{0}}] : \theta(x,t) \leq 0 \} | \geq \mu =\frac{1}{M_0}$. (Otherwise, we would continue the proof with $-\theta$ instead of $\theta$ and $-M$.) Next, the hypothesis that we are missing is that $\theta $ may be larger than $1$ outside $Q_1$. Thus we define \[ \overline \theta = \min(\theta,1) .\] We show below $\overline \theta$ satisfies \begin{equation}\label{e:osc21} \overline \theta_{t} + M \overline \theta \cdot \overline \theta_{x} + (-\triangle)^{s} \overline \theta - \varepsilon_{1} \triangle \overline \theta \leq \varepsilon_{0}. \end{equation} over $Q_{1/2}$ for $\varepsilonilon_{0}$ small enough. Since $\theta$ satisfies \eqref{e:osc2} and $\overline \theta=\theta$ on $Q_1$ we must only check the difference of $(-\lap)^s \theta$ and $(-\lap)^s \overline \theta$ since this is the only nonlocal term in the equation. Let $|x| \leq 1/2$ (note below that we cannot take $x\in Q_1$) \[ \begin{equation}gin{aligned} (-\lap)^s \overline \theta(x,t) - (-\lap)^s \theta(x,t) &= C_s \int_{\mathbb R} \frac{ \overline \theta(x,t) - \theta(x,t) - \overline \theta(y,t) + \theta(y,t) }{|x_0-y|^{1+2s}} \dd y \\ &= C_s \int_{\{y : \theta(y,t) > 1\}} \frac{ \theta(y,t) - 1 }{|x_0-y|^{1+2s}} \dd y \\ & \leq C \int_{\{|y| > 1\}} \frac{|500y|^{2\alpha } - 1 }{|y|^{\frac{3}{2}}} \dd y =: \omega(\alpha), \end{aligned}\] where, in the last inequality, we have used the assumption that $ \frac{1}{4} \leq s \leq \frac{1}{2}$. Notice $\omega(\alpha) \to 0$ as $\alpha \to 0$. So we can choose $\alpha>0$ such that $\omega(\alpha) < \varepsilon_0$. Hence $\overline \theta$ satisfies \eqref{e:osc21} over $Q_{1/2}$ as claimed. 
However, in order to apply Lemma \ref{l:measureToPointEstimate}, we need to rescale so that we can have that the inequality holds on $[-5,5]\times [-\frac 2 M_0, 0]$. Since we also need to preserve the condition $\overline \theta \leq 1$ after rescaling, we choose to work with the function $ \overline \theta^{*} (x,t) = \overline \theta (\frac{1}{10} x , \frac{1}{10^{2s}} t) $. Observe that $\overline \theta^{*}$ satisfies the following differential inequality over $Q_5$. \begin{equation}gin{equation} \overline \theta^{*}_{t} + 10^{1-2s} M \overline \theta^{*} \cdot \overline \theta^{*}_{x} + (-\triangle )^{s} \overline \theta^{*} - 10^{2-2s} \varepsilon_{1} \triangle \overline \theta^{*} \leq \frac{\varepsilon_{0}}{10^{2s}} \leq \varepsilon_{0}. \end{equation} Observe that with $M_0=2 \cdot 10^{\frac{1}{2}}$, $[-5,5]\times [-\frac 2 M_0, 0] \subset Q_5$, and $10^{1-2s} |M | \leq M_{0}$. Also $10^{2-2s} \varepsilon_{1} \leq 10^{3/2}$, and since by construction $\overline \theta^{*}\leq 1 \in \mathbb R \times [-\frac 2 M_0, 0]$, we now finally can apply Lemma \ref{l:measureToPointEstimate} and obtain that $ \overline \theta^{*} \leq 1 - \lambda $ over $[-1,1]\times [-\frac{1}{M_{0}} , 0]$, where $\lambda$ depends only on $M_{0} = 2\cdot 10^{\frac{1}{2}}$. However, since we would like to have an improved oscillation on a parabolic cube, we note that $Q_{1/40}=[-\frac{1}{40} , \frac{1}{40}] \times [-\frac{1}{40^{2s}} , 0] \subset [-1,1] \times [-\frac{1}{M_{0}} , 0]$, for $\frac{1}{4} \leq s \leq \frac{1}{2}$. So we have $\overline \theta^{*} \leq 1- \lambda $ over $Q_{1/40}$ . Hence by rescaling $\theta =\overline \theta \leq 1- \lambda$ in $Q_{1/400}$. This completes the proof. \end{proof} \section{Proof of the main result} To simplify the exposition of the proof of theorem \ref{t:main-viscosity}, we first state and establish the following technical but elementary lemma. 
\begin{equation}gin{lemma}\label{l:techlemma} For any $\rho \in (0 , \frac{1}{400})$, there exists some $\alpha_{1} \in (0, \frac{1}{2} )$, depending only on $\rho$, such that for any $0 < \alpha < \alpha_{1}$, the following holds: \begin{equation}gin{align} 1&<\frac{1}{400 \rho } - \frac{1}{\rho } (1- \rho^{\alpha }), \label{c1}\\ \rho^{-\alpha } (2 - \rho^{\alpha })& < 500^{2\alpha } \{ \frac{1}{400 \rho} - \frac{1}{\rho } (1- \rho^{\alpha }) \}^{2\alpha },\label{c2}\\ \rho^{-\alpha } ( 500^{2\alpha } + 1 - \rho^{\alpha } ) & < 500^{2 \alpha } \{ \frac{1}{ \rho } - \frac{1}{\rho } (1- \rho^{\alpha }) \}^{2\alpha }.\label{c3} \end{align} \end{lemma} \begin{equation}gin{proof} \eqref{c1} is immediate by the assumptions on $\rho$. So is \eqref{c2} after we observe that it is equivalent to \[ \rho^{-\frac 12}(2-\rho^\alpha)^\frac{1}{2\alpha} < 500\left(\frac{1}{400\rho}- \frac{1}{\rho}(1-\rho^\alpha)\right). \] Since $\lim_{\alpha \rightarrow 0} \rho^{-\frac 12} (2-\rho^\alpha)^\frac{1}{2\alpha} = \frac{1}{\rho} < \frac{500}{400\rho} =\lim_{\alpha \rightarrow 0} 500\left(\frac{1}{400\rho}- \frac{1}{\rho}(1-\rho^\alpha)\right),$ by continuity, the above inequality holds for sufficiently small $\alpha > 0$. We rearrange \eqref{c3}, and note that it follows from showing that \[ f(\alpha)=\rho^{\alpha}(500^{2\alpha}+1-\rho^\alpha)-500^{2\alpha} \rho^{2\alpha^2}, \] has a local maximum at $0$. This is indeed true, since $f(0)=f'(0)=0$, and \[ f''(0)=\ln \rho (4\ln 500-4-2\ln \rho)<0 , \] for any fixed $\rho \in (0 , \frac{1}{400})$. \end{proof} \begin{equation}gin{thm} \label{t:main-viscosity} Let $\theta$ be a solution of \eqref{e:burger-approx} with $|\theta| \leq 1$ in $\mathbb R \times [-1,0]$. 
There is a small $\alpha \in (0, \frac{1}{2})$ such that if $\frac{1- \alpha}{2} < s < 1/2$ then $\theta$ satisfies \[ |\theta(y,0) - \theta(x,0)| \leq C |x-y|^\alpha \] for some constant $C$ (independent of $\varepsilon_1$) and for all points such that $|x-y|> c \varepsilon_1^{2-2s}$. \end{thm} \begin{equation}gin{proof} Fix $\rho \in (0, \frac{1}{400})$. Let $\alpha_{0}$, and $\alpha_{1}$ be as in Lemma \ref{l:oscillation-approx}, and Lemma \ref{l:techlemma} respectively. Take $\alpha = \min \{ \frac{\alpha_{0}}{2} , \frac{\alpha_{1}}{2}\}$ ($\alpha$ depends only on $\rho$). Next let $\lambda$ be as in Lemma \ref{l:oscillation-approx}. Then if necessary, we can either make $\lambda$ or $\alpha$ smaller, so that $2-\lambda=2\rho^{\alpha}$. Finally, set $\frac{ 1 - \alpha }{2} < s < \frac{1}{2} $. We define the sequence $\theta_k$ recursively for all nonnegative integers $k$ such that $\rho^{(2-2s)k} \geq \varepsilon_1$. We will do it so that every $\theta_k$ satisfies \begin{equation}gin{align} \partial_t \theta_k + M_k \theta_k \partial_x \theta_k + (-\lap)^s \theta_k - \rho^{(2s-2)k} \varepsilon_1 \lap \theta_{k}&= 0 \ \ \text{in } Q_1 \text{ with } M_k \leq 1, \label{e:h1}\\ |\theta_k(x,t)| &\leq 1 \ \ \text{for } (x,t) \in Q_1, \label{e:h2}\\ |\theta_k(x,t)| &\leq 500^{2\alpha} |x|^{2\alpha} \ \ \text{for } |x| \geq 1 \text{ and } t \in [-1,0], \label{e:h3} \end{align} For all $k$, we will have $\theta_k(x,0) = \rho^{-\alpha k} \theta(\rho^k x,0)$. So \eqref{e:h2} implies immediately the result of this theorem. We have to construct the sequence $\theta_k$. We start with $\theta_0 = \theta$ and $M_0=1$ which clearly satisfy the assumptions. Now we define the following ones recursively. Let us assume that we have constructed up to $\theta_k$ and let us construct $\theta_{k+1}$. 
Given the assumptions \eqref{e:h1}, \eqref{e:h2} and \eqref{e:h3}, we can apply Lemma \ref{l:oscillation-approx} as long as $\varepsilon_1 < \rho^{(2-2s)k}$ and obtain that $\osc_{Q_{1/400}} \theta_k \leq 2-\lambda=2\rho^\alpha$. If $\varepsilon_1 \geq \rho^{(2-2s)k}$, we stop the iteration, i.e., we iterate only until the viscosity term becomes large. Since $\osc_{Q_{1/400}} \theta_k \leq 2-\lambda$, there is a number $d \in [-\lambda/2,\lambda/2]$ such that \begin{equation}\label{m1} -1+\lambda/2 \leq \theta_k-d \leq 1 - \lambda/2 ,\quad \forall (x,t) \in Q_{1/400}. \end{equation} Now we define $\theta_{k+1}$ as follows, \[ \theta_{k+1} (x,t) = \rho^{-\alpha} [\theta_k\big( \rho (x + L_t),\rho^{2s}t\big) - d], \] where $L_t=\rho^{2s-1}M_k d t$. The function $\theta_{k+1}$ satisfies the equation \[\partial_t \theta_{k+1} + \rho^{\alpha + 2s - 1} M_k \theta_{k+1} \partial_x \theta_{k+1} + (-\lap)^s \theta_{k+1} - \rho^{(2s-2)(k+1)} \varepsilon_1 \lap \theta_{k} = 0\] so we define $M_{k+1} = \rho^{\alpha + 2s - 1} M_k$. Due to the fact that $\alpha + 2s -1 >0$ for our choice of $s \in (\frac{1- \alpha }{2} , \frac{1}{2})$, we have $M_{k+1} \leq M_k$. Hence, we know that $\theta_{k+1}$ satisfies \eqref{e:h1}. Now, since the graph of $500^{2\alpha} |x|^{2\alpha}$ is symmetric about the y-axis, without loss of generality, suppose $d<0$, so $L_t>0$. To establish \eqref{e:h2} for $\theta_{k+1}$, we first note that by \eqref{m1} we have \begin{equation}\label{m2} -1 + \lambda/2\leq\theta_k\big( \rho (x + L_t),\rho^{2s}t\big) - d \leq 1 - \lambda/2 ,\quad \forall x \in [-\frac{1}{400\rho}-L_t,\frac{1}{400\rho}-L_t], t\in [0,1]. \end{equation} Next we show that the absolute value of the transport term $L_t = \rho^{2s-1} M_k d t $ is small enough, so that $[-1,1]\subset [-\frac{1}{400\rho}-L_t,\frac{1}{400\rho}-L_t]$. 
Indeed, since $M_k d t\leq \frac{\lambda}{2}=(1-\rho^\alpha)$ we have \begin{align*} \frac{1}{400\rho}-\rho^{2s-1} M_k d t&\geq \frac{1}{400\rho}-\rho^{2s-1}(1-\rho^\alpha)\\ &\geq \frac{1}{400\rho}-\frac{1}{\rho}(1-\rho^\alpha) > 1, \end{align*} which holds by \eqref{c1}. We conclude $[-1,1]\subset [-\frac{1}{400\rho}-L_t,\frac{1}{400\rho}-L_t] $. Thus by \eqref{m2} for all $(x,t) \in Q_{1}$ \[ |\theta_{k+1}(x,t)| \leq \rho^{-\alpha} |\theta_k\big( \rho (x + L_t),\rho^{2s}t\big)-d| \leq \frac 1 {1-\lambda/2} (1 - \lambda/2 ) = 1, \] so \eqref{e:h2} holds as needed. Now we introduce \[ \psi(x)=\left\{\begin{array}{ll} 1 & \mbox{if } \abs{x}<1,\\ 500^{2\alpha}\abs{x}^{2\alpha} & \mbox{if } \abs{x}\geq 1. \end{array}\right. \] By the inductive hypothesis \[ |\theta_k(x,t)| \leq \psi (x),\quad t \in [-1,0]. \] Then observe that by definition of $\theta_{k+1}$, in order to establish \eqref{e:h3} for $\theta_{k+1}$, it is enough to show \begin{equation}\label{m3} \big(\rho^{-\alpha}\psi(\rho(x+L_t))+\rho^{-\alpha} |d|\big) \chi_{\{ |x + L_{t}| \geqslant \frac{1}{400\rho} \} } \leq \psi(x). \end{equation} First we note that \[ \big(\rho^{-\alpha}\psi(\rho(x+L_t))+\rho^{-\alpha} |d|\big) \chi_{\{ |x + L_{t}| \geqslant \frac{1}{400\rho } \}} \leqslant \phi_{1}(x) + \phi_{2}(x), \] where $\phi_{1}(x) = \rho^{-\alpha } (2- \rho^{\alpha } ) \chi_{\{ \frac{1}{400\rho } \leqslant |x + L_{t}| < \frac{1}{\rho} \} }$ and $\phi_{2}(x) = \{ \rho^{-\alpha} \psi ( \rho ( x + L_{t} ) ) + \rho^{-\alpha } (1-\rho^{\alpha }) \} \chi_{ \{ |x + L_{t}| \geqslant \frac{1}{\rho } \} }$. So \eqref{m3} will follow if we can show that $\phi_{1} < \psi$ and $\phi_{2} < \psi$.
To show $\phi_{1} < \psi$, we observe that, by \eqref{c2} we have \[ \phi_{1} (\frac{1}{400\rho } - L_{t} ) = \rho^{-\alpha } (2- \rho^{\alpha } ) < \psi ( \frac{1}{400\rho } - \frac{1}{\rho } (1- \rho^{\alpha }) ) \leqslant \psi ( \frac{1}{400\rho } - \rho^{2s-1} (1- \rho^{\alpha }) ) \leqslant \psi ( \frac{1}{400\rho } - L_{t} ). \] Since $ \phi_{1} $ is constant over $[\frac{1}{400\rho } - L_{t} , \frac{1}{\rho} - L_{t}]$, and $\psi (x)$ is strictly increasing for $x \geqslant \frac{1}{400\rho} - L_{t} $, it follows that $ \phi_{1} (\frac{1}{400\rho } - L_{t} ) < \psi ( \frac{1}{400\rho } - L_{t} ) $ implies $\phi_{1} \chi_{[\frac{1}{400\rho } - L_{t} , \frac{1}{\rho} - L_{t}] } < \psi $. On the other hand, it is quite obvious that we must have $ \phi_{1} \chi_{[ -\frac{1}{ \rho } - L_{t} , -\frac{1}{400\rho} - L_{t}] } < \psi.$ Hence we deduce that $\phi_{1} < \psi$. To prove $ \phi_{2} < \psi $, we just need to observe that by \eqref{c3} \[ \phi_{2} ( \frac{1}{\rho } - L_{t} ) = \rho^{-\alpha} \{ 500^{2\alpha} + 1 - \rho^{\alpha } \} < \psi ( \frac{1}{\rho } -\frac{1}{\rho } (1-\rho^{\alpha }) ) \leqslant \psi ( \frac{1}{ \rho } - \rho^{2s-1} (1- \rho^{\alpha }) ) \leqslant \psi (\frac{1}{\rho } - L_{t} ). \] Now, for any point $x \in [ \frac{1}{\rho } - L_{t} , +\infty )$ the derivative of $\phi_{2}$ at $x$ is strictly less than the derivative of $\psi$ at $x$. Because of this, $ \phi_{2} ( \frac{1}{\rho } - L_{t} ) < \psi (\frac{1}{\rho } - L_{t} ) $ at once implies that $\phi_{2} \chi_{[ \frac{1}{\rho} - L_{t} , +\infty )} < \psi$. On the other hand, we also have $\phi_{2} \chi_{(-\infty , -\frac{1}{\rho} - L_{t} ]} < \psi$. Hence we conclude that $\phi_{2} < \psi$, and this completes the proof. \end{proof} \begin{cor} \label{c:main1} Let $\theta$ be a solution of \eqref{e:burgers} with $|\theta| \leq 1$ in $\mathbb R \times [-1,1]$.
There is a small $\alpha \in (0, \frac{1}{2})$ such that if $\frac{1- \alpha}{2} < s < 1/2$ then $\theta(\cdot, t) \in C^\alpha$ for all $t \geq 0$. \end{cor} \begin{proof} For every $\varepsilon_1$, we have a solution $\theta^{\varepsilon_1}$ of \eqref{e:burger-approx} for which we can apply Theorem \ref{t:main-viscosity} in any interval of time $[-1+t,t]$. Since neither the constant $\alpha$ nor $C$ depends on $\varepsilon_1$, for any $h \in \mathbb R$, \[ \theta^{\varepsilon_1}(x+h,t) - \theta^{\varepsilon_1}(x,t) \leq C |h|^\alpha \ \ \text{for all $x \in \mathbb R$ and $t \in [0,1]$}\] for all $\varepsilon_1$ small enough (depending on $|h|$). This estimate passes to the limit as $\varepsilon_1 \to 0$ since $\theta^{\varepsilon_1}(\cdot,0) \to \theta(\cdot,0)$ weak-$\ast$ in $L^\infty$. Moreover, it will hold for all $h$ at the limit, which finishes the proof. \end{proof} Now the proof of the main result follows immediately. \begin{proof}[Proof of Theorem \ref{mainthm}] For any initial data $\theta_0 \in L^2$, by Theorem \ref{thmdecay} the norm $\norm{\theta(\cdot,t)}_{L^\infty(\mathbb R)}$ decays. So all we have to do is wait until it is less than one, and we can apply Corollary \ref{c:main1}. \end{proof} \begin{remark} The only part of the paper where we use that the solution is in $L^2$ is in the proof of the decay of the $L^\infty$ norm (Theorem \ref{thmdecay}). For the rest of the paper, all we use is that the $L^\infty$ norm of $\theta$ will eventually become smaller than one so that we can apply Corollary \ref{c:main1}. Of course there is nothing special about the number one, and a similar estimate can be obtained just by assuming that $\norm{\theta}_{L^\infty} \leq C$. However, the value of $\alpha$ would depend on this $C$. \end{remark} \section*{Acknowledgment} Luis Silvestre was partially supported by NSF grant DMS-0901995 and the Alfred P. Sloan foundation. \end{document}